mkosi-26/.codespellrc

[codespell]
skip = ./.git,./docs/style.css
ignore-words-list = ans

mkosi-26/.dir-locals.el

; Sets emacs variables based on mode.
; A list of (major-mode . ((var1 . value1) (var2 . value2)))
; Mode can be nil, which gives default values.
; Note that we set a wider line width for source files, but for everything
; else we stick to a more conservative 79 characters.
; NOTE: Keep this file in sync with .editorconfig.
((python-mode . ((indent-tabs-mode . nil)
                 (tab-width . 4)
                 (fill-column . 109)
                 (python-indent-def-block-scale . 1)))
 (python-ts-mode . ((indent-tabs-mode . nil)
                    (tab-width . 4)
                    (fill-column . 109)
                    (python-indent-def-block-scale . 1)))
 (sh-mode . ((sh-basic-offset . 4)
             (sh-indentation . 4)))
 (markdown-mode . ((fill-column . 109)))
 (nil . ((indent-tabs-mode . nil)
         (tab-width . 4)
         (fill-column . 79))))

mkosi-26/.editorconfig

root = true

[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8
indent_style = space
indent_size = 4

[*.{py,md}]
max_line_length = 109

[*.{yaml,yml}]
indent_size = 2

mkosi-26/.github/ISSUE_TEMPLATE/bug_report.yml

name: Bug Report
description: A report of an error in mkosi
labels: ["bug"]
body:
  - type: markdown
    attributes:
      value: Thanks for taking the time to fill out this bug report!
  - type: input
    id: version
    attributes:
      label: mkosi commit the issue has been seen with
      description: |
        Please do not submit bug reports against older releases, but use your distribution bug tracker.
        Please also test whether your bug has already been resolved on the current git main.
      placeholder: 'main'
    validations:
      required: true
  - type: input
    id: hostdistro
    attributes:
      label: Used host distribution
      description: Used distribution on the host (or in the tools tree) and its version
      placeholder: Fedora 39
    validations:
      required: false
  - type: input
    id: targetdistro
    attributes:
      label: Used target distribution
      description: Used distribution for the image and its version
      placeholder: Fedora 39
    validations:
      required: false
  - type: input
    id: kernel
    attributes:
      label: Linux kernel version used
      description: |
        Please use `uname -r` to get the linux kernel version.
      placeholder: kernel-6.6.8-200.fc39.x86_64
    validations:
      required: false
  - type: dropdown
    id: architecture
    attributes:
      label: CPU architectures issue was seen on
      options:
        - aarch64
        - alpha
        - arm
        - i686
        - ia64
        - loongarch
        - mips
        - parisc
        - ppc (big endian)
        - ppc64 (big endian)
        - ppc64le
        - riscv64
        - s390x
        - sparc
        - sparc64
        - x86_64
        - other
    validations:
      required: false
  - type: textarea
    id: unexpected-behaviour
    attributes:
      label: Unexpected behaviour you saw
    validations:
      required: false
  - type: textarea
    id: config
    attributes:
      label: Used mkosi config
      description: |
        Please add a, preferably minimised, mkosi config to reproduce the issue here.
      placeholder: This will be automatically formatted into code, so no need for backticks.
      render: ini
    validations:
      required: false
  - type: textarea
    id: logs
    attributes:
      label: mkosi output
      description: |
        Please paste the full mkosi debug output here.
      placeholder: This will be automatically formatted into code, so no need for backticks.
      render: sh
    validations:
      required: false

mkosi-26/.github/ISSUE_TEMPLATE/config.yml

---
# vi: ts=2 sw=2 et:
# SPDX-License-Identifier: LGPL-2.1-or-later
blank_issues_enabled: true
contact_links:
  - name: mkosi Matrix room
    url: https://matrix.to/#/#mkosi:matrix.org
    about: Please ask (and answer) questions here, use the issue tracker only for issues.

mkosi-26/.github/dependabot.yml

# SPDX-License-Identifier: LGPL-2.1-or-later
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "monthly"
    open-pull-requests-limit: 2

mkosi-26/.github/workflows/ci.yml

name: CI

on:
  pull_request:
    branches:
      - main

jobs:
  unit-test:
    runs-on: ubuntu-24.04
    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}
      cancel-in-progress: true
    steps:
      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
      - name: Install
        run: |
          sudo apt-get install --assume-yes --no-install-recommends pandoc python3-pytest shellcheck
          python3 -m pip install --break-system-packages --upgrade setuptools wheel pip
          python3 -m pip install --break-system-packages codespell mypy reuse ruff
          npm install -g pyright
      - name: Run ruff format
        run: |
          ruff --version
          if ! ruff format --check --quiet mkosi/ tests/ kernel-install/*.install
          then
            echo "Please run 'ruff format' on the above files or apply the diffs below manually"
            ruff format --check --quiet --diff mkosi/ tests/ kernel-install/*.install
          fi
      - name: Run ruff check
        run: |
          ruff --version
          ruff check --output-format=github mkosi/ tests/ kernel-install/*.install
      - name: Check that tabs are not used in code
        run: sh -c '! git grep -P "\\t" "*.py"'
      - name: Spell Checking (codespell)
        run: |
          codespell --version
          codespell
      - name: License Checking (reuse)
        run: |
          reuse --version
          if ! reuse lint
          then
            echo "Hint: If the above output lists unlicensed files tracked in git you might need to adjust REUSE.toml"
            exit 1
          fi
      - name: Type Checking (mypy)
        run: |
          python3 -m mypy --version
          python3 -m mypy mkosi/ tests/ kernel-install/*.install
      - name: Type Checking (pyright)
        run: |
          pyright --version
          pyright mkosi/ tests/ kernel-install/*.install
      - name: Unit Tests
        run: |
          python3 -m pytest --version
          python3 -m pytest -sv tests/
      - name: Test execution from current working directory
        run: python3 -m mkosi -h
      - name: Test execution from current working directory (sudo call)
        run: sudo python3 -m mkosi -h
      - name: Test venv installation
        run: |
          python3 -m venv testvenv
          testvenv/bin/python3 -m pip install --upgrade setuptools wheel pip
          testvenv/bin/python3 -m pip install .
          testvenv/bin/mkosi -h
          rm -rf testvenv
      - name: Test editable venv installation
        run: |
          python3 -m venv testvenv
          testvenv/bin/python3 -m pip install --upgrade setuptools wheel pip
          testvenv/bin/python3 -m pip install --editable .
          testvenv/bin/mkosi -h
          rm -rf testvenv
      - name: Test zipapp creation
        run: |
          ./tools/generate-zipapp.sh
          ./builddir/mkosi -h
          ./builddir/mkosi documentation
      - name: Run shellcheck on scripts
        run: |
          bash -c 'shopt -s globstar; shellcheck bin/mkosi tools/*.sh'
          bin/mkosi completion bash | shellcheck -
      - name: Test man page generation
        run: tools/make-man-page.sh

  integration-test:
    runs-on: ${{ matrix.runner }}
    needs: unit-test
    concurrency:
      group: ${{ github.workflow }}-${{ matrix.distro }}-${{ matrix.tools }}-${{ matrix.runner }}-${{ github.ref }}
      cancel-in-progress: true
    strategy:
      fail-fast: false
      matrix:
        distro:
          - arch
          - centos
          - debian
          - fedora
          - opensuse
          - ubuntu
        tools:
          - arch
          - centos
          - debian
          - fedora
          - opensuse
          - ubuntu
        runner:
          - ubuntu-24.04
        exclude:
          # pacman is not packaged in EPEL.
          - distro: arch
            tools: centos
          # apt and debian-keyring are not packaged in EPEL.
          - distro: debian
            tools: centos
          - distro: ubuntu
            tools: centos
          # pacman is not packaged in openSUSE.
          - distro: arch
            tools: opensuse
          # apt, debian-keyring and ubuntu-keyring are not packaged in openSUSE.
          - distro: debian
            tools: opensuse
          - distro: ubuntu
            tools: opensuse
        include:
          # low rate limit on s390x/ppc64le/arm64 workers
          - distro: debian
            tools: debian
            runner: ubuntu-24.04-arm
          - distro: fedora
            tools: fedora
            runner: ubuntu-24.04-arm
          - distro: opensuse
            tools: opensuse
            runner: ubuntu-24.04-arm
          - distro: ubuntu
            tools: ubuntu
            runner: ubuntu-24.04-arm
          - distro: fedora
            tools: fedora
            runner: ubuntu-24.04-ppc64le
          - distro: debian
            tools: debian
            runner: ubuntu-24.04-ppc64le
          - distro: debian
            tools: debian
            runner: ubuntu-24.04-s390x
    steps:
      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
      - uses: ./

      # Freeing up disk space with rm -rf can take multiple minutes. Since we don't need the extra free space
      # immediately, we remove the files in the background. However, we first move them to a different location so
      # that nothing tries to use anything in these directories anymore while we're busy deleting them.
      - name: Free disk space
        run: |
          sudo mv /usr/local /usr/local.trash
          sudo mv /opt/hostedtoolcache /opt/hostedtoolcache.trash
          sudo systemd-run rm -rf /usr/local.trash /opt/hostedtoolcache.trash

      # Make sure the latest changes from the pull request are used.
      - name: Install
        run: sudo ln -svf $PWD/bin/mkosi /usr/bin/mkosi
        working-directory: ./
      - name: Configure
        run: |
          tee mkosi.local.conf <&2 cat EOF
          chmod +x mkosi.configure
          # prepare and postinst are already used in CI
          for script in sync build finalize postoutput clean
          do
            [[ -f "mkosi.${script}" ]] && exit 1
            tee "mkosi.${script}" <&2 TOK
            chmod +x "mkosi.${script}"
          done
      - name: Generate key
        run: sudo mkosi genkey
      - name: Summary
        run: sudo mkosi summary
      - name: Build tools tree
        run: sudo mkosi -f box -- true
      - name: Build image
        run: sudo mkosi -f
      - name: Run integration tests
        run: |
          # Without KVM the tests are way too slow and time out
          if [[ -e /dev/kvm ]]; then
            sudo mkosi box -- \
              timeout -k 30 1h \
              python3 -m pytest \
                --tb=no \
                --capture=no \
                --verbose \
                -m integration \
                --distribution ${{ matrix.distro }} \
                tests/
          fi

mkosi-26/.github/workflows/codeql.yml

---
# vi: ts=2 sw=2 et:
#
name: "CodeQL"

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

permissions:
  contents: read

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-24.04
    concurrency:
      group: ${{ github.workflow }}-${{ matrix.language }}-${{ github.ref }}
      cancel-in-progress: true
    permissions:
      actions: read
      security-events: write
    strategy:
      fail-fast: false
      matrix:
        language: ['python']
    steps:
      - name: Checkout repository
        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
      - name: Initialize CodeQL
        uses: github/codeql-action/init@fe4161a26a8629af62121b670040955b330f9af2
        with:
          languages: ${{ matrix.language }}
          queries: +security-extended,security-and-quality
      - name: Autobuild
        uses: github/codeql-action/autobuild@fe4161a26a8629af62121b670040955b330f9af2
      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@fe4161a26a8629af62121b670040955b330f9af2

mkosi-26/.gitignore

.venv
*.cache-pre-dev
*.cache-pre-inst
.cache
.mkosi.1
.mkosi-addon.1
.mkosi-initrd.1
.mkosi-sandbox.1
.mypy_cache/
.project
.pydevproject
.pytest_cache/
/.mkosi-*
/SHA256SUMS
/SHA256SUMS.gpg
/build
/dist
/mkosi.build
/mkosi.egg-info
/mkosi.cache
/mkosi.output
/mkosi.nspawn
/mkosi.rootpw
mkosi.local
mkosi.local.conf
mkosi.tools
mkosi.tools.manifest
/mkosi.key
/mkosi.crt
__pycache__
blog/output
blog/pelicanconf.py
blog/publishconf.py

mkosi-26/.mailmap

Daan De Meyer
Jörg Behrmann
Neal Gompa (ニール・ゴンパ)

mkosi-26/.obs/workflows.yml

rebuild:
  steps:
    - trigger_services:
        project: system:systemd
        package: mkosi
  filters:
    event: push
    branches:
      only:
        - main

mkosi-26/LICENSES/GPL-2.0-only.txt

[Verbatim text of the GNU General Public License, version 2 (June 1991), as
published by the Free Software Foundation.]
mkosi-26/LICENSES/LGPL-2.1-or-later.txt

[Verbatim text of the GNU Lesser General Public License, version 2.1
(February 1999), as published by the Free Software Foundation.]

mkosi-26/LICENSES/OFL-1.1.txt

Copyright 2014 The Heebo Project Authors (https://github.com/OdedEzer/heebo)
This Font Software is licensed under the SIL Open Font License, Version 1.1.
[Verbatim text of the SIL Open Font License, version 1.1 (26 February 2007).]
mkosi-26/LICENSES/PSF-2.0.txt000066400000000000000000000045731512054777600153470ustar00rootroot00000000000000PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated documentation. 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019 Python Software Foundation; All Rights Reserved" are retained in Python alone or in any derivative version prepared by Licensee. 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python. 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement. mkosi-26/MANIFEST.in000066400000000000000000000000201512054777600141710ustar00rootroot00000000000000include LICENSE mkosi-26/NEWS.md000077700000000000000000000000001512054777600222272mkosi/resources/man/mkosi.news.7.mdustar00rootroot00000000000000mkosi-26/README.md000066400000000000000000000152611512054777600137270ustar00rootroot00000000000000# mkosi — Build Bespoke OS Images A fancy wrapper around `dnf --installroot`, `apt`, `pacman` and `zypper` that generates customized disk images with a number of bells and whistles. For a longer description and available features and options, see the [man page](mkosi/resources/man/mkosi.1.md) or run `mkosi documentation`. When getting started see the `EXAMPLES` section. Packaging status # Installation You can install mkosi from your distribution using its package manager or install the development version from git. If you install mkosi using your distribution's package manager, make sure it installs at least mkosi v16 or newer (Use `mkosi --version` to check). 
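For example, a quick check could look like this (a sketch only; it assumes the output of `mkosi --version` contains the major version as its first number):

```shell
# Print the installed major version and warn if it is older than v16.
version="$(mkosi --version | grep -oE '[0-9]+' | head -n1)"
if [ "${version:-0}" -lt 16 ]; then
    echo "mkosi is too old, see the alternative installation methods below"
fi
```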
If your distribution only packages an older version of mkosi, it is recommended to install mkosi using one of the alternative installation methods listed below instead. ## Running mkosi from the repository To run mkosi straight from its git repository, you can invoke the shim `bin/mkosi`. The `MKOSI_INTERPRETER` environment variable can be set when using the `bin/mkosi` shim to configure the python interpreter used to execute mkosi. The shim can be symlinked to e.g. `~/.local/bin` to make it accessible from the `PATH`. Note that to make this work you might have to add `~/.local/bin` to your user's `PATH`. ```shell git clone https://github.com/systemd/mkosi ln -s $PWD/mkosi/bin/mkosi ~/.local/bin/mkosi mkosi --version ``` mkosi also provides other companion tools that can be enabled in a similar manner. ```shell ln -s $PWD/mkosi/bin/mkosi-addon ~/.local/bin/mkosi-addon ln -s $PWD/mkosi/bin/mkosi-initrd ~/.local/bin/mkosi-initrd ln -s $PWD/mkosi/bin/mkosi-sandbox ~/.local/bin/mkosi-sandbox ``` ## Python installation methods mkosi can also be installed straight from the git repository URL using `pipx`: ```shell pipx install git+https://github.com/systemd/mkosi.git mkosi --version ``` which will transparently install mkosi into a Python virtual environment and a mkosi binary to `~/.local/bin`. This is, up to the path of the virtual environment and the mkosi binary, equivalent to ```shell python3 -m venv mkosivenv mkosivenv/bin/pip install git+https://github.com/systemd/mkosi.git mkosivenv/bin/mkosi --version ``` You can also package mkosi as a [zipapp](https://docs.python.org/3/library/zipapp.html) that you can deploy anywhere in your `PATH`. Running this will leave a `mkosi` binary in `builddir/` ```shell git clone https://github.com/systemd/mkosi cd mkosi tools/generate-zipapp.sh builddir/mkosi --version ``` Besides the mkosi binary, you can also call mkosi via ```shell python3 -m mkosi ``` when not installed as a zipapp. Please note that the python module exists solely for the usage of the mkosi binary and is not to be considered a public API. ## Installing packages from upstream repositories mkosi packages for Debian, Ubuntu, Fedora and SUSE are built from latest main and published as repositories for the respective distributions on OBS. [Follow these instructions to enable the appropriate repository](https://software.opensuse.org//download.html?project=system%3Asystemd&package=mkosi). ## kernel-install plugins mkosi can also be used as a kernel-install plugin to build initrds and addons. It is recommended to use only one of these two plugins at a given time. ### UKI plugin To enable this feature, install `kernel-install/50-mkosi.install` into `/usr/lib/kernel/install.d`. Extra distro configuration for the initrd can be configured in `/usr/lib/mkosi-initrd`. Users can add their own customizations in `/etc/mkosi-initrd`. A full self-contained UKI will be built and installed. Once installed, the mkosi plugin can be enabled by writing `initrd_generator=mkosi-initrd` and `layout=uki` to `/usr/lib/kernel/install.conf` or to `/etc/kernel/install.conf`. ### Addon plugin To enable this feature, install `kernel-install/51-mkosi-addon.install` into `/usr/lib/kernel/install.d`. Extra distro configuration for the addon can be configured in `/usr/lib/mkosi-addon`. Users can add their own customizations in `/etc/mkosi-addon` and `/run/mkosi-addon`. Note that unless at least one of the last two directories is present, the plugin will not operate. 
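For example, enabling the addon plugin could look like this (a sketch; it assumes you run it as root from a mkosi checkout, and that creating one of the configuration directories is all that is needed to activate the plugin, as described above):

```shell
# Install the kernel-install plugin and create one of the directories whose
# presence activates it (/etc/mkosi-addon or /run/mkosi-addon).
install -m 0755 kernel-install/51-mkosi-addon.install /usr/lib/kernel/install.d/
mkdir -p /etc/mkosi-addon
```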
This plugin is useful to enhance a vendor-provided UKI with local-only modifications. # Hacking on mkosi To hack on mkosi itself you will also need [mypy](https://github.com/python/mypy), for type checking, and [pytest](https://github.com/pytest-dev/pytest), to run tests. We check tests and typing in CI (see `.github/workflows`), but you can run the tests locally as well. # References * [Primary mkosi git repository on GitHub](https://github.com/systemd/mkosi/) * [A re-introduction to mkosi — A Tool for Generating OS Images](https://0pointer.net/blog/a-re-introduction-to-mkosi-a-tool-for-generating-os-images.html) * [The mkosi OS generation tool](https://lwn.net/Articles/726655/) story on LWN (2017) * [systemd-repart: Building Discoverable Disk Images](https://media.ccc.de/v/all-systems-go-2023-191-systemd-repart-building-discoverable-disk-images) and [mkosi: Building Bespoke Operating System Images](https://media.ccc.de/v/all-systems-go-2023-190-mkosi-building-bespoke-operating-system-images) talks at All Systems Go! 2023 * [Building RHEL and RHEL UBI images with mkosi](https://fedoramagazine.org/create-images-directly-from-rhel-and-rhel-ubi-package-using-mkosi/) an article in Fedora Magazine (2023) * [Building USIs with mkosi](https://overhead.neocities.org/blog/build-usi-mkosi/) * [Constellation 💖 mkosi — Minimal TCB, tailor-made for measured boot](https://www.edgeless.systems/blog/constellation-mkosi-minimal-tcb-tailor-made-for-measured-boot/) * [Streamlining kernel hacking with mkosi-kernel](https://video.fosdem.org/2024/ub5132/fosdem-2024-2209-streamlining-kernel-hacking-with-mkosi-kernel.av1.webm) * [mkosi-initrd: Building initrds out of distribution packages](https://video.fosdem.org/2024/ua2118/fosdem-2024-2888-mkosi-initrd-building-initrds-out-of-distribution-packages.av1.webm) * [Running systemd integration tests with mkosi](https://video.fosdem.org/2024/ud2208/fosdem-2024-3431-running-systemd-integration-tests-with-mkosi.av1.webm) * [Arch Linux rescue image with mkosi](https://swsnr.de/archlinux-rescue-image-with-mkosi) * [Building vagrant images with mkosi](https://vdwaa.nl/mkosi-vagrant-images.html#mkosi-vagrant-images) * [Building images with mkosi on the Open Build Service (OBS)](https://openbuildservice.org/help/manuals/obs-user-guide/cha-obs-package-formats#sec-pkgfmt-mkosi) ## Community Find us on Matrix at [#mkosi:matrix.org](https://matrix.to/#/#mkosi:matrix.org). mkosi-26/REUSE.toml000066400000000000000000000032531512054777600142260ustar00rootroot00000000000000# NOTE: This project does not attribute contributors individually. Instead refer to `git log --format="%an <%aE>" | sort -u` for a list of individual contributors. 
version = 1 SPDX-PackageName = "mkosi" SPDX-PackageSupplier = "systemd" SPDX-PackageDownloadLocation = "https://github.com/systemd/mkosi" [[annotations]] path = [ ".codespellrc", ".dir-locals.el", ".editorconfig", "bin/mkosi", "docs/CNAME", "**.gitignore", "**.bash", "**.build", "**.chroot", "**.conf", "**.css", "**.html", "**.in", "**.install", "**.json", "**.mailmap", "**.md", "**.png", "**.postinst", "**.postoutput", "**.prepare", "**.preset", "**.py", "**.service", "**.sources", "**.sh", "**.svg", "**.toml", "**.yaml", "**.yml", "**.zsh", "mkosi/resources/pandoc/*.lua", ] precedence = "aggregate" SPDX-FileCopyrightText = "Mkosi Contributors" SPDX-License-Identifier = "LGPL-2.1-or-later" [[annotations]] path = [ "mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-dm.rules", "mkosi/resources/mkosi-initrd/mkosi.profiles/raid/mkosi.extra/usr/lib/udev/rules.d/70-mkosi-initrd-md.rules", ] precedence = "aggregate" SPDX-FileCopyrightText = "Mkosi Contributors" SPDX-License-Identifier = "GPL-2.0-only" [[annotations]] path = [ "mkosi/backport.py", ] precedence = "aggregate" SPDX-FileCopyrightText = "Mkosi Contributors" SPDX-License-Identifier = "PSF-2.0" [[annotations]] path = [ "docs/fonts/heebo-bold.woff", "docs/fonts/heebo-regular.woff", ] precedence = "aggregate" SPDX-FileCopyrightText = "Mkosi Contributors" SPDX-License-Identifier = "OFL-1.1" mkosi-26/action.yaml000066400000000000000000000076611512054777600146160ustar00rootroot00000000000000name: setup-mkosi description: Install mkosi runs: using: composite steps: - name: Permit unprivileged access to kvm, vhost-vsock and vhost-net devices shell: bash run: | sudo mkdir -p /etc/tmpfiles.d sudo cp /usr/lib/tmpfiles.d/static-nodes-permissions.conf /etc/tmpfiles.d/ sudo sed -i '/kvm/s/0660/0666/g' /etc/tmpfiles.d/static-nodes-permissions.conf sudo sed -i '/vhost/s/0660/0666/g' /etc/tmpfiles.d/static-nodes-permissions.conf sudo tee /etc/udev/rules.d/99-kvm4all.rules <<- EOF KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm" KERNEL=="vhost-vsock", GROUP="kvm", MODE="0666", OPTIONS+="static_node=vhost-vsock" KERNEL=="vhost-net", GROUP="kvm", MODE="0666", OPTIONS+="static_node=vhost-net" EOF sudo udevadm control --reload-rules # kvm/vhost might not be available (e.g.: s390x, ppc64le) sudo modprobe kvm || true sudo modprobe vhost_vsock || true sudo modprobe vhost_net || true [[ -e /dev/kvm ]] && sudo udevadm trigger --name-match=kvm [[ -e /dev/vhost-vsock ]] && sudo udevadm trigger --name-match=vhost-vsock [[ -e /dev/vhost-net ]] && sudo udevadm trigger --name-match=vhost-net [[ -e /dev/kvm ]] && sudo chmod 666 /dev/kvm [[ -e /dev/vhost-vsock ]] && sudo chmod 666 /dev/vhost-vsock [[ -e /dev/vhost-net ]] && sudo chmod 666 /dev/vhost-net lsmod [[ -e /dev/kvm ]] && ls -l /dev/kvm [[ -e /dev/vhost-vsock ]] && ls -l /dev/vhost-vsock [[ -e /dev/vhost-net ]] && ls -l /dev/vhost-net id - name: Check clock source shell: bash run: cat /sys/devices/system/clocksource/clocksource0/current_clocksource - name: Show environment shell: bash run: env - name: Show CPU shell: bash run: lscpu - name: Show memory shell: bash run: lsmem - name: Enable unprivileged user namespaces shell: bash run: | sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_unconfined=0 sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_userns=0 - name: Create missing mountpoints shell: bash run: | sudo mkdir -p /var/lib/ca-certificates # Both the unix-chkpwd and swtpm profiles are broken 
(https://gitlab.com/apparmor/apparmor/-/issues/402) so let's # just disable and remove apparmor completely. It's not relevant in this context anyway. # TODO: Remove if https://github.com/actions/runner-images/issues/10015 is ever fixed. - name: Disable and mask apparmor service shell: bash run: | # This command fails with a non-zero error code even though it unloads the apparmor profiles. # https://gitlab.com/apparmor/apparmor/-/issues/403 sudo aa-teardown || true sudo apt-get remove apparmor - name: Ensure git history is available shell: bash run: | if [[ ! -e "$X_GITHUB_ACTION_PATH/.git" ]]; then rm -rf "$X_GITHUB_ACTION_PATH" git clone "https://github.com/$X_GITHUB_ACTION_REPOSITORY" "$X_GITHUB_ACTION_PATH" git -C "$X_GITHUB_ACTION_PATH" checkout "$X_GITHUB_ACTION_REF" fi # https://github.com/actions/runner/issues/2473 env: X_GITHUB_ACTION_REPOSITORY: ${{ github.action_repository }} X_GITHUB_ACTION_PATH: ${{ github.action_path }} X_GITHUB_ACTION_REF: ${{ github.action_ref }} - name: Install shell: bash run: sudo ln -svf $X_GITHUB_ACTION_PATH/bin/mkosi /usr/bin/mkosi # https://github.com/actions/runner/issues/2473 env: X_GITHUB_ACTION_PATH: ${{ github.action_path }} - name: Dependencies shell: bash run: | sudo apt-get install --assume-yes --no-install-recommends \ debian-archive-keyring \ dnf \ makepkg \ pacman-package-manager \ zypper mkosi-26/bin/000077500000000000000000000000001512054777600132135ustar00rootroot00000000000000mkosi-26/bin/mkosi000077500000000000000000000013531512054777600142650ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: LGPL-2.1-or-later set -e PYTHONPATH="$(dirname "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")")" export PYTHONPATH command="$(basename "${BASH_SOURCE[0]//-/.}")" if [ -z "$MKOSI_INTERPRETER" ]; then # Note the check seems to be inverted here because the if branch is # executed when the exit status is 0 which is equal to False in Python. if python3 -c "import sys; sys.exit(sys.version_info < (3, 9))"; then MKOSI_INTERPRETER=python3 elif command -v python3.9 >/dev/null; then MKOSI_INTERPRETER=python3.9 else echo "mkosi needs python 3.9 or newer (found $(python3 --version))" exit 1 fi fi exec "$MKOSI_INTERPRETER" -B -m "$command" "$@" mkosi-26/bin/mkosi-addon000077700000000000000000000000001512054777600164032mkosiustar00rootroot00000000000000mkosi-26/bin/mkosi-initrd000077700000000000000000000000001512054777600166072mkosiustar00rootroot00000000000000mkosi-26/bin/mkosi-sandbox000077700000000000000000000000001512054777600167542mkosiustar00rootroot00000000000000mkosi-26/blog/000077500000000000000000000000001512054777600133665ustar00rootroot00000000000000mkosi-26/blog/content/000077500000000000000000000000001512054777600150405ustar00rootroot00000000000000mkosi-26/blog/content/a-reintroduction-to-mkosi.md000066400000000000000000000366451512054777600224260ustar00rootroot00000000000000Title: A re-introduction to mkosi -- A Tool for Generating OS Images Date: 2024-01-10 > This is a guest post written by Daan De Meyer, systemd and mkosi > maintainer Almost 7 years ago, Lennart first [wrote](https://0pointer.net/blog/mkosi-a-tool-for-generating-os-images.html) about `mkosi` on this blog. Some years ago, I took over development and there's been a huge amount of changes and improvements since then. So I figure this is a good time to re-introduce `mkosi`. [`mkosi`](https://github.com/systemd/mkosi) stands for *Make Operating System Image*. It generates OS images that can be used for a variety of purposes. 
If you prefer watching a video over reading a blog post, you can also watch my [presentation](https://www.youtube.com/watch?v=6EelcbjbUa8) on `mkosi` at All Systems Go 2023. ## What is mkosi? `mkosi` was originally written as a tool to simplify hacking on systemd and for experimenting with images using many of the new concepts being introduced in systemd at the time. In the meantime, it has evolved into a general purpose image builder that can be used in a multitude of scenarios. Instructions to install `mkosi` can be found in its [readme](https://github.com/systemd/mkosi/blob/main/README.md). We recommend running the latest version to take advantage of all the latest features and bug fixes. You'll also need `bubblewrap` and the package manager of your favorite distribution to get started. At its core, the workflow of `mkosi` can be divided into 3 steps: 1. Generate an OS tree for some distribution by installing a set of packages. 2. Package up that OS tree in a variety of output formats. 3. (Optionally) Boot the resulting image in `qemu` or `systemd-nspawn`. Images can be built for any of the following distributions: - Fedora Linux - Ubuntu - OpenSUSE - Debian - Arch Linux - CentOS Stream - RHEL - Rocky Linux - Alma Linux And the following output formats are supported: - GPT disk images built with `systemd-repart` - Tar archives - CPIO archives (for building initramfs images) - USIs (Unified System Images which are full OS images packed in a UKI) - Sysext, confext and portable images - Directory trees For example, to build an Arch Linux GPT disk image and boot it in `qemu`, you can run the following command: ```sh $ mkosi -d arch -p systemd -p udev -p linux -t disk qemu ``` To instead boot the image in systemd-nspawn, replace `qemu` with `boot`: ```sh $ mkosi -d arch -p systemd -p udev -p linux -t disk boot ``` The actual image can be found in the current working directory named `image.raw`. However, using a separate output directory is recommended which is as simple as running `mkdir mkosi.output`. To rebuild the image after it's already been built once, add `-f` to the command line before the verb to rebuild the image. Any arguments passed after the verb are forwarded to either `systemd-nspawn` or `qemu` itself. To build the image without booting it, pass `build` instead of `boot` or `qemu` or don't pass a verb at all. By default, the disk image will have an appropriately sized root partition and an ESP partition, but the partition layout and contents can be fully customized using `systemd-repart` by creating partition definition files in `mkosi.repart/`. This allows you to customize the partition as you see fit: - The root partition can be encrypted. - Partition sizes can be customized. - Partitions can be protected with signed dm-verity. - You can opt out of having a root partition and only have a /usr partition instead. - You can add various other partitions, e.g. an XBOOTLDR partition or a swap partition. - ... As part of building the image, we'll run various tools such as `systemd-sysusers`, `systemd-firstboot`, `depmod`, `systemd-hwdb` and more to make sure the image is set up correctly. ## Configuring mkosi image builds Naturally with extended use you don't want to specify all settings on the command line every time, so `mkosi` supports configuration files where the same settings that can be specified on the command line can be written down. 
For example, the command we used above can be written down in a configuration file `mkosi.conf`: ```ini [Distribution] Distribution=arch [Output] Format=disk [Content] Packages= systemd udev linux ``` Like systemd, `mkosi` uses INI configuration files. We also support dropins which can be placed in `mkosi.conf.d`. Configuration files can also be conditionalized using the `[Match]` section. For example, to only install a specific package on Arch Linux, you can write the following to `mkosi.conf.d/10-arch.conf`: ```ini [Match] Distribution=arch [Content] Packages=pacman ``` Because not everything you need will be supported in `mkosi`, we support running scripts at various points during the image build process where all extra image customization can be done. For example, if it is found, `mkosi.postinst` is called after packages have been installed. Scripts are executed on the host system by default (in a sandbox), but can be executed inside the image by suffixing the script with `.chroot`, so if `mkosi.postinst.chroot` is found it will be executed inside the image. To add extra files to the image, you can place them in `mkosi.extra` in the source directory and they will be automatically copied into the image after packages have been installed. ## Bootable images If the necessary packages are installed, `mkosi` will automatically generate a UEFI/BIOS bootable image. As `mkosi` is a systemd project, it will always build [UKIs](https://uapi-group.org/specifications/specs/unified_kernel_image/) (Unified Kernel Images), except if the image is BIOS-only (since UKIs cannot be used on BIOS). The initramfs is built like a regular image by installing distribution packages and packaging them up in a CPIO archive instead of a disk image. Specifically, we do not use `dracut`, `mkinitcpio` or `initramfs-tools` to generate the initramfs from the host system. `ukify` is used to assemble all the individual components into a UKI. If you don't want `mkosi` to generate a bootable image, you can set `Bootable=no` to explicitly disable this logic. ## Using mkosi for development The main requirement to use `mkosi` for development is that we can build our source code against the image we're building and install it into the image we're building. `mkosi` supports this via build scripts. If a script named `mkosi.build` (or `mkosi.build.chroot`) is found, we'll execute it as part of the build. Any files put by the build script into `$DESTDIR` will be installed into the image. Required build dependencies can be installed using the `BuildPackages=` setting. These packages are installed into an overlay which is put on top of the image when running the build script, so they are available during the build but don't end up in the final image. An example `mkosi.build.chroot` script for a project using `meson` could look as follows: ```sh #!/bin/sh meson setup "$BUILDDIR" "$SRCDIR" ninja -C "$BUILDDIR" if [ "$WITH_TESTS" != "0" ]; then meson test -C "$BUILDDIR" fi meson install -C "$BUILDDIR" ``` Now, every time the image is built, the build script will be executed and the results will be installed into the image. The `$BUILDDIR` environment variable points to a directory that can be used as the build directory for build artifacts to allow for incremental builds if the build system supports it. Of course, downloading all packages from scratch every time and re-installing them again every time the image is built is rather slow, so `mkosi` supports two modes of caching to speed things up. 
The first caching mode caches all downloaded packages so they don't have to be downloaded again on subsequent builds. Enabling this is as simple as running `mkdir mkosi.cache`. The second mode of caching caches the image after all packages have been installed but before running the build script. On subsequent builds, `mkosi` will copy the cache instead of reinstalling all packages from scratch. This mode can be enabled using the `Incremental=` setting. While there is some rudimentary cache invalidation, the cache can also forcibly be rebuilt by specifying `-ff` on the command line instead of `-f`. Note that when running on a btrfs filesystem, `mkosi` will automatically use subvolumes for the cached images, which can be snapshotted on subsequent builds for even faster rebuilds. We'll also use reflinks to do copy-on-write copies where possible. With this setup, by running `mkosi -f qemu` in the systemd repository, it takes about 40 seconds to go from a source code change to a root shell in a virtual machine running the latest systemd with your change applied. This makes it very easy to test changes to systemd in a safe environment without risk of breaking your host system. Of course, while 40 seconds is not a very long time, it's still more than we'd like, especially if all we're doing is modifying the kernel command line. That's why we have the `KernelCommandLineExtra=` option to configure kernel command line options that are passed to the container or virtual machine at runtime instead of being embedded into the image. These extra kernel command line options are picked up when the image is booted with qemu's direct kernel boot (using `-append`), but also when booting a disk image in UEFI mode (using SMBIOS). The same applies to systemd credentials (using the `Credentials=` setting). These settings allow configuring the image without having to rebuild it, which means that you only have to run `mkosi qemu` or `mkosi boot` again afterwards to apply the new settings. ## Building images without root privileges and loop devices By using `newuidmap`/`newgidmap` and `systemd-repart`, `mkosi` is able to build images without needing root privileges. As long as proper subuid and subgid mappings are set up for your user in `/etc/subuid` and `/etc/subgid`, you can run `mkosi` as your regular user without having to switch to `root`. Note that as of the writing of this blog post this only applies to the `build` and `qemu` verbs. Booting the image in a `systemd-nspawn` container with `mkosi boot` still needs root privileges. We're hoping to fix this in a future systemd release. Regardless of whether you're running `mkosi` with root or without root, almost every tool we execute is invoked in a sandbox to isolate as much of the build process from the host as possible. For example, `/etc` and `/var` from the host are not available in this sandbox, to avoid host configuration inadvertently affecting the build. Because `systemd-repart` can build disk images without loop devices, `mkosi` can run from almost any environment, including containers. All that's needed is a UID range with 65536 UIDs available, either via running as the root user or via `/etc/subuid` and `newuidmap`. In a future systemd release, we're hoping to provide an alternative to `newuidmap` and `/etc/subuid` to allow running `mkosi` from all containers, even those with only a single UID available. ## Supporting older distributions mkosi depends on very recent versions of various systemd tools (v254 or newer). 
To support older distributions, we implemented so-called tools trees. In short, `mkosi` can first build a tools image for you that contains all required tools to build the actual image. This can be enabled by adding `ToolsTree=yes` to your mkosi configuration. Building a tools image does not require a recent version of systemd. In the systemd mkosi configuration, we automatically use a tools tree if we detect your distribution does not have the minimum required systemd version installed. ## Configuring variants of the same image using profiles Profiles can be defined in the `mkosi.profiles/` directory. The profile to use can be selected using the `Profile=` setting (or `--profile=`) on the command line. A profile allows you to bundle various settings behind a single recognizable name. Profiles can also be matched on if you want to apply some settings only to specific profiles. For example, you could have a `bootable` profile that sets `Bootable=yes`, adds the `linux` and `systemd-boot` packages and configures `Format=disk` to end up with a bootable disk image when passing `--profile bootable` on the command line. ## Building system extension images [System extension](https://uapi-group.org/specifications/specs/extension_image/) images may, dynamically at runtime, extend the base system with an overlay containing additional files. To build system extensions with `mkosi`, we need a base image on top of which we can build our extension. To keep things manageable, we'll make use of `mkosi`'s support for building multiple images so that we can build our base image and system extension in one go. We start by creating a temporary directory with a base configuration file `mkosi.conf` with some shared settings: ```ini [Output] OutputDirectory=mkosi.output CacheDirectory=mkosi.cache ``` Now let's continue with the base image definition by writing the following to `mkosi.images/base/mkosi.conf`: ```ini [Output] Format=directory [Content] CleanPackageMetadata=no Packages=systemd udev ``` We use the `directory` output format here instead of the `disk` output so that we can build our extension without needing root privileges. Now that we have our base image, we can define a sysext that builds on top of it by writing the following to `mkosi.images/btrfs/mkosi.conf`: ```ini [Config] Dependencies=base [Output] Format=sysext Overlay=yes [Content] BaseTrees=%O/base Packages=btrfs-progs ``` `BaseTrees=` points to our base image and `Overlay=yes` instructs mkosi to only package the files added on top of the base tree. We can't sign the extension image without a key. We can generate one by running `mkosi genkey` which will generate files that are automatically picked up when building the image. Finally, you can build the base image and the extensions by running `mkosi -f`. You'll find `btrfs.raw` in `mkosi.output` which is the extension image. ## Various other interesting features - To sign any generated UKIs for secure boot, put your secure boot key and certificate in `mkosi.key` and `mkosi.crt` and enable the `SecureBoot=` setting. You can also run `mkosi genkey` to have `mkosi` generate a key and certificate itself. - The `Ephemeral=` setting can be enabled to boot the image in an ephemeral copy that is thrown away when the container or virtual machine exits. - `ShimBootloader=` and `BiosBootloader=` settings are available to configure shim and grub installation if needed. - `mkosi` can boot directory trees in a virtual machine using `virtiofsd`. 
This is very useful for quickly rebuilding an image and booting it as the image does not have to be packed up as a disk image. - ... There's many more features that we won't go over in detail here in this blog post. Learn more about those by reading the [documentation](https://github.com/systemd/mkosi/blob/main/mkosi/resources/man/mkosi.1.md). ## Conclusion I'll finish with a bunch of links to more information about `mkosi` and related tooling: - [Github repository](https://github.com/systemd/mkosi) - [Building RHEL and RHEL UBI images with mkosi](https://fedoramagazine.org/create-images-directly-from-rhel-and-rhel-ubi-package-using-mkosi/) - [My presentation on systemd-repart at ASG 2023](https://media.ccc.de/v/all-systems-go-2023-191-systemd-repart-building-discoverable-disk-images) - [mkosi's Matrix channel](https://matrix.to/#/#mkosi:matrix.org). - [systemd's mkosi configuration](https://raw.githubusercontent.com/systemd/systemd/main/mkosi.conf) - [mkosi's mkosi configuration](https://github.com/systemd/systemd/tree/main/mkosi.conf.d) mkosi-26/docs/000077500000000000000000000000001512054777600133735ustar00rootroot00000000000000mkosi-26/docs/CNAME000066400000000000000000000000201512054777600141310ustar00rootroot00000000000000mkosi.systemd.iomkosi-26/docs/CODING_STYLE.md000066400000000000000000000025301512054777600156400ustar00rootroot00000000000000--- title: Coding Style category: Contributing layout: default SPDX-License-Identifier: LGPL-2.1-or-later --- # Coding Style ## Python Version - The lowest supported Python version is CPython 3.9. ## Formatting - Use the accompanying `.editorconfig` or `.dir-locals.el`. - For Python files we use the style from `ruff format` with a line length of 109 characters and four spaces indentation. - Indentation with tabs is not allowed. - When it improves readability, judicious use of `# noqa: E501` comments is allowed. - Long lists, including argument lists, should have a trailing comma to force ruff to split all elements on a line of their own. - List of commandline arguments should not split the argument of a commandline option and the option. This needs to be enforced with `# fmt: skip` comments, e.g. do ```python cmd = [ "--option", "foo", ] # fmt: skip ``` and do NOT do ```python cmd = [ "--option", "foo", ] ``` - When coercing Path-like objects to strings, use `os.fspath`, since this calls the `__fspath__` protocol instead of `__str__`. It also ensures more type-safety, since every Python object supports `__str__`, but not all support `__fspath__` and this gives the typechecker more information what is expected at this point. It also signals the intent to the reader more than a blanket `str()`. 
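As a practical note, the formatting and linting rules above can be checked locally before sending a patch (a sketch; the paths are illustrative and `ruff` has to be installed separately):

```shell
# Reformat the Python sources in place with the ruff style described above,
# then run the lint checks; both pick up the repository's ruff configuration.
ruff format mkosi/ tests/
ruff check mkosi/ tests/
```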
mkosi-26/docs/_data/000077500000000000000000000000001512054777600144435ustar00rootroot00000000000000mkosi-26/docs/_data/documentation_page.json000066400000000000000000000002761512054777600212100ustar00rootroot00000000000000[ { "category": "Documentation", "title": "A longer description and available features and options", "url": "https://github.com/systemd/mkosi/blob/main/mkosi/resources/man/mkosi.1.md" } ] mkosi-26/docs/_data/project_pages.json000066400000000000000000000007701512054777600201670ustar00rootroot00000000000000[ { "category": "Project", "title": "Brand", "url": "https://brand.systemd.io/" }, { "category": "Project", "title": "Releases", "url": "https://github.com/systemd/mkosi/releases" }, { "category": "Project", "title": "GitHub Project Page", "url": "https://github.com/systemd/mkosi" }, { "category": "Project", "title": "Issues", "url": "https://github.com/systemd/mkosi/issues" }, { "category": "Project", "title": "Pull Requests", "url": "https://github.com/systemd/mkosi/pulls" } ] mkosi-26/docs/_includes/000077500000000000000000000000001512054777600153405ustar00rootroot00000000000000mkosi-26/docs/_includes/footer.html000066400000000000000000000003211512054777600175200ustar00rootroot00000000000000 mkosi-26/docs/_includes/head.html000066400000000000000000000011441512054777600171270ustar00rootroot00000000000000 {% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %} mkosi-26/docs/_includes/header.html000066400000000000000000000004741512054777600174630ustar00rootroot00000000000000 mkosi-26/docs/_layouts/000077500000000000000000000000001512054777600152325ustar00rootroot00000000000000mkosi-26/docs/_layouts/default.html000066400000000000000000000004011512054777600175370ustar00rootroot00000000000000 {% include head.html %} {% include header.html %}
{{ content }}
{% include footer.html %} mkosi-26/docs/_layouts/forward.html000066400000000000000000000016041512054777600175650ustar00rootroot00000000000000 Redirecting to {{ page.target }} {% include header.html %}

This document has moved.
Redirecting to {{ page.target }}.

mkosi-26/docs/assets/000077500000000000000000000000001512054777600146755ustar00rootroot00000000000000mkosi-26/docs/assets/systemd-logo.svg000066400000000000000000000060601512054777600200460ustar00rootroot00000000000000 mkosi-26/docs/building-rpms-from-source.md000066400000000000000000000161031512054777600207310ustar00rootroot00000000000000--- title: Building RPMs from source with mkosi category: Tutorials layout: default SPDX-License-Identifier: LGPL-2.1-or-later --- # Building RPMs from source with mkosi If you want to build an RPM from source and install it within a mkosi image, you can do that with mkosi itself without using `mock`. The steps required are as follows: 1. Install `BuildRequires` dependencies in the build overlay 1. Install dynamic `BuildRequires` dependencies in the build overlay 1. Build the RPM with `rpmbuild` 1. Install the built rpms in the image In the following examples, we'll use mkosi itself and its Fedora RPM spec as an example. To keep things snappy, we execute the first 3 steps in a prepare script so that they're cached on subsequent runs of mkosi if the `Incremental=` setting is enabled. First, we need access to the upstream sources and the RPM spec and related files. These can be mounted into the current working directory when running mkosi scripts by using the `BuildSources=` setting. For example, in `mkosi.local.conf`, we could have the following settings: ```ini [Build] BuildSources=../mkosi:mkosi ../fedora/mkosi:mkosi/rpm BuildSourcesEphemeral=yes ``` Which instructs mkosi to mount the local version of the mkosi upstream repository at `../mkosi` to `mkosi` in the current working directory when running mkosi. The Fedora RPM spec is mounted at `mkosi/rpm`. We enable the `BuildSourcesEphemeral=` option as `rpmbuild` will write quite a few files to the source directory as part of building the rpm which we don't want to remain there after the build finishes. We use `rpmspec` and `rpmbuild`, but these do not really support running from outside of the image that the RPM is being built in, so we have to make sure they're available inside the image by adding the following to `mkosi.conf`: ```ini [Content] Packages=rpm-build # If you don't want rpm-build in the final image. RemovePackages=rpm-build ``` The prepare script `mkosi.prepare` then looks as follows: ```shell #!/bin/sh set -e if [ "$1" = "final" ]; then exit 0 fi mkosi-chroot \ env --chdir=mkosi \ rpmspec \ --query \ --buildrequires \ --define "_topdir /var/tmp" \ --define "_sourcedir $PWD/mkosi/rpm" \ rpm/mkosi.spec | sort --unique | tee /tmp/buildrequires | xargs --delimiter '\n' mkosi-install until mkosi-chroot \ env --chdir=mkosi \ rpmbuild \ -bd \ --noprep \ --build-in-place \ --define "_topdir /var/tmp" \ --define "_sourcedir $PWD/mkosi/rpm" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec do EXIT_STATUS=$? if [ $EXIT_STATUS -ne 11 ]; then exit $EXIT_STATUS fi mkosi-chroot \ rpm \ --query \ --package \ --requires \ /var/tmp/SRPMS/mkosi-*.buildreqs.nosrc.rpm | grep --invert-match '^rpmlib(' | sort --unique >/tmp/dynamic-buildrequires sort /tmp/buildrequires /tmp/dynamic-buildrequires | uniq --unique | tee --append /tmp/buildrequires | xargs --delimiter '\n' mkosi-install done ``` To install non-dynamic dependencies, we use `rpmspec`. What's important is to set `_sourcedir` to the directory containing the RPM sources for the RPM spec that we want to build. 
We run `rpmspec` inside the image to make sure all the RPM macros have their expected values and then run `mkosi-install` outside the image to install the required dependencies. `mkosi-install` will invoke the package manager that's being used to build the image to install the given packages. We always set `_topdir` to `/var/tmp` to avoid polluting the image with `rpmbuild` artifacts. After installing non-dynamic `BuildRequires` dependencies, we have to install the dynamic `BuildRequires` dependencies by running `rpmbuild -bd` until it succeeds or fails with an exit code that's not `11`. After each run of `rpmbuild -bd` that exits with exit code `11`, there will be an SRPM in the `SRPMS` subdirectory of the rpm working directory (`_topdir`) of which the `BuildRequires` dependencies have to be installed. We retrieve the list of `BuildRequires` dependencies with `rpm` this time (because we're operating on a package instead of a spec), remove all `rpmlib` style dependencies which can't be installed and store them in a temporary file after filtering duplicates. Because the `BuildRequires` dependencies from the SRPM will also contain the non-dynamic `BuildRequires` dependencies, we have to filter those out as well. Now we have an image and build overlay with all the necessary dependencies installed to be able to build the RPM. Next is the build script. We suffix the build script with `.chroot` so that mkosi runs it entirely inside the image. In the build script, we invoke `rpmbuild -bb --build-in-place` to have `rpmbuild` build the RPM in place from the upstream sources. Because `--build-in-place` configures `_builddir` to the current working directory, we change directory to the upstream sources before invoking `rpmbuild`. Again, `_sourcedir` has to point to the RPM spec sources. We also have to override `_rpmdir` to point to the mkosi package directory (stored in `$PACKAGEDIR`). The build script `mkosi.build.chroot` then looks as follows: ```shell #!/bin/sh set -e env --chdir=mkosi \ rpmbuild \ -bb \ --noprep \ --build-in-place \ $([ "$WITH_TESTS" = "0" ] && echo --nocheck) \ --define "_topdir /var/tmp" \ --define "_sourcedir $PWD/mkosi/rpm" \ --define "_rpmdir $PACKAGEDIR" \ ${BUILDDIR:+--define} \ ${BUILDDIR:+"_vpath_builddir $BUILDDIR"} \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ --define "_binary_payload w.ufdio" \ --define "debug_package %{nil}" \ --define "__brp_strip %{nil}" \ --define "__brp_compress %{nil}" \ --define "__brp_mangle_shebangs %{nil}" \ --define "__brp_strip_comment_note %{nil}" \ --define "__brp_strip_static_archive %{nil}" \ rpm/mkosi.spec ``` The `_vpath_builddir` directory will be used to store out-of-tree build artifacts for build systems that support out-of-tree builds (CMake, Meson) so we set it to mkosi's out-of-tree build directory in `$BUILDDIR` if one is provided. This will make subsequent RPM builds much faster as CMake or Meson will be able to do an incremental build. Setting `_binary_payload` to `w.ufdio` disables compression to speed up the build. We also disable debug package generation using `debug_package` and various rpm build root policy scripts to speed up the build. Note that the build root policy macros we use here are CentOS/Fedora specific. After the build script finishes, the produced rpms will be located in `$PACKAGEDIR`. 
Any packages put in this directory by the build script are added to a local package repository and become available for installation in a post-installation script or using the `VolatilePackages=` setting: ```shell #!/bin/sh set -e mkosi-install mkosi cp "$PACKAGEDIR"/*mkosi*.rpm "$OUTPUTDIR" ``` To run just the build script to build the rpms without actually building an image, you can use the `none` output format (`mkosi -t none`). mkosi-26/docs/distribution-policy.md000066400000000000000000000055371512054777600177420ustar00rootroot00000000000000--- title: Adding new distributions category: Tutorials layout: default SPDX-License-Identifier: LGPL-2.1-or-later --- # Adding new distributions Merging support for a new distribution in mkosi depends on a few factors. Not all of these are required, but the more of these requirements are satisfied, the better the chances that we will merge support for your distribution: 1. Is the distribution somewhat popular? mkosi's goal is not to support every distribution under the sun; the distribution should have a substantial number of users. 2. Does the distribution differentiate itself somehow from the distributions that are already supported? We're generally not interested in supporting distributions that only consist of minimal configuration changes to another distribution. 3. Is there a long-term maintainer for the distribution in mkosi? When proposing support for a new distribution, we expect you to be the maintainer for the distribution and to respond when pinged for support on distribution specific issues. 4. Does the distribution use a custom package manager or one of the already supported ones (apt, dnf, pacman, zypper)? Supporting new package managers in mkosi is generally a lot of work. We can support new ones if needed for a new distribution, but we will insist on the package manager having a somewhat sane design, with official support for building in a chroot and running unprivileged in a user namespace being the bare minimum features we expect from any new package manager. We will only consider new distributions that satisfy all or most of these requirements. However, you can still use mkosi with the distribution by setting the `Distribution` setting to `custom` and either providing the rootfs via a skeleton tree or base tree, or providing the rootfs via a prepare script (a short sketch of this is given at the end of this document). # Implementing new distributions To actually implement a new distribution, the following checklist can be used: - Add the distribution to the `Distribution` enum - Add the implementation of the distribution in `mkosi/distribution`. If the distribution is a variant of an existing distribution, inherit from the existing distribution's installer class and only override the necessary methods. - Update any relevant methods on the `Distribution` enum to take the new distribution into account. - Update the documentation in `mkosi/resources/man/mkosi.1.md` - Update the default initrd, tools and default image configurations in `mkosi/resources/mkosi-initrd`, `mkosi/resources/mkosi-tools` and `mkosi.conf.d` respectively. If the distribution is a variant of another existing distribution, update the `[Match]` blocks for the existing distribution to also match against the new distribution. To test whether all necessary changes were made, you can run `mkosi -d <distribution> --tools-tree -t disk -f vm`. 
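As a sketch of the `custom` escape hatch mentioned earlier (the tarball name is hypothetical; check the documentation for the exact semantics of `BaseTrees=` and whether an archive or a directory tree fits your use case):

```shell
# Hypothetical sketch: build a disk image from a rootfs you provide yourself
# instead of installing packages for a supported distribution.
cat > mkosi.conf <<'EOF'
[Distribution]
Distribution=custom

[Content]
BaseTrees=rootfs.tar
EOF
mkosi -t disk
```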
mkosi-26/docs/favicon.png [binary PNG image data omitted]
mkosi-26/docs/fonts/
mkosi-26/docs/fonts/heebo-bold.woff [binary WOFF font data omitted]
mkosi-26/docs/fonts/heebo-regular.woff [binary WOFF font data omitted]
|ͻom}b5cYlZHןXGilq&x`( f% pAK?1O3g]}.=]^ "Dq?@|zB~y?lpdsf^-{KGegqw:{]Uu>Z؃ur+n{kSy~/t1wSH+UVqTML'OhK:Iy& )z3ö94hY-ؖ?|QYyx֠D :~` sxQ38SJ-;qģgAYl=;;CvM=]Dֿ Hl 7/.~8 DHVJO-4vݺ ֿO/{W{mшCg_ۯu*^㽥|{xܚVXڲU `"o'_+Lha\n԰K2Gfj:h<eVqF9O#ȋţS5!!GF0_p' ?QL@{!(LRvx/7J*;I636fɕqU !id!m6>"5JQi)|ڠa5~5?T͸)?7m`QE+:v\^@wќw]i}w~kϖK$tEHekoԂ9(4*ƽȪ8VJR Hdd lXpCAedDz&芅Ycז&cÍV?ǟpgwգ.}uaƕݲ/:~V/?5moM~r.K>?_#KdH؍Ig"HV t%@j6I q%+kXL F.3SʬY^޶%n|Vhev/>Zз1cg#5koвGGq|"ăe_q*Pty*q+(r7DdhgR.aIe} d2YMV-F9pW8,XTA(ADצKq?GPg6HYM) G5%χ~Q5 I&b` TjVҚƬeKemvS)"x_WEV5Ch~h zI9Ublg%>O2Լ!Q>I\'yy(D9IRtg"XM 7ojOx]iT ahmv%Bq XJÇ$ ژ(P(i9k5Xtdz, 4iO*U ze_02L_a:$Js xS-&X8bV-Y7KnhE~;6- ry0Vo` B!!Apڬ 0$RǛ%=-۷l6!=}w;\;OMXR35a.Jw0x h&^/A5f]XddP9/4.7Wy~sj*L1uX1 8T6& M 8Рy j_?Fl0#5_.gͩ "!d;SѿϨHkshWQӡ,㱜Lq9  nt,;N٭*[Zuxܯz jU!O76Y1ء|V*%VWі-m<=v<7q4BqbGW%rVҰ Qh3nªE-q_ i4 ď d%MBj8w|P , Wʐh#HIK 3Ѱ('TdY矪7BjP9T26?{ՖbrI]\v[z}õ׵װx!CQ}Ⱥ!,$B򱱖3]hՅO( v '>93qya62^EB KQH_폿-՜՞yk$O9?V1!.&"Ȣ0$@ww_d,|sZD5)V|Yٶ/\-~=zkeŻ6oǦM7>?uM$Nq18`\ >A m&9"{M\ X,IpO[3ワ_s6M.!a&#"ԛmh$uo7B:5VKjJr%͚ B}hq킟;8ۂzΧ5@Kg;,,-ՁҹCgBABkٖ5CgF<2A_*{2R[㲇om"jZݔu׵S Ӟn0 ?wnѕha :;{h "b)߰>ȹXGLj>YFkgV\TrysʉmeÊ*:w^ A]+̯{@eSzoa]M;٩XsEA1JV)ER1!$WD;;p{R#M5kMM4z2~U@8O*ѧ0-yjC&=tI\YLβNȰ,QǦN"mzx#n4ޣ{um'>0ݩ -j6H$S'@h VJKop9IKIJw ]ĖtO4OJ.s{ԾM916+v*@!M py3@l>nwz~#؈vgf>mk̩I7&Sbagq#74!ggiN]m=&N2sgnjV&7w=^4mPQ#%H 0R4 \"tDRs'hU桳5$g[=zi}U>VfF]P8=?G 8|N]J[JsmFIG;5$Fwg*_|WuCUXrsrɵ9!k"`r G%k`7D&@MD\ְ7OӮv户{eWb.OLu4^/{!Z8r>8`ۂ>[ g˚(F ԑ ZxE0Y+䰚;kC8aaR:/Gg=00DΊb#EjAE#[eaH8Hyl"Fyg6Y#j3uwz1\g v۱c-seݧOʼn9|4 Ĭ3 Ip ܓ#y)HTp%Q@=GJ*j.CJ, F .'wu\@799:ѡ5/GcΖ^<` CBOɇ?1㽕 :_^:70rYeSf9cz숻Z\Vޥc٘+[ _,wԚܞ-K {7/{ Q /=7miONHgHn?"7&_6e;:UT߻jMNm_O49ᬳ$T ,_pmUϚKF& Wջ"O nXU*<9[?Tiu&dwRt?/,' r:/";Q 0G 2[u[ԣXzssvl_uXt}t$MEJ3L6m6vߺ]JiLR&ZThvjDC."5J{gO@8s:gfjG0/Cu-R[`ש @!4I$ R )IVO_(4J+z6LA-@g$#drX`HPEF R)82D [Lm ja5'ęxF U5## , nؔ7kO|Kt\˗NQ@k}'S=+$!&JE=ƨx膠@FES.H-&?@ILEjƸy.`2p('sVYWDR7\:c&źI41x#U`l",S"Y ,n#z"FHK2;|<JUs(Y ÀI׷0rʷSP0";&=}90OjO,8k>$W5Xf- \/]pVJGt{|8 /]w\ս9q|OA.8"aD.@k4K/4>af5Il0Dtc_59:m3u7jkt9d/^'ƒ}zv ;͍4K7;%fPTUT*oKܚTZreȽeM܋lf޿!/CAH?^>cþy7^gCbɑ1obVaq6W]<]\[27HMzxp }@uH+ -v7j 3=xmAo^vyWuV0BL%ܽt kWё\ՁʍxȈ7b$BȵΔIZ%w-~rOCk/^Wfj*?woѢA{bx5,\gJz/{`'ũ%9lWkRdK[D [*dʋ<P>bW>_`k݇ZR.I>^,DKuPo)v 8Eg=vK\պ9r}uyF}Z$&I/)DçѮowX\t]oo/N J0)=H1rb ٌtIU>FLv5;ŵDRؠɞzf;$[7Qҕ!!,H9P7ӷBCs19c:ؾcG!|P42" B܇F5\ Fc6WC#ymڪڲı!wrCnF[[ gwaexQYhhc rkSfDR;hs\ {XC}y!;y~{N {'n\4bٓ?l{`{zl}zry]`"bXZ;kɭôu']ׇ)$oWj^rWY|hc'iQ|'-$4ko Dq{wjljSW7 M3 q'JNyz3 ovqrH#d}6>4~>Z[oZLdeju׵HW XI?\vkcuJ_H}p KI?\~E8Rxp^W2!ucC_0Lv4쇋.{P쌺-o ;&^5-%yd|OO\m魰mw^w6 *7ouF5qf;cou)fV,vo]8w^Pn]\W< ])ѕ!U!٢njL+8E07 Z+YK㞹zO޺x [->ϣ6xn|mM"==sƻ+3g hOҜp*ԅ~1ё]RFJwH-XRƿ9UɖP S /U;2\#;\];a}K>ž}g]r^I_ךDH=OpvwC?ug;uwӯ)+fK55y)hoc!ߵz]Cuw8x<4|m򤗫ok񻫖e)~#cJQ녙uGOis}bqOlDq5W^oǹsscM-=<6WD{1pȫwѷx?>aNXZ|;ZM1ZJǢg?㻕+*-"VTor:NyT֑_{Ta]K^LdU+n?Mv,V,-eNUOR?n|j5$v⪞:9S3Ǵ7 wSK JJ|*uo[27b+SkW-_XnٴkxyՒOYxUyS7.8<.4oz66n7y3ݖXػ -940poW|:a&YkF=_U XfEѨ۫UGrry$yh}^{ASɐVEݱnL6=Ucvuiny+s _ҙ0?FR9Iwho8ՊfZ&uF#*>ts-azR% }LtJ%)R;]$j2UjX}O3w3&`4z3SySI'ֱS~mjoz^y:1V'.ptm6CjŶ.քvK3lK$ջ>"dO@K=?}[HTnM [2i!oyG1b4Q *9<1~᠂ :GG eb,@ qWDX 1yK,VP9Ey2˗DGGۢ6֖H,V.88ƶlN{ [́K`zmMW=n)rpCݝa]q#zF 7mFmtoua1`4P(6S;iT)!名.J7c#II{Ϗ`4d[3kKiY9Lz*kܳv+&/?xR0R{^&tMsz3FR$Sd%O`hVZ)rV #/S}8ٌ8$w)M a0| ?D'\r#y$YGC.o-3&M!JU<;p?l/,nӈu$ɹKW(!8@I@O[@>L Զaڕ?3lo߮E Ul4XUvc7<ُ̢PV-{;g`E h)uᤁCޮGimȉ|µWj,vtގCMѧ*k?6h |7C%Xn* ]Hp?T!CHF65+VS?)NKH2t(2i%b!PcC֩b/N.fumn.y> A RXtVHY'2I|<\Ť[8gOľa LG*~vWc_$ZLlNvoMI 2\yefE?n6X.nwSan&|UAa[M4.(HႱ~ӂuqr ή,r9vċ8ъ?2vNm6a~SG$i [% u. v%}X ~BP-vW߭7Iu&>P?)_!̘HH񘽳ƉsE5ج™G[!˹D{ 8dP^GRHg6`6\n_<{F|-kb\FC)cv:ݸ+G39&$gޤq%S<[Ӛh0(-ŨWAKq̓~82H2G;c~/=M# &>? 
7YM:Lmƒ(Yَ8DJ\.rh۳5weĀV-n2H8êܨ">}q˹#6Ui9;:A̢8{%L/{Fl1`=c=vw&^^&`g6uZ9γDžt`ȝ,t4ٹq`^=Bܞ uSߞʏqWXɹIJZr6AEuy5 SiJzUƚ>}O15.G-1䄐B};3,gO0KQ32ⵣ1u&']jͦ GOn? ]^br~~އ3~aG~lftoG֗wקt\݋l nguiCtk I敇:BsCOG k4' d O`PnR >5>q<;s}Q`VJBSRSR[ H w %9Ǵ.<;$P@9Oa7ӒT{pbg' Hvn&I]s& 9j6& ]?wnĵDLh5Gzr|jc1S{e<\{t.L rjT]t*b0 FªFWr]wC>F`. ˕4Tɂ hD{<K`}$B,̔bJEhu"])Od spX==9ArIp'K2 ,I7%p"6f3 9/DGTc8fUcҡTX\Ь /݇Lpy#3L3 ;t6zth4OC 9sE@R}| 7t.j^FPȩ;C踊΅l8YLVҔ<;ӯU*ET< /!𿖱/K|0y AλI9$`4"0I "rRD֛4qz61i1Q`yם 4Diu*::;;(!36 _h cT1.AQܝˤJFP4PI csKsz$Qi m0>>uPf-,94{6#\0hLK3&foߢE4wn -y]3XɆ49 #"9u@A% q,XHrB Ҋjܖd7>88828f4y'ɗjK/c~SECL^){apN'oD XWe.kFNS{|W#=`n2Eyhj} )?$hw 䬈rivM@a&ݶ@AZ);9rM+=tZ!Wl1yng_mP"۬W՞V^bg/ZcUDN'Iyg|&*f.%Y$! As@PtϝX,F顮DF Ojy\ ٧?۵kׯܸr 6T<3<OS?|v'-I#GBX,q,>0Q E9c]{#ީ<]0Xi.C{ $ &Xl[ep,G`ƠILyGLWr|Ԩ_o:_vƇ;^\\|э_;}=[ڂ +;H߻Ok5R3ȓNqfr]_=Dפ$EzJbOA$)ͮo/V6t#W_\V~YZ}uzFI_ W?_\dy߹ׯYzFsvN|j .@lh;>:v ZIt<`[* $$j49vݨ|+̆]-_Q4.+FζyDm^ɶꏗ:lŠ[FX4&~hdw_*Kbe[I ^Xrd14wH 5ugϥʫiY3 %QimysAsaOfZK5HYZ))+d$rR,f4(}vz`y^(vr| zA㭵 x<,MȤ9yzcBRϓxjG1D:sw)I w8paleq% hy{qnOiEaТs t@ʦN8LE:#/o.q T YҲ+))&&ɞcX 0~f3CmhIfhURG\ Dq(mjcJ s0!М 9&}R+]sDVO+]Gj򆼼ԅrpϞp0[@e֯=#7;q<ұ+WQ^~d#qtFN@<<^ˌn|n-+P;( ],iD&{(~^0bMvntފi=#Ms:^г#:n]#v06/3060 " ˢ"7pnF=5&16ѸF̢4iiK&6msߛa@0c޻{le+;{&Z»#?O^Y>? =H~[ 2Mk2 %lkF1՛PVK.mAe9l5aڣczR^+cJR2sb W1GO+6-_ghͅ*_X S/698&r(DCè=D'7X+C LJ'"lQ(1g_ݞPV3UQN;d-),)|6y&o-MM+LlA [j~yϠj>O^ b #xgu 4!,iZc@xF@a=D'ZV |㩚Mp IKmh0 R POCz^s\\s^zgURk~NVR7FG'$DG% &%NKdOD&UDkU,-X;c Ps=w,Cqo;%wC.ǵ0ՔG+Z|4SL-Z]M.)2`]yc'\ >&bxz"&!wXlBX=K"2rȤ82ki4[%gbHRL<.ʸzIDJ(A+zELVH*%(v=~N@h=Ê'$QAO3&Ē6qXzLi؇BOk!;q_3@KttsjĄ|&f' <+ &o4 YY*?LDX;\=%_%h_"+MJۉgx'1'S֏E?l$Ylb<#ºS/k9y;7By 4JMVfZe%vl#6vF<`K yVjA.{0}A&Ԁێl+5}wf&3%ƥaArb9K#sБ&LyVȷV\Wq2^=!TU 泥 =çE{xh edY)#2= )hom?AyÈ?u Np8!;`ԑU [l&mM|a1rPRBwy.l.=Sg(DNơ;)Dyo&((X09- is/>9uƚWZ?Ww-Z|G_<^@'9ӧ?%^>n_nj}ƟGb4>y^c~=a[ԛ}E ]yՊ+o-;WgnLx=ټ!PHKs4f_S1,i)s|O | gIqꢁU[V,]ޖޜ8:dŦZ:yK vϋ/3sN]kxSYyMh~im&>`/QXk,Onŋ:cEߓڐt6G:now ؀#9XP28`brȿ.cٹٯlnê9 bmOcjN/8\uuS^XS<+.ʃ 𛇚D;2I+xDzDdw\.=;=85vsa`NޠsU{Zzw0n{`,>*ޡ(omDXϡJ8I`J3X1?hX( |*Ɵ:OJwo<D>KtǴ_YHlm~lB>C5cX3raG׆ڲ$ŽLbcRLѱid] cp)*&$Ȫ|2;6Zh'pk{`0[Ȁ O27`0I\Ҷe5i;ZLͩhj2ƅOJtwyCoܔDh?^9Gʍ^Qa L~%AAX b,bAJf,[g JG<Αؐ&bp'+icC] }r z*91DDkORP@KyBTMϮN^5{Jlj/)߷*7vi ]FI`ӞJ`ϑĤœirRhCePd(`9:Ѷf}6"`o$r)\@ ~(U ;B)r2 3+mCnA8Rs.4#P59A- 1k*0,iB }JzwT;Ctw L&@>ʽ@5ه. VvDejiDNʼn B_~{?]_cNf>"L$Vy̼Dk*a۫Q v` K)J YΜ'k̗8Yf܏d~p7nNuHa 2D07B5p5>s<qZwuq5G;[G$`t|3cC1= +[c^I]ǜ螤&\Īy1їQ=ܜ]'i\euk73|a`eaX99Aų+$#׸hf9D\ ^Α^Ycuh9~vɵ. jty{;#7sKq2İj'd%oxl:K`}ྦl-ytW gd. %ԙ-*uf"wn%2$4U")&h&<չ94-r/ 6.!\)UFꡤBc`FK/^5ϕMy\Kib9K*/"pwgJ妜*>/!IrZ{(r AH8hOb}ꢫՕd4j֨zkOַo9PP,yw奰=Jub((e ܃Ao◺?)j${V_/F[_?~/qo'kwu('ؽHLt @j)TjZڨ.T^<…*_\e/_|e"[{Ã8id+,L2fޏ& &HuslK+ L{I$\-Uб&doS *z RNqY +:f:PN"!VGzļtKM_}祐>#gs{}hn\w3?rSn4KKbe_8rΐ(P U-Qy`=NXS-Gl]Ýak >d@Ӂ.3MO>jO)ouZO{՟vi;ސnƒ;7T愑NVX9axӎ=`2RKZua^|01 MHBc=`tq]in(Znΐlص  v]]jWEv!\i}]hlRܿqwbjjҞSyC{{y_AŰ A*$&{ApsP; 8d[S sJ1Mi&9P"5M+Jx}>U{nwK v۵f7+j˗;.mxzRC;l\`jWϵ?xܿ-+2j ":vAEs10lI#Cb(6@nG!1 tu PPElrxkx(%*CPK\tEёubk2ڡT-)=xÞi"v㛭we?Q(AV@H ~CJ6K$uXY).0T$R:ޚ"b+(rG#!?_E` Ʋd7 B(/tl/S+%{g-[)ab81J˳8V픐.T # !Kz]eʌхG뾒IJ(}1m)QT9H2yη 0b40,eYR&:8|k.^.n66g0lLFcJ2?iѩش/*d/y *Q9R8T ;a2_e2ÇÇ md>b pR1@G(Iz|W ͼB#_gsnŕY>.cA0=]>h9y*y$ $+( *i|ީ"(0 UB&{#HvYz+Dg);sk'9+"dDH_/Eť(./_R/_BSgA0[Ka`h"TH&lJ%ԺY`Q\aV a RQRcW f5q款>pZY>'ur77<%0/n_m*-\V4mSIɦiE{[[ o&`(M-~k-Z؊l{bfO, ˢ"! V9y8~?t-G +@hcIh^cvJ! 
Ki5%K1d`eȪOК)_a*zti[ %t?ջ$7n3Bi~xG60N@w3d[*Yp".pYD߱^olއe!#4ʊ ٢!0FRa,7&v`bm1vc^i)t*Gyr*rʧfhC!PǗUIԒwhP@{7 ZXBhn@F6|NH,>f)i43R4<HmmFKnRp6MmM3"nuyo5?5e3no}|N4eʳ= z$dL5i]Ys//-۸(i_Iie+W9<̏\TLC\W^~tUvH^Ľt7{.o1.Lt:"'9LSmvHg -[AXBiLuj$ I%v+)lzP#%P#c[ T#CP)%)AwupS,8TсLe,E`8=)z5~5~Pֵ+V=sr9R_<8PyO?Q]]7on}'7?C7~| _?~U?k{{FPfHSJq .n{J@Kв)dr2I.K.L UKHO0b"#C !7J#5=`[rF".X'3=T{OH:8s AmpLdʞtyɜO䇈,ɻ˱aA/baI)Q&ZT%)+ 0 8)Q-Fhl J JU4{G;m@ފ][|2+k>Z~];[ok(ڶqӕWsV%$ԛʲš,-TYsm:?.%kzzhC˪iQvEfʭ)+27SєKೖJM>V>.Ԍq0FP H,XnG4|x+3+3둬ř\Y83sqv'ߘXXht!3!h/߈5Vlprw-X-gΎ#Ab9` X8b]d<2JȨ]؀:d`2j7)s_+,;GVP9QU݌ՠ|'B={8=YFk ,ٷ|FBF#?F(cye1z@P#cHTos)o}7IKeS޾`)=x84 6J+  5j`IZ*'kUf5#25}O#Ƕmey:DRU $.^IR!# lLedt>c]bbu||ubb%+*&'''dtD=3!`]`X⊄SprX`f BR{DcCVh VZ^it+m$6td20_߰P?߰X.(CM4T4ֽjH Ձgxګ$Hj)d2Z'd'+ $$TBV;1`88רDH!2XN7|j,`4E @%P #G\$){ y^)|mHxAa ^tec@.K3ۿP}&)>jx:aEXO`'m/J $ĩ+1:@l:eH@1! +@=asV@JDZ n> !_%xٽ!\S ;ӬwGJeAԵqvI#Q]>&B:]3/寋ns3s!c qd h7Q#jE(B`Baԇވw-*0)5TZ.#.xOH파g}|C1L3C&8U\] o{BaC;5 b: (t1 S$^ uTSwJu$%:2PI* |'b3#.9Eq woKF7zc4Fa qaeL! }2)>&*~ H]h:w}4`9^Lx>\XPea+^~Mm YJ,'v6^-byrdA~]z_I@; 9qHw"ܥ A'q pzn.. 6k8Qc{PB~a[Amy=ԡH 'Q_2՗'nC`#Lj&umAF7p/m Ą PU3^rSN) > wK7>q7s;9\"/ݧ% \qx.M =yx\e-B=hx>dNU*P!VUt ?r-bӔc#@\N脎{^.~V-!6ӺCv ylq2Nv~3NS@#=N}}K\.51Kk3qBtP+ϕyoQcםqthM= ^wuݶt$ݮ*7UXF[{+[K5{W"2rUuܶ@ԞiVmjݢpwi_x#GWM :wB7?k~%7>xmxe@r Ni4.%(iITQQ[P{p{Op=z~o+|P/D+õ= hH][C#'4{a(sx/`o`P6//4܈`kC=}Ua|؃=YF?YJV! f!ÑC.8N`61 1Ec1M #p$`5͘o0͘YX81`|{p{7/q3n-%s+܆r;<rw`ĝ wnݹ^ʽY}`-4h a?b1ڌsG8c8XUlN4N ăyL[d+Slc;pb1Nf]9s9GHŅ

mkosi-26/docs/index.md000066400000000000000000000014151512054777600150250ustar00rootroot00000000000000---
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# mkosi — Build Bespoke OS Images

A fancy wrapper around `dnf --installroot`, `apt`, `pacman` and `zypper` that
generates customized disk images with a number of bells and whistles.

---

{% assign tutorials = site.pages | group_by:"category" %}
{% assign project = site.data.project_pages | group_by:"category" %}
{% assign documentation = site.data.documentation_page | group_by:"category" %}
{% assign merged = documentation | concat: tutorials | concat: project %}

{% for pair in merged %}
{% if pair.name != "" %}
## {{ pair.name }}
{% assign sorted = pair.items | sort:"title" %}{% for page in sorted %}
* [{{ page.title }}]({{ page.url | relative_url }}){% endfor %}
{% endif %}
{% endfor %}

---
mkosi-26/docs/initrd.md000066400000000000000000000017531512054777600152140ustar00rootroot00000000000000---
title: Building a custom initrd and using it in a mkosi image
category: Tutorials
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# Building a custom initrd and using it in a mkosi image

Building an image with a mkosi-built initrd is a two-step process, because you
will build two images: the initrd and your distribution image.

1. Build an initrd image using the `cpio` output format with the same target
   distribution as you want to use for your distribution image. mkosi
   compresses the `cpio` output format by default.

   ```ini
   [Output]
   Format=cpio

   [Content]
   MakeInitrd=yes
   Packages=systemd
            udev
            kmod
   ```

2. Invoke `mkosi` passing the initrd image via the `--initrd` option, or add
   the `Initrds=` setting to your mkosi config when building your distribution
   image.

   ```bash
   mkosi --initrd=<path-to-initrd> ...
   ```

   This will build an image using the provided initrd image. mkosi will add
   the kernel modules found in the distribution image to this initrd.
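
   If you prefer to keep this in configuration rather than on the command
   line, a minimal sketch of the equivalent setting in the distribution
   image's `mkosi.conf` could look like this. The path is an assumption:
   assuming the initrd build from step 1 was also given
   `OutputDirectory=mkosi.output`, its compressed output would typically end
   up at `mkosi.output/initrd.cpio.zst`.

   ```ini
   [Content]
   Initrds=mkosi.output/initrd.cpio.zst
   ```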
mkosi-26/docs/root-verity.md000066400000000000000000000101001512054777600162100ustar00rootroot00000000000000# Operating disk images with verity protected root partition First of all, to build a disk image with a verity protected root partition, put the following in mkosi.repart: ```ini # mkosi.repart/00-esp.conf [Partition] Type=esp Format=vfat CopyFiles=/efi:/ CopyFiles=/boot:/ SizeMinBytes=1G SizeMaxBytes=1G # mkosi.repart/10-root-verity-sig.conf [Partition] Type=root-verity-sig Label=%M_%A_verity_sig Verity=signature VerityMatchKey=root SplitName=%t.%U # mkosi.repart/11-root-verity.conf [Partition] Type=root-verity Label=%M_%A_verity Verity=hash VerityMatchKey=root SizeMinBytes=300M SizeMaxBytes=300M SplitName=%t.%U # mkosi.repart/12-root.conf [Partition] Type=root Format=erofs Label=%M_%A_root Verity=data VerityMatchKey=root CopyFiles=/ ExcludeFilesTarget=/var/ Minimize=yes SplitName=%t.%U ``` Then, you'll need a dropin for systemd-repart in the initrd to make sure it runs after the root partition has been mounted, so let's create an initrd with `mkosi.images` where we customize systemd-repart to behave like this: ```ini # mkosi.images/initrd/mkosi.conf [Include] Include=mkosi-initrd # mkosi.images/initrd/mkosi.extra/usr/lib/systemd/system/systemd-repart.service.d/sysroot.conf [Unit] After=sysroot.mount ConditionDirectoryNotEmpty=|/sysroot/usr/lib/repart.d ``` To use the initrd in the top level image, add the following to mkosi.conf: ```ini [Content] Initrds=%O/initrd ``` Finally, we'll need some partition definitions in the image itself to create an A/B update setup and an encrypted `/var`. This includes the definitions from mkosi.repart in a reduced form solely for matching the existing partitions: ```ini # mkosi.extra/usr/lib/repart.d/00-esp.conf [Partition] Type=esp # mkosi.extra/usr/lib/repart.d/10-root-verity-sig.conf [Partition] Type=root-verity-sig Label=%M_%A_verity_sig # mkosi.extra/usr/lib/repart.d/11-root-verity.conf [Partition] Type=root-verity Label=%M_%A_verity # mkosi.extra/usr/lib/repart.d/12-root.conf [Partition] Type=root Label=%M_%A SizeMinBytes=2G SizeMaxBytes=2G # mkosi.extra/usr/lib/repart.d/20-root-verity-sig.conf [Partition] Type=root-verity-sig Label=_empty # mkosi.extra/usr/lib/repart.d/21-root-verity.conf [Partition] Type=root-verity Label=_empty SizeMinBytes=300M SizeMaxBytes=300M # mkosi.extra/usr/lib/repart.d/22-root.conf [Partition] Type=root Label=_empty SizeMinBytes=2G SizeMaxBytes=2G # mkosi.extra/usr/lib/repart.d/30-swap.conf [Partition] Type=swap Format=swap Encrypt=tpm2 SizeMinBytes=4G SizeMaxBytes=4G # mkosi.extra/usr/lib/repart.d/40-var.conf [Partition] Type=var Format=ext4 Encrypt=tpm2 SizeMinBytes=2G ``` Because in this setup `/etc` is immutable, we have to embed the machine ID in the image itself at build time so let's generate a machine ID and persist it by running `systemd-id128 new >mkosi.machine-id`. The machine ID is required as it is embedded in the `/var` partition UUID and systemd will refuse to mount a `/var` partition without the machine ID embedded in its UUID. You'll then also need some `systemd-sysupdate` definitions in `/usr/lib/sysupdate.d` which describe how to update the image. These will differ depending on how the image is updated but we list some example definitions here. 
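
For orientation, the `[Source]` section of a sysupdate transfer describes
where new versions are published. A purely illustrative sketch for updates
fetched as files over HTTP follows; the URL and the match pattern here are
placeholders, not part of this setup:

```ini
[Source]
Type=url-file
Path=https://example.com/updates/
MatchPattern=%M_@v_verity_sig.raw
```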
The definitions below all omit the `[Source]` section, since its contents
depend on how updates are deployed:

```ini
# /usr/lib/sysupdate.d/10-root-verity-sig.conf
[Transfer]
ProtectVersion=%A

[Target]
Type=partition
Path=auto
MatchPattern=%M_@v_verity_sig
MatchPartitionType=root-verity-sig
PartitionFlags=0
ReadOnly=1

# /usr/lib/sysupdate.d/11-root-verity.conf
[Transfer]
ProtectVersion=%A

[Target]
Type=partition
Path=auto
MatchPattern=%M_@v_verity
MatchPartitionType=root-verity
PartitionFlags=0
ReadOnly=1

# /usr/lib/sysupdate.d/12-root.conf
[Transfer]
ProtectVersion=%A

[Target]
Type=partition
Path=auto
MatchPattern=%M_@v
MatchPartitionType=root
PartitionFlags=0
ReadOnly=1

# /usr/lib/sysupdate.d/20-uki.conf
[Transfer]
ProtectVersion=%A

[Target]
Type=regular-file
Path=/EFI/Linux
PathRelativeTo=boot
MatchPattern=%M_@v+@l-@d.efi \
             %M_@v+@l.efi \
             %M_@v.efi
Mode=0444
TriesLeft=3
TriesDone=0
InstancesMax=2
```
mkosi-26/docs/style.css000066400000000000000000000221421512054777600152460ustar00rootroot00000000000000/* SPDX-License-Identifier: LGPL-2.1-or-later */

@font-face {
    font-family: 'Heebo';
    src: url('fonts/heebo-regular.woff');
    font-weight: 400;
}

@font-face {
    font-family: 'Heebo';
    src: url('fonts/heebo-bold.woff');
    font-weight: 600;
}

/* Variables */
:root {
    --sd-brand-black: hsl(270, 19%, 13%); /* #201A26; */
    --sd-brand-green: hsl(145, 66%, 51%); /* #30D475; */
    --sd-brand-white: #fff;
    --sd-black: hsl(270, 7%, 13%);
    --sd-green: hsl(145, 66%, 43%); /* #26b763 */
    --sd-gray-extralight: hsl(30, 10%, 96%); /* #f6f5f4 */
    --sd-gray-light: hsl(30, 10%, 92%);
    --sd-gray: hsl(30, 10%, 85%);
    --sd-gray-dark: hsl(257, 23%, 20%);
    --sd-gray-extradark: hsl(257, 23%, 16%); /* #241f31 */
    --sd-blue: hsl(200, 66%, 55%);
    --sd-highlight-bg-light: rgba(255, 255, 255, 1);
    --sd-highlight-bg-dark: rgba(0, 0, 0, .6);
    --sd-highlight-inline-bg-light: rgba(0, 0, 0, 0.07);
    --sd-highlight-inline-bg-dark: rgba(255, 255, 255, 0.1);
    --sd-font-weight-normal: 400;
    --sd-font-weight-bold: 600;

    /* Light mode variables */
    --sd-foreground-color: var(--sd-gray-extradark);
    --sd-background-color: var(--sd-gray-extralight);
    --sd-logo-color: var(--sd-brand-black);
    --sd-link-color: var(--sd-green);
    --sd-small-color: var(--sd-gray-dark);
    --sd-highlight-bg: var(--sd-highlight-bg-light);
    --sd-highlight-inline-bg: var(--sd-highlight-inline-bg-light);
    --sd-link-font-weight: var(--sd-font-weight-bold);
    --sd-table-row-bg: var(--sd-highlight-inline-bg-light);
    --sd-table-row-hover-bg: var(--sd-gray);
}

@media (prefers-color-scheme: dark) {
    :root {
        color-scheme: dark;
        --sd-foreground-color: var(--sd-gray);
        --sd-background-color: var(--sd-black);
        --sd-logo-color: var(--sd-brand-white);
        --sd-link-color: var(--sd-brand-green);
        --sd-small-color: var(--sd-gray);
        --sd-highlight-bg: var(--sd-highlight-bg-dark);
        --sd-highlight-inline-bg: var(--sd-highlight-inline-bg-dark);
        --sd-link-font-weight: var(--sd-font-weight-normal);
        --sd-table-row-bg: var(--sd-highlight-inline-bg-dark);
        --sd-table-row-hover-bg: var(--sd-highlight-bg-dark);
    }
}

/* Typography */
* {
    -moz-box-sizing: border-box;
    -webkit-box-sizing: border-box;
    box-sizing: border-box;
}

html,
body {
    margin: 0;
    padding: 0;
    font-size: 1rem;
    font-family: "Heebo", sans-serif;
    font-weight: 400;
    line-height: 1.6;
}

body {
    color: var(--sd-foreground-color);
    background-color: var(--sd-background-color);
}

h1,
h2,
h3,
h4,
h5,
h6 {
    margin: 1rem 0 0.625rem;
    font-weight: 600;
    line-height: 1.25;
}

h1 {
    text-align: center;
    font-size: 1.87rem;
    font-weight: 400;
    font-style: normal;
    margin-bottom: 2rem;
}

@media screen and
(min-width: 650px) { img { margin-left: 10%; margin-right: 10%; } h1 { font-size: 2.375em; } } h2 { font-size: 1.25rem; margin-top: 2.5em; } h3 { font-size: 1.15rem; } a { font-weight: var(--sd-link-font-weight); text-decoration: none; color: var(--sd-link-color); cursor: pointer; } a:hover { text-decoration: underline; } b { font-weight: 600; } small { color: var(--sd-small-color); } hr { margin: 3rem auto 4rem; width: 40%; opacity: 40%; } /* Layout */ .container { width: 80%; margin-left: auto; margin-right: auto; max-width: 720px; } /* Singletons */ .page-logo { display: block; padding: 5rem 0 3rem; color: var(--sd-logo-color); } .page-logo > svg { display: block; width: 12.625em; height: auto; margin: 0 auto; } .color-green { color: var(--sd-brand-green); } .color-blue { color: var(--sd-blue); } .page-link::after { content: " ➜"; } /* Footer */ footer { text-align: center; padding: 3em 0 3em; font-size: 1em; margin-top: 4rem; } @media (prefers-color-scheme: light) { .highlight .cm { color: #999988; font-style: italic; } .highlight .cp { color: #999999; font-weight: bold; } .highlight .c1 { color: #999988; font-style: italic; } .highlight .cs { color: #999999; font-weight: bold; font-style: italic; } .highlight .c, .highlight .ch, .highlight .cd, .highlight .cpf { color: #999988; font-style: italic; } .highlight .err { color: #a61717; background-color: #e3d2d2; } .highlight .gd { color: #000000; background-color: #ffdddd; } .highlight .ge { color: #000000; font-style: italic; } .highlight .gr { color: #aa0000; } .highlight .gh { color: #999999; } .highlight .gi { color: #000000; background-color: #ddffdd; } .highlight .go { color: #888888; } .highlight .gp { color: #555555; } .highlight .gs { font-weight: bold; } .highlight .gu { color: #aaaaaa; } .highlight .gt { color: #aa0000; } .highlight .kc { color: #000000; font-weight: bold; } .highlight .kd { color: #000000; font-weight: bold; } .highlight .kn { color: #000000; font-weight: bold; } .highlight .kp { color: #000000; font-weight: bold; } .highlight .kr { color: #000000; font-weight: bold; } .highlight .kt { color: #445588; font-weight: bold; } .highlight .k, .highlight .kv { color: #000000; font-weight: bold; } .highlight .mf { color: #009999; } .highlight .mh { color: #009999; } .highlight .il { color: #009999; } .highlight .mi { color: #009999; } .highlight .mo { color: #009999; } .highlight .m, .highlight .mb, .highlight .mx { color: #009999; } .highlight .sa { color: #000000; font-weight: bold; } .highlight .sb { color: #d14; } .highlight .sc { color: #d14; } .highlight .sd { color: #d14; } .highlight .s2 { color: #d14; } .highlight .se { color: #d14; } .highlight .sh { color: #d14; } .highlight .si { color: #d14; } .highlight .sx { color: #d14; } .highlight .sr { color: #009926; } .highlight .s1 { color: #d14; } .highlight .ss { color: #990073; } .highlight .s, .highlight .dl { color: #d14; } .highlight .na { color: #008080; } .highlight .bp { color: #999999; } .highlight .nb { color: #0086B3; } .highlight .nc { color: #445588; font-weight: bold; } .highlight .no { color: #008080; } .highlight .nd { color: #3c5d5d; font-weight: bold; } .highlight .ni { color: #800080; } .highlight .ne { color: #990000; font-weight: bold; } .highlight .nf, .highlight .fm { color: #990000; font-weight: bold; } .highlight .nl { color: #990000; font-weight: bold; } .highlight .nn { color: #555555; } .highlight .nt { color: #000080; } .highlight .vc { color: #008080; } .highlight .vg { color: #008080; } .highlight .vi { color: #008080; } 
.highlight .nv, .highlight .vm { color: #008080; } .highlight .ow { color: #000000; font-weight: bold; } .highlight .o { color: #000000; font-weight: bold; } .highlight .w { color: #bbbbbb; } } @media (prefers-color-scheme: dark) { /* rouge "base16.dark" code highlight */ /* generated with: rougify style base16.dark | sed '/background-color: #151515/d' */ .highlight, .highlight .w { color: #d0d0d0; } .highlight .err { color: #151515; background-color: #ac4142; } .highlight .c, .highlight .ch, .highlight .cd, .highlight .cm, .highlight .cpf, .highlight .c1, .highlight .cs { color: #505050; } .highlight .cp { color: #f4bf75; } .highlight .nt { color: #f4bf75; } .highlight .o, .highlight .ow { color: #d0d0d0; } .highlight .p, .highlight .pi { color: #d0d0d0; } .highlight .gi { color: #90a959; } .highlight .gd { color: #ac4142; } .highlight .gh { color: #6a9fb5; font-weight: bold; } .highlight .k, .highlight .kn, .highlight .kp, .highlight .kr, .highlight .kv { color: #aa759f; } .highlight .kc { color: #d28445; } .highlight .kt { color: #d28445; } .highlight .kd { color: #d28445; } .highlight .s, .highlight .sb, .highlight .sc, .highlight .dl, .highlight .sd, .highlight .s2, .highlight .sh, .highlight .sx, .highlight .s1 { color: #90a959; } .highlight .sa { color: #aa759f; } .highlight .sr { color: #75b5aa; } .highlight .si { color: #8f5536; } .highlight .se { color: #8f5536; } .highlight .nn { color: #f4bf75; } .highlight .nc { color: #f4bf75; } .highlight .no { color: #f4bf75; } .highlight .na { color: #6a9fb5; } .highlight .m, .highlight .mb, .highlight .mf, .highlight .mh, .highlight .mi, .highlight .il, .highlight .mo, .highlight .mx { color: #90a959; } .highlight .ss { color: #90a959; } } /* Code Blocks */ .highlighter-rouge { padding: 2px 1rem; border-radius: 5px; color: var(--sd-foreground-color); background-color: var(--sd-highlight-bg); overflow: auto; } .highlighter-rouge .highlight .err { background: transparent !important; color: inherit !important; } /* Inline Code */ code.highlighter-rouge { padding: 2px 6px; background-color: var(--sd-highlight-inline-bg); } a code.highlighter-rouge { color: inherit; } mkosi-26/docs/sysext.md000066400000000000000000000053311512054777600152560ustar00rootroot00000000000000--- title: Building system extensions with mkosi category: Tutorials layout: default SPDX-License-Identifier: LGPL-2.1-or-later --- # Building system extensions with mkosi [System extension](https://uapi-group.org/specifications/specs/extension_image/) images may – dynamically at runtime — extend the base system with an overlay containing additional files. To build system extensions with mkosi, we first have to create a base image on top of which we can build our extension. To keep things manageable, we'll use mkosi's support for building multiple images so that we can build our base image and system extension in one go. Start by creating a temporary directory with a base configuration file `mkosi.conf` with some shared settings: ```ini [Output] OutputDirectory=mkosi.output CacheDirectory=mkosi.cache ``` From now on we'll assume all steps are executed inside the temporary directory. Now let's continue with the base image definition by writing the following to `mkosi.images/base/mkosi.conf`: ```ini [Output] Format=directory [Content] CleanPackageMetadata=no Packages=systemd udev ``` We use the `directory` output format here instead of the `disk` output so that we can build our extension without needing root privileges. 
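
Since the base image is built in the `directory` format, after a build it is
available as a plain directory tree under the output directory configured
earlier, so you can inspect it directly without mounting anything. A quick,
optional spot-check (the path assumes the shared `OutputDirectory=mkosi.output`
from above):

```bash
ls mkosi.output/base/usr/lib/
```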
Now that we have our base image, we can define a sysext that builds on top of
it by writing the following to `mkosi.images/btrfs/mkosi.conf`:

```ini
[Config]
Dependencies=base

[Output]
Format=sysext
Overlay=yes

[Content]
BaseTrees=%O/base
Packages=btrfs-progs
```

`BaseTrees=` points to our base image and `Overlay=yes` instructs mkosi to only
package the files added on top of the base tree.

We can't sign the extension image without a key, so let's generate one with
`mkosi genkey` (or provide your own private key and certificate as `mkosi.key`
and `mkosi.crt` respectively). Note that this key will need to be loaded into
your kernel keyring either at build time or via MOK for systemd to accept the
system extension at runtime as trusted.

Finally, you can build the base image and the extension by running `mkosi -f`.
You'll find `btrfs.raw` in `mkosi.output`, which is the extension image. You'll
also find the main image `image.raw` there, but it will be almost empty.

What we can do now is package up the base image as the main image, but in
another format, for example an initrd. We can do that by adding the following
to `mkosi.conf`:

```ini
[Output]
Format=cpio
Output=initrd

[Content]
MakeInitrd=yes
BaseTrees=%O/base
```

If we now run `mkosi -f` again, we'll find `initrd.cpio.zst` in `mkosi.output`
with its accompanying extension still in `btrfs.raw`.

If you don't have any need for a main image, you can configure `Format=none`
in the `Output` section in `mkosi.conf` to disable it.
mkosi-26/kernel-install/000077500000000000000000000000001512054777600153675ustar00rootroot00000000000000mkosi-26/kernel-install/50-mkosi.install000077500000000000000000000061341512054777600203320ustar00rootroot00000000000000#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later

import logging
import os
import sys
import tempfile
from pathlib import Path
from typing import Optional

from mkosi import identify_cpu
from mkosi.archive import make_cpio
from mkosi.config import OutputFormat
from mkosi.initrd import KernelInstallContext
from mkosi.log import log_setup
from mkosi.run import run, uncaught_exception_handler
from mkosi.sandbox import umask
from mkosi.util import PathString


def we_are_wanted(context: KernelInstallContext) -> bool:
    return context.uki_generator == "mkosi" or context.initrd_generator in ("mkosi", "mkosi-initrd")


def build_microcode_initrd(output: Path) -> Optional[Path]:
    vendor, ucode = identify_cpu(Path("/"))

    if vendor is None:
        logging.warning("Unable to determine the vendor of your CPU, not adding microcode")
        return None

    if ucode is None:
        logging.warning("Unable to find microcode for your CPU in /usr/lib/firmware, not adding microcode")
        return None

    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp) / "initrd-microcode-root"
        # The kernel's early microcode loader expects the blob at this fixed path inside the cpio.
        destdir = root / "kernel/x86/microcode"

        with umask(~0o755):
            destdir.mkdir(parents=True, exist_ok=True)

        with (destdir / f"{vendor}.bin").open("wb") as f:
            f.write(ucode.read_bytes())

        make_cpio(root, output)

    return output


@uncaught_exception_handler()
def main() -> None:
    context = KernelInstallContext.parse(
        name="50-mkosi.install",
        description="kernel-install plugin to build initrds or Unified Kernel Images using mkosi",
    )

    log_setup(default_log_level="info" if context.verbose else "warning")

    if context.command != "add" or not we_are_wanted(context):
        logging.info("mkosi-initrd is not enabled, skipping")
        return

    # If kernel-install was passed a UKI, there's no need to build anything ourselves.
    if context.image_type == "uki":
        logging.info("Provided kernel image is already a unified kernel image, skipping mkosi-initrd")
        return

    # If the initrd was provided on the kernel command line, we shouldn't generate our own.
    if context.layout != "uki" and context.initrds:
        logging.info("Pre-built initrds were provided, skipping mkosi-initrd")
        return

    if context.layout == "uki" and context.uki_generator == "mkosi":
        format = OutputFormat.uki
    else:
        format = OutputFormat.cpio

    output = "initrd" if format == OutputFormat.cpio else "uki.efi"

    cmdline: list[PathString] = [
        "mkosi-initrd",
        "--kernel-version", context.kernel_version,
        "--format", str(format),
        "--output", output,
        "--output-dir", context.staging_area,
        "--kernel-image", context.kernel_image,
    ]  # fmt: skip

    if context.verbose:
        cmdline += ["--debug"]

    logging.info(f"Building {output}")

    run(cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ)

    if format == OutputFormat.cpio:
        build_microcode_initrd(context.staging_area / "microcode")


if __name__ == "__main__":
    main()
mkosi-26/kernel-install/51-mkosi-addon.install000077500000000000000000000027751512054777600214150ustar00rootroot00000000000000#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later

import logging
import os
import sys
from pathlib import Path

from mkosi.initrd import KernelInstallContext
from mkosi.log import log_setup
from mkosi.run import run, uncaught_exception_handler
from mkosi.util import PathString


@uncaught_exception_handler()
def main() -> None:
    context = KernelInstallContext.parse(
        name="51-mkosi-addon.install",
        description="kernel-install plugin to build local addon for initrd/cmdline",
    )

    log_setup(default_log_level="info" if context.verbose else "warning")

    # No local configuration? Then nothing to do
    if not Path("/etc/mkosi-addon").exists() and not Path("/run/mkosi-addon").exists():
        logging.info("No local configuration defined, skipping mkosi-addon")
        return

    if context.command != "add" or context.layout != "uki":
        logging.info("Not a UKI layout 'add' step, skipping mkosi-addon")
        return

    if context.image_type != "uki":
        logging.info("Provided kernel image is not a unified kernel image, skipping mkosi-addon")
        return

    cmdline: list[PathString] = [
        "mkosi-addon",
        "--output", "mkosi-local.addon.efi",
        "--output-dir", context.staging_area / "uki.efi.extra.d",
    ]  # fmt: skip

    if context.verbose:
        cmdline += ["--debug"]

    logging.info("Building mkosi-local.addon.efi")

    run(cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ)


if __name__ == "__main__":
    main()
mkosi-26/mkosi-addon000077700000000000000000000000001512054777600220522mkosi/resources/mkosi-addonustar00rootroot00000000000000mkosi-26/mkosi-initrd000077700000000000000000000000001512054777600224622mkosi/resources/mkosi-initrdustar00rootroot00000000000000mkosi-26/mkosi.conf000066400000000000000000000021751512054777600144410ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later
[Include]
Include=mkosi-vm

[Build]
CacheDirectory=mkosi.cache
History=yes

[Output]
# These images are (among other things) used for running mkosi which means we need some disk space available so
# default to directory output where disk space isn't a problem.
Format=directory
OutputDirectory=mkosi.output

[Build]
ToolsTree=yes
Incremental=yes
BuildSources=.

[Content]
Autologin=yes
SELinuxRelabel=no
ShimBootloader=unsigned
Packages=
    binutils
    gdb
    wireless-regdb
InitrdProfiles=lvm
RemoveFiles=
    # The grub install plugin doesn't play nice with booting from virtiofs.
/usr/lib/kernel/install.d/20-grub.install # The dracut install plugin doesn't honor KERNEL_INSTALL_INITRD_GENERATOR. /usr/lib/kernel/install.d/50-dracut.install # Make sure that SELinux doesn't run in enforcing mode even if it's pulled in as a dependency. KernelCommandLine= enforcing=0 systemd.log_ratelimit_kmsg=0 systemd.crash_shell printk.devkmsg=on systemd.early_core_pattern=/core KernelInitrdModules=default [Runtime] RAM=4G mkosi-26/mkosi.conf.d/000077500000000000000000000000001512054777600147335ustar00rootroot00000000000000mkosi-26/mkosi.conf.d/arch.conf000066400000000000000000000002261512054777600165170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= grub openssh python shim mkosi-26/mkosi.conf.d/azure-centos-fedora/000077500000000000000000000000001512054777600206105ustar00rootroot00000000000000mkosi-26/mkosi.conf.d/azure-centos-fedora/mkosi.conf000066400000000000000000000003371512054777600226040ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|fedora Distribution=|azure [Content] Packages= openssh-clients openssh-server mkosi-26/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/000077500000000000000000000000001512054777600231005ustar00rootroot00000000000000mkosi-26/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/arm64.conf000066400000000000000000000001751512054777600247030ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=arm64 [Content] Packages= grub2-efi-aa64-modules mkosi-26/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/uefi.conf000066400000000000000000000001741512054777600247010ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=uefi [Content] Packages= grub2-efi shim mkosi-26/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/x86-64.conf000066400000000000000000000002441512054777600246230ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= grub2-efi-x64-modules grub2-pc microcode_ctl mkosi-26/mkosi.conf.d/azure.conf000066400000000000000000000005101512054777600167240ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=azure [Distribution] Release=3.0 Repositories=base-preview [Content] # The unsigned-shim package tries to install to the same location as the signed shim package so we can't install and # test unsigned shim. 
ShimBootloader=none Packages= kernel-tools mkosi-26/mkosi.conf.d/bootable.conf000066400000000000000000000002371512054777600173730ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Format=|disk Format=|directory [Match] Architecture=|x86-64 Architecture=|arm64 [Content] Bootable=yes mkosi-26/mkosi.conf.d/centos.conf000066400000000000000000000003601512054777600170740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky [Distribution] Release=10 Repositories=epel [Content] # CentOS Stream 10 does not ship an unsigned shim ShimBootloader=none mkosi-26/mkosi.conf.d/debian-kali-ubuntu/000077500000000000000000000000001512054777600204135ustar00rootroot00000000000000mkosi-26/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf000066400000000000000000000003061512054777600224030ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|kali Distribution=|ubuntu [Content] Packages= openssh-client openssh-server python3 mkosi-26/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/000077500000000000000000000000001512054777600227035ustar00rootroot00000000000000mkosi-26/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/efi.conf000066400000000000000000000003421512054777600243140ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Note that this is a subset of 'uefi', there is no shim-signed on riscv/loong/armv7/i386 [Match] Architecture=|arm64 Architecture=|x86-64 [Content] Packages= shim-signed mkosi-26/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/x86-64.conf000066400000000000000000000003131512054777600244230ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= amd64-microcode grub-efi grub-efi-amd64 grub-pc-bin intel-microcode mkosi-26/mkosi.conf.d/debian.conf000066400000000000000000000002111512054777600170160ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=debian [Distribution] Release=testing Repositories=non-free-firmware mkosi-26/mkosi.conf.d/fedora/000077500000000000000000000000001512054777600161735ustar00rootroot00000000000000mkosi-26/mkosi.conf.d/fedora/mkosi.conf000066400000000000000000000001521512054777600201620ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Distribution] Release=rawhide mkosi-26/mkosi.conf.d/fedora/mkosi.conf.d/000077500000000000000000000000001512054777600204635ustar00rootroot00000000000000mkosi-26/mkosi.conf.d/fedora/mkosi.conf.d/arm64.conf000066400000000000000000000001741512054777600222650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=arm64 [Content] Packages= shim-unsigned-aarch64 mkosi-26/mkosi.conf.d/fedora/mkosi.conf.d/x86_64.conf000066400000000000000000000002241512054777600222660ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= amd-ucode-firmware shim-unsigned-x64 mkosi-26/mkosi.conf.d/kali.conf000066400000000000000000000001671512054777600165260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=kali [Distribution] Repositories=non-free-firmware mkosi-26/mkosi.conf.d/memory.conf000066400000000000000000000001551512054777600171130ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Format=|esp Format=|uki Format=|cpio [Runtime] RAM=8G 
mkosi-26/mkosi.conf.d/metadata.conf000066400000000000000000000003601512054777600173610ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Remove package manager metadata on cpio based output formats # to keep memory usage within reasonable limits. [Match] Format=|uki Format=|esp Format=|cpio [Content] CleanPackageMetadata=yes mkosi-26/mkosi.conf.d/opensuse/000077500000000000000000000000001512054777600165745ustar00rootroot00000000000000mkosi-26/mkosi.conf.d/opensuse/mkosi.conf000066400000000000000000000003761512054777600205730ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Distribution] Release=tumbleweed [Content] # openSUSE does not ship an unsigned shim ShimBootloader=none Packages= openssh-clients openssh-server shim mkosi-26/mkosi.conf.d/opensuse/mkosi.conf.d/000077500000000000000000000000001512054777600210645ustar00rootroot00000000000000mkosi-26/mkosi.conf.d/opensuse/mkosi.conf.d/x86-64.conf000066400000000000000000000003061512054777600226060ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= grub2-efi grub2-i386-pc grub2-x86_64-efi ucode-amd ucode-intel mkosi-26/mkosi.conf.d/postmarketos.conf000066400000000000000000000003611512054777600203350ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=postmarketos [Content] Packages= openssh-server openssh-server-pam-systemd # postmarketOS / Alpine Linux does not ship an unsigned shim ShimBootloader=none mkosi-26/mkosi.conf.d/rhel-ubi.conf000066400000000000000000000001751512054777600173140ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=rhel-ubi [Distribution] Release=9 [Content] Bootable=no mkosi-26/mkosi.conf.d/ubuntu.conf000066400000000000000000000001761512054777600171300ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=ubuntu [Distribution] Release=devel Repositories=universe mkosi-26/mkosi.conf.d/x86-64.conf000066400000000000000000000002041512054777600164520ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 Distribution=!postmarketos [Content] BiosBootloader=grub mkosi-26/mkosi.extra/000077500000000000000000000000001512054777600147075ustar00rootroot00000000000000mkosi-26/mkosi.extra/usr/000077500000000000000000000000001512054777600155205ustar00rootroot00000000000000mkosi-26/mkosi.extra/usr/lib/000077500000000000000000000000001512054777600162665ustar00rootroot00000000000000mkosi-26/mkosi.extra/usr/lib/repart.d/000077500000000000000000000000001512054777600200055ustar00rootroot00000000000000mkosi-26/mkosi.extra/usr/lib/repart.d/root.conf000066400000000000000000000001041512054777600216320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root mkosi-26/mkosi.extra/usr/lib/systemd/000077500000000000000000000000001512054777600177565ustar00rootroot00000000000000mkosi-26/mkosi.extra/usr/lib/systemd/mkosi-check-and-shutdown.sh000077500000000000000000000003511512054777600251220ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: LGPL-2.1-or-later set -eux systemctl --failed --no-legend | tee /failed-services # Exit with non-zero EC if the /failed-services file is not empty (we have -e set) [[ ! 
-s /failed-services ]] mkosi-26/mkosi.extra/usr/lib/systemd/system-preset/000077500000000000000000000000001512054777600226025ustar00rootroot00000000000000mkosi-26/mkosi.extra/usr/lib/systemd/system-preset/00-mkosi.preset000066400000000000000000000015421512054777600253670ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # mkosi adds its own ssh units via the --ssh switch so disable the default ones. disable ssh.service disable sshd.service # Make sure dbus-broker is started by default on Debian/Kali/Ubuntu. enable dbus-broker.service # Make sure we have networking available. enable systemd-networkd.service enable systemd-resolved.service # We install dnf in some images but it's only going to be used rarely, # so let's not have dnf create its cache. disable dnf-makecache.* # The rpmdb is already in the right location, don't try to migrate it. disable rpmdb-migrate.service # We have journald to receive audit data so let's make sure we're not running auditd as well disable auditd.service # systemd-timesyncd is not enabled by default in the default systemd preset so enable it here instead. enable systemd-timesyncd.service mkosi-26/mkosi.extra/usr/lib/systemd/system-preset/99-mkosi.preset000066400000000000000000000002161512054777600254060ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Make sure that services are disabled by default (primarily for Debian/Kali/Ubuntu). disable * mkosi-26/mkosi.extra/usr/lib/systemd/system/000077500000000000000000000000001512054777600213025ustar00rootroot00000000000000mkosi-26/mkosi.extra/usr/lib/systemd/system/mkosi-check-and-shutdown.service000066400000000000000000000005251512054777600274740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Unit] Description=Check if any service failed and then shut down the machine After=multi-user.target network-online.target Requires=multi-user.target SuccessAction=exit FailureAction=exit SuccessActionExitStatus=123 [Service] Type=oneshot ExecStart=/usr/lib/systemd/mkosi-check-and-shutdown.sh mkosi-26/mkosi.md000077700000000000000000000000001512054777600216142mkosi/resources/man/mkosi.1.mdustar00rootroot00000000000000mkosi-26/mkosi.postinst000077500000000000000000000004761512054777600154040ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: LGPL-2.1-or-later set -e mkosi-chroot \ useradd \ --user-group \ --create-home \ --password "$(openssl passwd -1 mkosi)" \ --groups systemd-journal \ --shell /bin/bash \ --uid 4711 \ mkosi systemctl --root="$BUILDROOT" mask lvm2-monitor.service mkosi-26/mkosi.profiles/000077500000000000000000000000001512054777600154075ustar00rootroot00000000000000mkosi-26/mkosi.profiles/mkosi-depends/000077500000000000000000000000001512054777600201515ustar00rootroot00000000000000mkosi-26/mkosi.profiles/mkosi-depends/mkosi.conf000066400000000000000000000001111512054777600221330ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Content] Packages=python3 mkosi-26/mkosi.profiles/mkosi-depends/mkosi.prepare000077500000000000000000000003421512054777600226550ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: LGPL-2.1-or-later set -e if [ "$1" = "build" ]; then exit 0 fi mkosi-chroot "$SRCDIR"/bin/mkosi dependencies -- --profile misc,package-manager,runtime | xargs -d '\n' mkosi-install 
mkosi-26/mkosi.profiles/rpm/000077500000000000000000000000001512054777600162055ustar00rootroot00000000000000mkosi-26/mkosi.profiles/rpm/mkosi.build.chroot000077500000000000000000000005741512054777600216560ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later set -ex rpmbuild \ -bb \ --noprep \ --build-in-place \ $([ "$WITH_TESTS" = "0" ] && echo --nocheck) \ --define "_topdir /var/tmp" \ --define "_sourcedir $PWD/rpm" \ --define "_rpmdir $PACKAGEDIR" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec mkosi-26/mkosi.profiles/rpm/mkosi.conf000066400000000000000000000003251512054777600201760ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Build] BuildSources=rpm [Content] Packages= rpm-build rpmautospec VolatilePackages= mkosi mkosi-initrd mkosi-26/mkosi.profiles/rpm/mkosi.prepare000077500000000000000000000020461512054777600207140ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later set -e mkosi-chroot \ rpmspec \ --query \ --buildrequires \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ rpm/mkosi.spec | sort --unique | tee /tmp/buildrequires | xargs --delimiter '\n' mkosi-install until mkosi-chroot \ rpmbuild \ -bd \ --build-in-place \ --define "_topdir /var/tmp" \ --define "_sourcedir rpm" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \ rpm/mkosi.spec do EXIT_STATUS=$? if [ $EXIT_STATUS -ne 11 ]; then exit $EXIT_STATUS fi mkosi-chroot \ rpm \ --query \ --package \ --requires \ /var/tmp/SRPMS/mkosi-*.buildreqs.nosrc.rpm | grep --invert-match '^rpmlib(' | sort --unique >/tmp/dynamic-buildrequires sort /tmp/buildrequires /tmp/dynamic-buildrequires | uniq --unique | tee --append /tmp/buildrequires | xargs --delimiter '\n' mkosi-install done mkosi-26/mkosi.tools.conf/000077500000000000000000000000001512054777600156505ustar00rootroot00000000000000mkosi-26/mkosi.tools.conf/mkosi.conf000066400000000000000000000001351512054777600176400ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Content] Packages= gnupg lvm2 mkosi-26/mkosi.tools.conf/mkosi.conf.d/000077500000000000000000000000001512054777600201405ustar00rootroot00000000000000mkosi-26/mkosi.tools.conf/mkosi.conf.d/arch.conf000066400000000000000000000003071512054777600217240ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= cryptsetup mkinitcpio mypy python-pytest ruff sequoia-sop mkosi-26/mkosi.tools.conf/mkosi.conf.d/azure-centos-fedora.conf000066400000000000000000000003101512054777600246560ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|azure Distribution=|centos Distribution=|fedora [Content] Packages= cryptsetup python3-mypy python3-pytest mkosi-26/mkosi.tools.conf/mkosi.conf.d/debian-kali-ubuntu.conf000066400000000000000000000003361512054777600244710ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|kali Distribution=|ubuntu [Content] Packages= cryptsetup-bin fdisk mypy python3-pytest sqop mkosi-26/mkosi.tools.conf/mkosi.conf.d/fedora.conf000066400000000000000000000002001512054777600222370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= ruff sequoia-sop 
mkosi-26/mkosi.tools.conf/mkosi.conf.d/ncdu.conf000066400000000000000000000002571512054777600217440ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=!fedora [TriggerMatch] Distribution=fedora Architecture=!ppc64-le [Content] Packages= ncdu mkosi-26/mkosi.tools.conf/mkosi.conf.d/opensuse.conf000066400000000000000000000004361512054777600226530ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= cryptsetup grub2 # TODO: Move to default tools tree when https://bugzilla.opensuse.org/show_bug.cgi?id=1227464 is resolved. mypy python3-pytest ruff mkosi-26/mkosi/000077500000000000000000000000001512054777600135655ustar00rootroot00000000000000mkosi-26/mkosi/__init__.py000066400000000000000000005651421512054777600157130ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import datetime import functools import getpass import hashlib import itertools import json import logging import os import re import resource import shlex import shutil import signal import socket import stat import subprocess import sys import tempfile import textwrap import uuid import zipapp from collections.abc import Iterator, Mapping, Sequence from contextlib import AbstractContextManager from pathlib import Path from typing import Any, Optional, Union, cast from mkosi.archive import can_extract_tar, extract_tar, make_cpio, make_tar from mkosi.bootloader import ( KernelType, efi_boot_binary, extract_pe_section, gen_kernel_images, grub_bios_setup, install_grub, install_shim, install_systemd_boot, prepare_grub_config, python_binary, run_systemd_sign_tool, shim_second_stage_binary, sign_efi_binary, want_efi, want_grub_bios, want_grub_efi, ) from mkosi.burn import run_burn from mkosi.completion import print_completion from mkosi.config import ( Args, ArtifactOutput, Cacheonly, CertificateSourceType, Compression, Config, ConfigFeature, DocFormat, Incremental, JsonEncoder, KeySourceType, ManifestFormat, Network, OutputFormat, SecureBootSignTool, ShimBootloader, Ssh, UnifiedKernelImage, Verb, Verity, Vmm, cat_config, dump_json, expand_delayed_specifiers, finalize_configdir, format_bytes, in_box, parse_boolean, parse_config, resolve_deps, summary, systemd_pty_forward, systemd_tool_version, want_kernel, want_selinux_relabel, yes_no, ) from mkosi.context import Context from mkosi.distribution import Distribution, detect_distribution from mkosi.documentation import show_docs from mkosi.installer import clean_package_manager_metadata from mkosi.kmod import ( filter_devicetrees, gen_required_kernel_modules, is_valid_kdir, loaded_modules, process_kernel_modules, ) from mkosi.log import ARG_DEBUG, complete_step, die, log_notice, log_step, ring_terminal_bell from mkosi.manifest import Manifest from mkosi.mounts import ( finalize_certificate_mounts, finalize_source_mounts, finalize_volatile_tmpdir, mount_overlay, ) from mkosi.pager import page from mkosi.partition import Partition, finalize_root, finalize_roothash from mkosi.qemu import ( copy_ephemeral, finalize_credentials, finalize_kernel_command_line_extra, finalize_register, join_initrds, run_qemu, run_ssh, start_journal_remote, ) from mkosi.run import ( Popen, apivfs_options, chroot_cmd, chroot_options, finalize_interpreter, finalize_passwd_symlinks, find_binary, fork_and_wait, run, spawn, workdir, ) from mkosi.sandbox import ( CAP_SYS_ADMIN, CLONE_NEWNS, MOUNT_ATTR_NODEV, MOUNT_ATTR_NOEXEC, MOUNT_ATTR_NOSUID, MOUNT_ATTR_RDONLY, 
MS_REC, MS_SLAVE, __version__, acquire_privileges, have_effective_cap, join_new_session_keyring, mount, mount_rbind, umask, unshare, userns_has_single_user, ) from mkosi.sysupdate import run_sysupdate from mkosi.tree import copy_tree, make_tree, move_tree, rmtree from mkosi.user import INVOKING_USER, become_root_cmd from mkosi.util import ( PathString, chdir, flatten, flock_or_die, format_rlimit, hash_file, make_executable, one_zero, read_env_file, resource_path, scopedenv, ) from mkosi.versioncomp import GenericVersion from mkosi.vmspawn import run_vmspawn # Allowed characters from https://uapi-group.org/specifications/specs/version_format_specification KERNEL_VERSION_PATTERN = re.compile(r"\d+\.\d+[\w\-.~^+]*", re.ASCII) @contextlib.contextmanager def mount_base_trees(context: Context) -> Iterator[None]: if not context.config.base_trees or not context.config.overlay: yield return with complete_step("Mounting base trees…"), contextlib.ExitStack() as stack: bases = [] (context.workspace / "bases").mkdir(exist_ok=True) for path in context.config.base_trees: d = context.workspace / f"bases/{path.name}-{uuid.uuid4().hex}" path = path.resolve() if path.is_dir(): bases += [path] elif can_extract_tar(path): extract_tar(path, d, sandbox=context.sandbox) bases += [d] elif path.suffix == ".raw": run( ["systemd-dissect", "--mount", "--mkdir", path, d], env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no"), ) stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", d])) bases += [d] else: die(f"Unsupported base tree source {path}") with mount_overlay(bases, context.root, upperdir=context.root): yield stack.enter_context(mount_overlay(bases, context.workspace / "lower")) for p in context.root.rglob("*"): rel = p.relative_to(context.root) q = context.workspace / "lower" / rel if ( context.config.output_format == OutputFormat.sysext and not rel.is_relative_to("usr") and not rel.is_relative_to("opt") ): continue if context.config.output_format == OutputFormat.confext and not rel.is_relative_to("etc"): continue if not q.is_symlink() and not q.exists(): continue if not p.is_symlink() and p.is_dir(): if q.is_symlink() or not q.is_dir(): die(f"/{rel} is a directory in the overlay but not in the base tree") shutil.copystat(q, p) else: logging.info(f"Removing duplicate path /{rel} from overlay") p.unlink() def remove_files(context: Context) -> None: """Remove files based on user-specified patterns""" if context.config.remove_files: with complete_step("Removing files…"): remove = flatten( context.root.glob(pattern.lstrip("/")) for pattern in context.config.remove_files ) rmtree(*remove, sandbox=context.sandbox) if context.config.output_format.is_extension_image(): with complete_step("Removing empty directories…"): for path, dirs, _ in os.walk(context.root, topdown=False): p = Path(path) for d in dirs: t = p / d if not t.is_symlink() and not any(t.iterdir()): t.rmdir() def install_distribution(context: Context) -> None: if context.config.base_trees: if not context.config.packages: return with complete_step( f"Installing extra packages for {context.config.distribution.installer.pretty_name()}" ): context.config.distribution.installer.install_packages(context, context.config.packages) else: if context.config.overlay or context.config.output_format.is_extension_image(): if context.config.packages: die( "Cannot install packages in extension images without a base tree", hint="Configure a base tree with the BaseTrees= setting", ) return with complete_step(f"Installing 
{context.config.distribution.installer.pretty_name()}"): context.config.distribution.installer.install(context) if context.config.machine_id: with umask(~0o755): (context.root / "etc").mkdir(exist_ok=True) with umask(~0o444): (context.root / "etc/machine-id").write_text(context.config.machine_id.hex) elif (context.root / "etc").exists() and not (context.root / "etc/machine-id").exists(): # Uninitialized means we want it to get initialized on first boot. with umask(~0o444): (context.root / "etc/machine-id").write_text("uninitialized\n") # Ensure /efi exists so that the ESP is mounted there, as recommended by # https://0pointer.net/blog/linux-boot-partitions.html. Use the most restrictive access # mode we can without tripping up mkfs tools since this directory is only meant to be # overmounted and should not be read from or written to. with umask(~0o500): (context.root / "efi").mkdir(exist_ok=True) (context.root / "boot").mkdir(exist_ok=True) # Ensure /boot/loader/entries.srel exists and has "type1" written to it to nudge # kernel-install towards using the boot loader specification layout. with umask(~0o700): (context.root / "boot/loader").mkdir(exist_ok=True) with umask(~0o600): (context.root / "boot/loader/entries.srel").write_text("type1\n") if context.config.packages: context.config.distribution.installer.install_packages(context, context.config.packages) for f in ( "var/lib/systemd/random-seed", "var/lib/systemd/credential.secret", "etc/machine-info", "var/lib/dbus/machine-id", ): # Using missing_ok=True still causes an OSError if the mount is read-only even if the # file doesn't exist so do an explicit exists() check first. if (context.root / f).exists(): (context.root / f).unlink() def install_build_packages(context: Context) -> None: if not context.config.build_scripts or not context.config.build_packages: return with ( complete_step( f"Installing build packages for {context.config.distribution.installer.pretty_name()}" ), setup_build_overlay(context), ): context.config.distribution.installer.install_packages(context, context.config.build_packages) def install_volatile_packages(context: Context) -> None: if not context.config.volatile_packages: return with complete_step( f"Installing volatile packages for {context.config.distribution.installer.pretty_name()}" ): context.config.distribution.installer.install_packages( context, context.config.volatile_packages, allow_downgrade=True ) def remove_packages(context: Context) -> None: """Remove packages listed in config.remove_packages""" if not context.config.remove_packages: return with complete_step(f"Removing {len(context.config.remove_packages)} packages…"): try: context.config.distribution.installer.package_manager(context.config).remove( context, context.config.remove_packages ) except NotImplementedError: die(f"Removing packages is not supported for {context.config.distribution}") def check_root_populated(context: Context) -> None: if ( context.config.output_format == OutputFormat.none or context.config.output_format.is_extension_image() ): return """Check that the root was populated by looking for a os-release file.""" osrelease = context.root / "usr/lib/os-release" if not osrelease.exists(): die( f"{osrelease} not found.", hint=( "The root must be populated by the distribution, or from base trees, " "skeleton trees, and prepare scripts." 
), ) def configure_os_release(context: Context) -> None: """Write IMAGE_ID and IMAGE_VERSION to /usr/lib/os-release in the image.""" if context.config.overlay or context.config.output_format.is_extension_image(): return for candidate in ["usr/lib/os-release", "usr/lib/initrd-release", "etc/os-release"]: osrelease = context.root / candidate if not osrelease.is_file() or osrelease.is_symlink(): continue if context.config.image_id or context.config.image_version or context.config.hostname: # at this point we know we will either change or add to the file newosrelease = osrelease.with_suffix(".new") image_id_written = image_version_written = default_hostname_written = False with osrelease.open("r") as old, newosrelease.open("w") as new: # fix existing values for line in old.readlines(): if context.config.image_id and line.startswith("IMAGE_ID="): new.write(f'IMAGE_ID="{context.config.image_id}"\n') image_id_written = True elif context.config.image_version and line.startswith("IMAGE_VERSION="): new.write(f'IMAGE_VERSION="{context.config.image_version}"\n') image_version_written = True elif context.config.hostname and line.startswith("DEFAULT_HOSTNAME="): new.write(f'DEFAULT_HOSTNAME="{context.config.hostname}"\n') default_hostname_written = True else: new.write(line) # append if they were missing if context.config.image_id and not image_id_written: new.write(f'IMAGE_ID="{context.config.image_id}"\n') if context.config.image_version and not image_version_written: new.write(f'IMAGE_VERSION="{context.config.image_version}"\n') if context.config.hostname and not default_hostname_written: new.write(f'DEFAULT_HOSTNAME="{context.config.hostname}"\n') newosrelease.rename(osrelease) if ArtifactOutput.os_release in context.config.split_artifacts: shutil.copy(osrelease, context.staging / context.config.output_split_os_release) def configure_extension_release(context: Context) -> None: if context.config.output_format not in (OutputFormat.sysext, OutputFormat.confext): return prefix = "SYSEXT" if context.config.output_format == OutputFormat.sysext else "CONFEXT" d = "usr/lib" if context.config.output_format == OutputFormat.sysext else "etc" p = context.root / d / f"extension-release.d/extension-release.{context.config.output}" p.parent.mkdir(parents=True, exist_ok=True) osrelease = read_env_file(q) if (q := context.root / "usr/lib/os-release").exists() else {} extrelease = read_env_file(p) if p.exists() else {} new = p.with_suffix(".new") with new.open("w") as f: for k, v in extrelease.items(): f.write(f"{k}={v}\n") if "ID" not in extrelease: f.write(f"ID={osrelease.get('ID', '_any')}\n") if f"{prefix}_LEVEL" not in extrelease and (level := osrelease.get(f"{prefix}_LEVEL")): f.write(f"{prefix}_LEVEL={level}\n") if "VERSION_ID" not in extrelease and (version := osrelease.get("VERSION_ID")): f.write(f"VERSION_ID={version}\n") if f"{prefix}_ID" not in extrelease and context.config.image_id: f.write(f"{prefix}_ID={context.config.image_id}\n") if f"{prefix}_VERSION_ID" not in extrelease and context.config.image_version: f.write(f"{prefix}_VERSION_ID={context.config.image_version}\n") if f"{prefix}_SCOPE" not in extrelease: f.write( f"{prefix}_SCOPE=" f"{context.config.finalize_environment().get(f'{prefix}_SCOPE', 'initrd system portable')}\n" ) if "ARCHITECTURE" not in extrelease: f.write(f"ARCHITECTURE={context.config.architecture}\n") new.rename(p) def configure_autologin_service(context: Context, service: str, extra: str) -> None: dropin = context.root / f"usr/lib/systemd/system/{service}.d/autologin.conf" 
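# A systemd drop-in under <unit>.d/ extends the unit without replacing it: the empty
# ExecStart= written below first clears the distribution's ExecStart before substituting the
# agetty autologin invocation. Worked example (derived from the path above): for
# service="console-getty.service" the drop-in lands at
# /usr/lib/systemd/system/console-getty.service.d/autologin.conf.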
with umask(~0o755): dropin.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): dropin.write_text( textwrap.dedent( f"""\ [Service] ExecStart= ExecStart=-agetty -o '-f -p -- \\\\u' --autologin root {extra} $TERM StandardInput=tty StandardOutput=tty """ ) ) def configure_autologin(context: Context) -> None: if not context.config.autologin: return with complete_step("Setting up autologin…"): configure_autologin_service( context, "console-getty.service", "--noclear --keep-baud console 115200,38400,9600", ) configure_autologin_service( context, "getty@tty1.service", "--noclear -", ) configure_autologin_service( context, "serial-getty@hvc0.service", "--keep-baud 115200,57600,38400,9600 -", ) def configure_verity_certificate(context: Context) -> None: if not context.config.verity_certificate: return # TODO: support providers after https://github.com/systemd/systemd/pull/39962 is merged if context.config.verity_certificate_source.type != CertificateSourceType.file: return veritydir = context.root / "usr/lib/verity.d" with umask(~0o755): veritydir.mkdir(parents=True, exist_ok=True) # dissect wants .crt and will ignore anything else dest = veritydir / context.config.verity_certificate.with_suffix(".crt").name with umask(~0o644): shutil.copy(context.config.verity_certificate, dest) def configure_mountpoints(context: Context) -> None: if context.config.output_format != OutputFormat.portable: return for f in ("passwd", "group", "shadow", "gshadow", "nsswitch.conf", "resolv.conf", "machine-id"): with umask(~0o600 if f in ("shadow", "gshadow") else ~0o644): p = context.root / "etc" / f if not p.is_symlink(): (context.root / "etc" / f).touch(exist_ok=True) @contextlib.contextmanager def setup_build_overlay(context: Context, volatile: bool = False) -> Iterator[None]: d = context.workspace / "build-overlay" if not d.is_symlink(): with umask(~0o755): d.mkdir(exist_ok=True) # We don't support multiple levels of root overlay. assert not context.lowerdirs assert not context.upperdir assert not context.workdir with contextlib.ExitStack() as stack: if volatile: context.lowerdirs = [d] context.upperdir = Path( stack.enter_context( tempfile.TemporaryDirectory( prefix="volatile-overlay.", dir=finalize_volatile_tmpdir(), ) ) ) os.chmod(context.upperdir, d.stat().st_mode) else: context.upperdir = d context.workdir = stack.enter_context( tempfile.TemporaryDirectory( dir=Path(context.upperdir).parent, prefix=f"{Path(context.upperdir).name}-workdir", ) ) try: yield finally: context.lowerdirs = [] context.upperdir = None context.workdir = None @contextlib.contextmanager def finalize_scripts(config: Config, scripts: Mapping[str, Sequence[PathString]]) -> Iterator[Path]: with tempfile.TemporaryDirectory(prefix="mkosi-scripts-") as d: for name, script in scripts.items(): # Make sure we don't end up in a recursive loop when we name a script after the binary # it execs by removing the scripts directory from the PATH when we execute a script. 
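# Illustrative sketch of a wrapper this loop generates, assuming a helper named "ukify"
# wrapping ["/usr/bin/python3", "/usr/lib/systemd/ukify"] (both paths are assumptions for the
# example only):
#   #!/bin/sh
#   DIR="$(cd "$(dirname "$0")" && pwd)"
#   PATH="$(echo "$PATH" | tr ':' '\n' | grep -v "$DIR" | tr '\n' ':')"
#   export PATH
#   exec /usr/bin/python3 /usr/lib/systemd/ukify "$@"
# The PATH edit strips the wrapper's own directory so the exec'd command resolves to the real
# binary instead of recursing back into the wrapper.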
with (Path(d) / name).open("w") as f: f.write("#!/bin/sh\n") if config.find_binary(name): f.write( textwrap.dedent( """\ DIR="$(cd "$(dirname "$0")" && pwd)" PATH="$(echo "$PATH" | tr ':' '\\n' | grep -v "$DIR" | tr '\\n' ':')" export PATH """ ) ) f.write(f'exec {shlex.join(os.fspath(s) for s in script)} "$@"\n') make_executable(Path(d) / name) os.utime(Path(d) / name, (0, 0)) yield Path(d) def finalize_host_scripts( context: Context, helpers: Mapping[str, Sequence[PathString]] = {}, ) -> AbstractContextManager[Path]: scripts: dict[str, Sequence[PathString]] = {} for binary in ("useradd", "groupadd"): if context.config.find_binary(binary): scripts[binary] = (binary, "--root", "/buildroot") if ukify := context.config.find_binary("ukify"): scripts["ukify"] = (python_binary(context.config), ukify) return finalize_scripts(context.config, scripts | dict(helpers)) @contextlib.contextmanager def finalize_config_json(config: Config) -> Iterator[Path]: with tempfile.NamedTemporaryFile(mode="w") as f: f.write(dump_json(config.to_dict())) f.flush() yield Path(f.name) def run_configure_scripts(config: Config) -> Config: if not config.configure_scripts: return config for script in config.configure_scripts: if not os.access(script, os.X_OK): die(f"{script} is not executable") env = dict( DISTRIBUTION=str(config.distribution), RELEASE=config.release, ARCHITECTURE=str(config.architecture), QEMU_ARCHITECTURE=config.architecture.to_qemu(), DISTRIBUTION_ARCHITECTURE=config.distribution.installer.architecture(config.architecture), SRCDIR="/work/src", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_DEBUG=one_zero(ARG_DEBUG.get()), ) if config.architecture.to_efi() is not None: env["EFI_ARCHITECTURE"] = str(config.architecture.to_efi()) if config.profiles: env["PROFILES"] = " ".join(config.profiles) with finalize_source_mounts(config, ephemeral=False) as sources: for script in config.configure_scripts: with complete_step(f"Running configure script {script}…"): result = run( ["/work/configure"], env=env | config.finalize_environment(), sandbox=config.sandbox( options=[ "--dir", "/work/src", "--chdir", "/work/src", "--ro-bind", script, "/work/configure", *sources, ], ), input=dump_json(config.to_dict(), indent=None), stdout=subprocess.PIPE, ) # fmt: skip config = Config.from_json(result.stdout) return config def run_sync_scripts(config: Config) -> None: if not config.sync_scripts: return for script in config.sync_scripts: if not os.access(script, os.X_OK): die(f"{script} is not executable") env = dict( DISTRIBUTION=str(config.distribution), RELEASE=config.release, ARCHITECTURE=str(config.architecture), DISTRIBUTION_ARCHITECTURE=config.distribution.installer.architecture(config.architecture), SRCDIR="/work/src", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", CACHED=one_zero(have_cache(config)), MKOSI_DEBUG=one_zero(ARG_DEBUG.get()), ) if config.architecture.to_efi() is not None: env["EFI_ARCHITECTURE"] = str(config.architecture.to_efi()) if config.profiles: env["PROFILES"] = " ".join(config.profiles) with ( finalize_source_mounts(config, ephemeral=False) as sources, finalize_config_json(config) as json, tempfile.TemporaryDirectory( dir=config.workspace_dir_or_default(), prefix="mkosi-metadata-", ) as sandbox_tree, ): install_sandbox_trees(config, Path(sandbox_tree)) for script in config.sync_scripts: options = [ *finalize_certificate_mounts(config), "--ro-bind", script, "/work/sync", "--ro-bind", json, "/work/config.json", # We need to make sure SSH keys and 
such can be accessed when git is used so bind in /home to # make sure that works. "--ro-bind", "/home", "/home", # e.g. the ssh-agent socket and such will be in /run and might be used by git so make sure # those are available as well. "--ro-bind", "/run", "/run", "--dir", "/work/src", "--chdir", "/work/src", *sources, ] # fmt: skip with complete_step(f"Running sync script {script}…"): run( ["/work/sync", "final"], env=os.environ | env | config.finalize_environment(), stdin=sys.stdin, sandbox=config.sandbox( network=True, options=options, overlay=Path(sandbox_tree), ), ) @contextlib.contextmanager def script_maybe_chroot_sandbox( context: Context, *, script: Path, options: Sequence[PathString], network: bool, ) -> Iterator[list[PathString]]: options = ["--dir", "/work/src", "--chdir", "/work/src", *options] suppress_chown = parse_boolean( context.config.finalize_environment().get("MKOSI_CHROOT_SUPPRESS_CHOWN", "0") ) helpers = { "mkosi-chroot": [ finalize_interpreter(bool(context.config.tools_tree)), "-SI", "/sandbox.py", "--bind", "/buildroot", "/", "--bind", "/var/tmp", "/var/tmp", *apivfs_options(root=Path("/")), *chroot_options(), "--bind", "/work", "/work", "--chdir", "/work/src", *(["--ro-bind-try", "/etc/resolv.conf", "/etc/resolv.conf"] if network else []), *(["--suppress-chown"] if suppress_chown else []), ], **context.config.distribution.installer.package_manager(context.config).scripts(context), } # fmt: skip with finalize_host_scripts(context, helpers) as hd: if script.suffix != ".chroot": with context.sandbox( network=network, options=[ *options, *context.rootoptions(), *context.config.distribution.installer.package_manager(context.config).mounts(context), ], scripts=hd, ) as sandbox: # fmt: skip yield sandbox else: if suppress_chown: options += ["--suppress-chown"] with chroot_cmd( root=context.rootoptions, network=network, options=options, ) as sandbox: yield sandbox def run_prepare_scripts(context: Context, build: bool) -> None: if not context.config.prepare_scripts: return if build and not context.config.build_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.installer.architecture( context.config.architecture ), BUILDROOT="/buildroot", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", SCRIPT="/work/prepare", CHROOT_SCRIPT="/work/prepare", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", WITH_DOCS=one_zero(context.config.with_docs), WITH_NETWORK=one_zero(context.config.with_network), WITH_TESTS=one_zero(context.config.with_tests), MKOSI_DEBUG=one_zero(ARG_DEBUG.get()), ) if context.config.architecture.to_efi() is not None: env["EFI_ARCHITECTURE"] = str(context.config.architecture.to_efi()) if context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) env |= context.config.finalize_environment() with ( setup_build_overlay(context) if build else contextlib.nullcontext(), finalize_source_mounts( context.config, ephemeral=bool(context.config.build_sources_ephemeral), ) as sources, finalize_config_json(context.config) as json, ): if build: step_msg = "Running prepare script {} in build overlay…" arg = "build" else: step_msg = "Running prepare script {}…" arg = "final" for script in context.config.prepare_scripts: with complete_step(step_msg.format(script)): options: list[PathString] = [ "--ro-bind", script, 
"/work/prepare", "--ro-bind", json, "/work/config.json", "--bind", context.artifacts, "/work/artifacts", "--bind", context.package_dir, "/work/packages", *sources, ] # fmt: skip run( ["/work/prepare", arg], env=env, stdin=sys.stdin, sandbox=script_maybe_chroot_sandbox( context, script=script, options=options, network=True, ), ) def run_build_scripts(context: Context) -> None: if not context.config.build_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.installer.architecture( context.config.architecture ), BUILDROOT="/buildroot", DESTDIR="/work/dest", CHROOT_DESTDIR="/work/dest", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", SCRIPT="/work/build-script", CHROOT_SCRIPT="/work/build-script", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", WITH_DOCS=one_zero(context.config.with_docs), WITH_NETWORK=one_zero(context.config.with_network), WITH_TESTS=one_zero(context.config.with_tests), MKOSI_DEBUG=one_zero(ARG_DEBUG.get()), ) if context.config.architecture.to_efi() is not None: env["EFI_ARCHITECTURE"] = str(context.config.architecture.to_efi()) if context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) if context.config.build_dir is not None: env |= dict( BUILDDIR="/work/build", CHROOT_BUILDDIR="/work/build", ) env |= context.config.finalize_environment() with ( setup_build_overlay(context, volatile=True), finalize_source_mounts(context.config, ephemeral=context.config.build_sources_ephemeral) as sources, finalize_config_json(context.config) as json, ): for script in context.config.build_scripts: cmdline = context.args.cmdline if context.args.verb == Verb.build else [] with complete_step(f"Running build script {script}…"): options: list[PathString] = [ "--ro-bind", script, "/work/build-script", "--ro-bind", json, "/work/config.json", "--bind", context.install_dir, "/work/dest", "--bind", context.artifacts, "/work/artifacts", "--bind", context.package_dir, "/work/packages", *( ["--bind", os.fspath(context.config.build_subdir), "/work/build"] if context.config.build_dir else [] ), *sources, ] # fmt: skip run( ["/work/build-script", *cmdline], env=env, stdin=sys.stdin, stdout=sys.stdout, sandbox=script_maybe_chroot_sandbox( context, script=script, options=options, network=context.config.with_network, ), ) def run_postinst_scripts(context: Context) -> None: if not context.config.postinst_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.installer.architecture( context.config.architecture ), BUILDROOT="/buildroot", OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SCRIPT="/work/postinst", CHROOT_SCRIPT="/work/postinst", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", WITH_NETWORK=one_zero(context.config.with_network), MKOSI_DEBUG=one_zero(ARG_DEBUG.get()), ) if context.config.architecture.to_efi() is not None: env["EFI_ARCHITECTURE"] = str(context.config.architecture.to_efi()) if context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) if context.config.build_dir is not None: env |= 
dict(BUILDDIR="/work/build") env |= context.config.finalize_environment() with ( finalize_source_mounts( context.config, ephemeral=bool(context.config.build_sources_ephemeral), ) as sources, finalize_config_json(context.config) as json, ): for script in context.config.postinst_scripts: with complete_step(f"Running postinstall script {script}…"): options: list[PathString] = [ "--ro-bind", script, "/work/postinst", "--ro-bind", json, "/work/config.json", "--bind", context.staging, "/work/out", "--bind", context.artifacts, "/work/artifacts", "--bind", context.package_dir, "/work/packages", *( ["--ro-bind", os.fspath(context.config.build_subdir), "/work/build"] if context.config.build_dir else [] ), *sources, ] # fmt: skip run( ["/work/postinst", "final"], env=env, stdin=sys.stdin, sandbox=script_maybe_chroot_sandbox( context, script=script, options=options, network=context.config.with_network, ), ) def run_finalize_scripts(context: Context) -> None: if not context.config.finalize_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.installer.architecture( context.config.architecture ), BUILDROOT="/buildroot", OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", SCRIPT="/work/finalize", CHROOT_SCRIPT="/work/finalize", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", WITH_NETWORK=one_zero(context.config.with_network), MKOSI_DEBUG=one_zero(ARG_DEBUG.get()), ) if context.config.architecture.to_efi() is not None: env["EFI_ARCHITECTURE"] = str(context.config.architecture.to_efi()) if context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) if context.config.build_dir is not None: env |= dict(BUILDDIR="/work/build") env |= context.config.finalize_environment() with ( finalize_source_mounts( context.config, ephemeral=bool(context.config.build_sources_ephemeral), ) as sources, finalize_config_json(context.config) as json, ): for script in context.config.finalize_scripts: with complete_step(f"Running finalize script {script}…"): options: list[PathString] = [ "--ro-bind", script, "/work/finalize", "--ro-bind", json, "/work/config.json", "--bind", context.staging, "/work/out", "--bind", context.artifacts, "/work/artifacts", "--bind", context.package_dir, "/work/packages", *( ["--ro-bind", os.fspath(context.config.build_subdir), "/work/build"] if context.config.build_dir else [] ), *sources, ] # fmt: skip run( ["/work/finalize"], env=env, stdin=sys.stdin, sandbox=script_maybe_chroot_sandbox( context, script=script, options=options, network=context.config.with_network, ), ) def run_postoutput_scripts(context: Context) -> None: if not context.config.postoutput_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.installer.architecture( context.config.architecture ), SRCDIR="/work/src", OUTPUTDIR="/work/out", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", MKOSI_DEBUG=one_zero(ARG_DEBUG.get()), ) if context.config.architecture.to_efi() is not None: env["EFI_ARCHITECTURE"] = str(context.config.architecture.to_efi()) if context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) with 
( finalize_source_mounts( context.config, ephemeral=bool(context.config.build_sources_ephemeral), ) as sources, finalize_config_json(context.config) as json, ): for script in context.config.postoutput_scripts: with complete_step(f"Running post-output script {script}…"): run( ["/work/postoutput"], env=env | context.config.finalize_environment(), sandbox=context.sandbox( # postoutput scripts should run as (fake) root so that file ownership is # always recorded as if owned by root. options=[ "--ro-bind", script, "/work/postoutput", "--ro-bind", json, "/work/config.json", "--bind", context.staging, "/work/out", "--dir", "/work/src", "--chdir", "/work/src", "--dir", "/work/out", "--become-root", *sources, ], ), stdin=sys.stdin, ) # fmt: skip def install_tree( config: Config, src: Path, dst: Path, *, target: Optional[Path] = None, preserve: bool = True, ) -> None: src = src.resolve() t = dst if target: t = dst / target.relative_to("/") with umask(~0o755): t.parent.mkdir(parents=True, exist_ok=True) def copy() -> None: copy_tree( src, t, preserve=preserve, use_subvolumes=config.use_subvolumes, sandbox=config.sandbox, ) if src.is_dir() or (src.is_file() and target): copy() elif can_extract_tar(src): extract_tar(src, t, sandbox=config.sandbox) elif src.suffix == ".raw": run( ["systemd-dissect", "--copy-from", workdir(src), "/", workdir(t)], env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no"), sandbox=config.sandbox( devices=True, network=True, options=[ "--ro-bind", src, workdir(src), "--bind", t.parent, workdir(t.parent), ], ), ) # fmt: skip else: # If we get an unknown file without a target, we just copy it into /. copy() def install_base_trees(context: Context) -> None: if not context.config.base_trees or context.config.overlay: return with complete_step("Copying in base trees…"): for path in context.config.base_trees: install_tree(context.config, path, context.root) def install_skeleton_trees(context: Context) -> None: if not context.config.skeleton_trees: return with complete_step("Copying in skeleton file trees…"): for tree in context.config.skeleton_trees: install_tree(context.config, tree.source, context.root, target=tree.target, preserve=False) def install_sandbox_trees(config: Config, dst: Path) -> None: # Ensure /etc exists in the sandbox (dst / "etc").mkdir(exist_ok=True) if (p := config.tools() / "usr/share/crypto-policies/back-ends/DEFAULT").exists(): Path(dst / "etc/crypto-policies").mkdir(exist_ok=True) copy_tree(p, dst / "etc/crypto-policies/back-ends", sandbox=config.sandbox) if config.sandbox_trees: with complete_step("Copying in sandbox trees…"): for tree in config.sandbox_trees: install_tree(config, tree.source, dst, target=tree.target, preserve=False) if Path("/etc/passwd").exists(): shutil.copy("/etc/passwd", dst / "etc/passwd") if Path("/etc/group").exists(): shutil.copy("/etc/group", dst / "etc/group") if not (dst / "etc/mtab").is_symlink(): (dst / "etc/mtab").symlink_to("../proc/self/mounts") Path(dst / "etc/resolv.conf").unlink(missing_ok=True) Path(dst / "etc/resolv.conf").touch() if not (dst / "etc/nsswitch.conf").exists(): (dst / "etc/nsswitch.conf").write_text( textwrap.dedent( """\ passwd: files shadow: files group: files hosts: files myhostname resolve [!UNAVAIL=return] dns services: files netgroup: files automount: files aliases: files ethers: files gshadow: files networks: files dns protocols: files publickey: files rpc: files """ ) ) if not (dst / "etc/hosts").exists() and Path("/etc/hosts").exists(): shutil.copy("/etc/hosts", dst 
/ "etc/hosts") Path(dst / "etc/static").unlink(missing_ok=True) if (config.tools() / "etc/static").is_symlink(): (dst / "etc/static").symlink_to((config.tools() / "etc/static").readlink()) # Create various mountpoints in /etc as /etc from the sandbox tree is mounted read-only into the sandbox. for d in ( "etc/pki/ca-trust", "etc/pki/tls", "etc/ssl", "etc/ca-certificates", "etc/pacman.d/gnupg", "etc/alternatives", ): (dst / d).mkdir(parents=True, exist_ok=True) for f in ( "etc/passwd", "etc/group", "etc/shadow", "etc/gshadow", "etc/ld.so.cache", ): (dst / f).touch(exist_ok=True) def install_package_directories(context: Context, directories: Sequence[Path]) -> None: directories = [d for d in directories if any(d.iterdir())] if not directories: return with complete_step("Copying in extra packages…"): for d in directories: for p in itertools.chain.from_iterable( d.glob(glob) for glob in context.config.distribution.installer.package_manager( context.config ).package_globs() ): shutil.copy(p, context.repository, follow_symlinks=True) def install_extra_trees(context: Context) -> None: if not context.config.extra_trees: return with complete_step("Copying in extra file trees…"): for tree in context.config.extra_trees: install_tree(context.config, tree.source, context.root, target=tree.target, preserve=False) def install_build_dest(context: Context) -> None: if not any(context.install_dir.iterdir()): return with complete_step("Copying in build tree…"): copy_tree( context.install_dir, context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) def gzip_binary(context: Context) -> str: return "pigz" if context.config.find_binary("pigz") else "gzip" def kernel_get_ver_from_modules(context: Context) -> Optional[str]: # Try to get version from the first dir under usr/lib/modules but fail if multiple versions are found versions = [ p.name for p in (context.root / "usr/lib/modules").glob("*") if KERNEL_VERSION_PATTERN.match(p.name) ] if len(versions) > 1: die( "Multiple kernel module directories found in /usr/lib/modules, unable to determine correct version to use" # noqa: E501 ) elif len(versions) == 0: return None return versions[0] def fixup_vmlinuz_location(context: Context) -> None: modulesd = Path("usr/lib/modules") if not (context.root / modulesd).exists(): return # Don't touch anything if all the modules directories contain a kernel image already. if all( (d / "vmlinuz").is_file() or (d / "vmlinux").is_file() for d in (context.root / modulesd).iterdir() ): return # Some architectures ship an uncompressed vmlinux (ppc64el, riscv64) for type in ("vmlinuz", "vmlinux"): for d in context.root.glob(f"boot/{type}-*"): if d.is_symlink(): continue # Extract kernel version pattern from filename filename = d.name.removeprefix(f"{type}-") match = KERNEL_VERSION_PATTERN.search(filename) kver: Optional[str] if match: kver = match.group(0) else: logging.debug(f"Could not extract kernel version from {filename}, checking /usr/lib/modules") kver = kernel_get_ver_from_modules(context) if kver is None: logging.debug("Unable to get kernel version from modules directory") continue vmlinuz = context.root / "usr/lib/modules" / kver / type if not vmlinuz.parent.exists(): continue # Some distributions (OpenMandriva) symlink /usr/lib/modules//vmlinuz to # /boot/vmlinuz-, so get rid of the symlink and copy the actual vmlinuz to # /usr/lib/modules/. 
if vmlinuz.is_symlink() and vmlinuz.resolve().is_relative_to("/boot"): vmlinuz.unlink() if not vmlinuz.exists(): shutil.copy2(d, vmlinuz) def want_initrd(context: Context) -> bool: if context.config.bootable == ConfigFeature.disabled: return False if context.config.output_format not in (OutputFormat.disk, OutputFormat.directory): return False if not any((context.artifacts / "io.mkosi.initrd").glob("*")) and not any(gen_kernel_images(context)): return False return True def identify_cpu(root: Path) -> tuple[Optional[Path], Optional[Path]]: for entry in Path("/proc/cpuinfo").read_text().split("\n\n"): vendor_id = family = model = stepping = None for line in entry.splitlines(): key, _, value = line.partition(":") key = key.strip() value = value.strip() if not key or not value: continue if key == "vendor_id": vendor_id = value elif key == "cpu family": family = int(value) elif key == "model": model = int(value) elif key == "stepping": stepping = int(value) if vendor_id is not None and family is not None and model is not None and stepping is not None: break else: return (None, None) if vendor_id == "AuthenticAMD": uroot = root / "usr/lib/firmware/amd-ucode" if family > 21: ucode = uroot / f"microcode_amd_fam{family:x}h.bin" else: ucode = uroot / "microcode_amd.bin" if ucode.exists(): return (Path(f"{vendor_id}.bin"), ucode) elif vendor_id == "GenuineIntel": uroot = root / "usr/lib/firmware/intel-ucode" if (ucode := uroot / f"{family:02x}-{model:02x}-{stepping:02x}").exists(): return (Path(f"{vendor_id}.bin"), ucode) if (ucode := uroot / f"{family:02x}-{model:02x}-{stepping:02x}.initramfs").exists(): return (Path(f"{vendor_id}.bin"), ucode) return (Path(f"{vendor_id}.bin"), None) def build_microcode_initrd(context: Context) -> list[Path]: if not context.config.architecture.is_x86_variant(): return [] microcode = context.workspace / "microcode.initrd" if microcode.exists(): return [microcode] amd = context.root / "usr/lib/firmware/amd-ucode" intel = context.root / "usr/lib/firmware/intel-ucode" if not amd.exists() and not intel.exists(): logging.warning("/usr/lib/firmware/{amd-ucode,intel-ucode} not found, not adding microcode") return [] root = context.workspace / "microcode-root" destdir = root / "kernel/x86/microcode" with umask(~0o755): destdir.mkdir(parents=True, exist_ok=True) if context.config.microcode_host: vendorfile, ucodefile = identify_cpu(context.root) if vendorfile is None or ucodefile is None: logging.warning("Unable to identify CPU for MicrocodeHostonly=") return [] with (destdir / vendorfile).open("wb") as f: f.write(ucodefile.read_bytes()) else: if amd.exists(): with (destdir / "AuthenticAMD.bin").open("wb") as f: for p in amd.iterdir(): f.write(p.read_bytes()) if intel.exists(): with (destdir / "GenuineIntel.bin").open("wb") as f: for p in intel.iterdir(): f.write(p.read_bytes()) make_cpio(root, microcode, sandbox=context.sandbox) return [microcode] def finalize_kernel_modules_include(context: Context, *, include: Sequence[str], host: bool) -> list[str]: final = [] host_included = False if host: final.extend(loaded_modules()) host_included = True for p in include: if p == "default": with chdir(context.resources / "mkosi-initrd"): # TODO: figure out a way to propagate all relevant settings, not just arch _, _, [initrd] = parse_config( ["--architecture", str(context.config.architecture)], resources=context.resources, ) final.extend(initrd.kernel_modules_include) elif p == "host" and not host_included: final.extend(loaded_modules()) host_included = True else: final.append(p) # 
deduplicate while maintaining ordering return list({k: None for k in final}) def build_kernel_modules_initrd(context: Context, kver: str) -> Path: kmods = context.workspace / f"kernel-modules-{kver}.initrd" if kmods.exists(): return kmods log_step("Building kernel modules initrd") make_cpio( context.root, kmods, files=gen_required_kernel_modules( context, kver, modules_include=finalize_kernel_modules_include( context, include=context.config.kernel_modules_initrd_include, host=context.config.kernel_modules_initrd_include_host, ), modules_exclude=context.config.kernel_modules_initrd_exclude, firmware_include=context.config.firmware_include, firmware_exclude=context.config.firmware_exclude, ), sandbox=context.sandbox, ) if context.config.distribution.is_apt_distribution(): # Older Debian and Ubuntu releases do not compress their kernel modules, so we compress the # initramfs instead. Note that this is not ideal since the compressed kernel modules will # all be decompressed on boot which requires significant memory. if context.config.distribution == Distribution.debian and context.config.release in ( "sid", "testing", ): compression = Compression.none else: compression = Compression.zstd maybe_compress(context, compression, kmods, kmods) if ArtifactOutput.kernel_modules_initrd in context.config.split_artifacts: shutil.copy(kmods, context.staging / context.config.output_split_kernel_modules_initrd) return kmods def want_signed_pcrs(config: Config) -> bool: return config.sign_expected_pcr == ConfigFeature.enabled or ( config.sign_expected_pcr == ConfigFeature.auto and config.find_binary("systemd-measure", "/usr/lib/systemd/systemd-measure") is not None and bool(config.sign_expected_pcr_key) and bool(config.sign_expected_pcr_certificate) ) def run_ukify( context: Context, stub: Path, output: Path, *, cmdline: Sequence[str] = (), arguments: Sequence[PathString] = (), options: Sequence[PathString] = (), sign: bool = True, json_out: bool = False, ) -> dict[str, Any]: ukify = context.config.find_binary("ukify", "/usr/lib/systemd/ukify") if not ukify: die("Could not find ukify") if not (arch := context.config.architecture.to_efi()): die(f"Architecture {context.config.architecture} does not support UEFI") # Older versions of systemd-stub expect the cmdline section to be null terminated. We can't # embed NUL terminators in argv so let's communicate the cmdline via a file instead. 
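# Worked example: cmdline=["console=ttyS0", "rw"] is serialized to the file as
# "console=ttyS0 rw\x00" and handed to ukify as --cmdline @<file>, preserving the trailing
# NUL byte that argv cannot represent.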
(context.workspace / "cmdline").write_text(f"{' '.join(cmdline)}\x00") cmd = [ python_binary(context.config), ukify, "build", *arguments, "--efi-arch", arch, "--stub", workdir(stub), "--output", workdir(output), *(["--cmdline", f"@{workdir(context.workspace / 'cmdline')}"] if cmdline else []), ] # fmt: skip opt: list[PathString] = [ "--ro-bind", stub, workdir(stub), "--bind", output.parent, workdir(output.parent), "--ro-bind", context.workspace / "cmdline", workdir(context.workspace / "cmdline"), ] # fmt: skip if sign and context.config.secure_boot: assert context.config.secure_boot_key assert context.config.secure_boot_certificate cmd += [ "--signtool", ( "sbsign" if context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or not context.config.find_binary("systemd-sbsign", "/usr/lib/systemd/systemd-sbsign") else "systemd-sbsign" ), ] # fmt: skip if ( context.config.secure_boot_key_source.type != KeySourceType.file or context.config.secure_boot_certificate_source.type != CertificateSourceType.file ): opt += ["--bind", "/run", "/run"] if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += ["--signing-engine", context.config.secure_boot_key_source.source] elif context.config.secure_boot_key_source.type == KeySourceType.provider: cmd += ["--signing-provider", context.config.secure_boot_key_source.source] if context.config.secure_boot_key.exists(): cmd += ["--secureboot-private-key", workdir(context.config.secure_boot_key)] opt += ["--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key)] else: cmd += ["--secureboot-private-key", context.config.secure_boot_key] if context.config.secure_boot_certificate_source.type == CertificateSourceType.provider: cmd += ["--certificate-provider", context.config.secure_boot_certificate_source.source] if context.config.secure_boot_certificate.exists(): cmd += ["--secureboot-certificate", workdir(context.config.secure_boot_certificate)] opt += [ "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), # noqa: E501 ] # fmt: skip else: cmd += ["--secureboot-certificate", context.config.secure_boot_certificate] if json_out: cmd += ["--json=short"] stdout = subprocess.PIPE else: stdout = None result = run( cmd, stdin=( sys.stdin if context.config.secure_boot_key_source.type != KeySourceType.file else subprocess.DEVNULL ), stdout=stdout, env=context.config.finalize_environment(), sandbox=context.sandbox( options=[*opt, *options], devices=context.config.secure_boot_key_source.type != KeySourceType.file, ), ) if json_out: return cast(dict[str, Any], json.loads(result.stdout.strip())) return {} def build_uki( context: Context, stub: Path, kver: str, kimg: Path, microcodes: list[Path], initrds: list[Path], cmdline: Sequence[str], profiles: Sequence[Path], output: Path, ) -> dict[str, Any]: if not (ukify := context.config.find_binary("ukify", "/usr/lib/systemd/ukify")): die("Could not find ukify") json_out = False arguments: list[PathString] = [ "--os-release", f"@{workdir(context.root / 'usr/lib/os-release')}", "--uname", kver, "--linux", workdir(kimg), *flatten(["--join-profile", os.fspath(workdir(profile))] for profile in profiles), ] # fmt: skip if ( context.config.unified_kernel_image_profiles and context.config.sign_expected_pcr and any(not profile.sign_expected_pcr for profile in context.config.unified_kernel_image_profiles) ): arguments += ["--sign-profile=main"] arguments += flatten( ["--sign-profile", profile.profile["ID"]] for profile in 
context.config.unified_kernel_image_profiles if profile.sign_expected_pcr ) options: list[PathString] = [ "--ro-bind", context.root / "usr/lib/os-release", workdir(context.root / "usr/lib/os-release"), "--ro-bind", kimg, workdir(kimg), *flatten(["--ro-bind", os.fspath(profile), os.fspath(workdir(profile))] for profile in profiles), ] # fmt: skip if context.config.devicetrees: dtbs = filter_devicetrees(context.root, kver, include=context.config.devicetrees) switch = "--devicetree" if len(dtbs) == 1 else "--devicetree-auto" for dtb_rel in dtbs: dtb = context.root / dtb_rel arguments += [switch, workdir(dtb)] options += ["--ro-bind", dtb, workdir(dtb)] if context.config.splash: splash = context.root / os.fspath(context.config.splash).lstrip("/") arguments += ["--splash", workdir(splash)] options += ["--ro-bind", splash, workdir(splash)] if context.config.secure_boot: assert context.config.secure_boot_key assert context.config.secure_boot_certificate arguments += ["--sign-kernel"] if want_signed_pcrs(context.config): assert context.config.sign_expected_pcr_key assert context.config.sign_expected_pcr_certificate arguments += [ # SHA1 might be disabled in OpenSSL depending on the distro so we opt to not sign # for SHA1 to avoid having to manage a bunch of configuration to re-enable SHA1. "--pcr-banks", "sha256", ] # fmt: skip if ( systemd_tool_version( python_binary(context.config), ukify, sandbox=context.sandbox, ) >= "258" ): cert_parameter = "--pcr-certificate" else: cert_parameter = "--pcr-public-key" # If we're providing the private key via an engine or provider, we have to pass in a X.509 # certificate via --pcr-certificate as well. if context.config.sign_expected_pcr_key_source.type != KeySourceType.file: if context.config.sign_expected_pcr_certificate_source.type == CertificateSourceType.provider: arguments += [ "--certificate-provider", f"provider:{context.config.sign_expected_pcr_certificate_source.source}", ] options += ["--bind", "/run", "/run"] if context.config.sign_expected_pcr_certificate.exists(): arguments += [ cert_parameter, workdir(context.config.sign_expected_pcr_certificate), ] # fmt: skip options += [ "--ro-bind", context.config.sign_expected_pcr_certificate, workdir(context.config.sign_expected_pcr_certificate), # noqa: E501 ] # fmt: skip else: arguments += [cert_parameter, context.config.sign_expected_pcr_certificate] if context.config.sign_expected_pcr_key_source.type == KeySourceType.engine: arguments += ["--signing-engine", context.config.sign_expected_pcr_key_source.source] elif context.config.sign_expected_pcr_key_source.type == KeySourceType.provider: arguments += ["--signing-provider", context.config.sign_expected_pcr_key_source.source] if context.config.sign_expected_pcr_key.exists(): arguments += ["--pcr-private-key", workdir(context.config.sign_expected_pcr_key)] options += [ "--ro-bind", context.config.sign_expected_pcr_key, workdir(context.config.sign_expected_pcr_key), # noqa: E501 ] # fmt: skip else: arguments += ["--pcr-private-key", context.config.sign_expected_pcr_key] elif ArtifactOutput.pcrs in context.config.split_artifacts: assert context.config.sign_expected_pcr_certificate json_out = True arguments += [ "--policy-digest", "--pcr-banks", "sha256", "--pcr-certificate", workdir(context.config.sign_expected_pcr_certificate), ] # fmt: skip options += [ "--ro-bind", context.config.sign_expected_pcr_certificate, workdir(context.config.sign_expected_pcr_certificate), # noqa: E501 ] # fmt: skip if microcodes: # new .ucode section support? 
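# ukify and systemd-stub only gained a dedicated .ucode PE section in v256, hence the version
# checks below; on older versions the microcode cpios are prepended to the regular initrds
# instead, relying on the kernel's early-microcode handling of a leading uncompressed cpio.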
if ( systemd_tool_version( python_binary(context.config), ukify, sandbox=context.sandbox, ) >= "256" and (version := systemd_stub_version(context, stub)) and version >= "256" ): for microcode in microcodes: arguments += ["--microcode", workdir(microcode)] options += ["--ro-bind", microcode, workdir(microcode)] else: initrds = microcodes + initrds for initrd in initrds: arguments += ["--initrd", workdir(initrd)] options += ["--ro-bind", initrd, workdir(initrd)] with complete_step(f"Generating unified kernel image for kernel version {kver}"): return run_ukify( context, stub, output, cmdline=cmdline, arguments=arguments, options=options, json_out=json_out, ) def systemd_stub_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() stub = context.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub" return stub def systemd_stub_version(context: Context, stub: Path) -> Optional[GenericVersion]: try: sdmagic = extract_pe_section(context, stub, ".sdmagic", context.workspace / "sdmagic") except KeyError: return None sdmagic_text = sdmagic.read_text().strip("\x00") # Older versions of the stub have misaligned sections which results in an empty sdmagic text. # Let's check for that explicitly and treat it as no version. # # TODO: Drop this logic once every distribution we support ships systemd-stub v254 or newer. if not sdmagic_text: return None if not ( version := re.match( r"#### LoaderInfo: systemd-stub (?P<version>[\w\-.~^+]+) ####", sdmagic_text, re.ASCII ) ): die(f"Unable to determine systemd-stub version, found {sdmagic_text!r}") return GenericVersion(version.group("version")) def want_uki(context: Context) -> bool: return want_efi(context.config) and ( context.config.bootloader.is_uki() or context.config.unified_kernel_images.enabled() or ( context.config.unified_kernel_images == UnifiedKernelImage.auto and systemd_stub_binary(context).exists() and context.config.find_binary("ukify", "/usr/lib/systemd/ukify") is not None ) ) def find_entry_token(context: Context) -> str: if ( not context.config.find_binary("kernel-install") or ( "--version" not in run( ["kernel-install", "--help"], stdout=subprocess.PIPE, sandbox=context.sandbox(), ).stdout ) or systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1" ): return context.config.image_id or context.config.distribution.name output = json.loads( run( ["kernel-install", "--root=/buildroot", "--json=pretty", "inspect"], sandbox=context.sandbox(options=context.rootoptions(readonly=True)), stdout=subprocess.PIPE, env={"BOOT_ROOT": "/boot"}, ).stdout ) logging.debug(json.dumps(output, indent=4)) return cast(str, output["EntryToken"]) def finalize_cmdline( context: Context, partitions: Sequence[Partition], roothash: Optional[str] ) -> list[str]: if (context.root / "etc/kernel/cmdline").exists(): cmdline = [(context.root / "etc/kernel/cmdline").read_text().strip()] elif (context.root / "usr/lib/kernel/cmdline").exists(): cmdline = [(context.root / "usr/lib/kernel/cmdline").read_text().strip()] else: cmdline = [] if roothash: cmdline += [roothash] cmdline += context.config.kernel_command_line if not roothash: for name in ("root", "mount.usr"): type_prefix = name.removeprefix("mount.") if not (root := next((p.uuid for p in partitions if p.type.startswith(type_prefix)), None)): continue cmdline = [f"{name}=PARTUUID={root}" if c == f"{name}=PARTUUID" else c for c in cmdline] return cmdline def finalize_microcode(context: Context) -> list[Path]: if any((context.artifacts / "io.mkosi.microcode").glob("*")): return
sorted((context.artifacts / "io.mkosi.microcode").iterdir()) elif microcode := build_microcode_initrd(context): return microcode return [] def finalize_initrds(context: Context) -> list[Path]: return context.config.initrds + sorted((context.artifacts / "io.mkosi.initrd").glob("*")) def install_type1( context: Context, kver: str, kimg: Path, token: str, partitions: Sequence[Partition], cmdline: list[str], ) -> None: dst = context.root / "boot" / token / kver entry = context.root / f"boot/loader/entries/{token}-{kver}.conf" with umask(~0o700): dst.mkdir(parents=True, exist_ok=True) entry.parent.mkdir(parents=True, exist_ok=True) dtb = None source_dtb = None if context.config.devicetrees: dtbs = filter_devicetrees(context.root, kver, include=context.config.devicetrees) if len(dtbs) != 1: die( "Type 1 boot entries support only single devicetree, use UKI builds for multiple devicetrees" ) source_dtb = context.root / dtbs[0] dtb = dst / dtbs[0].relative_to(f"usr/lib/modules/{kver}/dtb") with umask(~0o700): dtb.parent.mkdir(parents=True, exist_ok=True) microcode = finalize_microcode(context) initrds = finalize_initrds(context) kmods = build_kernel_modules_initrd(context, kver) with umask(~0o600): if ( want_efi(context.config) and context.config.secure_boot and not context.config.bootloader.is_signed() and KernelType.identify(context.config, kimg) == KernelType.pe ): kimg = sign_efi_binary(context, kimg, dst / "vmlinuz") else: kimg = Path(shutil.copy2(context.root / kimg, dst / "vmlinuz")) initrds = [ Path(shutil.copy2(initrd, dst.parent / initrd.name)) for initrd in microcode + initrds ] + [Path(shutil.copy2(kmods, dst / "kernel-modules.initrd"))] if dtb and source_dtb: shutil.copy2(source_dtb, dtb) with entry.open("w") as f: f.write( textwrap.dedent( f"""\ title {token} {kver} version {kver} linux /{kimg.relative_to(context.root / "boot")} options {" ".join(cmdline)} """ ) ) for initrd in initrds: f.write(f"initrd /{initrd.relative_to(context.root / 'boot')}\n") if dtb: f.write(f"devicetree /{dtb.relative_to(context.root / 'boot')}\n") if want_grub_efi(context) or want_grub_bios(context, partitions): config = prepare_grub_config(context) assert config if ( not any(c.startswith("root=PARTUUID=") for c in context.config.kernel_command_line) and not any(c.startswith("mount.usr=PARTUUID=") for c in context.config.kernel_command_line) and (root := finalize_root(partitions)) ): cmdline = [root] + cmdline with config.open("a") as f: f.write("if [ ") conditions = [] if want_grub_efi(context) and not want_uki(context): conditions += ['"${grub_platform}" == "efi"'] if want_grub_bios(context, partitions): conditions += ['"${grub_platform}" == "pc"'] f.write(" -o ".join(conditions)) f.write(" ]; then\n") f.write( textwrap.dedent( f"""\ menuentry "{token}-{kver}" {{ linux /{kimg.relative_to(context.root / "boot")} {" ".join(cmdline)} initrd {" ".join(os.fspath(Path("/") / i.relative_to(context.root / "boot")) for i in initrds)} }} """ # noqa: E501 ) ) f.write("fi\n") def expand_kernel_specifiers(text: str, kver: str, token: str, roothash: str) -> str: specifiers = { "&": "&", "e": token, "k": kver, "h": roothash, } return expand_delayed_specifiers(specifiers, text) def finalize_bootloader_entry_format( context: Context, kver: str, token: str, partitions: Sequence[Partition] = (), ) -> str: bootloader_entry_format = context.config.unified_kernel_image_format or "&e-&k" roothash_value = "" if roothash := finalize_roothash(partitions): roothash_value = roothash.partition("=")[2] if not 
context.config.unified_kernel_image_format: bootloader_entry_format += "-&h" return expand_kernel_specifiers( bootloader_entry_format, kver=kver, token=token, roothash=roothash_value, ) def finalize_uki_path(context: Context, name: str) -> Path: if not context.config.bootloader.is_uki(): return Path(f"boot/EFI/Linux/{name}.efi") if context.config.shim_bootloader != ShimBootloader.none: return shim_second_stage_binary(context) return efi_boot_binary(context) def install_uki( context: Context, kver: str, kimg: Path, token: str, partitions: Sequence[Partition], profiles: Sequence[Path], cmdline: list[str], ) -> dict[str, Any]: boot_binary = context.root / finalize_uki_path( context, finalize_bootloader_entry_format(context, kver, token, partitions) ) pcrs: dict[str, Any] = {} # Make sure the parent directory where we'll be writing the UKI exists. with umask(~0o700): boot_binary.parent.mkdir(parents=True, exist_ok=True) if ( context.config.bootloader.is_signed() and context.config.unified_kernel_images == UnifiedKernelImage.auto ) or context.config.unified_kernel_images == UnifiedKernelImage.signed: for p in (context.root / "usr/lib/modules" / kver).glob("*.efi"): log_step(f"Installing prebuilt UKI at {p} to {boot_binary}") shutil.copy2(p, boot_binary) break else: if context.config.bootable == ConfigFeature.enabled: die(f"Couldn't find a signed UKI binary installed at /usr/lib/modules/{kver} in the image") return pcrs else: microcodes = finalize_microcode(context) initrds = finalize_initrds(context) if context.config.kernel_modules_initrd: initrds += [build_kernel_modules_initrd(context, kver)] pcrs = build_uki( context, systemd_stub_binary(context), kver, context.root / kimg, microcodes, initrds, cmdline, profiles, boot_binary, ) print_output_size(boot_binary) if want_grub_efi(context): config = prepare_grub_config(context) assert config with config.open("a") as f: f.write('if [ "${grub_platform}" == "efi" ]; then\n') f.write( textwrap.dedent( f"""\ menuentry "{boot_binary.stem}" {{ chainloader /{boot_binary.relative_to(context.root / "boot")} }} """ ) ) f.write("fi\n") return pcrs def systemd_addon_stub_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() stub = context.root / f"usr/lib/systemd/boot/efi/addon{arch}.efi.stub" return stub def build_uki_profiles(context: Context, cmdline: Sequence[str]) -> list[Path]: if not context.config.unified_kernel_image_profiles: return [] stub = systemd_addon_stub_binary(context) if not stub.exists(): die(f"sd-stub not found at /{stub.relative_to(context.root)} in the image") (context.workspace / "uki-profiles").mkdir() profiles = [] for profile in context.config.unified_kernel_image_profiles: id = profile.profile["ID"] output = context.workspace / f"uki-profiles/{id}.efi" profile_section = context.workspace / f"uki-profiles/{id}.profile" with profile_section.open("w") as f: for k, v in profile.profile.items(): if not all(c.isalnum() for c in v): v = f'"{v}"' f.write(f"{k}={v}\n") with complete_step(f"Generating UKI profile '{id}'"): run_ukify( context, stub, output, cmdline=[*cmdline, *profile.cmdline], arguments=["--profile", f"@{profile_section}"], options=["--ro-bind", profile_section, profile_section], sign=False, ) profiles += [output] return profiles def install_kernel(context: Context, partitions: Sequence[Partition]) -> None: # Iterates through all kernel versions included in the image and generates a combined # kernel+initrd+cmdline+osrelease EFI file from it and places it in the /EFI/Linux directory of # the ESP. 
sd-boot iterates through them and shows them in the menu. These "unified" # single-file images have the benefit that they can be signed like normal EFI binaries, and can # encode everything necessary to boot a specific root device, including the root hash. if not want_kernel(context.config): return stub = systemd_stub_binary(context) if want_uki(context) and not stub.exists(): die( "Unified kernel image(s) requested but systemd-stub not found at " f"/{stub.relative_to(context.root)}" ) if context.config.bootable == ConfigFeature.enabled and not any(gen_kernel_images(context)): die("A bootable image was requested but no kernel was found") token = find_entry_token(context) cmdline = finalize_cmdline(context, partitions, finalize_roothash(partitions)) profiles = build_uki_profiles(context, cmdline) if want_uki(context) else [] # The first processed UKI is the one that will be used as split artifact, so take pcrs from # it and ignore the rest # TODO: we should probably support signing pcrs for all built UKIs pcrs: dict[str, Any] = {} for kver, kimg in gen_kernel_images(context): if want_uki(context): pcrs = pcrs or install_uki(context, kver, kimg, token, partitions, profiles, cmdline) if not want_uki(context) or want_grub_bios(context, partitions): install_type1(context, kver, kimg, token, partitions, cmdline) if context.config.bootloader.is_uki(): break if ArtifactOutput.pcrs in context.config.split_artifacts and pcrs: (context.staging / context.config.output_split_pcrs).write_text(json.dumps(pcrs)) def make_uki( context: Context, stub: Path, kver: str, kimg: Path, microcode: list[Path], output: Path, ) -> None: make_cpio(context.root, context.workspace / "initrd", sandbox=context.sandbox) maybe_compress( context, context.config.compress_output, context.workspace / "initrd", context.workspace / "initrd", ) initrds = [context.workspace / "initrd"] pcrs = build_uki( context, stub, kver, kimg, microcode, initrds, context.config.kernel_command_line, build_uki_profiles(context, context.config.kernel_command_line), output, ) if ArtifactOutput.kernel in context.config.split_artifacts: extract_pe_section(context, output, ".linux", context.staging / context.config.output_split_kernel) if ArtifactOutput.initrd in context.config.split_artifacts: extract_pe_section(context, output, ".initrd", context.staging / context.config.output_split_initrd) if ArtifactOutput.pcrs in context.config.split_artifacts and pcrs: (context.staging / context.config.output_split_pcrs).write_text(json.dumps(pcrs)) def make_addon(context: Context, stub: Path, output: Path) -> None: arguments: list[PathString] = [] options: list[PathString] = [] if any(context.root.iterdir()): make_cpio(context.root, context.workspace / "initrd", sandbox=context.sandbox) maybe_compress( context, context.config.compress_output, context.workspace / "initrd", context.workspace / "initrd", ) arguments += ["--initrd", workdir(context.workspace / "initrd")] options += [ "--ro-bind", context.workspace / "initrd", workdir(context.workspace / "initrd") ] # fmt: skip with complete_step(f"Generating PE addon {output}"): run_ukify( context, stub, output, cmdline=context.config.kernel_command_line, arguments=arguments, options=options, ) def compressor_command(context: Context, compression: Compression) -> list[PathString]: """Returns a command suitable for compressing archives.""" if compression == Compression.gz: return [gzip_binary(context), f"-{context.config.compress_level}", "--stdout", "-"] elif compression == Compression.xz: return ["xz", 
"--check=crc32", f"-{context.config.compress_level}", "-T0", "--stdout", "-"] elif compression == Compression.zstd: return ["zstd", "-q", f"-{context.config.compress_level}", "-T0", "--stdout", "-"] else: die(f"Unknown compression {compression}") def maybe_compress( context: Context, compression: Compression, src: Path, dst: Optional[Path] = None, ) -> None: if not compression or src.is_dir(): if dst: move_tree( src, dst, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) return if not dst: dst = src.parent / f"{src.name}.{compression.extension()}" cmd = compressor_command(context, compression) with complete_step(f"Compressing {src} with {compression}"): with src.open("rb") as i: # if src == dst, make sure dst doesn't truncate the src file but creates a new file. src.unlink() with dst.open("wb") as o: run(cmd, stdin=i, stdout=o, sandbox=context.sandbox()) def copy_nspawn_settings(context: Context) -> None: if context.config.nspawn_settings is None: return None with complete_step("Copying nspawn settings file…"): shutil.copy2(context.config.nspawn_settings, context.staging / context.config.output_nspawn_settings) def get_uki_path(context: Context) -> Optional[Path]: if not want_efi(context.config) or context.config.unified_kernel_images == UnifiedKernelImage.none: return None ukis = sorted( (context.root / "boot/EFI/Linux").glob("*.efi"), key=lambda p: GenericVersion(p.name), reverse=True, ) if (uki := context.root / efi_boot_binary(context)).exists() and ( KernelType.identify(context.config, uki) == KernelType.uki ): pass elif (uki := context.root / shim_second_stage_binary(context)).exists() and ( KernelType.identify(context.config, uki) == KernelType.uki ): pass elif ukis: uki = ukis[0] else: return None return uki def copy_uki(context: Context) -> None: if ArtifactOutput.uki not in context.config.split_artifacts: return if (context.staging / context.config.output_split_uki).exists(): return if uki := get_uki_path(context): shutil.copy(uki, context.staging / context.config.output_split_uki) def copy_vmlinuz(context: Context) -> None: if ArtifactOutput.kernel not in context.config.split_artifacts: return if (context.staging / context.config.output_split_kernel).exists(): return # ukify will have signed the kernel image as well. Let's make sure we put the signed kernel # image in the output directory instead of the unsigned one by reading it from the UKI. if uki := get_uki_path(context): extract_pe_section(context, uki, ".linux", context.staging / context.config.output_split_kernel) return for _, kimg in gen_kernel_images(context): shutil.copy(context.root / kimg, context.staging / context.config.output_split_kernel) break def copy_initrd(context: Context) -> None: if ArtifactOutput.initrd not in context.config.split_artifacts: return if not want_initrd(context): return if (context.staging / context.config.output_split_initrd).exists(): return # Extract the combined initrds from the UKI so we can use it to direct kernel boot with qemu if needed. 
if uki := get_uki_path(context): extract_pe_section(context, uki, ".initrd", context.staging / context.config.output_split_initrd) return for kver, _ in gen_kernel_images(context): initrds = finalize_initrds(context) if context.config.kernel_modules_initrd: kver = next(gen_kernel_images(context))[0] initrds += [build_kernel_modules_initrd(context, kver)] join_initrds(context.config, initrds, context.staging / context.config.output_split_initrd) break def copy_repart_definitions(context: Context) -> None: if ArtifactOutput.repart_definitions not in context.config.split_artifacts: return if context.config.output_format == OutputFormat.esp: definitions = [context.workspace / "esp-definitions"] elif context.config.output_format in (OutputFormat.sysext, OutputFormat.confext, OutputFormat.portable): definitions = [extension_or_portable_image_repart_definitions(context)] elif (d := context.workspace / "repart-definitions").exists(): definitions = [d] elif context.config.output_format == OutputFormat.disk: definitions = context.config.repart_dirs else: definitions = [] if not definitions: return for d in definitions: copy_tree(d, context.staging / context.config.output_split_repart_definitions) def calculate_sha256sum(context: Context) -> None: if not context.config.checksum: return with complete_step("Calculating SHA256SUMS…"): with open(context.workspace / context.config.output_checksum, "w") as f: for p in context.staging.iterdir(): if p.is_dir(): logging.warning(f"Cannot checksum directory '{p}', skipping") continue print(hash_file(p) + " *" + p.name, file=f) (context.workspace / context.config.output_checksum).rename( context.staging / context.config.output_checksum ) def calculate_signature(context: Context) -> None: if not context.config.sign or not context.config.checksum: return if context.config.openpgp_tool == "gpg": calculate_signature_gpg(context) else: calculate_signature_sop(context) def calculate_signature_gpg(context: Context) -> None: cmdline: list[PathString] = ["gpg", "--detach-sign", "--pinentry-mode", "loopback"] # Need to specify key before file to sign if context.config.key is not None: cmdline += ["--default-key", context.config.key] cmdline += [ "--output", workdir(context.staging / context.config.output_signature), workdir(context.staging / context.config.output_checksum), ] home = Path(context.config.finalize_environment().get("GNUPGHOME", Path.home() / ".gnupg")) if not home.exists(): die(f"GPG home {home} not found") env = dict(GNUPGHOME=os.fspath(workdir(home))) if sys.stderr.isatty(): env |= dict(GPG_TTY=os.ttyname(sys.stderr.fileno())) options: list[PathString] = [ "--bind", home, workdir(home), "--bind", context.staging, workdir(context.staging), "--bind", "/run", "/run", ] # fmt: skip with complete_step("Signing SHA256SUMS…"): run( cmdline, env=env, sandbox=context.sandbox(options=options), ) def calculate_signature_sop(context: Context) -> None: if context.config.key is None: die("Signing key is mandatory when using SOP signing") with ( complete_step("Signing SHA256SUMS…"), open(context.staging / context.config.output_checksum, "rb") as i, open(context.staging / context.config.output_signature, "wb") as o, ): run( [context.config.openpgp_tool, "sign", "/signing-key.pgp"], env=context.config.finalize_environment(), stdin=i, stdout=o, sandbox=context.sandbox( options=[ "--bind", context.config.key, "/signing-key.pgp", "--bind", context.staging, workdir(context.staging), "--bind", "/run", "/run", ], ), ) # fmt: skip def dir_size(path: Union[Path, os.DirEntry[str]]) 
-> int: dir_sum = 0 for entry in os.scandir(path): if entry.is_symlink(): # We can ignore symlinks because they either point into our tree, # in which case we'll include the size of target directory anyway, # or outside, in which case we don't need to. continue elif entry.is_file(): dir_sum += entry.stat().st_blocks * 512 elif entry.is_dir(): dir_sum += dir_size(entry) return dir_sum def save_manifest(context: Context, manifest: Optional[Manifest]) -> None: if not manifest: return if manifest.has_data(): if ManifestFormat.json in context.config.manifest_format: with complete_step(f"Saving manifest {context.config.output_manifest}"): with open(context.staging / context.config.output_manifest, "w") as f: manifest.write_json(f) if ManifestFormat.changelog in context.config.manifest_format: with complete_step(f"Saving report {context.config.output_changelog}"): with open(context.staging / context.config.output_changelog, "w") as f: manifest.write_package_report(f) def print_output_size(path: Path) -> None: if path.is_dir(): log_step(f"{path} size is " + format_bytes(dir_size(path)) + ".") else: size = format_bytes(path.stat().st_size) space = format_bytes(path.stat().st_blocks * 512) log_step(f"{path} size is {size}, consumes {space}.") def cache_tree_paths(config: Config) -> tuple[Path, Path, Path]: if config.image == "tools": return ( config.output_dir_or_cwd() / "mkosi.tools", config.output_dir_or_cwd() / "mkosi.tools.build.cache", config.output_dir_or_cwd() / "mkosi.tools.manifest", ) assert config.cache_dir return ( config.cache_dir / f"{config.expand_key_specifiers(config.cache_key)}.cache", config.cache_dir / f"{config.expand_key_specifiers(config.cache_key)}.build.cache", config.cache_dir / f"{config.expand_key_specifiers(config.cache_key)}.manifest", ) def keyring_cache(config: Config) -> Path: assert config.cache_dir return config.cache_dir / f"{config.expand_key_specifiers(config.cache_key)}.keyring.cache" def metadata_cache(config: Config) -> Path: assert config.cache_dir return config.cache_dir / f"{config.expand_key_specifiers(config.cache_key)}.metadata.cache" def check_inputs(config: Config) -> None: """ Make sure all the inputs exist that aren't checked during config parsing because they might be created by an earlier build. 
""" if config.overlay and not config.base_trees: die("--overlay=yes can only be used with --base-tree=") if config.is_incremental() and not config.cache_dir: die("A cache directory must be configured in order to use --incremental=yes") for base in config.base_trees: if not base.exists(): die(f"Base tree {base} not found") if base.is_file() and base.suffix == ".raw" and os.getuid() != 0: die("Must run as root to use disk images in base trees") if config.tools_tree and not config.tools_tree.exists(): die(f"Tools tree {config.tools_tree} not found") trees_with_name = [ ("skeleton", config.skeleton_trees), ("sandbox", config.sandbox_trees), ] if config.output_format != OutputFormat.none: trees_with_name += [("extra", config.extra_trees)] for name, trees in trees_with_name: for tree in trees: if not tree.source.exists(): die(f"{name.capitalize()} tree {tree.source} not found") if ( tree.source.is_file() and tree.source.suffix == ".raw" and not tree.target and os.getuid() != 0 ): die(f"Must run as root to use disk images in {name} trees") if want_kernel(config): for p in config.initrds: if not p.exists(): die(f"Initrd {p} not found") if not p.is_file(): die(f"Initrd {p} is not a file") for script in itertools.chain( config.sync_scripts, config.prepare_scripts, config.build_scripts, config.postinst_scripts, config.finalize_scripts, config.postoutput_scripts, ): if not os.access(script, os.X_OK): die(f"{script} is not executable") if config.secure_boot and not config.secure_boot_key: die( "SecureBoot= is enabled but no secure boot key is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.secure_boot and not config.secure_boot_certificate: die( "SecureBoot= is enabled but no secure boot certificate is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.sign_expected_pcr == ConfigFeature.enabled and not config.sign_expected_pcr_key: die( "SignExpectedPcr= is enabled but no private key is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.sign_expected_pcr == ConfigFeature.enabled and not config.sign_expected_pcr_certificate: die( "SignExpectedPcr= is enabled but no certificate is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.secure_boot_key_source != config.sign_expected_pcr_key_source: die("Secure boot key source and expected PCR signatures key source have to be the same") if config.secure_boot_certificate_source != config.sign_expected_pcr_certificate_source: die( "Secure boot certificate source and expected PCR signatures certificate source have to be the same" # noqa: E501 ) # fmt: skip if config.verity == Verity.signed and not config.verity_key: die( "Verity= is enabled but no verity key is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.verity == Verity.signed and not config.verity_certificate: die( "Verity= is enabled but no verity certificate is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) for profile in config.unified_kernel_image_profiles: if "ID" not in profile.profile: die( "UKI Profile is missing ID key in its .profile section", hint="Use Profile= to configure the profile ID", ) if ( config.cacheonly not in (Cacheonly.never, Cacheonly.auto) and not config.cache_dir and config.package_cache_dir_or_default() != Path("/var") ): die(f"A cache directory must be configured in order to use CacheOnly={config.cacheonly}") if config.output_format == OutputFormat.portable and config.overlay: 
die( "Overlay=yes cannot be used with Format=portable", hint=( "Portable service images are always full images and cannot be overlay images.\n" "See https://systemd.io/PORTABLE_SERVICES/#extension-images for how to use extension\n" "images with portable services." ), ) def check_tool(config: Config, *tools: PathString, reason: str, hint: Optional[str] = None) -> Path: tool = config.find_binary(*tools) if not tool: die(f"Could not find '{tools[0]}' which is required to {reason}.", hint=hint) return tool def check_systemd_tool( config: Config, *tools: PathString, version: str, reason: str, hint: Optional[str] = None, ) -> None: tool = check_tool(config, *tools, reason=reason, hint=hint) v = systemd_tool_version(tool, sandbox=config.sandbox) if v < version: die( f"Found '{tool}' with version {v} but version {version} or newer is required to {reason}.", hint=f"Use ToolsTree=yes to get a newer version of '{tools[0]}'.", ) def check_ukify( config: Config, version: str, reason: str, hint: Optional[str] = None, ) -> None: ukify = check_tool(config, "ukify", "/usr/lib/systemd/ukify", reason=reason, hint=hint) v = systemd_tool_version(python_binary(config), ukify, sandbox=config.sandbox) if v < version: die( f"Found '{ukify}' with version {v} but version {version} or newer is required to {reason}.", hint="Use ToolsTree=yes to get a newer version of 'ukify'.", ) def check_tools(config: Config, verb: Verb) -> None: if verb == Verb.build: if config.output_format == OutputFormat.none: return if ( want_efi(config) or config.output_format in (OutputFormat.uki, OutputFormat.esp) ) and config.unified_kernel_image_profiles: check_ukify( config, version=( "258" if any(not profile.sign_expected_pcr for profile in config.unified_kernel_image_profiles) else "257" ), reason="build unified kernel image profiles", hint=("Use ToolsTree=yes to download most required tools including ukify automatically"), ) elif want_efi(config) and config.unified_kernel_images.enabled(): check_ukify( config, version="254", reason="build bootable images", hint=( "Use ToolsTree=yes to download most required tools including ukify " "automatically or use Bootable=no to create a non-bootable image which doesn't " "require ukify" ), ) if config.output_format in (OutputFormat.disk, OutputFormat.esp): check_systemd_tool(config, "systemd-repart", version="254", reason="build disk images") if config.selinux_relabel == ConfigFeature.enabled: check_tool(config, "setfiles", reason="relabel files") if config.secure_boot_key_source.type != KeySourceType.file: check_ukify( config, version="256", reason="sign Unified Kernel Image with OpenSSL engine", ) if want_signed_pcrs(config): check_systemd_tool( config, "systemd-measure", "/usr/lib/systemd/systemd-measure", version="256", reason="sign PCR hashes with OpenSSL engine", ) if config.verity_key_source.type != KeySourceType.file: check_systemd_tool( config, "systemd-repart", version="256", reason="sign verity roothash signature with OpenSSL engine", ) if ( want_efi(config) and config.secure_boot and config.secure_boot_auto_enroll and ( not config.find_binary("bootctl") or systemd_tool_version("bootctl", sandbox=config.sandbox) < "257" ) ): check_tool(config, "sbsiglist", reason="set up systemd-boot secure boot auto-enrollment") check_tool(config, "sbvarsign", reason="set up systemd-boot secure boot auto-enrollment") if ArtifactOutput.pcrs in config.split_artifacts: check_systemd_tool( config, "systemd-measure", "/usr/lib/systemd/systemd-measure", version="258", reason="generate TPM2 policy digests", 
) if verb == Verb.boot: check_systemd_tool(config, "systemd-nspawn", version="254", reason="boot images") if verb in (Verb.vm, Verb.qemu) and config.vmm == Vmm.vmspawn: check_systemd_tool(config, "systemd-vmspawn", version="256", reason="boot images with vmspawn") if verb == Verb.sysupdate: check_systemd_tool( config, "systemd-sysupdate", "/usr/lib/systemd/systemd-sysupdate", version="257", reason="Update the host system with systemd-sysupdate", ) def configure_ssh(context: Context) -> None: if context.config.ssh in (Ssh.never, Ssh.runtime): return if context.config.ssh == Ssh.auto and ( (context.root / "usr/lib/systemd/system-generators/systemd-ssh-generator").exists() or not find_binary("sshd", root=context.root) ): # systemd-ssh-generator is installed, so we don't need to configure SSH. return unitdir = context.root / "usr/lib/systemd/system" with umask(~0o755): unitdir.mkdir(parents=True, exist_ok=True) with umask(~0o644): (unitdir / "ssh.socket").write_text( textwrap.dedent( """\ [Unit] Description=Mkosi SSH Server VSock Socket ConditionVirtualization=!container Wants=sshd-keygen.target [Socket] ListenStream=vsock::22 Accept=yes [Install] WantedBy=sockets.target """ ) ) (unitdir / "ssh@.service").write_text( textwrap.dedent( """\ [Unit] Description=Mkosi SSH Server After=sshd-keygen.target [Service] # We disable PAM because of an openssh-server bug where it sets PAM_RHOST=UNKNOWN when -i is # used causing a very slow reverse DNS lookup by pam. ExecStart=sshd -i -o UsePAM=no StandardInput=socket RuntimeDirectoryPreserve=yes RuntimeDirectory=sshd # ssh always exits with 255 even on normal disconnect, so let's mark that as success so we # don't get noisy logs about SSH service failures. SuccessExitStatus=255 """ ) ) preset = context.root / "usr/lib/systemd/system-preset/80-mkosi-ssh.preset" with umask(~0o755): preset.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): preset.write_text("enable ssh.socket\n") def configure_initrd(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_or_portable_image(): return if ( not (context.root / "init").exists() and not (context.root / "init").is_symlink() and (context.root / "usr/lib/systemd/systemd").exists() ): (context.root / "init").symlink_to("/usr/lib/systemd/systemd") if not context.config.make_initrd: return if ( not (context.root / "etc/initrd-release").exists() and not (context.root / "etc/initrd-release").is_symlink() ): (context.root / "etc/initrd-release").symlink_to("/etc/os-release") def configure_clock(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return with umask(~0o644): (context.root / "usr/lib/clock-epoch").touch() def run_depmod(context: Context, *, cache: bool = False) -> None: if context.config.overlay or context.config.image == "tools": return if not cache: for modulesd in (context.root / "usr/lib/modules").glob("*"): if not is_valid_kdir(modulesd): continue process_kernel_modules( context, modulesd.name, modules_include=finalize_kernel_modules_include( context, include=context.config.kernel_modules_include, host=context.config.kernel_modules_include_host, ), modules_exclude=context.config.kernel_modules_exclude, firmware_include=context.config.firmware_include, firmware_exclude=context.config.firmware_exclude, ) if context.config.output_format.is_extension_or_portable_image(): return outputs = ( "modules.dep", "modules.dep.bin", "modules.symbols", "modules.symbols.bin", ) for modulesd in (context.root / 
"usr/lib/modules").glob("*"): if not is_valid_kdir(modulesd): continue if ( not cache and not context.config.kernel_modules_exclude and all((modulesd / o).exists() for o in outputs) ): mtime = (modulesd / "modules.dep").stat().st_mtime if all(m.stat().st_mtime <= mtime for m in modulesd.rglob("*.ko*")): continue with complete_step(f"Running depmod for {modulesd.name}"): run(["depmod", "--all", modulesd.name], sandbox=chroot_cmd(root=context.rootoptions)) def run_sysusers(context: Context) -> None: if ( context.config.overlay or context.config.output_format.is_extension_image() or context.config.image == "tools" ): return if not context.config.find_binary("systemd-sysusers"): logging.warning("systemd-sysusers is not installed, not generating system users") return with complete_step("Generating system users"): run( ["systemd-sysusers", "--root=/buildroot"], sandbox=context.sandbox(options=context.rootoptions()), env=context.config.finalize_environment(), ) def run_tmpfiles(context: Context) -> None: if ( context.config.overlay or context.config.output_format.is_extension_image() or context.config.image == "tools" ): return if not context.config.find_binary("systemd-tmpfiles"): logging.warning("systemd-tmpfiles is not installed, not generating volatile files") return with complete_step("Generating volatile files"): run( [ "systemd-tmpfiles", "--root=/buildroot", "--boot", "--create", "--remove", # Exclude APIVFS and temporary files directories. *(f"--exclude-prefix={d}" for d in ("/tmp", "/var/tmp", "/run", "/proc", "/sys", "/dev")), # Exclude /var if we're not invoked as root as all the chown()'s for daemon owned # directories will fail. *(["--exclude-prefix=/var"] if os.getuid() != 0 or userns_has_single_user() else []), ], env={"SYSTEMD_TMPFILES_FORCE_SUBVOL": "0"}, # systemd-tmpfiles can exit with DATAERR or CANTCREAT in some cases which are handled # as success by the systemd-tmpfiles service so we handle those as success as well. success_exit_status=(0, 65, 73), sandbox=context.sandbox( options=[ *context.rootoptions(), # systemd uses acl.h to parse ACLs in tmpfiles snippets which uses the host's # passwd so we have to symlink the image's passwd to make ACL parsing work. *finalize_passwd_symlinks("/buildroot"), # Sometimes directories are configured to be owned by root in tmpfiles snippets # so we want to make sure those chown()'s succeed by making ourselves the root # user so that the root user exists. 
"--become-root", ], ), ) # fmt: skip def run_preset(context: Context) -> None: if ( context.config.overlay or context.config.output_format.is_extension_image() or context.config.image == "tools" ): return if not context.config.find_binary("systemctl"): logging.warning("systemctl is not installed, not applying presets") return with complete_step("Applying presets…"): run( ["systemctl", "--root=/buildroot", "preset-all"], sandbox=context.sandbox(options=context.rootoptions()), ) run( ["systemctl", "--root=/buildroot", "--global", "preset-all"], sandbox=context.sandbox(options=context.rootoptions()), ) def run_hwdb(context: Context) -> None: if ( context.config.overlay or context.config.output_format.is_extension_image() or context.config.image == "tools" ): return if not context.config.find_binary("systemd-hwdb"): logging.warning("systemd-hwdb is not installed, not generating hwdb") return with complete_step("Generating hardware database"): run( ["systemd-hwdb", "--root=/buildroot", "--usr", "--strict", "update"], sandbox=context.sandbox(options=context.rootoptions()), ) # Remove any existing hwdb in /etc in favor of the one we just put in /usr. (context.root / "etc/udev/hwdb.bin").unlink(missing_ok=True) def run_firstboot(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_or_portable_image(): return if not context.config.find_binary("systemd-firstboot"): logging.warning("systemd-firstboot is not installed, not applying first boot settings") return password, hashed = context.config.root_password or (None, False) if password and not hashed: password = run( ["openssl", "passwd", "-stdin", "-6"], sandbox=context.sandbox(), input=password, stdout=subprocess.PIPE, ).stdout.strip() settings = ( ("--locale", "firstboot.locale", context.config.locale), ("--locale-messages", "firstboot.locale-messages", context.config.locale_messages), ("--keymap", "firstboot.keymap", context.config.keymap), ("--timezone", "firstboot.timezone", context.config.timezone), ("--hostname", None, context.config.hostname), ("--root-password-hashed", "passwd.hashed-password.root", password), ("--root-shell", "passwd.shell.root", context.config.root_shell), ) # fmt: skip options = [] creds = [] for option, cred, value in settings: # Check for None as password might be the empty string if value is None: continue options += [option, value] if cred: creds += [(cred, value)] if not options and not creds: return with complete_step("Applying first boot settings"): run( ["systemd-firstboot", "--root=/buildroot", "--force", *options], sandbox=context.sandbox(options=context.rootoptions()), ) # Initrds generally don't ship with only /usr so there's not much point in putting the # credentials in /usr/lib/credstore. 
if context.config.output_format != OutputFormat.cpio or not context.config.make_initrd: with umask(~0o755): (context.root / "usr/lib/credstore").mkdir(exist_ok=True) for cred, value in creds: with umask(~0o600 if "password" in cred else ~0o644): (context.root / "usr/lib/credstore" / cred).write_text(value) def run_selinux_relabel(context: Context) -> None: if not (selinux := want_selinux_relabel(context.config, context.root)): return setfiles, policy, fc, binpolicy = selinux fc = Path("/buildroot") / fc.relative_to(context.root) binpolicy = Path("/buildroot") / binpolicy.relative_to(context.root) with complete_step(f"Relabeling files using {policy} policy"): run( [setfiles, "-mFr", "/buildroot", "-T0", "-c", binpolicy, fc, "/buildroot"], sandbox=context.sandbox(options=context.rootoptions()), check=context.config.selinux_relabel == ConfigFeature.enabled, ) def need_build_overlay(config: Config) -> bool: return bool(config.build_scripts and (config.build_packages or config.prepare_scripts)) def save_cache(context: Context) -> None: if not context.config.is_incremental(): return final, build, manifest = cache_tree_paths(context.config) with complete_step("Installing cache copies"): rmtree(final) move_tree( context.root, final, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) if need_build_overlay(context.config) and (context.workspace / "build-overlay").exists(): rmtree(build) move_tree( context.workspace / "build-overlay", build, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) manifest.write_text( json.dumps( context.config.cache_manifest(), cls=JsonEncoder, indent=4, sort_keys=True, ) ) def have_cache(config: Config) -> bool: # The default tools tree is always cached regardless of the incremental mode. 
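    # A cached tree is only reused if all of the following checks pass: the cached tree
    # exists, it is owned by the invoking user (the tools tree is exempt from the
    # ownership check), the build overlay exists whenever build scripts need one, and the
    # saved cache manifest matches the current configuration.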
if not config.is_incremental() and config.image != "tools": return False final, build, manifest = cache_tree_paths(config) if not final.exists(): logging.debug(f"{final} does not exist, not reusing cached images") return False if config.image != "tools" and (uid := final.stat().st_uid) != os.getuid(): logging.debug( f"{final} uid ({uid}) does not match user uid ({os.getuid()}), not reusing cached images" ) return False if need_build_overlay(config) and not build.exists(): logging.debug(f"{build} does not exist, not reusing cached images") return False if manifest.exists(): prev = json.loads(manifest.read_text()) new = json.dumps(config.cache_manifest(), cls=JsonEncoder, indent=4, sort_keys=True) if prev != json.loads(new): logging.info(f"Cache manifest mismatch for {config.image} image, not reusing cached images") if ARG_DEBUG.get(): run( ["diff", "--unified", workdir(manifest), "-"], input=new, check=False, sandbox=config.sandbox( tools=False, options=["--bind", manifest, workdir(manifest)], ), ) return False else: logging.debug(f"{manifest} does not exist, not reusing cached images") return False return True def reuse_cache(context: Context) -> bool: if not context.config.is_incremental(): return False final, build, _ = cache_tree_paths(context.config) if not final.exists() or (need_build_overlay(context.config) and not build.exists()): return False with complete_step("Copying cached trees"): copy_tree( final, context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) if need_build_overlay(context.config): (context.workspace / "build-overlay").symlink_to(build) return True def save_esp_components( context: Context, ) -> tuple[Optional[Path], Optional[str], Optional[Path], list[Path]]: if context.config.output_format == OutputFormat.addon: stub = systemd_addon_stub_binary(context) if not stub.exists(): die(f"sd-stub not found at /{stub.relative_to(context.root)} in the image") return Path(shutil.copy2(stub, context.workspace)), None, None, [] if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp): return None, None, None, [] try: kver, kimg = next(gen_kernel_images(context)) except StopIteration: if context.config.output_format == OutputFormat.uki: die("A kernel must be installed in the image to build a UKI") if ( context.config.output_format == OutputFormat.esp and context.config.bootable == ConfigFeature.enabled ): die("A kernel must be installed in the image to build a bootable ESP image") return None, None, None, [] kimg = Path(shutil.copy2(context.root / kimg, context.workspace)) if not context.config.architecture.to_efi(): die(f"Architecture {context.config.architecture} does not support UEFI") stub = systemd_stub_binary(context) if not stub.exists(): die(f"sd-stub not found at /{stub.relative_to(context.root)} in the image") stub = Path(shutil.copy2(stub, context.workspace)) microcode = build_microcode_initrd(context) return stub, kver, kimg, microcode def make_image( context: Context, msg: str, skip: Sequence[str] = [], split: bool = False, tabs: bool = False, verity: Verity = Verity.disabled, definitions: Sequence[Path] = [], options: Sequence[PathString] = (), ) -> list[Partition]: cmdline: list[PathString] = [ "systemd-repart", "--empty=allow", "--size=auto", "--dry-run=no", "--json=pretty", "--no-pager", "--root=/buildroot", f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed), workdir(context.staging / context.config.output_with_format), ] # fmt: skip opts: list[PathString] = [ *options, # 
Make sure we're root so that the mkfs tools invoked by systemd-repart think the files # that go into the disk image are owned by root. "--become-root", "--bind", context.staging, workdir(context.staging), *context.rootoptions(), ] # fmt: skip if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] if not (context.staging / context.config.output_with_format).exists(): cmdline += ["--empty=create"] if context.config.passphrase: cmdline += ["--key-file", workdir(context.config.passphrase)] opts += ["--ro-bind", context.config.passphrase, workdir(context.config.passphrase)] if skip: cmdline += ["--defer-partitions", ",".join(skip)] if split: cmdline += ["--split=yes"] if context.config.sector_size: cmdline += ["--sector-size", str(context.config.sector_size)] if tabs and systemd_tool_version("systemd-repart", sandbox=context.sandbox) >= 256: cmdline += [ "--generate-fstab=/etc/fstab", "--generate-crypttab=/etc/crypttab", ] if tabs and systemd_tool_version("systemd-repart", sandbox=context.sandbox) >= 258: cmdline += ["--append-fstab=auto"] for d in definitions: cmdline += ["--definitions", workdir(d)] opts += ["--ro-bind", d, workdir(d)] def can_orphan_file(distribution: Optional[Distribution], release: Optional[str]) -> bool: if distribution is None: return True return not ( (distribution == Distribution.centos and release and GenericVersion(release) == 9) or (distribution == Distribution.ubuntu and release == "jammy") ) # Make sure the ext4 orphan_file feature is disabled if the target distribution official kernel does not # support it. env = {} if ( not can_orphan_file(context.config.distribution, context.config.release) and can_orphan_file(*detect_distribution(context.config.tools())) and "SYSTEMD_REPART_MKFS_EXT4_OPTIONS" not in context.config.environment ): env["SYSTEMD_REPART_MKFS_EXT4_OPTIONS"] = "-O ^orphan_file" if ARG_DEBUG.get(): for dir in definitions: for c in dir.glob("*.conf"): # Do not spam the logs in case something goes wrong logging.debug(f"# {c} (truncated to 100 lines)") with open(c) as f: for line in itertools.islice(f, 100): logging.debug(line.strip()) with complete_step(msg): output = json.loads( run_systemd_sign_tool( context.config, cmdline=cmdline, options=opts, certificate=( context.config.verity_certificate if verity in (Verity.auto, Verity.signed) else None ), certificate_source=context.config.verity_certificate_source, key=context.config.verity_key if verity in (Verity.auto, Verity.signed) else None, key_source=context.config.verity_key_source, env=env, stdout=subprocess.PIPE, devices=not context.config.repart_offline, ).stdout ) logging.debug(json.dumps(output, indent=4)) partitions = [Partition.from_dict(d) for d in output] arch = context.config.architecture if verity == Verity.signed and not any( p.type.startswith(f"usr-{arch}-verity-sig") or p.type.startswith(f"root-{arch}-verity-sig") for p in partitions ): die( "Verity is explicitly enabled but didn't find any verity signature partition", hint="Make sure to add verity signature partitions in mkosi.repart if building a disk image", ) if split: for p in partitions: if p.split_path and p.type not in skip: maybe_compress(context, context.config.compress_output, p.split_path) write_split_roothash(context, partitions) return partitions def make_disk( context: Context, msg: str, skip: Sequence[str] = [], split: bool = False, tabs: bool = False, ) -> list[Partition]: if context.config.output_format != OutputFormat.disk: return [] if 
context.config.repart_dirs: definitions = context.config.repart_dirs else: defaults = context.workspace / "repart-definitions" if not defaults.exists(): defaults.mkdir() if context.config.architecture.to_efi(): bootloader = context.root / efi_boot_binary(context) else: bootloader = None esp = context.config.bootable == ConfigFeature.enabled or ( context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists() ) bios = context.config.bootable != ConfigFeature.disabled and want_grub_bios(context) if esp or bios: # Even if we're doing BIOS, let's still use the ESP to store the kernels, initrds # and grub modules. We can't use UKIs so we have to put each kernel and initrd on # the ESP twice, so let's make the ESP twice as big in that case. (defaults / "00-esp.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=esp Format=vfat CopyFiles=/boot:/ CopyFiles=/efi:/ SizeMinBytes={"1G" if bios else "512M"} SizeMaxBytes={"1G" if bios else "512M"} """ ) ) # If grub for BIOS is installed, let's add a BIOS boot partition onto which we can # install grub. if bios: (defaults / "05-bios.conf").write_text( textwrap.dedent( f"""\ [Partition] Type={Partition.GRUB_BOOT_PARTITION_UUID} SizeMinBytes=1M SizeMaxBytes=1M """ ) ) (defaults / "10-root.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=root Format={context.config.distribution.installer.filesystem()} CopyFiles=/ Minimize=guess """ ) ) definitions = [defaults] if context.config.verity == Verity.defer: skip = [ *skip, f"root-{context.config.architecture}-verity-sig", f"usr-{context.config.architecture}-verity-sig", ] return make_image( context, msg=msg, skip=skip, split=split, tabs=tabs, verity=context.config.verity, definitions=definitions, ) def make_oci(context: Context, root_layer: Path, dst: Path) -> None: ca_store = dst / "blobs" / "sha256" with umask(~0o755): ca_store.mkdir(parents=True) layer_diff_digest = hash_file(root_layer) maybe_compress( context, context.config.compress_output, context.staging / "rootfs.layer", # Pass explicit destination to suppress adding an extension context.staging / "rootfs.layer", ) layer_digest = hash_file(root_layer) root_layer.rename(ca_store / layer_digest) creation_time = ( datetime.datetime.fromtimestamp(context.config.source_date_epoch, tz=datetime.timezone.utc) if context.config.source_date_epoch is not None else datetime.datetime.now(tz=datetime.timezone.utc) ).isoformat() oci_config = { "created": creation_time, "architecture": context.config.architecture.to_oci(), # Name of the operating system which the image is built to run on as defined by # https://github.com/opencontainers/image-spec/blob/v1.0.2/config.md#properties. 
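        # mkosi only produces Linux images, so this is hardcoded rather than derived from
        # the build host.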
"os": "linux", "rootfs": { "type": "layers", "diff_ids": [f"sha256:{layer_diff_digest}"], }, "config": { "Cmd": [ "/sbin/init", *context.config.kernel_command_line, ], }, "history": [ { "created": creation_time, "comment": "Created by mkosi", }, ], } oci_config_blob = json.dumps(oci_config) oci_config_digest = hashlib.sha256(oci_config_blob.encode()).hexdigest() with umask(~0o644): (ca_store / oci_config_digest).write_text(oci_config_blob) layer_suffix = context.config.compress_output.oci_media_type_suffix() oci_manifest = { "schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "digest": f"sha256:{oci_config_digest}", "size": (ca_store / oci_config_digest).stat().st_size, }, "layers": [ { "mediaType": f"application/vnd.oci.image.layer.v1.tar{layer_suffix}", "digest": f"sha256:{layer_digest}", "size": (ca_store / layer_digest).stat().st_size, } ], "annotations": { "io.systemd.mkosi.version": __version__, **( { "org.opencontainers.image.version": context.config.image_version, } if context.config.image_version else {} ), }, } oci_manifest_blob = json.dumps(oci_manifest) oci_manifest_digest = hashlib.sha256(oci_manifest_blob.encode()).hexdigest() with umask(~0o644): (ca_store / oci_manifest_digest).write_text(oci_manifest_blob) (dst / "index.json").write_text( json.dumps( { "schemaVersion": 2, "mediaType": "application/vnd.oci.image.index.v1+json", "manifests": [ { "mediaType": "application/vnd.oci.image.manifest.v1+json", "digest": f"sha256:{oci_manifest_digest}", "size": (ca_store / oci_manifest_digest).stat().st_size, } ], } ) ) (dst / "oci-layout").write_text(json.dumps({"imageLayoutVersion": "1.0.0"})) def make_esp( context: Context, stub: Optional[Path], kver: Optional[str], kimg: Optional[Path], microcode: list[Path], ) -> list[Partition]: if not context.config.architecture.to_efi(): die(f"Architecture {context.config.architecture} does not support UEFI") if context.config.bootable == ConfigFeature.enabled or ( context.config.bootable == ConfigFeature.auto and stub and kver and kimg ): assert stub and kver and kimg token = find_entry_token(context) uki = context.root / finalize_uki_path( context, finalize_bootloader_entry_format(context, kver, token) ) with umask(~0o700): uki.parent.mkdir(parents=True, exist_ok=True) make_uki(context, stub, kver, kimg, microcode, uki) definitions = context.workspace / "esp-definitions" definitions.mkdir(exist_ok=True) # Use a minimum of 36MB or 260MB depending on sector size because otherwise the generated FAT # filesystem will have too few clusters to be considered a FAT32 filesystem by OVMF which will # refuse to boot from it. See # https://superuser.com/questions/1702331/what-is-the-minimum-size-of-a-4k-native-partition-when-formatted-with-fat32/1717643#1717643 if context.config.sector_size == 512: m = 36 # TODO: Figure out minimum size for 2K sector size else: m = 260 size = dir_size(context.root / "boot") + dir_size(context.root / "efi") # Always reserve 10MB for filesystem metadata. size = max(size, (m - 10) * 1024**2) + 10 * 1024**2 # TODO: Remove the extra 4096 for the max size once # https://github.com/systemd/systemd/pull/29954 is in a stable release. 
(definitions / "00-esp.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=esp Format=vfat CopyFiles=/boot:/ CopyFiles=/efi:/ SizeMinBytes={size} SizeMaxBytes={size + 4096} """ ) ) return make_image( context, msg="Generating ESP image", definitions=[definitions], ) def write_split_roothash(context: Context, partitions: Sequence[Partition]) -> None: if ArtifactOutput.roothash in context.config.split_artifacts and ( roothash := finalize_roothash(partitions) ): (context.staging / context.config.output_split_roothash).write_text(roothash.partition("=")[2]) def extension_or_portable_image_repart_definitions(context: Context) -> Path: if context.config.verity == Verity.disabled or ( context.config.verity == Verity.auto and (not context.config.verity_key or not context.config.verity_certificate) ): unsigned = "-unsigned" else: unsigned = "" return context.resources / f"repart/definitions/{context.config.output_format}{unsigned}.repart.d" def make_extension_or_portable_image(context: Context, output: Path) -> None: definitions = extension_or_portable_image_repart_definitions(context) cmdline: list[PathString] = [ "systemd-repart", "--root=/buildroot", "--json=pretty", "--dry-run=no", "--no-pager", f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed) if context.config.seed else "random", "--empty=create", "--size=auto", "--definitions", workdir(definitions), workdir(output), ] # fmt: skip options: list[PathString] = [ # Make sure we're root so that the mkfs tools invoked by systemd-repart think the files # that go into the disk image are owned by root. "--become-root", "--bind", output.parent, workdir(output.parent), *context.rootoptions(readonly=True), "--ro-bind", definitions, workdir(definitions), ] # fmt: skip if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] if context.config.passphrase: cmdline += ["--key-file", context.config.passphrase] options += ["--ro-bind", context.config.passphrase, workdir(context.config.passphrase)] if context.config.sector_size: cmdline += ["--sector-size", str(context.config.sector_size)] if ArtifactOutput.partitions in context.config.split_artifacts: cmdline += ["--split=yes"] verity = [ f"root-{context.config.architecture}-verity-sig", f"usr-{context.config.architecture}-verity-sig", ] if context.config.verity == Verity.hash: cmdline += [f"--exclude-partitions={','.join(verity)}"] elif context.config.verity == Verity.defer: cmdline += [f"--defer-partitions={','.join(verity)}"] with complete_step(f"Building {context.config.output_format} extension image"): j = json.loads( run_systemd_sign_tool( context.config, cmdline=cmdline, options=options, certificate=( context.config.verity_certificate if context.config.verity in (Verity.auto, Verity.signed) else None ), certificate_source=context.config.verity_certificate_source, key=( context.config.verity_key if context.config.verity in (Verity.auto, Verity.signed) else None ), key_source=context.config.verity_key_source, stdout=subprocess.PIPE, devices=not context.config.repart_offline, ).stdout ) logging.debug(json.dumps(j, indent=4)) if ArtifactOutput.partitions in context.config.split_artifacts: for p in (Partition.from_dict(d) for d in j): if p.split_path: maybe_compress(context, context.config.compress_output, p.split_path) write_split_roothash(context, [Partition.from_dict(d) for d in j]) def finalize_staging(context: Context) -> None: rmtree( *(context.config.output_dir_or_cwd() / f.name for f in 
context.staging.iterdir()), sandbox=context.sandbox, ) for f in context.staging.iterdir(): if f.is_symlink(): (context.config.output_dir_or_cwd() / f.name).symlink_to(f.readlink()) continue if f.is_file() and context.config.output_mode is not None: os.chmod(f, context.config.output_mode) move_tree( f, context.config.output_dir_or_cwd(), use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) def clamp_mtime(path: Path, mtime: int) -> None: st = os.stat(path, follow_symlinks=False) orig = (st.st_atime_ns, st.st_mtime_ns) updated = (min(orig[0], mtime * 1_000_000_000), min(orig[1], mtime * 1_000_000_000)) # fmt: skip if orig != updated: os.utime(path, ns=updated, follow_symlinks=False) def normalize_mtime(root: Path, mtime: Optional[int], directory: Path = Path("")) -> None: if mtime is None: return if not (root / directory).exists(): return with complete_step(f"Normalizing modification times of /{directory}"): clamp_mtime(root / directory, mtime) for p in (root / directory).rglob("*"): clamp_mtime(p, mtime) @contextlib.contextmanager def setup_workspace(args: Args, config: Config) -> Iterator[Path]: with contextlib.ExitStack() as stack: workspace = Path(tempfile.mkdtemp(dir=config.workspace_dir_or_default(), prefix="mkosi-workspace-")) # Discard setuid/setgid bits as these are inherited and can leak into the image. workspace.chmod(stat.S_IMODE(workspace.stat().st_mode) & ~(stat.S_ISGID | stat.S_ISUID)) # Explicitly pass the "root" subdirectory first because on btrfs it's likely a subvolume and this # allows us to delete it with btrfs subvolume delete instead of a costly rm -rf. stack.callback(lambda: rmtree(workspace / "root", workspace, sandbox=config.sandbox)) (workspace / "tmp").mkdir(mode=0o1777) with scopedenv({"TMPDIR": os.fspath(workspace / "tmp")}): try: yield Path(workspace) finally: if args.debug_workspace: stack.pop_all() log_notice(f"Workspace: {workspace}") @contextlib.contextmanager def createrepo(context: Context) -> Iterator[None]: st = context.repository.stat() try: yield finally: if context.repository.stat().st_mtime_ns != st.st_mtime_ns: with complete_step("Rebuilding local package repository"): context.config.distribution.installer.package_manager(context.config).createrepo(context) def make_rootdir(context: Context) -> None: if context.root.exists(): return with umask(~0o755): # Using a btrfs subvolume as the upperdir in an overlayfs results in EXDEV so make sure we # create the root directory as a regular directory if the Overlay= option is enabled. 
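        # (On btrfs, renaming files across a subvolume boundary fails with EXDEV, which is
        # presumably what overlayfs runs into when the upperdir is a subvolume.)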
if context.config.overlay: context.root.mkdir() else: make_tree(context.root, use_subvolumes=context.config.use_subvolumes) def build_image(context: Context) -> None: manifest = Manifest(context) if context.config.manifest_format else None install_sandbox_trees(context.config, context.sandbox_tree) with mount_base_trees(context): install_base_trees(context) cached = reuse_cache(context) make_rootdir(context) wantrepo = ( ( not cached and ( context.config.packages or context.config.build_packages or context.config.prepare_scripts ) ) or context.config.volatile_packages or context.config.postinst_scripts or context.config.finalize_scripts ) context.config.distribution.installer.setup(context) if wantrepo: with createrepo(context): install_package_directories(context, context.config.package_directories) install_package_directories(context, context.config.volatile_package_directories) install_package_directories(context, [context.package_dir]) if not cached: install_skeleton_trees(context) install_distribution(context) run_prepare_scripts(context, build=False) install_build_packages(context) run_prepare_scripts(context, build=True) fixup_vmlinuz_location(context) run_depmod(context, cache=True) save_cache(context) reuse_cache(context) check_root_populated(context) run_build_scripts(context) if context.config.output_format == OutputFormat.none or context.args.rerun_build_scripts: return if wantrepo: with createrepo(context): install_package_directories(context, [context.package_dir]) install_volatile_packages(context) install_build_dest(context) install_extra_trees(context) run_postinst_scripts(context) fixup_vmlinuz_location(context) configure_autologin(context) configure_os_release(context) configure_extension_release(context) configure_initrd(context) configure_ssh(context) configure_clock(context) configure_verity_certificate(context) configure_mountpoints(context) if manifest: manifest.record_extension_release() install_systemd_boot(context) install_grub(context) install_shim(context) run_sysusers(context) run_tmpfiles(context) run_preset(context) run_depmod(context) run_firstboot(context) run_hwdb(context) # These might be removed by the next steps, so let's save them for later if needed. 
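        # save_esp_components() copies the stub and kernel image out of the image tree into
        # the workspace so UKI/ESP/addon outputs can still be assembled after
        # remove_packages() and remove_files() have run.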
stub, kver, kimg, microcode = save_esp_components(context) remove_packages(context) if manifest: manifest.record_packages() run_selinux_relabel(context) clean_package_manager_metadata(context) remove_files(context) run_finalize_scripts(context) rmtree(context.root / "work") normalize_mtime(context.root, context.config.source_date_epoch) partitions = make_disk(context, skip=("esp", "xbootldr"), tabs=True, msg="Generating disk image") install_kernel(context, partitions) normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("boot")) normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("efi")) partitions = make_disk(context, msg="Formatting ESP/XBOOTLDR partitions") grub_bios_setup(context, partitions) if ArtifactOutput.partitions in context.config.split_artifacts: make_disk(context, split=True, msg="Extracting partitions") if ( context.config.output_format != OutputFormat.tar and ArtifactOutput.tar in context.config.split_artifacts ): make_tar(context.root, context.staging / context.config.output_tar, sandbox=context.sandbox) copy_nspawn_settings(context) copy_uki(context) copy_vmlinuz(context) copy_initrd(context) copy_repart_definitions(context) if context.config.output_format == OutputFormat.tar: make_tar(context.root, context.staging / context.config.output_with_format, sandbox=context.sandbox) elif context.config.output_format == OutputFormat.oci: make_tar(context.root, context.staging / "rootfs.layer", sandbox=context.sandbox) make_oci( context, context.staging / "rootfs.layer", context.staging / context.config.output_with_format, ) elif context.config.output_format == OutputFormat.cpio: make_cpio(context.root, context.staging / context.config.output_with_format, sandbox=context.sandbox) elif context.config.output_format == OutputFormat.uki: assert stub and kver and kimg make_uki(context, stub, kver, kimg, microcode, context.staging / context.config.output_with_format) elif context.config.output_format == OutputFormat.esp: make_esp(context, stub, kver, kimg, microcode) elif context.config.output_format == OutputFormat.addon: assert stub make_addon(context, stub, context.staging / context.config.output_with_format) elif context.config.output_format.is_extension_or_portable_image(): make_extension_or_portable_image(context, context.staging / context.config.output_with_format) elif context.config.output_format == OutputFormat.directory: context.root.rename(context.staging / context.config.output_with_format) if context.config.output_format.use_outer_compression(): maybe_compress( context, context.config.compress_output, context.staging / context.config.output_with_format, context.staging / context.config.output_with_compression, ) calculate_sha256sum(context) calculate_signature(context) save_manifest(context, manifest) output_base = context.staging / context.config.output if not output_base.exists() or output_base.is_symlink(): output_base.unlink(missing_ok=True) output_base.symlink_to(context.config.output_with_compression) run_postoutput_scripts(context) finalize_staging(context) print_output_size(context.config.output_dir_or_cwd() / context.config.output_with_compression) def run_box(args: Args, config: Config) -> None: if in_box(): die( "mkosi box cannot be invoked from within another mkosi box environment", hint="Exit the current mkosi box environment and try again", ) if not args.cmdline: die("Please specify a command to execute in the sandbox") mounts = finalize_certificate_mounts(config, relaxed=True) # Since we reuse almost every 
top level directory from the host except /usr and /etc, the crypto # mountpoints have to exist already in these directories or we'll fail with a permission error. Let's # check this early and show a better error and a suggestion on how users can fix this issue. We use # slice notation to get every 3rd item from the mounts list which is the destination path. for dst in mounts[2::3]: if not Path(dst).exists(): die( f"Missing mountpoint {dst}", hint=f"Create an empty directory at {dst} using 'mkdir -p {dst}' as root and try again", ) hd, hr = detect_distribution() env = {"MKOSI_IN_BOX": "1"} if hd: env |= {"MKOSI_HOST_DISTRIBUTION": str(hd)} if hr: env |= {"MKOSI_HOST_RELEASE": hr} if config.tools() != Path("/"): env |= {"MKOSI_DEFAULT_TOOLS_TREE_PATH": os.fspath(config.tools())} cmdline = [*args.cmdline] if sys.stdin.isatty() and sys.stdout.isatty(): cmdline = systemd_pty_forward(config, background="48;2;12;51;51", title="mkosi-sandbox") + cmdline with contextlib.ExitStack() as stack: if config.tools() != Path("/"): d = stack.enter_context(tempfile.TemporaryDirectory(prefix="mkosi-path-")) # We have to point zipapp to a directory containing the mkosi module and set the entrypoint # manually instead of directly at the mkosi package, otherwise we get ModuleNotFoundError when # trying to run a zipapp created from a packaged version of mkosi. While zipapp.create_archive() # supports a filter= argument, trying to use this within a site-packages directory is rather slow # so we copy the mkosi package to a temporary directory instead which is much faster. with ( tempfile.TemporaryDirectory(prefix="mkosi-zipapp-") as tmp, resource_path(sys.modules[__package__ or __name__]) as module, ): copy_tree(module, Path(tmp) / module.name, sandbox=config.sandbox) zipapp.create_archive( source=tmp, target=Path(d) / "mkosi", main="mkosi.__main__:main", interpreter="/usr/bin/env python3", ) make_executable(Path(d) / "mkosi") mounts += ["--ro-bind", d, "/mkosi"] stack.enter_context(scopedenv({"PATH": f"/mkosi:{os.environ['PATH']}"})) run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | env, log=False, sandbox=config.sandbox( devices=True, network=True, relaxed=True, options=["--same-dir", *mounts], ), ) def run_latest_snapshot(args: Args, config: Config) -> None: print(config.distribution.installer.latest_snapshot(config)) def run_shell(args: Args, config: Config) -> None: opname = "acquire shell in" if args.verb == Verb.shell else "boot" if config.output_format not in (OutputFormat.directory, OutputFormat.disk): die(f"Cannot {opname} {config.output_format} images with systemd-nspawn") if config.output_format.use_outer_compression() and config.compress_output: die(f"Cannot {opname} compressed {config.output_format} images with systemd-nspawn") cmdline: list[PathString] = ["systemd-nspawn", "--quiet", "--link-journal=no"] if config.runtime_network == Network.user: cmdline += ["--resolv-conf=auto"] elif config.runtime_network == Network.interface: cmdline += ["--private-network", "--network-veth"] elif config.runtime_network == Network.none: cmdline += ["--private-network"] # If we copied in a .nspawn file, make sure it's actually honoured if config.nspawn_settings: cmdline += ["--settings=trusted"] if args.verb == Verb.boot: cmdline += ["--boot"] else: cmdline += [ f"--rlimit=RLIMIT_CORE={format_rlimit(resource.RLIMIT_CORE)}", "--console=autopipe", ] # Underscores are not allowed in machine names so replace them with hyphens. 
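    # e.g. an image named "my_image" is registered with machined as "my-image".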
name = config.machine_or_name().replace("_", "-") cmdline += ["--machine", name, "--register", yes_no(finalize_register(config))] with contextlib.ExitStack() as stack: for f in finalize_credentials(config, stack).iterdir(): cmdline += [f"--load-credential={f.name}:{f}"] # Make sure the latest nspawn settings are always used. if config.nspawn_settings: if not (config.output_dir_or_cwd() / f"{name}.nspawn").exists(): stack.callback( lambda: (config.output_dir_or_cwd() / f"{name}.nspawn").unlink(missing_ok=True) ) shutil.copy2(config.nspawn_settings, config.output_dir_or_cwd() / f"{name}.nspawn") # If we're booting a directory image that wasn't built by root, we always make an ephemeral # copy to avoid ending up with files not owned by the directory image owner in the # directory image. if config.ephemeral or ( config.output_format == OutputFormat.directory and args.verb == Verb.boot and (config.output_dir_or_cwd() / config.output).stat().st_uid != 0 ): fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output)) else: fname = stack.enter_context(flock_or_die(config.output_dir_or_cwd() / config.output)) if config.output_format == OutputFormat.disk and args.verb == Verb.boot: run( [ "systemd-repart", "--image", workdir(fname), *([f"--size={config.runtime_size}"] if config.runtime_size else []), "--no-pager", "--dry-run=no", "--offline=no", "--pretty=no", workdir(fname), ], stdin=sys.stdin, env=config.finalize_environment(), sandbox=config.sandbox( network=True, devices=True, options=["--bind", fname, workdir(fname)], ), setup=become_root_cmd(), ) # fmt: skip if config.output_format == OutputFormat.directory: cmdline += ["--directory", fname] owner = os.stat(fname).st_uid if owner != 0: # Let's allow running a shell in a non-ephemeral image but in that case only map a # single user into the image so it can't get polluted with files or directories # owned by other users. if ( args.verb == Verb.shell and config.output_format == OutputFormat.directory and not config.ephemeral ): range = 1 else: range = 65536 cmdline += [f"--private-users={owner}:{range}"] else: cmdline += ["--image", fname] if config.runtime_build_sources: for t in config.build_sources: src, dst = t.with_prefix("/work/src") uidmap = "rootidmap" if src.stat().st_uid != 0 else "noidmap" cmdline += ["--bind", f"{src}:{dst}:norbind,{uidmap}"] if config.build_dir: uidmap = "rootidmap" if config.build_subdir.stat().st_uid != 0 else "noidmap" cmdline += ["--bind", f"{config.build_subdir}:/work/build:norbind,{uidmap}"] for tree in config.runtime_trees: target = Path("/root/src") / (tree.target or "") # We add norbind because very often RuntimeTrees= will be used to mount the source # directory into the container and the output directory from which we're running will # very likely be a subdirectory of the source directory which would mean we'd be # mounting the container root directory as a subdirectory in itself which tends to lead # to all kinds of weird issues, which we avoid by not doing a recursive mount which # means the container root directory mounts will be skipped. 
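# A standalone sketch (hypothetical paths) of the hazard that norbind avoids: with a
# recursive bind, a source directory that contains the container root would be
# mounted inside itself; a plain (non-recursive) bind skips such submounts.
from pathlib import Path

def would_self_mount(source: Path, container_root: Path) -> bool:
    return container_root.is_relative_to(source)

assert would_self_mount(Path("/home/user/src"), Path("/home/user/src/build/image"))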
uidmap = "rootidmap" if tree.source.stat().st_uid != 0 else "noidmap" cmdline += ["--bind", f"{tree.source}:{target}:norbind,{uidmap}"] if config.bind_user: cmdline += ["--bind-user", getpass.getuser(), "--bind-user-group=wheel"] if args.verb == Verb.boot and config.forward_journal: with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: addr = ( Path(os.getenv("TMPDIR", "/tmp")) / f"mkosi-journal-remote-unix-{uuid.uuid4().hex[:16]}" ) sock.bind(os.fspath(addr)) sock.listen() if config.output_format == OutputFormat.directory and (stat := os.stat(fname)).st_uid != 0: os.chown(addr, stat.st_uid, stat.st_gid) stack.enter_context(start_journal_remote(config, sock.fileno())) uidmap = "rootidmap" if addr.stat().st_uid != 0 else "noidmap" cmdline += [ f"--bind={addr}:/run/host/journal/socket:{uidmap}", "--set-credential=journal.forward_to_socket:/run/host/journal/socket", ] for p in config.unit_properties: cmdline += ["--property", p] if args.verb == Verb.boot: # Add nspawn options first since systemd-nspawn ignores all options after the first argument. argv = args.cmdline # When invoked by the kernel, all unknown arguments are passed as environment variables # to pid1. Let's mimic the same behavior when we invoke nspawn as a container. for arg in itertools.chain( config.kernel_command_line, finalize_kernel_command_line_extra(config), ): name, sep, value = arg.partition("=") # If there's a '.' in the argument name, it's not considered an environment # variable by the kernel. if sep and "." not in name: cmdline += ["--setenv", f"{name.replace('-', '_')}={value}"] else: # kernel cmdline config of the form systemd.xxx= get interpreted by systemd # when running in nspawn as well. argv += [arg] cmdline += argv elif args.cmdline: cmdline += ["--"] cmdline += args.cmdline run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.finalize_environment(), log=False, sandbox=config.sandbox( devices=True, network=True, relaxed=True, options=["--same-dir"], ), setup=become_root_cmd(), ) def run_systemd_tool(tool: str, args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.directory): die(f"{config.output_format} images cannot be inspected with {tool}") if ( args.verb in (Verb.journalctl, Verb.coredumpctl) and config.output_format == OutputFormat.disk and os.getuid() != 0 ): need_root = True else: need_root = False if (tool_path := config.find_binary(tool)) is None: die(f"Failed to find {tool}") if config.ephemeral: die(f"Images booted in ephemeral mode cannot be inspected with {tool}") if not (output := config.output_dir_or_cwd() / config.output).exists(): die( f"Output {output} does not exist, cannot inspect with {tool}", hint=f"Build and boot the image first before inspecting it with {tool}", ) run( [tool_path, "--root" if output.is_dir() else "--image", output, *args.cmdline], stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.finalize_environment(), log=False, sandbox=config.sandbox( network=True, devices=config.output_format == OutputFormat.disk, relaxed=True, ), setup=become_root_cmd() if need_root else [], ) def run_journalctl(args: Args, config: Config) -> None: run_systemd_tool("journalctl", args, config) def run_coredumpctl(args: Args, config: Config) -> None: run_systemd_tool("coredumpctl", args, config) def start_storage_target_mode(config: Config) -> AbstractContextManager[Optional[Popen]]: if config.storage_target_mode == ConfigFeature.disabled: return contextlib.nullcontext() if config.storage_target_mode == 
ConfigFeature.auto and os.getuid() != 0: return contextlib.nullcontext() if config.output_format != OutputFormat.disk: if config.storage_target_mode == ConfigFeature.enabled: die("Storage target mode is only supported for the 'disk' output format") return contextlib.nullcontext() if not config.find_binary("/usr/lib/systemd/systemd-storagetm"): if config.storage_target_mode == ConfigFeature.enabled: die("Storage target mode enabled but systemd-storagetm is not installed") return contextlib.nullcontext() return spawn( ["/usr/lib/systemd/systemd-storagetm", config.output_with_format], stdin=sys.stdin, stdout=sys.stdout, sandbox=config.sandbox( network=True, relaxed=True, options=["--chdir", config.output_dir_or_cwd()], ), setup=become_root_cmd(), ) def run_serve(args: Args, config: Config) -> None: """Serve the output directory via a tiny HTTP server""" with contextlib.ExitStack() as stack: http = stack.enter_context( spawn( [python_binary(config), "-m", "http.server", "8081"], stdin=sys.stdin, stdout=sys.stdout, sandbox=config.sandbox( network=True, relaxed=True, options=["--chdir", config.output_dir_or_cwd()], ), ) ) storagetm = stack.enter_context(start_storage_target_mode(config)) # If we run systemd-storagetm with run0, it replaces the foreground process group with its own which # means the http process doesn't get SIGINT from the terminal, so let's send it ourselves in that # case. if storagetm and os.getuid() != 0: storagetm.wait() http.send_signal(signal.SIGINT) def generate_key_cert_pair(args: Args) -> None: """Generate a private key and accompanying X509 certificate using openssl""" keylength = 2048 expiration_date = datetime.date.today() + datetime.timedelta(int(args.genkey_valid_days)) configdir = finalize_configdir(args.directory) if not configdir: die("genkey cannot be used with empty --directory") for f in (configdir / "mkosi.key", configdir / "mkosi.crt"): if f.exists() and not args.force: die( f"{f} already exists", hint="To generate new keys, first remove mkosi.key and mkosi.crt", ) log_step(f"Generating keys rsa:{keylength} for CN {args.genkey_common_name!r}.") logging.info( textwrap.dedent( f""" The keys will expire in {args.genkey_valid_days} days ({expiration_date:%A %d. %B %Y}). Remember to roll them over to new ones before then. 
""" ) ) run( [ "openssl", "req", "-new", "-x509", "-newkey", f"rsa:{keylength}", "-keyout", configdir / "mkosi.key", "-out", configdir / "mkosi.crt", "-days", str(args.genkey_valid_days), "-subj", f"/CN={args.genkey_common_name}/", "-nodes" ], env=dict(OPENSSL_CONF="/dev/null"), ) # fmt: skip def finalize_image_version(args: Args, config: Config) -> None: configdir = finalize_configdir(args.directory) if not configdir: die("Image version cannot be finalized with empty --directory") p = configdir / "mkosi.version" assert config.image_version p.write_text(config.image_version) logging.info(f"Wrote new version {config.image_version} to {p}") def check_workspace_directory(config: Config) -> None: wd = config.workspace_dir_or_default() for tree in config.build_sources: if wd.is_relative_to(tree.source): die( f"The workspace directory ({wd}) cannot be a subdirectory of " f"any source directory ({tree.source})", hint="Set BuildSources= to the empty string or use WorkspaceDirectory= to configure " "a different workspace directory", ) def run_clean_scripts(config: Config) -> None: if not config.clean_scripts: return for script in config.clean_scripts: if not os.access(script, os.X_OK): die(f"{script} is not executable") env = dict( DISTRIBUTION=str(config.distribution), RELEASE=config.release, ARCHITECTURE=str(config.architecture), DISTRIBUTION_ARCHITECTURE=config.distribution.installer.architecture(config.architecture), SRCDIR="/work/src", OUTPUTDIR="/work/out", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", MKOSI_DEBUG=one_zero(ARG_DEBUG.get()), ) if config.architecture.to_efi() is not None: env["EFI_ARCHITECTURE"] = str(config.architecture.to_efi()) if config.profiles: env["PROFILES"] = " ".join(config.profiles) with ( finalize_source_mounts(config, ephemeral=False) as sources, finalize_config_json(config) as json, ): for script in config.clean_scripts: with complete_step(f"Running clean script {script}…"): run( ["/work/clean"], env=env | config.finalize_environment(), sandbox=config.sandbox( tools=False, options=[ "--dir", "/work/src", "--chdir", "/work/src", "--dir", "/work/out", "--ro-bind", script, "/work/clean", "--ro-bind", json, "/work/config.json", *(["--bind", os.fspath(o), "/work/out"] if (o := config.output_dir_or_cwd()).exists() else []), # noqa: E501 *sources, ], ), stdin=sys.stdin, ) # fmt: skip def validate_certificates_and_keys(config: Config) -> None: keyutil = config.find_binary("systemd-keyutil", "/usr/lib/systemd/systemd-keyutil") if not keyutil: return if config.verity in (Verity.auto, Verity.signed) and config.verity_certificate and config.verity_key: run_systemd_sign_tool( config, cmdline=[keyutil, "validate"], options=[], certificate=config.verity_certificate, certificate_source=config.verity_certificate_source, key=config.verity_key, key_source=config.verity_key_source, stdout=subprocess.DEVNULL, ) if ( config.bootable != ConfigFeature.disabled and config.secure_boot and config.secure_boot_certificate and config.secure_boot_key ): run_systemd_sign_tool( config, cmdline=[keyutil, "validate"], options=[], certificate=config.secure_boot_certificate, certificate_source=config.secure_boot_certificate_source, key=config.secure_boot_key, key_source=config.secure_boot_key_source, stdout=subprocess.DEVNULL, ) if ( config.bootable != ConfigFeature.disabled and config.sign_expected_pcr != ConfigFeature.disabled and config.sign_expected_pcr_certificate and config.sign_expected_pcr_key ): run_systemd_sign_tool( config, cmdline=[keyutil, 
"validate"], options=[], certificate=config.sign_expected_pcr_certificate, certificate_source=config.sign_expected_pcr_certificate_source, key=config.sign_expected_pcr_key, key_source=config.sign_expected_pcr_key_source, stdout=subprocess.DEVNULL, ) def needs_build(args: Args, config: Config, force: int = 1) -> bool: if args.rerun_build_scripts: return False return ( (args.force >= force) or not (config.output_dir_or_cwd() / config.output_with_compression).exists() # When the output is a directory, its name is the same as the symlink we create that points to the # actual output when not building a directory. So if the full output path exists, we have to check # that it's not a symlink as well. or (config.output_dir_or_cwd() / config.output_with_compression).is_symlink() ) def remove_cache_entries(config: Config) -> None: if not config.cache_dir: return sandbox = functools.partial(config.sandbox, tools=False) if any(p.exists() for p in cache_tree_paths(config)): with complete_step(f"Removing cache entries of {config.image} image…"): rmtree(*(p for p in cache_tree_paths(config) if p.exists()), sandbox=sandbox) def run_clean(args: Args, config: Config) -> None: # We remove any cached images if either the user used --force twice, or he/she called "clean" # with it passed once. Let's also remove the downloaded package cache if the user specified one # additional "--force". # We don't want to require a tools tree to run mkosi clean so we pass in a sandbox that # disables use of the tools tree. We still need a sandbox as we need to acquire privileges to # be able to remove various files from the rootfs. sandbox = functools.partial(config.sandbox, tools=False) if args.verb == Verb.clean: remove_outputs = True remove_build_cache = args.force > 0 or args.wipe_build_dir remove_image_cache = args.force > 0 remove_package_cache = args.force > 1 else: remove_outputs = args.force > 0 or (config.is_incremental() and not have_cache(config)) remove_build_cache = args.force > 1 or args.wipe_build_dir remove_image_cache = args.force > 1 or not have_cache(config) remove_package_cache = args.force > 2 if remove_outputs: outputs = { config.output_dir_or_cwd() / output for output in config.outputs if ( (config.output_dir_or_cwd() / output).exists() or (config.output_dir_or_cwd() / output).is_symlink() ) } # Make sure we resolve the symlink we create in the output directory and remove its target # as well as it might not be in the list of outputs anymore if the compression or output # format was changed. 
outputs |= {o.resolve() for o in outputs} if outputs: with ( complete_step(f"Removing output files of {config.image} image…"), flock_or_die(config.output_dir_or_cwd() / config.output) if (config.output_dir_or_cwd() / config.output).exists() else contextlib.nullcontext(), ): rmtree(*outputs, sandbox=sandbox) run_clean_scripts(config) if ( remove_build_cache and config.build_dir and config.build_subdir.exists() and any(config.build_subdir.iterdir()) ): with complete_step(f"Clearing out build directory of {config.image} image…"): rmtree(*config.build_subdir.iterdir(), sandbox=sandbox) if remove_image_cache and config.cache_dir: remove_cache_entries(config) if remove_package_cache and config.cache_dir and config.image in ("main", "tools"): with complete_step(f"Clearing out metadata and keyring cache of {config.image} image…"): rmtree( metadata_cache(config), keyring_cache(config), sandbox=sandbox, ) def ensure_directories_exist(config: Config) -> None: for p in ( config.output_dir, config.cache_dir, config.package_cache_dir_or_default(), config.build_dir, config.workspace_dir_or_default(), ): if not p or p.exists(): continue p.mkdir(parents=True, exist_ok=True) if config.build_dir: config.build_subdir.mkdir(exist_ok=True) st = config.build_subdir.stat() # Discard setuid/setgid bits if set as these are inherited and can leak into the image. if stat.S_IMODE(st.st_mode) & (stat.S_ISGID | stat.S_ISUID): config.build_subdir.chmod(stat.S_IMODE(st.st_mode) & ~(stat.S_ISGID | stat.S_ISUID)) def sync_repository_metadata( args: Args, images: Sequence[Config], *, resources: Path, stack: contextlib.ExitStack, ) -> tuple[Path, Path]: last = images[-1] if last.cache_dir: keyring_dir = keyring_cache(last) else: keyring_dir = Path( stack.enter_context( tempfile.TemporaryDirectory( dir=last.workspace_dir_or_default(), prefix="mkosi-keyring-", ) ) ) # If /var is used as the package cache directory, we are reusing the system package cache directory in # mkosi-initrd so we want to pick up the metadata from there in that case. if last.package_cache_dir_or_default() == Path("/var"): metadata_dir = last.package_cache_dir_or_default() elif last.cache_dir: metadata_dir = metadata_cache(last) else: metadata_dir = Path( stack.enter_context( tempfile.TemporaryDirectory( dir=last.workspace_dir_or_default(), prefix="mkosi-metadata-", ) ) ) subdir = last.distribution.installer.package_manager(last).subdir(last) for d in ("cache", "lib"): (metadata_dir / d / subdir).mkdir(parents=True, exist_ok=True) src = metadata_dir / "lib" / subdir for p in last.distribution.installer.package_manager(last).state_subdirs(): (src / p).mkdir(parents=True, exist_ok=True) (last.package_cache_dir_or_default() / "cache" / subdir).mkdir(parents=True, exist_ok=True) # Sync repository metadata unless explicitly disabled. 
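# A hedged standalone sketch of the policy implemented below (plain strings stand in
# for mkosi's Cacheonly enum): sync when caching is disabled outright, or when it is
# automatic and no image in the series has a valid cache yet.
def should_sync_metadata(cacheonly: str, any_cache_valid: bool) -> bool:
    return cacheonly == "never" or (cacheonly == "auto" and not any_cache_valid)

assert should_sync_metadata("auto", any_cache_valid=False)
assert not should_sync_metadata("always", any_cache_valid=False)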
    if last.cacheonly == Cacheonly.never or (
        last.cacheonly == Cacheonly.auto and not any(have_cache(config) for config in images)
    ):
        with setup_workspace(args, last) as workspace:
            context = Context(
                args,
                last,
                workspace=workspace,
                resources=resources,
                keyring_dir=keyring_dir,
                metadata_dir=metadata_dir,
            )

            context.root.mkdir(mode=0o755)

            install_sandbox_trees(context.config, context.sandbox_tree)
            context.config.distribution.installer.setup(context)
            context.config.distribution.installer.keyring(context)

            with complete_step("Syncing package manager metadata"):
                context.config.distribution.installer.package_manager(context.config).sync(
                    context,
                    force=context.args.force > 1 or context.config.cacheonly == Cacheonly.never,
                )

    src = metadata_dir / "cache" / subdir
    dst = last.package_cache_dir_or_default() / "cache" / subdir

    # We just synced the package manager metadata; in the case of dnf, this means we can now iterate the
    # synced repository metadata directories and use those to create the corresponding directories in the
    # package cache directory.
    for srcsubdir, _ in last.distribution.installer.package_manager(last).package_subdirs(src):
        (dst / srcsubdir).mkdir(parents=True, exist_ok=True)

    return keyring_dir, metadata_dir


def run_build(
    args: Args,
    config: Config,
    *,
    resources: Path,
    keyring_dir: Path,
    metadata_dir: Path,
    package_dir: Optional[Path] = None,
) -> None:
    if not have_effective_cap(CAP_SYS_ADMIN):
        acquire_privileges()
        unshare(CLONE_NEWNS)
    else:
        unshare(CLONE_NEWNS)
        mount("", "/", "", MS_SLAVE | MS_REC, "")

    # For extra safety when running as root, remount a bunch of directories read-only unless one of
    # mkosi's writable directories (workspace, package cache, image cache or output directory) is
    # located inside them.
    if os.getuid() == 0:
        remount = ["/etc", "/opt", "/boot", "/efi", "/media", "/usr"]

        for d in remount:
            if not Path(d).exists():
                continue

            if any(
                p and p.is_relative_to(d)
                for p in (
                    config.workspace_dir_or_default(),
                    config.package_cache_dir_or_default(),
                    config.cache_dir,
                    config.output_dir_or_cwd(),
                )
            ):
                continue

            attrs = MOUNT_ATTR_RDONLY
            if d in ("/boot", "/efi"):
                attrs |= MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | MOUNT_ATTR_NOEXEC

            mount_rbind(d, d, attrs)

    with (
        complete_step(f"Building {config.image} image"),
        setup_workspace(args, config) as workspace,
    ):
        build_image(
            Context(
                args,
                config,
                workspace=workspace,
                resources=resources,
                keyring_dir=keyring_dir,
                metadata_dir=metadata_dir,
                package_dir=package_dir,
            )
        )


def ensure_tools_tree_has_etc_resolv_conf(config: Config) -> None:
    if not config.tools_tree:
        return

    # We can't bind mount in the host's /etc/resolv.conf if this file doesn't exist without making the
    # entirety of /etc writable or messing around with overlayfs, so let's just ensure it exists.
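# A standalone illustration (temporary files only) of why the check below looks at
# is_symlink() in addition to exists(): Path.exists() follows symlinks, so a dangling
# /etc/resolv.conf symlink would otherwise be reported as missing even though the
# symlink itself is present.
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as d:
    link = Path(d) / "resolv.conf"
    link.symlink_to("/nonexistent/target")
    assert link.is_symlink() and not link.exists()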
path = config.tools_tree / "etc/resolv.conf" if not path.is_symlink() and not path.exists(): die( f"Tools tree {config.tools_tree} is missing /etc/resolv.conf", hint="If you're using a default tools tree, run mkosi -f clean to remove the old tools tree " "without /etc/resolv.conf", ) def run_verb(args: Args, tools: Optional[Config], images: Sequence[Config], *, resources: Path) -> None: images = list(images) if args.verb == Verb.init: copy_tree(resources / "tmpfiles.d", INVOKING_USER.tmpfiles_dir(), preserve=False) log_notice(f"Copied mkosi tmpfiles dropins to {INVOKING_USER.tmpfiles_dir()}") return if args.verb == Verb.completion: return print_completion(args, resources=resources) if args.verb == Verb.documentation: if args.cmdline: manual = { "addon": "mkosi-addon", "initrd": "mkosi-initrd", "sandbox": "mkosi-sandbox", "news": "mkosi.news", }.get(args.cmdline[0], args.cmdline[0]) else: manual = "mkosi" formats: list[DocFormat] = ( [args.doc_format] if args.doc_format != DocFormat.auto else DocFormat.all() ) chapter = {"mkosi.news": 7}.get(manual, 1) return show_docs(manual, formats, man_chapter=chapter, resources=resources, pager=args.pager) if args.verb == Verb.genkey: return generate_key_cert_pair(args) if args.verb == Verb.dependencies: _, _, [deps] = parse_config( ["--directory=", "--repositories=", *args.cmdline, "--include=mkosi-tools", "build"], resources=resources, ) for p in sorted(deps.packages): print(p) return if all(config == Config.default() for config in images): die( "No configuration found", hint="Make sure mkosi is run from a directory with configuration files", ) if args.verb == Verb.summary: if args.json: text = dump_json( { "Tools": tools.to_dict() if tools else None, "Images": [config.to_dict() for config in images], } ) else: text = f"{summary(tools)}\n" if tools else "" text += "\n".join(summary(config) for config in images) page(text, args.pager) return if args.verb == Verb.cat_config: text = cat_config(images) page(text, args.pager) return # The images array has been modified so we need to reevaluate last again. last = images[-1] if args.verb == Verb.bump: finalize_image_version(args, last) return if args.verb == Verb.latest_snapshot: run_latest_snapshot(args, last) return if args.verb == Verb.clean: if tools and args.force > 0: run_clean(args, tools) for config in images: run_clean(args, config) rmtree(Path(".mkosi-private")) return # For the default tools tree have_cache() encompasses the "has the tools tree been built at all" check. 
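# A hedged standalone sketch of how such a staleness check can work: compare a stored
# manifest against a freshly computed one (the file layout and helper here are
# hypothetical, not mkosi's actual implementation).
import json
from pathlib import Path

def cache_is_valid(manifest: Path, current: dict) -> bool:
    try:
        return json.loads(manifest.read_text()) == current
    except (FileNotFoundError, json.JSONDecodeError):
        return False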
if tools and not have_cache(tools): if (args.rerun_build_scripts or args.verb != Verb.build) and args.force == 0: die( "Default tools tree requested but it is out-of-date or has not been built yet", hint="Make sure to (re)build the tools tree first with 'mkosi build' or use '--force'", ) elif last.incremental == Incremental.strict: die( "Default tools tree is out-of-date but the strict incremental mode is enabled", hint="Build once without --incremental=strict to rebuild the default tools tree", ) run_clean(args, tools) ensure_directories_exist(tools) run_sync_scripts(tools) check_tools(tools, Verb.build) with contextlib.ExitStack() as stack: tkd, tmd = sync_repository_metadata( args, [tools], resources=resources, stack=stack, ) fork_and_wait( run_build, args, tools, resources=resources, keyring_dir=tkd, metadata_dir=tmd, ) resolv = tools.output_dir_or_cwd() / tools.output / "etc/resolv.conf" if not resolv.is_symlink() and not resolv.exists(): resolv.touch() _, _, manifest = cache_tree_paths(tools) manifest.write_text(dump_json(tools.cache_manifest())) ensure_tools_tree_has_etc_resolv_conf(last) if args.verb.needs_tools(): return { Verb.ssh: run_ssh, Verb.journalctl: run_journalctl, Verb.coredumpctl: run_coredumpctl, Verb.box: run_box, Verb.sandbox: run_box, }[args.verb](args, last) if last.output_format == OutputFormat.none: if args.verb != Verb.build: die(f"Cannot run '{args.verb}' verb on image with output format 'none'") output = last.output_dir_or_cwd() / last.output_with_compression if ( args.verb == Verb.build and not args.force and last.output_format != OutputFormat.none and output.exists() and not output.is_symlink() and not args.rerun_build_scripts ): logging.info(f"Output path {output} exists already. (Use --force to rebuild.)") return if args.rerun_build_scripts and last.output_format != OutputFormat.none and not output.exists(): die( f"Image '{last.image}' must be built once before --rerun-build-scripts can be used", hint="Build the image once with 'mkosi build'", ) if ( args.verb != Verb.build and not args.force and last.output_format != OutputFormat.none and not output.exists() ): die( f"Image '{last.image}' has not been built yet", hint="Make sure to build the image first with 'mkosi build' or use '--force'", ) if not last.repart_offline and os.getuid() != 0: die(f"Must be root to build {last.image} image configured with RepartOffline=no") check_workspace_directory(last) if args.rerun_build_scripts and not last.is_incremental(): die("Incremental= must be enabled to be able to use --rerun-build-scripts") if last.is_incremental(): for a, b in itertools.combinations(images, 2): if a.expand_key_specifiers(a.cache_key) == b.expand_key_specifiers(b.cache_key): die( f"Image {a.image} and {b.image} have the same cache key '{a.expand_key_specifiers(a.cache_key)}'", # noqa: E501 hint="Add the &I specifier to the cache key to avoid this issue", ) if last.is_incremental() and (last.incremental == Incremental.strict or args.rerun_build_scripts): if args.force > 1: die( "Cannot remove incremental caches when building with Incremental=strict", hint="Build once with '-i yes' to update the image cache", ) if any((c := config).is_incremental() and not have_cache(config) for config in images): if args.rerun_build_scripts: die( f"Cannot use --rerun-build-scripts as the cache for image {c.image} is out-of-date", hint="Rebuild the image to update the image cache", ) else: die( f"Strict incremental mode is enabled and cache for image {c.image} is out-of-date", hint="Build once with '-i yes' to 
update the image cache", ) # First, process all directory removals because otherwise if different images share directories # a later image build could end up deleting the output generated by an earlier image build. if needs_build(args, last) or args.wipe_build_dir: for config in images: run_clean(args, config) for i, config in enumerate(images): if args.verb != Verb.build: check_tools(config, args.verb) images[i] = config = run_configure_scripts(config) # The images array has been modified so we need to reevaluate last again. # Also ensure that all other images are reordered in case their dependencies were modified. last = images[-1] images = resolve_deps(images[:-1], last.dependencies) + [last] if ( args.rerun_build_scripts or last.output_format == OutputFormat.none or not (last.output_dir_or_cwd() / last.output).exists() ): for config in images: if any( source.type != KeySourceType.file for source in ( config.verity_key_source, config.secure_boot_key_source, config.sign_expected_pcr_key_source, ) ): join_new_session_keyring() break validate = [ c for c in images if c.output_format != OutputFormat.none and not (c.output_dir_or_cwd() / c.output).exists() ] if validate: with complete_step("Validating certificates and keys"): for config in validate: validate_certificates_and_keys(config) ensure_directories_exist(last) with contextlib.ExitStack() as stack: package_dir = Path( stack.enter_context( tempfile.TemporaryDirectory( dir=last.workspace_dir_or_default(), prefix="mkosi-packages-", ) ) ) for config in images: ensure_directories_exist(config) run_sync_scripts(config) ikd = imd = None for config in images: # If the output format is "none" or we're rebuilding and there are no build scripts, there's # nothing to do so exit early. if ( config.output_format == OutputFormat.none or (args.rerun_build_scripts and (config.output_dir_or_cwd() / config.output).exists()) ) and not config.build_scripts: continue check_tools(config, Verb.build) check_inputs(config) if not ikd and not imd: ikd, imd = sync_repository_metadata( args, images, resources=resources, stack=stack, ) fork_and_wait( run_build, args, config, resources=resources, keyring_dir=ikd, metadata_dir=imd, package_dir=package_dir, ) if not ikd and not imd: logging.info("All images have already been built and do not have any build scripts") else: ring_terminal_bell() if args.auto_bump: finalize_image_version(args, last) if args.verb == Verb.build: return if ( last.output_format == OutputFormat.directory and (last.output_dir_or_cwd() / last.output).stat().st_uid == 0 and os.getuid() != 0 ): die( "Cannot operate on directory images built as root when running unprivileged", hint="Clean the root owned image by running mkosi -ff clean as root and then rebuild the image", ) run_vm = { Vmm.qemu: run_qemu, Vmm.vmspawn: run_vmspawn, }[last.vmm] { Verb.shell: run_shell, Verb.boot: run_shell, Verb.vm: run_vm, Verb.qemu: run_vm, Verb.serve: run_serve, Verb.burn: run_burn, Verb.sysupdate: run_sysupdate, }[args.verb](args, last) mkosi-26/mkosi/__main__.py000066400000000000000000000024571512054777600156670ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # PYTHON_ARGCOMPLETE_OK import faulthandler import signal import sys from types import FrameType from typing import Optional import mkosi.resources from mkosi import run_verb from mkosi.config import parse_config from mkosi.log import log_setup, stash_terminal_title from mkosi.run import find_binary, run, uncaught_exception_handler from mkosi.util import resource_path INTERRUPTED = 
False def onsignal(signal: int, frame: Optional[FrameType]) -> None: global INTERRUPTED if INTERRUPTED: return INTERRUPTED = True raise KeyboardInterrupt() @uncaught_exception_handler() def main() -> None: signal.signal(signal.SIGINT, onsignal) signal.signal(signal.SIGTERM, onsignal) signal.signal(signal.SIGHUP, onsignal) log_setup() with resource_path(mkosi.resources) as resources, stash_terminal_title(): args, tools, images = parse_config(sys.argv[1:], resources=resources) if args.debug: faulthandler.enable() try: run_verb(args, tools, images, resources=resources) finally: if sys.stderr.isatty() and find_binary("tput"): run(["tput", "cnorm"], check=False) run(["tput", "smam"], check=False) if __name__ == "__main__": main() mkosi-26/mkosi/addon.py000066400000000000000000000047071512054777600152340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import os import sys import tempfile from pathlib import Path import mkosi.resources from mkosi.config import DocFormat from mkosi.documentation import show_docs from mkosi.initrd import include_system_config, initrd_common_args, initrd_finalize, process_crypttab from mkosi.log import ARG_DEBUG, ARG_DEBUG_SHELL, log_setup from mkosi.run import run, uncaught_exception_handler from mkosi.util import PathString, resource_path @uncaught_exception_handler() def main() -> None: log_setup() parser = argparse.ArgumentParser( prog="mkosi-addon", description="Build initrd/cmdline/ucode addon for the current system using mkosi", allow_abbrev=False, usage="mkosi-addon [options...]", ) parser.add_argument( "-o", "--output", metavar="NAME", help="Output name", default="mkosi-local.addon.efi", ) initrd_common_args(parser) args = parser.parse_args() if args.show_documentation: with resource_path(mkosi.resources) as r: show_docs("mkosi-addon", DocFormat.all(), resources=r) return with tempfile.TemporaryDirectory() as staging_dir: cmdline: list[PathString] = [ "mkosi", "--force", "--directory", "", "--output", args.output, "--output-directory", staging_dir, "--build-sources", "", "--include=mkosi-addon", "--extra-tree", f"/usr/lib/modules/{args.kernel_version}:/usr/lib/modules/{args.kernel_version}", "--extra-tree=/usr/lib/firmware:/usr/lib/firmware", ] # fmt: skip if args.debug: ARG_DEBUG.set(args.debug) cmdline += ["--debug"] if args.debug_shell: ARG_DEBUG_SHELL.set(args.debug_shell) cmdline += ["--debug-shell"] if args.debug_sandbox: cmdline += ["--debug-sandbox"] if os.getuid() == 0: cmdline += ["--output-mode=600"] cmdline += include_system_config("mkosi-addon") cmdline += process_crypttab(Path(staging_dir)) if Path("/etc/kernel/cmdline").exists(): cmdline += ["--kernel-command-line", Path("/etc/kernel/cmdline").read_text()] run(cmdline, stdin=sys.stdin, stdout=sys.stdout) initrd_finalize(Path(staging_dir), args.output, args.output_dir) if __name__ == "__main__": main() mkosi-26/mkosi/archive.py000066400000000000000000000104161512054777600155620ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os from collections.abc import Iterable, Sequence from pathlib import Path from typing import Optional from mkosi.log import log_step from mkosi.run import SandboxProtocol, finalize_passwd_symlinks, nosandbox, run, workdir from mkosi.sandbox import umask from mkosi.util import PathString, chdir def tar_exclude_apivfs_tmp() -> list[str]: return [ "--exclude", "./dev/*", "--exclude", "./proc/*", "--exclude", "./sys/*", "--exclude", "./tmp/*", "--exclude", "./run/*", "--exclude", "./var/tmp/*", ] # fmt: 
skip def make_tar(src: Path, dst: Path, *, sandbox: SandboxProtocol = nosandbox) -> None: log_step(f"Creating tar archive {dst}…") with dst.open("wb") as f: run( [ "tar", "--create", "--file", "-", "--directory", workdir(src, sandbox), "--acls", "--selinux", # --xattrs implies --format=pax "--xattrs", # PAX format emits additional headers for atime, ctime and mtime # that would make the archive non-reproducible. "--pax-option=delete=atime,delete=ctime,delete=mtime", "--sparse", "--force-local", *(["--owner=root:0"] if os.getuid() != 0 else []), *(["--group=root:0"] if os.getuid() != 0 else []), *tar_exclude_apivfs_tmp(), ".", ], stdout=f, # Make sure tar uses user/group information from the root directory instead of the host. sandbox=sandbox( options=[ "--ro-bind", src, workdir(src, sandbox), *finalize_passwd_symlinks(workdir(src, sandbox)), ], ), ) # fmt: skip def can_extract_tar(src: Path) -> bool: return ".tar" in src.suffixes[-2:] def extract_tar( src: Path, dst: Path, *, log: bool = True, dirs: Sequence[PathString] = (), options: Sequence[PathString] = (), sandbox: SandboxProtocol = nosandbox, ) -> None: if log: log_step(f"Extracting tar archive {src}…") with umask(~0o755): dst.mkdir(exist_ok=True) run( [ "tar", "--extract", "--file", workdir(src, sandbox), "--directory", workdir(dst, sandbox), "--keep-directory-symlink", "--no-overwrite-dir", "--same-permissions", "--same-owner" if (dst / "etc/passwd").exists() and os.getuid() == 0 else "--numeric-owner", "--same-order", "--acls", "--selinux", "--xattrs", "--force-local", *tar_exclude_apivfs_tmp(), *options, *dirs, ], sandbox=sandbox( # Make sure tar uses user/group information from the root directory instead of the host. options=[ "--ro-bind", src, workdir(src, sandbox), "--bind", dst, workdir(dst, sandbox), *finalize_passwd_symlinks(workdir(dst, sandbox)), ], ), ) # fmt: skip def make_cpio( src: Path, dst: Path, *, files: Optional[Iterable[Path]] = None, sandbox: SandboxProtocol = nosandbox, ) -> None: if not files: with chdir(src): files = sorted(Path(".").rglob("*")) else: files = sorted(files) log_step(f"Creating cpio archive {dst}…") with dst.open("wb") as f: run( [ "cpio", "--create", "--reproducible", "--renumber-inodes", "--null", "--format=newc", "--quiet", "--directory", workdir(src, sandbox), *(["--owner=0:0"] if os.getuid() != 0 else []), ], input="\0".join(os.fspath(f) for f in files), stdout=f, sandbox=sandbox( options=[ "--ro-bind", src, workdir(src, sandbox), *finalize_passwd_symlinks(workdir(src, sandbox)) ], ), ) # fmt: skip mkosi-26/mkosi/bootloader.py000066400000000000000000001031711512054777600162740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import enum import itertools import logging import os import shutil import subprocess import sys import tempfile import textwrap from collections.abc import Iterator, Mapping, Sequence from pathlib import Path from typing import Optional from mkosi.config import ( BiosBootloader, Bootloader, CertificateSource, CertificateSourceType, Config, ConfigFeature, KeySource, KeySourceType, OutputFormat, SecureBootSignTool, ShimBootloader, systemd_tool_version, ) from mkosi.context import Context from mkosi.distribution import Distribution from mkosi.log import complete_step, die, log_step from mkosi.partition import Partition from mkosi.run import CompletedProcess, run, workdir from mkosi.sandbox import umask from mkosi.util import _FILE, PathString, StrEnum, flatten from mkosi.versioncomp import GenericVersion class KernelType(StrEnum): pe = 
enum.auto() uki = enum.auto() addon = enum.auto() unknown = enum.auto() @classmethod def identify(cls, config: Config, path: Path) -> "KernelType": pefile = textwrap.dedent( f"""\ import pefile try: pe = pefile.PE("{workdir(path)}", fast_load=True) sections = {{s.Name.decode().strip("\\0") for s in pe.sections}} if all(s in sections for s in (".linux", ".sdmagic", ".osrel")): print("{KernelType.uki}") elif ( all(s in sections for s in (".linux", ".sdmagic")) and any(s in sections for s in (".cmdline", ".dtb", ".initrd", ".ucode")) ): print("{KernelType.addon}") else: print("{KernelType.pe}") except pefile.PEFormatError: print("{KernelType.unknown}") """ ) result = run( [python_binary(config)], input=pefile, stdout=subprocess.PIPE, sandbox=config.sandbox(options=["--ro-bind", path, workdir(path)]), ) return KernelType(result.stdout.strip()) def want_efi(config: Config) -> bool: # Do we want to make the image bootable on EFI firmware? # Note that this returns True also in the case where autodetection might later cause the system to not be # made bootable on EFI firmware after the filesystem has been populated. if config.output_format == OutputFormat.esp: return True if config.bootable == ConfigFeature.disabled: return False if config.bootloader == Bootloader.none: return False if ( config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.none, OutputFormat.oci, OutputFormat.uki) or config.output_format.is_extension_or_portable_image() or config.overlay ) and config.bootable == ConfigFeature.auto: return False if config.architecture.to_efi() is None: if config.bootable == ConfigFeature.enabled: die(f"Cannot make image bootable on UEFI on {config.architecture} architecture") return False return True def want_grub_efi(context: Context) -> bool: if not want_efi(context.config): return False if not context.config.bootloader.is_grub(): return False if not (arch := context.config.architecture.to_grub()): return False if not context.config.bootloader.is_signed(): have = find_grub_directory(context, target=f"{arch}-efi") is not None if not have and context.config.bootable == ConfigFeature.enabled: die("An EFI bootable image with grub was requested but grub for EFI is not installed") return True def want_grub_bios(context: Context, partitions: Sequence[Partition] = ()) -> bool: if context.config.bootable == ConfigFeature.disabled: return False if context.config.output_format != OutputFormat.disk: return False if context.config.bios_bootloader != BiosBootloader.grub: return False if context.config.overlay: return False have = find_grub_directory(context, target="i386-pc") is not None if not have and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but grub for BIOS is not installed") bios = any(p.type == Partition.GRUB_BOOT_PARTITION_UUID for p in partitions) if partitions and not bios and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but no BIOS Boot Partition was configured") esp = any(p.type == "esp" for p in partitions) if partitions and not esp and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but no ESP partition was configured") root = any(p.type.startswith("root") or p.type.startswith("usr") for p in partitions) if partitions and not root and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but no root or usr partition was configured") installed = True for 
binary in ("mkimage", "bios-setup"): if find_grub_binary(context.config, binary): continue if context.config.bootable == ConfigFeature.enabled: die(f"A BIOS bootable image with grub was requested but {binary} was not found") installed = False return (have and bios and esp and root and installed) if partitions else have def find_grub_directory(context: Context, *, target: str) -> Optional[Path]: for d in ("usr/lib/grub", "usr/share/grub2"): if (p := context.root / d / target).exists() and any(p.iterdir()): return p return None def find_grub_binary(config: Config, binary: str) -> Optional[Path]: assert "grub" not in binary # Debian has a bespoke setup where if only grub-pc-bin is installed, grub-bios-setup is installed in # /usr/lib/i386-pc instead of in /usr/bin. Let's take that into account and look for binaries in # /usr/lib/grub/i386-pc as well. return config.find_binary(f"grub-{binary}", f"grub2-{binary}", f"/usr/lib/grub/i386-pc/grub-{binary}") def prepare_grub_config(context: Context) -> Optional[Path]: config = context.root / "efi" / context.config.distribution.installer.grub_prefix() / "grub.cfg" with umask(~0o700): config.parent.mkdir(exist_ok=True) # For some unknown reason, if we don't set the timeout to zero, grub never leaves its menu, so we default # to a zero timeout, but only if the config file hasn't been provided by the user. if not config.exists(): with umask(~0o600), config.open("w") as f: f.write("set timeout=0\n") if want_grub_efi(context): # Signed EFI grub shipped by distributions reads its configuration from /EFI//grub.cfg # (except in openSUSE) in the ESP so let's put a shim there to redirect to the actual configuration # file. if context.config.distribution == Distribution.opensuse: earlyconfig = context.root / "efi/EFI/BOOT/grub.cfg" elif context.config.distribution == Distribution.alma: earlyconfig = context.root / "efi/EFI/almalinux/grub.cfg" else: earlyconfig = context.root / "efi/EFI" / context.config.distribution.name / "grub.cfg" with umask(~0o700): earlyconfig.parent.mkdir(parents=True, exist_ok=True) # Read the actual config file from the root of the ESP. 
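# A standalone sketch of the redirect written below, assuming a hypothetical ESP
# layout and the common "grub2" prefix: the distribution's signed grub loads this
# stub, which immediately chains to the real configuration at the root of the ESP.
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as esp:
    stub = Path(esp) / "EFI/BOOT/grub.cfg"
    stub.parent.mkdir(parents=True)
    stub.write_text("configfile /grub2/grub.cfg\n")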
        earlyconfig.write_text(
            f"configfile /{context.config.distribution.installer.grub_prefix()}/grub.cfg\n"
        )

    return config


def grub_mkimage(
    context: Context,
    *,
    target: str,
    modules: Sequence[str] = (),
    output: Optional[Path] = None,
    sbat: Optional[Path] = None,
) -> None:
    mkimage = find_grub_binary(context.config, "mkimage")
    assert mkimage

    directory = find_grub_directory(context, target=target)
    assert directory

    prefix = context.config.distribution.installer.grub_prefix()

    with (
        complete_step(f"Generating grub image for {target}"),
        tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig,
    ):
        earlyconfig.write(
            textwrap.dedent(
                f"""\
                search --no-floppy --set=root --file /{prefix}/grub.cfg
                set prefix=($root)/{prefix}
                """
            )
        )
        earlyconfig.flush()

        run(
            [
                mkimage,
                "--directory", "/grub",
                "--config", workdir(Path(earlyconfig.name)),
                "--prefix", f"/{prefix}",
                "--output", workdir(output) if output else "/grub/core.img",
                "--format", target,
                *(["--sbat", os.fspath(workdir(sbat))] if sbat else []),
                *(["--disable-shim-lock"] if context.config.shim_bootloader == ShimBootloader.none else []),
                "cat",
                "cmp",
                "div",
                "echo",
                "fat",
                "hello",
                "help",
                *(["keylayouts"] if context.config.architecture.is_x86_variant() else []),
                "linux",
                "loadenv",
                "ls",
                "normal",
                "part_gpt",
                "read",
                "reboot",
                "search_fs_file",
                "search",
                "sleep",
                "test",
                "tr",
                "true",
                *modules,
            ],
            sandbox=context.sandbox(
                options=[
                    "--bind", directory, "/grub",
                    "--ro-bind", earlyconfig.name, workdir(Path(earlyconfig.name)),
                    *(["--bind", os.fspath(output.parent), os.fspath(workdir(output.parent))] if output else []),  # noqa: E501
                    *(["--ro-bind", os.fspath(sbat), os.fspath(workdir(sbat))] if sbat else []),
                ],
            ),
        )  # fmt: skip


def find_signed_grub_image(context: Context) -> Optional[Path]:
    arch = context.config.architecture.to_efi()

    patterns = [
        f"usr/lib/grub/*-signed/grub{arch}.efi.signed",  # Debian/Ubuntu
        f"boot/efi/EFI/*/grub{arch}.efi",  # Fedora/CentOS
        "usr/share/efi/*/grub.efi",  # openSUSE
    ]

    for p in flatten(context.root.glob(pattern) for pattern in patterns):
        if p.is_symlink() and p.readlink().is_absolute():
            logging.warning(f"Ignoring signed grub EFI binary which is an absolute path to {p.readlink()}")
            continue

        return p

    return None


def python_binary(config: Config) -> PathString:
    # If there's no tools tree, prefer the interpreter from MKOSI_INTERPRETER. If there is a tools
    # tree, just use the default python3 interpreter.
    exe = Path(sys.executable)
    return "python3" if config.tools_tree or not exe.is_relative_to("/usr") else exe


def extract_pe_section(context: Context, binary: Path, section: str, output: Path) -> Path:
    # When using a tools tree, we want to use the pefile module from the tools tree instead of requiring that
    # python-pefile is installed on the host. So we execute python as a subprocess to make sure we load
    # pefile from the tools tree if one is used.
    # TODO: Use ignore_padding=True instead of length once we can depend on a newer pefile.
    # TODO: Drop KeyError logic once we drop support for Ubuntu Jammy and sdmagic will always be available.
    # Misc_VirtualSize is the section size in memory, which can be bigger or smaller than SizeOfRawData,
    # which is the aligned section size on disk. The closest approximation of the actual section size will be
    # the minimum of these two. If Misc_VirtualSize < SizeOfRawData, we'll get the actual size. Otherwise
    # padding might be included.
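# A minimal sketch of the size computation just described:
def pe_section_payload_size(misc_virtual_size: int, size_of_raw_data: int) -> int:
    # SizeOfRawData is rounded up to the PE file alignment; Misc_VirtualSize is the
    # unpadded in-memory size. Their minimum best approximates the actual payload.
    return min(misc_virtual_size, size_of_raw_data)

assert pe_section_payload_size(1000, 1024) == 1000  # hypothetical values, padded on disk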
pefile = textwrap.dedent( f"""\ import pefile import sys from pathlib import Path pe = pefile.PE("{workdir(binary)}", fast_load=True) section = {{s.Name.decode().strip("\\0"): s for s in pe.sections}}.get("{section}") if not section: sys.exit(67) sys.stdout.buffer.write( section.get_data(length=min(section.Misc_VirtualSize, section.SizeOfRawData)) ) """ ) with open(output, "wb") as f: result = run( [python_binary(context.config)], input=pefile, stdout=f, sandbox=context.sandbox( options=["--ro-bind", binary, workdir(binary)], ), success_exit_status=(0, 67), ) if result.returncode == 67: raise KeyError(f"{section} section not found in {binary}") return output def install_grub(context: Context) -> None: if not want_grub_bios(context) and not want_grub_efi(context): return if want_grub_bios(context): grub_mkimage(context, target="i386-pc", modules=("biosdisk",)) if want_grub_efi(context): if context.config.shim_bootloader != ShimBootloader.none: output = context.root / shim_second_stage_binary(context) else: output = context.root / efi_boot_binary(context) with umask(~0o700): output.parent.mkdir(parents=True, exist_ok=True) if context.config.bootloader.is_signed(): if not (signed := find_signed_grub_image(context)): if context.config.bootable == ConfigFeature.enabled: die("Couldn't find a signed grub EFI binary installed in the image") return rel = output.relative_to(context.root) log_step(f"Installing signed grub EFI binary from /{signed.relative_to(context.root)} to /{rel}") shutil.copy2(signed, output) else: if context.config.secure_boot and context.config.shim_bootloader != ShimBootloader.none: if not (signed := find_signed_grub_image(context)): die("Couldn't find a signed grub EFI binary installed in the image to extract SBAT from") sbat = extract_pe_section(context, signed, ".sbat", context.workspace / "sbat") else: sbat = None grub_mkimage( context, target=f"{context.config.architecture.to_grub()}-efi", output=output, modules=("chain",), sbat=sbat, ) if context.config.secure_boot: sign_efi_binary(context, output, output) dst = context.root / "efi" / context.config.distribution.installer.grub_prefix() / "fonts" with umask(~0o700): dst.mkdir(parents=True, exist_ok=True) for d in ("grub", "grub2"): unicode = context.root / "usr/share" / d / "unicode.pf2" if unicode.exists(): shutil.copy2(unicode, dst) def grub_bios_setup(context: Context, partitions: Sequence[Partition]) -> None: if not want_grub_bios(context, partitions): return setup = find_grub_binary(context.config, "bios-setup") assert setup directory = find_grub_directory(context, target="i386-pc") assert directory with ( complete_step("Installing grub boot loader for BIOS…"), tempfile.NamedTemporaryFile(mode="w") as mountinfo, ): # grub-bios-setup insists on being able to open the root device that --directory is located on, which # needs root privileges. However, it only uses the root device when it is unable to embed itself in # the bios boot partition. To make installation work unprivileged, we trick grub to think that the # root device is our image by mounting over its /proc/self/mountinfo file (where it gets its # information from) with our own file correlating the root directory to our image file. 
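# A standalone sketch of the override described above (the image path is
# hypothetical): a single mountinfo line claiming that the image file backs / is
# enough for grub-bios-setup to resolve its "root device" without privileges, once
# this file is bind-mounted over /proc/self/mountinfo.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix="-mountinfo") as f:
    f.write("1 0 1:1 / / - fat /work/image.raw\n")
    f.flush()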
mountinfo.write( f"1 0 1:1 / / - fat {workdir(context.staging / context.config.output_with_format)}\n" ) mountinfo.flush() run( [ setup, "--directory", "/grub", workdir(context.staging / context.config.output_with_format), ], sandbox=context.sandbox( options=[ "--bind", directory, "/grub", "--bind", context.staging, workdir(context.staging), "--bind", mountinfo.name, "/proc/self/mountinfo", ], ), ) # fmt: skip def efi_boot_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() assert arch return Path(f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI") def shim_second_stage_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() assert arch if context.config.distribution == Distribution.opensuse: return Path("efi/EFI/BOOT/grub.EFI") else: return Path(f"efi/EFI/BOOT/grub{arch}.EFI") def run_systemd_sign_tool( config: Config, *, cmdline: Sequence[PathString], options: Sequence[PathString], certificate: Optional[Path], certificate_source: CertificateSource, key: Optional[Path], key_source: KeySource, env: Mapping[str, str] = {}, stdout: _FILE = None, devices: bool = False, ) -> CompletedProcess: if not certificate and not key: return run( cmdline, stdout=stdout, env={**config.finalize_environment(), **env}, sandbox=config.sandbox(options=options, devices=devices), ) assert certificate assert key cmd: list[PathString] = [*cmdline] opt: list[PathString] = [*options] if certificate_source.type != CertificateSourceType.file or key_source.type != KeySourceType.file: opt += ["--bind", "/run", "/run"] if certificate_source.type != CertificateSourceType.file: cmd += ["--certificate-source", str(certificate_source)] if certificate.exists(): cmd += ["--certificate", workdir(certificate)] opt += ["--ro-bind", certificate, workdir(certificate)] else: cmd += ["--certificate", certificate] if key_source.type != KeySourceType.file: cmd += ["--private-key-source", str(key_source)] if key.exists(): cmd += ["--private-key", workdir(key)] opt += ["--ro-bind", key, workdir(key)] else: cmd += ["--private-key", key] return run( cmd, stdin=(sys.stdin if key_source.type != KeySourceType.file else subprocess.DEVNULL), stdout=stdout, env={**config.finalize_environment(), **env}, sandbox=config.sandbox( options=opt, devices=( devices or key_source.type != KeySourceType.file or certificate_source.type != CertificateSourceType.file ), ), ) def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: assert context.config.secure_boot_key assert context.config.secure_boot_certificate sbsign = context.config.find_binary("systemd-sbsign", "/usr/lib/systemd/systemd-sbsign") if context.config.secure_boot_sign_tool == SecureBootSignTool.systemd_sbsign and not sbsign: die("Could not find systemd-sbsign") cmd: list[PathString] options: list[PathString] if context.config.secure_boot_sign_tool == SecureBootSignTool.systemd_sbsign or ( context.config.secure_boot_sign_tool == SecureBootSignTool.auto and sbsign ): assert sbsign options = ["--bind", output.parent, workdir(output.parent)] if input != output: options += ["--ro-bind", input, workdir(input)] run_systemd_sign_tool( context.config, cmdline=[sbsign, "sign", "--output", workdir(output), workdir(input)], options=options, certificate=context.config.secure_boot_certificate, certificate_source=context.config.secure_boot_certificate_source, key=context.config.secure_boot_key, key_source=context.config.secure_boot_key_source, ) elif ( context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or context.config.secure_boot_sign_tool 
== SecureBootSignTool.auto and context.config.find_binary("sbsign") is not None ): if context.config.secure_boot_certificate_source.type != CertificateSourceType.file: die("Secure boot certificate source must be 'file' when using sbsign as the signing tool") cmd = [ "sbsign", "--cert", workdir(context.config.secure_boot_certificate), "--output", workdir(output), ] # fmt: skip options = [ "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), # noqa: E501 "--bind", output.parent, workdir(output.parent), ] # fmt: skip if input != output: options += ["--ro-bind", input, workdir(input)] if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += ["--engine", context.config.secure_boot_key_source.source] options += ["--bind", "/run", "/run"] if context.config.secure_boot_key.exists(): cmd += ["--key", workdir(context.config.secure_boot_key)] options += ["--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key)] else: cmd += ["--key", context.config.secure_boot_key] cmd += [workdir(input)] run( cmd, stdin=( sys.stdin if context.config.secure_boot_key_source.type != KeySourceType.file else subprocess.DEVNULL ), env=context.config.finalize_environment(), sandbox=context.sandbox( options=options, devices=context.config.secure_boot_key_source.type != KeySourceType.file, ), ) else: die("One of systemd-sbsign or sbsign is required to use SecureBoot=") return output def find_and_install_shim_binary( context: Context, name: str, signed: Sequence[str], unsigned: Sequence[str], output: Path, ) -> None: if context.config.shim_bootloader == ShimBootloader.signed: for pattern in signed: for p in context.root.glob(pattern): if p.is_symlink() and p.readlink().is_absolute(): logging.warning( f"Ignoring signed {name} EFI binary which is an absolute path to {p.readlink()}" ) continue rel = p.relative_to(context.root) if (context.root / output).is_dir(): output /= rel.name # The ESP wants .efi files, not .efi.signed or .efi.signed.latest if output.suffix and output.suffix != ".efi": left_stem, _ = output.name.split(".", maxsplit=1) output = output.with_name(f"{left_stem}.efi") log_step(f"Installing signed {name} EFI binary from /{rel} to /{output}") shutil.copy2(p, context.root / output) return if context.config.bootable == ConfigFeature.enabled: die(f"Couldn't find signed {name} EFI binary installed in the image") else: for pattern in unsigned: for p in context.root.glob(pattern): if p.is_symlink() and p.readlink().is_absolute(): logging.warning( f"Ignoring unsigned {name} EFI binary which is an absolute path to {p.readlink()}" ) continue rel = p.relative_to(context.root) if (context.root / output).is_dir(): output /= rel.name if context.config.secure_boot: log_step(f"Signing and installing unsigned {name} EFI binary from /{rel} to /{output}") sign_efi_binary(context, p, context.root / output) else: log_step(f"Installing unsigned {name} EFI binary /{rel} to /{output}") shutil.copy2(p, context.root / output) return if context.config.bootable == ConfigFeature.enabled: die(f"Couldn't find unsigned {name} EFI binary installed in the image") def gen_kernel_images(context: Context) -> Iterator[tuple[str, Path]]: if not (context.root / "usr/lib/modules").exists(): return for kver in sorted( (k for k in (context.root / "usr/lib/modules").iterdir() if k.is_dir()), key=lambda k: GenericVersion(k.name), reverse=True, ): # Make sure we look for anything that remotely resembles vmlinuz, as the arch specific install # scripts in the 
kernel source tree sometimes do weird stuff. But let's make sure we're not returning # UKIs as the UKI on Fedora is named vmlinuz-virt.efi. Also look for uncompressed images (vmlinux) as # some architectures ship those. Prefer vmlinuz if both are present. for kimg in kver.glob("vmlinuz*"): if KernelType.identify(context.config, kimg) != KernelType.uki: yield kver.name, kimg break else: for kimg in kver.glob("vmlinux*"): if KernelType.identify(context.config, kimg) != KernelType.uki: yield kver.name, kimg break def install_systemd_boot(context: Context) -> None: if not want_efi(context.config): return if not context.config.bootloader.is_systemd_boot(): return if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto: return if not context.config.find_binary("bootctl"): if context.config.bootable == ConfigFeature.enabled: die("An EFI bootable image with systemd-boot was requested but bootctl was not found") return directory = context.root / "usr/lib/systemd/boot/efi" signed = context.config.bootloader.is_signed() if not any(directory.glob("*.efi.signed" if signed else "*.efi")): if context.config.bootable == ConfigFeature.enabled: die( f"An EFI bootable image with systemd-boot was requested but a {'signed ' if signed else ''}" f"systemd-boot binary was not found at /{directory.relative_to(context.root)}" ) return if context.config.secure_boot and not signed: with complete_step("Signing systemd-boot binaries…"): for input in itertools.chain(directory.glob("*.efi"), directory.glob("*.EFI")): output = directory / f"{input}.signed" sign_efi_binary(context, input, output) cmd: list[PathString] = [ "bootctl", "install", "--root=/buildroot", "--install-source=image", "--all-architectures", "--no-variables", ] bootctlver = systemd_tool_version("bootctl", sandbox=context.sandbox) if want_bootctl_auto_enroll := ( context.config.secure_boot and context.config.secure_boot_auto_enroll and bootctlver >= "257" ): cmd += ["--secure-boot-auto-enroll=yes"] with complete_step("Installing systemd-boot…"): run_systemd_sign_tool( context.config, cmdline=cmd, options=context.rootoptions(), certificate=context.config.secure_boot_certificate if want_bootctl_auto_enroll else None, certificate_source=context.config.secure_boot_certificate_source, key=context.config.secure_boot_key if want_bootctl_auto_enroll else None, key_source=context.config.secure_boot_key_source, env={"SYSTEMD_ESP_PATH": "/efi", "SYSTEMD_XBOOTLDR_PATH": "/boot"}, ) # TODO: Use --random-seed=no when we can depend on systemd 256. Path(context.root / "efi/loader/random-seed").unlink(missing_ok=True) if context.config.shim_bootloader != ShimBootloader.none: shutil.copy2( context.root / f"efi/EFI/systemd/systemd-boot{context.config.architecture.to_efi()}.efi", context.root / shim_second_stage_binary(context), ) if context.config.secure_boot and context.config.secure_boot_auto_enroll and bootctlver < "257": assert context.config.secure_boot_key assert context.config.secure_boot_certificate with complete_step("Setting up secure boot auto-enrollment…"): keys = context.root / "efi/loader/keys/auto" with umask(~0o700): keys.mkdir(parents=True, exist_ok=True) # sbsiglist expects a DER certificate. 
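# An equivalent standalone conversion (hedged sketch; mkosi wraps the same openssl
# invocation in its sandbox below, and the file names here are hypothetical):
import subprocess

subprocess.run(
    ["openssl", "x509", "-outform", "DER", "-in", "mkosi.crt", "-out", "mkosi.der"],
    check=True,
)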
run( [ "openssl", "x509", "-outform", "DER", "-in", workdir(context.config.secure_boot_certificate), "-out", workdir(context.workspace / "mkosi.der"), ], sandbox=context.sandbox( options=[ "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), "--bind", context.workspace, workdir(context.workspace), ], ), ) # fmt: skip run( [ "sbsiglist", "--owner", "00000000-0000-0000-0000-000000000000", "--type", "x509", "--output", workdir(context.workspace / "mkosi.esl"), workdir(context.workspace / "mkosi.der"), ], sandbox=context.sandbox( options=[ "--bind", context.workspace, workdir(context.workspace), "--ro-bind", context.workspace / "mkosi.der", workdir(context.workspace / "mkosi.der"), # noqa: E501 ] ), ) # fmt: skip # We reuse the key for all secure boot databases to keep things simple. for db in ["PK", "KEK", "db"]: with umask(~0o600): cmd = [ "sbvarsign", "--attr", "NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS", "--cert", workdir(context.config.secure_boot_certificate), "--output", workdir(keys / f"{db}.auth"), ] # fmt: skip options: list[PathString] = [ "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), "--ro-bind", context.workspace / "mkosi.esl", workdir(context.workspace / "mkosi.esl"), # noqa: E501 "--bind", keys, workdir(keys), ] # fmt: skip if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += ["--engine", context.config.secure_boot_key_source.source] options += ["--bind", "/run", "/run"] if context.config.secure_boot_key.exists(): cmd += ["--key", workdir(context.config.secure_boot_key)] options += [ "--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key), # noqa: E501 ] # fmt: skip else: cmd += ["--key", context.config.secure_boot_key] cmd += [db, workdir(context.workspace / "mkosi.esl")] run( cmd, stdin=( sys.stdin if context.config.secure_boot_key_source.type != KeySourceType.file else subprocess.DEVNULL ), sandbox=context.sandbox( options=options, devices=context.config.secure_boot_key_source.type != KeySourceType.file, ), ) def install_shim(context: Context) -> None: if not want_efi(context.config): return if context.config.shim_bootloader == ShimBootloader.none: return if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto: return dst = efi_boot_binary(context) with umask(~0o700): (context.root / dst).parent.mkdir(parents=True, exist_ok=True) arch = context.config.architecture.to_efi() signed = [ f"usr/lib/shim/shim{arch}.efi.signed.latest", # Ubuntu f"usr/lib/shim/shim{arch}.efi.signed", # Debian f"boot/efi/EFI/*/shim{arch}.efi", # Fedora/CentOS "usr/share/efi/*/shim.efi", # openSUSE ] unsigned = [ f"usr/lib/shim/shim{arch}.efi", # Debian/Ubuntu f"usr/share/shim/*/*/shim{arch}.efi", # Fedora/CentOS f"usr/share/shim/shim{arch}.efi", # Arch ] find_and_install_shim_binary(context, "shim", signed, unsigned, dst) signed = [ f"usr/lib/shim/mm{arch}.efi.signed", # Debian f"usr/lib/shim/mm{arch}.efi", # Ubuntu f"boot/efi/EFI/*/mm{arch}.efi", # Fedora/CentOS "usr/share/efi/*/MokManager.efi", # openSUSE ] unsigned = [ f"usr/lib/shim/mm{arch}.efi", # Debian/Ubuntu f"usr/share/shim/*/*/mm{arch}.efi", # Fedora/CentOS f"usr/share/shim/mm{arch}.efi", # Arch ] find_and_install_shim_binary(context, "mok", signed, unsigned, dst.parent) mkosi-26/mkosi/burn.py000066400000000000000000000026111512054777600151050ustar00rootroot00000000000000# SPDX-License-Identifier: 
LGPL-2.1-or-later import os import sys from mkosi.config import Args, Config, OutputFormat from mkosi.log import complete_step, die from mkosi.run import run from mkosi.user import become_root_cmd def run_burn(args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.esp): die(f"{config.output_format} images cannot be burned to disk") if not args.cmdline: die("Please specify a device to burn the image to", hint="For example /dev/disk/by-id/usb-foobar") fname = config.output_dir_or_cwd() / config.output if len(args.cmdline) != 1: die("Expected device argument.") cmd = [ "systemd-repart", "--no-pager", "--pretty=no", "--offline=yes", "--empty=force", "--dry-run=no", "--definitions=/", f"--copy-from={fname}", *args.cmdline, ] with complete_step("Burning 🔥🔥🔥 to medium…", "Burnt. 🔥🔥🔥"): run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.finalize_environment(), log=False, sandbox=config.sandbox( devices=True, network=True, relaxed=True, options=["--same-dir"], ), setup=become_root_cmd(), ) mkosi-26/mkosi/completion.py000066400000000000000000000201441512054777600163110ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import dataclasses import enum import io import shlex from collections.abc import Iterable, Mapping from pathlib import Path from textwrap import indent from typing import Optional, Union from mkosi import config from mkosi.log import die from mkosi.util import StrEnum class CompGen(StrEnum): default = enum.auto() files = enum.auto() dirs = enum.auto() @staticmethod def from_action(action: argparse.Action) -> "CompGen": if isinstance(action.default, Path): if action.default.is_dir(): return CompGen.dirs else: return CompGen.files # TODO: the type of action.type is Union[Callable[[str], Any], FileType] # the type of Path is type, but Path also works in this position, # because the constructor is a callable from str -> Path elif action.type is not None and (isinstance(action.type, type) and issubclass(action.type, Path)): # type: ignore if isinstance(action.default, Path) and action.default.is_dir(): # type: ignore return CompGen.dirs else: return CompGen.files return CompGen.default def to_bash(self) -> str: return f"_mkosi_compgen_{self}" def to_fish(self) -> str: if self == CompGen.files: return "--force-files" elif self == CompGen.dirs: return "--force-files -a '(__fish_complete_directories)'" else: return "-f" def to_zsh(self) -> str: if self == CompGen.files: return ":path:_files -/" elif self == CompGen.dirs: return ":directory:_files -f" else: return "" @dataclasses.dataclass(frozen=True) class CompletionItem: short: Optional[str] long: Optional[str] help: Optional[str] choices: list[str] compgen: CompGen def collect_completion_arguments() -> list[CompletionItem]: parser = config.create_argument_parser() options = [ CompletionItem( short=next((s for s in action.option_strings if not s.startswith("--")), None), long=next((s for s in action.option_strings if s.startswith("--")), None), help=action.help, choices=[str(c) for c in action.choices] if action.choices is not None else [], compgen=CompGen.from_action(action), ) for action in parser._actions if ( action.option_strings and action.help != argparse.SUPPRESS and action.dest not in config.SETTINGS_LOOKUP_BY_DEST ) ] options += [ CompletionItem( short=setting.short, long=setting.long, help=setting.help, choices=[str(c) for c in setting.choices] if setting.choices is not None else [], compgen=CompGen.default, ) for setting in 
config.SETTINGS ] return options def finalize_completion_bash(options: list[CompletionItem], resources: Path) -> str: def to_bash_array(name: str, entries: Iterable[str]) -> str: return f"{name.replace('-', '_')}=(" + " ".join(shlex.quote(str(e)) for e in entries) + ")" def to_bash_hasharray(name: str, entries: Mapping[str, Union[str, int]]) -> str: return ( f"{name.replace('-', '_')}=(" + " ".join(f"[{shlex.quote(str(k))}]={shlex.quote(str(v))}" for k, v in entries.items()) + ")" ) completion = resources / "completion.bash" options_by_key = {o.short: o for o in options if o.short} | {o.long: o for o in options if o.long} template = completion.read_text() with io.StringIO() as c: c.write(to_bash_array("_mkosi_options", options_by_key.keys())) c.write("\n\n") choices = to_bash_hasharray( "_mkosi_choices", {optname: " ".join(v.choices) for optname, v in options_by_key.items() if v.choices}, ) c.write(choices) c.write("\n\n") compgen = to_bash_hasharray( "_mkosi_compgen", { optname: v.compgen.to_bash() for optname, v in options_by_key.items() if v.compgen != CompGen.default }, ) c.write(compgen) c.write("\n\n") c.write(to_bash_hasharray("_mkosi_verbs", {str(v): str(v) for v in config.Verb})) definitions = c.getvalue() return template.replace("##VARIABLEDEFINITIONS##", indent(definitions, " " * 4)) def finalize_completion_fish(options: list[CompletionItem], resources: Path) -> str: with io.StringIO() as c: # REUSE-IgnoreStart c.write("# SPDX-License-Identifier: LGPL-2.1-or-later\n\n") # REUSE-IgnoreEnd c.write("complete -c mkosi -f\n") c.write("complete -c mkosi -n '__fish_is_first_token' -a \"") c.write(" ".join(str(v) for v in config.Verb)) c.write('"\n') # Complete paths after first token c.write("complete -c mkosi -F -n 'not __fish_is_first_token'\n") for option in options: if not option.short and not option.long: continue c.write("complete -c mkosi ") if option.short: c.write(f"-s {option.short.lstrip('-')} ") if option.long: c.write(f"-l {option.long.lstrip('-')} ") c.write("-r ") if option.choices: c.write('-a "') c.write(" ".join(option.choices)) c.write('" ') if option.help is not None: help = option.help.replace("'", "\\'") c.write(f'-d "{help}" ') c.write(option.compgen.to_fish()) c.write("\n") return c.getvalue() def finalize_completion_zsh(options: list[CompletionItem], resources: Path) -> str: def to_zsh_array(name: str, entries: Iterable[str]) -> str: return ( f"declare -a {name.replace('-', '_')}=(" + " ".join(shlex.quote(str(e)) for e in entries) + ")" ) completion = resources / "completion.zsh" with io.StringIO() as c: c.write(completion.read_text()) c.write("\n") c.write(to_zsh_array("_mkosi_verbs", [str(v) for v in config.Verb])) c.write("\n\n") c.write("_arguments -s \\\n") c.write(" '(- *)'{-h,--help}'[Show this help]' \\\n") c.write(" '(- *)--version[Show package version]' \\\n") for option in options: if not option.short and not option.long: continue posix = option.help and "'" in option.help open_quote = "$'" if posix else "'" if option.short and option.long: c.write(f" '({option.short} {option.long})'{{{option.short},{option.long}}}{open_quote}") else: c.write(f" {open_quote}{option.short or option.long}") if option.help: help = option.help.replace("'", r"\'") c.write(f"[{help}]") if option.choices: # TODO: maybe use metavar here? 
At least for me it's not shown, though c.write(":arg:(") c.write(" ".join(option.choices)) c.write(")") c.write(option.compgen.to_zsh()) c.write("' \\\n") c.write(" '*::mkosi verb:_mkosi_verb'\n\n") return c.getvalue() def print_completion(args: config.Args, *, resources: Path) -> None: if not args.cmdline: die( "No shell to generate completion script for specified", hint="Please specify either one of: bash, fish, zsh", ) shell = args.cmdline[0] if shell == "bash": func = finalize_completion_bash elif shell == "fish": func = finalize_completion_fish elif shell == "zsh": func = finalize_completion_zsh else: die( f"{shell!r} is not supported for completion scripts.", hint="Please specify either one of: bash, fish, zsh", ) completion_args = collect_completion_arguments() print(func(completion_args, resources)) mkosi-26/mkosi/config.py000066400000000000000000006133011512054777600154100ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import base64 import copy import dataclasses import enum import fnmatch import functools import getpass import graphlib import io import itertools import json import logging import math import operator import os.path import platform import re import shlex import string import subprocess import sys import tempfile import textwrap import typing import uuid from collections.abc import Collection, Iterable, Iterator, Sequence from contextlib import AbstractContextManager from pathlib import Path from typing import Any, Callable, ClassVar, Generic, Optional, Protocol, TypeVar, Union, cast from mkosi.distribution import Distribution, detect_distribution from mkosi.log import ARG_DEBUG, ARG_DEBUG_SANDBOX, ARG_DEBUG_SHELL, complete_step, die from mkosi.pager import page from mkosi.run import SandboxProtocol, find_binary, nosandbox, run, sandbox_cmd, workdir from mkosi.sandbox import ANSI_BLUE, ANSI_BOLD, ANSI_RESET, __version__ from mkosi.user import INVOKING_USER from mkosi.util import ( PathString, StrEnum, SupportsRead, chdir, flatten, is_power_of_2, make_executable, startswith, ) from mkosi.versioncomp import GenericVersion # taken from # https://github.com/python/typeshed/blob/c67f9da3732f4374bc208f896a18c60435863e1b/stdlib/_typeshed/__init__.pyi#L352 class DataclassInstance(Protocol): __dataclass_fields__: ClassVar[dict[str, dataclasses.Field[Any]]] T = TypeVar("T") D = TypeVar("D", bound=DataclassInstance) SE = TypeVar("SE", bound=StrEnum) ConfigParseCallback = Callable[[Optional[str], Optional[T]], Optional[T]] ConfigMatchCallback = Callable[[str, T], bool] ConfigDefaultCallback = Callable[[dict[str, Any]], T] BUILTIN_CONFIGS = ("mkosi-tools", "mkosi-initrd", "mkosi-vm", "mkosi-addon", "mkosi-obs") class Verb(StrEnum): build = enum.auto() clean = enum.auto() summary = enum.auto() cat_config = enum.auto() shell = enum.auto() boot = enum.auto() vm = enum.auto() qemu = enum.auto() ssh = enum.auto() serve = enum.auto() bump = enum.auto() help = enum.auto() genkey = enum.auto() documentation = enum.auto() journalctl = enum.auto() coredumpctl = enum.auto() burn = enum.auto() dependencies = enum.auto() completion = enum.auto() sysupdate = enum.auto() box = enum.auto() sandbox = enum.auto() init = enum.auto() latest_snapshot = enum.auto() def supports_cmdline(self) -> bool: return self in ( Verb.build, Verb.shell, Verb.boot, Verb.vm, Verb.qemu, Verb.ssh, Verb.journalctl, Verb.coredumpctl, Verb.burn, Verb.completion, Verb.documentation, Verb.sysupdate, Verb.box, Verb.sandbox, Verb.dependencies, ) def needs_tools(self) -> bool: return 
self in ( Verb.box, Verb.sandbox, Verb.journalctl, Verb.coredumpctl, Verb.ssh, Verb.latest_snapshot, ) def needs_build(self) -> bool: return self in ( Verb.build, Verb.shell, Verb.boot, Verb.vm, Verb.qemu, Verb.serve, Verb.burn, Verb.sysupdate, ) def needs_config(self) -> bool: return self not in ( Verb.help, Verb.genkey, Verb.documentation, Verb.dependencies, Verb.completion, Verb.init, ) class ConfigFeature(StrEnum): auto = enum.auto() enabled = enum.auto() disabled = enum.auto() def to_tristate(self) -> str: if self == ConfigFeature.enabled: return "yes" if self == ConfigFeature.disabled: return "no" return "" @dataclasses.dataclass(frozen=True) class ConfigTree: source: Path target: Optional[Path] def with_prefix(self, prefix: PathString = "/") -> tuple[Path, Path]: return ( self.source, Path(prefix) / os.fspath(self.target).lstrip("/") if self.target else Path(prefix), ) def __str__(self) -> str: return f"{self.source}:{self.target}" if self.target else f"{self.source}" class DriveFlag(StrEnum): persist = enum.auto() @dataclasses.dataclass(frozen=True) class Drive: id: str size: int directory: Optional[Path] options: Optional[str] file_id: str flags: list[DriveFlag] # We use negative numbers for specifying special constants # for VSock CIDs since they're not valid CIDs anyway. class VsockCID(enum.IntEnum): auto = -1 hash = -2 @classmethod def format(cls, cid: int) -> str: if cid == VsockCID.auto: return "auto" if cid == VsockCID.hash: return "hash" return str(cid) class SecureBootSignTool(StrEnum): auto = enum.auto() sbsign = enum.auto() systemd_sbsign = enum.auto() class OutputFormat(StrEnum): confext = enum.auto() cpio = enum.auto() directory = enum.auto() disk = enum.auto() esp = enum.auto() none = enum.auto() portable = enum.auto() sysext = enum.auto() tar = enum.auto() uki = enum.auto() oci = enum.auto() addon = enum.auto() def extension(self) -> str: return { OutputFormat.confext: "raw", OutputFormat.cpio: "cpio", OutputFormat.disk: "raw", OutputFormat.esp: "raw", OutputFormat.portable: "raw", OutputFormat.sysext: "raw", OutputFormat.tar: "tar", OutputFormat.uki: "efi", OutputFormat.addon: "efi", }.get(self, "") # fmt: skip def use_outer_compression(self) -> bool: return self in ( OutputFormat.tar, OutputFormat.cpio, OutputFormat.disk, OutputFormat.sysext, OutputFormat.confext, OutputFormat.portable, ) def is_extension_image(self) -> bool: return self in (OutputFormat.sysext, OutputFormat.confext, OutputFormat.addon) def is_extension_or_portable_image(self) -> bool: return self.is_extension_image() or self == OutputFormat.portable class ManifestFormat(StrEnum): json = enum.auto() # the standard manifest in json format changelog = enum.auto() # human-readable text file with package changelogs class Compression(StrEnum): # fmt: off none = enum.auto() zstd = enum.auto() zst = zstd xz = enum.auto() bz2 = enum.auto() gz = enum.auto() gzip = gz lz4 = enum.auto() lzma = enum.auto() # fmt: on def __bool__(self) -> bool: return self != Compression.none def extension(self) -> str: return {Compression.zstd: "zst"}.get(self, str(self)) def oci_media_type_suffix(self) -> str: suffix = { Compression.none: "", Compression.gz: "+gzip", Compression.zstd: "+zstd", }.get(self) # fmt: skip if not suffix: die(f"Compression {self} not supported for OCI layers") return suffix class DocFormat(StrEnum): auto = enum.auto() markdown = enum.auto() man = enum.auto() pandoc = enum.auto() system = enum.auto() @classmethod def all(cls) -> list["DocFormat"]: # this excludes auto and encodes the order in which 
these should be # checked when searching for docs return [cls.man, cls.pandoc, cls.markdown, cls.system] class Bootloader(StrEnum): none = enum.auto() uki = enum.auto() systemd_boot = enum.auto() grub = enum.auto() uki_signed = enum.auto() systemd_boot_signed = enum.auto() grub_signed = enum.auto() def is_uki(self) -> bool: return self in (Bootloader.uki, Bootloader.uki_signed) def is_systemd_boot(self) -> bool: return self in (Bootloader.systemd_boot, Bootloader.systemd_boot_signed) def is_grub(self) -> bool: return self in (Bootloader.grub, Bootloader.grub_signed) def is_signed(self) -> bool: return self in (Bootloader.uki_signed, Bootloader.systemd_boot_signed, Bootloader.grub_signed) class BiosBootloader(StrEnum): none = enum.auto() grub = enum.auto() class ShimBootloader(StrEnum): none = enum.auto() signed = enum.auto() unsigned = enum.auto() class UnifiedKernelImage(StrEnum): none = enum.auto() auto = enum.auto() signed = enum.auto() unsigned = enum.auto() def enabled(self) -> bool: return self in (UnifiedKernelImage.signed, UnifiedKernelImage.unsigned) class Cacheonly(StrEnum): always = enum.auto() auto = enum.auto() none = auto metadata = enum.auto() never = enum.auto() class Firmware(StrEnum): auto = enum.auto() linux = enum.auto() linux_noinitrd = enum.auto() uefi = enum.auto() uefi_secure_boot = enum.auto() bios = enum.auto() def is_uefi(self) -> bool: return self in (Firmware.uefi, Firmware.uefi_secure_boot) def is_linux(self) -> bool: return self in (Firmware.linux, Firmware.linux_noinitrd) class ConsoleMode(StrEnum): interactive = enum.auto() read_only = enum.auto() native = enum.auto() gui = enum.auto() class Network(StrEnum): interface = enum.auto() user = enum.auto() none = enum.auto() class Vmm(StrEnum): qemu = enum.auto() vmspawn = enum.auto() class Ssh(StrEnum): always = enum.auto() auto = enum.auto() runtime = enum.auto() never = enum.auto() class Incremental(StrEnum): yes = enum.auto() no = enum.auto() strict = enum.auto() def __bool__(self) -> bool: return self != Incremental.no class BuildSourcesEphemeral(StrEnum): yes = enum.auto() no = enum.auto() buildcache = enum.auto() def __bool__(self) -> bool: return self != BuildSourcesEphemeral.no class Verity(StrEnum): disabled = enum.auto() hash = enum.auto() signed = enum.auto() defer = enum.auto() auto = enum.auto() class Architecture(StrEnum): alpha = enum.auto() arc = enum.auto() arm = enum.auto() arm64 = enum.auto() ia64 = enum.auto() loongarch64 = enum.auto() mips_le = enum.auto() mips64_le = enum.auto() parisc = enum.auto() ppc = enum.auto() ppc64 = enum.auto() ppc64_le = enum.auto() riscv32 = enum.auto() riscv64 = enum.auto() s390 = enum.auto() s390x = enum.auto() tilegx = enum.auto() x86 = enum.auto() x86_64 = enum.auto() @staticmethod def from_uname(s: str) -> "Architecture": a = { "aarch64": Architecture.arm64, "aarch64_be": Architecture.arm64, "armv8l": Architecture.arm, "armv8b": Architecture.arm, "armv7ml": Architecture.arm, "armv7mb": Architecture.arm, "armv7l": Architecture.arm, "armv7b": Architecture.arm, "armv6l": Architecture.arm, "armv6b": Architecture.arm, "armv5tl": Architecture.arm, "armv5tel": Architecture.arm, "armv5tejl": Architecture.arm, "armv5tejb": Architecture.arm, "armv5teb": Architecture.arm, "armv5tb": Architecture.arm, "armv4tl": Architecture.arm, "armv4tb": Architecture.arm, "armv4l": Architecture.arm, "armv4b": Architecture.arm, "alpha": Architecture.alpha, "arc": Architecture.arc, "arceb": Architecture.arc, "x86_64": Architecture.x86_64, "i686": Architecture.x86, "i586": 
Architecture.x86, "i486": Architecture.x86, "i386": Architecture.x86, "ia64": Architecture.ia64, "parisc64": Architecture.parisc, "parisc": Architecture.parisc, "loongarch64": Architecture.loongarch64, "mips64": Architecture.mips64_le, "mips": Architecture.mips_le, "ppc64le": Architecture.ppc64_le, "ppc64": Architecture.ppc64, "ppc": Architecture.ppc, "riscv64": Architecture.riscv64, "riscv32": Architecture.riscv32, "riscv": Architecture.riscv64, "s390x": Architecture.s390x, "s390": Architecture.s390, "tilegx": Architecture.tilegx, }.get(s) # fmt: skip if not a: die(f"Architecture {s} is not supported") return a def to_efi(self) -> Optional[str]: return { Architecture.x86: "ia32", Architecture.x86_64: "x64", Architecture.arm: "arm", Architecture.arm64: "aa64", Architecture.riscv32: "riscv32", Architecture.riscv64: "riscv64", Architecture.loongarch64: "loongarch64", }.get(self) # fmt: skip def to_grub(self) -> Optional[str]: return { Architecture.x86_64: "x86_64", Architecture.x86: "i386", Architecture.arm64: "arm64", Architecture.arm: "arm", }.get(self) # fmt: skip def to_qemu(self) -> str: a = { Architecture.alpha: "alpha", Architecture.arm: "arm", Architecture.arm64: "aarch64", Architecture.loongarch64: "loongarch64", Architecture.mips64_le: "mips", Architecture.mips_le: "mips", Architecture.parisc: "hppa", Architecture.ppc: "ppc", Architecture.ppc64: "ppc64", Architecture.ppc64_le: "ppc64", Architecture.riscv32: "riscv32", Architecture.riscv64: "riscv64", Architecture.s390x: "s390x", Architecture.x86: "i386", Architecture.x86_64: "x86_64", }.get(self) # fmt: skip if not a: die(f"Architecture {self} not supported by QEMU") return a def to_oci(self) -> str: a = { Architecture.arm: "arm", Architecture.arm64: "arm64", Architecture.loongarch64: "loong64", Architecture.mips64_le: "mips64le", Architecture.mips_le: "mipsle", Architecture.ppc: "ppc", Architecture.ppc64: "ppc64", Architecture.ppc64_le: "ppc64le", Architecture.riscv32: "riscv", Architecture.riscv64: "riscv64", Architecture.s390x: "s390x", Architecture.x86: "386", Architecture.x86_64: "amd64", }.get(self) # fmt: skip if not a: die(f"Architecture {self} not supported by OCI") return a def supports_smbios(self, firmware: Firmware) -> bool: if self.is_x86_variant(): return True return self.is_arm_variant() and firmware.is_uefi() def supports_fw_cfg(self) -> bool: return self.is_x86_variant() or self.is_arm_variant() def supports_smm(self) -> bool: return self.is_x86_variant() def supports_hpet(self) -> bool: return self.is_x86_variant() def supports_cxl(self) -> bool: return self.is_x86_variant() or self.is_arm_variant() def can_kvm(self) -> bool: return self == Architecture.native() or ( Architecture.native() == Architecture.x86_64 and self == Architecture.x86 ) def default_qemu_machine(self) -> str: m = { Architecture.x86: "q35", Architecture.x86_64: "q35", Architecture.arm: "virt", Architecture.arm64: "virt", Architecture.s390: "s390-ccw-virtio", Architecture.s390x: "s390-ccw-virtio", Architecture.ppc: "pseries", Architecture.ppc64: "pseries", Architecture.ppc64_le: "pseries", Architecture.riscv64: "virt", } # fmt: skip if self not in m: die(f"No qemu machine defined for architecture {self}") return m[self] def default_qemu_nic_model(self) -> str: return { Architecture.s390: "virtio", Architecture.s390x: "virtio", }.get(self, "virtio-net-pci") # fmt: skip def is_native(self) -> bool: return self == self.native() def is_x86_variant(self) -> bool: return self in (Architecture.x86, Architecture.x86_64) def is_arm_variant(self) -> 
bool: return self in (Architecture.arm, Architecture.arm64) @classmethod def native(cls) -> "Architecture": return cls.from_uname(platform.machine()) class ArtifactOutput(StrEnum): uki = enum.auto() kernel = enum.auto() initrd = enum.auto() partitions = enum.auto() tar = enum.auto() pcrs = enum.auto() roothash = enum.auto() os_release = enum.auto() kernel_modules_initrd = enum.auto() repart_definitions = enum.auto() @staticmethod def compat_no() -> list["ArtifactOutput"]: return [ ArtifactOutput.uki, ArtifactOutput.kernel, ArtifactOutput.initrd, ] @staticmethod def compat_yes() -> list["ArtifactOutput"]: return [ ArtifactOutput.uki, ArtifactOutput.kernel, ArtifactOutput.initrd, ArtifactOutput.partitions, ] class ToolsTreeProfile(StrEnum): devel = enum.auto() misc = enum.auto() package_manager = enum.auto() runtime = enum.auto() @classmethod def default(cls) -> tuple["ToolsTreeProfile", ...]: return (cls.misc, cls.package_manager, cls.runtime) class InitrdProfile(StrEnum): lvm = enum.auto() network = enum.auto() nfs = enum.auto() pkcs11 = enum.auto() plymouth = enum.auto() raid = enum.auto() def expand_delayed_specifiers(specifiers: dict[str, str], text: str) -> str: def replacer(match: re.Match[str]) -> str: m = match.group("specifier") if (specifier := specifiers.get(m)) is not None: return specifier logging.warning(f"Unknown specifier '&{m}' found in {text}, ignoring") return "" return re.sub(r"&(?P[&a-zA-Z])", replacer, text) def try_parse_boolean(s: str) -> Optional[bool]: "Parse 1/true/yes/y/t/on as true and 0/false/no/n/f/off/None as false" s_l = s.lower() if s_l in {"1", "true", "yes", "y", "t", "on", "always"}: return True if s_l in {"0", "false", "no", "n", "f", "off", "never"}: return False return None def parse_boolean(s: str) -> bool: value = try_parse_boolean(s) if value is None: die(f"Invalid boolean literal: {s!r}") return value def in_box() -> bool: return parse_boolean(os.getenv("MKOSI_IN_BOX", "0")) def parse_path( value: str, *, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, absolute: bool = False, directory: bool = False, exclude: Sequence[PathString] = (), constants: Sequence[str] = (), ) -> Path: if value in constants: return Path(value) if expandvars: value = os.path.expandvars(value) path = Path(value) if expanduser: path = path.expanduser() if required: if not path.exists(): die(f"{value} does not exist") if directory and not path.is_dir(): die(f"{value} is not a directory") if absolute and not path.is_absolute(): die(f"{value} must be an absolute path") for e in exclude: if path.is_relative_to(e): die(f"{path} can not be relative to {e}") if resolve: path = path.resolve() if secret and path.exists(): mode = path.stat().st_mode & 0o777 if mode & 0o007: die( textwrap.dedent(f"""\ Permissions of '{path}' of '{mode:04o}' are too open. When creating secret files use an access mode that restricts access to the owner only. 
""") ) return path def parse_paths_from_directory( value: str, *, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, absolute: bool = False, constants: Sequence[str] = (), ) -> list[Path]: base = os.path.dirname(value) glob = os.path.basename(value) path = parse_path( base, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, absolute=absolute, constants=constants, ) if not path.exists(): return [] if path.exists() and not path.is_dir(): die(f"{path} should be a directory, but isn't.") return sorted(parse_path(os.fspath(p), resolve=resolve, secret=secret) for p in path.glob(glob)) def config_parse_key(value: Optional[str], old: Optional[str]) -> Optional[Path]: if not value: return None return parse_path(value, secret=True) if Path(value).exists() else Path(value) def config_parse_certificate(value: Optional[str], old: Optional[str]) -> Optional[Path]: if not value: return None return parse_path(value) if Path(value).exists() else Path(value) def make_tree_parser( absolute: bool = True, required: bool = False, directory: bool = False, ) -> Callable[[str], ConfigTree]: def parse_tree(value: str) -> ConfigTree: src, sep, tgt = value.partition(":") return ConfigTree( source=parse_path( src, required=required, directory=directory, ), target=parse_path( tgt, required=False, resolve=False, expanduser=False, absolute=absolute, ) if sep else None, ) return parse_tree def config_match_build_sources(match: str, value: list[ConfigTree]) -> bool: return Path(match.lstrip("/")) in [tree.target for tree in value if tree.target] def config_make_list_matcher(parse: Callable[[str], T]) -> ConfigMatchCallback[list[T]]: def config_match_list(match: str, value: list[T]) -> bool: if not match: return len(value) == 0 return parse(match) in value return config_match_list def config_parse_string(value: Optional[str], old: Optional[str]) -> Optional[str]: return value or None def config_make_string_matcher(allow_globs: bool = False) -> ConfigMatchCallback[str]: def config_match_string(match: str, value: str) -> bool: if allow_globs: return fnmatch.fnmatchcase(value, match) else: return match == value return config_match_string def config_match_key_value(match: str, value: dict[str, str]) -> bool: k, sep, v = match.partition("=") if not sep: return k in value return value.get(k, None) == v def config_parse_boolean(value: Optional[str], old: Optional[bool]) -> Optional[bool]: if value is None: return False if not value: return None return parse_boolean(value) def parse_feature(value: str) -> ConfigFeature: try: return ConfigFeature(value) except ValueError: return ConfigFeature.enabled if parse_boolean(value) else ConfigFeature.disabled def config_parse_feature(value: Optional[str], old: Optional[ConfigFeature]) -> Optional[ConfigFeature]: if value is None: return ConfigFeature.auto if not value: return None return parse_feature(value) def config_match_feature(match: str, value: ConfigFeature) -> bool: return value == parse_feature(match) def config_parse_compression(value: Optional[str], old: Optional[Compression]) -> Optional[Compression]: if not value: return None try: return Compression[value] except KeyError: return Compression.zstd if parse_boolean(value) else Compression.none def config_parse_uuid(value: Optional[str], old: Optional[str]) -> Optional[uuid.UUID]: if not value: return None if value == "random": return uuid.uuid4() try: return uuid.UUID(value) except ValueError: die(f"{value} is not a 
valid UUID") def config_parse_source_date_epoch(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: timestamp = int(value) except ValueError: die(f"Timestamp {value!r} is not a valid integer") if timestamp < 0: die(f"Source date epoch timestamp cannot be negative (got {value})") return timestamp def config_parse_compress_level(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: level = int(value) except ValueError: die(f"Compression level {value!r} is not a valid integer") if level < 0: die(f"Compression level cannot be negative (got {value})") return level def config_parse_mode(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: mode = int(value, base=8) except ValueError: die(f"Access mode {value!r} is not a valid integer in base 8") if mode < 0: die(f"Access mode cannot be negative (got {value})") if mode > 0o1777: die(f"Access mode cannot be greater than 1777 (got {value})") return mode def config_default_compression(namespace: dict[str, Any]) -> Compression: if namespace["output_format"] in ( OutputFormat.tar, OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp, OutputFormat.addon, ): return Compression.zstd elif namespace["output_format"] == OutputFormat.oci: return Compression.gz else: return Compression.none def config_default_output(namespace: dict[str, Any]) -> str: if namespace["image"] != "main": output = cast(str, namespace["image"]) else: output = namespace["image_id"] or "image" if namespace["image_version"]: output += f"_{namespace['image_version']}" return output def config_default_distribution(namespace: dict[str, Any]) -> Distribution: if d := os.getenv("MKOSI_HOST_DISTRIBUTION"): return Distribution(d) detected = detect_distribution()[0] if not detected: logging.info( "Distribution of your host can't be detected or isn't a supported target. " "Defaulting to Distribution=custom." ) return Distribution.custom return detected def config_default_release(namespace: dict[str, Any]) -> str: hd: Optional[Distribution] hr: Optional[str] if (d := os.getenv("MKOSI_HOST_DISTRIBUTION")) and (r := os.getenv("MKOSI_HOST_RELEASE")): hd, hr = Distribution(d), r else: hd, hr = detect_distribution() # If the configured distribution matches the host distribution, use the same release as the host. 
if namespace["distribution"] == hd and hr is not None: return hr return cast(str, namespace["distribution"].installer.default_release()) def config_default_tools_tree_distribution(namespace: dict[str, Any]) -> Distribution: if d := os.getenv("MKOSI_HOST_DISTRIBUTION"): return Distribution(d).installer.default_tools_tree_distribution() or Distribution(d) detected = detect_distribution()[0] if not detected: return Distribution.custom return detected.installer.default_tools_tree_distribution() or detected def config_default_repository_key_fetch(namespace: dict[str, Any]) -> bool: def needs_repository_key_fetch(distribution: Distribution) -> bool: return distribution == Distribution.arch or distribution.is_rpm_distribution() if namespace["tools_tree"] not in (Path("default"), Path("yes")): return ( detect_distribution(namespace["tools_tree"] or Path("/"))[0] == Distribution.ubuntu and needs_repository_key_fetch(namespace["distribution"]) ) # fmt: skip return namespace["tools_tree_distribution"] == Distribution.ubuntu and needs_repository_key_fetch( namespace["distribution"] ) def config_default_source_date_epoch(namespace: dict[str, Any]) -> Optional[int]: for env in namespace["environment"]: if s := startswith(env, "SOURCE_DATE_EPOCH="): break else: s = os.environ.get("SOURCE_DATE_EPOCH") return config_parse_source_date_epoch(s, None) def config_default_proxy_url(namespace: dict[str, Any]) -> Optional[str]: names = ("http_proxy", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY") for env in namespace["environment"]: k, _, v = env.partition("=") if k in names: return cast(str, v) for k, v in os.environ.items(): if k in names: return v return None def config_default_proxy_exclude(namespace: dict[str, Any]) -> Optional[list[str]]: names = ("no_proxy", "NO_PROXY") for env in namespace["environment"]: k, _, v = cast(str, env).partition("=") if k in names: return v.split(",") for k, v in os.environ.items(): if k in names: return v.split(",") return None def config_default_proxy_peer_certificate(namespace: dict[str, Any]) -> Optional[Path]: for p in (Path("/etc/pki/tls/certs/ca-bundle.crt"), Path("/etc/ssl/certs/ca-certificates.crt")): if p.exists(): return p return None def make_enum_parser(type: type[SE]) -> Callable[[str], SE]: def parse_enum(value: str) -> SE: try: return type(value) except ValueError: die(f"'{value}' is not a valid {type.__name__}") return parse_enum def config_make_enum_parser(type: type[SE]) -> ConfigParseCallback[SE]: def config_parse_enum(value: Optional[str], old: Optional[SE]) -> Optional[SE]: return make_enum_parser(type)(value) if value else None return config_parse_enum def config_make_enum_parser_with_boolean(type: type[SE], *, yes: SE, no: SE) -> ConfigParseCallback[SE]: def config_parse_enum(value: Optional[str], old: Optional[SE]) -> Optional[SE]: if not value: return None if value in type.values(): return type(value) return yes if parse_boolean(value) else no return config_parse_enum def config_make_enum_matcher(type: type[SE]) -> ConfigMatchCallback[SE]: def config_match_enum(match: str, value: SE) -> bool: return make_enum_parser(type)(match) == value return config_match_enum def config_match_architecture(match: str, value: Architecture) -> bool: if match == "uefi": return value.to_efi() is not None return config_make_enum_matcher(Architecture)(match, value) def package_sort_key(package: str) -> tuple[int, str]: """Sorts packages: normal first, paths second, conditional third""" if package.startswith("("): return 2, package elif package.startswith("/"): return 1, 
package return 0, package def config_make_list_parser( *, delimiter: Optional[str] = None, parse: Callable[[str], T] = str, # type: ignore # see mypy#3737 unescape: bool = False, reset: bool = True, key: Optional[Callable[[T], Any]] = None, ) -> ConfigParseCallback[list[T]]: def config_parse_list(value: Optional[str], old: Optional[list[T]]) -> Optional[list[T]]: new = old.copy() if old else [] if value is None: return [] # Empty strings reset the list. if unescape: lex = shlex.shlex(value, posix=True) lex.whitespace_split = True lex.whitespace = f"\n{delimiter or ''}" lex.commenters = "" values = list(lex) if reset and not values: return None else: if delimiter: value = value.replace(delimiter, "\n") values = value.split("\n") if reset and len(values) == 1 and values[0] == "": return None new += [parse(v) for v in values if v] if key: new.sort(key=key) return new return config_parse_list def config_match_version(match: str, value: str) -> bool: version = GenericVersion(value) for sigil, opfunc in { "==": operator.eq, "!=": operator.ne, "<=": operator.le, ">=": operator.ge, ">": operator.gt, "<": operator.lt, }.items(): if (rhs := startswith(match, sigil)) is not None: op = opfunc comp_version = GenericVersion(rhs) break else: # default to equality if no operation is specified op = operator.eq comp_version = GenericVersion(match) # all constraints must be fulfilled if not op(version, comp_version): return False return True def config_make_dict_parser( *, delimiter: Optional[str] = None, parse: Callable[[str], tuple[str, PathString]], unescape: bool = False, allow_paths: bool = False, reset: bool = True, ) -> ConfigParseCallback[dict[str, PathString]]: def config_parse_dict( value: Optional[str], old: Optional[dict[str, PathString]], ) -> Optional[dict[str, PathString]]: new = old.copy() if old else {} if value is None: return {} if allow_paths and value and "=" not in value: if Path(value).is_dir(): new.update({p.name: p.absolute() for p in sorted(Path(value).iterdir()) if not p.is_dir()}) elif (p := Path(value)).exists(): new[p.name] = p.absolute() else: die(f"{p.absolute()} does not exist") return new # Empty strings reset the dict. 
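# Illustrative example (assuming a dict-valued setting such as Environment=):
# once Environment=FOO=bar has been accumulated, a later bare "Environment="
# line parses to an empty value list, so None is returned below to signal a
# reset instead of a merge.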
if unescape: lex = shlex.shlex(value, posix=True) lex.whitespace_split = True lex.whitespace = f"\n{delimiter or ''}" lex.commenters = "" values = list(lex) if reset and not values: return None else: if delimiter: value = value.replace(delimiter, "\n") values = value.split("\n") if reset and len(values) == 1 and values[0] == "": return None return new | dict(parse(v) for v in values if v) return config_parse_dict def parse_environment(value: str) -> tuple[str, str]: key, sep, value = value.partition("=") key, value = key.strip(), value.strip() value = value if sep else os.getenv(key, "") return (key, value) def parse_key_value(value: str) -> tuple[str, str]: key, _, value = value.partition("=") key, value = key.strip(), value.strip() return (key, value) def make_path_parser( *, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, exclude: Sequence[PathString] = (), constants: Sequence[str] = (), ) -> Callable[[str], Path]: return functools.partial( parse_path, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, exclude=exclude, constants=constants, ) def config_make_path_parser( *, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, absolute: bool = False, constants: Sequence[str] = (), ) -> ConfigParseCallback[Path]: def config_parse_path(value: Optional[str], old: Optional[Path]) -> Optional[Path]: if not value: return None return parse_path( value, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, absolute=absolute, constants=constants, ) return config_parse_path def is_valid_filename(s: str) -> bool: s = s.strip() return not (s == "." or s == ".." 
or "/" in s) def config_make_filename_parser(hint: str) -> ConfigParseCallback[str]: def config_parse_filename(value: Optional[str], old: Optional[str]) -> Optional[str]: if not value: return None if not is_valid_filename(value): die( f"{value!r} is not a valid filename.", hint=hint, ) return value return config_parse_filename def match_path_exists(image: str, value: str) -> bool: if not value: return False return Path(value).exists() def config_parse_root_password( value: Optional[str], old: Optional[tuple[str, bool]] ) -> Optional[tuple[str, bool]]: if not value: return None value = value.strip() hashed = value.startswith("hashed:") value = value.removeprefix("hashed:") return (value, hashed) def match_systemd_version(image: str, value: str) -> bool: if not value: return False version = run(["systemctl", "--version"], stdout=subprocess.PIPE).stdout.strip().split()[1] return config_match_version(value, version) def match_host_architecture(image: str, value: str) -> bool: return Architecture(value) == Architecture.native() def match_image(image: str, value: str) -> bool: return value == image def parse_bytes(value: str) -> int: if value.endswith("G"): factor = 1024**3 elif value.endswith("M"): factor = 1024**2 elif value.endswith("K"): factor = 1024 else: factor = 1 if factor > 1: value = value[:-1] result = math.ceil(float(value) * factor) if result <= 0: die("Size out of range") rem = result % 4096 if rem != 0: result += 4096 - rem return result def config_parse_bytes(value: Optional[str], old: Optional[int] = None) -> Optional[int]: if not value: return None return parse_bytes(value) def config_parse_number(value: Optional[str], old: Optional[int] = None) -> Optional[int]: if not value: return None try: return int(value) except ValueError: die(f"{value!r} is not a valid number") def parse_profile(value: str) -> str: if not is_valid_filename(value): die( f"{value!r} is not a valid profile", hint="Profiles= or --profile= requires a name with no path components.", ) return value def parse_drive(value: str) -> Drive: parts = value.split(":") if len(parts) > 6: die(f"Too many components in drive '{value}") if len(parts) < 1: die(f"No ID specified for drive '{value}'") if len(parts) < 2: die(f"Missing size in drive '{value}") id = parts[0] if not is_valid_filename(id): die(f"Unsupported path character in drive id '{id}'") flag_parser = make_enum_parser(DriveFlag) flag_list = p.split(",") if len(parts) > 5 and (p := parts[5]) else [] return Drive( id=id, size=parse_bytes(parts[1]), directory=parse_path(p) if len(parts) > 2 and (p := parts[2]) else None, options=p if len(parts) > 3 and (p := parts[3]) else None, file_id=p if len(parts) > 4 and (p := parts[4]) else id, flags=[flag_parser(f) for f in flag_list], ) def config_parse_sector_size(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: size = int(value) except ValueError: die(f"'{value}' is not a valid number") if size < 512 or size > 4096: die(f"Sector size not between 512 and 4096: {size}") if not is_power_of_2(size): die(f"Sector size not power of 2: {size}") return size def config_parse_vsock_cid(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None if value == "auto": return VsockCID.auto if value == "hash": return VsockCID.hash try: cid = int(value) except ValueError: die(f"VSock connection ID '{value}' is not a valid number or one of 'auto' or 'hash'") if cid not in range(3, 0xFFFFFFFF): die(f"{cid} is not in the valid VSock connection ID range [3, 0xFFFFFFFF)") 
return cid def config_parse_minimum_version(value: Optional[str], old: Optional[str]) -> Optional[str]: if not value: return old if hash := startswith(value, "commit:"): if not in_box(): gitdir = Path(__file__).parent.parent if not (gitdir / ".git").exists(): die("Cannot check mkosi git version, not running mkosi from a git repository") git: list[PathString] = ["git", "-C", gitdir] if os.getuid() == 0: git += ["-c", f"safe.directory={gitdir}"] current = run([*git, "rev-parse", "HEAD"], stdout=subprocess.PIPE).stdout.strip() result = run( [*git, "rev-parse", "--quiet", "--verify", f"{hash}^{{commit}}"], # git rev-parse seems to produce output even with --quiet added to the options. stdout=subprocess.DEVNULL, check=False, ) if result.returncode == 0: result = run([*git, "merge-base", "--is-ancestor", hash, current], check=False) if result.returncode == 1: die( f"mkosi commit {hash} or newer is required by this configuration", hint=f"Currently checked out commit is {current}", ) elif result.returncode != 0: die(f"Failed to check if mkosi git checkout is newer than commit {hash}") return value new = GenericVersion(value) if new > __version__: die(f"mkosi {new} or newer is required by this configuration (found {__version__})") if not old: return value return value if new > old else old def file_run_or_read(file: Path) -> str: "Run the specified file and capture its output if it's executable, else read file contents" if os.access(file, os.X_OK): return run([file.absolute()], stdout=subprocess.PIPE, env=os.environ).stdout content = file.read_text() if content.startswith("#!/"): die( f"{file} starts with a shebang ({content.splitlines()[0]})", hint="This file should be executable", ) return content class KeySourceType(StrEnum): file = enum.auto() engine = enum.auto() provider = enum.auto() @dataclasses.dataclass(frozen=True) class KeySource: type: KeySourceType source: str = "" def __str__(self) -> str: return f"{self.type}:{self.source}" if self.source else str(self.type) def config_parse_key_source(value: Optional[str], old: Optional[KeySource]) -> Optional[KeySource]: if not value: return KeySource(type=KeySourceType.file) typ, _, source = value.partition(":") try: type = KeySourceType(typ) except ValueError: die(f"'{value}' is not a valid key source") return KeySource(type=type, source=source) class CertificateSourceType(StrEnum): file = enum.auto() provider = enum.auto() @dataclasses.dataclass(frozen=True) class CertificateSource: type: CertificateSourceType source: str = "" def __str__(self) -> str: return f"{self.type}:{self.source}" if self.source else str(self.type) def config_parse_certificate_source( value: Optional[str], old: Optional[CertificateSource], ) -> Optional[CertificateSource]: if not value: return CertificateSource(type=CertificateSourceType.file) typ, _, source = value.partition(":") try: type = CertificateSourceType(typ) except ValueError: die(f"'{value}' is not a valid certificate source") return CertificateSource(type=type, source=source) def config_parse_artifact_output_list( value: Optional[str], old: Optional[list[ArtifactOutput]] ) -> Optional[list[ArtifactOutput]]: if not value: return [] # Keep for backwards compatibility boolean_value = try_parse_boolean(value) if boolean_value is not None: return ArtifactOutput.compat_yes() if boolean_value else ArtifactOutput.compat_no() list_parser = config_make_list_parser(delimiter=",", parse=make_enum_parser(ArtifactOutput)) return list_parser(value, old) class SettingScope(StrEnum): # Not passed down to tools tree or 
subimages, can be configured everywhere. local = enum.auto() # Passed down to subimages from main image, can be configured in main and tools tree images. universal = enum.auto() # Passed down to subimages and the tools tree, can only be configured in main image. multiversal = enum.auto() # Passed down to subimages from main image, can be configured everywhere. inherit = enum.auto() # Not passed down anywhere, can only be configured in main image. main = enum.auto() # Only passed down to tools tree, can only be configured in main image. tools = enum.auto() # Only passed down to initrd, can only be configured in main image. initrd = enum.auto() # Like inherit, but only inherited by the default initrd. initrd_inherit = enum.auto() def is_main_setting(self) -> bool: return self in (SettingScope.main, SettingScope.tools, SettingScope.initrd, SettingScope.multiversal) def removeprefix(self, setting: str) -> str: if self == SettingScope.tools: return setting.removeprefix("tools_tree_") elif self == SettingScope.initrd: return setting.removeprefix("initrd_") else: return setting @dataclasses.dataclass(frozen=True) class ConfigSetting(Generic[T]): dest: str section: str parse: ConfigParseCallback[T] = config_parse_string # type: ignore # see mypy#3737 match: Optional[ConfigMatchCallback[T]] = None name: str = "" default: Optional[T] = None default_factory: Optional[ConfigDefaultCallback[T]] = None default_factory_depends: tuple[str, ...] = tuple() path_suffixes: tuple[str, ...] = () recursive_path_suffixes: tuple[str, ...] = () path_read_text: bool = False path_secret: bool = False specifier: str = "" scope: SettingScope = SettingScope.local # settings for argparse short: Optional[str] = None long: str = "" choices: Optional[list[str]] = None metavar: Optional[str] = None const: Optional[Any] = None help: Optional[str] = None # backward compatibility compat_names: tuple[str, ...] = () compat_longs: tuple[str, ...] = () # Tools tree specific settings tools: bool = False def __post_init__(self) -> None: if not self.name: object.__setattr__(self, "name", "".join(x.capitalize() for x in self.dest.split("_") if x)) if not self.long: object.__setattr__(self, "long", f"--{self.dest.replace('_', '-')}") @dataclasses.dataclass(frozen=True) class Match: name: str match: Callable[[str, str], bool] @dataclasses.dataclass(frozen=True) class Specifier: char: str callback: Callable[[dict[str, Any], Path], str] depends: tuple[str, ...] = tuple() class CustomHelpFormatter(argparse.HelpFormatter): def _format_action_invocation(self, action: argparse.Action) -> str: if not action.option_strings or action.nargs == 0: return super()._format_action_invocation(action) default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) return ", ".join(action.option_strings) + " " + args_string def _split_lines(self, text: str, width: int) -> list[str]: """Wraps text to width, each line separately. If the first line of text ends in a colon, we assume that this is a list of option descriptions, and subindent them. Otherwise, the text is wrapped without indentation. 
""" lines = text.splitlines() subindent = " " if lines[0].endswith(":") else "" return flatten( textwrap.wrap( line, width, break_long_words=False, break_on_hyphens=False, subsequent_indent=subindent ) for line in lines ) def parse_chdir(path: str) -> Optional[Path]: if not path: # The current directory should be ignored return None # Immediately change the current directory so that it's taken into # account when parsing the following options that take a relative path try: os.chdir(path) except (FileNotFoundError, NotADirectoryError): die(f"{path} is not a directory!") except OSError as e: die(f"Cannot change the directory to {path}: {e}") # Keep track of the current directory return Path.cwd() class IgnoreAction(argparse.Action): """Argparse action for deprecated options that can be ignored.""" def __init__( self, option_strings: Sequence[str], dest: str, nargs: Union[int, str, None] = None, default: Any = argparse.SUPPRESS, help: Optional[str] = argparse.SUPPRESS, ) -> None: super().__init__(option_strings, dest, nargs=nargs, default=default, help=help) def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None, ) -> None: logging.warning(f"{option_string} is no longer supported") class PagerHelpAction(argparse._HelpAction): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None] = None, option_string: Optional[str] = None, ) -> None: page(parser.format_help(), namespace.pager) parser.exit() def dict_with_capitalised_keys_factory(pairs: list[tuple[str, T]]) -> dict[str, T]: def key_transformer(k: str) -> str: if (s := SETTINGS_LOOKUP_BY_DEST.get(k)) is not None: return s.name return "".join(p.capitalize() for p in k.split("_")) return {key_transformer(k): v for k, v in dict(pairs).items()} @dataclasses.dataclass(frozen=True) class Args: verb: Verb cmdline: list[str] force: int directory: Optional[Path] debug: bool debug_shell: bool debug_workspace: bool debug_sandbox: bool pager: bool genkey_valid_days: str genkey_common_name: str auto_bump: bool doc_format: DocFormat json: bool wipe_build_dir: bool rerun_build_scripts: bool @classmethod def default(cls) -> "Args": """Alternative constructor to generate an all-default Args. This prevents Args being generated with defaults values implicitly. """ with tempfile.TemporaryDirectory() as tempdir: with chdir(tempdir): args, _, _ = parse_config([]) return args @classmethod @functools.lru_cache(maxsize=1) def fields(cls) -> dict[str, dataclasses.Field[Any]]: return {f.name: f for f in dataclasses.fields(cls)} @classmethod def from_namespace(cls, ns: dict[str, Any]) -> "Args": return cls(**{k: v for k, v in ns.items() if k in cls.fields()}) def to_dict(self) -> dict[str, Any]: return dataclasses.asdict(self, dict_factory=dict_with_capitalised_keys_factory) @classmethod def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Args": """Instantiate a Args object from a (partial) JSON dump.""" if isinstance(s, str): j = json.loads(s) elif isinstance(s, dict): j = s elif hasattr(s, "read"): j = json.load(s) else: raise ValueError( f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files." 
) def key_transformer(k: str) -> str: return "_".join(part.lower() for part in FALLBACK_NAME_TO_DEST_SPLITTER.split(k)) for k, v in j.items(): k = key_transformer(k) if k not in cls.fields() and (not isinstance(v, (dict, list, set)) or v): die( f"Serialized JSON has unknown field {k} with value {v}", hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON", ) value_transformer = json_type_transformer(cls) j = {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} return dataclasses.replace(cls.default(), **{k: v for k, v in j.items() if k in cls.fields()}) @dataclasses.dataclass(frozen=True) class UKIProfile: profile: dict[str, str] cmdline: list[str] sign_expected_pcr: bool def make_simple_config_parser( settings: Sequence[ConfigSetting[object]], valtype: type[D], ) -> Callable[[str], D]: lookup_by_name = {s.name: s for s in settings} lookup_by_dest = {s.dest: s for s in settings} def finalize_value(config: dict[str, Any], setting: ConfigSetting[object]) -> None: if setting.dest in config: return if setting.default_factory: for d in setting.default_factory_depends: finalize_value(config, lookup_by_dest[d]) default = setting.default_factory(config) elif setting.default: default = setting.default else: default = setting.parse(None, None) config[setting.dest] = default def parse_simple_config(value: str) -> D: path = parse_path(value) config: dict[str, Any] = {} for section, name, value in parse_ini(path, only_sections=[s.section for s in settings]): if not name and not value: continue if not (s := lookup_by_name.get(name)): logging.warning(f"{path.absolute()}: Unknown setting {name}") continue if section != s.section: logging.warning( f"{path.absolute()}: Setting {name} should be configured in [{s.section}], not " f"[{section}]." ) if name != s.name: logging.warning( f"{path.absolute()}: Setting {name} is deprecated, please use {s.name} instead." ) config[s.dest] = s.parse(value, config.get(s.dest)) for setting in settings: finalize_value(config, setting) parameters = {f.name for f in dataclasses.fields(valtype)} return valtype(**{k: v for k, v in config.items() if k in parameters}) return parse_simple_config @dataclasses.dataclass(frozen=True) class Config: """Type-hinted storage for command line arguments. Only user configuration is stored here while dynamic state exists in Mkosicontext. If a field of the same name exists in both classes always access the value from context. 
""" profiles: list[str] files: list[Path] dependencies: list[str] minimum_version: Optional[str] pass_environment: list[str] distribution: Distribution release: str architecture: Architecture mirror: Optional[str] snapshot: Optional[str] local_mirror: Optional[str] repository_key_check: bool repository_key_fetch: bool repositories: list[str] output_format: OutputFormat manifest_format: list[ManifestFormat] output: str output_extension: str compress_output: Compression compress_level: int output_dir: Optional[Path] output_mode: Optional[int] image_id: Optional[str] image_version: Optional[str] split_artifacts: list[ArtifactOutput] repart_dirs: list[Path] sysupdate_dir: Optional[Path] sector_size: Optional[int] overlay: bool seed: uuid.UUID packages: list[str] build_packages: list[str] volatile_packages: list[str] package_directories: list[Path] volatile_package_directories: list[Path] with_recommends: bool with_docs: bool base_trees: list[Path] skeleton_trees: list[ConfigTree] extra_trees: list[ConfigTree] remove_packages: list[str] remove_files: list[str] clean_package_metadata: ConfigFeature source_date_epoch: Optional[int] configure_scripts: list[Path] sync_scripts: list[Path] prepare_scripts: list[Path] build_scripts: list[Path] postinst_scripts: list[Path] finalize_scripts: list[Path] postoutput_scripts: list[Path] clean_scripts: list[Path] bootable: ConfigFeature bootloader: Bootloader bios_bootloader: BiosBootloader shim_bootloader: ShimBootloader unified_kernel_images: UnifiedKernelImage unified_kernel_image_format: str unified_kernel_image_profiles: list[UKIProfile] initrds: list[Path] initrd_profiles: list[str] initrd_packages: list[str] initrd_volatile_packages: list[str] microcode_host: bool devicetrees: list[str] splash: Optional[Path] kernel_command_line: list[str] kernel_modules_include: list[str] kernel_modules_exclude: list[str] kernel_modules_include_host: bool firmware_include: list[str] firmware_exclude: list[str] kernel_modules_initrd: bool kernel_modules_initrd_include: list[str] kernel_modules_initrd_exclude: list[str] kernel_modules_initrd_include_host: bool locale: Optional[str] locale_messages: Optional[str] keymap: Optional[str] timezone: Optional[str] hostname: Optional[str] root_password: Optional[tuple[str, bool]] root_shell: Optional[str] machine_id: Optional[uuid.UUID] autologin: bool make_initrd: bool ssh: Ssh selinux_relabel: ConfigFeature secure_boot: bool secure_boot_auto_enroll: bool secure_boot_key: Optional[Path] secure_boot_key_source: KeySource secure_boot_certificate: Optional[Path] secure_boot_certificate_source: CertificateSource secure_boot_sign_tool: SecureBootSignTool verity: Verity verity_key: Optional[Path] verity_key_source: KeySource verity_certificate: Optional[Path] verity_certificate_source: CertificateSource sign_expected_pcr: ConfigFeature sign_expected_pcr_key: Optional[Path] sign_expected_pcr_key_source: KeySource sign_expected_pcr_certificate: Optional[Path] sign_expected_pcr_certificate_source: CertificateSource passphrase: Optional[Path] checksum: bool sign: bool openpgp_tool: str key: Optional[str] tools_tree: Optional[Path] tools_tree_certificates: bool extra_search_paths: list[Path] incremental: Incremental cacheonly: Cacheonly sandbox_trees: list[ConfigTree] workspace_dir: Optional[Path] cache_dir: Optional[Path] cache_key: str package_cache_dir: Optional[Path] build_dir: Optional[Path] build_key: str use_subvolumes: ConfigFeature repart_offline: bool history: bool build_sources: list[ConfigTree] build_sources_ephemeral: 
BuildSourcesEphemeral
    environment: dict[str, str]
    environment_files: list[Path]
    with_tests: bool
    with_network: bool

    proxy_url: Optional[str]
    proxy_exclude: list[str]
    proxy_peer_certificate: Optional[Path]
    proxy_client_certificate: Optional[Path]
    proxy_client_key: Optional[Path]

    nspawn_settings: Optional[Path]
    ephemeral: bool
    credentials: dict[str, PathString]
    kernel_command_line_extra: list[str]
    register: ConfigFeature
    storage_target_mode: ConfigFeature
    runtime_trees: list[ConfigTree]
    runtime_size: Optional[int]
    runtime_network: Network
    runtime_build_sources: bool
    bind_user: bool
    unit_properties: list[str]
    ssh_key: Optional[Path]
    ssh_certificate: Optional[Path]
    machine: Optional[str]
    forward_journal: Optional[Path]

    vmm: Vmm
    console: ConsoleMode
    cpus: int
    ram: int
    maxmem: int
    kvm: ConfigFeature
    cxl: bool
    vsock: ConfigFeature
    vsock_cid: int
    tpm: ConfigFeature
    removable: bool
    firmware: Firmware
    firmware_variables: Optional[Path]
    linux: Optional[str]
    drives: list[Drive]
    qemu_args: list[str]

    image: str

    def finalize_environment(self) -> dict[str, str]:
        env = {
            "SYSTEMD_TMPFILES_FORCE_SUBVOL": "0",
            "SYSTEMD_ASK_PASSWORD_KEYRING_TIMEOUT_SEC": "infinity",
            "SYSTEMD_ASK_PASSWORD_KEYRING_TYPE": "session",
            "TERM": finalize_term(),
        }

        if self.image != "main":
            env["SUBIMAGE"] = self.image
        if self.image_id is not None:
            env["IMAGE_ID"] = self.image_id
        if self.image_version is not None:
            env["IMAGE_VERSION"] = self.image_version
        if self.source_date_epoch is not None:
            env["SOURCE_DATE_EPOCH"] = str(self.source_date_epoch)
        if self.proxy_url is not None:
            for e in ("http_proxy", "https_proxy"):
                env[e] = self.proxy_url
                env[e.upper()] = self.proxy_url
        if self.proxy_exclude:
            env["no_proxy"] = ",".join(self.proxy_exclude)
            env["NO_PROXY"] = ",".join(self.proxy_exclude)
        if self.proxy_peer_certificate:
            env["GIT_PROXY_SSL_CAINFO"] = "/proxy.cacert"
        if self.proxy_client_certificate:
            env["GIT_PROXY_SSL_CERT"] = "/proxy.clientcert"
        if self.proxy_client_key:
            env["GIT_PROXY_SSL_KEY"] = "/proxy.clientkey"
        if dnf := os.getenv("MKOSI_DNF"):
            env["MKOSI_DNF"] = dnf
        if gnupghome := os.getenv("GNUPGHOME"):
            env["GNUPGHOME"] = os.fspath(Path(gnupghome).absolute())

        env |= dict(
            parse_environment(line)
            for f in self.environment_files
            for line in f.read_text().strip().splitlines()
        )
        env |= self.environment
        env |= finalize_git_config(self.proxy_url, env)

        return env

    def is_incremental(self) -> bool:
        return bool(self.incremental) and not self.base_trees and not self.overlay

    def machine_or_name(self) -> str:
        return self.machine or self.image

    def output_dir_or_cwd(self) -> Path:
        return self.output_dir or Path.cwd()

    def workspace_dir_or_default(self) -> Path:
        if self.workspace_dir:
            return self.workspace_dir

        if (
            (cache := INVOKING_USER.cache_dir())
            and cache != Path("/var/cache")
            and os.access(cache, os.W_OK)
        ):
            return cache / "mkosi"

        return Path("/var/tmp")

    def package_cache_dir_or_default(self) -> Path:
        key = f"{self.distribution}~{self.release}~{self.architecture}"
        return self.package_cache_dir or (INVOKING_USER.cache_dir() / "mkosi" / key)

    def tools(self) -> Path:
        if in_box():
            return Path("/")

        return self.tools_tree or Path("/")

    @classmethod
    def default(cls) -> "Config":
        """Alternative constructor to generate an all-default Config.

        This prevents Config being generated with default values implicitly.
""" with chdir("/proc"): _, _, [config] = parse_config([]) return config @classmethod @functools.lru_cache(maxsize=1) def fields(cls) -> dict[str, dataclasses.Field[Any]]: return {f.name: f for f in dataclasses.fields(cls)} @classmethod def from_dict(cls, ns: dict[str, Any]) -> "Config": return cls(**{k: v for k, v in ns.items() if k in cls.fields()}) @property def output_with_format(self) -> str: ext = self.output_extension if not ext: return self.output return f"{self.output}.{ext}" @property def output_with_compression(self) -> str: output = self.output_with_format if self.compress_output and self.output_format.use_outer_compression(): output += f".{self.compress_output.extension()}" return output @property def output_split_uki(self) -> str: return f"{self.output}.efi" @property def output_split_kernel(self) -> str: return f"{self.output}.vmlinuz" @property def output_split_initrd(self) -> str: return f"{self.output}.initrd" @property def output_split_pcrs(self) -> str: return f"{self.output}.pcrs" @property def output_split_roothash(self) -> str: return f"{self.output}.roothash" @property def output_split_os_release(self) -> str: return f"{self.output}.osrelease" @property def output_split_kernel_modules_initrd(self) -> str: return f"{self.output}.kernel-modules-initrd" @property def output_split_repart_definitions(self) -> str: return f"{self.output}.repart.d" @property def output_nspawn_settings(self) -> str: return f"{self.output}.nspawn" @property def output_checksum(self) -> str: return f"{self.output}.SHA256SUMS" @property def output_signature(self) -> str: return f"{self.output}.SHA256SUMS.gpg" @property def output_manifest(self) -> str: return f"{self.output}.manifest" @property def output_changelog(self) -> str: return f"{self.output}.changelog" @property def output_tar(self) -> str: output = f"{self.output}.tar" if self.compress_output: output += f".{self.compress_output.extension()}" return output @property def outputs(self) -> list[str]: return [ self.output, self.output_with_format, self.output_with_compression, self.output_split_uki, self.output_split_kernel, self.output_split_initrd, self.output_split_pcrs, self.output_split_roothash, self.output_split_os_release, self.output_split_kernel_modules_initrd, self.output_split_repart_definitions, self.output_nspawn_settings, self.output_checksum, self.output_signature, self.output_manifest, self.output_changelog, self.output_tar, ] @property def build_subdir(self) -> Path: assert self.build_dir subdir = self.expand_key_specifiers(self.build_key) if subdir == "-": return self.build_dir return self.build_dir / subdir def cache_manifest(self) -> dict[str, Any]: return { "distribution": self.distribution, "release": self.release, "mirror": self.mirror, "snapshot": self.snapshot, "architecture": self.architecture, # Caching the package manager used does not matter for the default tools tree because we don't # cache the package manager metadata for the tools tree either. In fact, it can cause issues as # the cache manifest for the tools tree will sometimes be different depending on whether we're # running inside or outside of the mkosi box environment. To avoid these issues, don't cache the # package manager used in the tools tree cache manifest. 
**( {"package_manager": self.distribution.installer.package_manager(self).executable(self)} if self.image != "tools" else {} ), "packages": sorted(self.packages), "build_packages": sorted(self.build_packages), "package_directories": [ (p.name, p.stat().st_mtime_ns) for d in self.package_directories for p in sorted( flatten( d.glob(glob) for glob in self.distribution.installer.package_manager(self).package_globs() ) ) ], "repositories": sorted(self.repositories), "overlay": self.overlay, "prepare_scripts": sorted( base64.b64encode(script.read_bytes()).decode() for script in self.prepare_scripts ), } def expand_key_specifiers(self, key: str) -> str: specifiers = { "&": "&", "d": str(self.distribution), "r": self.release, "a": str(self.architecture), "i": self.image_id or "", "v": self.image_version or "", "I": self.image, } return expand_delayed_specifiers(specifiers, key) def expand_linux_specifiers(self) -> Path: assert self.linux specifiers = { "&": "&", "b": os.fspath(self.build_subdir) if self.build_dir else "", } return parse_path(expand_delayed_specifiers(specifiers, self.linux)) def to_dict(self) -> dict[str, Any]: d = dataclasses.asdict(self, dict_factory=dict_with_capitalised_keys_factory) if self.build_dir: d["BuildSubdirectory"] = self.build_subdir return d @classmethod def to_partial_dict(cls, partial: dict[str, Any]) -> dict[str, Any]: return dict_with_capitalised_keys_factory([(k, v) for k, v in partial.items() if k in cls.fields()]) @classmethod def from_partial_json( cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]], ) -> dict[str, Any]: """Instantiate a Config object from a (partial) JSON dump.""" if isinstance(s, str): j = json.loads(s) elif isinstance(s, dict): j = s elif hasattr(s, "read"): j = json.load(s) else: raise ValueError( f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files." 
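                # Illustrative: key_transformer below maps "OutputDirectory" to its setting
                # destination "output_dir", while names without a registered setting, such as
                # "AutoBump", fall back to the regex splitter and become "auto_bump".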
            )

        def key_transformer(k: str) -> str:
            if (s := SETTINGS_LOOKUP_BY_NAME.get(k)) is not None:
                return s.dest
            return "_".join(part.lower() for part in FALLBACK_NAME_TO_DEST_SPLITTER.split(k))

        j.pop("BuildSubdirectory", None)

        for k, v in j.items():
            k = key_transformer(k)

            if k not in cls.fields() and (not isinstance(v, (dict, list, set)) or v):
                die(
                    f"Serialized JSON has unknown field {k} with value {v}",
                    hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON",
                )

        value_transformer = json_type_transformer(cls)
        return {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()}

    @classmethod
    def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Config":
        return dataclasses.replace(
            cls.default(), **{k: v for k, v in cls.from_partial_json(s).items() if k in cls.fields()}
        )

    def find_binary(self, *names: PathString, tools: bool = True) -> Optional[Path]:
        return find_binary(*names, root=self.tools() if tools else Path("/"), extra=self.extra_search_paths)

    def sandbox(
        self,
        *,
        network: bool = False,
        devices: bool = False,
        relaxed: bool = False,
        tools: bool = True,
        scripts: Optional[Path] = None,
        overlay: Optional[Path] = None,
        options: Sequence[PathString] = (),
    ) -> AbstractContextManager[list[PathString]]:
        opt: list[PathString] = [*options]

        if not relaxed:
            opt += flatten(("--ro-bind", d, d) for d in self.extra_search_paths)

            if p := self.proxy_peer_certificate:
                opt += ["--ro-bind", os.fspath(p), "/proxy.cacert"]
            if p := self.proxy_client_certificate:
                opt += ["--ro-bind", os.fspath(p), "/proxy.clientcert"]
            if p := self.proxy_client_key:
                opt += ["--ro-bind", os.fspath(p), "/proxy.clientkey"]

        return sandbox_cmd(
            network=network,
            devices=devices,
            relaxed=relaxed,
            scripts=scripts,
            tools=self.tools() if tools else Path("/"),
            overlay=overlay,
            options=opt,
            extra=self.extra_search_paths,
        )


def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple[str, str, str]]:
    """
    We have our own parser instead of using configparser as the latter does not support specifying the
    same setting multiple times in the same configuration file.
    """
    section: Optional[str] = None
    setting: Optional[str] = None
    value: Optional[str] = None

    for line in textwrap.dedent(path.read_text()).splitlines():
        comment = line.find("#")
        if comment >= 0:
            line = line[:comment]

        if not line.strip():
            continue

        # If we have a section, setting and value, any line that's indented is considered part of the
        # setting's value.
        if section and setting and value is not None and line[0].isspace():
            value = f"{value}\n{line.strip()}"
            continue

        # The line is not indented, which means we either found a new section or a new setting. Either
        # way, let's yield the previous setting and its value before parsing the new section/setting.
        if section and setting and value is not None:
            yield section, setting, value
            setting = value = None

        line = line.strip()

        if line[0] == "[":
            if line[-1] != "]":
                die(f"{line} is not a valid section")

            # Yield the section name with an empty key and value to indicate we've finished the current
            # section.
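            # Illustrative (hypothetical input): given
            #
            #   [Content]
            #   Packages=vim
            #   Packages=git
            #            less
            #
            # this generator yields ("Content", "Packages", "vim"), then ("Content",
            # "Packages", "git\nless"), and finally the ("Content", "", "") sentinel,
            # which is how repeated settings survive where configparser would collapse them.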
if section: yield section, "", "" section = line[1:-1].strip() if not section: die("Section name cannot be empty or whitespace") continue if not section: die(f"Setting {line} is located outside of section") if only_sections and section not in only_sections: continue setting, delimiter, value = line.partition("=") if not delimiter: die(f"Setting {setting} must be followed by '='") if not setting: die(f"Missing setting name before '=' in {line}") setting = setting.strip() value = value.strip() # Make sure we yield any final setting and its value. if section and setting and value is not None: yield section, setting, value if section and (not only_sections or section in only_sections): yield section, "", "" def parse_kernel_module_filter_regexp(p: str) -> str: if p in ("default", "host"): return p return f"re:{p}" UKI_PROFILE_SETTINGS: list[ConfigSetting[Any]] = [ ConfigSetting( dest="profile", section="UKIProfile", parse=config_make_dict_parser(parse=parse_key_value), ), ConfigSetting( dest="cmdline", section="UKIProfile", parse=config_make_list_parser(delimiter=" "), ), ConfigSetting( dest="sign_expected_pcr", section="UKIProfile", parse=config_parse_boolean, default=True, ), ] SETTINGS: list[ConfigSetting[Any]] = [ # Include section ConfigSetting( dest="include", short="-I", section="Include", parse=config_make_list_parser( delimiter=",", reset=False, parse=make_path_parser(constants=BUILTIN_CONFIGS), ), help="Include configuration from the specified file or directory", tools=True, ), # Config section ConfigSetting( dest="profiles", long="--profile", section="Config", help="Build the specified profiles", parse=config_make_list_parser(delimiter=",", parse=parse_profile), match=config_make_list_matcher(parse=parse_profile), scope=SettingScope.inherit, compat_names=("Profile",), tools=True, ), ConfigSetting( dest="dependencies", long="--dependency", section="Config", parse=config_make_list_parser(delimiter=","), help="Specify other images that this image depends on", ), ConfigSetting( dest="minimum_version", section="Config", parse=config_parse_minimum_version, help="Specify the minimum required mkosi version", scope=SettingScope.main, ), ConfigSetting( dest="configure_scripts", long="--configure-script", metavar="PATH", section="Config", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("configure",), help="Configure script to run before doing anything", ), ConfigSetting( dest="pass_environment", metavar="NAME", section="Config", parse=config_make_list_parser(delimiter=" "), help="Environment variables to pass to subimages", scope=SettingScope.main, ), # Distribution section ConfigSetting( dest="distribution", short="-d", section="Distribution", specifier="d", parse=config_make_enum_parser(Distribution), match=config_make_enum_matcher(Distribution), default_factory=config_default_distribution, choices=Distribution.choices(), help="Distribution to install", scope=SettingScope.universal, ), ConfigSetting( dest="release", short="-r", section="Distribution", specifier="r", parse=config_parse_string, match=config_make_string_matcher(), default_factory=config_default_release, default_factory_depends=("distribution",), help="Distribution release to install", scope=SettingScope.universal, ), ConfigSetting( dest="architecture", section="Distribution", specifier="a", parse=config_make_enum_parser(Architecture), match=config_match_architecture, default=Architecture.native(), choices=Architecture.choices(), help="Override the architecture of installation", 
scope=SettingScope.universal, ), ConfigSetting( dest="mirror", short="-m", section="Distribution", help="Distribution mirror to use", scope=SettingScope.universal, ), ConfigSetting( dest="snapshot", section="Distribution", help="Distribution snapshot to use", path_suffixes=("snapshot",), path_read_text=True, scope=SettingScope.universal, tools=True, ), ConfigSetting( dest="local_mirror", section="Distribution", help="Use a single local, flat and plain mirror to build the image", scope=SettingScope.universal, ), ConfigSetting( dest="repository_key_check", metavar="BOOL", section="Distribution", default=True, parse=config_parse_boolean, help="Controls signature and key checks on repositories", scope=SettingScope.multiversal, ), ConfigSetting( dest="repository_key_fetch", metavar="BOOL", section="Distribution", default_factory_depends=("distribution", "tools_tree", "tools_tree_distribution"), default_factory=config_default_repository_key_fetch, parse=config_parse_boolean, help="Controls whether distribution GPG keys can be fetched remotely", scope=SettingScope.universal, tools=True, ), ConfigSetting( dest="repositories", metavar="REPOS", section="Distribution", parse=config_make_list_parser(delimiter=","), match=config_make_list_matcher(parse=str), help="Repositories to use", scope=SettingScope.universal, tools=True, ), # Output section ConfigSetting( dest="output_format", short="-t", long="--format", name="Format", section="Output", specifier="t", parse=config_make_enum_parser(OutputFormat), match=config_make_enum_matcher(OutputFormat), default=OutputFormat.disk, choices=OutputFormat.choices(), help="Output Format", ), ConfigSetting( dest="manifest_format", metavar="FORMAT", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_enum_parser(ManifestFormat)), help="Manifest Format", ), ConfigSetting( dest="output", short="-o", metavar="NAME", section="Output", specifier="o", parse=config_make_filename_parser( "Output= or --output= requires a filename with no path components. " "Use OutputDirectory= or --output-directory= to configure the output directory." ), default_factory=config_default_output, default_factory_depends=("image_id", "image_version"), help="Output name", tools=True, ), ConfigSetting( dest="output_extension", metavar="EXTENSION", section="Output", parse=config_make_filename_parser( "OutputExtension= or --output-extension= requires a valid extension with no path components." 
), help="Output extension", default_factory=lambda ns: ns["output_format"].extension(), default_factory_depends=("output_format",), ), ConfigSetting( dest="compress_output", metavar="ALG", section="Output", parse=config_parse_compression, default_factory=config_default_compression, default_factory_depends=("distribution", "release", "output_format"), help="Enable whole-output compression (with images or archives)", ), ConfigSetting( dest="compress_level", metavar="LEVEL", section="Output", parse=config_parse_compress_level, default=3, help="Set the compression level to use", scope=SettingScope.inherit, ), ConfigSetting( dest="output_dir", short="-O", long="--output-directory", compat_longs=("--output-dir",), metavar="DIR", name="OutputDirectory", section="Output", specifier="O", parse=config_make_path_parser(required=False), path_suffixes=("output",), help="Output directory", scope=SettingScope.universal, tools=True, ), ConfigSetting( dest="output_mode", metavar="MODE", section="Output", parse=config_parse_mode, help="Set file system access mode for image", scope=SettingScope.universal, ), ConfigSetting( dest="image_version", match=config_match_version, section="Output", specifier="v", help="Set version for image", path_suffixes=("version",), path_read_text=True, scope=SettingScope.inherit, ), ConfigSetting( dest="image_id", match=config_make_string_matcher(allow_globs=True), section="Output", specifier="i", help="Set ID for image", scope=SettingScope.inherit, ), ConfigSetting( dest="split_artifacts", section="Output", parse=config_parse_artifact_output_list, default=ArtifactOutput.compat_no(), help="Split artifacts out of the final image", ), ConfigSetting( dest="repart_dirs", long="--repart-directory", compat_longs=("--repart-dir",), metavar="PATH", name="RepartDirectories", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("repart",), help="Directory containing systemd-repart partition definitions", ), ConfigSetting( dest="sector_size", section="Output", parse=config_parse_sector_size, help="Set the disk image sector size", scope=SettingScope.inherit, ), ConfigSetting( dest="overlay", metavar="BOOL", section="Output", parse=config_parse_boolean, help="Only output the additions on top of the given base trees", ), ConfigSetting( dest="seed", metavar="UUID", section="Output", parse=config_parse_uuid, default=uuid.uuid4(), path_suffixes=("seed",), path_read_text=True, help="Set the seed for systemd-repart", ), ConfigSetting( dest="clean_scripts", long="--clean-script", metavar="PATH", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("clean",), recursive_path_suffixes=("clean.d/*",), help="Clean script to run after cleanup", ), # Content section ConfigSetting( dest="packages", short="-p", long="--package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=",", key=package_sort_key), help="Add an additional package to the OS image", tools=True, ), ConfigSetting( dest="build_packages", long="--build-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=",", key=package_sort_key), help="Additional packages needed for build scripts", ), ConfigSetting( dest="volatile_packages", long="--volatile-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=",", key=package_sort_key), help="Packages to install after executing build scripts", ), ConfigSetting( dest="package_directories", 
long="--package-directory", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("packages",), help="Specify a directory containing extra packages", scope=SettingScope.universal, tools=True, ), ConfigSetting( dest="volatile_package_directories", long="--volatile-package-directory", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), help="Specify a directory containing extra volatile packages", scope=SettingScope.universal, ), ConfigSetting( dest="with_recommends", metavar="BOOL", section="Content", parse=config_parse_boolean, help="Install recommended packages", ), ConfigSetting( dest="with_docs", metavar="BOOL", section="Content", parse=config_parse_boolean, default=True, help="Install documentation", ), ConfigSetting( dest="base_trees", long="--base-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), help="Use the given tree as base tree (e.g. lower sysext layer)", ), ConfigSetting( dest="skeleton_trees", long="--skeleton-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(required=True)), path_suffixes=("skeleton", "skeleton.tar"), help="Use a skeleton tree to bootstrap the image before installing anything", ), ConfigSetting( dest="extra_trees", long="--extra-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser()), path_suffixes=("extra", "extra.tar"), help="Copy an extra tree on top of image", ), ConfigSetting( dest="remove_packages", long="--remove-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Remove package from the image OS image after installation", ), ConfigSetting( dest="remove_files", metavar="GLOB", section="Content", parse=config_make_list_parser(delimiter=","), help="Remove files from built image", ), ConfigSetting( dest="clean_package_metadata", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Remove package manager database and other files", ), ConfigSetting( dest="source_date_epoch", metavar="TIMESTAMP", section="Content", parse=config_parse_source_date_epoch, default_factory=config_default_source_date_epoch, default_factory_depends=("environment",), help="Set the $SOURCE_DATE_EPOCH timestamp", scope=SettingScope.multiversal, ), ConfigSetting( dest="sync_scripts", long="--sync-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("sync",), recursive_path_suffixes=("sync.d/*",), help="Sync script to run before starting the build", tools=True, ), ConfigSetting( dest="prepare_scripts", long="--prepare-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("prepare", "prepare.chroot"), recursive_path_suffixes=("prepare.d/*",), help="Prepare script to run inside the image before it is cached", compat_names=("PrepareScript",), tools=True, ), ConfigSetting( dest="build_scripts", long="--build-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("build", "build.chroot"), recursive_path_suffixes=("build.d/*",), help="Build script to run inside image", compat_names=("BuildScript",), ), ConfigSetting( dest="postinst_scripts", long="--postinst-script", metavar="PATH", 
name="PostInstallationScripts", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("postinst", "postinst.chroot"), recursive_path_suffixes=("postinst.d/*",), help="Postinstall script to run inside image", compat_names=("PostInstallationScript",), ), ConfigSetting( dest="finalize_scripts", long="--finalize-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("finalize", "finalize.chroot"), recursive_path_suffixes=("finalize.d/*",), help="Postinstall script to run outside image", compat_names=("FinalizeScript",), ), ConfigSetting( dest="postoutput_scripts", long="--postoutput-script", metavar="PATH", name="PostOutputScripts", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("postoutput",), recursive_path_suffixes=("postoutput.d/*",), help="Output postprocessing script to run outside image", ), ConfigSetting( dest="bootable", metavar="FEATURE", section="Content", parse=config_parse_feature, match=config_match_feature, help="Generate ESP partition with systemd-boot and UKIs for installed kernels", ), ConfigSetting( dest="bootloader", section="Content", parse=config_make_enum_parser(Bootloader), choices=Bootloader.choices(), default=Bootloader.systemd_boot, help="Specify which UEFI bootloader to use", ), ConfigSetting( dest="bios_bootloader", section="Content", parse=config_make_enum_parser(BiosBootloader), choices=BiosBootloader.choices(), default=BiosBootloader.none, help="Specify which BIOS bootloader to use", ), ConfigSetting( dest="shim_bootloader", section="Content", parse=config_make_enum_parser(ShimBootloader), choices=ShimBootloader.choices(), default=ShimBootloader.none, help="Specify whether to use shim", ), ConfigSetting( dest="unified_kernel_images", metavar="FEATURE", section="Content", parse=config_make_enum_parser_with_boolean( UnifiedKernelImage, yes=UnifiedKernelImage.signed, no=UnifiedKernelImage.none ), default=UnifiedKernelImage.auto, help="Specify whether to use UKIs with grub/systemd-boot in UEFI mode", ), ConfigSetting( dest="unified_kernel_image_format", section="Content", parse=config_make_filename_parser( "UnifiedKernelImageFormat= or --unified-kernel-image-format= " "requires a filename with no path components." ), # The default value is set in `__init__.py` in `install_uki`. # `None` is used to determine if the roothash should be appended # to the filename if they are found. 
# default= help="Specify the format used for the UKI filename", ), ConfigSetting( dest="unified_kernel_image_profiles", long="--uki-profile", metavar="PATH", section="Content", parse=config_make_list_parser( delimiter=",", parse=make_simple_config_parser(UKI_PROFILE_SETTINGS, UKIProfile), ), recursive_path_suffixes=("uki-profiles/*.conf",), help="Configuration files to generate UKI profiles", ), ConfigSetting( dest="initrds", long="--initrd", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), help="Add a user-provided initrd to image", ), ConfigSetting( dest="microcode_host", metavar="BOOL", section="Content", parse=config_parse_boolean, default=False, help="Only include the host CPU's microcode", ), ConfigSetting( dest="initrd_profiles", long="--initrd-profile", metavar="PROFILE", section="Content", parse=config_make_list_parser(delimiter=","), choices=InitrdProfile.values(), default=[], help="Which profiles to enable for the default initrd", scope=SettingScope.initrd, ), ConfigSetting( dest="initrd_packages", long="--initrd-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Add additional packages to the default initrd", scope=SettingScope.initrd, ), ConfigSetting( dest="initrd_volatile_packages", long="--initrd-volatile-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Packages to install in the initrd that are not cached", scope=SettingScope.initrd, ), ConfigSetting( dest="devicetrees", section="Content", parse=config_make_list_parser(delimiter=","), help="Devicetree(s) to be used by the booting kernel", compat_names=("Devicetree",), ), ConfigSetting( dest="splash", section="Content", parse=config_make_path_parser(required=False, absolute=True), help="Splash screen image to be used by the booting kernel", ), ConfigSetting( dest="kernel_command_line", metavar="OPTIONS", section="Content", parse=config_make_list_parser(delimiter=" "), help="Set the kernel command line (only bootable images)", ), ConfigSetting( dest="kernel_modules_include", metavar="REGEX", section="Content", parse=config_make_list_parser( delimiter=",", parse=parse_kernel_module_filter_regexp, ), help="Include the specified kernel modules in the image", ), ConfigSetting( dest="kernel_modules_exclude", metavar="REGEX", section="Content", parse=config_make_list_parser( delimiter=",", parse=parse_kernel_module_filter_regexp, ), help="Exclude the specified kernel modules from the image", ), ConfigSetting( dest="kernel_modules_include", name="KernelModules", long="--kernel-modules", metavar="GLOB", section="Content", parse=config_make_list_parser(delimiter=","), help="Include/exclude the specified kernel modules in the image", ), ConfigSetting( dest="kernel_modules_include_host", metavar="BOOL", section="Content", parse=config_parse_boolean, help="Include the currently loaded modules on the host in the image", ), ConfigSetting( dest="kernel_modules_initrd", metavar="BOOL", section="Content", parse=config_parse_boolean, default=True, help="When building a bootable image, add an extra initrd containing the kernel modules", ), ConfigSetting( dest="kernel_modules_initrd_include", metavar="REGEX", section="Content", parse=config_make_list_parser( delimiter=",", parse=parse_kernel_module_filter_regexp, ), help="When building a kernel modules initrd, include the specified kernel modules", ), ConfigSetting( dest="kernel_modules_initrd_exclude", metavar="REGEX", 
section="Content", parse=config_make_list_parser( delimiter=",", parse=parse_kernel_module_filter_regexp, ), help="When building a kernel modules initrd, exclude the specified kernel modules", ), ConfigSetting( dest="kernel_modules_initrd_include", name="KernelInitrdModules", long="--kernel-initrd-modules", metavar="GLOB", section="Content", parse=config_make_list_parser(delimiter=","), help="Include/exclude modules in the initrd", ), ConfigSetting( dest="kernel_modules_initrd_include_host", metavar="BOOL", section="Content", parse=config_parse_boolean, help="When building a kernel modules initrd, include the currently loaded modules " "on the host in the image", ), ConfigSetting( dest="firmware_include", metavar="REGEX", section="Content", parse=config_make_list_parser( delimiter=",", parse=parse_kernel_module_filter_regexp, ), help="Include the specified firmware in the image", ), ConfigSetting( dest="firmware_exclude", metavar="REGEX", section="Content", parse=config_make_list_parser( delimiter=",", parse=parse_kernel_module_filter_regexp, ), help="Exclude the specified firmware from the image", ), ConfigSetting( dest="firmware_include", name="FirmwareFiles", long="--firmware-files", metavar="GLOB", section="Content", parse=config_make_list_parser(delimiter=","), help="Include/exclude the specified firmware in the image", ), ConfigSetting( dest="locale", section="Content", parse=config_parse_string, help="Set the system locale", ), ConfigSetting( dest="locale_messages", metavar="LOCALE", section="Content", parse=config_parse_string, help="Set the messages locale", ), ConfigSetting( dest="keymap", metavar="KEYMAP", section="Content", parse=config_parse_string, help="Set the system keymap", scope=SettingScope.initrd_inherit, ), ConfigSetting( dest="timezone", metavar="TIMEZONE", section="Content", parse=config_parse_string, help="Set the system timezone", scope=SettingScope.initrd_inherit, ), ConfigSetting( dest="hostname", metavar="HOSTNAME", section="Content", parse=config_parse_string, help="Set the system hostname", scope=SettingScope.initrd_inherit, ), ConfigSetting( dest="root_password", metavar="PASSWORD", section="Content", parse=config_parse_root_password, path_suffixes=("rootpw",), path_read_text=True, path_secret=True, help="Set the password for root", scope=SettingScope.initrd_inherit, ), ConfigSetting( dest="root_shell", metavar="SHELL", section="Content", parse=config_parse_string, help="Set the shell for root", ), ConfigSetting( dest="machine_id", metavar="MACHINE_ID", section="Content", parse=config_parse_uuid, path_suffixes=("machine-id",), path_read_text=True, help="Set the machine ID to use", ), ConfigSetting( dest="autologin", short="-a", const=True, metavar="BOOL", section="Content", parse=config_parse_boolean, help="Enable root autologin", ), ConfigSetting( dest="make_initrd", metavar="BOOL", section="Content", parse=config_parse_boolean, help="Make sure the image can be used as an initramfs", ), ConfigSetting( dest="ssh", section="Content", parse=config_make_enum_parser_with_boolean(Ssh, yes=Ssh.always, no=Ssh.never), default=Ssh.auto, choices=Ssh.choices(), help="Set up SSH access from the host to the final image via 'mkosi ssh'", ), ConfigSetting( dest="selinux_relabel", name="SELinuxRelabel", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Specify whether to relabel all files with setfiles", ), # Validation section ConfigSetting( dest="secure_boot", metavar="BOOL", section="Validation", parse=config_parse_boolean, help="Sign the resulting 
kernel/initrd image for UEFI SecureBoot", ), ConfigSetting( dest="secure_boot_auto_enroll", metavar="BOOL", section="Validation", parse=config_parse_boolean, default=True, help="Automatically enroll the secureboot signing key on first boot", ), ConfigSetting( dest="secure_boot_key", metavar="KEY", section="Validation", parse=config_parse_key, path_suffixes=("key",), help="UEFI SecureBoot private key", scope=SettingScope.universal, ), ConfigSetting( dest="secure_boot_key_source", section="Validation", metavar="SOURCE[:ENGINE]", parse=config_parse_key_source, default=KeySource(type=KeySourceType.file), help="The source to use to retrieve the secure boot signing key", scope=SettingScope.universal, ), ConfigSetting( dest="secure_boot_certificate", metavar="PATH", section="Validation", parse=config_parse_certificate, path_suffixes=("crt",), help="UEFI SecureBoot certificate in X509 format", scope=SettingScope.universal, ), ConfigSetting( dest="secure_boot_certificate_source", section="Validation", metavar="SOURCE[:PROVIDER]", parse=config_parse_certificate_source, default=CertificateSource(type=CertificateSourceType.file), help="The source to use to retrieve the secure boot signing certificate", scope=SettingScope.universal, ), ConfigSetting( dest="secure_boot_sign_tool", section="Validation", parse=config_make_enum_parser(SecureBootSignTool), default=SecureBootSignTool.auto, choices=SecureBootSignTool.choices(), help="Tool to use for signing PE binaries for secure boot", ), ConfigSetting( dest="verity", section="Validation", metavar="FEATURE", parse=config_make_enum_parser_with_boolean(Verity, yes=Verity.signed, no=Verity.disabled), default=Verity.auto, choices=Verity.values(), help="Configure whether to enforce or disable verity partitions for disk images", ), ConfigSetting( dest="verity_key", metavar="KEY", section="Validation", parse=config_parse_key, path_suffixes=("key",), help="Private key for signing verity signature", scope=SettingScope.universal, ), ConfigSetting( dest="verity_key_source", section="Validation", metavar="SOURCE[:ENGINE]", parse=config_parse_key_source, default=KeySource(type=KeySourceType.file), help="The source to use to retrieve the verity signing key", scope=SettingScope.universal, ), ConfigSetting( dest="verity_certificate", metavar="PATH", section="Validation", parse=config_parse_certificate, path_suffixes=("crt",), help="Certificate for signing verity signature in X509 format", scope=SettingScope.universal, ), ConfigSetting( dest="verity_certificate_source", section="Validation", metavar="SOURCE[:PROVIDER]", parse=config_parse_certificate_source, default=CertificateSource(type=CertificateSourceType.file), help="The source to use to retrieve the verity signing certificate", scope=SettingScope.universal, ), ConfigSetting( dest="sign_expected_pcr", metavar="FEATURE", section="Validation", parse=config_parse_feature, help="Measure the components of the unified kernel image (UKI) and " "embed the PCR signature into the UKI", ), ConfigSetting( dest="sign_expected_pcr_key", metavar="KEY", section="Validation", parse=config_parse_key, path_suffixes=("key",), help="Private key for signing expected PCR signature", scope=SettingScope.inherit, ), ConfigSetting( dest="sign_expected_pcr_key_source", section="Validation", metavar="SOURCE[:ENGINE]", parse=config_parse_key_source, default=KeySource(type=KeySourceType.file), help="The source to use to retrieve the expected PCR signing key", scope=SettingScope.inherit, ), ConfigSetting( dest="sign_expected_pcr_certificate", 
metavar="PATH", section="Validation", parse=config_parse_certificate, path_suffixes=("crt",), help="Certificate for signing expected PCR signature in X509 format", scope=SettingScope.inherit, ), ConfigSetting( dest="sign_expected_pcr_certificate_source", section="Validation", metavar="SOURCE[:PROVIDER]", parse=config_parse_certificate_source, default=CertificateSource(type=CertificateSourceType.file), help="The source to use to retrieve the expected PCR signing certificate", scope=SettingScope.inherit, ), ConfigSetting( dest="passphrase", metavar="PATH", section="Validation", parse=config_make_path_parser(required=False, secret=True), path_suffixes=("passphrase",), help="Path to a file containing the passphrase to use when LUKS encryption is selected", ), ConfigSetting( dest="checksum", metavar="BOOL", section="Validation", parse=config_parse_boolean, default_factory=lambda ns: True if ns["sign"] else False, default_factory_depends=("sign",), help="Write SHA256SUMS file", ), ConfigSetting( dest="sign", metavar="BOOL", section="Validation", parse=config_parse_boolean, help="Write and sign SHA256SUMS file", ), ConfigSetting( dest="key", section="Validation", help="GPG key to use for signing", ), ConfigSetting( name="OpenPGPTool", dest="openpgp_tool", section="Validation", default="gpg", help="OpenPGP implementation to use for signing", ), # Build section ConfigSetting( dest="tools_tree", metavar="PATH", section="Build", parse=config_make_path_parser(constants=("default", "yes", "no")), path_suffixes=("tools",), help="Look up programs to execute inside the given tree", scope=SettingScope.universal, ), ConfigSetting( dest="tools_tree_distribution", section="Build", parse=config_make_enum_parser(Distribution), match=config_make_enum_matcher(Distribution), choices=Distribution.choices(), default_factory=config_default_tools_tree_distribution, help="Set the distribution to use for the default tools tree", scope=SettingScope.tools, ), ConfigSetting( dest="tools_tree_release", metavar="RELEASE", section="Build", parse=config_parse_string, match=config_make_string_matcher(), default_factory_depends=("tools_tree_distribution",), default_factory=( lambda ns: d.installer.default_release() if (d := ns["tools_tree_distribution"]) else None ), help="Set the release to use for the default tools tree", scope=SettingScope.tools, ), ConfigSetting( dest="tools_tree_profiles", long="--tools-tree-profile", metavar="PROFILE", section="Build", parse=config_make_list_parser(delimiter=","), choices=ToolsTreeProfile.values(), default=[str(s) for s in ToolsTreeProfile.default()], help="Which profiles to enable for the default tools tree", scope=SettingScope.tools, ), ConfigSetting( dest="tools_tree_mirror", metavar="MIRROR", section="Build", default_factory_depends=("distribution", "mirror", "tools_tree_distribution"), default_factory=( lambda ns: ns["mirror"] if ns["mirror"] and ns["distribution"] == ns["tools_tree_distribution"] else None ), help="Set the mirror to use for the default tools tree", scope=SettingScope.tools, ), ConfigSetting( dest="tools_tree_repositories", long="--tools-tree-repository", metavar="REPOS", section="Build", parse=config_make_list_parser(delimiter=","), help="Repositories to use for the default tools tree", scope=SettingScope.tools, ), ConfigSetting( dest="tools_tree_sandbox_trees", long="--tools-tree-sandbox-tree", compat_names=("ToolsTreePackageManagerTrees",), compat_longs=("--tools-tree-package-manager-tree",), metavar="PATH", section="Build", 
parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(required=True)), help="Sandbox trees for the default tools tree", scope=SettingScope.tools, ), ConfigSetting( dest="tools_tree_packages", long="--tools-tree-package", metavar="PACKAGE", section="Build", parse=config_make_list_parser(delimiter=","), help="Add additional packages to the default tools tree", scope=SettingScope.tools, ), ConfigSetting( dest="tools_tree_package_directories", long="--tools-tree-package-directory", metavar="PATH", section="Build", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), help="Specify a directory containing extra tools tree packages", scope=SettingScope.tools, ), ConfigSetting( dest="tools_tree_certificates", metavar="BOOL", section="Build", parse=config_parse_boolean, help="Use certificates from the tools tree", default=True, scope=SettingScope.universal, ), ConfigSetting( dest="extra_search_paths", long="--extra-search-path", metavar="PATH", section="Build", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(exclude=["/usr"])), help="List of comma-separated paths to look for programs before looking in PATH", scope=SettingScope.universal, ), ConfigSetting( dest="incremental", short="-i", const=Incremental.yes, section="Build", parse=config_make_enum_parser_with_boolean(Incremental, yes=Incremental.yes, no=Incremental.no), default=Incremental.no, help="Make use of and generate intermediary cache images", scope=SettingScope.universal, choices=Incremental.values(), ), ConfigSetting( dest="cacheonly", long="--cache-only", name="CacheOnly", section="Build", parse=config_make_enum_parser_with_boolean(Cacheonly, yes=Cacheonly.always, no=Cacheonly.auto), default=Cacheonly.auto, help="Only use the package cache when installing packages", choices=Cacheonly.choices(), scope=SettingScope.multiversal, ), ConfigSetting( dest="sandbox_trees", long="--sandbox-tree", compat_names=("PackageManagerTrees",), compat_longs=("--package-manager-tree",), metavar="PATH", section="Build", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(required=True)), help="Use a sandbox tree to configure the various tools that mkosi executes", path_suffixes=("sandbox", "sandbox.tar", "pkgmngr", "pkgmngr.tar"), scope=SettingScope.universal, tools=True, ), ConfigSetting( dest="workspace_dir", long="--workspace-directory", compat_longs=("--workspace-dir",), metavar="DIR", name="WorkspaceDirectory", section="Build", parse=config_make_path_parser(required=False), help="Workspace directory", scope=SettingScope.multiversal, ), ConfigSetting( dest="cache_dir", long="--cache-directory", compat_longs=("--cache-dir",), metavar="PATH", name="CacheDirectory", section="Build", parse=config_make_path_parser(required=False), path_suffixes=("cache",), help="Incremental cache directory", scope=SettingScope.universal, ), ConfigSetting( dest="cache_key", metavar="KEY", section="Build", parse=config_parse_string, help="Cache key to use within cache directory", default="&d~&r~&a~&I", scope=SettingScope.inherit, ), ConfigSetting( dest="package_cache_dir", long="--package-cache-directory", compat_longs=("--package-cache-dir",), metavar="PATH", name="PackageCacheDirectory", section="Build", parse=config_make_path_parser(required=False), path_suffixes=("pkgcache",), help="Package cache directory", scope=SettingScope.multiversal, ), ConfigSetting( dest="build_dir", long="--build-directory", compat_longs=("--build-dir",), metavar="PATH", name="BuildDirectory", section="Build", 
parse=config_make_path_parser(required=False), path_suffixes=("builddir",), help="Path to use as persistent build directory", scope=SettingScope.universal, ), ConfigSetting( dest="build_key", metavar="KEY", section="Build", parse=config_parse_string, help="Build key to use within build directory", default="&d~&r~&a", scope=SettingScope.inherit, ), ConfigSetting( dest="use_subvolumes", metavar="FEATURE", section="Build", parse=config_parse_feature, help="Use btrfs subvolumes for faster directory operations where possible", scope=SettingScope.universal, ), ConfigSetting( dest="repart_offline", section="Build", parse=config_parse_boolean, help="Build disk images without using loopback devices", default=True, scope=SettingScope.universal, ), ConfigSetting( dest="history", metavar="BOOL", section="Build", parse=config_parse_boolean, help="Whether mkosi can store information about previous builds", scope=SettingScope.main, ), ConfigSetting( dest="build_sources", metavar="PATH", section="Build", parse=config_make_list_parser( delimiter=",", parse=make_tree_parser( absolute=False, required=True, directory=True, ), ), match=config_match_build_sources, default_factory=lambda ns: [ConfigTree(ns["directory"], None)] if ns["directory"] else [], help="Path for sources to build", scope=SettingScope.multiversal, ), ConfigSetting( dest="build_sources_ephemeral", section="Build", parse=config_make_enum_parser_with_boolean( BuildSourcesEphemeral, yes=BuildSourcesEphemeral.yes, no=BuildSourcesEphemeral.no ), default=BuildSourcesEphemeral.no, help="Make build sources ephemeral when running scripts", scope=SettingScope.multiversal, choices=BuildSourcesEphemeral.values(), ), ConfigSetting( dest="environment", short="-E", metavar="NAME[=VALUE]", section="Build", parse=config_make_dict_parser(delimiter=" ", parse=parse_environment, unescape=True), match=config_match_key_value, help="Set an environment variable when running scripts", tools=True, ), ConfigSetting( dest="environment_files", long="--env-file", metavar="PATH", section="Build", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), path_suffixes=("env",), help="Environment files to set when running scripts", tools=True, ), ConfigSetting( dest="with_tests", short="-T", const=False, section="Build", parse=config_parse_boolean, default=True, help="Do not run tests as part of build scripts, if supported", scope=SettingScope.universal, ), ConfigSetting( dest="with_network", metavar="BOOL", section="Build", parse=config_parse_boolean, help="Run build and postinst scripts with network access (instead of private network)", scope=SettingScope.universal, ), ConfigSetting( dest="proxy_url", section="Build", default_factory=config_default_proxy_url, default_factory_depends=("environment",), metavar="URL", help="Set the proxy to use", scope=SettingScope.multiversal, ), ConfigSetting( dest="proxy_exclude", section="Build", default_factory=config_default_proxy_exclude, default_factory_depends=("environment",), metavar="HOST", parse=config_make_list_parser(delimiter=","), help="Don't use the configured proxy for the specified host(s)", scope=SettingScope.multiversal, ), ConfigSetting( dest="proxy_peer_certificate", section="Build", parse=config_make_path_parser(), default_factory=config_default_proxy_peer_certificate, help="Set the proxy peer certificate", scope=SettingScope.multiversal, ), ConfigSetting( dest="proxy_client_certificate", section="Build", parse=config_make_path_parser(secret=True), help="Set the proxy client certificate", 
scope=SettingScope.multiversal, ), ConfigSetting( dest="proxy_client_key", section="Build", default_factory=lambda ns: ns["proxy_client_certificate"], default_factory_depends=("proxy_client_certificate",), parse=config_make_path_parser(secret=True), help="Set the proxy client key", scope=SettingScope.multiversal, ), # Runtime section ConfigSetting( dest="nspawn_settings", name="NSpawnSettings", long="--settings", metavar="PATH", section="Runtime", parse=config_make_path_parser(), path_suffixes=("nspawn",), help="Add in .nspawn settings file", scope=SettingScope.main, ), ConfigSetting( dest="ephemeral", metavar="BOOL", section="Runtime", parse=config_parse_boolean, help=( "If specified, the container/VM is run with a temporary snapshot of the output " "image that is removed immediately when the container/VM terminates" ), scope=SettingScope.main, ), ConfigSetting( dest="credentials", long="--credential", metavar="NAME=VALUE", section="Runtime", parse=config_make_dict_parser(delimiter=" ", parse=parse_key_value, allow_paths=True, unescape=True), help="Pass a systemd credential to a systemd-nspawn container or a virtual machine", path_suffixes=("credentials",), scope=SettingScope.main, ), ConfigSetting( dest="kernel_command_line_extra", metavar="OPTIONS", section="Runtime", parse=config_make_list_parser(delimiter=" "), help="Append extra entries to the kernel command line when booting the image", scope=SettingScope.main, ), ConfigSetting( dest="runtime_trees", long="--runtime-tree", metavar="SOURCE:[TARGET]", section="Runtime", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(absolute=False)), help="Additional mounts to add when booting the image", scope=SettingScope.main, ), ConfigSetting( dest="runtime_size", metavar="SIZE", section="Runtime", parse=config_parse_bytes, help="Grow disk images to the specified size before booting them", scope=SettingScope.main, ), ConfigSetting( dest="runtime_network", section="Runtime", parse=config_make_enum_parser(Network), choices=Network.choices(), help="Set networking backend to use when booting the image", default=Network.user, scope=SettingScope.main, ), ConfigSetting( dest="runtime_build_sources", metavar="BOOL", section="Runtime", parse=config_parse_boolean, help="Mount build sources and build directory in /work when booting the image", scope=SettingScope.main, ), ConfigSetting( dest="bind_user", metavar="BOOL", section="Runtime", parse=config_parse_boolean, help="Bind current user from host into container or virtual machine", scope=SettingScope.main, ), ConfigSetting( dest="unit_properties", long="--unit-property", metavar="PROPERTY", section="Runtime", parse=config_make_list_parser(delimiter=" ", unescape=True), help="Set properties on the scopes spawned by systemd-nspawn or systemd-run", scope=SettingScope.main, ), ConfigSetting( dest="ssh_key", metavar="PATH", section="Runtime", parse=config_make_path_parser(secret=True), path_suffixes=("key",), help="Private key for use with mkosi ssh in PEM format", scope=SettingScope.main, ), ConfigSetting( dest="ssh_certificate", metavar="PATH", section="Runtime", parse=config_make_path_parser(), path_suffixes=("crt",), help="Certificate for use with mkosi ssh in X509 format", scope=SettingScope.main, ), ConfigSetting( dest="vmm", name="VirtualMachineMonitor", section="Runtime", choices=Vmm.choices(), parse=config_make_enum_parser(Vmm), default=Vmm.qemu, help="Set the virtual machine monitor to use for mkosi vm", scope=SettingScope.main, ), ConfigSetting( dest="machine", metavar="NAME", 
section="Runtime", help="Set the machine name to use when booting the image", scope=SettingScope.main, ), ConfigSetting( dest="forward_journal", metavar="PATH", section="Runtime", parse=config_make_path_parser(required=False), help="Set the path used to store forwarded machine journals", scope=SettingScope.main, ), ConfigSetting( dest="sysupdate_dir", long="--sysupdate-directory", compat_longs=("--sysupdate-dir",), metavar="PATH", name="SysupdateDirectory", section="Runtime", parse=config_make_path_parser(), path_suffixes=("sysupdate",), help="Directory containing systemd-sysupdate transfer definitions", scope=SettingScope.main, ), ConfigSetting( dest="console", metavar="MODE", section="Runtime", parse=config_make_enum_parser(ConsoleMode), help="Configure the virtual machine console mode to use", default=ConsoleMode.native, scope=SettingScope.main, ), ConfigSetting( dest="cpus", name="CPUs", metavar="CPUS", section="Runtime", parse=config_parse_number, default=1, help="Configure number of CPUs in virtual machine", compat_longs=("--qemu-smp",), compat_names=("QemuSmp",), scope=SettingScope.main, ), ConfigSetting( dest="ram", name="RAM", metavar="BYTES", section="Runtime", parse=config_parse_bytes, default=parse_bytes("2G"), help="Configure guest's RAM size", compat_longs=("--qemu-mem",), compat_names=("QemuMem",), scope=SettingScope.main, ), ConfigSetting( dest="maxmem", name="MaxMem", metavar="BYTES", section="Runtime", parse=config_parse_bytes, help="Configure guest's MaxMem size", scope=SettingScope.main, ), ConfigSetting( dest="kvm", name="KVM", metavar="FEATURE", section="Runtime", parse=config_parse_feature, help="Configure whether to use KVM or not", compat_longs=("--qemu-kvm",), compat_names=("QemuKvm",), scope=SettingScope.main, ), ConfigSetting( dest="cxl", name="CXL", metavar="BOOLEAN", section="Runtime", parse=config_parse_boolean, help="Enable CXL device support", scope=SettingScope.main, ), ConfigSetting( dest="vsock", name="VSock", metavar="FEATURE", section="Runtime", parse=config_parse_feature, help="Configure whether to use vsock or not", compat_longs=("--qemu-vsock",), compat_names=("QemuVsock",), scope=SettingScope.main, ), ConfigSetting( dest="vsock_cid", name="VSockCID", long="--vsock-cid", metavar="NUMBER|auto|hash", section="Runtime", parse=config_parse_vsock_cid, default=VsockCID.auto, help="Specify the vsock connection ID to use", compat_longs=("--qemu-vsock-cid",), compat_names=("QemuVsockConnectionId",), scope=SettingScope.main, ), ConfigSetting( dest="tpm", name="TPM", metavar="FEATURE", section="Runtime", parse=config_parse_feature, help="Configure whether to use a virtual tpm or not", compat_longs=("--qemu-swtpm",), compat_names=("QemuSwtpm",), scope=SettingScope.main, ), ConfigSetting( dest="removable", metavar="BOOLEAN", section="Runtime", parse=config_parse_boolean, help="Attach the image as a removable drive to the virtual machine", compat_longs=("--qemu-removable",), compat_names=("QemuRemovable",), scope=SettingScope.main, ), ConfigSetting( dest="firmware", section="Runtime", parse=config_make_enum_parser(Firmware), default=Firmware.auto, help="Select the virtual machine firmware to use", choices=Firmware.choices(), compat_longs=("--qemu-firmware",), compat_names=("QemuFirmware",), scope=SettingScope.main, ), ConfigSetting( dest="firmware_variables", metavar="PATH", section="Runtime", parse=config_make_path_parser(constants=("custom", "microsoft", "microsoft-mok"), required=False), help="Set the path to the firmware variables file to use", 
compat_longs=("--qemu-firmware-variables",), compat_names=("QemuFirmwareVariables",), scope=SettingScope.main, ), ConfigSetting( dest="linux", metavar="PATH", section="Runtime", parse=config_parse_string, help="Specify the kernel to use for direct kernel boot", compat_longs=("--qemu-kernel",), compat_names=("QemuKernel",), scope=SettingScope.main, ), ConfigSetting( dest="drives", long="--drive", metavar="DRIVE", section="Runtime", parse=config_make_list_parser(delimiter=" ", parse=parse_drive), help="Specify drive that mkosi should create and pass to the virtual machine", compat_longs=("--qemu-drive",), compat_names=("QemuDrives",), scope=SettingScope.main, ), ConfigSetting( dest="qemu_args", metavar="ARGS", section="Runtime", parse=config_make_list_parser(delimiter=" ", unescape=True), # Suppress the command line option because it's already possible to pass qemu args as normal # arguments. help=argparse.SUPPRESS, scope=SettingScope.main, ), ConfigSetting( dest="register", metavar="BOOL", section="Runtime", parse=config_parse_feature, default=ConfigFeature.auto, help="Register booted vm/container with systemd-machined", scope=SettingScope.main, ), ConfigSetting( dest="storage_target_mode", metavar="FEATURE", section="Runtime", parse=config_parse_feature, default=ConfigFeature.auto, help="Run systemd-storagetm as part of the serve verb", scope=SettingScope.main, ), ] SETTINGS_LOOKUP_BY_NAME = {name: s for s in SETTINGS for name in [s.name, *s.compat_names]} SETTINGS_LOOKUP_BY_DEST = {s.dest: s for s in SETTINGS} SETTINGS_LOOKUP_BY_OPTION = { name: s for s in SETTINGS for name in [s.long, *s.compat_longs, s.short] if name } # fmt: skip SETTINGS_LOOKUP_BY_SPECIFIER = {s.specifier: s for s in SETTINGS if s.specifier} MATCHES = ( Match( name="PathExists", match=match_path_exists, ), Match( name="SystemdVersion", match=match_systemd_version, ), Match( name="HostArchitecture", match=match_host_architecture, ), Match( name="Image", match=match_image, ), ) MATCH_LOOKUP = {m.name: m for m in MATCHES} SPECIFIERS = ( Specifier( char="C", callback=lambda ns, config: os.fspath(config.resolve().parent), ), Specifier( char="P", callback=lambda ns, config: os.fspath(Path.cwd()), ), Specifier( char="D", callback=lambda ns, config: os.fspath(ns["directory"].resolve()), ), Specifier( char="F", callback=lambda ns, config: ns["distribution"].installer.filesystem(), depends=("distribution",), ), Specifier( char="I", callback=lambda ns, config: ns["image"], ), ) SPECIFIERS_LOOKUP_BY_CHAR = {s.char: s for s in SPECIFIERS} # This regular expression can be used to split "AutoBump" -> ["Auto", "Bump"] # and "NSpawnSettings" -> ["NSpawn", "Settings"] # The first part (?<=[a-z]) is a positive look behind for a lower case letter # and (?=[A-Z]) is a lookahead assertion matching an upper case letter but not # consuming it FALLBACK_NAME_TO_DEST_SPLITTER = re.compile("(?<=[a-z])(?=[A-Z])") def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: parser = argparse.ArgumentParser( prog="mkosi", description="Build Bespoke OS Images", # the synopsis below is supposed to be indented by two spaces usage="\n " + textwrap.dedent("""\ mkosi [options…] {b}init{e} mkosi [options…] {b}summary{e} mkosi [options…] {b}cat-config{e} mkosi [options…] {b}build{e} [-- command line…] mkosi [options…] {b}shell{e} [-- command line…] mkosi [options…] {b}boot{e} [-- nspawn settings…] mkosi [options…] {b}vm{e} [-- vmm parameters…] mkosi [options…] {b}ssh{e} [-- command line…] mkosi [options…] {b}journalctl{e} [-- command line…] 
mkosi [options…] {b}coredumpctl{e} [-- command line…]
mkosi [options…] {b}sysupdate{e} [-- command line…]
mkosi [options…] {b}sandbox{e} [-- command line…]
mkosi [options…] {b}dependencies{e} [-- options…]
mkosi [options…] {b}clean{e}
mkosi [options…] {b}serve{e}
mkosi [options…] {b}burn{e} [device]
mkosi [options…] {b}bump{e}
mkosi [options…] {b}genkey{e}
mkosi [options…] {b}documentation{e} [manual]
mkosi [options…] {b}completion{e} [shell]
mkosi [options…] {b}help{e}
mkosi -h | --help
mkosi --version
""").format(b=ANSI_BOLD, e=ANSI_RESET),
        add_help=False,
        allow_abbrev=False,
        argument_default=argparse.SUPPRESS,
        formatter_class=CustomHelpFormatter,
    )

    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s " + __version__,
        help=argparse.SUPPRESS,
    )
    parser.add_argument(
        "-f",
        "--force",
        action="count",
        dest="force",
        default=0,
        help="Remove existing image file before operation",
    )
    parser.add_argument(
        "-C",
        "--directory",
        type=parse_chdir if chdir else str,
        default=Path.cwd(),
        help="Change to specified directory before doing anything",
        metavar="PATH",
    )
    parser.add_argument(
        "--debug",
        help="Turn on debugging output",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--debug-shell",
        help="Spawn an interactive shell in the image if a chroot command fails",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--debug-workspace",
        help="When an error occurs, the workspace directory will not be deleted",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--debug-sandbox",
        help="Run mkosi-sandbox with strace",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--no-pager",
        action="store_false",
        dest="pager",
        default=True,
        help="Disable paging for long output",
    )
    parser.add_argument(
        "--genkey-valid-days",
        metavar="DAYS",
        help="Number of days keys should be valid when generating keys",
        default="730",
    )
    parser.add_argument(
        "--genkey-common-name",
        metavar="CN",
        help="Template for the CN when generating keys",
        default=f"mkosi of {getpass.getuser()}",
    )
    parser.add_argument(
        "-B",
        "--auto-bump",
        help="Automatically bump image version after building",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--doc-format",
        help="The format to show documentation in",
        default=DocFormat.auto,
        type=DocFormat,
        choices=list(DocFormat),
    )
    parser.add_argument(
        "--json",
        help="Show summary as JSON",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-w",
        "--wipe-build-dir",
        help="Remove the build directory before building the image",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-R",
        "--rerun-build-scripts",
        help="Run build scripts even if the image is not rebuilt",
        action="store_true",
        default=False,
    )

    # These can be removed once mkosi v15 is available in LTS distros and compatibility with <= v14
    # is no longer needed in build infrastructure (e.g.: OBS).
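    # For example, a legacy invocation such as "mkosi --nspawn-keep-unit --cache=/tmp/cache build"
    # (hypothetical values) still parses cleanly: the compatibility options registered below are
    # accepted and ignored instead of producing an argparse error.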
parser.add_argument( "--nspawn-keep-unit", nargs=0, action=IgnoreAction, ) for arg in ("--default", "--cache", "--runtime-scratch"): parser.add_argument(arg, action=IgnoreAction) parser.add_argument( "verb", type=Verb, nargs="?", choices=list(Verb), default=Verb.build, help=argparse.SUPPRESS, ) parser.add_argument( "cmdline", nargs="*", help=argparse.SUPPRESS, default=[], ) parser.add_argument( "-h", "--help", action=PagerHelpAction, help=argparse.SUPPRESS, ) last_section: Optional[str] = None for s in SETTINGS: if s.section != last_section: group = parser.add_argument_group(f"{s.section} configuration options") last_section = s.section if s.short and s.const is not None: group.add_argument( # type: ignore s.short, metavar="", dest=s.dest, const=s.const, help="", action="store_const", default=argparse.SUPPRESS, ) for long in [s.long, *s.compat_longs]: opts = [s.short, long] if s.short and long == s.long and s.const is None else [long] group.add_argument( # type: ignore *opts, dest=s.dest, choices=s.choices, metavar=s.metavar, help=s.help if long == s.long else argparse.SUPPRESS, action=ConfigAction, # TODO: Remove once https://github.com/openSUSE/obs-build/pull/1059 is deployed in OBS. nargs="?" if s.dest == "checksum" else None, const="yes" if s.dest == "checksum" else None, ) return parser def resolve_deps(images: Sequence[Config], include: Sequence[str]) -> list[Config]: graph = {config.image: config.dependencies for config in images} if any((missing := i) not in graph for i in include): die(f"No image found with name {missing}") deps = set() queue = [*include] while queue: if (image := queue.pop(0)) not in deps: deps.add(image) queue.extend(graph[image]) images = [config for config in images if config.image in deps] graph = {config.image: config.dependencies for config in images} try: order = list(graphlib.TopologicalSorter(graph).static_order()) except graphlib.CycleError as e: die(f"Image dependency cycle detected: {' => '.join(e.args[1])}") return sorted(images, key=lambda i: order.index(i.image)) class ConfigAction(argparse.Action): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None, ) -> None: assert option_string is not None # For options that have the same dest, try to figure out the right # option by matching the option name s = SETTINGS_LOOKUP_BY_OPTION[self.option_strings[0]] if values is None or isinstance(values, str): values = [values] for v in values: assert isinstance(v, str) or v is None parsed_value = s.parse(v, getattr(namespace, self.dest, None)) if parsed_value is None: setattr(namespace, f"{s.dest}_was_none", True) setattr(namespace, s.dest, parsed_value) class ParseContext: def __init__(self, resources: Path = Path("/")) -> None: self.resources = resources # We keep two namespaces around, one for the settings specified on the CLI and one for # the settings specified in configuration files. This is required to implement both [Match] # support and the behavior where settings specified on the CLI always override settings # specified in configuration files. self.cli: dict[str, Any] = {} self.config: dict[str, Any] = {"files": []} self.defaults: dict[str, Any] = {} # Compare inodes instead of paths so we can't get tricked by bind mounts and such. 
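        # An illustrative sketch (not part of mkosi's API): two distinct paths that are bind
        # mounts of the same file report identical (st_dev, st_ino) pairs, so keying this set
        # on those values deduplicates includes that a set of Path objects would treat as
        # different files:
        #
        #     st = path.stat()
        #     key = (st.st_dev, st.st_ino)  # same tuple for every bind mount of the file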
self.includes: set[tuple[int, int]] = set() def setting_prohibited(self, setting: ConfigSetting[T]) -> bool: image = self.config["image"] return ( (not setting.tools and image == "tools") or (setting.scope.is_main_setting() and image != "main") or (setting.scope == SettingScope.universal and image not in ("main", "tools")) ) def expand_specifiers(self, text: str, path: Path) -> str: percent = False result: list[str] = [] for c in text: if percent: percent = False if c == "%": result += "%" elif setting := SETTINGS_LOOKUP_BY_SPECIFIER.get(c): if (v := self.finalize_value(setting)) is None: logging.warning( f"{path.absolute()}: Setting {setting.name} specified by specifier '%{c}' " f"in {text} is not yet set, ignoring" ) continue result += str(v) elif specifier := SPECIFIERS_LOOKUP_BY_CHAR.get(c): # Some specifier methods might want to access the image name or directory mkosi was # invoked in so let's make sure those are available. specifierns = { "image": self.config["image"], "directory": self.config["directory"], } for d in specifier.depends: setting = SETTINGS_LOOKUP_BY_DEST[d] if (v := self.finalize_value(setting)) is None: logging.warning( f"{path.absolute()}: Setting {setting.name} which specifier '%{c}' in " f"{text} depends on is not yet set, ignoring" ) break specifierns[d] = v else: result += specifier.callback(specifierns, path) else: logging.warning(f"{path.absolute()}: Unknown specifier '%{c}' found in {text}, ignoring") elif c == "%": percent = True else: result += c if percent: result += "%" return "".join(result) def parse_new_includes(self) -> None: # Parse any includes that were added after yielding. for p in self.cli.get("include", []) + self.config.get("include", []): for c in BUILTIN_CONFIGS: if p == Path(c): path = self.resources / c break else: path = p st = path.stat() if (st.st_dev, st.st_ino) in self.includes: continue self.includes.add((st.st_dev, st.st_ino)) if any(p == Path(c) for c in BUILTIN_CONFIGS): context = ParseContext(self.resources) context.config["image"] = "main" context.config["directory"] = path with chdir(path): context.parse_config_one(path) config = Config.from_dict(context.finalize()) make_executable( *config.configure_scripts, *config.clean_scripts, *config.sync_scripts, *config.prepare_scripts, *config.build_scripts, *config.postinst_scripts, *config.finalize_scripts, *config.postoutput_scripts, ) with chdir(path if path.is_dir() else Path.cwd()): self.parse_config_one(path if path.is_file() else Path.cwd(), parse_profiles=path.is_dir()) def finalize_value(self, setting: ConfigSetting[T]) -> Optional[T]: # If a value was specified on the CLI, it always takes priority. If the setting is a collection of # values, we merge the value from the CLI with the value from the configuration, making sure that the # value from the CLI always takes priority. if (v := cast(Optional[T], self.cli.get(setting.dest))) is not None: cfg_value = self.config.get(setting.dest) # We either have no corresponding value in the config files # or the values was assigned the empty string on the CLI # and should thus be treated as a reset and override of the value from the config file. if cfg_value is None or self.cli.get(f"{setting.dest}_was_none", False): return v # The instance asserts are pushed down to help mypy/pylance narrow the types. # Mypy still cannot properly infer that the merged collections conform to T # so we ignore the return-value error for it. 
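            # For example (hypothetical values): with Packages=vim in mkosi.conf and
            # --package emacs on the CLI, the merged list below is ["vim", "emacs"], i.e.
            # configuration file entries first with the CLI entries appended after them.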
            if isinstance(v, list):
                assert isinstance(cfg_value, type(v))
                return cfg_value + v  # type: ignore[return-value]
            elif isinstance(v, dict):
                assert isinstance(cfg_value, type(v))
                return cfg_value | v  # type: ignore[return-value]
            elif isinstance(v, set):
                assert isinstance(cfg_value, type(v))
                return cfg_value | v  # type: ignore[return-value]
            else:
                return v

        # If the setting was assigned the empty string on the CLI, we don't use any value configured in the
        # configuration file. Additionally, if the setting is a collection of values, we won't use any
        # default value either if the setting is set to the empty string on the command line.
        if (
            setting.dest not in self.cli
            and setting.dest in self.config
            and (v := cast(Optional[T], self.config[setting.dest])) is not None
        ):
            return v

        # If the type is a collection or optional and the setting was set explicitly, don't use the default
        # value.
        field = Config.fields().get(setting.dest)
        origin = typing.get_origin(field.type) if field else None
        args = typing.get_args(field.type) if field else []
        if (
            (setting.dest in self.cli or setting.dest in self.config)
            and field
            and (origin in (dict, list, set) or (origin is typing.Union and type(None) in args))
        ):
            default = setting.parse(None, None)
        elif setting.dest in self.defaults:
            default = self.defaults[setting.dest]
        elif setting.default_factory:
            # Some default factory methods want to access the image name or directory mkosi was invoked in so
            # let's make sure those are available.
            factoryns = {
                "image": self.config["image"],
                "directory": self.config["directory"],
            }
            # To determine default values, we need the final values of various settings in a namespace
            # object, but we don't want to copy the final values into the config namespace object just yet so
            # we create a new namespace object instead.
            factoryns |= {
                d: self.finalize_value(SETTINGS_LOOKUP_BY_DEST[d]) for d in setting.default_factory_depends
            }
            default = setting.default_factory(factoryns)
        elif setting.default is not None:
            default = setting.default
        else:
            default = setting.parse(None, None)

        self.defaults[setting.dest] = default

        return default

    def match_config(self, path: Path, asserts: bool = False) -> bool:
        condition_triggered: Optional[bool] = None
        match_triggered: Optional[bool] = None
        skip = False

        # If the config file does not exist, we assume it matches so that we look at the other files in the
        # directory as well (mkosi.conf.d/ and extra files).
        if not path.exists():
            return True

        sections = ("Assert", "TriggerAssert") if asserts else ("Match", "TriggerMatch")

        for section, k, v in parse_ini(path, only_sections=sections):
            if not k and not v:
                if condition_triggered is False:
                    if section == "Assert":
                        die(f"{path.absolute()}: Trigger condition in [Assert] section was not satisfied")
                    elif section == "Match":
                        return False

                if section in ("TriggerAssert", "TriggerMatch"):
                    match_triggered = bool(match_triggered) or condition_triggered is not False

                condition_triggered = None
                skip = False
                continue

            if skip:
                continue

            raw = v
            trigger = v.startswith("|")
            v = v.removeprefix("|")
            negate = v.startswith("!")
            v = v.removeprefix("!")
            v = self.expand_specifiers(v, path)

            if s := SETTINGS_LOOKUP_BY_NAME.get(k):
                if not s.match:
                    die(f"{path.absolute()}: {k} cannot be used in [{section}]")

                if s.scope == SettingScope.main and self.config["image"] != "main":
                    die(f"{path.absolute()}: {k} cannot be matched on outside of the main image")

                if k != s.name:
                    logging.warning(
                        f"{path.absolute()}: Setting {k} is deprecated, please use {s.name} instead."
) # If we encounter a setting that has not been explicitly configured yet, we assign the # default value first so that we can match on default values for settings. if (value := self.finalize_value(s)) is None: result = False else: result = s.match(v, value) elif m := MATCH_LOOKUP.get(k): result = m.match(self.config["image"], v) else: die(f"{path.absolute()}: {k} cannot be used in [{section}]") if negate: result = not result if not trigger and not result: if section.startswith("Trigger"): skip = True condition_triggered = False continue if asserts: die(f"{path.absolute()}: {k}={raw} in [Assert] section was not satisfied") return False if trigger: condition_triggered = bool(condition_triggered) or result if match_triggered is False and asserts: die(f"{path.absolute()}: None of the [TriggerAssert] sections was satisfied") return match_triggered is not False def parse_config_one(self, path: Path, parse_profiles: bool = False, parse_local: bool = False) -> bool: s: Optional[ConfigSetting[object]] # Hint to mypy that we might assign None assert path.is_absolute() extras = path.is_dir() if extras: path /= "mkosi.conf" if not self.match_config(path): return False self.match_config(path, asserts=True) if extras: if parse_local: for localpath in ( *([p] if (p := path.parent / "mkosi.local").is_dir() else []), *([p] if (p := path.parent / "mkosi.local.conf").is_file() else []), ): with chdir(localpath if localpath.is_dir() else Path.cwd()): self.parse_config_one(localpath if localpath.is_file() else Path.cwd()) # Local configuration should override other file based # configuration but not the CLI itself so move the finalized # values to the CLI namespace. for s in SETTINGS: if s.dest in self.config: self.cli[s.dest] = self.finalize_value(s) del self.config[s.dest] for s in SETTINGS: image = self.config["image"] if self.setting_prohibited(s): continue for f in s.path_suffixes: f = f"mkosi.{f}" extra = parse_path( f, secret=s.path_secret, required=False, resolve=False, expanduser=False, expandvars=False, ) if extra.exists(): self.config[s.dest] = s.parse( file_run_or_read(extra).rstrip("\n") if s.path_read_text else f, self.config.get(s.dest), ) for f in s.recursive_path_suffixes: f = f"mkosi.{f}" recursive_extras = parse_paths_from_directory( f, secret=s.path_secret, required=False, resolve=False, expanduser=False, expandvars=False, ) for e in recursive_extras: if e.exists(): self.config[s.dest] = s.parse(os.fspath(e), self.config.get(s.dest)) if path.exists(): logging.debug(f"Loading configuration file {path}") files = self.config["files"] files += [path] for section, k, v in parse_ini( path, only_sections={s.section for s in SETTINGS} | {"Host"}, ): if not k and not v: continue name = k.removeprefix("@") if name != k: logging.warning( f"{path.absolute()}: The '@' specifier is deprecated, please use {name} instead of " f"{k}" ) if not (s := SETTINGS_LOOKUP_BY_NAME.get(name)): logging.warning(f"{path.absolute()}: Unknown setting {name}") continue image = self.config["image"] if self.setting_prohibited(s): die(f"{path.absolute()}: Setting {name} cannot be configured in {image} image") if section != s.section: logging.warning( f"{path.absolute()}: Setting {name} should be configured in [{s.section}], not " f"[{section}]." ) if name != s.name: logging.warning( f"{path.absolute()}: Setting {name} is deprecated, please use {s.name} instead." 
) v = self.expand_specifiers(v, path) self.config[s.dest] = s.parse(v, self.config.get(s.dest)) self.parse_new_includes() if extras and (path.parent / "mkosi.conf.d").exists(): for p in sorted((path.parent / "mkosi.conf.d").iterdir()): p = p.absolute() if p.is_dir() or p.suffix == ".conf": with chdir(p if p.is_dir() else Path.cwd()): self.parse_config_one(p if p.is_file() else Path.cwd()) if parse_profiles: for profile in self.finalize_value(SETTINGS_LOOKUP_BY_DEST["profiles"]) or []: for p in (Path(profile), Path(f"{profile}.conf")): p = Path.cwd() / "mkosi.profiles" / p if p.exists(): with chdir(p if p.is_dir() else Path.cwd()): self.parse_config_one(p if p.is_file() else Path.cwd()) return True def finalize(self) -> dict[str, Any]: ns = copy.deepcopy(self.config) # After we've finished parsing the configuration, we'll have values in both namespaces (context.cli, # context.config). To be able to parse the values from a single namespace, we merge the final values # of each setting into one namespace. for s in SETTINGS: ns[s.dest] = copy.deepcopy(self.finalize_value(s)) return ns def want_new_history(args: Args) -> bool: if args.directory is None: return False if not args.verb.needs_build(): return False if args.rerun_build_scripts: return False if args.verb != Verb.build and args.force == 0: return False return True def have_history(args: Args) -> bool: if want_new_history(args): return False if args.directory is None: return False if args.verb in (Verb.clean, Verb.box, Verb.sandbox, Verb.latest_snapshot): return False if args.verb == Verb.summary and args.force > 0: return False if args.verb.needs_tools() and args.force > 0: return False if args.verb.needs_build() and args.force > 0: return False if args.verb == Verb.build and not args.rerun_build_scripts: return False return Path(".mkosi-private/history/latest.json").exists() def finalize_default_tools( main: ParseContext, finalized: dict[str, Any], *, configdir: Optional[Path], resources: Path, ) -> Config: context = ParseContext(resources) for s in SETTINGS: if s.scope == SettingScope.multiversal: context.cli[s.dest] = copy.deepcopy(finalized[s.dest]) elif s.scope == SettingScope.tools: # If the setting was specified on the CLI for the main config, we treat it as specified on the # CLI for the tools tree as well. Idem for config and defaults. 
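            # For example (illustrative): a ToolsTreeDistribution= value stored under the dest
            # "tools_tree_distribution" in the main context is re-applied to the tools tree
            # context under the plain dest "distribution" by stripping the prefix below.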
            dest = s.dest.removeprefix("tools_tree_")

            if s.dest in main.cli:
                ns = context.cli
                if f"{s.dest}_was_none" in main.cli:
                    ns[f"{dest}_was_none"] = main.cli[f"{s.dest}_was_none"]
            elif s.dest in main.config:
                ns = context.config
            else:
                ns = context.defaults

            ns[dest] = copy.deepcopy(finalized[s.dest])

    context.cli["output_format"] = OutputFormat.directory

    context.config |= {
        "image": "tools",
        "directory": finalized["directory"],
        "files": [],
    }
    context.config["environment"] = {
        name: finalized["environment"][name]
        for name in finalized.get("environment", {}).keys() & finalized.get("pass_environment", [])
    }

    if configdir and (p := configdir / "mkosi.tools.conf").exists():
        with chdir(p if p.is_dir() else Path.cwd()):
            context.parse_config_one(p, parse_profiles=p.is_dir(), parse_local=p.is_dir())

    with chdir(resources / "mkosi-tools"):
        context.parse_config_one(resources / "mkosi-tools", parse_profiles=True)

    return Config.from_dict(context.finalize())


def finalize_default_initrd(
    main: ParseContext,
    finalized: dict[str, Any],
    *,
    resources: Path,
) -> Config:
    context = ParseContext(resources)

    for s in SETTINGS:
        if s.scope in (SettingScope.universal, SettingScope.multiversal):
            context.cli[s.dest] = copy.deepcopy(finalized[s.dest])
        elif s.scope in (SettingScope.inherit, SettingScope.initrd_inherit) and s.dest in finalized:
            context.config[s.dest] = copy.deepcopy(finalized[s.dest])
        elif s.scope == SettingScope.initrd:
            # If the setting was specified on the CLI for the main config, we treat it as specified on the
            # CLI for the default initrd as well. Idem for config and defaults.
            dest = s.dest.removeprefix("initrd_")

            if s.dest in main.cli:
                ns = context.cli
                if f"{s.dest}_was_none" in main.cli:
                    ns[f"{dest}_was_none"] = main.cli[f"{s.dest}_was_none"]
            elif s.dest in main.config:
                ns = context.config
            else:
                ns = context.defaults

            ns[dest] = copy.deepcopy(finalized[s.dest])

    context.config |= {
        "image": "default-initrd",
        "directory": finalized["directory"],
        "files": [],
    }
    context.config["environment"] = {
        name: finalized["environment"][name]
        for name in finalized.get("environment", {}).keys() & finalized.get("pass_environment", [])
    }

    with chdir(resources / "mkosi-initrd"):
        context.parse_config_one(resources / "mkosi-initrd", parse_profiles=True)

    return Config.from_dict(context.finalize())


def finalize_configdir(directory: Optional[Path]) -> Optional[Path]:
    """Allow locating all mkosi configuration in a mkosi/ subdirectory instead of in the
    top-level directory of a git repository.
""" if directory is None: return None if not ((directory / "mkosi.conf").exists() or (directory / "mkosi.tools.conf").exists()) and ( (directory / "mkosi/mkosi.conf").is_file() or (directory / "mkosi/mkosi.tools.conf").exists() ): return directory / "mkosi" return directory def bump_image_version(configdir: Path) -> str: version_file = configdir / "mkosi.version" if os.access(version_file, os.X_OK): die(f"Cannot bump image version, '{version_file}' is executable") if version_file.exists(): version = version_file.read_text().strip() else: version = None if (bump := configdir / "mkosi.bump").exists(): with complete_step(f"Running bump script {bump}"): new_version = run([bump], stdout=subprocess.PIPE).stdout.strip() elif version is not None: v = version.split(".") try: v[-1] = str(int(v[-1]) + 1) except ValueError: v += ["2"] logging.warning("Last component of current version is not a decimal integer, appending '.2'") new_version = ".".join(v) else: new_version = "1" logging.info(f"Bumping version: '{none_to_na(version)}' → '{new_version}'") return new_version def want_kernel(config: Config) -> bool: if config.output_format in (OutputFormat.uki, OutputFormat.esp): return False if config.bootable == ConfigFeature.disabled: return False if config.bootable == ConfigFeature.auto and ( config.output_format == OutputFormat.cpio or config.output_format.is_extension_or_portable_image() or config.overlay ): return False return True def want_default_initrd(config: Config) -> bool: if not want_kernel(config): return False if config.initrds: return False if config.bootable == ConfigFeature.auto and not any( config.distribution.installer.is_kernel_package(p) for p in itertools.chain(config.packages, config.volatile_packages) ): return False return True def parse_config( argv: Sequence[str] = (), *, resources: Path = Path("/"), ) -> tuple[Args, Optional[Config], tuple[Config, ...]]: argv = list(argv) context = ParseContext(resources) # The "image" field does not directly map to a setting but is required to determine some default values # for settings, so let's set it on the config namespace immediately so it's available. context.config["image"] = "main" # First, we parse the command line arguments into a separate namespace. argparser = create_argument_parser() ns = argparse.Namespace() ns.__dict__ = context.cli argparser.parse_args(argv, ns) args = Args.from_namespace(context.cli) if args.debug: ARG_DEBUG.set(args.debug) if args.debug_shell: ARG_DEBUG_SHELL.set(args.debug_shell) if args.debug_sandbox: ARG_DEBUG_SANDBOX.set(args.debug_sandbox) if args.rerun_build_scripts and not args.verb.needs_build(): die(f"--rerun-build-scripts cannot be used with the '{args.verb}' command") if args.rerun_build_scripts and args.force: die("--force cannot be used together with --rerun-build-scripts") if args.cmdline and not args.verb.supports_cmdline(): die(f"Arguments after verb are not supported for the '{args.verb}' command") # If --debug was passed, apply it as soon as possible. if ARG_DEBUG.get(): logging.getLogger().setLevel(logging.DEBUG) # Do the same for help. 
if args.verb == Verb.help: page(argparser.format_help(), context.cli["pager"]) sys.exit(0) if not args.verb.needs_config(): return args, None, () if have_history(args): history = Config.from_partial_json(Path(".mkosi-private/history/latest.json").read_text()) # If we're operating on a previously built image (vm, boot, shell, ...), we're not rebuilding the # image and the configuration of the latest build is available, we load the config that was used to # build the previous image from there instead of parsing configuration files, except for the Host # section settings which we allow changing without requiring a rebuild of the image. for s in SETTINGS: if s.section in ("Include", "Runtime"): history.pop(s.dest, None) continue if s.dest in context.cli and s.dest in history and context.cli[s.dest] != history[s.dest]: logging.warning( f"Ignoring {s.long} from the CLI. Run with -f to rebuild the image with this setting" ) context.cli |= history cli = copy.deepcopy(context.cli) # One of the specifiers needs access to the directory, so make sure it is available. context.config["directory"] = args.directory context.parse_new_includes() context.config["files"] = [] configdir = finalize_configdir(args.directory) if ( ((args.auto_bump and args.verb.needs_build()) or args.verb == Verb.bump) and context.cli.get("image_version") is None and configdir is not None ): context.cli["image_version"] = bump_image_version(configdir) # Parse the global configuration unless the user explicitly asked us not to. if configdir is not None: with chdir(configdir): context.parse_config_one(configdir, parse_profiles=True, parse_local=True) config = context.finalize() if config["history"] and want_new_history(args): Path(".mkosi-private/history").mkdir(parents=True, exist_ok=True) Path(".mkosi-private/history/latest.json").write_text(dump_json(Config.to_partial_dict(cli))) tools = None if config.get("tools_tree") in (Path("default"), Path("yes")): if in_box(): config["tools_tree"] = Path(os.environ["MKOSI_DEFAULT_TOOLS_TREE_PATH"]) else: tools = finalize_default_tools(context, config, configdir=configdir, resources=resources) config["tools_tree"] = tools.output_dir_or_cwd() / tools.output elif config.get("tools_tree") == Path("no"): config["tools_tree"] = None images = [] # If Dependencies= was not explicitly specified on the CLI or in the configuration, # we want to default to all subimages. However, if a subimage has a [Match] section # and does not successfully match, we don't want to add it to the default dependencies. # To make this work, we can't use default_factory as it is evaluated too early, so # we check here to see if dependencies were explicitly provided and if not we gather # the list of default dependencies while we parse the subimages. dependencies: Optional[list[str]] = ( None if "dependencies" in context.cli or "dependencies" in context.config else [] ) # For the subimages in mkosi.images/, we want settings that are marked as # "universal" to override whatever settings are specified in the subimage # configuration files. We achieve this by making it appear like these settings # were specified on the CLI by copying them to the CLI namespace. Any settings # that are not marked as "universal" are deleted from the CLI namespace. 
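    # For example (illustrative): a universal Distribution= value from the main image is copied
    # into the CLI namespace here, so a subimage in mkosi.images/ that sets a different
    # Distribution= in its own configuration is still overridden by the main image's value.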
for s in SETTINGS: if s.scope in (SettingScope.universal, SettingScope.multiversal): context.cli[s.dest] = copy.deepcopy(config[s.dest]) elif s.dest in context.cli: del context.cli[s.dest] todo = [] if configdir is not None and (imagedir := configdir / "mkosi.images").exists(): todo += sorted(imagedir.iterdir()) for p in todo: p = p.absolute() if not p.is_dir() and not p.suffix == ".conf": continue name = p.name.removesuffix(".conf") if not name: die(f"{p} is not a valid image name") context.config = { "image": name, "directory": args.directory, "files": [], } # Settings that are marked as "inherit" are passed down to subimages but can # be overridden, so we copy these to the config namespace so that they'll be # overridden if the setting is explicitly configured by the subimage. for s in SETTINGS: if s.scope == SettingScope.inherit and s.dest in config: context.config[s.dest] = copy.deepcopy(config[s.dest]) context.config["environment"] = { name: config["environment"][name] for name in config.get("pass_environment", {}) if name in config.get("environment", {}) } # Allow subimage configuration to include everything again. context.includes = set() context.defaults = {} with chdir(p if p.is_dir() else Path.cwd()): if not context.parse_config_one( p if p.is_file() else Path.cwd(), parse_profiles=p.is_dir(), parse_local=True, ): continue images += [context.finalize()] if dependencies is not None: dependencies += [name] if dependencies is not None: config["dependencies"] = dependencies main = Config.from_dict(config) subimages = [Config.from_dict(ns) for ns in images] if any(want_default_initrd(image) for image in subimages + [main]): initrd = finalize_default_initrd(context, config, resources=resources) if want_default_initrd(main): main = dataclasses.replace( main, initrds=[*main.initrds, initrd.output_dir_or_cwd() / initrd.output], dependencies=main.dependencies + [initrd.image], ) subimages = [ ( dataclasses.replace( image, initrds=[*image.initrds, initrd.output_dir_or_cwd() / initrd.output], dependencies=image.dependencies + [initrd.image], ) if want_default_initrd(image) else image ) for image in subimages ] subimages += [initrd] subimages = resolve_deps(subimages, main.dependencies) return args, tools, tuple(subimages + [main]) def finalize_term() -> str: term = os.getenv("TERM", "unknown") if term == "unknown": term = "vt220" if sys.stderr.isatty() else "dumb" return term if sys.stderr.isatty() else "dumb" def finalize_git_config(proxy_url: Optional[str], env: dict[str, str]) -> dict[str, str]: if proxy_url is None: return {} try: cnt = int(env.get("GIT_CONFIG_COUNT", "0")) except ValueError: raise ValueError("GIT_CONFIG_COUNT environment variable must be set to a valid integer") # Override HTTP/HTTPS proxy in case its set in .gitconfig to a different value than proxy_url. # No need to override http.proxy / https.proxy if set in a previous GIT_CONFIG_* variable since # the last setting always wins. 
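    # For example, with GIT_CONFIG_COUNT=1 already present in the environment, the two entries
    # returned below land at indices 1 and 2 and GIT_CONFIG_COUNT becomes 3; git applies the
    # variables in order, so the proxy settings appended last take effect.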
return { "GIT_CONFIG_COUNT": str(cnt + 2), f"GIT_CONFIG_KEY_{cnt}": "http.proxy", f"GIT_CONFIG_VALUE_{cnt}": proxy_url, f"GIT_CONFIG_KEY_{cnt + 1}": "https.proxy", f"GIT_CONFIG_VALUE_{cnt + 1}": proxy_url, } def yes_no(b: bool) -> str: return "yes" if b else "no" def none_to_na(s: Optional[object]) -> str: return "n/a" if s is None else str(s) def none_to_random(s: Optional[object]) -> str: return "random" if s is None else str(s) def none_to_none(s: Optional[object]) -> str: return "none" if s is None else str(s) def none_to_default(s: Optional[object]) -> str: return "default" if s is None else str(s) def line_join_list(array: Iterable[object]) -> str: return "\n ".join(str(item) for item in array) if array else "none" def format_bytes(num_bytes: int) -> str: if num_bytes >= 1024**3: return f"{num_bytes / 1024**3:0.1f}G" if num_bytes >= 1024**2: return f"{num_bytes / 1024**2:0.1f}M" if num_bytes >= 1024: return f"{num_bytes / 1024:0.1f}K" return f"{num_bytes}B" def format_bytes_or_none(num_bytes: Optional[int]) -> str: return format_bytes(num_bytes) if num_bytes is not None else "none" def format_octal(oct_value: int) -> str: return f"{oct_value:>04o}" def format_octal_or_default(oct_value: Optional[int]) -> str: return format_octal(oct_value) if oct_value is not None else "default" def bold(s: Any) -> str: return f"{ANSI_BOLD}{s}{ANSI_RESET}" def cat_config(images: Sequence[Config]) -> str: c = io.StringIO() for n, config in enumerate(images): if n > 0: print(file=c) print(bold(f"### IMAGE: {config.image}"), file=c) for path in config.files: # Display the paths as relative to ., if underneath. if path.is_relative_to(Path.cwd()): path = path.relative_to(Path.cwd()) print(f"{ANSI_BLUE}# {path}{ANSI_RESET}", file=c) print(path.read_text(), file=c) return c.getvalue() def summary(config: Config) -> str: maniformats = (" ".join(i.name for i in config.manifest_format)) or "(none)" env = [f"{k}={v}" for k, v in config.environment.items()] summary = f"""\ {bold(f"IMAGE: {config.image}")} """ if config.image in ("main", "tools"): summary += f"""\ {bold("CONFIG")}: Profiles: {line_join_list(config.profiles)} Dependencies: {line_join_list(config.dependencies)} Minimum Version: {none_to_none(config.minimum_version)} Configure Scripts: {line_join_list(config.configure_scripts)} Pass Environment: {line_join_list(config.pass_environment)} {bold("DISTRIBUTION")}: Distribution: {bold(config.distribution)} Release: {bold(none_to_na(config.release))} Architecture: {config.architecture} Mirror: {none_to_default(config.mirror)} Snapshot: {none_to_none(config.snapshot)} Local Mirror (build): {none_to_none(config.local_mirror)} Repo Signature/Key check: {yes_no(config.repository_key_check)} Fetch Repository Keys: {yes_no(config.repository_key_fetch)} Repositories: {line_join_list(config.repositories)} """ summary += f"""\ {bold("OUTPUT")}: Output Format: {config.output_format} Manifest Formats: {maniformats} Output: {bold(config.output_with_compression)} Compression: {config.compress_output} Compression Level: {config.compress_level} Output Directory: {config.output_dir_or_cwd()} Output Mode: {format_octal_or_default(config.output_mode)} Image ID: {config.image_id} Image Version: {config.image_version} Split Artifacts: {line_join_list(config.split_artifacts)} Repart Directories: {line_join_list(config.repart_dirs)} Sector Size: {none_to_default(config.sector_size)} Overlay: {yes_no(config.overlay)} Seed: {none_to_random(config.seed)} Clean Scripts: {line_join_list(config.clean_scripts)} {bold("CONTENT")}: 
Packages: {line_join_list(config.packages)} Build Packages: {line_join_list(config.build_packages)} Volatile Packages: {line_join_list(config.volatile_packages)} Package Directories: {line_join_list(config.package_directories)} Volatile Package Directories: {line_join_list(config.volatile_package_directories)} With Documentation: {yes_no(config.with_docs)} Base Trees: {line_join_list(config.base_trees)} Skeleton Trees: {line_join_list(config.skeleton_trees)} Extra Trees: {line_join_list(config.extra_trees)} Remove Packages: {line_join_list(config.remove_packages)} Remove Files: {line_join_list(config.remove_files)} Clean Package Manager Metadata: {config.clean_package_metadata} Source Date Epoch: {none_to_none(config.source_date_epoch)} Sync Scripts: {line_join_list(config.sync_scripts)} Prepare Scripts: {line_join_list(config.prepare_scripts)} Build Scripts: {line_join_list(config.build_scripts)} Postinstall Scripts: {line_join_list(config.postinst_scripts)} Finalize Scripts: {line_join_list(config.finalize_scripts)} Postoutput Scripts: {line_join_list(config.postoutput_scripts)} Bootable: {config.bootable} Bootloader: {config.bootloader} BIOS Bootloader: {config.bios_bootloader} Shim Bootloader: {config.shim_bootloader} Unified Kernel Images: {config.unified_kernel_images} Unified Kernel Image Format: {config.unified_kernel_image_format} Unified Kernel Image Profiles: {line_join_list(config.unified_kernel_image_profiles)} Initrds: {line_join_list(config.initrds)} Initrd Profiles: {line_join_list(config.initrd_profiles)} Initrd Packages: {line_join_list(config.initrd_packages)} Initrd Volatile Packages: {line_join_list(config.initrd_volatile_packages)} Devicetrees: {line_join_list(config.devicetrees)} Splash: {none_to_none(config.splash)} Kernel Command Line: {line_join_list(config.kernel_command_line)} Kernel Modules: {line_join_list(config.kernel_modules_include)} """ if config.kernel_modules_exclude: summary += f"""\ Kernel Modules Exclude: {line_join_list(config.kernel_modules_exclude)} """ summary += f"""\ Kernel Modules Include Host: {yes_no(config.kernel_modules_include_host)} Firmware Files: {line_join_list(config.firmware_include)} """ if config.firmware_exclude: summary += f"""\ Firmware Files Exclude: {line_join_list(config.firmware_exclude)} """ summary += f"""\ Kernel Modules Initrd: {yes_no(config.kernel_modules_initrd)} Kernel Initrd Modules: {line_join_list(config.kernel_modules_initrd_include)} """ if config.kernel_modules_initrd_exclude: summary += f"""\ Kernel Modules Initrd Exclude: {line_join_list(config.kernel_modules_initrd_exclude)} """ summary += f"""\ Kernel Modules Initrd Include Host: {yes_no(config.kernel_modules_initrd_include_host)} Locale: {none_to_default(config.locale)} Locale Messages: {none_to_default(config.locale_messages)} Keymap: {none_to_default(config.keymap)} Timezone: {none_to_default(config.timezone)} Hostname: {none_to_default(config.hostname)} Root Password: {("(set)" if config.root_password else "(default)")} Root Shell: {none_to_default(config.root_shell)} Machine ID: {none_to_none(config.machine_id)} Autologin: {yes_no(config.autologin)} Make Initrd: {yes_no(config.make_initrd)} SSH: {config.ssh} SELinux Relabel: {config.selinux_relabel} """ if config.output_format.is_extension_or_portable_image() or config.output_format in ( OutputFormat.disk, OutputFormat.uki, OutputFormat.esp, ): summary += f"""\ {bold("VALIDATION")}: UEFI SecureBoot: {yes_no(config.secure_boot)} UEFI SecureBoot AutoEnroll: {yes_no(config.secure_boot_auto_enroll)} 
SecureBoot Signing Key: {none_to_none(config.secure_boot_key)} SecureBoot Signing Key Source: {config.secure_boot_key_source} SecureBoot Certificate: {none_to_none(config.secure_boot_certificate)} SecureBoot Certificate Source: {config.secure_boot_certificate_source} SecureBoot Sign Tool: {config.secure_boot_sign_tool} Verity: {config.verity} Verity Signing Key: {none_to_none(config.verity_key)} Verity Signing Key Source: {config.verity_key_source} Verity Certificate: {none_to_none(config.verity_certificate)} Verity Certificate Source: {config.verity_certificate_source} Sign Expected PCRs: {config.sign_expected_pcr} Expected PCRs Signing Key: {none_to_none(config.sign_expected_pcr_key)} Expected PCRs Key Source: {config.sign_expected_pcr_key_source} Expected PCRs Certificate: {none_to_none(config.sign_expected_pcr_certificate)} Expected PCRs Certificate Source: {config.sign_expected_pcr_certificate_source} Passphrase: {none_to_none(config.passphrase)} Checksum: {yes_no(config.checksum)} Sign: {yes_no(config.sign)} OpenPGP Tool: {config.openpgp_tool} GPG Key: ({"default" if config.key is None else config.key}) """ if config.image == "main": summary += f"""\ {bold("BUILD CONFIGURATION")}: Tools Tree: {config.tools_tree} Tools Tree Certificates: {yes_no(config.tools_tree_certificates)} Extra Search Paths: {line_join_list(config.extra_search_paths)} Incremental: {config.incremental} Use Only Package Cache: {config.cacheonly} Sandbox Trees: {line_join_list(config.sandbox_trees)} Workspace Directory: {config.workspace_dir_or_default()} Cache Directory: {none_to_none(config.cache_dir)} Cache Key: {config.cache_key} Package Cache Directory: {none_to_default(config.package_cache_dir)} Build Directory: {none_to_none(config.build_dir)} Build Key: {config.build_key} Use Subvolumes: {config.use_subvolumes} Repart Offline: {yes_no(config.repart_offline)} Save History: {yes_no(config.history)} Build Sources: {line_join_list(config.build_sources)} Build Sources Ephemeral: {config.build_sources_ephemeral} Script Environment: {line_join_list(env)} Environment Files: {line_join_list(config.environment_files)} Run Tests in Build Scripts: {yes_no(config.with_tests)} Scripts With Network: {yes_no(config.with_network)} Proxy URL: {none_to_none(config.proxy_url)} Proxy Peer Certificate: {none_to_none(config.proxy_peer_certificate)} Proxy Client Certificate: {none_to_none(config.proxy_client_certificate)} Proxy Client Key: {none_to_none(config.proxy_client_key)} {bold("HOST CONFIGURATION")}: NSpawn Settings: {none_to_none(config.nspawn_settings)} Ephemeral: {config.ephemeral} Credentials: {line_join_list(config.credentials.keys())} Extra Kernel Command Line: {line_join_list(config.kernel_command_line_extra)} Runtime Trees: {line_join_list(config.runtime_trees)} Runtime Size: {format_bytes_or_none(config.runtime_size)} Runtime Network: {config.runtime_network} Runtime Build Sources: {config.runtime_build_sources} Bind User: {yes_no(config.bind_user)} Unit Properties: {line_join_list(config.unit_properties)} SSH Signing Key: {none_to_none(config.ssh_key)} SSH Certificate: {none_to_none(config.ssh_certificate)} Machine: {config.machine_or_name()} Forward Journal: {none_to_none(config.forward_journal)} Register guest with machined: {config.register} Storage Target Mode: {config.storage_target_mode} Virtual Machine Monitor: {config.vmm} Console: {config.console} CPU Cores: {config.cpus} RAM: {format_bytes(config.ram)} MaxMem: {format_bytes_or_none(config.maxmem)} KVM: {config.kvm} CXL: {config.cxl} VSock: 
{config.vsock} VSock Connection ID: {VsockCID.format(config.vsock_cid)} TPM: {config.tpm} Firmware: {config.firmware} Firmware Variables: {none_to_none(config.firmware_variables)} Linux: {none_to_none(config.linux)} QEMU Extra Arguments: {line_join_list(config.qemu_args)} """ return summary class JsonEncoder(json.JSONEncoder): def default(self, o: Any) -> Any: if isinstance(o, StrEnum): return str(o) elif isinstance(o, GenericVersion): return str(o) elif isinstance(o, os.PathLike): return os.fspath(o) elif isinstance(o, uuid.UUID): return str(o) elif isinstance(o, (Args, Config)): return o.to_dict() elif dataclasses.is_dataclass(o) and not isinstance(o, type): return dataclasses.asdict(o) return super().default(o) def dump_json(dict: dict[str, Any], indent: Optional[int] = 4) -> str: return json.dumps(dict, cls=JsonEncoder, indent=indent, sort_keys=True) E = TypeVar("E", bound=StrEnum) def json_type_transformer(refcls: Union[type[Args], type[Config]]) -> Callable[[str, Any], Any]: fields_by_name = {field.name: field for field in dataclasses.fields(refcls)} def path_transformer(path: str, fieldtype: type[Path]) -> Path: return Path(path) def optional_path_transformer(path: Optional[str], fieldtype: type[Optional[Path]]) -> Optional[Path]: return Path(path) if path is not None else None def path_list_transformer(pathlist: list[str], fieldtype: type[list[Path]]) -> list[Path]: return [Path(p) for p in pathlist] def uuid_transformer(uuidstr: str, fieldtype: type[uuid.UUID]) -> uuid.UUID: return uuid.UUID(uuidstr) def optional_uuid_transformer( uuidstr: Optional[str], fieldtype: type[Optional[uuid.UUID]] ) -> Optional[uuid.UUID]: return uuid.UUID(uuidstr) if uuidstr is not None else None def root_password_transformer( rootpw: Optional[list[Union[str, bool]]], fieldtype: type[Optional[tuple[str, bool]]] ) -> Optional[tuple[str, bool]]: if rootpw is None: return None return (cast(str, rootpw[0]), cast(bool, rootpw[1])) def config_tree_transformer( trees: list[dict[str, Any]], fieldtype: type[ConfigTree] ) -> list[ConfigTree]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] for d in trees: assert "Source" in d assert "Target" in d ret.append( ConfigTree( source=Path(d["Source"]), target=Path(d["Target"]) if d["Target"] is not None else None, ) ) return ret def enum_transformer(enumval: str, fieldtype: type[E]) -> E: return fieldtype(enumval) def optional_enum_transformer(enumval: Optional[str], fieldtype: type[Optional[E]]) -> Optional[E]: return typing.get_args(fieldtype)[0](enumval) if enumval is not None else None def enum_list_transformer(enumlist: list[str], fieldtype: type[list[E]]) -> list[E]: enumtype = fieldtype.__args__[0] # type: ignore return [enumtype(e) for e in enumlist] def config_drive_transformer(drives: list[dict[str, Any]], fieldtype: type[Drive]) -> list[Drive]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] for d in drives: assert "Id" in d assert "Size" in d ret.append( Drive( id=d["Id"], size=d["Size"] if isinstance(d["Size"], int) else parse_bytes(d["Size"]), directory=Path(d["Directory"]) if d.get("Directory") else None, options=d.get("Options"), file_id=d.get("FileId", d["Id"]), flags=[DriveFlag(f) for f in d.get("Flags", [])], ) ) return ret def generic_version_transformer( version: Optional[str], fieldtype: type[Optional[GenericVersion]], ) -> Optional[GenericVersion]: return GenericVersion(version) if version is not None else None def certificate_source_transformer( certificate_source: dict[str, Any], fieldtype: 
type[CertificateSource] ) -> CertificateSource: assert "Type" in certificate_source return CertificateSource( type=CertificateSourceType(certificate_source["Type"]), source=certificate_source.get("Source", ""), ) def key_source_transformer(keysource: dict[str, Any], fieldtype: type[KeySource]) -> KeySource: assert "Type" in keysource return KeySource(type=KeySourceType(keysource["Type"]), source=keysource.get("Source", "")) def uki_profile_transformer( profiles: list[dict[str, Any]], fieldtype: type[UKIProfile], ) -> list[UKIProfile]: return [ UKIProfile( profile=profile["Profile"], cmdline=profile["Cmdline"], sign_expected_pcr=profile["SignExpectedPcr"], ) for profile in profiles ] # The type of this should be # dict[ # type, # Callable[a stringy JSON object (str, null, list or dict of str), type of the key], type of the key # ] # though this seems impossible to express, since e.g. mypy will make this a # builtins.dict[builtins.object, builtins.function] # whereas pyright gives the type of the dict keys as the proper union of # all functions in the dict. We therefore squash all the types here to Any # to shut up the type checkers and rely on the tests. transformers: dict[Any, Callable[[Any, Any], Any]] = { Path: path_transformer, Optional[Path]: optional_path_transformer, list[Path]: path_list_transformer, uuid.UUID: uuid_transformer, Optional[uuid.UUID]: optional_uuid_transformer, Optional[tuple[str, bool]]: root_password_transformer, list[ConfigTree]: config_tree_transformer, Architecture: enum_transformer, BiosBootloader: enum_transformer, ShimBootloader: enum_transformer, Ssh: enum_transformer, Bootloader: enum_transformer, Compression: enum_transformer, ConfigFeature: enum_transformer, Distribution: enum_transformer, OutputFormat: enum_transformer, Firmware: enum_transformer, SecureBootSignTool: enum_transformer, Incremental: enum_transformer, BuildSourcesEphemeral: enum_transformer, Optional[Distribution]: optional_enum_transformer, list[ManifestFormat]: enum_list_transformer, Verb: enum_transformer, DocFormat: enum_transformer, list[Drive]: config_drive_transformer, GenericVersion: generic_version_transformer, Cacheonly: enum_transformer, Network: enum_transformer, KeySource: key_source_transformer, Vmm: enum_transformer, list[UKIProfile]: uki_profile_transformer, UnifiedKernelImage: enum_transformer, list[ArtifactOutput]: enum_list_transformer, CertificateSource: certificate_source_transformer, ConsoleMode: enum_transformer, Verity: enum_transformer, } def json_transformer(key: str, val: Any) -> Any: fieldtype: Optional[dataclasses.Field[Any]] = fields_by_name.get(key) # It is unlikely that the type of a field will be None only, so let's not bother with a different # sentinel value if fieldtype is None: raise ValueError(f"{refcls} has no field {key}") transformer = transformers.get(fieldtype.type) if transformer is not None: try: return transformer(val, fieldtype.type) except (ValueError, IndexError, AssertionError) as e: raise ValueError( f"Unable to parse {val!r} for attribute {key!r} for {refcls.__name__}" ) from e return val return json_transformer def want_selinux_relabel( config: Config, root: Path, fatal: bool = True, ) -> Optional[tuple[Path, str, Path, Path]]: if config.selinux_relabel == ConfigFeature.disabled: return None if config.selinux_relabel == ConfigFeature.auto and config.output_format == OutputFormat.directory: return None selinux = root / "etc/selinux/config" if not selinux.exists(): if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux 
relabel is requested but could not find selinux config at /etc/selinux/config") return None policy = run( ["sh", "-c", f". {workdir(selinux)} && echo $SELINUXTYPE"], sandbox=config.sandbox(options=["--ro-bind", selinux, workdir(selinux)]), stdout=subprocess.PIPE, ).stdout.strip() if not policy: if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but no selinux policy is configured in /etc/selinux/config") return None if not (setfiles := config.find_binary("setfiles")): if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but setfiles is not installed") return None fc = root / "etc/selinux" / policy / "contexts/files/file_contexts" if not fc.exists(): if fatal and config.selinux_relabel == ConfigFeature.enabled: die(f"SELinux relabel is requested but SELinux file contexts not found in {fc}") return None binpolicydir = root / "etc/selinux" / policy / "policy" # The policy file is named policy.XX where XX is the policy version that indicates what features are # available. We check for string.digits instead of using isdecimal() as the latter checks for more than # just digits. policies = [ p for p in binpolicydir.glob("*") if p.suffix and all(c in string.digits for c in p.suffix[1:]) ] if not policies: if fatal and config.selinux_relabel == ConfigFeature.enabled: die(f"SELinux relabel is requested but SELinux binary policy not found in {binpolicydir}") return None binpolicy = sorted(policies, key=lambda p: GenericVersion(p.name), reverse=True)[0] return setfiles, policy, fc, binpolicy def swtpm_setup_version(sandbox: SandboxProtocol = nosandbox) -> GenericVersion: version = GenericVersion( run( ["swtpm_setup", "--version"], stdout=subprocess.PIPE, sandbox=sandbox(), success_exit_status=(0, 1), ).stdout.split()[-1] ) logging.debug(f"Version reported by swtpm_setup is {version}") return version def systemd_tool_version(*tool: PathString, sandbox: SandboxProtocol = nosandbox) -> GenericVersion: version = GenericVersion( run( [*tool, "--version"], stdout=subprocess.PIPE, sandbox=sandbox(), ) .stdout.split()[2] .strip("()") .removeprefix("v") ) logging.debug(f"Version reported by {tool[-1]} is {version}") return version def systemd_pty_forward( config: Config, *, background: Optional[str] = None, title: Optional[str] = None, ) -> list[str]: tint_bg = parse_boolean(config.environment.get("SYSTEMD_TINT_BACKGROUND", "1")) and parse_boolean( os.environ.get("SYSTEMD_TINT_BACKGROUND", "1") ) adjust_title = parse_boolean( config.environment.get("SYSTEMD_ADJUST_TERMINAL_TITLE", "1") ) and parse_boolean(os.environ.get("SYSTEMD_ADJUST_TERMINAL_TITLE", "1")) if not tint_bg and not adjust_title: return [] if not config.find_binary("systemd-pty-forward"): return [] cmd = ["systemd-pty-forward"] if tint_bg and background: cmd += ["--background", background] if adjust_title and title: cmd += ["--title", title] return cmd mkosi-26/mkosi/context.py000066400000000000000000000057071512054777600156340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os from collections.abc import Sequence from contextlib import AbstractContextManager from pathlib import Path from typing import Optional from mkosi.config import Args, Config from mkosi.util import PathString, flatten class Context: """State related properties.""" def __init__( self, args: Args, config: Config, *, workspace: Path, resources: Path, keyring_dir: Path, metadata_dir: Path, package_dir: Optional[Path] = None, ) -> None: self.args = args 
self.config = config self.workspace = workspace self.resources = resources self.keyring_dir = keyring_dir self.metadata_dir = metadata_dir self.package_dir = package_dir or (self.workspace / "packages") self.lowerdirs: list[PathString] = [] self.upperdir: Optional[PathString] = None self.workdir: Optional[PathString] = None self.package_dir.mkdir(exist_ok=True) self.staging.mkdir() self.sandbox_tree.mkdir() self.repository.mkdir() self.artifacts.mkdir() self.install_dir.mkdir() @property def root(self) -> Path: return self.workspace / "root" def rootoptions(self, dst: PathString = "/buildroot", *, readonly: bool = False) -> list[str]: if self.lowerdirs or self.upperdir: return [ "--overlay-lowerdir", os.fspath(self.root), *flatten(["--overlay-lowerdir", os.fspath(lowerdir)] for lowerdir in self.lowerdirs), *( ["--overlay-lowerdir" if readonly else "--overlay-upperdir", os.fspath(self.upperdir)] if self.upperdir else [] ), *(["--overlay-workdir", os.fspath(self.workdir)] if self.workdir and not readonly else []), "--overlay", os.fspath(dst), ] # fmt: skip else: return ["--ro-bind" if readonly else "--bind", os.fspath(self.root), os.fspath(dst)] @property def staging(self) -> Path: return self.workspace / "staging" @property def sandbox_tree(self) -> Path: return self.workspace / "sandbox" @property def repository(self) -> Path: return self.workspace / "repository" @property def artifacts(self) -> Path: return self.workspace / "artifacts" @property def install_dir(self) -> Path: return self.workspace / "dest" def sandbox( self, *, network: bool = False, devices: bool = False, scripts: Optional[Path] = None, options: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: return self.config.sandbox( network=network, devices=devices, scripts=scripts, overlay=self.sandbox_tree, options=options, ) mkosi-26/mkosi/curl.py000066400000000000000000000034231512054777600151060ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import subprocess from pathlib import Path from typing import Optional, overload from mkosi.config import Config from mkosi.mounts import finalize_certificate_mounts from mkosi.run import run, workdir @overload def curl( config: Config, url: str, *, output_dir: Optional[Path], log: bool = True, ) -> None: ... @overload def curl( config: Config, url: str, *, output_dir: None = None, log: bool = True, ) -> str: ... 
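# A usage sketch for the overloads above (hypothetical URL): calling without output_dir is
# typed as returning the response body as a string,
#
#     text = curl(config, "https://example.com/release.json")
#
# while passing output_dir is typed as returning None and downloads the file into that
# directory via curl's --remote-name option.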
def curl(config: Config, url: str, *, output_dir: Optional[Path] = None, log: bool = True) -> Optional[str]: result = run( [ "curl", "--location", *(["--output-dir", workdir(output_dir)] if output_dir else []), *(["--remote-name"] if output_dir else []), "--no-progress-meter", "--fail", *(["--silent"] if not log else []), *(["--proxy", config.proxy_url] if config.proxy_url else []), *(["--noproxy", ",".join(config.proxy_exclude)] if config.proxy_exclude else []), *(["--proxy-capath", "/proxy.cacert"] if config.proxy_peer_certificate else []), *(["--proxy-cert", "/proxy.clientcert"] if config.proxy_client_certificate else []), *(["--proxy-key", "/proxy.clientkey"] if config.proxy_client_key else []), url, ], stdout=None if output_dir else subprocess.PIPE, sandbox=config.sandbox( network=True, options=[ *(["--bind", os.fspath(output_dir), workdir(output_dir)] if output_dir else []), *finalize_certificate_mounts(config) ], ), log=log, ) # fmt: skip return None if output_dir else result.stdout mkosi-26/mkosi/distribution/000077500000000000000000000000001512054777600163045ustar00rootroot00000000000000mkosi-26/mkosi/distribution/__init__.py000066400000000000000000000121541512054777600204200ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import enum import importlib import urllib.parse from collections.abc import Sequence from pathlib import Path from typing import TYPE_CHECKING, Optional from mkosi.log import die from mkosi.util import StrEnum, read_env_file if TYPE_CHECKING: from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.installer import PackageManager class PackageType(StrEnum): none = enum.auto() rpm = enum.auto() deb = enum.auto() pkg = enum.auto() apk = enum.auto() class Distribution(StrEnum): # Please consult docs/distribution-policy.md and contact one # of the mkosi maintainers before implementing a new distribution. 
fedora = enum.auto() debian = enum.auto() kali = enum.auto() ubuntu = enum.auto() postmarketos = enum.auto() arch = enum.auto() opensuse = enum.auto() mageia = enum.auto() centos = enum.auto() rhel = enum.auto() rhel_ubi = enum.auto() openmandriva = enum.auto() rocky = enum.auto() alma = enum.auto() azure = enum.auto() custom = enum.auto() def is_centos_variant(self) -> bool: return self in ( Distribution.centos, Distribution.alma, Distribution.rocky, Distribution.rhel, Distribution.rhel_ubi, ) def is_apt_distribution(self) -> bool: return self in (Distribution.debian, Distribution.ubuntu, Distribution.kali) def is_rpm_distribution(self) -> bool: return self in ( Distribution.azure, Distribution.fedora, Distribution.opensuse, Distribution.mageia, Distribution.centos, Distribution.rhel, Distribution.rhel_ubi, Distribution.openmandriva, Distribution.rocky, Distribution.alma, ) @property def installer(self) -> type["DistributionInstaller"]: importlib.import_module(f"mkosi.distribution.{self.name}") return DistributionInstaller.registry[self] class DistributionInstaller: registry: dict[Distribution, "type[DistributionInstaller]"] = {} def __init_subclass__(cls, distribution: Distribution): cls.registry[distribution] = cls @classmethod def pretty_name(cls) -> str: raise NotImplementedError @classmethod def package_manager(cls, config: "Config") -> type["PackageManager"]: raise NotImplementedError @classmethod def keyring(cls, context: "Context") -> None: pass @classmethod def setup(cls, context: "Context") -> None: raise NotImplementedError @classmethod def install(cls, context: "Context") -> None: raise NotImplementedError @classmethod def install_packages( cls, context: "Context", packages: Sequence[str], *, apivfs: bool = True, allow_downgrade: bool = False, ) -> None: return cls.package_manager(context.config).install( context, packages, apivfs=apivfs, allow_downgrade=allow_downgrade, ) @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def architecture(cls, arch: "Architecture") -> str: raise NotImplementedError @classmethod def package_type(cls) -> PackageType: return PackageType.none @classmethod def default_release(cls) -> str: return "" @classmethod def default_tools_tree_distribution(cls) -> Optional[Distribution]: return None @classmethod def grub_prefix(cls) -> str: return "grub" @classmethod def latest_snapshot(cls, config: "Config") -> str: die(f"{cls.pretty_name()} does not support snapshots") @classmethod def is_kernel_package(cls, package: str) -> bool: return False def detect_distribution(root: Path = Path("/")) -> tuple[Optional[Distribution], Optional[str]]: try: os_release = read_env_file(root / "etc/os-release") except FileNotFoundError: try: os_release = read_env_file(root / "usr/lib/os-release") except FileNotFoundError: return None, None dist_id = os_release.get("ID", "linux") dist_id_like = os_release.get("ID_LIKE", "").split() version_id = os_release.get("VERSION_ID", None) version_codename = os_release.get("VERSION_CODENAME", None) quirks = { "azurelinux": Distribution.azure, } d: Optional[Distribution] = None for the_id in [dist_id, *dist_id_like]: d = Distribution.__members__.get(the_id, quirks.get(the_id)) if d is not None: break if d and d.is_apt_distribution() and version_codename: version_id = version_codename return d, version_id def join_mirror(mirror: str, link: str) -> str: # urljoin() behaves weirdly if the base does not end with a / or the path starts with a / so fix them up # as needed. 
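# For example (stock urllib.parse behavior, nothing mkosi-specific):
#   urljoin("https://example.com/pub", "fedora")   -> "https://example.com/fedora"
#   urljoin("https://example.com/pub/", "/fedora") -> "https://example.com/fedora"
# whereas with the fix-ups below we always get "https://example.com/pub/fedora".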
if not mirror.endswith("/"): mirror = f"{mirror}/" link = link.removeprefix("/") return urllib.parse.urljoin(mirror, link) mkosi-26/mkosi/distribution/alma.py000066400000000000000000000026231512054777600175730ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from mkosi.context import Context from mkosi.distribution import Distribution, centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(centos.Installer, distribution=Distribution.alma): @classmethod def pretty_name(cls) -> str: return "AlmaLinux" @classmethod def gpgurls(cls, context: Context) -> tuple[str, ...]: major = cls.major_release(context.config) return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-AlmaLinux-{major}", f"https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux-{major}", ), ) @classmethod def repository_variants( cls, context: Context, gpgurls: tuple[str, ...], repo: str, ) -> list[RpmRepository]: if context.config.snapshot: die(f"Snapshot= is not supported for {cls.pretty_name()}") if context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, f'$releasever/{repo}/$basearch/os')}" else: url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/$releasever/{repo.lower()}" return [RpmRepository(repo, url, gpgurls)] @classmethod def sig_repositories(cls, context: Context) -> list[RpmRepository]: return [] mkosi-26/mkosi/distribution/arch.py000066400000000000000000000112121512054777600175700ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import datetime import tempfile from collections.abc import Iterable from pathlib import Path from mkosi.archive import extract_tar from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.curl import curl from mkosi.distribution import Distribution, DistributionInstaller, PackageType, join_mirror from mkosi.installer.pacman import Pacman, PacmanRepository from mkosi.log import complete_step, die class Installer(DistributionInstaller, distribution=Distribution.arch): @classmethod def pretty_name(cls) -> str: return "Arch Linux" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.pkg @classmethod def default_release(cls) -> str: return "rolling" @classmethod def package_manager(cls, config: "Config") -> type[Pacman]: return Pacman @classmethod def keyring(cls, context: Context) -> None: if context.config.repository_key_fetch: with ( complete_step(f"Downloading {cls.pretty_name()} keyring"), tempfile.TemporaryDirectory() as d, ): curl( context.config, "https://archlinux.org/packages/core/any/archlinux-keyring/download", output_dir=Path(d), ) extract_tar( next(Path(d).iterdir()), context.sandbox_tree, dirs=["usr/share/pacman/keyrings"], sandbox=context.sandbox, ) Pacman.keyring(context) @classmethod def setup(cls, context: Context) -> None: Pacman.setup(context, list(cls.repositories(context))) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[PacmanRepository]: if context.config.local_mirror: yield PacmanRepository("core", context.config.local_mirror) else: if context.config.architecture.is_arm_variant(): if context.config.snapshot and not context.config.mirror: die("There is no known public mirror for snapshots of Arch Linux ARM") mirror = context.config.mirror or "http://mirror.archlinuxarm.org" else: if 
context.config.mirror: mirror = context.config.mirror elif context.config.snapshot: mirror = "https://archive.archlinux.org" else: mirror = "https://fastly.mirror.pkgbuild.com" if context.config.snapshot: url = join_mirror(mirror, f"repos/{context.config.snapshot}/$repo/os/$arch") elif context.config.architecture.is_arm_variant(): url = join_mirror(mirror, "$arch/$repo") else: url = join_mirror(mirror, "$repo/os/$arch") # Testing repositories have to go before regular ones to take precedence. repos = [ repo for repo in ( "core-testing", "core-testing-debug", "extra-testing", "extra-testing-debug", "core-debug", "extra-debug", "multilib-testing", "multilib", ) if repo in context.config.repositories ] + ["core", "extra"] if context.config.architecture.is_arm_variant(): repos += ["alarm"] for repo in repos: yield PacmanRepository(repo, url) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.arm64: "aarch64", Architecture.arm: "armv7h", }.get(arch) # fmt: skip if not a: die(f"Architecture {arch} is not supported by Arch Linux") return a @classmethod def latest_snapshot(cls, config: Config) -> str: url = join_mirror(config.mirror or "https://archive.archlinux.org", "repos/last/lastsync") return datetime.datetime.fromtimestamp(int(curl(config, url)), datetime.timezone.utc).strftime( "%Y/%m/%d" ) @classmethod def is_kernel_package(cls, package: str) -> bool: return package in ("linux", "linux-lts", "linux-zen", "linux-hardened", "linux-rt", "linux-rt-lts") mkosi-26/mkosi/distribution/azure.py000066400000000000000000000067321512054777600200130ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.config import Architecture from mkosi.context import Context from mkosi.distribution import ( Distribution, fedora, join_mirror, ) from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.log import die class Installer(fedora.Installer, distribution=Distribution.azure): @classmethod def pretty_name(cls) -> str: return "Azure Linux" @classmethod def default_release(cls) -> str: return "3.0" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def setup(cls, context: Context) -> None: setup_rpm(context, dbpath="/var/lib/rpm") Dnf.setup(context, list(cls.repositories(context)), filelists=False) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem", "azurelinux-release"], apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.snapshot: die(f"Snapshot= is not supported for {cls.pretty_name()}") gpgurls = ( find_rpm_gpgkey( context, "MICROSOFT-RPM-GPG-KEY", "https://raw.githubusercontent.com/rpm-software-management/distribution-gpg-keys/main/keys/azure-linux/MICROSOFT-RPM-GPG-KEY", ), ) if context.config.local_mirror: yield RpmRepository("base", f"baseurl={context.config.local_mirror}", gpgurls) return mirror = context.config.mirror or "https://packages.microsoft.com/azurelinux" url = join_mirror(mirror, context.config.release) for repo in ("base", "extended", "ms-oss", "ms-non-oss", "cloud-native", "nvidia"): yield RpmRepository( repo, f"baseurl={url}/prod/{repo}/$basearch", gpgurls, ) repo = "NVIDIA" if repo == "nvidia" else repo yield RpmRepository( f"{repo}-preview", f"baseurl={url}/preview/{repo}/$basearch", gpgurls, enabled=False, ) for repo in ("base", "cloud-native", "extended"): yield
RpmRepository( f"{repo}-debuginfo", f"baseurl={url}/prod/{repo}/debuginfo/$basearch", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-preview-debuginfo", f"baseurl={url}/preview/{repo}/debuginfo/$basearch", gpgurls, enabled=False, ) for repo in ("base", "cloud-native", "extended", "ms-oss"): yield RpmRepository( f"{repo}-source", f"baseurl={url}/prod/{repo}/srpms", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-source", f"baseurl={url}/preview/{repo}/srpms", gpgurls, enabled=False, ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64: "aarch64", Architecture.x86_64: "x86_64", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by {cls.pretty_name()}") return a mkosi-26/mkosi/distribution/centos.py000066400000000000000000000426331512054777600201610ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.curl import curl from mkosi.distribution import ( Distribution, DistributionInstaller, PackageType, join_mirror, ) from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.log import die from mkosi.util import startswith from mkosi.versioncomp import GenericVersion CENTOS_SIG_REPO_PRIORITY = 50 class Installer(DistributionInstaller, distribution=Distribution.centos): @classmethod def pretty_name(cls) -> str: return "CentOS" @classmethod def filesystem(cls) -> str: return "xfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "10" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.fedora @classmethod def major_release(cls, config: "Config") -> str: return config.release.partition(".")[0] @classmethod def package_manager(cls, config: "Config") -> type[Dnf]: return Dnf @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def dbpath(cls, context: Context) -> str: # The Hyperscale SIG uses /usr/lib/sysimage/rpm in its rebuild of rpm for C9S that's shipped in the # hyperscale-packages-experimental repository. if ( GenericVersion(context.config.release) > 9 or "hyperscale-packages-experimental" in context.config.repositories ): return "/usr/lib/sysimage/rpm" return "/var/lib/rpm" @classmethod def setup(cls, context: Context) -> None: if GenericVersion(context.config.release) <= 8: die(f"{cls.pretty_name()} Stream 8 or earlier variants are not supported") setup_rpm(context, dbpath=cls.dbpath(context)) Dnf.setup(context, list(cls.repositories(context))) (context.sandbox_tree / "etc/dnf/vars/stream").write_text( f"{cls.major_release(context.config)}-stream\n" ) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["basesystem"], apivfs=False) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.ppc64_le: "ppc64le", Architecture.s390x: "s390x", Architecture.arm64: "aarch64", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by {cls.pretty_name()}") return a @classmethod def gpgurls(cls, context: Context) -> tuple[str, ...]: # First, start with the names of the appropriate keys in /etc/pki/rpm-gpg. 
if GenericVersion(context.config.release) == 9: rel = "RPM-GPG-KEY-centosofficial" else: rel = "RPM-GPG-KEY-centosofficial-SHA256" one = find_rpm_gpgkey(context, rel, required=False) # Next, follow up with the names of the appropriate keys in /usr/share/distribution-gpg-keys. if GenericVersion(context.config.release) == 9: rel = "RPM-GPG-KEY-CentOS-Official" else: rel = "RPM-GPG-KEY-CentOS-Official-SHA256" # The key in /usr/share/distribution-gpg-keys is only required if we didn't find one in # /etc/pki/rpm-gpg. two = find_rpm_gpgkey(context, rel, f"https://www.centos.org/keys/{rel}", required=bool(one)) # Finally, look up the key for the SIG-Extras repository. sig = find_rpm_gpgkey( context, "RPM-GPG-KEY-CentOS-SIG-Extras", "https://www.centos.org/keys/RPM-GPG-KEY-CentOS-SIG-Extras", ) return tuple(key for key in (one, two, sig) if key is not None) @classmethod def repository_variants( cls, context: Context, gpgurls: tuple[str, ...], repo: str, ) -> Iterable[RpmRepository]: mirror = context.config.mirror if not mirror and context.config.snapshot: mirror = "https://composes.stream.centos.org" if context.config.snapshot and mirror not in ( "https://composes.stream.centos.org", "https://mirror.facebook.net/centos-composes", ): die( f"Snapshot= is only supported for {cls.pretty_name()} if Mirror=https://composes.stream.centos.org" ) if ( mirror in ("https://composes.stream.centos.org", "https://mirror.facebook.net/centos-composes") and not context.config.snapshot ): die(f"Snapshot= must be used on {cls.pretty_name()} if Mirror={mirror}") if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", gpgurls) elif mirror: if mirror == "https://composes.stream.centos.org": subdir = f"stream-{context.config.release}/production" elif mirror == "https://mirror.facebook.net/centos-composes": subdir = context.config.release elif repo == "extras": subdir = "SIGs/$stream" else: subdir = "$stream" if context.config.snapshot: subdir += f"/CentOS-Stream-{context.config.release}-{context.config.snapshot}/compose" if repo == "extras": yield RpmRepository( repo.lower(), f"baseurl={join_mirror(mirror, f'{subdir}/{repo}/$basearch/extras-common')}", gpgurls, ) yield RpmRepository( f"{repo.lower()}-source", f"baseurl={join_mirror(mirror, f'{subdir}/{repo}/source/extras-common')}", gpgurls, enabled=False, ) else: yield RpmRepository( repo.lower(), f"baseurl={join_mirror(mirror, f'{subdir}/{repo}/$basearch/os')}", gpgurls, ) yield RpmRepository( f"{repo.lower()}-debuginfo", f"baseurl={join_mirror(mirror, f'{subdir}/{repo}/$basearch/debug/tree')}", gpgurls, enabled=False, ) yield RpmRepository( f"{repo.lower()}-source", f"baseurl={join_mirror(mirror, f'{subdir}/{repo}/source/tree')}", gpgurls, enabled=False, ) else: url = "metalink=https://mirrors.centos.org/metalink" if repo == "extras": yield RpmRepository( repo.lower(), f"{url}?arch=$basearch&repo=centos-extras-sig-extras-common-$stream", gpgurls, ) yield RpmRepository( f"{repo.lower()}-source", f"{url}?arch=source&repo=centos-extras-sig-extras-common-source-$stream", gpgurls, enabled=False, ) else: yield RpmRepository( repo.lower(), f"{url}?arch=$basearch&repo=centos-{repo.lower()}-$stream", gpgurls, ) yield RpmRepository( f"{repo.lower()}-debuginfo", f"{url}?arch=$basearch&repo=centos-{repo.lower()}-debug-$stream", gpgurls, enabled=False, ) yield RpmRepository( f"{repo.lower()}-source", f"{url}?arch=source&repo=centos-{repo.lower()}-source-$stream", gpgurls, enabled=False, ) @classmethod def repositories(cls, context: 
Context) -> Iterable[RpmRepository]: gpgurls = cls.gpgurls(context) if context.config.local_mirror: yield from cls.repository_variants(context, gpgurls, "AppStream") return yield from cls.repository_variants(context, gpgurls, "BaseOS") yield from cls.repository_variants(context, gpgurls, "AppStream") yield from cls.repository_variants(context, gpgurls, "CRB") if not context.config.snapshot: yield from cls.repository_variants(context, gpgurls, "extras") yield from cls.epel_repositories(context) yield from cls.sig_repositories(context) @classmethod def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]: # Since EPEL 10, there's an associated minor release for every RHEL minor release. if GenericVersion(context.config.release) >= 10: release = context.config.release else: release = cls.major_release(context.config) gpgurls = ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-EPEL-{cls.major_release(context.config)}", f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{cls.major_release(context.config)}", ), ) if context.config.local_mirror: return if mirror := context.config.mirror: # epel-next does not exist anymore since EPEL 10. repodirs = [ ("epel", "epel"), ("epel-testing", "epel/testing"), ] if GenericVersion(context.config.release) < 10: repodirs += [ ("epel-next", "epel/next"), ("epel-next-testing", "epel/testing/next"), ] for repo, dir in repodirs: # For EPEL we make the assumption that epel is mirrored in the parent directory of the mirror # URL and path we were given. Since this doesn't work for all scenarios, we also allow # overriding the mirror via an environment variable. url = context.config.finalize_environment().get( "EPEL_MIRROR", join_mirror(mirror, "../fedora") ) yield RpmRepository( repo, f"baseurl={url}/{dir}/{release}/Everything/$basearch", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-debuginfo", f"baseurl={url}/{dir}/{release}/Everything/$basearch/debug", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-source", f"baseurl={url}/{dir}/{release}/Everything/source/tree", gpgurls, enabled=False, ) else: url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch" # epel-next does not exist anymore since EPEL 10. repos = ["epel"] if GenericVersion(context.config.release) < 10: repos += ["epel-next"] for repo in repos: yield RpmRepository( repo, f"{url}&repo={repo}-{release}", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-debuginfo", f"{url}&repo={repo}-debug-{release}", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-source", f"{url}&repo={repo}-source-{release}", gpgurls, enabled=False, ) yield RpmRepository( "epel-testing", f"{url}&repo=testing-epel{release}", gpgurls, enabled=False, ) yield RpmRepository( "epel-testing-debuginfo", f"{url}&repo=testing-debug-epel{release}", gpgurls, enabled=False, ) yield RpmRepository( "epel-testing-source", f"{url}&repo=testing-source-epel{release}", gpgurls, enabled=False, ) # epel-next does not exist anymore since EPEL 10. 
if GenericVersion(context.config.release) < 10: yield RpmRepository( "epel-next-testing", f"{url}&repo=epel-testing-next-{release}", gpgurls, enabled=False, ) yield RpmRepository( "epel-next-testing-debuginfo", f"{url}&repo=epel-testing-next-debug-{release}", gpgurls, enabled=False, ) yield RpmRepository( "epel-next-testing-source", f"{url}&repo=epel-testing-next-source-{release}", gpgurls, enabled=False, ) @classmethod def sig_repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.local_mirror: return sigs = ( ( "hyperscale", ( f"packages-{c}" for c in ("main", "experimental", "facebook", "hotfixes", "spin", "intel", "kernel") ), ("RPM-GPG-KEY-CentOS-SIG-HyperScale",), ), ) for sig, components, keys in sigs: gpgurls = tuple( find_rpm_gpgkey(context, key, f"https://www.centos.org/keys/{key}") for key in keys ) for c in components: if mirror := context.config.mirror: yield RpmRepository( f"{sig}-{c}", f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/$basearch/{c}')}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-debuginfo", f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/$basearch/{c}/debug')}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-source", f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/source/{c}')}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) else: url = "metalink=https://mirrors.centos.org/metalink" yield RpmRepository( f"{sig}-{c}", f"{url}?arch=$basearch&repo=centos-{sig}-sig-{c}-$stream", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-debuginfo", f"{url}?arch=$basearch&repo=centos-{sig}-sig-{c}-debug-$stream", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-source", f"{url}?arch=source&repo=centos-{sig}-sig-{c}-source-$stream", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-testing", f"baseurl=https://buildlogs.centos.org/centos/$stream/{sig}/$basearch/{c}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-testing-debuginfo", f"baseurl=https://buildlogs.centos.org/centos/$stream/{sig}/$basearch/{c}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) @classmethod def latest_snapshot(cls, config: Config) -> str: mirror = config.mirror or "https://composes.stream.centos.org" if mirror == "https://mirror.facebook.net/centos-composes": subdir = config.release else: subdir = f"stream-{config.release}/production" url = join_mirror(mirror, f"{subdir}/latest-CentOS-Stream/compose/.composeinfo") for line in curl(config, url).splitlines(): if snapshot := startswith(line, f"id = CentOS-Stream-{config.release}-"): return snapshot die("composeinfo is missing compose ID field") @classmethod def is_kernel_package(cls, package: str) -> bool: return package in ("kernel", "kernel-core") mkosi-26/mkosi/distribution/custom.py000066400000000000000000000013651512054777600201750ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.distribution import Distribution, DistributionInstaller from mkosi.installer import PackageManager class Installer(DistributionInstaller, distribution=Distribution.custom): @classmethod def pretty_name(cls) -> str: return "Custom" @classmethod def architecture(cls, arch: Architecture) -> str: return str(arch) 
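# A self-contained sketch (illustrative names only) of the registration pattern used
# by DistributionInstaller: every subclass passes its Distribution as a class keyword
# argument, __init_subclass__ records it, and Distribution.installer looks it up after
# lazily importing the right mkosi.distribution submodule.
class ExampleBase:
    registry: dict[str, type["ExampleBase"]] = {}

    def __init_subclass__(cls, key: str) -> None:
        ExampleBase.registry[key] = cls

class ExampleCustom(ExampleBase, key="custom"):
    pass

assert ExampleBase.registry["custom"] is ExampleCustom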
@classmethod def package_manager(cls, config: Config) -> type[PackageManager]: return PackageManager @classmethod def setup(cls, context: Context) -> None: pass @classmethod def install(cls, context: Context) -> None: pass mkosi-26/mkosi/distribution/debian.py000066400000000000000000000304011512054777600200760ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import json import tempfile from collections.abc import Iterable, Sequence from pathlib import Path from typing import cast from mkosi.archive import extract_tar from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.curl import curl from mkosi.distribution import Distribution, DistributionInstaller, PackageType, join_mirror from mkosi.installer.apt import Apt, AptRepository from mkosi.log import die from mkosi.run import run, workdir from mkosi.sandbox import umask class Installer(DistributionInstaller, distribution=Distribution.debian): @classmethod def pretty_name(cls) -> str: return "Debian" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.deb @classmethod def default_release(cls) -> str: return "testing" @classmethod def package_manager(cls, config: Config) -> type[Apt]: return Apt @classmethod def repositories(cls, context: Context, for_image: bool = False) -> Iterable[AptRepository]: types = ("deb", "deb-src") components = ("main", *context.config.repositories) mirror = None if for_image else context.config.mirror snapshot = None if for_image else context.config.snapshot if context.config.local_mirror and not for_image: yield AptRepository( types=("deb",), url=context.config.local_mirror, suite=context.config.release, components=("main",), signedby=None, ) return if mirror: pass elif snapshot: mirror = "https://snapshot.debian.org" else: mirror = "http://deb.debian.org" if snapshot: url = join_mirror(mirror, f"archive/debian/{snapshot}") else: url = join_mirror(mirror, "debian") signedby = Path("/usr/share/keyrings/debian-archive-keyring.gpg") yield AptRepository( types=types, url=url, suite=context.config.release, components=components, signedby=signedby, ) # Debug repos are typically not mirrored. if snapshot: url = join_mirror(mirror, f"archive/debian-debug/{snapshot}") else: url = join_mirror(mirror, "debian-debug") yield AptRepository( types=types, url=url, suite=f"{context.config.release}-debug", components=components, signedby=signedby, ) if context.config.release in ("unstable", "sid"): return if not snapshot: yield AptRepository( types=types, url=join_mirror(mirror, "debian"), suite=f"{context.config.release}-updates", components=components, signedby=signedby, ) # Security updates repos are never mirrored. if snapshot: url = join_mirror(mirror, f"archive/debian-security/{snapshot}") else: url = join_mirror(mirror, "debian-security") yield AptRepository( types=types, url=url, suite=f"{context.config.release}-security", components=components, signedby=signedby, ) @classmethod def setup(cls, context: Context) -> None: Apt.setup(context, list(cls.repositories(context))) @classmethod def install(cls, context: Context) -> None: # Instead of using debootstrap, we replicate its core functionality here. 
Because dpkg does not have # an option to delay running pre-install maintainer scripts when it installs a package, it's # impossible to use apt directly to bootstrap a Debian chroot since dpkg will try to run a maintainer # script which depends on some basic tool to be available in the chroot from a deb which hasn't been # unpacked yet, causing the script to fail. To avoid these issues, we have to extract all the # essential debs first, and only then run the maintainer scripts for them. # First, we set up merged usr. This list is taken from # https://salsa.debian.org/installer-team/debootstrap/-/blob/master/functions#L1369. subdirs = ["bin", "sbin", "lib"] + { "amd64" : ["lib32", "lib64", "libx32"], "i386" : ["lib64", "libx32"], "mips" : ["lib32", "lib64"], "mipsel" : ["lib32", "lib64"], "mips64el" : ["lib32", "lib64", "libo32"], "loong64" : ["lib32", "lib64"], "powerpc" : ["lib64"], "ppc64" : ["lib32", "lib64"], "ppc64el" : ["lib64"], "s390x" : ["lib32"], "sparc" : ["lib64"], "sparc64" : ["lib32", "lib64"], "x32" : ["lib32", "lib64", "libx32"], }.get( context.config.distribution.installer.architecture(context.config.architecture), [] ) # fmt: skip with umask(~0o755): for d in subdirs: (context.root / d).symlink_to(f"usr/{d}") (context.root / f"usr/{d}").mkdir(parents=True, exist_ok=True) # Next, we invoke apt-get install to download all the essential packages. With # DPkg::Pre-Install-Pkgs, we specify a shell command that will receive the list of packages that will # be installed on stdin. By configuring Debug::pkgDPkgPm=1, apt-get install will not actually # execute any dpkg commands, so all it does is download the essential debs and tell us their full path in # the apt cache without actually installing them. with tempfile.NamedTemporaryFile(mode="r") as f: Apt.invoke( context, "install", [ "-oDebug::pkgDPkgPm=1", f"-oDPkg::Pre-Install-Pkgs::=cat >{workdir(Path(f.name))}", "?essential", "base-files", ], options=["--bind", f.name, workdir(Path(f.name))], ) essential = f.read().strip().splitlines() # Now, extract the debs to the chroot by first extracting the filesystem tar file out of the deb and # then extracting the tar file into the chroot. for deb in essential: # If a deb path is in the form of "/var/cache/apt/<deb>", we transform it to the corresponding # path in mkosi's package cache directory. If it's relative to /repository, we transform it to # the corresponding path in mkosi's local package repository. Otherwise, we use the path as is. if Path(deb).is_relative_to("/var/cache"): path = context.config.package_cache_dir_or_default() / Path(deb).relative_to("/var") elif Path(deb).is_relative_to("/repository"): path = context.repository / Path(deb).relative_to("/repository") else: path = Path(deb) with open(path, "rb") as i, tempfile.NamedTemporaryFile() as o: run( ["dpkg-deb", "--fsys-tarfile", "/dev/stdin"], stdin=i, stdout=o, sandbox=context.sandbox(), ) extract_tar( Path(o.name), context.root, log=False, options=( [f"--exclude=./{glob}" for glob in Apt.documentation_exclude_globs] if not context.config.with_docs else [] ), sandbox=context.sandbox, ) # Finally, run apt to properly install packages in the chroot without having to worry that maintainer # scripts won't find basic tools that they depend on.
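# For example (purely illustrative filename): a downloaded deb such as
#   /var/cache/apt/archives/base-files_13.8_amd64.deb
# reduces to the package name "base-files" in the list comprehension below.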
cls.install_packages( context, [Path(deb).name.partition("_")[0].removesuffix(".deb") for deb in essential] ) fixup_os_release(context) @classmethod def install_packages( cls, context: Context, packages: Sequence[str], *, apivfs: bool = True, allow_downgrade: bool = False, ) -> None: super().install_packages(context, packages, apivfs=apivfs, allow_downgrade=allow_downgrade) if "apt" in packages: install_apt_sources(context, cls.repositories(context, for_image=True)) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64: "arm64", Architecture.arm: "armhf", Architecture.alpha: "alpha", Architecture.x86_64: "amd64", Architecture.x86: "i386", Architecture.ia64: "ia64", Architecture.loongarch64: "loong64", Architecture.mips64_le: "mips64el", Architecture.mips_le: "mipsel", Architecture.parisc: "hppa", Architecture.ppc64_le: "ppc64el", Architecture.ppc64: "ppc64", Architecture.riscv64: "riscv64", Architecture.s390x: "s390x", Architecture.s390: "s390", }.get(arch) # fmt: skip if not a: die(f"Architecture {arch} is not supported by {cls.pretty_name()}") return a @classmethod def latest_snapshot(cls, config: Config) -> str: url = join_mirror(config.mirror or "https://snapshot.debian.org", "mr/timestamp") return cast(str, json.loads(curl(config, url))["result"]["debian"][-1]) @classmethod def is_kernel_package(cls, package: str) -> bool: return package.startswith("linux-image-") def install_apt_sources(context: Context, repos: Iterable[AptRepository]) -> None: sources = context.root / f"etc/apt/sources.list.d/{context.config.release}.sources" if not sources.exists(): with umask(~0o755): sources.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644), sources.open("w") as f: for repo in repos: f.write(str(repo)) def fixup_os_release(context: Context) -> None: if context.config.release not in ("unstable", "sid"): return # Debian being Debian means we need to special-case the handling of os-release. Fix the content to actually # match what we are building, and set up a diversion so that dpkg doesn't overwrite it on package # updates. Upstream bug report: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1008735. for candidate in ["etc/os-release", "usr/lib/os-release", "usr/lib/initrd-release"]: osrelease = context.root / candidate newosrelease = osrelease.with_suffix(".new") if not osrelease.is_file(): continue if osrelease.is_symlink() and candidate != "etc/os-release": continue with osrelease.open("r") as old, newosrelease.open("w") as new: for line in old.readlines(): if line.startswith("VERSION_CODENAME="): new.write("VERSION_CODENAME=sid\n") else: new.write(line) # On dpkg distributions we cannot simply overwrite /etc/os-release as it is owned by a package. We # need to set up a diversion first, so that it is not overwritten by package updates. We do this for # /etc/os-release as that will be overwritten on package updates and has precedence over # /usr/lib/os-release, and ignore the latter and assume that if a usr-only image is built then the # package manager will not run on it.
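# The run() call below is roughly equivalent to executing, inside the image:
#   dpkg-divert --local --add --rename --divert /etc/os-release.dpkg /etc/os-release
# so that dpkg-owned updates land in /etc/os-release.dpkg while the fixed-up copy
# we install at /etc/os-release stays untouched.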
if candidate == "etc/os-release": run( [ "dpkg-divert", "--quiet", "--root=/buildroot", "--local", "--add", "--rename", "--divert", f"/{candidate}.dpkg", f"/{candidate}", ], sandbox=context.sandbox(options=context.rootoptions()), ) newosrelease.rename(osrelease) mkosi-26/mkosi/distribution/fedora.py000066400000000000000000000263031512054777600201220ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import re import subprocess import tempfile from collections.abc import Iterable from pathlib import Path from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.curl import curl from mkosi.distribution import ( Distribution, DistributionInstaller, PackageType, join_mirror, ) from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.log import die from mkosi.util import tuplify DISTRIBUTION_GPG_KEYS_UPSTREAM = ( "https://raw.githubusercontent.com/rpm-software-management/distribution-gpg-keys/main/keys/fedora" ) def read_remote_rawhide_key_symlink(context: Context) -> str: # https://fedoraproject.org/fedora.gpg is always outdated when the rawhide key changes. Instead, # let's fetch it from distribution-gpg-keys on github if necessary, which is generally up-to-date. with tempfile.TemporaryDirectory() as d: # The rawhide key is a symlink and github doesn't redirect those to the actual file for some reason curl( context.config, f"{DISTRIBUTION_GPG_KEYS_UPSTREAM}/RPM-GPG-KEY-fedora-rawhide-primary", output_dir=Path(d), ) return (Path(d) / "RPM-GPG-KEY-fedora-rawhide-primary").read_text() @tuplify def find_fedora_rpm_gpgkeys(context: Context) -> Iterable[str]: versionre = re.compile(r"RPM-GPG-KEY-fedora-(\d+)-primary", re.ASCII) # ELN uses the rawhide GPG keys. release = "rawhide" if context.config.release == "eln" else context.config.release if release == "rawhide" and context.config.repository_key_fetch: # Rawhide is a moving target and signed with a different GPG key every time a new Fedora release is # done. In distribution-gpg-keys this is modeled by a symlink that is continuously updated to point # to the current GPG key for rawhide. Of course, this symlink gets outdated when using a locally # installed distribution-gpg-keys package. If we're allowed to look up GPG keys remotely, look up the # current rawhide version remotely and use the associated remote key. key = read_remote_rawhide_key_symlink(context) if not (rawhide_will_be := versionre.match(key)): die(f"Missing Fedora version in remote rawhide key {key} from distribution-gpg-keys") version = int(rawhide_will_be.group(1)) yield f"{DISTRIBUTION_GPG_KEYS_UPSTREAM}/RPM-GPG-KEY-fedora-{version}-primary" # Also use the N+1 key if it exists to avoid issues when rawhide has been moved to the next key but # the rawhide symlink in distribution-gpg-keys hasn't been updated yet. 
try: with tempfile.TemporaryDirectory() as d: curl( context.config, f"{DISTRIBUTION_GPG_KEYS_UPSTREAM}/RPM-GPG-KEY-fedora-{version + 1}-primary", output_dir=Path(d), log=False, ) yield f"{DISTRIBUTION_GPG_KEYS_UPSTREAM}/RPM-GPG-KEY-fedora-{version + 1}-primary" except subprocess.CalledProcessError: pass return key = find_rpm_gpgkey( context, key=f"RPM-GPG-KEY-fedora-{release}-primary", fallback=f"{DISTRIBUTION_GPG_KEYS_UPSTREAM}/RPM-GPG-KEY-fedora-{release}-primary", ) yield key if release == "rawhide" and (rawhide_will_be := versionre.match(Path(key).name)): # When querying the rawhide version remotely, we add the N+1 key as the symlink might not have been # updated yet. We do expect the symlink update to happen in reasonable time so we only add the N+1 # key. When using a locally installed distribution-gpg-keys package on older Fedora versions, there's # a non-zero chance that rawhide might already be using the N+2 key. So let's play it safe and add # all newer keys in this case. version = int(rawhide_will_be.group(1)) i = 1 while newerkey := find_rpm_gpgkey( context, key=f"RPM-GPG-KEY-fedora-{version + i}-primary", required=False, ): yield newerkey i += 1 class Installer(DistributionInstaller, distribution=Distribution.fedora): @classmethod def pretty_name(cls) -> str: return "Fedora Linux" @classmethod def filesystem(cls) -> str: return "btrfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "rawhide" @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def package_manager(cls, config: Config) -> type[Dnf]: return Dnf @classmethod def setup(cls, context: Context) -> None: setup_rpm(context) Dnf.setup( context, list(cls.repositories(context)), filelists=False, metadata_expire="6h" if context.config.release in ("eln", "rawhide") else None, ) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["basesystem"], apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: gpgurls = find_fedora_rpm_gpgkeys(context) if context.config.snapshot and context.config.release != "rawhide": die(f"Snapshot= is only supported for rawhide on {cls.pretty_name()}") mirror = context.config.mirror if not mirror and context.config.snapshot: mirror = "https://kojipkgs.fedoraproject.org" if context.config.snapshot and mirror != "https://kojipkgs.fedoraproject.org": die( f"Snapshot= is only supported for {cls.pretty_name()} if Mirror=https://kojipkgs.fedoraproject.org" ) if mirror == "https://kojipkgs.fedoraproject.org" and not context.config.snapshot: die( f"Snapshot= must be used on {cls.pretty_name()} if Mirror=https://kojipkgs.fedoraproject.org" ) if context.config.local_mirror: yield RpmRepository("fedora", f"baseurl={context.config.local_mirror}", gpgurls) return if context.config.release == "eln": mirror = context.config.mirror or "https://dl.fedoraproject.org/pub/eln/1/" for repo in ("AppStream", "BaseOS", "Extras", "CRB"): url = f"baseurl={join_mirror(mirror, repo)}" yield RpmRepository(repo.lower(), f"{url}/$basearch/os", gpgurls) yield RpmRepository( f"{repo.lower()}-debuginfo", f"{url}/$basearch/debug/tree", gpgurls, enabled=False ) yield RpmRepository(f"{repo.lower()}-source", f"{url}/source/tree", gpgurls, enabled=False) elif mirror: if mirror == "https://kojipkgs.fedoraproject.org": subdir = f"compose/{context.config.release}" else: subdir = "linux/" subdir += "development" if context.config.release == "rawhide" else "releases" subdir 
+= "/$releasever" if context.config.snapshot: subdir += f"/Fedora-{context.config.release.capitalize()}-{context.config.snapshot}/compose" url = f"baseurl={join_mirror(mirror, f'{subdir}/Everything')}" yield RpmRepository("fedora", f"{url}/$basearch/os", gpgurls) yield RpmRepository("fedora-debuginfo", f"{url}/$basearch/debug/tree", gpgurls, enabled=False) yield RpmRepository("fedora-source", f"{url}/source/tree", gpgurls, enabled=False) if context.config.release != "rawhide": url = f"baseurl={join_mirror(mirror, 'linux/updates/$releasever/Everything')}" yield RpmRepository("updates", f"{url}/$basearch", gpgurls) yield RpmRepository("updates-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False) yield RpmRepository("updates-source", f"{url}/source/tree", gpgurls, enabled=False) url = f"baseurl={join_mirror(mirror, 'linux/updates/testing/$releasever/Everything')}" yield RpmRepository("updates-testing", f"{url}/$basearch", gpgurls, enabled=False) yield RpmRepository( "updates-testing-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False ) yield RpmRepository("updates-testing-source", f"{url}/source/tree", gpgurls, enabled=False) else: url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch" yield RpmRepository("fedora", f"{url}&repo=fedora-$releasever", gpgurls) yield RpmRepository( "fedora-debuginfo", f"{url}&repo=fedora-debug-$releasever", gpgurls, enabled=False ) yield RpmRepository( "fedora-source", f"{url}&repo=fedora-source-$releasever", gpgurls, enabled=False ) if context.config.release != "rawhide": yield RpmRepository("updates", f"{url}&repo=updates-released-f$releasever", gpgurls) yield RpmRepository( "updates-debuginfo", f"{url}&repo=updates-released-debug-f$releasever", gpgurls, enabled=False, ) yield RpmRepository( "updates-source", f"{url}&repo=updates-released-source-f$releasever", gpgurls, enabled=False, ) yield RpmRepository( "updates-testing", f"{url}&repo=updates-testing-f$releasever", gpgurls, enabled=False ) yield RpmRepository( "updates-testing-debuginfo", f"{url}&repo=updates-testing-debug-f$releasever", gpgurls, enabled=False, ) yield RpmRepository( "updates-testing-source", f"{url}&repo=updates-testing-source-f$releasever", gpgurls, enabled=False, ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64: "aarch64", Architecture.mips64_le: "mips64el", Architecture.mips_le: "mipsel", Architecture.ppc64_le: "ppc64le", Architecture.riscv64: "riscv64", Architecture.s390x: "s390x", Architecture.x86_64: "x86_64", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Fedora") return a @classmethod def latest_snapshot(cls, config: Config) -> str: mirror = config.mirror or "https://kojipkgs.fedoraproject.org" url = join_mirror( mirror, f"compose/{config.release}/latest-Fedora-{config.release.capitalize()}/COMPOSE_ID" ) return curl(config, url).removeprefix(f"Fedora-{config.release.capitalize()}-").strip() @classmethod def is_kernel_package(cls, package: str) -> bool: return package in ("kernel", "kernel-core") mkosi-26/mkosi/distribution/kali.py000066400000000000000000000037331512054777600176040ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from pathlib import Path from mkosi.config import Architecture from mkosi.context import Context from mkosi.distribution import Distribution, debian from mkosi.installer.apt import AptRepository from mkosi.log import die class Installer(debian.Installer, distribution=Distribution.kali): 
@classmethod def pretty_name(cls) -> str: return "Kali Linux" @classmethod def default_release(cls) -> str: return "kali-rolling" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.kali @classmethod def repositories(cls, context: Context, for_image: bool = False) -> Iterable[AptRepository]: mirror = None if for_image else context.config.mirror if not mirror: mirror = "http://http.kali.org/kali" if context.config.snapshot and not for_image: die(f"Snapshot= is not supported for {cls.pretty_name()}") if context.config.local_mirror and not for_image: yield AptRepository( types=("deb",), url=context.config.local_mirror, suite=context.config.release, components=("main",), signedby=None, ) return yield AptRepository( types=("deb", "deb-src"), url=mirror, suite=context.config.release, components=("main", *context.config.repositories), signedby=Path("/usr/share/keyrings/kali-archive-keyring.gpg"), ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64: "arm64", Architecture.arm: "armhf", Architecture.x86_64: "amd64", Architecture.x86: "i386", }.get(arch) if not a: die(f"Architecture {arch} is not supported by {cls.pretty_name()}") return a mkosi-26/mkosi/distribution/mageia.py000066400000000000000000000043501512054777600201030ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.config import Architecture from mkosi.context import Context from mkosi.distribution import Distribution, fedora, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(fedora.Installer, distribution=Distribution.mageia): @classmethod def pretty_name(cls) -> str: return "Mageia" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def default_release(cls) -> str: return "cauldron" @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.snapshot: die(f"Snapshot= is not supported for {cls.pretty_name()}") gpgurls = ( find_rpm_gpgkey( context, "RPM-GPG-KEY-Mageia", "https://mirrors.kernel.org/mageia/distrib/$releasever/$basearch/media/core/release/media_info/pubkey", ), ) if context.config.local_mirror: yield RpmRepository("core-release", f"baseurl={context.config.local_mirror}", gpgurls) return if context.config.mirror: url = ( f"baseurl={join_mirror(context.config.mirror, 'distrib/$releasever/$basearch/media/core/')}" ) yield RpmRepository("core-release", f"{url}/release", gpgurls) yield RpmRepository("core-updates", f"{url}/updates/", gpgurls) else: url = "mirrorlist=https://www.mageia.org/mirrorlist/?release=$releasever&arch=$basearch&section=core" yield RpmRepository("core-release", f"{url}&repo=release", gpgurls) yield RpmRepository("core-updates", f"{url}&repo=updates", gpgurls) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.arm64: "aarch64", }.get(arch) # fmt: skip if not a: die(f"Architecture {arch} is not supported by Mageia") return a mkosi-26/mkosi/distribution/openmandriva.py000066400000000000000000000037771512054777600213510ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.config import Architecture from mkosi.context import Context from mkosi.distribution import Distribution, fedora, join_mirror from
mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(fedora.Installer, distribution=Distribution.openmandriva): @classmethod def pretty_name(cls) -> str: return "OpenMandriva" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def default_release(cls) -> str: return "cooker" @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.snapshot: die(f"Snapshot= is not supported for {cls.pretty_name()}") mirror = context.config.mirror or "http://mirror.openmandriva.org" gpgurls = ( find_rpm_gpgkey( context, "RPM-GPG-KEY-OpenMandriva", "https://raw.githubusercontent.com/OpenMandrivaAssociation/openmandriva-repos/master/RPM-GPG-KEY-OpenMandriva", ), ) if context.config.local_mirror: yield RpmRepository("main-release", f"baseurl={context.config.local_mirror}", gpgurls) return url = f"baseurl={join_mirror(mirror, '$releasever/repository/$basearch/main')}" yield RpmRepository("main-release", f"{url}/release", gpgurls) yield RpmRepository("main-updates", f"{url}/updates", gpgurls) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.arm64: "aarch64", Architecture.riscv64: "riscv64", }.get(arch) # fmt: skip if not a: die(f"Architecture {arch} is not supported by OpenMandriva") return a mkosi-26/mkosi/distribution/opensuse.py000066400000000000000000000255731512054777600205150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import tempfile from collections.abc import Iterable from pathlib import Path from typing import Union from xml.etree import ElementTree from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.curl import curl from mkosi.distribution import Distribution, DistributionInstaller, PackageType, join_mirror from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.installer.zypper import Zypper from mkosi.log import die from mkosi.mounts import finalize_certificate_mounts from mkosi.run import run from mkosi.versioncomp import GenericVersion class Installer(DistributionInstaller, distribution=Distribution.opensuse): @classmethod def pretty_name(cls) -> str: return "openSUSE" @classmethod def filesystem(cls) -> str: return "btrfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "tumbleweed" @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def package_manager(cls, config: Config) -> Union[type[Dnf], type[Zypper]]: if config.find_binary("zypper"): return Zypper else: return Dnf @classmethod def setup(cls, context: Context) -> None: setup_rpm(context, dbbackend="ndb") cls.package_manager(context.config).setup(context, list(cls.repositories(context))) @classmethod def install(cls, context: Context) -> None: packages = ["filesystem"] if not any(p.endswith("-release") for p in context.config.packages): if context.config.release in ("current", "stable", "leap") or ( context.config.release != "tumbleweed" and GenericVersion(context.config.release) >= 16 ): packages += ["Leap-release"] else: packages += ["openSUSE-release"] cls.install_packages(context, packages, apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.local_mirror: yield
RpmRepository(id="local-mirror", url=f"baseurl={context.config.local_mirror}", gpgurls=()) return zypper = cls.package_manager(context.config) is Zypper mirror = context.config.mirror or "https://download.opensuse.org" if context.config.release == "tumbleweed": gpgkeys = tuple( p for key in ("RPM-GPG-KEY-openSUSE-Tumbleweed", "RPM-GPG-KEY-openSUSE") if (p := find_rpm_gpgkey(context, key, required=False)) ) if not gpgkeys and not context.config.repository_key_fetch: die( "openSUSE GPG keys not found in /usr/share/distribution-gpg-keys", hint="Make sure the distribution-gpg-keys package is installed", ) if zypper and gpgkeys: run( [ "rpm", "--root=/buildroot", "--import", *(key.removeprefix("file://") for key in gpgkeys), ], sandbox=context.sandbox( options=[ *context.rootoptions(), *finalize_certificate_mounts(context.config), ], ), ) # fmt: skip if context.config.snapshot: if context.config.architecture != Architecture.x86_64: die(f"Snapshot= is only supported for x86-64 on {cls.pretty_name()}") subdir = f"history/{context.config.snapshot}" else: if context.config.architecture == Architecture.x86_64: subdir = "" elif context.config.architecture == Architecture.arm64: subdir = "ports/aarch64" elif context.config.architecture == Architecture.arm: subdir = "ports/armv7hl" elif context.config.architecture in ( Architecture.ppc64_le, Architecture.ppc64, Architecture.ppc, ): subdir = "ports/ppc" elif context.config.architecture in (Architecture.s390x, Architecture.s390): subdir = "ports/zsystems" elif context.config.architecture == Architecture.riscv64: subdir = "ports/riscv" else: die(f"{context.config.architecture} not supported by openSUSE Tumbleweed") for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/tumbleweed/repo/{repo}") yield RpmRepository( id=repo, url=f"baseurl={url}", gpgurls=gpgkeys or (fetch_gpgurls(context, url) if not zypper else ()), enabled=repo == "oss", ) if not context.config.snapshot: for d in ("debug", "source"): if repo == "non-oss" and d == "debug": continue url = join_mirror(mirror, f"{subdir}/{d}/tumbleweed/repo/{repo}") yield RpmRepository( id=f"{repo}-{d}", url=f"baseurl={url}", gpgurls=gpgkeys or (fetch_gpgurls(context, url) if not zypper else ()), enabled=False, ) if not context.config.snapshot: url = join_mirror(mirror, f"{subdir}/update/tumbleweed") yield RpmRepository( id="oss-update", url=f"baseurl={url}", gpgurls=gpgkeys or (fetch_gpgurls(context, url) if not zypper else ()), ) url = join_mirror(mirror, f"{subdir}/update/tumbleweed-non-oss") yield RpmRepository( id="non-oss-update", url=f"baseurl={url}", gpgurls=gpgkeys or (fetch_gpgurls(context, url) if not zypper else ()), enabled=False, ) else: if context.config.snapshot: die(f"Snapshot= is only supported for Tumbleweed on {cls.pretty_name()}") if ( context.config.release in ("current", "stable", "leap") and context.config.architecture != Architecture.x86_64 ): die( f"{cls.pretty_name()} only supports current and stable releases " "for the x86-64 architecture", hint="Specify either tumbleweed or a specific leap release such as 15.6", ) if context.config.release in ("current", "stable", "leap"): release = "openSUSE-current" else: release = f"leap/{context.config.release}" if context.config.architecture == Architecture.x86_64: subdir = "" elif context.config.architecture == Architecture.arm64: subdir = "ports/aarch64" elif context.config.architecture == Architecture.arm: subdir = "ports/armv7hl" elif context.config.architecture in ( Architecture.ppc64_le, Architecture.ppc64, 
Architecture.ppc, ): subdir = "ports/ppc" elif context.config.architecture in (Architecture.s390x, Architecture.s390): subdir = "ports/zsystems" else: die(f"{context.config.architecture} not supported by openSUSE {context.config.release}") for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/distribution/{release}/repo/{repo}") yield RpmRepository( id=repo, url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=repo == "oss", ) for d in ("debug", "source"): for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/{d}/distribution/{release}/repo/{repo}") yield RpmRepository( id=f"{repo}-{d}", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=False, ) if ( context.config.release in ("current", "stable", "leap") or GenericVersion(context.config.release) >= 16 ): subdir += f"distribution/{release}/repo" else: subdir += f"update/{release}" for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/{repo}") yield RpmRepository( id=f"{repo}-update", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=repo == "oss", ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.arm64: "aarch64", Architecture.ppc64_le: "ppc64le", Architecture.riscv64: "riscv64", Architecture.s390x: "s390x", }.get(arch) # fmt: skip if not a: die(f"Architecture {arch} is not supported by openSUSE") return a @classmethod def latest_snapshot(cls, config: Config) -> str: url = join_mirror(config.mirror or "https://download.opensuse.org", "history/latest") return curl(config, url).strip() @classmethod def is_kernel_package(cls, package: str) -> bool: return package in ("kernel-default", "kernel-kvmsmall") def fetch_gpgurls(context: Context, repourl: str) -> tuple[str, ...]: gpgurls = [f"{repourl}/repodata/repomd.xml.key"] with tempfile.TemporaryDirectory() as d: curl(context.config, f"{repourl}/repodata/repomd.xml", output_dir=Path(d)) xml = (Path(d) / "repomd.xml").read_text() root = ElementTree.fromstring(xml) tags = root.find("{http://linux.duke.edu/metadata/repo}tags") if tags is None: die("repomd.xml missing <tags> element") for child in tags.iter("{http://linux.duke.edu/metadata/repo}content"): if child.text and child.text.startswith("gpg-pubkey"): gpgkey = child.text.partition("?")[0] gpgurls += [f"{repourl}{gpgkey}"] return tuple(gpgurls) mkosi-26/mkosi/distribution/postmarketos.py000066400000000000000000000107521512054777600214150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import shutil from collections.abc import Iterable from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.distribution import Distribution, DistributionInstaller, PackageType from mkosi.installer import PackageManager from mkosi.installer.apk import Apk, ApkRepository from mkosi.log import complete_step, die class Installer(DistributionInstaller, distribution=Distribution.postmarketos): @classmethod def pretty_name(cls) -> str: return "postmarketOS" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.apk @classmethod def default_release(cls) -> str: return "edge" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.postmarketos @classmethod def package_manager(cls, config: Config) -> type[PackageManager]: return Apk @classmethod def setup(cls, context: Context) -> None: with
complete_step("Setting up postmarketOS keyring"): keys = context.sandbox_tree / "etc/apk/keys" keys.mkdir(parents=True, exist_ok=True) for d in [ context.config.tools() / "usr/lib/apk/keys", context.config.tools() / "usr/share/distribution-gpg-keys/alpine-linux", context.config.tools() / "usr/share/distribution-gpg-keys/postmarketos", ]: if not d.exists(): continue # Do not overwrite keys in /etc/apk/keys to make sure that user provided keys take priority. for key in d.iterdir(): if key.is_dir(): continue dest = keys / key.name if dest.exists(): continue shutil.copy2(key, dest) Apk.setup(context, list(cls.repositories(context))) @classmethod def install(cls, context: Context) -> None: # TODO: Create merged /usr manually for now until our upstream (Alpine Linux) supports it: # https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/85504 for dir in ["lib", "bin", "sbin"]: (context.root / "usr" / dir).mkdir(parents=True, exist_ok=True) (context.root / dir).symlink_to(f"usr/{dir}") cls.install_packages(context, ["postmarketos-baselayout", "postmarketos-release"], apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[ApkRepository]: if context.config.release != "edge": die(f"Only 'edge' release is currently supported, got '{context.config.release}'") if context.config.local_mirror: yield ApkRepository(url=context.config.local_mirror) return # Alpine repos # Note: "testing" is enabled here because it's also enabled by default when pmbootstrap builds pmOS # images, sometimes pmOS pkgs temporarily depend on things in testing. for repo_name in ["main", "community", "testing"]: yield ApkRepository( url=f"https://dl-cdn.alpinelinux.org/alpine/{context.config.release}/{repo_name}" ) # postmarketOS repos mirror = context.config.mirror or "https://mirror.postmarketos.org/postmarketos" subdir = "master" if context.config.release == "edge" else f"v{context.config.release}" yield ApkRepository(url=f"{mirror}/extra-repos/systemd/{subdir}") yield ApkRepository(url=f"{mirror}/{subdir}") @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.arm64: "aarch64", Architecture.arm: "armv7", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by postmarketOS") return a @classmethod def is_kernel_package(cls, package: str) -> bool: if not package.startswith("linux-"): return False if package.endswith(("-doc", "-dev", "-manual")): return False # These pkgs end with many different things if package.startswith(("linux-tools-", "linux-firmware-")): return False if package in { "linux-timemachine", "linux-headers", "linux-apfs-rw-src", }: return False return True mkosi-26/mkosi/distribution/rhel.py000066400000000000000000000104031512054777600176060ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from pathlib import Path from typing import Any, Optional from mkosi.context import Context from mkosi.distribution import Distribution, centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die from mkosi.run import exists_in_sandbox, glob_in_sandbox class Installer(centos.Installer, distribution=Distribution.rhel): @classmethod def pretty_name(cls) -> str: return "RHEL" @classmethod def gpgurls(cls, context: Context) -> tuple[str, ...]: return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-redhat{cls.major_release(context.config)}-release", "https://access.redhat.com/security/data/fd431d51.txt", ), ) 
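    # Illustrative aside, not part of the upstream code: the entitlement material resolved by
    # the three helpers below ends up as per-repository options in the generated dnf repo
    # file, roughly like this (the release and the certificate serial are hypothetical):
    #
    #   [rhel-9.4-baseos-rpms]
    #   baseurl=https://cdn.redhat.com/content/dist/rhel9/9.4/$basearch/baseos/os
    #   sslcacert=/etc/rhsm/ca/redhat-uep.pem
    #   sslclientcert=/etc/pki/entitlement/1234567890.pem
    #   sslclientkey=/etc/pki/entitlement/1234567890-key.pem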
@staticmethod def sslcacert(context: Context) -> Optional[Path]: if context.config.mirror: return None path = Path("/etc/rhsm/ca/redhat-uep.pem") if not exists_in_sandbox(path, sandbox=context.sandbox()): die( f"redhat-uep.pem certificate not found in sandbox at {path}", hint="Add the certificate to the sandbox with SandboxTrees= or mkosi.sandbox/", ) return path @staticmethod def sslclientkey(context: Context) -> Optional[Path]: if context.config.mirror: return None glob = "/etc/pki/entitlement/*-key.pem" paths = glob_in_sandbox(glob, sandbox=context.sandbox()) if not paths: die( f"No entitlement keys found at {glob} in sandbox", hint="Add an entitlement key to the sandbox with SandboxTrees= or mkosi.sandbox/", ) return paths[0] @staticmethod def sslclientcert(context: Context) -> Optional[Path]: if context.config.mirror: return None glob = "/etc/pki/entitlement/*.pem" paths = [p for p in glob_in_sandbox(glob, sandbox=context.sandbox()) if "-key.pem" not in p.name] if not paths: die( f"No entitlement certificates found at {glob} in sandbox", hint="Add an entitlement certificate to the sandbox with SandboxTrees= or mkosi.sandbox/", ) return paths[0] @classmethod def repository_variants( cls, context: Context, gpgurls: tuple[str, ...], repo: str, ) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", gpgurls) else: mirror = context.config.mirror or "https://cdn.redhat.com/content/dist/" common: dict[str, Any] = dict( gpgurls=gpgurls, sslcacert=cls.sslcacert(context), sslclientcert=cls.sslclientcert(context), sslclientkey=cls.sslclientkey(context), ) v = context.config.release major = cls.major_release(context.config) yield RpmRepository( f"rhel-{v}-{repo}-rpms", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/os')}", enabled=True, **common, ) yield RpmRepository( f"rhel-{v}-{repo}-debug-rpms", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/debug')}", enabled=False, **common, ) yield RpmRepository( f"rhel-{v}-{repo}-source", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/source')}", enabled=False, **common, ) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.snapshot: die(f"Snapshot= is not supported for {cls.pretty_name()}") gpgurls = cls.gpgurls(context) yield from cls.repository_variants(context, gpgurls, "baseos") yield from cls.repository_variants(context, gpgurls, "appstream") yield from cls.repository_variants(context, gpgurls, "codeready-builder") yield from cls.epel_repositories(context) mkosi-26/mkosi/distribution/rhel_ubi.py000066400000000000000000000045221512054777600204520ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.context import Context from mkosi.distribution import Distribution, centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(centos.Installer, distribution=Distribution.rhel_ubi): @classmethod def pretty_name(cls) -> str: return "RHEL UBI" @classmethod def gpgurls(cls, context: Context) -> tuple[str, ...]: return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-redhat{cls.major_release(context.config)}-release", "https://access.redhat.com/security/data/fd431d51.txt", ), ) @classmethod def repository_variants( cls, context: Context, gpgurls: tuple[str, ...], repo: str, ) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(repo, 
f"baseurl={context.config.local_mirror}", gpgurls) else: mirror = context.config.mirror or "https://cdn-ubi.redhat.com/content/public/ubi/dist/" v = context.config.release yield RpmRepository( f"ubi-{v}-{repo}-rpms", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/os')}", gpgurls, ) yield RpmRepository( f"ubi-{v}-{repo}-debug-rpms", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/debug')}", gpgurls, enabled=False, ) yield RpmRepository( f"ubi-{v}-{repo}-source", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/source')}", gpgurls, enabled=False, ) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.snapshot: die(f"Snapshot= is not supported for {cls.pretty_name()}") gpgurls = cls.gpgurls(context) yield from cls.repository_variants(context, gpgurls, "baseos") yield from cls.repository_variants(context, gpgurls, "appstream") yield from cls.repository_variants(context, gpgurls, "codeready-builder") yield from cls.epel_repositories(context) mkosi-26/mkosi/distribution/rocky.py000066400000000000000000000026401512054777600200070ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from mkosi.context import Context from mkosi.distribution import Distribution, centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(centos.Installer, distribution=Distribution.rocky): @classmethod def pretty_name(cls) -> str: return "Rocky Linux" @classmethod def gpgurls(cls, context: Context) -> tuple[str, ...]: major = cls.major_release(context.config) return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-Rocky-{major}", f"https://download.rockylinux.org/pub/rocky/RPM-GPG-KEY-Rocky-{major}", ), ) @classmethod def repository_variants( cls, context: Context, gpgurls: tuple[str, ...], repo: str, ) -> list[RpmRepository]: if context.config.snapshot: die(f"Snapshot= is not supported for {cls.pretty_name()}") if context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, f'$releasever/{repo}/$basearch/os')}" else: url = f"mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo={repo}-$releasever" return [RpmRepository(repo, url, gpgurls)] @classmethod def sig_repositories(cls, context: Context) -> list[RpmRepository]: return [] mkosi-26/mkosi/distribution/ubuntu.py000066400000000000000000000074061512054777600202070ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import datetime import locale from collections.abc import Iterable from pathlib import Path from mkosi.config import Config from mkosi.context import Context from mkosi.curl import curl from mkosi.distribution import Distribution, debian, join_mirror from mkosi.installer.apt import AptRepository from mkosi.log import die from mkosi.util import startswith class Installer(debian.Installer, distribution=Distribution.ubuntu): @classmethod def pretty_name(cls) -> str: return "Ubuntu" @classmethod def default_release(cls) -> str: return "devel" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.debian @classmethod def repositories(cls, context: Context, for_image: bool = False) -> Iterable[AptRepository]: types = ("deb", "deb-src") mirror = None if for_image else context.config.mirror snapshot = None if for_image else context.config.snapshot components = ( "main", *context.config.repositories, ) if context.config.local_mirror and not for_image: yield AptRepository( types=("deb",), 
url=context.config.local_mirror, suite=context.config.release, components=("main",), signedby=None, ) return if mirror: pass elif context.config.architecture.is_x86_variant(): mirror = "http://archive.ubuntu.com/ubuntu" else: mirror = "http://ports.ubuntu.com" signedby = Path("/usr/share/keyrings/ubuntu-archive-keyring.gpg") yield AptRepository( types=types, url=mirror, suite=context.config.release, components=components, signedby=signedby, snapshot=snapshot, ) yield AptRepository( types=types, url=mirror, suite=f"{context.config.release}-updates", components=components, signedby=signedby, snapshot=snapshot, ) # Security updates repos are never mirrored. But !x86 are on the ports server. if context.config.architecture.is_x86_variant(): mirror = "http://security.ubuntu.com/ubuntu" else: mirror = "http://ports.ubuntu.com" yield AptRepository( types=types, url=mirror, suite=f"{context.config.release}-security", components=components, signedby=signedby, snapshot=snapshot, ) @classmethod def latest_snapshot(cls, config: Config) -> str: mirror = config.mirror or "http://snapshot.ubuntu.com" release = curl(config, join_mirror(mirror, f"ubuntu/dists/{config.release}-updates/Release")) for line in release.splitlines(): if date := startswith(line, "Date: "): # %a and %b parse the abbreviated day of the week and the abbreviated month which are both # locale-specific so set the locale to C explicitly to make sure we try to parse the English # abbreviations used in the Release file. lc = locale.setlocale(locale.LC_TIME) try: locale.setlocale(locale.LC_TIME, "C") return datetime.datetime.strptime(date, "%a, %d %b %Y %H:%M:%S %Z").strftime( "%Y%m%dT%H%M%SZ" ) finally: locale.setlocale(locale.LC_TIME, lc) die("Release file is missing Date field") @classmethod def is_kernel_package(cls, package: str) -> bool: return package.startswith("linux-") mkosi-26/mkosi/documentation.py000066400000000000000000000036731512054777600170170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import logging import subprocess from pathlib import Path from mkosi.config import DocFormat from mkosi.log import die from mkosi.pager import page from mkosi.run import find_binary, run def show_docs( manual: str, formats: list[DocFormat], *, man_chapter: int = 1, resources: Path, pager: bool = True, ) -> None: while formats: form = formats.pop(0) try: if form == DocFormat.man: man = resources / f"man/{manual}.{man_chapter}" if not man.exists(): raise FileNotFoundError() run(["man", "--local-file", man]) return elif form == DocFormat.pandoc: if not find_binary("pandoc"): logging.warning("pandoc is not available") continue pandoc = run( [ "pandoc", "-t", "man", "-s", resources / f"man/{manual}.{man_chapter}.md", "--lua-filter", resources / "pandoc/md2man.lua", ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, log=False, ) # fmt: skip run(["man", "--local-file", "-"], input=pandoc.stdout) return elif form == DocFormat.markdown: page((resources / f"man/{manual}.{man_chapter}.md").read_text(), pager) return elif form == DocFormat.system: run(["man", str(man_chapter), manual], log=False) return except (FileNotFoundError, subprocess.CalledProcessError) as e: if not formats: if isinstance(e, FileNotFoundError): die(f"The mkosi package does not contain the man page {manual!r}.") raise e mkosi-26/mkosi/initrd.py000066400000000000000000000330421512054777600154320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import contextlib import dataclasses import logging import os import
platform import shutil import subprocess import sys import tempfile from pathlib import Path from typing import Optional, cast import mkosi.resources from mkosi.config import DocFormat, InitrdProfile, OutputFormat from mkosi.documentation import show_docs from mkosi.log import ARG_DEBUG, ARG_DEBUG_SHELL, die, log_notice, log_setup from mkosi.run import find_binary, run, uncaught_exception_handler from mkosi.sandbox import __version__, umask from mkosi.tree import copy_tree, move_tree, rmtree from mkosi.util import PathString, mandatory_variable, resource_path @dataclasses.dataclass(frozen=True) class KernelInstallContext: command: str kernel_version: str entry_dir: Path kernel_image: Path initrds: list[Path] staging_area: Path layout: str image_type: str initrd_generator: Optional[str] uki_generator: Optional[str] verbose: bool @staticmethod def parse(*, name: str, description: str) -> "KernelInstallContext": parser = argparse.ArgumentParser( description=description, allow_abbrev=False, usage=f"{name} COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE…", ) parser.add_argument( "command", metavar="COMMAND", help="The action to perform. Only 'add' is supported.", ) parser.add_argument( "kernel_version", metavar="KERNEL_VERSION", help="Kernel version string", ) parser.add_argument( "entry_dir", metavar="ENTRY_DIR", type=Path, nargs="?", help="Type#1 entry directory (ignored)", ) parser.add_argument( "kernel_image", metavar="KERNEL_IMAGE", type=Path, nargs="?", help="Kernel image", ) parser.add_argument( "initrds", metavar="INITRD…", type=Path, nargs="*", help="Initrd files", ) parser.add_argument( "--version", action="version", version=f"mkosi {__version__}", ) args = parser.parse_args() return KernelInstallContext( command=args.command, kernel_version=args.kernel_version, entry_dir=args.entry_dir, kernel_image=args.kernel_image, initrds=args.initrds, staging_area=Path(mandatory_variable("KERNEL_INSTALL_STAGING_AREA")), layout=mandatory_variable("KERNEL_INSTALL_LAYOUT"), image_type=mandatory_variable("KERNEL_INSTALL_IMAGE_TYPE"), initrd_generator=os.getenv("KERNEL_INSTALL_INITRD_GENERATOR"), uki_generator=os.getenv("KERNEL_INSTALL_UKI_GENERATOR"), verbose=int(os.getenv("KERNEL_INSTALL_VERBOSE", 0)) > 0, ) def create_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( prog="mkosi-initrd", description="Build initrds or unified kernel images for the current system using mkosi", allow_abbrev=False, usage="mkosi-initrd [options...]", ) parser.add_argument( "-o", "--output", metavar="NAME", help="Output name", default="initrd", ) parser.add_argument( "--kernel-image", metavar="KERNEL_IMAGE", help="Kernel image", type=Path, ) parser.add_argument( "-t", "--format", choices=[str(OutputFormat.cpio), str(OutputFormat.uki), str(OutputFormat.directory)], help="Output format (CPIO archive, UKI or local directory)", default="cpio", ) parser.add_argument( "-g", "--generic", help="Build a generic initrd without host-specific kernel modules", action="store_true", default=False, ) parser.add_argument( "--profile", choices=InitrdProfile.values(), help="Which profiles to enable for the initrd", action="append", default=[], ) initrd_common_args(parser) return parser def is_valid_modulesd(modulesd: Path) -> bool: # Check whether a provided kernel modules directory is valid return modulesd.is_dir() and ( (modulesd / "modules.dep").exists() or (modulesd / "modules.dep.bin").exists() ) def weak_modules(modulesd: Path) -> list[str]: return [ f"--extra-tree={m.resolve()}:{m.resolve()}" for m in (modulesd / 
"weak-updates").rglob("*.ko*") if m.is_symlink() ] def process_crypttab(staging_dir: Path) -> list[str]: cmdline = [] # Generate crypttab with all the x-initrd.attach entries if Path("/etc/crypttab").exists(): try: crypttab = [ line for line in Path("/etc/crypttab").read_text().splitlines() if ( len(entry := line.split()) >= 4 and not entry[0].startswith("#") and "x-initrd.attach" in entry[3] ) ] if crypttab: with (staging_dir / "crypttab").open("w") as f: f.write("# Automatically generated by mkosi-initrd\n") f.write("\n".join(crypttab)) cmdline += ["--extra-tree", f"{staging_dir / 'crypttab'}:/etc/crypttab"] # Add key files for line in crypttab: entry = line.split() if ( entry[2] in ["-", "none"] and Path(keyfile := f"/etc/cryptsetup-keys.d/{entry[0]}.key").exists() ) or Path(keyfile := entry[2]).exists(): cmdline += ["--extra-tree", f"{keyfile}:{keyfile}"] except PermissionError: logging.warning("Permission denied to access /etc/crypttab, the initrd may be unbootable") return cmdline def network_config() -> list[str]: return [ f"--extra-tree={f}:{f}" for f in ( "/etc/systemd/network", "/etc/systemd/networkd.conf", "/etc/systemd/networkd.conf.d", "/etc/systemd/resolved.conf", "/etc/systemd/resolved.conf.d", ) if Path(f).exists() ] def nfs_config() -> list[str]: return [f"--extra-tree={f}:{f}" for f in ("/etc/idmapd.conf", "/etc/idmapd.conf.d") if Path(f).exists()] def raid_config() -> list[str]: return [ f"--extra-tree={f}:{f}" for f in ("/etc/mdadm.conf", "/etc/mdadm.conf.d", "/etc/mdadm/mdadm.conf", "/etc/mdadm/mdadm.conf.d") if Path(f).exists() ] def vconsole_config() -> list[str]: return [ f"--extra-tree={f}:{f}" for f in ("/etc/default/keyboard", "/etc/vconsole.conf") if Path(f).exists() ] def initrd_finalize(staging_dir: Path, output: str, output_dir: Optional[Path]) -> None: if output_dir: with umask(~0o700) if os.getuid() == 0 else cast(umask, contextlib.nullcontext()): Path(output_dir).mkdir(parents=True, exist_ok=True) else: output_dir = Path.cwd() staging = staging_dir / output tmp = output_dir / f"{output}.new" final = output_dir / output log_notice(f"Copying {staging} to {tmp}") # mkosi symlinks the expected output image, so dereference it try: copy_tree(staging.resolve(), tmp) except subprocess.CalledProcessError: rmtree(tmp) raise log_notice(f"Moving {tmp} to {final}") rmtree(final) move_tree(tmp, final) def initrd_common_args(parser: argparse.ArgumentParser) -> None: parser.add_argument( "-k", "--kernel-version", metavar="KERNEL_VERSION", help="Kernel version string", default=platform.uname().release, ) parser.add_argument( "-O", "--output-dir", metavar="DIR", help="Output directory", default=None, type=Path, ) parser.add_argument( "--debug", help="Turn on debugging output", action="store_true", default=False, ) parser.add_argument( "--debug-shell", help="Spawn debug shell if a sandboxed command fails", action="store_true", default=False, ) parser.add_argument( "--debug-sandbox", help="Run mkosi-sandbox with strace", action="store_true", default=False, ) parser.add_argument( "-D", "--show-documentation", help="Show the man page", action="store_true", default=False, ) parser.add_argument( "--version", action="version", version=f"mkosi {__version__}", ) def include_system_config(name: str) -> list[str]: cmdline = [] for d in ("/usr/lib", "/usr/local/lib", "/run", "/etc"): p = Path(d) / name if p.exists(): cmdline += ["--include", os.fspath(p)] return cmdline @uncaught_exception_handler() def main() -> None: log_setup() args = create_parser().parse_args() if 
args.show_documentation: with resource_path(mkosi.resources) as r: show_docs("mkosi-initrd", DocFormat.all(), resources=r) return modulesd = Path("/usr/lib/modules") / args.kernel_version if not is_valid_modulesd(modulesd): die(f"Invalid kernel directory: {modulesd}") with ( tempfile.TemporaryDirectory() as staging_dir, tempfile.TemporaryDirectory() as sandbox_tree, ): cmdline: list[PathString] = [ "mkosi", "--force", "--directory=", f"--format={args.format}", f"--output={args.output}", f"--output-directory={staging_dir}", f"--extra-tree={modulesd}:{modulesd}", "--extra-tree=/usr/lib/firmware:/usr/lib/firmware", "--remove-files=/usr/lib/firmware/*-ucode", "--build-sources=", "--include=mkosi-initrd", ] # fmt: skip if not args.generic: cmdline += ["--kernel-modules=host"] cmdline += weak_modules(modulesd) for p in args.profile: cmdline += ["--profile", p] if p == "network": cmdline += network_config() elif p == "nfs": cmdline += nfs_config() elif p == "raid": cmdline += raid_config() if args.kernel_image: cmdline += [ f"--extra-tree={args.kernel_image}:{modulesd}/vmlinuz", ] if args.debug: ARG_DEBUG.set(args.debug) cmdline += ["--debug"] if args.debug_shell: ARG_DEBUG_SHELL.set(args.debug_shell) cmdline += ["--debug-shell"] if args.debug_sandbox: cmdline += ["--debug-sandbox"] if os.getuid() == 0: cmdline += [ "--package-cache-dir=/var", "--cache-only=metadata", ] if args.format != OutputFormat.directory.value: cmdline += ["--output-mode=600"] cmdline += include_system_config("mkosi-initrd") # Make sure we don't use any of mkosi's default repositories. for p in ( "yum.repos.d/mkosi.repo", "apt/sources.list.d/mkosi.sources", "zypp/repos.d/mkosi.repo", "pacman.conf", ): (Path(sandbox_tree) / "etc" / p).parent.mkdir(parents=True, exist_ok=True) (Path(sandbox_tree) / "etc" / p).touch() # Copy in the host's package manager configuration. for p in ( "dnf", "yum.repos.d/", "pki/rpm-gpg", "apt", "zypp", "pacman.conf", "pacman.d/", ): if not (Path("/etc") / p).exists(): continue (Path(sandbox_tree) / "etc" / p).parent.mkdir(parents=True, exist_ok=True) if (Path("/etc") / p).resolve().is_file(): shutil.copy2(Path("/etc") / p, Path(sandbox_tree) / "etc" / p) else: shutil.copytree( Path("/etc") / p, Path(sandbox_tree) / "etc" / p, # If we're running as root, use the keyring from the host, but make sure we don't try to # copy any gpg-agent sockets that might be in /etc/pacman.d/gnupg. If we're not running # as root, we might not have the necessary permissions to access the keyring so don't try # to copy the keyring in that case. ignore=shutil.ignore_patterns("S.*" if os.getuid() == 0 else "gnupg"), dirs_exist_ok=True, ) cmdline += [f"--sandbox-tree={sandbox_tree}"] cmdline += process_crypttab(Path(staging_dir)) if Path("/etc/kernel/cmdline").exists(): cmdline += ["--kernel-command-line", Path("/etc/kernel/cmdline").read_text()] cmdline += vconsole_config() # Resolve dnf binary to determine which version the host uses by default # (to avoid preferring dnf5 if the host uses dnf4) # as there's a much bigger chance that it has a populated dnf cache directory. 
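        # Purely illustrative sketch of the assembled invocation (the paths, kernel version
        # and temporary directories are made up, and the exact flags depend on the options
        # handled above):
        #
        #   mkosi --force --directory= --format=cpio --output=initrd \
        #       --output-directory=/tmp/tmpXXXXXXXX \
        #       --extra-tree=/usr/lib/modules/6.8.12:/usr/lib/modules/6.8.12 \
        #       --extra-tree=/usr/lib/firmware:/usr/lib/firmware \
        #       --include=mkosi-initrd --kernel-modules=host \
        #       --sandbox-tree=/tmp/tmpYYYYYYYY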
run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | ({"MKOSI_DNF": dnf.resolve().name} if (dnf := find_binary("dnf")) else {}), ) initrd_finalize(Path(staging_dir), args.output, args.output_dir) if __name__ == "__main__": main() mkosi-26/mkosi/installer/000077500000000000000000000000001512054777600155625ustar00rootroot00000000000000mkosi-26/mkosi/installer/__init__.py000066400000000000000000000213231512054777600176740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Sequence from contextlib import AbstractContextManager from pathlib import Path from mkosi.config import Config, ConfigFeature, OutputFormat from mkosi.context import Context from mkosi.mounts import finalize_certificate_mounts from mkosi.run import apivfs_options, finalize_interpreter, finalize_passwd_symlinks, find_binary from mkosi.tree import rmtree from mkosi.util import PathString, flatten, startswith class PackageManager: @classmethod def executable(cls, config: Config) -> str: return "custom" @classmethod def subdir(cls, config: Config) -> Path: return Path("custom") @classmethod def package_subdirs(cls, cache: Path) -> list[tuple[Path, Path]]: return [] @classmethod def package_globs(cls) -> list[str]: return [] @classmethod def state_subdirs(cls) -> list[Path]: return [] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return {} @classmethod def architecture(cls, context: Context) -> str: return context.config.distribution.installer.architecture(context.config.architecture) @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: env = { "HOME": "/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc. # systemd's chroot detection doesn't work when unprivileged so tell it explicitly. "SYSTEMD_IN_CHROOT": "1", } if "SYSTEMD_HWDB_UPDATE_BYPASS" not in context.config.finalize_environment(): env["SYSTEMD_HWDB_UPDATE_BYPASS"] = "1" if ( "KERNEL_INSTALL_BYPASS" not in context.config.finalize_environment() and context.config.bootable != ConfigFeature.disabled ): env["KERNEL_INSTALL_BYPASS"] = "1" else: env |= { "BOOT_ROOT": "/boot", # Required to make 90-loaderentry.install put the right paths into the bootloader entry. "BOOT_MNT": "/boot", # Hack to tell dracut to not create a hostonly initrd when it's invoked by kernel-install. 
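            # (Illustrative note: "hostonly_l" is the knob dracut reads for this, and setting
            # it to "no" makes dracut build a generic initrd if it does end up being invoked.)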
"hostonly_l": "no", } return context.config.finalize_environment() | env @classmethod def env_cmd(cls, context: Context) -> list[PathString]: return ["env", *([f"{k}={v}" for k, v in cls.finalize_environment(context).items()])] @classmethod def mounts(cls, context: Context) -> list[PathString]: mounts = [ *finalize_certificate_mounts(context.config), "--bind", context.repository, "/repository", ] # fmt: skip if context.config.local_mirror and (mirror := startswith(context.config.local_mirror, "file://")): mounts += ["--ro-bind", mirror, mirror] subdir = context.config.distribution.installer.package_manager(context.config).subdir(context.config) src = context.metadata_dir / "lib" / subdir mounts += flatten( ("--bind", src / state_subdir, Path("/var/lib") / subdir / state_subdir) for state_subdir in context.config.distribution.installer.package_manager( context.config ).state_subdirs() ) src = context.metadata_dir / "cache" / subdir caches = context.config.distribution.installer.package_manager(context.config).package_subdirs(src) # If there are no package cache subdirectories, we always operate on the package cache directory, # since we can't do any mount tricks to combine caches from different locations in this case. if caches == [(Path("."), Path("."))]: mounts += [ "--bind", context.config.package_cache_dir_or_default() / "cache" / subdir, Path("/var/cache") / subdir, ] else: mounts += ["--bind", src, Path("/var/cache") / subdir] # If we're not operating on the configured package cache directory, we're operating on a snapshot # of the repository metadata. To make sure any downloaded packages are still cached in the # configured package cache directory in this scenario, we mount in the relevant directories from # the configured package cache directory. if context.metadata_dir != context.config.package_cache_dir_or_default(): mounts += flatten( ( "--bind", context.config.package_cache_dir_or_default() / "cache" / subdir / srcsubdir, Path("/var/cache") / subdir / dstsubdir, ) for srcsubdir, dstsubdir in caches if ( context.config.package_cache_dir_or_default() / "cache" / subdir / srcsubdir ).exists() ) return mounts @classmethod def options(cls, *, root: PathString, apivfs: bool = True) -> list[PathString]: return [ *(apivfs_options() if apivfs else []), "--become-root", "--suppress-chown", "--suppress-sync", # Make sure /etc/machine-id is not overwritten by any package manager post install scripts. "--ro-bind-try", Path(root) / "etc/machine-id", "/buildroot/etc/machine-id", # Nudge gpg to create its sockets in /run by making sure /run/user/0 exists. "--dir", "/run/user/0", # Some package managers (e.g. dpkg) read from the host's /etc/passwd instead of the buildroot's # /etc/passwd so we symlink /etc/passwd from the buildroot to make sure it gets used. 
*(finalize_passwd_symlinks("/buildroot") if apivfs else []), ] # fmt: skip @classmethod def apivfs_script_cmd(cls, context: Context) -> list[PathString]: return [ finalize_interpreter(bool(context.config.tools_tree)), "-SI", "/sandbox.py", "--bind", "/", "/", "--same-dir", "--bind", "/var/tmp", "/buildroot/var/tmp", *apivfs_options(), *cls.options(root="/buildroot"), ] # fmt: skip @classmethod def sandbox( cls, context: Context, *, apivfs: bool, options: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: return context.sandbox( network=True, options=[ *context.rootoptions(), *cls.mounts(context), *cls.options(root=context.root, apivfs=apivfs), *options, ], ) # fmt: skip @classmethod def install( cls, context: Context, packages: Sequence[str], *, apivfs: bool = True, allow_downgrade: bool = False, ) -> None: pass @classmethod def remove(cls, context: Context, packages: Sequence[str]) -> None: pass @classmethod def sync(cls, context: Context, force: bool) -> None: pass @classmethod def createrepo(cls, context: Context) -> None: pass def clean_package_manager_metadata(context: Context) -> None: """ Remove package manager metadata Try them all regardless of the distro: metadata is only removed if the package manager is not present in the image. """ subdir = context.config.distribution.installer.package_manager(context.config).subdir(context.config) if context.config.clean_package_metadata == ConfigFeature.disabled: return if context.config.clean_package_metadata == ConfigFeature.auto and context.config.output_format in ( OutputFormat.directory, OutputFormat.tar, ): return # If cleaning is not explicitly requested, keep the repository metadata if we're building a directory or # tar image (which are often used as a base tree for extension images and thus should retain package # manager metadata) or if the corresponding package manager is installed in the image. 
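    # For example (illustrative): with CleanPackageMetadata=auto, a disk image that does not
    # ship rpm or dnf loses var/lib/rpm and its dnf cache, while a directory or tar build, or
    # an image that does ship the package manager, keeps them untouched.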
executable = context.config.distribution.installer.package_manager(context.config).executable( context.config ) remove = [] for tool, paths in ( ("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]), ("dnf5", ["usr/lib/sysimage/libdnf5"]), ("dpkg", ["var/lib/dpkg"]), (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"]), ): # fmt: skip if context.config.clean_package_metadata == ConfigFeature.enabled or not find_binary( tool, root=context.root ): remove += [context.root / p for p in paths if (context.root / p).exists()] rmtree(*remove, sandbox=context.sandbox) mkosi-26/mkosi/installer/apk.py000066400000000000000000000221301512054777600167050ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import shutil from collections.abc import Sequence from pathlib import Path from mkosi.config import Config from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.run import CompletedProcess, run, workdir from mkosi.tree import rmtree from mkosi.util import _FILE, PathString @dataclasses.dataclass(frozen=True) class ApkRepository: url: str class Apk(PackageManager): @classmethod def executable(cls, config: Config) -> str: return "apk" @classmethod def subdir(cls, config: Config) -> Path: return Path("apk") @classmethod def package_subdirs(cls, cache: Path) -> list[tuple[Path, Path]]: return [(Path("."), Path("."))] @classmethod def package_globs(cls) -> list[str]: return ["*.apk"] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "apk": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), "mkosi-install": ["apk", "add", "--upgrade", "--cache-max-age", "999999999"], "mkosi-upgrade": ["apk", "upgrade"], "mkosi-remove": ["apk", "--remove", "del"], "mkosi-reinstall": ["apk", "fix", "--reinstall"], } # fmt: skip @classmethod def setup(cls, context: Context, repositories: Sequence[ApkRepository]) -> None: config = context.sandbox_tree / "etc/apk/repositories" if config.exists(): return config.parent.mkdir(exist_ok=True, parents=True) config.write_text("\n".join(repo.url for repo in repositories) + "\n") @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: return super().finalize_environment(context) | { # apk requires SHA1 support for signature verification, and this is disabled in the default # crypto-policies for Fedora/RH/SuSE. This variable is set to re-enable SHA1 support on these # distributions. 
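        # In effect (illustrative), every apk invocation below then runs roughly as:
        #
        #   env OPENSSL_ENABLE_SHA1_SIGNATURES=1 apk --root /buildroot ... add ...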
# Also see: https://gitlab.alpinelinux.org/alpine/apk-tools/-/issues/11139#note_542183 "OPENSSL_ENABLE_SHA1_SIGNATURES": "1", } @classmethod def cmd(cls, context: Context) -> list[PathString]: return [ "apk", "--root", "/buildroot", "--cache-packages", "--cache-dir", "/var/cache/apk", "--cache-predownload", "--arch", cls.architecture(context), "--no-interactive", "--preserve-env", "--keys-dir", "/etc/apk/keys", "--repositories-file", "/etc/apk/repositories", *(["--allow-untrusted"] if not context.config.repository_key_check else []), ] # fmt: skip @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, stdout: _FILE = None, ) -> CompletedProcess: return run( cls.cmd(context) + [operation, *arguments], sandbox=cls.sandbox(context, apivfs=apivfs), env=cls.finalize_environment(context), stdout=stdout, ) @classmethod def install( cls, context: Context, packages: Sequence[str], *, apivfs: bool = True, allow_downgrade: bool = False, ) -> None: cls.invoke( context, "add", [ "--initdb", "--upgrade", # effectively disable refreshing the cache in this situation "--cache-max-age", "999999999", *packages, ], apivfs=apivfs, ) # fmt: skip @classmethod def remove(cls, context: Context, packages: Sequence[str]) -> None: cls.invoke(context, "del", packages, apivfs=True) @classmethod def sync(cls, context: Context, force: bool) -> None: # Updating the cache requires an initialized apk database but we don't want to touch the image root # directory so temporarily replace it with an empty directory to make apk happy. saved = context.root.rename(context.workspace / "saved-root") context.root.mkdir() cls.invoke(context, "add", ["--initdb"]) cls.invoke(context, "update", ["--update-cache"] if force else []) rmtree(context.root) saved.rename(context.root) @classmethod def createrepo(cls, context: Context) -> None: packages = [p.name for p in context.repository.glob("*.apk")] if not packages: return # Move apk files to arch-specific directory arch = cls.architecture(context) arch_dir = context.repository / arch arch_dir.mkdir(exist_ok=True) for package in packages: (context.repository / package).rename(arch_dir / package) # Generate temporary signing key using openssl # This uses the same method as abuild-keygen, because this tool is not available on all distros key = "mkosi@local" priv = context.workspace / f"{key}.rsa" pub = context.workspace / f"{key}.rsa.pub" if not priv.exists(): run( ["openssl", "genrsa", "-out", workdir(priv), "2048"], sandbox=context.sandbox(options=["--bind", priv.parent, workdir(priv.parent)]), env=cls.finalize_environment(context), ) run( ["openssl", "rsa", "-in", workdir(priv), "-pubout", "-out", workdir(pub)], sandbox=context.sandbox( options=[ "--bind", priv, workdir(priv), "--bind", pub.parent, workdir(pub.parent), ] ), env=cls.finalize_environment(context), ) # fmt: skip keys = context.sandbox_tree / "etc/apk/keys" keys.mkdir(parents=True, exist_ok=True) shutil.copy2(pub, keys / pub.name) run( [ "apk", "index", "-o", "APKINDEX.tar.gz", "--rewrite-arch", arch, # packages may be signed by another key that might not be available. "--allow-untrusted", *packages, ], sandbox=context.sandbox( options=[ "--bind", context.repository, workdir(context.repository), "--chdir", workdir(arch_dir), ] ), env=cls.finalize_environment(context), ) # fmt: skip # Create and sign index signature file # Note: The index signing stuff below was largely inspired by what abuild-sign and abuild-tar tools # do on Alpine Linux. 
These tools are not always packaged for other distros. index = arch_dir / "APKINDEX.tar.gz" sig = arch_dir / f".SIGN.RSA.{pub.name}" run( [ "openssl", "dgst", "-sha1", "-sign", workdir(priv), "-out", workdir(sig), workdir(index), ], sandbox=context.sandbox( options=[ "--bind", priv, workdir(priv), "--bind", sig.parent, workdir(sig.parent), "--bind", index, workdir(index), ] ), env=cls.finalize_environment(context), ) # fmt: skip tar = context.workspace / "sig.tar" with tar.open("wb") as f: run( [ "tar", "-cf", "-", "--format=posix", "--owner=0", "--group=0", "--numeric-owner", "-C", workdir(arch_dir), sig.name, ], sandbox=context.sandbox(options=["--bind", arch_dir, workdir(arch_dir)]), stdout=f, ) # fmt: skip # Strip EOF markers to allow concatenation with compressed index. data = tar.read_bytes() while data.endswith(b"\x00" * 512): data = data[:-512] tar.write_bytes(data) # Prepend gzipped signature to original index signed = context.workspace / "signed.tar.gz" with signed.open("wb") as out: run( ["gzip", "-n", "-9", "-c", workdir(tar)], sandbox=context.sandbox(options=["--bind", tar, workdir(tar)]), stdout=out, ) out.write(index.read_bytes()) # Finally, overwrite the original index archive with the signed index archive signed.rename(index) repos = context.sandbox_tree / "etc/apk/repositories" repo = "file:///repository/" if repos.exists(): content = repos.read_text() if repo not in content: with repos.open("a") as f: f.write(f"{repo}\n") else: repos.write_text(f"{repo}\n") cls.sync(context, force=True) mkosi-26/mkosi/installer/apt.py000066400000000000000000000303251512054777600167230ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import textwrap from collections.abc import Sequence from pathlib import Path from typing import Final, Optional from mkosi.config import Config, ConfigFeature from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.log import die from mkosi.run import CompletedProcess, run, workdir from mkosi.sandbox import umask from mkosi.util import _FILE, PathString @dataclasses.dataclass(frozen=True) class AptRepository: types: tuple[str, ...] url: str suite: str components: tuple[str, ...]
signedby: Optional[Path] snapshot: Optional[str] = None def __str__(self) -> str: return textwrap.dedent( f"""\ Types: {" ".join(self.types)} URIs: {self.url} Suites: {self.suite} Components: {" ".join(self.components)} {"Signed-By" if self.signedby else "Trusted"}: {self.signedby or "yes"} {f"Snapshot: {self.snapshot}" if self.snapshot else ""} """ ) class Apt(PackageManager): documentation_exclude_globs: Final[list[str]] = [ "usr/share/doc/*", "usr/share/man/*", "usr/share/groff/*", "usr/share/gtk-doc/*", "usr/share/info/*", ] @classmethod def executable(cls, config: Config) -> str: return "apt-get" @classmethod def subdir(cls, config: Config) -> Path: return Path("apt") @classmethod def package_subdirs(cls, cache: Path) -> list[tuple[Path, Path]]: return [(Path("archives"), Path("archives"))] @classmethod def package_globs(cls) -> list[str]: return ["*.deb", "*.ddeb"] @classmethod def state_subdirs(cls) -> list[Path]: return [Path("lists")] @classmethod def dpkg_cmd(cls, command: str) -> list[PathString]: return [ command, "--admindir=/buildroot/var/lib/dpkg", "--root=/buildroot", ] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: cmd = cls.apivfs_script_cmd(context) return { **{ command: cmd + cls.env_cmd(context) + cls.cmd(context, command) for command in ( "apt", "apt-cache", "apt-cdrom", "apt-config", "apt-extracttemplates", "apt-get", "apt-key", "apt-mark", "apt-sortpkgs", ) }, **{ command: cmd + cls.dpkg_cmd(command) for command in ( "dpkg", "dpkg-query", ) }, "mkosi-install": ["apt-get", "install"], "mkosi-upgrade": ["apt-get", "upgrade"], "mkosi-remove": ["apt-get", "purge"], "mkosi-reinstall": ["apt-get", "install", "--reinstall"], } # fmt: skip @classmethod def options(cls, *, root: PathString, apivfs: bool = True) -> list[PathString]: return super().options(root=root, apivfs=apivfs) + ["--dir", "/var/lib/apt/lists/partial"] @classmethod def setup(cls, context: Context, repositories: Sequence[AptRepository]) -> None: (context.sandbox_tree / "etc/apt").mkdir(exist_ok=True, parents=True) (context.sandbox_tree / "etc/apt/apt.conf.d").mkdir(exist_ok=True, parents=True) (context.sandbox_tree / "etc/apt/preferences.d").mkdir(exist_ok=True, parents=True) (context.sandbox_tree / "etc/apt/sources.list.d").mkdir(exist_ok=True, parents=True) # We have a special apt.conf outside of the sandbox tree that only configures "Dir::Etc" that we pass # to APT_CONFIG to tell apt it should read config files from /etc/apt in case this is overridden by # distributions. This is required because apt parses CLI configuration options after parsing its # configuration files and as such we can't use CLI options to tell apt where to look for # configuration files. config = context.sandbox_tree / "etc/apt.conf" if not config.exists(): config.write_text( textwrap.dedent( """\ Dir::Etc "/etc/apt"; """ ) ) sources = context.sandbox_tree / "etc/apt/sources.list.d/mkosi.sources" if not sources.exists(): for repo in repositories: if repo.signedby and not (context.config.tools() / str(repo.signedby).lstrip("/")).exists(): die( f"Keyring for repo {repo.url} not found at {repo.signedby}", hint="Make sure the right keyring package (e.g. 
debian-archive-keyring, " "kali-archive-keyring or ubuntu-keyring) is installed", ) with sources.open("w") as f: for repo in repositories: f.write(str(repo)) @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: env = { "APT_CONFIG": "/etc/apt.conf", "DEBIAN_FRONTEND": "noninteractive", "DEBCONF_INTERACTIVE_SEEN": "true", } if ( "INITRD" not in context.config.finalize_environment() and context.config.bootable != ConfigFeature.disabled ): env["INITRD"] = "No" return super().finalize_environment(context) | env @classmethod def cmd(cls, context: Context, command: str = "apt-get") -> list[PathString]: cmdline: list[PathString] = [ command, "-o", f"APT::Architecture={cls.architecture(context)}", "-o", f"APT::Architectures={cls.architecture(context)}", "-o", f"APT::Install-Recommends={str(context.config.with_recommends).lower()}", "-o", "APT::Immediate-Configure=off", "-o", "APT::Get::Assume-Yes=true", "-o", "APT::Get::AutomaticRemove=true", "-o", "APT::Get::Allow-Change-Held-Packages=true", "-o", "APT::Get::Allow-Remove-Essential=true", "-o", "APT::Sandbox::User=root", "-o", "Acquire::AllowReleaseInfoChange=true", "-o", "Acquire::Check-Valid-Until=false", "-o", "Dir::Cache=/var/cache/apt", "-o", "Dir::State=/buildroot/var/lib/apt", "-o", "Dir::State::lists=/var/lib/apt/lists/", "-o", "Dir::Log=/var/log/apt", "-o", "Dir::State::Status=/buildroot/var/lib/dpkg/status", "-o", f"Dir::Bin::DPkg={context.config.find_binary('dpkg')}", "-o", "Debug::NoLocking=true", "-o", "DPkg::Options::=--root=/buildroot", "-o", "DPkg::Options::=--force-unsafe-io", "-o", "DPkg::Options::=--force-architecture", "-o", "DPkg::Options::=--force-depends", "-o", "DPkg::Options::=--no-debsig", "-o", "DPkg::Use-Pty=false", "-o", "DPkg::Install::Recursive::Minimum=1000", "-o", "pkgCacheGen::ForceEssential=,", ] # fmt: skip if not context.config.repository_key_check: cmdline += [ "-o", "Acquire::AllowInsecureRepositories=true", "-o", "Acquire::AllowDowngradeToInsecureRepositories=true", "-o", "APT::Get::AllowUnauthenticated=true", ] # fmt: skip if not context.config.with_docs: cmdline += [ f"--option=DPkg::Options::=--path-exclude=/{glob}" for glob in cls.documentation_exclude_globs ] cmdline += ["--option=DPkg::Options::=--path-include=/usr/share/doc/*/copyright"] if context.config.proxy_url: cmdline += [ "-o", f"Acquire::http::Proxy={context.config.proxy_url}", "-o", f"Acquire::https::Proxy={context.config.proxy_url}", ] # fmt: skip return cmdline @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, options: Sequence[PathString] = (), stdout: _FILE = None, ) -> CompletedProcess: with umask(~0o755): # TODO: Drop once apt 2.5.4 is widely available. (context.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True) (context.root / "var/lib/dpkg/status").touch(exist_ok=True) (context.root / "var/lib/dpkg/available").touch(exist_ok=True) return run( cls.cmd(context) + [operation, *arguments], sandbox=cls.sandbox(context, apivfs=apivfs, options=options), env=cls.finalize_environment(context), stdout=stdout, ) @classmethod def install( cls, context: Context, packages: Sequence[str], *, apivfs: bool = True, allow_downgrade: bool = False, ) -> None: # Debian policy is to start daemons by default. The policy-rc.d script can be used choose which ones # to start. Let's install one that denies all daemon startups. # See https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt for more information. 
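        # (Exit status 101 in particular means "action forbidden by policy", so every service
        # start attempted by maintainer scripts during installation is refused.)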
# Note: despite writing in /usr/sbin, this file is not shipped by the OS and instead should be # managed by the admin. policyrcd = context.root / "usr/sbin/policy-rc.d" with umask(~0o755): policyrcd.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): policyrcd.write_text("#!/bin/sh\nexit 101\n") arguments = [] if allow_downgrade: arguments += ["--allow-downgrades"] arguments += [*packages] cls.invoke(context, "install", arguments, apivfs=apivfs) policyrcd.unlink() # systemd-gpt-auto-generator is disabled by default in Ubuntu: # https://git.launchpad.net/ubuntu/+source/systemd/tree/debian/systemd.links?h=ubuntu/noble-proposed. # Let's make sure it is enabled by default in our images. (context.root / "etc/systemd/system-generators/systemd-gpt-auto-generator").unlink(missing_ok=True) @classmethod def remove(cls, context: Context, packages: Sequence[str]) -> None: cls.invoke(context, "purge", packages, apivfs=True) @classmethod def sync(cls, context: Context, force: bool) -> None: cls.invoke(context, "update") @classmethod def createrepo(cls, context: Context) -> None: names = [d.name for glob in cls.package_globs() for d in context.repository.glob(glob)] if not names: return with (context.repository / "Packages").open("wb") as f: run( ["apt-ftparchive", "packages", "."], stdout=f, sandbox=context.sandbox( options=[ "--ro-bind", context.repository, workdir(context.repository), "--chdir", workdir(context.repository), ], ), ) # fmt: skip (context.sandbox_tree / "etc/apt/sources.list.d").mkdir(parents=True, exist_ok=True) (context.sandbox_tree / "etc/apt/sources.list.d/mkosi-local.sources").write_text( textwrap.dedent( """\ Enabled: yes Types: deb URIs: file:///repository Suites: ./ Trusted: yes """ ) ) (context.sandbox_tree / "etc/apt/preferences.d").mkdir(parents=True, exist_ok=True) (context.sandbox_tree / "etc/apt/preferences.d/mkosi-local.pref").write_text( textwrap.dedent( """\ Package: * Pin: origin "" Pin-Priority: 1100 """ ) ) cls.invoke( context, "update", arguments=[ "-o", "Dir::Etc::sourcelist=sources.list.d/mkosi-local.sources", "-o", "Dir::Etc::sourceparts=-", "-o", "APT::Get::List-Cleanup=0", ], ) # fmt: skip mkosi-26/mkosi/installer/dnf.py000066400000000000000000000253441512054777600167130ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import textwrap from collections.abc import Sequence from pathlib import Path from typing import Optional from mkosi.config import Cacheonly, Config from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.installer.rpm import RpmRepository, rpm_cmd from mkosi.log import ARG_DEBUG from mkosi.run import CompletedProcess, run, workdir from mkosi.util import _FILE, PathString class Dnf(PackageManager): @classmethod def executable(cls, config: Config) -> str: # Allow the user to override autodetection with an environment variable dnf = config.finalize_environment().get("MKOSI_DNF") return Path(dnf or config.find_binary("dnf5") or "dnf").name @classmethod def subdir(cls, config: Config) -> Path: return Path("libdnf5" if cls.executable(config) == "dnf5" else "dnf") @classmethod def package_subdirs(cls, cache: Path) -> list[tuple[Path, Path]]: dirs = [p for p in cache.iterdir() if p.is_dir() and "-" in p.name and "mkosi" not in p.name] return [ ( # If the package cache directory is set to /var, we need to make sure we look up packages # where they were stored by dnf, so don't do any special handling in that case. 
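            # (Illustrative example: a metadata directory named "fedora-2f3a9b1c" maps to
            # ("packages/fedora", "fedora-2f3a9b1c/packages") in the non-/var case, so
            # downloaded packages are shared across mirrors while the per-URL metadata stays
            # separate.)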
d.relative_to(cache) / "packages" if cache.parent == Path("/var/cache") # Cache directories look like <repoid>-<hash> so let's strip off the hash to reuse # the same package cache directory regardless of baseurl. else Path("packages") / d.name[: d.name.rfind("-")], d.relative_to(cache) / "packages", ) for d in dirs ] @classmethod def package_globs(cls) -> list[str]: return ["*.rpm"] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "dnf": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), "rpm": cls.apivfs_script_cmd(context) + rpm_cmd(), "mkosi-install": ["dnf", "install"], "mkosi-upgrade": ["dnf", "upgrade"], "mkosi-remove": ["dnf", "remove"], "mkosi-reinstall": ["dnf", "reinstall"], } # fmt: skip @classmethod def setup( cls, context: Context, repositories: Sequence[RpmRepository], filelists: bool = True, metadata_expire: Optional[str] = None, ) -> None: (context.sandbox_tree / "etc/dnf/vars").mkdir(parents=True, exist_ok=True) (context.sandbox_tree / "etc/yum.repos.d").mkdir(parents=True, exist_ok=True) config = context.sandbox_tree / "etc/dnf/dnf.conf" if not config.exists(): config.parent.mkdir(exist_ok=True, parents=True) with config.open("w") as f: # Make sure we download filelists so all dependencies can be resolved. # See https://bugzilla.redhat.com/show_bug.cgi?id=2180842 if cls.executable(context.config) == "dnf5" and filelists: f.write("[main]\noptional_metadata_types=filelists\n") # The CentOS Hyperscale SIG ships a COW plugin for dnf that's disabled by default. Let's enable it so we # can take advantage of faster rpm package installations. reflink = context.sandbox_tree / "etc/dnf/plugins/reflink.conf" if not reflink.exists(): reflink.parent.mkdir(parents=True, exist_ok=True) reflink.write_text( textwrap.dedent( """\ [main] enabled=1 """ ) ) # The versionlock plugin will fail if enabled without a configuration file so let's write a noop # configuration file to make it happy which can be overridden by users.
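        # (Illustrative: a user who actually wants version locking can override this from a
        # sandbox tree, e.g. by shipping mkosi.sandbox/etc/dnf/plugins/versionlock.conf with
        # enabled=1 and a real locklist.)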
versionlock = context.sandbox_tree / "etc/dnf/plugins/versionlock.conf" if not versionlock.exists(): versionlock.parent.mkdir(parents=True, exist_ok=True) versionlock.write_text( textwrap.dedent( """\ [main] enabled=0 locklist=/dev/null """ ) ) repofile = context.sandbox_tree / "etc/yum.repos.d/mkosi.repo" if not repofile.exists(): repofile.parent.mkdir(exist_ok=True, parents=True) with repofile.open("w") as f: for repo in repositories: f.write( textwrap.dedent( f"""\ [{repo.id}] name={repo.id} {repo.url} gpgcheck=1 enabled={int(repo.enabled)} """ ) ) if repo.sslcacert: f.write(f"sslcacert={repo.sslcacert}\n") if repo.sslclientcert: f.write(f"sslclientcert={repo.sslclientcert}\n") if repo.sslclientkey: f.write(f"sslclientkey={repo.sslclientkey}\n") if repo.priority: f.write(f"priority={repo.priority}\n") if metadata_expire: f.write(f"metadata_expire={metadata_expire}\n") for i, url in enumerate(repo.gpgurls): f.write("gpgkey=" if i == 0 else len("gpgkey=") * " ") f.write(f"{url}\n") f.write("\n") @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: return super().finalize_environment(context) | { "RPM_FORCE_DEBIAN": "1", } @classmethod def cmd( cls, context: Context, cached_metadata: bool = True, ) -> list[PathString]: dnf = cls.executable(context.config) cmdline: list[PathString] = [ dnf, "--assumeyes", "--best", f"--releasever={context.config.release}", "--installroot=/buildroot", "--setopt=keepcache=1", "--setopt=logdir=/var/log", f"--setopt=cachedir=/var/cache/{cls.subdir(context.config)}", f"--setopt=install_weak_deps={int(context.config.with_recommends)}", "--setopt=check_config_file_age=0", "--setopt=persistdir=/buildroot/var/lib/dnf", ] if ARG_DEBUG.get(): cmdline += ["--setopt=debuglevel=10"] if not context.config.repository_key_check: cmdline += ["--nogpgcheck"] if context.config.repositories: opt = "--enable-repo" if dnf == "dnf5" else "--enablerepo" cmdline += [f"{opt}={repo}" for repo in context.config.repositories] if context.config.cacheonly == Cacheonly.always: cmdline += ["--cacheonly"] elif cached_metadata: cmdline += ["--setopt=metadata_expire=never"] if dnf == "dnf5": cmdline += ["--setopt=cacheonly=metadata"] if not context.config.architecture.is_native(): cmdline += [f"--forcearch={cls.architecture(context)}"] if not context.config.with_docs: cmdline += ["--no-docs" if dnf == "dnf5" else "--nodocs"] if dnf == "dnf5": cmdline += ["--use-host-config"] else: cmdline += [ "--config=/etc/dnf/dnf.conf", "--setopt=reposdir=/etc/yum.repos.d", "--setopt=varsdir=/etc/dnf/vars", ] if context.config.proxy_url: cmdline += [f"--setopt=proxy={context.config.proxy_url}"] if context.config.proxy_peer_certificate: cmdline += ["--setopt=proxy_sslcacert=/proxy.cacert"] if context.config.proxy_client_certificate: cmdline += ["--setopt=proxy_sslclientcert=/proxy.clientcert"] if context.config.proxy_client_key: cmdline += ["--setopt=proxy_sslclientkey=/proxy.clientkey"] return cmdline @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, stdout: _FILE = None, cached_metadata: bool = True, ) -> CompletedProcess: try: return run( cls.cmd(context, cached_metadata=cached_metadata) + [operation, *arguments], sandbox=cls.sandbox(context, apivfs=apivfs), env=cls.finalize_environment(context), stdout=stdout, ) finally: # dnf interprets the log directory relative to the install root so there's nothing we can do but # to remove the log files from the install root afterwards. 
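            # (Illustrative: in practice that means files like dnf5.log, hawkey.log or yum.log
            # appearing under /buildroot/var/log, which the loop below unlinks again.)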
if (context.root / "var/log").exists(): for p in (context.root / "var/log").iterdir(): if any(p.name.startswith(prefix) for prefix in ("dnf", "hawkey", "yum")): p.unlink() @classmethod def install( cls, context: Context, packages: Sequence[str], *, apivfs: bool = True, allow_downgrade: bool = False, ) -> None: arguments = [] if allow_downgrade and Dnf.executable(context.config) == "dnf5": arguments += ["--allow-downgrade"] arguments += [*packages] cls.invoke(context, "install", arguments, apivfs=apivfs) @classmethod def remove(cls, context: Context, packages: Sequence[str]) -> None: cls.invoke(context, "remove", packages, apivfs=True) @classmethod def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> None: cls.invoke( context, "makecache", arguments=[*(["--refresh"] if force else []), *arguments], cached_metadata=False, ) @classmethod def createrepo(cls, context: Context) -> None: run( ["createrepo_c", workdir(context.repository)], sandbox=context.sandbox(options=["--bind", context.repository, workdir(context.repository)]), ) (context.sandbox_tree / "etc/yum.repos.d/mkosi-local.repo").write_text( textwrap.dedent( """\ [mkosi] name=mkosi baseurl=file:///repository gpgcheck=0 metadata_expire=never priority=10 """ ) ) cls.sync(context, force=True, arguments=["--disablerepo=*", "--enablerepo=mkosi"]) mkosi-26/mkosi/installer/pacman.py000066400000000000000000000256041512054777600174020ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import shutil import textwrap from collections.abc import Sequence from contextlib import AbstractContextManager from pathlib import Path from mkosi.config import Config from mkosi.context import Context from mkosi.distribution import detect_distribution from mkosi.installer import PackageManager from mkosi.log import complete_step from mkosi.run import CompletedProcess, run, workdir from mkosi.sandbox import umask from mkosi.tree import copy_tree from mkosi.util import _FILE, PathString from mkosi.versioncomp import GenericVersion @dataclasses.dataclass(frozen=True) class PacmanRepository: id: str url: str class Pacman(PackageManager): @classmethod def executable(cls, config: Config) -> str: return "pacman" @classmethod def subdir(cls, config: Config) -> Path: return Path("pacman") @classmethod def package_subdirs(cls, cache: Path) -> list[tuple[Path, Path]]: return [(Path("pkg"), Path("pkg"))] @classmethod def package_globs(cls) -> list[str]: return ["*.pkg.tar*"] @classmethod def state_subdirs(cls) -> list[Path]: return [Path("sync")] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "pacman": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), "mkosi-install": ["pacman", "--sync", "--needed"], "mkosi-upgrade": ["pacman", "--sync", "--sysupgrade", "--needed"], "mkosi-remove": ["pacman", "--remove", "--recursive", "--nosave"], "mkosi-reinstall": ["pacman", "--sync"], } # fmt: skip @classmethod def mounts(cls, context: Context) -> list[PathString]: mounts = [ *super().mounts(context), # pacman writes downloaded packages to the first writable cache directory. We don't want it to # write to our local repository directory so we expose it as a read-only directory to pacman. 
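        # (Illustrative: cmd() below therefore lists /var/cache/pacman/mkosi before
        # /var/cache/pacman/pkg, so lookups hit locally built packages first while new
        # downloads fall through to the writable pkg directory.)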
"--ro-bind", context.repository, "/var/cache/pacman/mkosi", ] # fmt: skip if any(context.keyring_dir.iterdir()): mounts += ["--ro-bind", context.keyring_dir, "/etc/pacman.d/gnupg"] if (context.root / "var/lib/pacman/local").exists(): # pacman reuses the same directory for the sync databases and the local database containing the # list of installed packages. The former should go in the cache directory, the latter should go # in the image, so we bind mount the local directory from the image to make sure that happens. We # make sure to bind mount directly from the mounted /buildroot directly instead of from the host # root directory since /buildroot might be an overlay mount and we want to make sure any writes # are done to the upperdir of the overlay mount. mounts += ["--bind", "+/buildroot/var/lib/pacman/local", "/var/lib/pacman/local"] return mounts @classmethod def setup(cls, context: Context, repositories: Sequence[PacmanRepository]) -> None: if context.config.repository_key_check: sig_level = "Required DatabaseOptional" else: # If we are using a single local mirror built on the fly there # will be no signatures sig_level = "Never" (context.sandbox_tree / "etc/mkosi-local.conf").touch() config = context.sandbox_tree / "etc/pacman.conf" if config.exists(): # If DownloadUser is specified, remove it as the user won't be available in the sandbox. lines = config.read_text().splitlines() lines = [line for line in lines if not line.strip().startswith("DownloadUser")] config.write_text("\n".join(lines)) return config.parent.mkdir(exist_ok=True, parents=True) with config.open("w") as f: f.write( textwrap.dedent( f"""\ [options] SigLevel = {sig_level} LocalFileSigLevel = Optional ParallelDownloads = 5 Architecture = {cls.architecture(context)} """ ) ) if not context.config.with_docs: f.write( textwrap.dedent( """\ NoExtract = usr/share/doc/* NoExtract = usr/share/man/* NoExtract = usr/share/groff/* NoExtract = usr/share/gtk-doc/* NoExtract = usr/share/info/* """ ) ) # This has to go first so that our local repository always takes precedence over any other ones. f.write("Include = /etc/mkosi-local.conf\n") if any((context.sandbox_tree / "etc/pacman.d/").glob("*.conf")): f.write( textwrap.dedent( """\ Include = /etc/pacman.d/*.conf """ ) ) for repo in repositories: f.write( textwrap.dedent( f"""\ [{repo.id}] Server = {repo.url} """ ) ) @classmethod def cmd(cls, context: Context) -> list[PathString]: return [ "pacman", "--root=/buildroot", "--logfile=/dev/null", "--dbpath=/var/lib/pacman", # Make sure pacman looks at our local repository first by putting it as the first cache # directory. We mount it read-only so the second directory will still be used for writing new # cache entries. 
"--cachedir=/var/cache/pacman/mkosi", "--cachedir=/var/cache/pacman/pkg", "--hookdir=/buildroot/etc/pacman.d/hooks", "--arch", cls.architecture(context), "--color", "auto", "--noconfirm", ] # fmt: skip @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, stdout: _FILE = None, ) -> CompletedProcess: with umask(~0o755): (context.root / "var/lib/pacman/local").mkdir(parents=True, exist_ok=True) (context.root / "etc/pacman.d/hooks").mkdir(parents=True, exist_ok=True) return run( cls.cmd(context) + [operation, *arguments], sandbox=cls.sandbox(context, apivfs=apivfs), env=cls.finalize_environment(context), stdout=stdout, ) @classmethod def install( cls, context: Context, packages: Sequence[str], *, apivfs: bool = True, allow_downgrade: bool = False, ) -> None: arguments = ["--needed", "--assume-installed", "initramfs"] if allow_downgrade: arguments += ["--sysupgrade", "--sysupgrade"] arguments += [*packages] cls.invoke(context, "--sync", arguments, apivfs=apivfs) @classmethod def remove(cls, context: Context, packages: Sequence[str]) -> None: installed = { cls.parse_pkg_desc(i)[0] for i in (context.root / "var/lib/pacman/local").glob("*/desc") } remove = [p for p in packages if p in installed] if remove: cls.invoke(context, "--remove", ["--nosave", "--recursive", *remove], apivfs=True) @classmethod def keyring(cls, context: Context) -> None: def sandbox() -> AbstractContextManager[list[PathString]]: return cls.sandbox( context, apivfs=False, # By default the keyring is mounted read-only so we override the read-only mount with a # writable mount to make it writable for the following pacman-key commands. options=["--bind", context.keyring_dir, "/etc/pacman.d/gnupg"], ) if ( (d := detect_distribution(context.config.tools())[0]) and d.is_apt_distribution() and (context.sandbox_tree / "usr/share/pacman/keyrings").exists() ): # pacman on Debian/Ubuntu looks for keyrings in /usr/share/keyrings so make sure all sandbox # trees keyrings are available in that location as well. (context.sandbox_tree / "usr/share").mkdir(parents=True, exist_ok=True) copy_tree( context.sandbox_tree / "usr/share/pacman/keyrings", context.sandbox_tree / "usr/share/keyrings", dereference=True, sandbox=context.sandbox, ) context.keyring_dir.mkdir(parents=True, exist_ok=True) with complete_step("Populating pacman keyring"): run(["pacman-key", "--init"], sandbox=sandbox()) run(["pacman-key", "--populate"], sandbox=sandbox()) @classmethod def sync(cls, context: Context, force: bool) -> None: cls.invoke(context, "--sync", ["--refresh", *(["--refresh"] if force else [])]) @classmethod def createrepo(cls, context: Context) -> None: run( [ "repo-add", "--quiet", workdir(context.repository / "mkosi.db.tar"), *sorted( (workdir(p) for p in context.repository.glob("*.pkg.tar*")), key=lambda p: GenericVersion(Path(p).name), ), ], sandbox=context.sandbox(options=["--bind", context.repository, workdir(context.repository)]), ) (context.sandbox_tree / "etc/mkosi-local.conf").write_text( textwrap.dedent( """\ [mkosi] Server = file:///i/dont/exist SigLevel = Never Usage = Install Search Upgrade """ ) ) # pacman can't sync a single repository, so we go behind its back and do it ourselves. 
shutil.move(context.repository / "mkosi.db.tar", context.metadata_dir / "lib/pacman/sync/mkosi.db") @classmethod def parse_pkg_desc(cls, path: Path) -> tuple[str, str, str, str]: name = version = base = arch = "" with path.open() as desc: for line in desc: line = line.strip() if line == "%NAME%": name = next(desc).strip() elif line == "%VERSION%": version = next(desc).strip() elif line == "%BASE%": base = next(desc).strip() elif line == "%ARCH%": arch = next(desc).strip() break return name, version, base, arch mkosi-26/mkosi/installer/rpm.py000066400000000000000000000075641512054777600167460ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import textwrap from pathlib import Path from typing import Literal, Optional, overload from mkosi.context import Context from mkosi.distribution import Distribution from mkosi.log import die from mkosi.run import glob_in_sandbox from mkosi.util import PathString @dataclasses.dataclass(frozen=True) class RpmRepository: id: str url: str gpgurls: tuple[str, ...] enabled: bool = True sslcacert: Optional[Path] = None sslclientkey: Optional[Path] = None sslclientcert: Optional[Path] = None priority: Optional[int] = None @overload def find_rpm_gpgkey( context: Context, key: str, fallback: Optional[str] = None, *, required: Literal[True] = True, ) -> str: ... @overload def find_rpm_gpgkey( context: Context, key: str, fallback: Optional[str] = None, *, required: bool, ) -> Optional[str]: ... def find_rpm_gpgkey( context: Context, key: str, fallback: Optional[str] = None, *, required: bool = True, ) -> Optional[str]: # We assume here that GPG keys will only ever be relative symlinks and never absolute symlinks. paths = glob_in_sandbox( f"/usr/share/distribution-gpg-keys/*/{key}*", f"/etc/pki/rpm-gpg/{key}*", sandbox=context.sandbox(), ) if paths: return Path(paths[0]).as_uri() if fallback and context.config.repository_key_fetch: return fallback if required: die( f"{key} GPG key not found in /usr/share/distribution-gpg-keys or /etc/pki/rpm-gpg", hint="Make sure the distribution-gpg-keys package is installed", ) return None def setup_rpm( context: Context, *, dbpath: str = "/usr/lib/sysimage/rpm", dbbackend: Optional[str] = None, ) -> None: confdir = context.sandbox_tree / "etc/rpm" confdir.mkdir(parents=True, exist_ok=True) if not (confdir / "macros.lang").exists() and context.config.locale: (confdir / "macros.lang").write_text(f"%_install_langs {context.config.locale}") if not (confdir / "macros.dbpath").exists(): (confdir / "macros.dbpath").write_text(f"%_dbpath {dbpath}") if dbbackend: (confdir / "macros.db_backend").write_text(f"%_db_backend {dbbackend}") if context.config.distribution == Distribution.opensuse or ( context.config.distribution.is_centos_variant() and context.config.release == "9" ): # Write an rpm sequoia policy that makes sure "sha1.second_preimage_resistance = always" is # configured and makes sure that a minimal config is in place to make sure builds succeed. # TODO: Remove when distributions GPG keys are accepted by the default rpm-sequoia config everywhere. 
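        # [Editor's illustration] On a fresh sandbox tree the block below
        # yields /etc/crypto-policies/back-ends/rpm-sequoia.config
        # containing (abridged):
        #
        #   [hash_algorithms]
        #   sha1.second_preimage_resistance = "always"
        #   sha256 = "always"
        #   default_disposition = "never"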
p = context.sandbox_tree / "etc/crypto-policies/back-ends/rpm-sequoia.config" p.parent.mkdir(parents=True, exist_ok=True) prev = p.read_text() if p.exists() else "" with p.open("w") as f: for line in prev.splitlines(keepends=True): if line.startswith("sha1.second_preimage_resistance"): f.write('sha1.second_preimage_resistance = "always"\n') else: f.write(line) if not any(line.startswith("[hash_algorithms]") for line in prev.splitlines()): f.write( textwrap.dedent( """ [hash_algorithms] sha1.second_preimage_resistance = "always" sha224 = "always" sha256 = "always" sha384 = "always" sha512 = "always" default_disposition = "never" """ ) ) def rpm_cmd() -> list[PathString]: return ["env", "HOME=/", "rpm", "--root=/buildroot"] mkosi-26/mkosi/installer/zypper.py000066400000000000000000000146011512054777600174670ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import textwrap from collections.abc import Sequence from pathlib import Path from mkosi.config import Config, yes_no from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.installer.rpm import RpmRepository, rpm_cmd from mkosi.log import ARG_DEBUG from mkosi.run import CompletedProcess, run, workdir from mkosi.util import _FILE, PathString class Zypper(PackageManager): @classmethod def executable(cls, config: Config) -> str: return "zypper" @classmethod def subdir(cls, config: Config) -> Path: return Path("zypp") @classmethod def package_subdirs(cls, cache: Path) -> list[tuple[Path, Path]]: return [(Path("packages"), Path("packages"))] @classmethod def package_globs(cls) -> list[str]: return ["*.rpm"] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: install: list[PathString] = [ "zypper", "install", "--download", "in-advance", "--recommends" if context.config.with_recommends else "--no-recommends", "--force-resolution", ] # fmt: skip return { "zypper": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), "rpm": cls.apivfs_script_cmd(context) + rpm_cmd(), "mkosi-install": install, "mkosi-upgrade": ["zypper", "update"], "mkosi-remove": ["zypper", "--ignore-unknown", "remove", "--clean-deps"], "mkosi-reinstall": install + ["--force"], } # fmt: skip @classmethod def setup(cls, context: Context, repositories: Sequence[RpmRepository]) -> None: config = context.sandbox_tree / "etc/zypp/zypp.conf" config.parent.mkdir(exist_ok=True, parents=True) # rpm.install.excludedocs can only be configured in zypp.conf so we append to any user provided # config file. Let's also bump the refresh delay to the same default as dnf which is 48 hours. 
with config.open("a") as f: f.write( textwrap.dedent( f""" [main] rpm.install.excludedocs = {yes_no(not context.config.with_docs)} repo.refresh.delay = {48 * 60} """ ) ) repofile = context.sandbox_tree / "etc/zypp/repos.d/mkosi.repo" if not repofile.exists(): repofile.parent.mkdir(exist_ok=True, parents=True) with repofile.open("w") as f: for repo in repositories: f.write( textwrap.dedent( f"""\ [{repo.id}] name={repo.id} {repo.url} gpgcheck=1 enabled={int(repo.enabled)} autorefresh=0 keeppackages=1 """ ) ) if repo.priority: f.write(f"priority={repo.priority}\n") for i, url in enumerate(repo.gpgurls): f.write("gpgkey=" if i == 0 else len("gpgkey=") * " ") f.write(f"{url}\n") f.write("\n") @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: return super().finalize_environment(context) | { "ZYPP_CONF": "/etc/zypp/zypp.conf", "RPM_FORCE_DEBIAN": "1", } @classmethod def cmd(cls, context: Context) -> list[PathString]: return [ "zypper", "--installroot=/buildroot", "--cache-dir=/var/cache/zypp", "--non-interactive", "--no-refresh", f"--releasever={context.config.release}", *(["--gpg-auto-import-keys"] if context.config.repository_key_fetch else []), *(["--no-gpg-checks"] if not context.config.repository_key_check else []), *([f"--plus-content={repo}" for repo in context.config.repositories]), *(["-vv"] if ARG_DEBUG.get() else []), ] @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, options: Sequence[str] = (), apivfs: bool = False, stdout: _FILE = None, ) -> CompletedProcess: return run( cls.cmd(context) + [*options, operation, *arguments], sandbox=cls.sandbox(context, apivfs=apivfs), env=cls.finalize_environment(context), stdout=stdout, ) @classmethod def install( cls, context: Context, packages: Sequence[str], *, apivfs: bool = True, allow_downgrade: bool = False, ) -> None: arguments = [ "--download", "in-advance", "--recommends" if context.config.with_recommends else "--no-recommends", "--force-resolution", ] # fmt: skip if allow_downgrade: arguments += ["--allow-downgrade"] arguments += [*packages] cls.invoke(context, "install", arguments, apivfs=apivfs) @classmethod def remove(cls, context: Context, packages: Sequence[str]) -> None: cls.invoke(context, "remove", ["--clean-deps", *packages], apivfs=True, options=["--ignore-unknown"]) @classmethod def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> None: cls.invoke(context, "refresh", [*(["--force"] if force else []), *arguments]) @classmethod def createrepo(cls, context: Context) -> None: run( ["createrepo_c", workdir(context.repository)], sandbox=context.sandbox(options=["--bind", context.repository, workdir(context.repository)]), ) (context.sandbox_tree / "etc/zypp/repos.d/mkosi-local.repo").write_text( textwrap.dedent( """\ [mkosi] name=mkosi baseurl=file:///repository gpgcheck=0 autorefresh=0 keeppackages=0 priority=10 """ ) ) cls.sync(context, force=True, arguments=["mkosi"]) mkosi-26/mkosi/kmod.py000066400000000000000000000460041512054777600150750ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import fnmatch import itertools import logging import os import re import subprocess from collections.abc import Iterable, Iterator, Reversible from pathlib import Path from mkosi.context import Context from mkosi.log import complete_step, log_step from mkosi.run import chroot_cmd, run from mkosi.sandbox import chase from mkosi.util import chdir, parents_below def loaded_modules() -> list[str]: # Loaded modules are 
listed with underscores but the filenames might use dashes instead. return [ normalize_module_name(line.split()[0]) for line in Path("/proc/modules").read_text().splitlines() ] def globs_match_filename( name: str, globs: Reversible[str], *, match_default: bool = False, ) -> bool: # Check whether the path matches any of the globs for glob in reversed(globs): # Patterns are evaluated in order and last matching one wins. # Patterns may be prefixed with '-' to exclude modules. if negative := glob.startswith("-"): glob = glob[1:] # As a special case, if a directory is specified, all items # below that directory are matched. if glob.endswith("/"): glob += "*" if ( # Match globs starting with / relative to kernel/ first, since in-tree module are the common case (glob.startswith("/") and fnmatch.fnmatch(f"/{name}", f"/kernel{glob}")) # Now match absolute globs relative to lib/modules/KVER/ or (glob.startswith("/") and fnmatch.fnmatch(f"/{name}", glob)) # match a subset of the path, at path element boundary or ("/" in glob and fnmatch.fnmatch(f"/{name}", f"*/{glob}")) # match the basename or fnmatch.fnmatch(name.split("/")[-1], glob) ): return not negative return match_default def globs_match_module( name: str, globs: Reversible[str], ) -> bool: # Strip '.ko' suffix and an optional compression suffix name = re.sub(r"\.ko(\.(gz|xz|zst))?$", "", name) # Check whether the suffixless-path matches any of the globs return globs_match_filename(name, globs) def globs_match_firmware( name: str, globs: Reversible[str], *, match_default: bool = False, ) -> bool: # Strip any known compression suffixes name = re.sub(r"\.(gz|xz|zst)$", "", name) # Check whether the suffixless-path matches any of the globs return globs_match_filename(name, globs, match_default=match_default) def filter_kernel_modules( root: Path, kver: str, *, include: Iterable[str], exclude: Iterable[str], ) -> list[str]: log_step("Applying kernel modules include/exclude configuration") if include: logging.debug(f"Kernel modules include directives: {' '.join(include)}") if exclude: logging.debug(f"Kernel modules exclude directives: {' '.join(exclude)}") modulesd = Path("usr/lib/modules") / kver with chdir(root): # The glob may match additional paths. # Narrow this down to *.ko, *.ko.gz, *.ko.xz, *.ko.zst. modules = { m for m in modulesd.rglob("*.ko*") if m.name.endswith((".ko", ".ko.gz", ".ko.xz", ".ko.zst")) } n_modules = len(modules) keep = set() if include: patterns = [p[3:] for p in include if p.startswith("re:")] regex = re.compile("|".join(patterns)) globs = [normalize_module_glob(p) for p in include if not p.startswith("re:")] for m in modules: rel = os.fspath(m.relative_to(modulesd)) # old regexes match relative to modulesd/subdir/ not modulesd/ legacy_rel = os.fspath(Path(*m.parts[5:])) if (patterns and regex.search(legacy_rel)) or globs_match_module( normalize_module_name(rel), globs ): keep.add(rel) if exclude: assert all(p.startswith("re:") for p in exclude) patterns = [p[3:] for p in exclude] regex = re.compile("|".join(patterns)) remove = set() for m in modules: rel = os.fspath(m.relative_to(modulesd)) # old regexes match relative to modulesd/subdir/ not modulesd/ legacy_rel = os.fspath(Path(*m.parts[5:])) if rel not in keep and regex.search(legacy_rel): remove.add(m) modules -= remove elif include: # If no exclude patterns are specified, only keep the specified kernel modules. 
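        # [Hypothetical example, editor's note] With
        # include=["fs/", "-fs/nls/", "crc32c"] everything below kernel/fs/
        # is kept except fs/nls/, plus any module whose normalized basename
        # matches "crc32c"; patterns are evaluated in order and the last
        # matching one wins.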
modules = {modulesd / m for m in keep} logging.debug(f"Passing {len(modules)}/{n_modules} kernel modules on to dependency resolution.") return sorted(module_path_to_name(m) for m in modules) def filter_firmware( root: Path, firmware: set[Path], *, include: Iterable[str], exclude: Iterable[str], ) -> set[Path]: log_step("Applying firmware include/exclude configuration") if include: logging.debug(f"Firmware include directives: {' '.join(include)}") if exclude: logging.debug(f"Firmware exclude directives: {' '.join(exclude)}") firmwared = Path("usr/lib/firmware") # globs can be also used to exclude firmware, so we we need to apply them # to the inherited list of firmware files too. globs = [p for p in include if not p.startswith("re:")] if exclude or globs: assert all(p.startswith("re:") for p in exclude) remove = set() patterns = [p[3:] for p in exclude] regex = re.compile("|".join(patterns)) for f in firmware: rel = os.fspath(f.relative_to(firmwared)) if (patterns and regex.search(rel)) or not globs_match_firmware(rel, globs, match_default=True): remove.add(f) firmware -= remove if include: with chdir(root): all_firmware = {p for p in firmwared.rglob("*") if p.is_file() or p.is_symlink()} patterns = [p[3:] for p in include if p.startswith("re:")] regex = re.compile("|".join(patterns)) for f in all_firmware: rel = os.fspath(f.relative_to(firmwared)) if (patterns and regex.search(rel)) or globs_match_firmware(rel, globs): firmware.add(f) logging.debug(f"A total of {len(firmware)} firmware files will be included in the image") return firmware def normalize_module_name(name: str) -> str: # Replace '_' by '-' return name.replace("_", "-") def normalize_module_glob(name: str) -> str: # We want to replace '_' by '-', except when used in […] ans = "" while name: i = (name + "[").index("[") ans += name[:i].replace("_", "-") name = name[i:] i = (name + "]").index("]") ans += name[: i + 1] name = name[i + 1 :] return ans def module_path_to_name(path: Path) -> str: return normalize_module_name(path.name.partition(".")[0]) def modinfo(context: Context, kver: str, modules: Iterable[str]) -> str: cmdline = ["modinfo", "--set-version", kver, "--null"] if context.config.output_format.is_extension_image() and not context.config.overlay: cmdline += ["--basedir", "/buildroot"] sandbox = context.sandbox(options=context.rootoptions(readonly=True)) else: sandbox = chroot_cmd(root=context.rootoptions) cmdline += [*modules] return run( cmdline, stdout=subprocess.PIPE, sandbox=sandbox, ).stdout.strip() def resolve_module_dependencies( context: Context, kver: str, modules: Iterable[str], ) -> tuple[set[Path], set[Path]]: """ Returns a tuple of lists containing the paths to the module and firmware dependencies of the given list of module names (including the given module paths themselves). The paths are returned relative to the root directory. """ modulesd = Path("usr/lib/modules") / kver if (p := context.root / modulesd / "modules.builtin").exists(): builtin = set(module_path_to_name(Path(m)) for m in p.read_text().splitlines()) else: builtin = set() with chdir(context.root): allmodules = set(modulesd.rglob("*.ko*")) nametofile = {module_path_to_name(m): m for m in allmodules} log_step("Running modinfo to fetch kernel module dependencies") # We could run modinfo once for each module but that's slow. Luckily we can pass multiple modules to # modinfo and it'll process them all in a single go. 
We get the modinfo for all modules to build two maps # that map the path of the module to its module dependencies and its firmware dependencies # respectively. Because there's more kernel modules than the max number of accepted CLI arguments, we # split the modules list up into chunks. info = "" for i in range(0, len(nametofile.keys()), 8500): chunk = list(nametofile.keys())[i : i + 8500] info += modinfo(context, kver, chunk) log_step("Calculating required kernel modules and firmware") moddep = {} firmwaredep = {} depends: set[str] = set() firmware: set[Path] = set() with chdir(context.root): for line in info.split("\0"): key, sep, value = line.partition(":") if not sep: key, sep, value = line.partition("=") value = value.strip() if key == "depends": depends.update(normalize_module_name(d) for d in value.split(",") if d) elif key == "softdep": # softdep is delimited by spaces and can contain strings like pre: and post: so discard # anything that ends with a colon. depends.update(normalize_module_name(d) for d in value.split() if not d.endswith(":")) elif key == "firmware": if (Path("usr/lib/firmware") / value).exists(): firmware.add(Path("usr/lib/firmware") / value) glob = "" if value.endswith("*") else ".*" firmware.update(Path("usr/lib/firmware").glob(f"{value}{glob}")) elif key == "name": # The file names use dashes, but the module names use underscores. We track the names in # terms of the file names, since the depends use dashes and therefore filenames as well. name = normalize_module_name(value) moddep[name] = depends firmwaredep[name] = firmware depends = set() firmware = set() todo = [*builtin, *modules] mods = set() firmware = set() while todo: m = todo.pop() if m in mods: continue depends = moddep.get(m, set()) for d in depends: if d not in nametofile and d not in builtin: logging.warning(f"{d} is a dependency of {m} but is not installed, ignoring ") mods.add(m) todo += depends firmware.update(firmwaredep.get(m, set())) return set(nametofile[m] for m in mods if m in nametofile), set(firmware) def gen_required_kernel_modules( context: Context, kver: str, *, modules_include: Iterable[str], modules_exclude: Iterable[str], firmware_include: Iterable[str], firmware_exclude: Iterable[str], ) -> Iterator[Path]: modulesd = Path("usr/lib/modules") / kver firmwared = Path("usr/lib/firmware") # There is firmware in /usr/lib/firmware that is not depended on by any modules so if any firmware was # installed we have to take the slow path to make sure we don't copy firmware into the initrd that is not # depended on by any kernel modules. if modules_include or modules_exclude or any((context.root / firmwared).glob("*")): modules, firmware = resolve_module_dependencies( context, kver, modules=filter_kernel_modules( context.root, kver, include=modules_include, exclude=modules_exclude, ), ) else: logging.debug( "No modules excluded and no firmware installed, using kernel modules generation fast path" ) with chdir(context.root): modules = set(modulesd.rglob("*.ko*")) firmware = set() # Include or exclude firmware explicitly configured firmware = filter_firmware(context.root, firmware, include=firmware_include, exclude=firmware_exclude) # /usr/lib/firmware makes use of symbolic links so we have to make sure the symlinks and their targets # are all included. fwcopy = firmware.copy() firmware.clear() for fw in fwcopy: # Every path component from /usr/lib/firmware up to and including the firmware file itself might be a # symlink. 
We need to make sure we include all of them so we iterate over them and keep resolving # each symlink separately (and recursively) and add all of them to the list of firmware to add. # # As of the time of writing this logic, the only firmware that actually requires intermediate path # symlink resolution are the following: # # $ find /usr/lib/firmware -type l | grep -v "\." # /usr/lib/firmware/intel/sof-ace-tplg # /usr/lib/firmware/nvidia/ad103 # /usr/lib/firmware/nvidia/ad104 # /usr/lib/firmware/nvidia/ad106 # /usr/lib/firmware/nvidia/ad107 # /usr/lib/firmware/nvidia/ga103/gsp # /usr/lib/firmware/nvidia/ga104/gsp # /usr/lib/firmware/nvidia/ga106/gsp # /usr/lib/firmware/nvidia/ga107/gsp # /usr/lib/firmware/nvidia/gb102 # /usr/lib/firmware/nvidia/gb203 # /usr/lib/firmware/nvidia/gb205 # /usr/lib/firmware/nvidia/gb206 # /usr/lib/firmware/nvidia/gb207 # /usr/lib/firmware/nvidia/tu104/gsp # /usr/lib/firmware/nvidia/tu106/gsp # /usr/lib/firmware/nvidia/tu117/gsp todo = list(reversed(fw.parts)) current = context.root while todo: part = todo.pop() if part == "/": current = context.root continue elif part == "..": current = current.parent continue elif part == ".": continue current /= part if not current.is_symlink(): continue if current.readlink().is_relative_to("/etc/alternatives"): target = chase(os.fspath(context.root), os.fspath(current.relative_to(context.root))) current.unlink() current.symlink_to(os.path.relpath(target, start=current.parent)) firmware.add(current.relative_to(context.root)) todo += list(reversed(current.readlink().parts)) # Relative symlinks are resolved relative to the directory # the symlink is located in. If the symlink is absolute we'll # override the current path anyway so modifying it here doesn't # matter. current = current.parent # Finally, add the actual fully resolved path to the firmware file. 
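        # [Hypothetical walk-through, editor's note] If
        # usr/lib/firmware/vendor/chip were a symlink to ../common, the walk
        # records vendor/chip itself and re-queues the parts of "../common",
        # so the resolved path under usr/lib/firmware/common is eventually
        # reached and added by the check below.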
if current.exists(): firmware.add(current.relative_to(context.root)) yield from sorted( itertools.chain( { p.relative_to(context.root) for f in modules | firmware for p in parents_below(context.root / f, context.root / "usr/lib") }, modules, firmware, (p.relative_to(context.root) for p in (context.root / modulesd).glob("modules*")), ) ) if (modulesd / "vdso").exists(): if not modules: yield from ( p.relative_to(context.root) for p in parents_below(context.root / modulesd / "vdso", context.root / "usr/lib") ) yield modulesd / "vdso" yield from sorted(p.relative_to(context.root) for p in (context.root / modulesd / "vdso").iterdir()) def process_kernel_modules( context: Context, kver: str, *, modules_include: Iterable[str], modules_exclude: Iterable[str], firmware_include: Iterable[str], firmware_exclude: Iterable[str], ) -> None: if not (modules_include or modules_exclude or firmware_include or firmware_exclude): return modulesd = Path("usr/lib/modules") / kver firmwared = Path("usr/lib/firmware") with complete_step("Applying kernel module filters"): required = set( gen_required_kernel_modules( context, kver, modules_include=modules_include, modules_exclude=modules_exclude, firmware_include=firmware_include, firmware_exclude=firmware_exclude, ) ) with chdir(context.root): modules = sorted(modulesd.rglob("*.ko*"), reverse=True) firmware = sorted(firmwared.rglob("*"), reverse=True) for m in modules: if m in required: continue p = context.root / m if p.is_file() or p.is_symlink(): if p.is_symlink(): p_target = Path(chase(os.fspath(context.root), os.fspath(m))) if p_target.exists(): p_target.unlink() p.unlink() elif p.exists(): p.rmdir() for fw in firmware: if fw in required: continue if any(fw.is_relative_to(firmwared / d) for d in ("amd-ucode", "intel-ucode")): continue p = context.root / fw if p.is_file() or p.is_symlink(): p.unlink() if p.parent != context.root / firmwared and not any(p.parent.iterdir()): p.parent.rmdir() elif p.exists(): p.rmdir() def is_valid_kdir(kdir: Path) -> bool: dircontent = list(kdir.glob("*")) # kdir does not exist or is empty if not dircontent: return False # check that kdir contains more than just updates return dircontent != [kdir / "updates"] def filter_devicetrees( root: Path, kver: str, *, include: Iterable[str], ) -> list[Path]: if not include: return [] logging.debug(f"Devicetrees include: {' '.join(include)}") # Search standard DTB locations dtb_dirs = [ Path("usr/lib/firmware") / kver / "device-tree", Path(f"usr/lib/linux-image-{kver}"), Path("usr/lib/modules") / kver / "dtb", ] matched_dtbs = [] globs = list(include) with chdir(root): for dtb_dir in dtb_dirs: all_dtbs = [p for p in dtb_dir.rglob("*.dtb") if p.is_file() or p.is_symlink()] logging.debug(f"Found {len(all_dtbs)} DTB files in {dtb_dir}") for dtb in all_dtbs: rel_path = os.fspath(dtb.relative_to(dtb_dir)) if globs_match_filename(rel_path, globs): logging.debug(f"Matched DTB: {rel_path} in {dtb_dir}") matched_dtbs.append(dtb) if not matched_dtbs: logging.warning(f"Devicetrees patterns '{globs}' matched 0 files") else: logging.debug(f"Including {len(matched_dtbs)} devicetree files") return sorted(matched_dtbs) mkosi-26/mkosi/log.py000066400000000000000000000113671512054777600147300ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import contextvars import logging import os import sys from collections.abc import Iterator from typing import Any, NoReturn, Optional from mkosi.sandbox import ANSI_BOLD, ANSI_GRAY, ANSI_RED, ANSI_RESET, ANSI_YELLOW, 
terminal_is_dumb # This global should be initialized after parsing arguments ARG_DEBUG = contextvars.ContextVar("debug", default=False) ARG_DEBUG_SHELL = contextvars.ContextVar("debug-shell", default=False) ARG_DEBUG_SANDBOX = contextvars.ContextVar("debug-sandbox", default=False) LEVEL = 0 def die(message: str, *, hint: Optional[str] = None) -> NoReturn: logging.error(f"{message}") if hint: logging.info(f"({hint})") sys.exit(1) class ConsoleCodes: OSC = "\033]" CSI = "\033[" ST = "\033\\" # \033\\ is one possible ECMA-48 string terminators, BEL would be valid for most terminal emulators as # well, but kitty, annoyingly, actually rings the bell in that case. Other string terminators would be # valid as well, but this one worked for: # - alacritty # - ghostty # - gnome-terminal # - kitty # - konsole # - ptyxis # - xterm # References: # https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#window-title # https://en.wikipedia.org/wiki/ANSI_escape_code#Operating_System_Command_sequences # https://ghostty.org/docs/vt/concepts/sequences#osc-sequences # https://sw.kovidgoyal.net/kitty/shell-integration/#notes-for-shell-developers BEL = "\07" @classmethod def set_window_title(cls, title: str) -> str: return f"{cls.OSC}0;mkosi: {title}{cls.ST}" @classmethod def push_window_title(cls) -> str: """Push the window title on the window title stack.""" # not supported by konsole return f"{cls.CSI}22t" @classmethod def pop_window_title(cls) -> str: """Pop latest window title from the window title stack.""" # not supported by konsole return f"{cls.CSI}23t" @classmethod def ring_bell(cls) -> str: return cls.BEL def ring_terminal_bell() -> None: if terminal_is_dumb(): return None print(ConsoleCodes.ring_bell(), file=sys.stderr, end="") def log_step(text: str) -> None: prefix = " " * LEVEL if sys.exc_info()[0]: # We are falling through exception handling blocks. # De-emphasize this step here, so the user can tell more # easily which step generated the exception. The exception # or error will only be printed after we finish cleanup. 
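        # [Editor's illustration] e.g. a step named "Cleaning up" is logged
        # here as "‣ (Cleaning up)" instead of the bold "‣ Cleaning up"
        # used on the non-exception path below.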
if not terminal_is_dumb(): print(ConsoleCodes.set_window_title(text), file=sys.stderr, end="") logging.info(f"{prefix}({text})") else: if not terminal_is_dumb(): print(ConsoleCodes.set_window_title(text), file=sys.stderr, end="") logging.info(f"{prefix}{ANSI_BOLD}{text}{ANSI_RESET}") def log_notice(text: str) -> None: logging.info(f"{ANSI_BOLD}{text}{ANSI_RESET}") @contextlib.contextmanager def complete_step(text: str, text2: Optional[str] = None) -> Iterator[list[Any]]: global LEVEL log_step(text) LEVEL += 1 try: args: list[Any] = [] yield args finally: LEVEL -= 1 assert LEVEL >= 0 if text2 is not None: log_step(text2.format(*args)) class Formatter(logging.Formatter): def __init__(self, fmt: Optional[str] = None, *args: Any, **kwargs: Any) -> None: fmt = fmt or "%(message)s" self.formatters = { logging.DEBUG: logging.Formatter(f"‣ {ANSI_GRAY}{fmt}{ANSI_RESET}"), logging.INFO: logging.Formatter(f"‣ {fmt}"), logging.WARNING: logging.Formatter(f"‣ {ANSI_YELLOW}{fmt}{ANSI_RESET}"), logging.ERROR: logging.Formatter(f"‣ {ANSI_RED}{fmt}{ANSI_RESET}"), logging.CRITICAL: logging.Formatter(f"‣ {ANSI_RED}{ANSI_BOLD}{fmt}{ANSI_RESET}"), } # fmt: skip super().__init__(fmt, *args, **kwargs) def format(self, record: logging.LogRecord) -> str: return self.formatters[record.levelno].format(record) def log_setup(default_log_level: str = "info") -> None: handler = logging.StreamHandler(stream=sys.stderr) handler.setFormatter(Formatter()) logging.getLogger().addHandler(handler) logging.getLogger().setLevel( logging.getLevelName(os.getenv("SYSTEMD_LOG_LEVEL", default_log_level).upper()) ) @contextlib.contextmanager def stash_terminal_title() -> Iterator[None]: try: if not terminal_is_dumb(): print(ConsoleCodes.push_window_title(), file=sys.stderr, end="") yield finally: if not terminal_is_dumb(): print(ConsoleCodes.pop_window_title(), file=sys.stderr, end="") mkosi-26/mkosi/manifest.py000066400000000000000000000253731512054777600157570ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import datetime import json import subprocess import textwrap from typing import IO, Any, Optional from mkosi.config import ManifestFormat, OutputFormat from mkosi.context import Context from mkosi.distribution import PackageType from mkosi.installer.apt import Apt from mkosi.installer.pacman import Pacman from mkosi.log import complete_step from mkosi.run import run from mkosi.util import read_env_file @dataclasses.dataclass class PackageManifest: """A description of a package The fields used here must match https://systemd.io/COREDUMP_PACKAGE_METADATA/#well-known-keys. 
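
    Example (illustrative values, editor's addition):
        PackageManifest("rpm", "systemd", "255-1.fc40", "x86_64", 23456789)
        serializes via as_dict() to {"type": "rpm", "name": "systemd",
        "version": "255-1.fc40", "architecture": "x86_64"}; the size field
        is deliberately not part of the dictionary.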
""" type: str name: str version: str architecture: str size: int def as_dict(self) -> dict[str, str]: return { "type": self.type, "name": self.name, "version": self.version, "architecture": self.architecture, } @dataclasses.dataclass class SourcePackageManifest: name: str changelog: Optional[str] packages: list[PackageManifest] = dataclasses.field(default_factory=list) def add(self, package: PackageManifest) -> None: self.packages.append(package) def report(self) -> str: size = sum(p.size for p in self.packages) t = textwrap.dedent( f"""\ SourcePackage: {self.name} Packages: {" ".join(p.name for p in self.packages)} Size: {size} """ ) if self.changelog: t += f"""\nChangelog:\n{self.changelog}\n""" return t @dataclasses.dataclass class Manifest: context: Context packages: list[PackageManifest] = dataclasses.field(default_factory=list) source_packages: dict[str, SourcePackageManifest] = dataclasses.field(default_factory=dict) extension: dict[str, str] = dataclasses.field(default_factory=dict) _init_timestamp: datetime.datetime = dataclasses.field( init=False, default_factory=lambda: datetime.datetime.now().replace(microsecond=0) ) def need_source_info(self) -> bool: return ManifestFormat.changelog in self.context.config.manifest_format def record_packages(self) -> None: with complete_step("Recording packages in manifest…"): if self.context.config.distribution.installer.package_type() == PackageType.rpm: self.record_rpm_packages() if self.context.config.distribution.installer.package_type() == PackageType.deb: self.record_deb_packages() if self.context.config.distribution.installer.package_type() == PackageType.pkg: self.record_pkg_packages() def record_rpm_packages(self) -> None: c = run( [ "rpm", "--root=/buildroot", "--query", "--all", "--queryformat", r"%{NEVRA}\t%{SOURCERPM}\t%{NAME}\t%{ARCH}\t%{LONGSIZE}\t%{INSTALLTIME}\n", ], stdout=subprocess.PIPE, sandbox=( self.context.sandbox(options=["--ro-bind", self.context.root, "/buildroot"]) ), ) # fmt: skip packages = sorted(c.stdout.splitlines()) for package in packages: nevra, srpm, name, arch, size, installtime = package.split("\t") assert nevra.startswith(f"{name}-") evra = nevra.removeprefix(f"{name}-") # Some packages have architecture '(none)', and it's not part of NEVRA, e.g.: # gpg-pubkey-45719a39-5f2c0192 gpg-pubkey (none) 0 1635985199 if arch != "(none)": assert nevra.endswith(f".{arch}") evr = evra.removesuffix(f".{arch}") else: evr = evra arch = "" # If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. 
if ( self.context.config.base_trees and datetime.datetime.fromtimestamp(int(installtime)) < self._init_timestamp ): continue manifest = PackageManifest("rpm", name, evr, arch, int(size)) self.packages.append(manifest) if not self.need_source_info(): continue source = self.source_packages.get(srpm) if source is None: c = run( [ "rpm", "--root=/buildroot", "--query", "--changelog", nevra, ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, sandbox=self.context.sandbox(options=["--ro-bind", self.context.root, "/buildroot"]), ) changelog = c.stdout.strip() source = SourcePackageManifest(srpm, changelog) self.source_packages[srpm] = source source.add(manifest) def record_deb_packages(self) -> None: c = run( [ "dpkg-query", "--admindir=/buildroot/var/lib/dpkg", "--show", "--showformat", r"${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n", # noqa: E501 ], stdout=subprocess.PIPE, sandbox=self.context.sandbox(options=["--ro-bind", self.context.root, "/buildroot"]), ) # fmt: skip packages = sorted(c.stdout.splitlines()) for package in packages: name, source, version, arch, size, installtime = package.split("\t") # dpkg records the size in KBs, the field is optional db-fsys:Last-Modified is not available in # very old dpkg, so just skip creating the manifest for sysext when building on very old # distributions by setting the timestamp to epoch. This only affects Ubuntu Bionic which is # nearing EOL. If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by # packages that were installed in this execution of mkosi. We assume that the upper layer is put # together in one go, which currently is always true. install_timestamp = datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) if self.context.config.base_trees and install_timestamp < self._init_timestamp: continue manifest = PackageManifest("deb", name, version, arch, int(size or 0) * 1024) self.packages.append(manifest) if not self.need_source_info(): continue source_package = self.source_packages.get(source) if source_package is None: # Yes, --quiet is specified twice, to avoid output about download stats. Note that the # argument of the 'changelog' verb is the binary package name, not the source package # name. We also have to set "Dir" explicitly because apt has no separate option to configure # the changelog directory. Apt.invoke() sets all options that are interpreted relative to Dir # to absolute paths by default so this is safe. 
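                # [Editor's sketch, assuming Apt.cmd() wraps apt-get] The
                # effective invocation is roughly:
                #   apt-get changelog --quiet --quiet -o Dir=/buildroot <name>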
result = Apt.invoke( self.context, "changelog", ["--quiet", "--quiet", "-o", "Dir=/buildroot", name], stdout=subprocess.PIPE, ) source_package = SourcePackageManifest(source, result.stdout.strip()) self.source_packages[source] = source_package source_package.add(manifest) def record_pkg_packages(self) -> None: packages = sorted((self.context.root / "var/lib/pacman/local").glob("*/desc")) for desc in packages: name, version, source, arch = Pacman.parse_pkg_desc(desc) package = PackageManifest("pkg", name, version, arch, 0) self.packages.append(package) source_package = self.source_packages.get(source) if source_package is None: source_package = SourcePackageManifest(source, None) self.source_packages[source] = source_package source_package.add(package) def record_extension_release(self) -> None: if self.context.config.output_format not in (OutputFormat.sysext, OutputFormat.confext): return with complete_step(f"Recording {type} information in manifest…"): d = "usr/lib" if self.context.config.output_format == OutputFormat.sysext else "etc" p = self.context.root / d / f"extension-release.d/extension-release.{self.context.config.output}" self.extension = read_env_file(p) def has_data(self) -> bool: # We might add more data in the future return len(self.packages) > 0 or len(self.extension) > 0 def as_dict(self) -> dict[str, Any]: config = { "name": self.context.config.image_id or "image", "distribution": str(self.context.config.distribution), "architecture": str(self.context.config.architecture), "output_format": str(self.context.config.output_format), } if self.context.config.image_version is not None: config["version"] = self.context.config.image_version if self.context.config.release is not None: config["release"] = self.context.config.release return { # Bump this when incompatible changes are made to the manifest format. "manifest_version": 1, # Describe the image itself. "config": config, # Describe the image content in terms of packages. "packages": [package.as_dict() for package in self.packages], # Describe the SYSEXT / CONFEXT metadata. "extension": self.extension, } def write_json(self, out: IO[str]) -> None: json.dump(self.as_dict(), out, indent=2) def write_package_report(self, out: IO[str]) -> None: """Create a human-readable report about packages This is modelled after "Fedora compose reports" that are sent to fedora-devel. The format describes added and removed packages, and includes the changelogs. A diff between two such reports shows what changed *in* the packages quite nicely. 
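
        Example output (abridged, illustrative package names):
            Packages: 2
            Size: 23456789
            ----------------------------------------
            SourcePackage: systemd
            Packages: systemd systemd-udev
            Size: 23456789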
""" out.write(f"Packages: {len(self.packages)}\n") out.write(f"Size: {sum(p.size for p in self.packages)}") for package in self.source_packages.values(): out.write(f"\n{80 * '-'}\n") out.write(package.report()) mkosi-26/mkosi/mounts.py000066400000000000000000000121671512054777600154730ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import os import stat import tempfile from collections.abc import Iterator, Sequence from pathlib import Path from typing import Optional, Union from mkosi.config import BuildSourcesEphemeral, Config from mkosi.log import die from mkosi.sandbox import OVERLAYFS_SUPER_MAGIC, OverlayOperation, statfs from mkosi.util import PathString, flatten def stat_is_whiteout(st: os.stat_result) -> bool: return stat.S_ISCHR(st.st_mode) and st.st_rdev == 0 def delete_whiteout_files(path: Path) -> None: """Delete any char(0,0) device nodes underneath @path Overlayfs uses such files to mark "whiteouts" (files present in the lower layers, but removed in the upper one). """ for entry in path.rglob("*"): # TODO: Use Path.stat() once we depend on Python 3.10+. if stat_is_whiteout(os.stat(entry, follow_symlinks=False)): entry.unlink() def finalize_volatile_tmpdir() -> Path: if tempfile.tempdir and statfs(tempfile.tempdir) != OVERLAYFS_SUPER_MAGIC: return Path(tempfile.tempdir) for path in ("/var/tmp", "/tmp", "/dev/shm"): if Path(path).exists() and statfs(path) != OVERLAYFS_SUPER_MAGIC: return Path(path) die( "Cannot find temporary directory for volatile overlayfs upperdir", hint="Either /var/tmp, /tmp or /dev/shm must exist and not be located on overlayfs", ) @contextlib.contextmanager def mount_overlay( lowerdirs: Sequence[Path], dst: Path, *, upperdir: Optional[Path] = None, ) -> Iterator[Path]: with contextlib.ExitStack() as stack: if upperdir is None: upperdir = Path( stack.enter_context( tempfile.TemporaryDirectory( prefix="volatile-overlay.", dir=finalize_volatile_tmpdir(), ) ) ) st = lowerdirs[-1].stat() os.chmod(upperdir, st.st_mode) workdir = Path( stack.enter_context( tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-workdir") ) ) try: with OverlayOperation( tuple(os.fspath(p) for p in lowerdirs), os.fspath(upperdir), os.fspath(workdir), os.fspath(dst), ): yield dst finally: delete_whiteout_files(upperdir) @contextlib.contextmanager def finalize_source_mounts( config: Config, *, ephemeral: Union[BuildSourcesEphemeral, bool], ) -> Iterator[list[PathString]]: with contextlib.ExitStack() as stack: options: list[PathString] = [] for t in config.build_sources: src, dst = t.with_prefix("/work/src") if ephemeral: if ephemeral == BuildSourcesEphemeral.buildcache: if config.build_dir is None: die( "BuildSourcesEphemeral=buildcache was configured, but no build directory exists.", # noqa: E501 hint="Configure BuildDirectory= or create mkosi.builddir.", ) upperdir = config.build_subdir / f"mkosi.buildovl.{src.name}" upperdir.mkdir(mode=src.stat().st_mode, exist_ok=True) else: upperdir = Path( stack.enter_context( tempfile.TemporaryDirectory( prefix="volatile-overlay.", dir=finalize_volatile_tmpdir(), ) ) ) os.chmod(upperdir, src.stat().st_mode) workdir = Path( stack.enter_context( tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-workdir.") ) ) options += [ "--overlay-lowerdir", src, "--overlay-upperdir", upperdir, "--overlay-workdir", workdir, "--overlay", dst, ] # fmt: skip else: options += ["--bind", src, dst] yield options def finalize_certificate_mounts(config: Config, relaxed: bool = 
False) -> list[PathString]: mounts = [] root = config.tools() if config.tools_tree_certificates else Path("/") if not relaxed or root != Path("/"): subdirs = [Path("var/lib/ca-certificates")] if not relaxed: subdirs += [ Path("etc/pki/ca-trust"), Path("etc/pki/tls"), Path("etc/ssl"), Path("etc/ca-certificates"), ] mounts += [ (root / subdir, Path("/") / subdir) for subdir in subdirs if (root / subdir).exists() and any(p for p in (root / subdir).rglob("*") if not p.is_dir()) ] return flatten(("--ro-bind", src, target) for src, target in sorted(set(mounts), key=lambda s: s[1])) mkosi-26/mkosi/pager.py000066400000000000000000000010201512054777600152260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import pydoc from typing import Optional def page(text: str, enabled: Optional[bool]) -> None: if enabled: # Initialize less options from $MKOSI_LESS or provide a suitable fallback. # F: don't page if one screen # X: do not clear screen # M: verbose prompt # K: quit on ^C # R: allow rich formatting os.environ["LESS"] = os.getenv("MKOSI_LESS", "FXMKR") pydoc.pager(text) else: print(text) mkosi-26/mkosi/partition.py000066400000000000000000000052701512054777600161540ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import json import subprocess from collections.abc import Mapping, Sequence from pathlib import Path from typing import Any, Final, Optional from mkosi.log import die from mkosi.run import SandboxProtocol, nosandbox, run, workdir @dataclasses.dataclass(frozen=True) class Partition: type: str uuid: str partno: Optional[int] split_path: Optional[Path] roothash: Optional[str] @classmethod def from_dict(cls, dict: Mapping[str, Any]) -> "Partition": return cls( type=dict["type"], uuid=dict["uuid"], partno=int(partno) if (partno := dict.get("partno")) else None, # We have to translate the sandbox path to the path on the host by removing the /work prefix. split_path=( Path(p.removeprefix("/work")) if ((p := dict.get("split_path")) and p != "-") else None ), roothash=dict.get("roothash"), ) GRUB_BOOT_PARTITION_UUID: Final[str] = "21686148-6449-6e6f-744e-656564454649" def find_partitions(image: Path, *, sandbox: SandboxProtocol = nosandbox) -> list[Partition]: output = json.loads( run( ["systemd-repart", "--json=short", workdir(image, sandbox)], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, sandbox=sandbox(options=["--ro-bind", image, workdir(image, sandbox)]), ).stdout ) return [Partition.from_dict(d) for d in output] def finalize_roothash(partitions: Sequence[Partition]) -> Optional[str]: roothash: Optional[str] = None usrhash: Optional[str] = None for p in partitions: if (h := p.roothash) is None: continue if not (p.type.startswith("usr") or p.type.startswith("root")): die(f"Found roothash property on unexpected partition type {p.type}") # When the partition is deferred/skipped the roothash is listed as the literal 'TBD' string if h == "TBD": continue # When there's multiple verity enabled root or usr partitions, the first one wins. 
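        # [Example, editor's note] Two verity root partitions reporting
        # roothash "aaaa" and "bbbb" in enumeration order therefore yield
        # "roothash=aaaa" (illustrative values).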
if p.type.startswith("usr"): usrhash = usrhash or h else: roothash = roothash or h return f"roothash={roothash}" if roothash else f"usrhash={usrhash}" if usrhash else None def finalize_root(partitions: Sequence[Partition]) -> Optional[str]: root = finalize_roothash(partitions) if not root: root = next((f"root=PARTUUID={p.uuid}" for p in partitions if p.type.startswith("root")), None) if not root: root = next((f"mount.usr=PARTUUID={p.uuid}" for p in partitions if p.type.startswith("usr")), None) return root mkosi-26/mkosi/qemu.py000066400000000000000000001625571512054777600151260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import base64 import contextlib import dataclasses import enum import errno import fcntl import functools import hashlib import io import json import logging import os import queue import random import resource import shutil import signal import socket import struct import subprocess import sys import tempfile import textwrap import uuid from collections.abc import Iterator, Sequence from pathlib import Path from typing import Optional from mkosi.bootloader import KernelType from mkosi.config import ( Args, Config, ConfigFeature, ConsoleMode, Drive, DriveFlag, Firmware, Network, OutputFormat, Ssh, VsockCID, finalize_term, format_bytes, swtpm_setup_version, systemd_pty_forward, systemd_tool_version, want_selinux_relabel, yes_no, ) from mkosi.log import ARG_DEBUG, die from mkosi.partition import finalize_root, find_partitions from mkosi.run import AsyncioThread, find_binary, fork_and_wait, run, spawn, workdir from mkosi.tree import copy_tree, maybe_make_nocow, rmtree from mkosi.user import INVOKING_USER, become_root_in_subuid_range, become_root_in_subuid_range_cmd from mkosi.util import ( PathString, StrEnum, flock, flock_or_die, groupby, round_up, try_or, ) from mkosi.versioncomp import GenericVersion QEMU_KVM_DEVICE_VERSION = GenericVersion("9.0") VHOST_VSOCK_SET_GUEST_CID = 0x4008AF60 # Maximum permissible virtio-fs tag length (UTF-8 encoded, not NUL-terminated) VIRTIOFS_MAX_TAG_LEN = 36 class QemuDeviceNode(StrEnum): kvm = enum.auto() vhost_vsock = enum.auto() def device(self) -> Path: return Path("/dev") / str(self) def description(self) -> str: return { QemuDeviceNode.kvm: "KVM acceleration", QemuDeviceNode.vhost_vsock: "a VSock device", }[self] def feature(self, config: Config) -> ConfigFeature: return { QemuDeviceNode.kvm: config.kvm, QemuDeviceNode.vhost_vsock: config.vsock, }[self] def open(self) -> int: return os.open(self.device(), os.O_RDWR | os.O_CLOEXEC | os.O_NONBLOCK) def available(self, log: bool = False) -> bool: try: os.close(self.open()) except OSError as e: if e.errno not in (errno.ENOENT, errno.ENODEV, errno.EPERM, errno.EACCES): raise e if log and e.errno in (errno.ENOENT, errno.ENODEV): logging.warning( f"{self.device()} not found. Not adding {self.description()} to the virtual machine." ) if log and e.errno in (errno.EPERM, errno.EACCES): logging.warning( f"Permission denied to access {self.device()}. " f"Not adding {self.description()} to the virtual machine. " "(Maybe a kernel module could not be loaded?)" ) return False return True def hash_output(config: Config) -> "hashlib._Hash": p = os.fspath(config.output_dir_or_cwd() / config.output) return hashlib.sha256(p.encode()) def hash_to_vsock_cid(hash: "hashlib._Hash") -> int: cid = int.from_bytes(hash.digest()[:4], byteorder="little") # Make sure we don't return any of the well-known CIDs. 
return max(3, min(cid, 0xFFFFFFFF - 1)) def vsock_cid_in_use(vfd: int, cid: int) -> bool: try: fcntl.ioctl(vfd, VHOST_VSOCK_SET_GUEST_CID, struct.pack("=Q", cid)) except OSError as e: if e.errno != errno.EADDRINUSE: raise return True return False def find_unused_vsock_cid(config: Config, vfd: int) -> int: hash = hash_output(config) for i in range(64): cid = hash_to_vsock_cid(hash) if not vsock_cid_in_use(vfd, cid): return cid hash.update(i.to_bytes(length=4, byteorder="little")) for i in range(64): cid = random.randint(0, 0xFFFFFFFF - 1) if not vsock_cid_in_use(vfd, cid): return cid die("Failed to find an unused VSock connection ID") def find_qemu_binary(config: Config) -> Path: options = [f"qemu-system-{config.architecture.to_qemu()}"] if config.architecture.is_native(): options += ["/usr/libexec/qemu-kvm"] for o in options: if qemu := config.find_binary(o): return qemu die( "qemu not found.", hint=f"Is qemu-system-{config.architecture.to_qemu()} installed?", ) @dataclasses.dataclass(frozen=True) class OvmfConfig: description: Path firmware: Path format: str vars: Path vars_format: str def find_ovmf_firmware(config: Config, firmware: Firmware) -> Optional[OvmfConfig]: if not firmware.is_uefi(): return None desc = list((config.tools() / "usr/share/qemu/firmware").glob("*")) if config.tools() == Path("/"): desc += list((config.tools() / "etc/qemu/firmware").glob("*")) arch = config.architecture.to_qemu() machine = config.architecture.default_qemu_machine() for p in sorted(desc): if p.is_dir(): continue j = json.loads(p.read_text()) if "uefi" not in j["interface-types"]: logging.debug(f"{p.name} firmware description does not target UEFI, skipping") continue for target in j["targets"]: if target["architecture"] != arch: continue # We cannot use fnmatch as for example our default machine for x86-64 is q35 and the firmware # description lists "pc-q35-*" so we use a substring check instead. 
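            # [Example, editor's note] machine "q35" is a substring of the
            # glob "pc-q35-*", whereas fnmatch.fnmatch("q35", "pc-q35-*")
            # would have returned False.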
if any(machine in glob for glob in target["machines"]): break else: logging.debug( f"{p.name} firmware description does not target architecture {arch} or " f"machine {machine}, skipping" ) continue if "nvram-template" not in j["mapping"]: logging.debug(f"{p.name} firmware description is missing nvram-template, skipping") continue if firmware == Firmware.uefi_secure_boot and "secure-boot" not in j["features"]: logging.debug(f"{p.name} firmware description does not include secure boot, skipping") continue if firmware != Firmware.uefi_secure_boot and "secure-boot" in j["features"]: logging.debug(f"{p.name} firmware description includes secure boot, skipping") continue if ( config.firmware_variables in (Path("microsoft"), Path("microsoft-mok")) and "enrolled-keys" not in j["features"] ): logging.debug(f"{p.name} firmware description does not have enrolled Microsoft keys, skipping") continue if ( config.firmware_variables not in (Path("microsoft"), Path("microsoft-mok")) and "enrolled-keys" in j["features"] ): logging.debug(f"{p.name} firmware description has enrolled Microsoft keys, skipping") continue logging.debug(f"Using {p.name} firmware description") return OvmfConfig( description=Path("/") / p.relative_to(config.tools()), firmware=Path(j["mapping"]["executable"]["filename"]), format=j["mapping"]["executable"]["format"], vars=Path(j["mapping"]["nvram-template"]["filename"]), vars_format=j["mapping"]["nvram-template"]["format"], ) die("Couldn't find matching OVMF UEFI firmware description") @contextlib.contextmanager def start_swtpm(config: Config) -> Iterator[Path]: with tempfile.TemporaryDirectory(prefix="mkosi-swtpm-") as state: # swtpm_setup is noisy and doesn't have a --quiet option so we pipe it's stdout to /dev/null. run( [ "swtpm_setup", "--tpm-state", workdir(Path(state)), "--tpm2", "--pcr-banks", "sha256", "--config", "/dev/null", *( ["--profile-name=custom", "--profile-remove-disabled=check"] if swtpm_setup_version(config.sandbox) >= "0.10.0" else [] ), ], sandbox=config.sandbox(options=["--bind", state, workdir(Path(state))]), stdout=None if ARG_DEBUG.get() else subprocess.DEVNULL, ) # fmt: skip cmdline = ["swtpm", "socket", "--tpm2", "--tpmstate", f"dir={workdir(Path(state))}"] # We create the socket ourselves and pass the fd to swtpm to avoid race conditions where we start # qemu before swtpm has had the chance to create the socket (or where we try to chown it first). 
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: path = Path(state) / Path("sock") sock.bind(os.fspath(path)) sock.listen() cmdline += ["--ctrl", f"type=unixio,fd={sock.fileno()}"] with spawn( cmdline, pass_fds=(sock.fileno(),), sandbox=config.sandbox(options=["--bind", state, workdir(Path(state))]), setup=scope_cmd( name=f"mkosi-swtpm-{config.machine_or_name()}", description=f"swtpm for {config.machine_or_name()}", ), ) as proc: yield path proc.terminate() def find_virtiofsd(*, root: Path = Path("/"), extra: Sequence[Path] = ()) -> Optional[Path]: if p := find_binary("virtiofsd", root=root, extra=extra): return p if (p := root / "usr/libexec/virtiofsd").exists(): return Path("/") / p.relative_to(root) if (p := root / "usr/lib/virtiofsd").exists(): return Path("/") / p.relative_to(root) return None def unshare_version() -> str: return run(["unshare", "--version"], stdout=subprocess.PIPE).stdout.strip().split()[-1] def systemd_escape(config: Config, s: PathString, path: bool = False) -> str: cmdline = ["systemd-escape", s] if path: cmdline += ["--path"] return run(cmdline, stdout=subprocess.PIPE, sandbox=config.sandbox()).stdout.strip() @contextlib.contextmanager def start_virtiofsd( config: Config, directory: Path, *, uidmap: bool = True, name: Optional[str] = None, selinux: bool = False, ) -> Iterator[Path]: virtiofsd = find_virtiofsd(root=config.tools(), extra=config.extra_search_paths) if virtiofsd is None: die("virtiofsd must be installed to boot directory images or use RuntimeTrees= with mkosi vm") cmdline: list[PathString] = [ virtiofsd, "--shared-dir", workdir(directory), "--xattr", # qemu's client doesn't seem to support announcing submounts so disable the feature to avoid the # warning. "--no-announce-submounts", "--sandbox=chroot", f"--inode-file-handles={'prefer' if os.getuid() == 0 and not uidmap else 'never'}", "--log-level=error", "--modcaps=-mknod", ] # fmt: skip if selinux: cmdline += ["--security-label"] st = None if uidmap: st = Path(directory).stat() # If we're already running as the same user that we'll be running virtiofsd as, don't bother doing # any explicit user switching or chown()'ing as it's not needed in this case. if st.st_uid == os.getuid() and st.st_gid == os.getgid(): st = None # We create the socket ourselves and pass the fd to virtiofsd to avoid race conditions where we start # qemu before virtiofsd has had the chance to create the socket (or where we try to chown it first). with ( tempfile.TemporaryDirectory(prefix="mkosi-virtiofsd-") as context, socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock, ): if st: # Make sure virtiofsd can access the socket in this directory. os.chown(context, st.st_uid, st.st_gid) path = Path(context) / f"sock-{uuid.uuid4().hex}" sock.bind(os.fspath(path)) sock.listen() if st: # Make sure virtiofsd can connect to the socket. os.chown(path, st.st_uid, st.st_gid) cmdline += ["--fd", str(sock.fileno())] # We want RuntimeBuildSources= and RuntimeTrees= to do the right thing even when running mkosi vm # as root without the source directories necessarily being owned by root. We achieve this by running # virtiofsd as the owner of the source directory and then mapping that uid to root. 
if not name: name = f"{config.machine_or_name()}-{systemd_escape(config, directory, path=True)}" else: name = systemd_escape(config, name) name = f"mkosi-virtiofsd-{name}" description = f"virtiofsd for machine {config.machine_or_name()} for {directory}" scope = [] if st: scope = scope_cmd(name=name, description=description, user=st.st_uid, group=st.st_gid) elif not uidmap and (os.getuid() == 0 or unshare_version() >= "2.38"): scope = scope_cmd(name=name, description=description) with spawn( cmdline, pass_fds=(sock.fileno(),), user=st.st_uid if st and not scope else None, group=st.st_gid if st and not scope else None, # If we're booting from virtiofs and unshare is too old, we don't set up a scope so we can use # our own function to become root in the subuid range. # TODO: Drop this as soon as we drop CentOS Stream 9 support and can rely on newer unshare # features. preexec=become_root_in_subuid_range if not scope and not uidmap else None, sandbox=config.sandbox( options=[ "--bind", directory, workdir(directory), *(["--become-root"] if uidmap else []), ], ), setup=scope + (become_root_in_subuid_range_cmd() if scope and not uidmap else []), ) as proc: # fmt: skip yield path proc.terminate() async def notify(messages: queue.SimpleQueue[tuple[str, str]], *, sock: socket.socket) -> None: import asyncio loop = asyncio.get_running_loop() num_messages = 0 num_bytes = 0 try: while True: s, _ = await loop.sock_accept(sock) num_messages += 1 with s: data = [] try: while buf := await loop.sock_recv(s, 4096): data.append(buf) except ConnectionResetError: logging.debug("notify listener connection reset by peer") for msg in b"".join(data).decode().split("\n"): if not msg: continue num_bytes += len(msg) k, _, v = msg.partition("=") messages.put((k, v)) except asyncio.CancelledError: logging.debug(f"Received {num_messages} notify messages totalling {format_bytes(num_bytes)} bytes") @contextlib.contextmanager def vsock_notify_handler() -> Iterator[tuple[str, AsyncioThread[tuple[str, str]]]]: """ This yields a vsock address and an object that will be filled in with the notifications from the VM. """ with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as vsock: vsock.bind((socket.VMADDR_CID_ANY, socket.VMADDR_PORT_ANY)) vsock.listen() vsock.setblocking(False) with AsyncioThread(functools.partial(notify, sock=vsock)) as thread: yield f"vsock-stream:{socket.VMADDR_CID_HOST}:{vsock.getsockname()[1]}", thread @contextlib.contextmanager def start_journal_remote(config: Config, sockfd: int) -> Iterator[None]: assert config.forward_journal bin = config.find_binary("systemd-journal-remote", "/usr/lib/systemd/systemd-journal-remote") if not bin: die("systemd-journal-remote must be installed to forward logs from the virtual machine") if config.forward_journal.suffix == ".journal": d = config.forward_journal.parent else: d = config.forward_journal if not d.exists(): # Pass exist_ok=True because multiple mkosi processes might be trying to create the parent directory # at the same time. d.mkdir(exist_ok=True, parents=True) # Make sure COW is disabled so systemd-journal-remote doesn't complain on btrfs filesystems. maybe_make_nocow(d) INVOKING_USER.chown(d) with tempfile.NamedTemporaryFile(mode="w", prefix="mkosi-journal-remote-config-") as f: os.chmod(f.name, 0o644) # Make sure we capture all the logs by bumping the limits. We set MaxFileSize=4G because with the # compact mode enabled the files cannot grow any larger anyway. 
f.write( textwrap.dedent( f"""\ [Remote] MaxUse=1T KeepFree=1G MaxFileSize=4G MaxFiles={1 if config.forward_journal.suffix == ".journal" else 100} """ ) ) f.flush() user = d.stat().st_uid if os.getuid() == 0 else None group = d.stat().st_gid if os.getuid() == 0 else None scope = scope_cmd( name=f"mkosi-journal-remote-{config.machine_or_name()}", description=f"mkosi systemd-journal-remote for {config.machine_or_name()}", user=user, group=group, ) with spawn( [ bin, "--output", workdir(config.forward_journal), "--split-mode", "none" if config.forward_journal.suffix == ".journal" else "host", ], pass_fds=(sockfd,), sandbox=config.sandbox( options=[ "--bind", config.forward_journal.parent, workdir(config.forward_journal.parent), "--ro-bind", f.name, "/etc/systemd/journal-remote.conf", "--pack-fds", ], ), setup=scope, user=user if not scope else None, group=group if not scope else None, ) as proc: # fmt: skip yield proc.terminate() @contextlib.contextmanager def start_journal_remote_vsock(config: Config) -> Iterator[str]: with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as sock: sock.bind((socket.VMADDR_CID_ANY, socket.VMADDR_PORT_ANY)) sock.listen() with start_journal_remote(config, sock.fileno()): yield f"vsock-stream:{socket.VMADDR_CID_HOST}:{sock.getsockname()[1]}" @contextlib.contextmanager def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]: if config.output_format in (OutputFormat.cpio, OutputFormat.uki): yield src return # If we're booting a directory image that was not built as root, we have to make an ephemeral copy. If # we're running as root, we have to make an ephemeral copy so that all the files in the directory tree # are also owned by root. If we're not running as root, we'll be making use of a subuid/subgid user # namespace and we don't want any leftover files from the subuid/subgid user namespace to remain after we # shut down the container or virtual machine. if not config.ephemeral and (config.output_format != OutputFormat.directory or src.stat().st_uid == 0): with flock_or_die(src): yield src return src = src.resolve() # tempfile doesn't provide an API to get a random filename in an arbitrary directory so we do this # instead. Limit the size to 16 characters as the output name might be used in a unix socket path by # vmspawn and needs to fit in 108 characters. 
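    # (On Linux, sun_path for AF_UNIX sockets is limited to 108 bytes, hence the budget above.)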
tmp = src.parent / f"{src.name}-{uuid.uuid4().hex[:16]}" try: def copy() -> None: copy_tree( src, tmp, preserve=( config.output_format == OutputFormat.directory and (os.getuid() != 0 or src.stat().st_uid == 0) ), use_subvolumes=config.use_subvolumes, sandbox=config.sandbox, ) with flock(src, flags=fcntl.LOCK_SH): fork_and_wait(copy) yield tmp finally: def rm() -> None: if config.output_format == OutputFormat.directory: become_root_in_subuid_range() rmtree(tmp, sandbox=config.sandbox) fork_and_wait(rm) def join_initrds(config: Config, initrds: Sequence[Path], output: Path) -> Path: assert initrds if len(initrds) == 1: copy_tree(initrds[0], output, sandbox=config.sandbox) return output seq = io.BytesIO() for p in initrds: initrd = p.read_bytes() n = len(initrd) padding = b"\0" * (round_up(n, 4) - n) # pad to 32 bit alignment seq.write(initrd) seq.write(padding) output.write_bytes(seq.getbuffer()) return output def qemu_version(config: Config, binary: Path) -> GenericVersion: return GenericVersion( run( [binary, "--version"], stdout=subprocess.PIPE, sandbox=config.sandbox(), ).stdout.split()[3] ) def finalize_firmware( config: Config, kernel: Optional[Path], kerneltype: Optional[KernelType] = None, ) -> Firmware: if config.firmware != Firmware.auto: return config.firmware if kernel: if (kerneltype or KernelType.identify(config, kernel)) != KernelType.unknown: return Firmware.uefi_secure_boot return Firmware.linux if ( config.output_format in (OutputFormat.cpio, OutputFormat.directory) or config.architecture.to_efi() is None ): return Firmware.linux # At the moment there are no qemu firmware descriptions for non-x86 architectures that advertise # secure-boot support so let's default to no secure boot for non-x86 architectures. # Debian/Ubuntu however do ship those, so enable it there. 
if config.architecture.is_x86_variant() or ( config.architecture.is_arm_variant() and config.distribution.is_apt_distribution() ): return Firmware.uefi_secure_boot return Firmware.uefi def finalize_firmware_variables( config: Config, ovmf: OvmfConfig, stack: contextlib.ExitStack, ) -> tuple[Path, str]: ovmf_vars = Path(stack.enter_context(tempfile.NamedTemporaryFile(prefix="mkosi-ovmf-vars-")).name) if config.firmware_variables in (None, Path("custom"), Path("microsoft")): ovmf_vars_format = ovmf.vars_format else: ovmf_vars_format = "raw" if config.firmware_variables == Path("custom"): assert config.secure_boot_certificate run( [ "virt-fw-vars", "--input", workdir(ovmf.vars), "--output", workdir(ovmf_vars), "--enroll-cert", workdir(config.secure_boot_certificate), "--add-db", "OvmfEnrollDefaultKeys", workdir(config.secure_boot_certificate), "--no-microsoft", "--secure-boot", "--loglevel", "WARNING", ], sandbox=config.sandbox( options=[ "--bind", ovmf_vars, workdir(ovmf_vars), "--ro-bind", ovmf.vars, workdir(ovmf.vars), "--ro-bind", config.secure_boot_certificate, workdir(config.secure_boot_certificate), ], ), ) # fmt: skip elif config.firmware_variables == Path("microsoft-mok"): assert config.secure_boot_certificate run( [ "virt-fw-vars", "--input", workdir(ovmf.vars), "--output", workdir(ovmf_vars), "--add-mok", "605dab50-e046-4300-abb6-3dd810dd8b23", workdir(config.secure_boot_certificate), "--loglevel", "WARNING", ], sandbox=config.sandbox( options=[ "--bind", ovmf_vars, workdir(ovmf_vars), "--ro-bind", ovmf.vars, workdir(ovmf.vars), "--ro-bind", config.secure_boot_certificate, workdir(config.secure_boot_certificate), ], ), ) # fmt: skip else: vars = ( config.tools() / ovmf.vars.relative_to("/") if config.firmware_variables == Path("microsoft") or not config.firmware_variables else config.firmware_variables ) if not vars.exists(): die(f"Firmware variables file {vars} does not exist") shutil.copy(vars, ovmf_vars) return ovmf_vars, ovmf_vars_format def apply_runtime_size(config: Config, image: Path) -> None: if config.output_format != OutputFormat.disk or not config.runtime_size: return run( [ "systemd-repart", "--definitions=/", "--no-pager", # To use qemu's cache.direct option, the drive size has to be a multiple of the page size. 
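            # (resource.getpagesize() reports the host's page size, typically 4096 bytes.)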
f"--size={round_up(config.runtime_size, resource.getpagesize())}", "--pretty=no", "--offline=yes", workdir(image), ], sandbox=config.sandbox(options=["--bind", image, workdir(image)]), ) # fmt: skip @contextlib.contextmanager def finalize_drive(config: Config, drive: Drive) -> Iterator[Path]: dir = Path(drive.directory or "/var/tmp") filename = f"mkosi-drive-{config.machine_or_name()}-{drive.id}" with ( (dir / filename).open("a+b") if DriveFlag.persist in drive.flags else tempfile.NamedTemporaryFile(dir=dir, prefix=f"{filename}-") ) as file: maybe_make_nocow(Path(file.name)) file.truncate(round_up(drive.size, resource.getpagesize())) yield Path(file.name) @contextlib.contextmanager def finalize_initrd(config: Config) -> Iterator[Optional[Path]]: with contextlib.ExitStack() as stack: if (config.output_dir_or_cwd() / config.output_split_initrd).exists(): yield config.output_dir_or_cwd() / config.output_split_initrd elif config.initrds: initrd = config.output_dir_or_cwd() / f"initrd-{uuid.uuid4().hex}" join_initrds(config, config.initrds, initrd) stack.callback(lambda: initrd.unlink()) yield initrd else: yield None @contextlib.contextmanager def finalize_state(config: Config, cid: int) -> Iterator[None]: statedir = INVOKING_USER.runtime_dir() / "mkosi/machine" statedir.mkdir(parents=True, exist_ok=True) with flock(statedir): if (p := statedir / f"{config.machine_or_name()}.json").exists(): state = json.loads(p.read_text()) if "Pid" not in state or Path(f"/proc/{state['Pid']}").exists(): die( f"Another virtual machine named {config.machine_or_name()} is already running", hint="Use --machine to specify a different virtual machine name", ) p.write_text( json.dumps( { "Machine": config.machine_or_name(), "Pid": os.getpid(), "ProxyCommand": f"socat - VSOCK-CONNECT:{cid}:%p", "SshKey": os.fspath(config.ssh_key) if config.ssh_key else None, }, sort_keys=True, indent=4, ) ) try: yield finally: with flock(statedir): p.unlink(missing_ok=True) def finalize_kernel_command_line_extra(config: Config) -> list[str]: columns, lines = shutil.get_terminal_size() term = finalize_term() cmdline = [ "rw", # Make sure we set up networking in the VM/container. "systemd.wants=network.target", # Make sure we don't load vmw_vmci which messes with virtio vsock. 
"module_blacklist=vmw_vmci", f"systemd.tty.term.hvc0={term}", f"systemd.tty.columns.hvc0={columns}", f"systemd.tty.rows.hvc0={lines}", ] if not any(s.startswith("ip=") for s in config.kernel_command_line_extra): cmdline += ["ip=enc0:any", "ip=enp0s1:any", "ip=enp0s2:any", "ip=host0:any", "ip=none"] if not any(s.startswith("loglevel=") for s in config.kernel_command_line_extra): cmdline += ["loglevel=4"] if not any(s.startswith("SYSTEMD_SULOGIN_FORCE=") for s in config.kernel_command_line_extra): cmdline += ["SYSTEMD_SULOGIN_FORCE=1"] if ( not any(s.startswith("systemd.hostname=") for s in config.kernel_command_line_extra) and config.machine ): cmdline += [f"systemd.hostname={config.machine}"] if config.console != ConsoleMode.gui: cmdline += [ f"systemd.tty.term.console={term}", f"systemd.tty.columns.console={columns}", f"systemd.tty.rows.console={lines}", "console=hvc0", f"TERM={term}", ] elif config.architecture.is_arm_variant(): cmdline += ["console=tty0"] for s in config.kernel_command_line_extra: key, sep, value = s.partition("=") if " " in value: value = f'"{value}"' cmdline += [key if not sep else f"{key}={value}"] return cmdline def finalize_credentials(config: Config, stack: contextlib.ExitStack) -> Path: d = Path(stack.enter_context(tempfile.TemporaryDirectory(prefix="mkosi-credentials-"))) (d / "firstboot.locale").write_text("C.UTF-8") for k, v in config.credentials.items(): with (d / k).open("w") as f: if isinstance(v, str): f.write(v) elif os.access(v, os.X_OK): run([v], stdout=f, env=os.environ) else: f.write(v.read_text()) if not (d / "firstboot.timezone").exists(): if config.find_binary("timedatectl"): tz = run( ["timedatectl", "show", "-p", "Timezone", "--value"], stdout=subprocess.PIPE, check=False, # timedatectl needs to be able to talk via dbus to timedated. sandbox=config.sandbox(options=["--ro-bind", "/run", "/run"]), ).stdout.strip() else: tz = "UTC" (d / "firstboot.timezone").write_text(tz) if not (d / "ssh.authorized_keys.root").exists(): if config.ssh_certificate: pubkey = run( ["openssl", "x509", "-in", workdir(config.ssh_certificate), "-pubkey", "-noout"], stdout=subprocess.PIPE, env=dict(OPENSSL_CONF="/dev/null"), sandbox=config.sandbox( options=["--ro-bind", config.ssh_certificate, workdir(config.ssh_certificate)], ), ).stdout.strip() with (d / "ssh.authorized_keys.root").open("w") as f: run( ["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"], input=pubkey, stdout=f, # ssh-keygen insists on being able to resolve the current user which doesn't always work # (think sssd or similar) so let's switch to root which is always resolvable. 
sandbox=config.sandbox( options=["--become-root", "--ro-bind", "/etc/passwd", "/etc/passwd"] ), ) elif config.ssh in (Ssh.always, Ssh.runtime): die( "Ssh= is enabled but no SSH certificate was found", hint="Run 'mkosi genkey' to automatically create one", ) return d def scope_cmd( name: str, description: str, user: Optional[int] = None, group: Optional[int] = None, properties: Sequence[str] = (), environment: bool = True, ) -> list[str]: if not find_binary("systemd-run"): return [] if os.getuid() != 0 and "DBUS_SESSION_BUS_ADDRESS" in os.environ and "XDG_RUNTIME_DIR" in os.environ: env = { "DBUS_SESSION_BUS_ADDRESS": os.environ["DBUS_SESSION_BUS_ADDRESS"], "XDG_RUNTIME_DIR": os.environ["XDG_RUNTIME_DIR"], } elif os.getuid() == 0: if "DBUS_SYSTEM_ADDRESS" in os.environ: env = {"DBUS_SYSTEM_ADDRESS": os.environ["DBUS_SYSTEM_ADDRESS"]} elif Path("/run/dbus/system_bus_socket").exists(): env = {"DBUS_SYSTEM_ADDRESS": "/run/dbus/system_bus_socket"} else: return [] else: return [] return [ "env", *(f"{k}={v}" for k, v in env.items() if environment), "systemd-run", "--system" if os.getuid() == 0 else "--user", *(["--quiet"] if not ARG_DEBUG.get() else []), "--unit", name, "--description", description, "--scope", "--collect", *(["--expand-environment=no"] if systemd_tool_version("systemd-run") >= 254 else []), *(["--uid", str(user)] if user is not None else []), *(["--gid", str(group)] if group is not None else []), *([f"--property={p}" for p in properties]), ] # fmt: skip def machine1_is_available(config: Config) -> bool: if "DBUS_SYSTEM_ADDRESS" not in os.environ and not Path("/run/dbus/system_bus_socket").is_socket(): return False services = json.loads( run( ["busctl", "list", "--json=pretty"], env=os.environ | config.finalize_environment(), sandbox=config.sandbox(relaxed=True), stdout=subprocess.PIPE, ).stdout.strip() ) return any(service["name"] == "org.freedesktop.machine1" for service in services) def finalize_register(config: Config) -> bool: if config.register == ConfigFeature.disabled: return False if config.register == ConfigFeature.auto and os.getuid() != 0: return False # Unprivileged registration via polkit was added after the varlink interface was added, so if the varlink # interface is not available, we can assume unprivileged registration is not available either. 
if ( not (p := Path("/run/systemd/machine/io.systemd.Machine")).is_socket() or not os.access(p, os.R_OK | os.W_OK) ) and (not machine1_is_available(config) or os.getuid() != 0): if config.register == ConfigFeature.enabled: die( "Container registration was requested but systemd-machined is not available", hint="Is the systemd-container package installed and is systemd-machined running?", ) return False return True def register_machine(config: Config, pid: int, fname: Path, cid: Optional[int]) -> None: if not finalize_register(config): return if (p := Path("/run/systemd/machine/io.systemd.Machine")).is_socket() and os.access( p, os.R_OK | os.W_OK ): run( [ "varlinkctl", "call", p, "io.systemd.Machine.Register", json.dumps( { "name": config.machine_or_name().replace("_", "-"), "service": "mkosi", "class": "vm", "leader": pid, **({"rootDirectory": os.fspath(fname)} if fname.is_dir() else {}), **({"vSockCid": cid} if cid is not None else {}), **({"sshAddress": f"vsock/{cid}"} if cid is not None else {}), **({"sshPrivateKeyPath": f"{config.ssh_key}"} if config.ssh_key else {}), } ), ], env=os.environ | config.finalize_environment(), sandbox=config.sandbox(relaxed=True), stdin=sys.stdin, # Prevent varlinkctl's empty '{}' response from showing up in the terminal. stdout=subprocess.DEVNULL, # systemd 256 exposes the systemd-machined varlink interface only to the root user, but makes the # varlink socket world readable/writable, which means this will fail when executed as an # unprivileged user, so ignore the error in that case. # TODO: Remove when https://github.com/systemd/systemd/pull/36344 is in a stable release. check=os.getuid() == 0, ) else: run( [ "busctl", "call", "--quiet", "org.freedesktop.machine1", "/org/freedesktop/machine1", "org.freedesktop.machine1.Manager", "RegisterMachine", "sayssus", config.machine_or_name().replace("_", "-"), "0", "mkosi", "vm", str(pid), fname if fname.is_dir() else "", ], # fmt: skip env=os.environ | config.finalize_environment(), sandbox=config.sandbox(relaxed=True), stdin=sys.stdin, stdout=sys.stdout, ) def run_qemu(args: Args, config: Config) -> None: if config.output_format not in ( OutputFormat.disk, OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp, OutputFormat.directory, ): die(f"{config.output_format} images cannot be booted in qemu") if ( config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp) and not config.firmware.is_linux() and not config.firmware.is_uefi() and config.firmware != Firmware.auto ): die(f"{config.output_format} images cannot be booted with the '{config.firmware}' firmware") if config.runtime_trees and config.firmware == Firmware.bios: die("RuntimeTrees= cannot be used when booting in BIOS firmware") if config.kvm == ConfigFeature.enabled and not config.architecture.is_native(): die( f"KVM acceleration requested but {config.architecture} does not match " "the native host architecture" ) if ( config.firmware_variables in (Path("custom"), Path("microsoft-mok")) and not config.secure_boot_certificate ): die("SecureBootCertificate= must be configured to use FirmwareVariables=custom|microsoft-mok") if config.bind_user: die("mkosi qemu does not support --bind-user=") # After we unshare the user namespace to sandbox qemu, we might not have access to /dev/kvm or related # device nodes anymore as access to these might be gated behind the kvm group and we won't be part of the # kvm group anymore after unsharing the user namespace. 
To get around this, open all those device nodes
    # early so we can pass them as file descriptors to qemu later. Note that we can't pass the kvm file
    # descriptor to qemu until version 9.0.
    qemu_device_fds = {
        d: d.open()
        for d in QemuDeviceNode
        if d.feature(config) != ConfigFeature.disabled and d.available(log=True)
    }

    qemu = find_qemu_binary(config)
    qemuver = qemu_version(config, qemu)

    have_kvm = (qemuver < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or (
        qemuver >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds
    )
    if config.kvm == ConfigFeature.enabled and not have_kvm:
        die("KVM acceleration requested but cannot access /dev/kvm")

    if config.vsock == ConfigFeature.enabled and QemuDeviceNode.vhost_vsock not in qemu_device_fds:
        die("VSock requested but cannot access /dev/vhost-vsock")

    if config.console not in (ConsoleMode.native, ConsoleMode.gui) and not config.find_binary(
        "systemd-pty-forward"
    ):
        die(f"Console mode {config.console} requested but systemd-pty-forward not found")

    if config.linux:
        kernel = config.expand_linux_specifiers()
    elif "-kernel" in args.cmdline:
        kernel = Path(args.cmdline[args.cmdline.index("-kernel") + 1])
    else:
        kernel = None

    if config.output_format in (OutputFormat.uki, OutputFormat.esp) and kernel:
        logging.warning(
            f"Booting UKI output, kernel {kernel} configured with Linux= or "
            "passed with -kernel will not be used"
        )
        kernel = None

    if kernel and not kernel.exists():
        die(f"Kernel not found at {kernel}")

    kerneltype = KernelType.identify(config, kernel) if kernel else None
    firmware = finalize_firmware(config, kernel, kerneltype)

    if not kernel and (
        firmware.is_linux()
        or config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki)
    ):
        if firmware.is_uefi():
            name = config.output if config.output_format == OutputFormat.uki else config.output_split_uki
            kernel = config.output_dir_or_cwd() / name
        else:
            kernel = config.output_dir_or_cwd() / config.output_split_kernel

        if not kernel.exists():
            die(
                f"Kernel or UKI not found at {kernel}, please install a kernel in the image "
                "or provide a -kernel argument to mkosi vm"
            )

    ovmf = find_ovmf_firmware(config, firmware)

    # A shared memory backend might increase ram usage so only add one if actually necessary for virtiofsd.
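    # (vhost-user devices such as virtiofsd require the guest's memory to be shared with the device
    # process, hence the memfd memory backend set up below.)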
shm = [] if ( config.runtime_trees or config.runtime_build_sources or config.output_format == OutputFormat.directory ): shm = ["-object", f"memory-backend-memfd,id=mem,size={config.ram // 1024**2}M,share=on"] machine = f"type={config.architecture.default_qemu_machine()}" if firmware.is_uefi() and config.architecture.supports_smm(): machine += f",smm={'on' if firmware == Firmware.uefi_secure_boot else 'off'}" if shm: machine += ",memory-backend=mem" if config.cxl and config.architecture.supports_cxl(): machine += ",cxl=on" if config.architecture.supports_hpet(): machine += ",hpet=off" cmdline: list[PathString] = [] if config.console in (ConsoleMode.interactive, ConsoleMode.read_only): cmdline += systemd_pty_forward( config, background="48;2;12;51;19", title=f"Virtual Machine {config.machine_or_name()}", ) if config.console == ConsoleMode.read_only: cmdline += ["--read-only"] memory = f"{config.ram // 1024**2}M" if config.maxmem: memory += f",maxmem={(config.maxmem // 1024**2)}M" cmdline += [ qemu, "-machine", machine, "-smp", str(config.cpus or os.cpu_count()), "-m", memory, "-object", "rng-random,filename=/dev/urandom,id=rng0", "-device", "virtio-rng-pci,rng=rng0,id=rng-device0", "-device", "virtio-balloon,free-page-reporting=on", "-no-user-config", *shm, ] # fmt: skip if config.runtime_network == Network.user: cmdline += ["-nic", f"user,model={config.architecture.default_qemu_nic_model()}"] elif config.runtime_network == Network.interface: if os.getuid() != 0: die("RuntimeNetwork=interface requires root privileges") cmdline += ["-nic", "tap,script=no,model=virtio-net-pci"] elif config.runtime_network == Network.none: cmdline += ["-nic", "none"] if config.kvm != ConfigFeature.disabled and have_kvm and config.architecture.can_kvm(): accel = "kvm" if qemuver >= QEMU_KVM_DEVICE_VERSION: cmdline += ["--add-fd", f"fd={qemu_device_fds[QemuDeviceNode.kvm]},set=1,opaque=/dev/kvm"] accel += ",device=/dev/fdset/1" cmdline += ["-cpu", "host"] else: accel = "tcg" cmdline += ["-cpu", "max"] cmdline += ["-accel", accel] cid: Optional[int] = None if QemuDeviceNode.vhost_vsock in qemu_device_fds: if config.vsock_cid == VsockCID.auto: cid = find_unused_vsock_cid(config, qemu_device_fds[QemuDeviceNode.vhost_vsock]) elif config.vsock_cid == VsockCID.hash: cid = hash_to_vsock_cid(hash_output(config)) else: cid = config.vsock_cid if vsock_cid_in_use(qemu_device_fds[QemuDeviceNode.vhost_vsock], cid): die( f"VSock connection ID {cid} is already in use by another virtual machine", hint="Use VsockConnectionId=auto to have mkosi automatically " "find a free vsock connection ID", ) cmdline += [ "-device", f"vhost-vsock-pci,guest-cid={cid},vhostfd={qemu_device_fds[QemuDeviceNode.vhost_vsock]}", # noqa: E501 ] # fmt: skip if config.console == ConsoleMode.gui: if config.architecture.is_arm_variant(): cmdline += ["-device", "virtio-gpu-pci"] else: cmdline += ["-device", "virtio-vga"] cmdline += [ "-nodefaults", "-display", "sdl,gl=on", "-audio", "driver=pipewire,model=virtio", ] # fmt: skip else: # -nodefaults removes the default CDROM device which avoids an error message during boot # -serial mon:stdio adds back the serial device removed by -nodefaults. cmdline += [ "-nographic", "-nodefaults", "-chardev", "stdio,mux=on,id=console,signal=off", "-device", "virtio-serial-pci,id=mkosi-virtio-serial-pci", "-device", "virtconsole,chardev=console", "-mon", "console", ] # fmt: skip # QEMU has built-in logic to look for the BIOS firmware so we don't need to do anything special for that. 
if firmware.is_uefi(): assert ovmf cmdline += ["-drive", f"if=pflash,format={ovmf.format},readonly=on,file={ovmf.firmware}"] notify: Optional[AsyncioThread[tuple[str, str]]] = None with contextlib.ExitStack() as stack: if firmware.is_uefi(): assert ovmf ovmf_vars, ovmf_vars_format = finalize_firmware_variables(config, ovmf, stack) cmdline += ["-drive", f"file={ovmf_vars},if=pflash,format={ovmf_vars_format}"] # These configurations break booting aarch64 if firmware == Firmware.uefi_secure_boot and not config.architecture.is_arm_variant(): cmdline += [ "-global", "ICH9-LPC.disable_s3=1", "-global", "driver=cfi.pflash01,property=secure,value=on", ] # fmt: skip fname = stack.enter_context( copy_ephemeral(config, config.output_dir_or_cwd() / config.output_with_compression) ) apply_runtime_size(config, fname) kcl = [] if kernel: cmdline += ["-kernel", kernel] if any(s.startswith("root=") for s in finalize_kernel_command_line_extra(config)): pass elif config.output_format == OutputFormat.disk: # We can't rely on gpt-auto-generator when direct kernel booting so synthesize a root= # kernel argument instead. root = finalize_root(find_partitions(fname, sandbox=config.sandbox)) if not root: die("Cannot perform a direct kernel boot without a root or usr partition") kcl += [root] elif config.output_format == OutputFormat.directory: sock = stack.enter_context( start_virtiofsd( config, fname, name=config.machine_or_name(), uidmap=False, selinux=bool(want_selinux_relabel(config, fname, fatal=False)), ), ) cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag=root", ] # fmt: skip kcl += ["root=root", "rootfstype=virtiofs"] credentials = finalize_credentials(config, stack) def add_virtiofs_mount( sock: Path, dst: PathString, cmdline: list[PathString], credentials: Path, ) -> None: tag = os.fspath(dst) if len(tag.encode()) > VIRTIOFS_MAX_TAG_LEN: die(f"virtio-fs tag {tag} derived from destination is too long") cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag={tag}", ] # fmt: skip if not (credentials / "fstab.extra").exists(): fstab = "" else: fstab = (credentials / "fstab.extra").read_text() if fstab and not fstab[-1] == "\n": fstab += "\n" fstab += f"{tag} {dst} virtiofs x-initrd.mount\n" (credentials / "fstab.extra").write_text(fstab) if config.runtime_build_sources: for t in config.build_sources: src, dst = t.with_prefix("/work/src") sock = stack.enter_context(start_virtiofsd(config, src)) add_virtiofs_mount(sock, dst, cmdline, credentials) if config.build_dir: sock = stack.enter_context(start_virtiofsd(config, config.build_subdir)) add_virtiofs_mount(sock, "/work/build", cmdline, credentials) for tree in config.runtime_trees: sock = stack.enter_context(start_virtiofsd(config, tree.source)) add_virtiofs_mount(sock, Path("/root/src") / (tree.target or ""), cmdline, credentials) if config.output_format in (OutputFormat.disk, OutputFormat.esp): cmdline += ["-device", "virtio-scsi-pci,id=mkosi"] if config.output_format == OutputFormat.cpio: cmdline += ["-initrd", fname] elif ( kernel and kerneltype != KernelType.uki and "-initrd" not in args.cmdline and firmware != Firmware.linux_noinitrd and (initrd := stack.enter_context(finalize_initrd(config))) ): cmdline += ["-initrd", initrd] if config.output_format in (OutputFormat.disk, OutputFormat.esp): blockdev = [ "driver=raw", "node-name=mkosi", "discard=unmap", "file.driver=file", 
f"file.filename={fname}", "file.aio=io_uring", f"cache.direct={yes_no(fname.stat().st_size % resource.getpagesize() == 0)}", f"cache.no-flush={yes_no(config.ephemeral)}", ] device_type = "virtio-blk-pci" if config.removable: device_type = "scsi-hd,device_id=mkosi,removable=on" cmdline += [ "-blockdev", ",".join(blockdev), "-device", f"{device_type},drive=mkosi,bootindex=1", ] # fmt: skip if config.tpm == ConfigFeature.enabled or ( config.tpm == ConfigFeature.auto and firmware.is_uefi() and config.find_binary("swtpm") is not None ): sock = stack.enter_context(start_swtpm(config)) cmdline += [ "-chardev", f"socket,id=chrtpm,path={sock}", "-tpmdev", "emulator,id=tpm0,chardev=chrtpm", ] # fmt: skip if config.architecture.is_x86_variant(): cmdline += ["-device", "tpm-tis,tpmdev=tpm0"] elif config.architecture.is_arm_variant(): cmdline += ["-device", "tpm-tis-device,tpmdev=tpm0"] if QemuDeviceNode.vhost_vsock in qemu_device_fds: addr, notify = stack.enter_context(vsock_notify_handler()) (credentials / "vmm.notify_socket").write_text(addr) if config.forward_journal: (credentials / "journal.forward_to_socket").write_text( stack.enter_context(start_journal_remote_vsock(config)) ) smbiosdir = Path(stack.enter_context(tempfile.TemporaryDirectory(prefix="mkosi-smbios-"))) for p in credentials.iterdir(): payload = base64.b64encode(p.read_bytes()) if config.architecture.supports_smbios(firmware): with (smbiosdir / p.name).open("wb") as f: f.write(f"io.systemd.credential.binary:{p.name}=".encode()) f.write(payload) cmdline += ["-smbios", f"type=11,path={smbiosdir / p.name}"] # qemu's fw_cfg device only supports keys up to 55 characters long. elif config.architecture.supports_fw_cfg() and len(f"opt/io.systemd.credentials/{p.name}") <= 55: cmdline += ["-fw_cfg", f"name=opt/io.systemd.credentials/{p.name},file={p}"] elif kernel: kcl += [f"systemd.set_credential_binary={p.name}:{payload.decode()}"] kcl += finalize_kernel_command_line_extra(config) if kernel and (kerneltype != KernelType.uki or not config.architecture.supports_smbios(firmware)): cmdline += ["-append", " ".join(config.kernel_command_line + kcl)] elif config.architecture.supports_smbios(firmware): cmdline += [ "-smbios", f"type=11,value=io.systemd.stub.kernel-cmdline-extra={' '.join(kcl).replace(',', ',,')}", "-smbios", f"type=11,value=io.systemd.boot.kernel-cmdline-extra={' '.join(kcl).replace(',', ',,')}", ] for _, drives in groupby(config.drives, key=lambda d: d.file_id): file = stack.enter_context(finalize_drive(config, drives[0])) for drive in drives: arg = [ "driver=raw", f"node-name={drive.id}", "file.driver=file", f"file.filename={file}", "file.aio=io_uring", "file.locking=off", "cache.direct=on", "cache.no-flush=yes", ] if drive.options: arg += [drive.options] cmdline += ["-blockdev", ",".join(arg)] cmdline += config.qemu_args cmdline += args.cmdline if cid is not None: stack.enter_context(finalize_state(config, cid)) # Reopen stdin, stdout and stderr to give qemu a private copy of them. This is a mitigation for the # case when running mkosi under meson and one or two of the three are redirected and their pipe might # block, but qemu opens all of them non-blocking because at least one of them is opened this way. 
stdin = try_or( lambda: os.open(f"/proc/self/fd/{sys.stdin.fileno()}", os.O_RDONLY), OSError, sys.stdin.fileno(), ) stdout = try_or( lambda: os.open(f"/proc/self/fd/{sys.stdout.fileno()}", os.O_WRONLY), OSError, sys.stdout.fileno(), ) stderr = try_or( lambda: os.open(f"/proc/self/fd/{sys.stderr.fileno()}", os.O_WRONLY), OSError, sys.stderr.fileno(), ) name = f"mkosi-{config.machine_or_name().replace('_', '-')}" with spawn( cmdline, stdin=stdin, stdout=stdout, stderr=stderr, pass_fds=qemu_device_fds.values(), env=os.environ | config.finalize_environment(), sandbox=config.sandbox( network=True, devices=True, relaxed=True, options=["--same-dir", "--suspend"], ), setup=scope_cmd( name=name, description=f"mkosi Virtual Machine {name}", properties=config.unit_properties, environment=False, ), ) as proc: # We have to close these before we wait for qemu otherwise we'll deadlock as qemu will never # exit. for fd in qemu_device_fds.values(): os.close(fd) os.waitid(os.P_PID, proc.pid, os.WEXITED | os.WSTOPPED | os.WNOWAIT) register_machine(config, proc.pid, fname, cid) proc.send_signal(signal.SIGCONT) if notify and (status := int({k: v for k, v in notify.process()}.get("EXIT_STATUS", "0"))) != 0: raise subprocess.CalledProcessError(status, cmdline) def run_ssh(args: Args, config: Config) -> None: statedir = INVOKING_USER.runtime_dir() / "mkosi/machine" with flock(statedir): if not (p := statedir / f"{config.machine_or_name()}.json").exists(): die( f"{p} not found, cannot SSH into virtual machine {config.machine_or_name()}", hint="Is the machine running and was it built with Ssh=yes and Vsock=yes?", ) state = json.loads(p.read_text()) if not state["SshKey"]: die( "An SSH key must be configured when booting the image to use 'mkosi ssh'", hint="Use 'mkosi genkey' to generate a new SSH key and certificate", ) cmd: list[PathString] = [ "ssh", "-i", state["SshKey"], "-F", "none", # Silence known hosts file errors/warnings. "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR", "-o", f"ProxyCommand={state['ProxyCommand']}", "root@mkosi", ] # fmt: skip cmd += args.cmdline run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.finalize_environment() | {"SHELL": "/bin/bash"}, log=False, sandbox=config.sandbox( network=True, devices=True, relaxed=True, # ssh insists on being able to resolve the current user which doesn't always work (think sssd or # similar) so let's switch to root which is always resolvable. 
options=["--same-dir", "--become-root"], ), ) mkosi-26/mkosi/resources/000077500000000000000000000000001512054777600155775ustar00rootroot00000000000000mkosi-26/mkosi/resources/__init__.py000066400000000000000000000047331512054777600177170ustar00rootroot00000000000000# SPDX-License-Identifier: PSF-2.0 # Based on code from https://github.com/python/cpython/blob/main/Lib/importlib/resources/_common.py import contextlib import functools import os import stat import sys import tempfile from collections.abc import Iterator from contextlib import AbstractContextManager from pathlib import Path if sys.version_info >= (3, 11): from importlib.resources.abc import Traversable else: from importlib.abc import Traversable def make_executable_if_script(path: Path) -> Path: with path.open("rb") as f: buf = f.read(3) if buf == b"#!/": os.chmod(path, path.stat().st_mode | stat.S_IEXEC) return path @contextlib.contextmanager def temporary_file(path: Traversable, suffix: str = "") -> Iterator[Path]: fd, raw_path = tempfile.mkstemp(suffix=suffix) try: try: os.write(fd, path.read_bytes()) finally: os.close(fd) yield make_executable_if_script(Path(raw_path)) finally: try: os.remove(raw_path) except FileNotFoundError: pass def dir_is_present(path: Traversable) -> bool: """ Some Traversables implement ``is_dir()`` to raise an exception (i.e. ``FileNotFoundError``) when the directory doesn't exist. This function wraps that call to always return a boolean and only return True if there's a dir and it exists. """ with contextlib.suppress(FileNotFoundError): return path.is_dir() return False @functools.singledispatch def as_file(path: Traversable) -> AbstractContextManager[Path]: """ Given a Traversable object, return that object as a path on the local file system in a context manager. """ return temporary_dir(path) if dir_is_present(path) else temporary_file(path, suffix=path.name) @contextlib.contextmanager def temporary_dir(path: Traversable) -> Iterator[Path]: """ Given a traversable dir, recursively replicate the whole tree to the file system in a context manager. 
""" assert path.is_dir() with tempfile.TemporaryDirectory() as temp_dir: yield write_contents(Path(temp_dir), path) def write_contents(target: Path, source: Traversable) -> Path: child = target.joinpath(source.name) if source.is_dir(): child.mkdir() for item in source.iterdir(): write_contents(child, item) else: child.write_bytes(source.read_bytes()) make_executable_if_script(child) return child mkosi-26/mkosi/resources/completion.bash000066400000000000000000000044111512054777600206070ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # shellcheck shell=bash _mkosi_compgen_files() { compgen -f -- "$1" } _mkosi_compgen_dirs() { compgen -d -- "$1" } _mkosi_completion() { local -a _mkosi_options local -A _mkosi_choices _mkosi_compgen _mkosi_verbs local -i curword_idx verb_seen ##VARIABLEDEFINITIONS## # completing_program="$1" local completing_word="$2" local completing_word_preceding="$3" if [[ "$completing_word" =~ ^- ]] # completing an option then readarray -t COMPREPLY < <(compgen -W "${_mkosi_options[*]}" -- "${completing_word}") return elif [[ "$completing_word_preceding" =~ ^- ]] # the previous word was an option then current_option="${completing_word_preceding}" current_option_choices="${_mkosi_choices[${current_option}]}" current_option_compgen="${_mkosi_compgen[${current_option}]}" # compgen options if we have them if [[ -n "${current_option_compgen}" ]] then readarray -t COMPREPLY < <("${current_option_compgen}" "${completing_word}") return fi # add choices if the current option has them readarray -t COMPREPLY -O "${#COMPREPLY[@]}" \ < <(compgen -W "${current_option_choices}" -- "${completing_word}") # if this (maybe) takes arguments, we'll just fall back to files readarray -t COMPREPLY -O "${#COMPREPLY[@]}" \ < <(_mkosi_compgen_files "${completing_word}") fi # the preceding word wasn't an option or one that doesn't take arguments, # let's get creative and check the whole argument list so far while ((curword_idx < COMP_CWORD)) do # check if we've seen a verb already, then we just try files if [[ -n "${_mkosi_verbs[${COMP_WORDS[${curword_idx}]}]}" ]] then verb_seen=$curword_idx break fi curword_idx=$((curword_idx + 1)) done if ((verb_seen)) then readarray -t COMPREPLY < <(_mkosi_compgen_files "${completing_word}") else readarray -t COMPREPLY < <(compgen -W "${_mkosi_verbs[*]}" -- "${completing_word}") fi } complete -o filenames -F _mkosi_completion mkosi complete -o filenames -F _mkosi_completion python -m mkosi mkosi-26/mkosi/resources/completion.zsh000066400000000000000000000005321512054777600204760ustar00rootroot00000000000000#compdef mkosi # SPDX-License-Identifier: LGPL-2.1-or-later # shellcheck shell=zsh _mkosi_verb(){ if (( CURRENT == 1 )); then _describe -t commands 'mkosi verb' _mkosi_verbs else local curcontext="$curcontext" if [[ "$curcontext" == ':complete:mkosi:argument-rest' ]]; then _files fi fi } mkosi-26/mkosi/resources/man/000077500000000000000000000000001512054777600163525ustar00rootroot00000000000000mkosi-26/mkosi/resources/man/mkosi-addon.1.md000066400000000000000000000024561512054777600212470ustar00rootroot00000000000000% mkosi-addon(1) % % # NAME mkosi-addon — Build addons for unified kernel images for the current system using mkosi # SYNOPSIS `mkosi-addon [options…]` # DESCRIPTION **mkosi-addon** is a wrapper on top of **mkosi** to simplify the generation of PE addons containing customizations for unified kernel images specific to the running or local system. 
The generated addon will include entries in `/etc/crypttab` marked with `x-initrd.attach`, and the
kernel command line from `/etc/kernel/cmdline`. Kernel modules and firmware for the running hardware can
be included if a local configuration with the option `KernelModulesIncludeHost=` is provided.

# OPTIONS

`--kernel-version=`
:   Kernel version where to look for the kernel modules to include. Defaults to the kernel version of
    the running system (`uname -r`).

`--output=`, `-o`
:   Name to use for the generated output addon. Defaults to `mkosi-local.addon.efi`.

`--output-dir=`, `-O`
:   Path to a directory where to place all generated artifacts. Defaults to the current working
    directory.

`--debug`
:   Enable additional debugging output.

`--debug-shell`
:   Spawn debug shell in sandbox if a sandboxed command fails.

`--debug-sandbox`
:   Run **mkosi-sandbox** with **strace**.

`--version`
:   Show package version.

`--help`, `-h`
:   Show brief usage information.

# SEE ALSO
`mkosi(1)`
mkosi-26/mkosi/resources/man/mkosi-initrd.1.md000066400000000000000000000041331512054777600214450ustar00rootroot00000000000000% mkosi-initrd(1)
%
%

# NAME
mkosi-initrd — Build initrds or unified kernel images for the current system using mkosi

# SYNOPSIS
`mkosi-initrd [options…]`

# DESCRIPTION
**mkosi-initrd** is a wrapper on top of **mkosi** to simplify the generation of initrds and Unified
Kernel Images for the currently running system.

# OPTIONS

`--kernel-version=`, `-k`
:   Kernel version where to look for the kernel modules to include. Defaults to the kernel version of
    the running system (`uname -r`).

`--format=`, `-t`
:   Output format. One of `cpio` (CPIO archive), `uki` (a unified kernel image with the image in the
    `.initrd` PE section) or `directory` (for generating an image directly in a local directory).
    Defaults to `cpio`.

`--output=`, `-o`
:   Name to use for the generated output image file or directory. Defaults to `initrd`.

`--output-dir=`, `-O`
:   Path to a directory where to place all generated artifacts. Defaults to the current working
    directory.

`--generic`, `-g`
:   Build a generic initrd without host-specific kernel modules, which should allow the local system to
    boot on different hardware, although it remains tied to the kernel version of the running system or
    the one set with `--kernel-version=`.

`--profile=`
:   Set the profiles to enable for the initrd. By default, all profiles are disabled.

    The `lvm` profile enables support for LVM.
    The `network` profile enables support for network via **systemd-networkd**.
    The `nfs` profile enables support for NFS. It requires networking in the initrd, using the `network`
    profile, or some other custom method.
    The `pkcs11` profile enables support for PKCS#11.
    The `plymouth` profile provides a graphical interface at boot (animation and password prompt).
    The `raid` profile enables support for RAID arrays.

`--debug`
:   Enable additional debugging output.

`--debug-shell`
:   Spawn debug shell in sandbox if a sandboxed command fails.

`--debug-sandbox`
:   Run **mkosi-sandbox** with **strace**.

`--version`
:   Show package version.

`--help`, `-h`
:   Show brief usage information.

# SEE ALSO
`mkosi(1)`
mkosi-26/mkosi/resources/man/mkosi-sandbox.1.md000066400000000000000000000144661512054777600216220ustar00rootroot00000000000000% mkosi-sandbox(1)
%
%

# NAME
mkosi-sandbox — Run commands in a custom sandbox

# SYNOPSIS
`mkosi-sandbox [options…] command [arguments]`

# DESCRIPTION
`mkosi-sandbox` runs the given command in a custom sandbox. The sandbox is configured by specifying
command line options that configure individual parts of the sandbox.
If no command is specified, `mkosi-sandbox` will start `bash` in the sandbox.

Note that this sandbox is not designed to be a security boundary. Its intended purpose is to allow
running commands in an isolated environment so they are not affected by the host system.

It is possible to use `mkosi-sandbox` to create an in-process sandbox for python applications by
importing it as a module and invoking its main function with only options. No command line to execute is
needed in this case. As an example:

```python
import os

import mkosi.sandbox

mkosi.sandbox.main(["--become-root"])
print(os.getuid())
```

Only the `main` function can be invoked. Invoking any other functions from `mkosi.sandbox` is not
supported and may break in future versions.

# OPTIONS

`--tmpfs DST`
:   Mounts a new tmpfs at `DST` in the sandbox.

`--dev DST`
:   Sets up a private `/dev` at `DST` in the sandbox. This private `/dev` will only contain the basic
    device nodes required for a functioning sandbox (e.g. `/dev/null`) and no actual devices.

`--proc DST`
:   Mounts `/proc` from the host at `DST` in the sandbox.

`--dir DST`
:   Creates a directory and all missing parent directories at `DST` in the sandbox. All directories are
    created with mode 755, unless the path ends with `/tmp` or `/var/tmp`, in which case it is created
    with mode 1777.

`--bind SRC DST`
:   The source path `SRC` is recursively bind mounted to `DST` in the sandbox. The mountpoint is created
    in the sandbox if it does not yet exist. Any missing parent directories in the sandbox are created
    as well.

    The source path may optionally be prefixed with a `+` character. If so, the source path is
    interpreted relative to the sandbox root directory instead of the host root directory.

`--bind-try SRC DST`
:   Like `--bind`, but doesn't fail if the source path doesn't exist.

`--ro-bind SRC DST`
:   Like `--bind`, but does a recursive readonly bind mount.

`--ro-bind-try SRC DST`
:   Like `--bind-try`, but does a recursive readonly bind mount.

`--symlink SRC DST`
:   Creates a symlink at `DST` in the sandbox pointing to `SRC`. If `DST` already exists and is a file
    or symlink, a temporary symlink is created and mounted on top of `DST`.

`--write DATA DST`
:   Writes the string from `DATA` to `DST` in the sandbox.

`--overlay-lowerdir DIR`
:   Adds `DIR` from the host as a new lower directory for the next overlayfs mount.

`--overlay-upperdir DIR`
:   Sets the upper directory for the next overlayfs mount to `DIR` from the host. If set to `tmpfs`, the
    upperdir and workdir will be subdirectories of a fresh tmpfs mount.

`--overlay-workdir DIR`
:   Sets the working directory for the next overlayfs mount to `DIR` from the host.

`--overlay DST`
:   Mounts a new overlay filesystem at `DST` in the sandbox. The lower directories, upper directory and
    working directory are specified using the `--overlay-lowerdir`, `--overlay-upperdir` and
    `--overlay-workdir` options respectively. After each `--overlay` option is parsed, the other overlay
    options are reset.

`--unsetenv NAME`
:   Unsets the `NAME` environment variable in the sandbox.

`--setenv NAME VALUE`
:   Sets the `NAME` environment variable to `VALUE` in the sandbox.

`--chdir DIR`
:   Changes the working directory to `DIR` in the sandbox.

`--same-dir`
:   Changes the working directory in the sandbox to the current working directory that `mkosi-sandbox`
    is invoked in on the host.

`--become-root`
:   Maps the current user to the root user in the sandbox. If this option is not specified, the current
    user is mapped to itself in the sandbox.
    Regardless of whether this option is specified or not, the current user will have a full set of
    ambient capabilities in the sandbox. This includes `CAP_SYS_ADMIN`, which means that the invoked
    process in the sandbox will be able to do bind mounts and other operations.

    If `mkosi-sandbox` is invoked as the root user, this option won't do anything.

`--suppress-chown`
:   Specifying this option causes all calls to `chown()` or similar system calls to become a noop in the
    sandbox. This is primarily useful when invoking package managers in the sandbox which might try to
    `chown()` files to different users or groups, which would fail unless `mkosi-sandbox` is invoked by
    a privileged user.

`--suppress-sync`
:   Specifying this option causes all calls to `sync()` or similar system calls to become a noop in the
    sandbox. This is primarily useful when invoking package managers in the sandbox which might try to
    `sync()` files when extracting packages, which is unnecessary when building images and slows down
    execution time significantly.

`--unshare-net`
:   Specifying this option makes `mkosi-sandbox` unshare a network namespace if possible.

`--unshare-ipc`
:   Specifying this option makes `mkosi-sandbox` unshare an IPC namespace if possible.

`--suspend`
:   Make the `mkosi-sandbox` process suspend itself with `SIGSTOP` just before it calls `execve()`. This
    is useful for the parent process invoking `mkosi-sandbox` to wait until all setup logic has
    completed before continuing execution, by using `waitid()` with the `WNOWAIT` and `WSTOPPED` flags.

`--pack-fds`
:   Pack inherited file descriptors together starting at file descriptor number 3 and set `$LISTEN_FDS`
    to the number of packed file descriptors and `$LISTEN_PID` to the current process pid.

`--version`
:   Show package version.

`--help`, `-h`
:   Show brief usage information.

# EXAMPLES

Start `bash` in the current working directory in its own network namespace as the current user.

```sh
mkosi-sandbox --bind / / --same-dir --unshare-net
```

Run `id` as the root user in a sandbox with only `/usr` from the host plus the necessary symlinks to be
able to run commands.

```sh
mkosi-sandbox \
    --ro-bind /usr /usr \
    --symlink usr/bin /bin \
    --symlink usr/lib /lib \
    --symlink usr/lib64 /lib64 \
    --symlink usr/sbin /sbin \
    --dev /dev \
    --proc /proc \
    --tmpfs /tmp \
    --become-root \
    id
```

# SEE ALSO
`mkosi(1)`
mkosi-26/mkosi/resources/man/mkosi.1.md000066400000000000000000005152211512054777600201630ustar00rootroot00000000000000% mkosi(1)
%
%

# NAME
mkosi — Build Bespoke OS Images

# SYNOPSIS
`mkosi [options…] init`

`mkosi [options…] summary`

`mkosi [options…] cat-config`

`mkosi [options…] build [-- command line…]`

`mkosi [options…] shell [-- command line…]`

`mkosi [options…] boot [-- nspawn settings…]`

`mkosi [options…] vm [-- vmm parameters…]`

`mkosi [options…] ssh [-- command line…]`

`mkosi [options…] journalctl [-- command line…]`

`mkosi [options…] coredumpctl [-- command line…]`

`mkosi [options…] sysupdate [-- sysupdate settings…]`

`mkosi [options…] box [-- command line…]`

`mkosi [options…] dependencies [-- options…]`

`mkosi [options…] clean`

`mkosi [options…] serve`

`mkosi [options…] burn <device>`

`mkosi [options…] bump`

`mkosi [options…] genkey`

`mkosi [options…] documentation [manual]`

`mkosi [options…] completion [shell]`

`mkosi [options…] latest-snapshot`

`mkosi [options…] help`

# DESCRIPTION
**mkosi** is a tool for easily building customized OS images.
It's a fancy wrapper around **dnf**, **apt**, **pacman** and **zypper** that may generate disk images
with a number of bells and whistles.

## Command Line Verbs

The following command line verbs are known:

`init`
:   Initialize **mkosi**. This is a one-time operation that sets up various config files required for an
    optimal experience. Currently this only initializes a `tmpfiles.d` dropin for the mkosi package
    cache directory to make sure old, unused files are cleaned up automatically.

`summary`
:   Show a human-readable summary of all options used for building the images. This will parse the
    command line and configuration files, but only print what it is configured for and not actually
    build or run anything.

`cat-config`
:   Output the names and contents of all loaded configuration files. **mkosi** loads a bunch of files
    from different locations and this command makes it easier to figure out what is configured where.

`build`
:   Build the image based on the settings passed on the command line and in the configuration files.
    This command is the default if no verb is specified.

    Arguments may be passed to the build scripts, if some are defined. To pass options to the build
    scripts, separate them from regular mkosi options with `--`.

`shell`
:   This builds the image if it is not built yet, and then invokes **systemd-nspawn** to run an
    interactive shell in the image. This doesn't require booting the system, it's like a better chroot.
    An optional command line may be specified after the `shell` verb, to be invoked in place of the
    shell in the container. To pass extra options to nspawn, separate them from regular options with
    `--`.

`boot`
:   Similar to `shell`, but instead of spawning a shell, it boots systemd in the image using
    **systemd-nspawn**. Extra arguments may be specified after the `boot` verb, which are passed as the
    *kernel command line* to the init system in the image. To pass extra options to nspawn, separate
    them from regular options with `--`.

`vm`
:   Similar to `boot`, but uses the configured virtual machine monitor (by default `qemu`) to boot up
    the image, i.e. instead of container virtualization, virtual machine virtualization is used. How
    extra command line arguments are interpreted depends on the configured virtual machine monitor. See
    `VirtualMachineMonitor=` for more information. To pass extra options to the configured virtual
    machine monitor, separate them from regular options with `--`.

`ssh`
:   When the image is built with the `Ssh=always` option or if systemd's `sshd-vsock` service is running
    in the VM (systemd v256+), this command connects to a booted virtual machine via SSH. Make sure to
    run `mkosi ssh` with the same config as `mkosi build` so that it has the necessary information
    available to connect to the running virtual machine via SSH. Specifically, the SSH private key from
    the `SshKey=` setting is used to connect to the virtual machine. Use `mkosi genkey` to automatically
    generate a key and certificate that will be picked up by **mkosi**. Any arguments passed after the
    `ssh` verb are passed as arguments to the **ssh** invocation. To pass extra options, separate them
    from regular options with `--`. To connect to a container, use `machinectl login` or
    `machinectl shell`.

    The `Machine=` option can be used to give the machine a custom hostname when booting it which can
    later be used to **ssh** into the image (e.g. `mkosi --machine=mymachine vm` followed by
    `mkosi --machine=mymachine ssh`), as sketched below.
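    A minimal sketch of that workflow (the machine name `mymachine` is purely illustrative):

    ```sh
    mkosi genkey                                # generate the SSH key and certificate picked up by mkosi
    mkosi --machine=mymachine vm                # boot the image in a VM named "mymachine"
    mkosi --machine=mymachine ssh -- uname -a   # from another terminal: run a command in the VM over SSH
    ```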
All arguments specified after the `journalctl` verb and separated by `--` from the regular options are appended to the **journalctl** invocation.

`coredumpctl`
: Uses **coredumpctl** to look for coredumps inside the image. All arguments specified after the `coredumpctl` verb and separated by `--` from the regular options are appended to the **coredumpctl** invocation.

`sysupdate`
: Invokes **systemd-sysupdate** with the `--transfer-source=` option set to the output directory and the `--definitions=` option set to the directory configured with `SysupdateDirectory=`. All arguments specified after the `sysupdate` verb and separated from the regular options with `--` are passed directly to **systemd-sysupdate**.

`box`
: Run arbitrary commands inside of the same environment used to execute other verbs such as `boot`, `shell`, `vm` and more. This means `/usr` will be replaced by `/usr` from the tools tree if one is used, while everything else will remain in place. If no command is provided, `$SHELL` will be executed, or **bash** if `$SHELL` is not set. To pass extra options to the given command, separate them from regular options with `--`.

`clean`
: Remove build artifacts generated on a previous build. If combined with `-f`, also removes incremental build cache images and the tools tree. If `-f` is specified twice, also removes any package cache.

`serve`
: This builds the image if it is not built yet, and then serves the output directory (i.e. usually `mkosi.output/`, see below) via a small embedded HTTP server, listening on port 8081. Combine with `-f` in order to rebuild the image unconditionally before serving it. This command is useful for testing network-based acquisition of OS images, for example via `machinectl pull-raw …` and `machinectl pull-tar …`.

`burn <device>`
: This builds the image if it is not built yet, and then writes it to the specified block device. The partition contents are written as-is, but the GPT partition table is corrected to match sector and disk size of the specified medium.

`bump`
: Bumps the image version from `mkosi.version` and writes the resulting version string to `mkosi.version`. This is useful for implementing a simple versioning scheme: each time this verb is called the version is bumped in preparation for the subsequent build. Note that `--auto-bump`/`-B` may be used to automatically bump the version as part of a build. In that case the new version is only written to `mkosi.version` if the build succeeds. If `mkosi.bump` exists, it is invoked to generate the new version to be used instead of using mkosi's own logic.

`genkey`
: Generate a pair of SecureBoot keys for usage with the `SecureBootKey=`/`--secure-boot-key=` and `SecureBootCertificate=`/`--secure-boot-certificate=` options.

`documentation`
: Show **mkosi**'s documentation. If no argument is given, the **mkosi** man page is shown, but the arguments `mkosi`, `mkosi-initrd`, `initrd`, `mkosi-sandbox`, `sandbox`, `mkosi.news` and `news` are supported and respectively show the man pages for **mkosi**, **mkosi-initrd**, **mkosi-sandbox** and **mkosi**'s NEWS file. By default this verb will try several ways to output the documentation, but a specific option can be chosen with the `--doc-format` option. Distro packagers are encouraged to add a file `mkosi.1` into the `mkosi/resources` directory of the Python package, if it is missing, as well as to install it in the appropriate search path for man pages.
The man page can be generated from the markdown file `mkosi/resources/man/mkosi.1.md` e.g. via `pandoc -t man -s -o mkosi.1 mkosi.1.md`.

`completion`
: Generate shell completion for the shell given as argument and print it to stdout. The arguments `bash`, `fish`, and `zsh` are understood.

`dependencies`
: Output the list of packages required by **mkosi** to build and boot images. This list can be piped directly to a package manager to install the packages. For example, if the host system uses the **dnf** package manager, the packages could be installed as follows:

```sh
mkosi dependencies | xargs -d '\n' dnf install
```

  By default, only the dependencies required to build images with mkosi are shown. Extra tools tree profiles can be enabled to also output the packages belonging to those profiles. For example, running `mkosi dependencies -- --profile runtime` will also output the packages in the runtime profile on top of the regular packages. See the documentation for `ToolsTreeProfiles=` for a list of available profiles.

`latest-snapshot`
: Output the latest available snapshot in the configured mirror. This verb is useful to automatically bump snapshots every so often. Note that this verb only outputs the latest snapshot. It's up to the caller to ensure that the snapshot is written to the intended configuration file.

`help`
: This verb is equivalent to the `--help` switch documented below: it shows a brief usage explanation.

## Command-Line-Only Options

Those settings cannot be configured in the configuration files.

`--force`, `-f`
: Replace the output file if it already exists, when building an image. By default, when building an image and an output artifact already exists, **mkosi** will refuse operation. Specify this option once to delete all build artifacts from a previous run before re-building the image. If incremental builds are enabled, specifying this option twice will ensure the intermediary cache files are removed, too, before the re-build is initiated. If a package cache is used (also see the **FILES** section below), specifying this option thrice will ensure the package cache is removed too, before the re-build is initiated. For the `clean` operation this option has a slightly different effect: by default the verb will only remove build artifacts from a previous run, when specified once the incremental cache files and the tools tree are deleted too, and when specified twice the package cache is also removed.

`--directory=`, `-C`
: Takes a path to a directory. **mkosi** switches to this directory before doing anything. Note that the various configuration files are searched for in this directory, hence using this option is an effective way to build a project located in a specific directory. Defaults to the current working directory. If the empty string is specified, all configuration in the current working directory will be ignored.

`--debug`
: Enable additional debugging output.

`--debug-shell`
: When executing a command in the image fails, **mkosi** will start an interactive shell in the image allowing further debugging.

`--debug-workspace`
: When specified, the workspace directory will not be deleted and its location will be logged when **mkosi** exits.

`--debug-sandbox`
: Run **mkosi-sandbox** with **strace**.

`--version`
: Show package version.

`--help`, `-h`
: Show brief usage information.

`--genkey-common-name=`
: Common name to be used when generating keys via **mkosi**'s `genkey` command.
Defaults to `mkosi of %u`, where `%u` expands to the username of the user invoking **mkosi**.

`--genkey-valid-days=`
: Number of days that the keys should remain valid when generating keys via **mkosi**'s `genkey` command. Defaults to two years (730 days).

`--auto-bump=`, `-B`
: If specified, the version is bumped and if the build succeeds, the version is written to `mkosi.version` in a fashion equivalent to the `bump` verb. This is useful for simple, linear version management: each build in a series will have a version number one higher than the previous one. If `mkosi.bump` exists, it is invoked to generate the new version to be used instead of using mkosi's own logic.

`--doc-format`
: The format to show the documentation in. Supports the values `markdown`, `man`, `pandoc`, `system` and `auto`. In the case of `markdown` the documentation is shown in the original Markdown format. `man` shows the documentation in man page format, if it is available. **pandoc** will generate the man page format on the fly, if **pandoc** is available. `system` will show the system-wide man page for **mkosi**, which may or may not correspond to the version you are using, depending on how you installed **mkosi**. `auto`, which is the default, will try all methods in the order `man`, `pandoc`, `markdown`, `system`.

`--json`
: Show the summary output as JSON-SEQ.

`--wipe-build-dir`, `-w`
: Wipe the build directory if one is configured before building the image.

`--rerun-build-scripts`, `-R`
: Rerun build scripts. Requires the `Incremental=` option to be enabled and the image to have been built once already. If `History=` is enabled, the history from the previous build will be reused and no new history will be written.

## Supported output formats

The following output formats are supported:

* Raw *GPT* disk image, created using **systemd-repart** (*disk*)
* Plain directory, containing the OS tree (*directory*)
* Tar archive (*tar*)
* CPIO archive (*cpio*)
* Unified Kernel Image (*UKI*)
* ... and much more. See `Format=` documentation below.

The output format may also be set to *none* to have **mkosi** produce no image at all. This can be useful if you only want to use the image to produce another output in the build scripts (e.g. build an RPM).

When a *GPT* disk image is created, repart partition definition files may be placed in `mkosi.repart/` to configure the generated disk image.

It is highly recommended to run **mkosi** on a file system that supports reflinks such as XFS and btrfs and to keep all related directories on the same file system. This allows **mkosi** to create images very quickly by using reflinks to perform copying via copy-on-write operations.

## Configuration Settings

The following settings can be set through configuration files (the syntax with `SomeSetting=value`) and on the command line (the syntax with `--some-setting=value`). For some command line parameters, a single-letter shortcut is also allowed. In the configuration files, the setting must be in the appropriate section, so the settings are grouped by section below.

Configuration is parsed in the following order:

* The command line arguments are parsed.
* `mkosi.local.conf` and `mkosi.local/` are parsed if they exist (in that order). This file and directory should be in `.gitignore` (or equivalent) and are intended for local configuration.
* If an option has a corresponding default path, it is parsed if the corresponding default path exists.
* `mkosi.conf` is parsed if it exists in the directory configured with `--directory=` or the current working directory if `--directory=` is not used. If the specified directory does not contain a `mkosi.conf` or `mkosi.tools.conf` and a `mkosi/mkosi.conf` or `mkosi/mkosi.tools.conf` exists, the configuration will be parsed from the `mkosi/` subdirectory of the specified directory instead. * `mkosi.conf.d/` is parsed in the same directory as `mkosi.conf` if it exists. Each directory and each file with the `.conf` extension in `mkosi.conf.d/` is parsed. Any directory in `mkosi.conf.d` is parsed as if it were a regular top level directory, except for `mkosi.images/` and `mkosi.tools.conf`, which are only picked up in the top level directory. * If any profiles are configured, their configuration is parsed from the `mkosi.profiles/` directory. * Subimages are parsed from the `mkosi.images/` directory if it exists. Note that settings configured via the command line always override settings configured via configuration files. If the same setting is configured more than once via configuration files, later assignments override earlier assignments except for settings that take a collection of values. Also, settings read from `mkosi.local.conf` or `mkosi.local/` will override settings from configuration files that are parsed later, but not settings specified on the CLI. For settings that take a single value, the empty assignment (`SomeSetting=` or `--some-setting=`) can be used to override a previous setting and reset to the default. Settings that take a collection of values are merged by appending the new values to the previously configured values. Assigning the empty string to such a setting removes all previously assigned values, and overrides any configured default values as well. The values specified on the CLI are appended after all the values from configuration files. To conditionally include configuration files, the `[Match]` section can be used. A `[Match]` section consists of individual conditions. Conditions can use a pipe symbol (`|`) after the equals sign (`…=|…`), which causes the condition to become a triggering condition. The config file will be included if the logical AND of all non-triggering conditions and the logical OR of all triggering conditions is satisfied. To negate the result of a condition, prefix the argument with an exclamation mark. If an argument is prefixed with the pipe symbol and an exclamation mark, the pipe symbol must be passed first, and the exclamation second. Note that `[Match]` conditions compare against the current values of specific settings, and do not take into account changes made to the setting in configuration files that have not been parsed yet (settings specified on the CLI are taken into account). Also note that matching against a setting and then changing its value afterwards in a different config file may lead to unexpected results. The `[Match]` section of a `mkosi.conf` file in a directory applies to the entire directory. If the conditions are not satisfied, the entire directory is skipped. The `[Match]` sections of files in `mkosi.conf.d/` and `mkosi.local.conf` only apply to the file itself. If there are multiple `[Match]` sections in the same configuration file, each of them has to be satisfied in order for the configuration file to be included. Specifically, triggering conditions only apply to the current `[Match]` section and are reset between multiple `[Match]` sections. 
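For instance, the following hypothetical drop-in would be skipped entirely on CentOS, because its negated condition fails there (the file name and package are illustrative):

```ini
# mkosi.conf.d/10-not-centos.conf
[Match]
Distribution=!centos

[Content]
Packages=some-package
```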
As an example, the following will only match if the output format is one of `disk` or `directory` and the architecture is one of `x86-64` or `arm64`:

```ini
[Match]
Format=|disk
Format=|directory

[Match]
Architecture=|x86-64
Architecture=|arm64
```

The `[TriggerMatch]` section can be used to indicate triggering matches. These are identical to triggering conditions in systemd units except they apply to the entire match section instead of just a single condition. As an example, the following will match if the distribution is `debian` and the release is `bookworm`, or if the distribution is `ubuntu` and the release is `noble`:

```ini
[TriggerMatch]
Distribution=debian
Release=bookworm

[TriggerMatch]
Distribution=ubuntu
Release=noble
```

The semantics of conditions in `[TriggerMatch]` sections are the same as in `[Match]`, i.e. all normal conditions are joined by a logical AND and all triggering conditions are joined by a logical OR. When mixing `[Match]` and `[TriggerMatch]` sections, a match is achieved when all `[Match]` sections match and at least one `[TriggerMatch]` section matches. The absence of match sections is valued as true. Logically this means:

```
(⋀ᵢ Matchᵢ) ∧ (⋁ᵢ TriggerMatchᵢ)
```

There is also support for `[Assert]` and `[TriggerAssert]` sections, which behave identically to match sections except that parsing the configuration will fail if the assert sections are not satisfied, i.e. all `[Assert]` sections in a file as well as at least one `[TriggerAssert]` section have to be satisfied or config parsing will fail.

Command line options that take no argument are shown without `=` in their long version. In the config files, they should be specified with a boolean argument: either `1`, `yes`, or `true` to enable, or `0`, `no`, `false` to disable.

### [Distribution] Section

`Distribution=`, `--distribution=`, `-d`
: The distribution to install in the image. Takes one of the following arguments: `fedora`, `debian`, `kali`, `ubuntu`, `arch`, `opensuse`, `mageia`, `centos`, `rhel`, `rhel-ubi`, `openmandriva`, `rocky`, `alma`, `azure` or `custom`. If not specified, defaults to the distribution of the host or `custom` if the distribution of the host is not a supported distribution.

`Release=`, `--release=`, `-r`
: The release of the distribution to install in the image. The precise syntax of the argument this takes depends on the distribution used, and is either a numeric string (in case of Fedora Linux, CentOS, …, e.g. `29`), or a distribution version name (in case of Debian, Kali, Ubuntu, …, e.g. `artful`). Defaults to a recent version of the chosen distribution, or the version of the distribution running on the host if it matches the configured distribution.

`Architecture=`, `--architecture=`
: The architecture to build the image for. The architectures that are actually supported depend on the distribution used and whether a bootable image is requested or not. When building for a foreign architecture, you'll also need to install and register a user mode emulator for that architecture. One of the following architectures can be specified per image built: `alpha`, `arc`, `arm`, `arm64`, `ia64`, `loongarch64`, `mips64-le`, `mips-le`, `parisc`, `ppc`, `ppc64`, `ppc64-le`, `riscv32`, `riscv64`, `s390`, `s390x`, `tilegx`, `x86`, `x86-64`.

`Mirror=`, `--mirror=`, `-m`
: The mirror to use for downloading the distribution packages. Expects a mirror URL as argument. If not provided, the default mirror for the distribution is used.
The default mirrors for each distribution are as follows (unless specified, the same mirror is used for all architectures): | | x86-64 | aarch64 | |----------------|------------------------------------|--------------------------------| | `debian` | http://deb.debian.org | | | `arch` | https://fastly.mirror.pkgbuild.com | http://mirror.archlinuxarm.org | | `opensuse` | http://download.opensuse.org | | | `kali` | http://http.kali.org/kali | | | `ubuntu` | http://archive.ubuntu.com | http://ports.ubuntu.com | | `centos` | https://mirrors.centos.org | | | `rocky` | https://mirrors.rockylinux.org | | | `alma` | https://mirrors.almalinux.org | | | `fedora` | https://mirrors.fedoraproject.org | | | `rhel-ubi` | https://cdn-ubi.redhat.com | | | `mageia` | https://www.mageia.org | | | `openmandriva` | http://mirrors.openmandriva.org | | | `azure` | https://packages.microsoft.com/ | | `Snapshot=` : Download packages from the given snapshot instead of downloading the latest distribution packages from the given mirror. Takes a snapshot ID (the format of the snapshot ID differs per distribution), use the `latest-snapshot` verb to figure out the latest available snapshot. If this setting is configured and `Mirror=` is not explicitly configured, different default mirrors are used: | | x86-64 | aarch64 | |----------------|------------------------------------|--------------------------------| | `debian` | https://snapshot.debian.org | | | `arch` | https://archive.archlinux.org | http://mirror.archlinuxarm.org | | `opensuse` | http://download.opensuse.org | | | `ubuntu` | http://archive.ubuntu.com | http://ports.ubuntu.com | | `centos` | https://composes.stream.centos.org | | | `fedora` | https://kojipkgs.fedoraproject.org | | For any distribution not listed above, snapshots are not supported. `LocalMirror=`, `--local-mirror=` : The mirror will be used as a local, plain and direct mirror instead of using it as a prefix for the full set of repositories normally supported by distributions. Useful for fully offline builds with a single repository. Supported on **deb**-, **rpm**-, and **pacman**-based distributions. Overrides `--mirror=` but only for the local **mkosi** build, it will not be configured inside the final image, `--mirror=` (or the default repository) will be configured inside the final image instead. `RepositoryKeyCheck=`, `--repository-key-check=` : Controls signature/key checks when using repositories, enabled by default. Useful to disable checks when combined with `--local-mirror=` and using only a repository from a local filesystem. `RepositoryKeyFetch=`, `--repository-key-fetch=` : Controls whether **mkosi** will fetch distribution GPG keys remotely. Enabled by default on Ubuntu when not using a tools tree or when using Ubuntu tools trees to build Arch Linux or RPM-based distributions. Disabled by default on all other distributions. When disabled, the distribution GPG keys for the target distribution have to be installed locally on the host system alongside the package manager for that distribution. This setting is only implemented for distributions using **dnf**, **pacman** or **zypper** as their package manager. For other distributions the distribution GPG keys are always looked up locally regardless of the value of this setting. To make the distribution GPG keys for distributions available without enabling this setting, the corresponding package has to be installed on the host. 
This is usually one of `archlinux-keyring`, `debian-keyring`, `kali-archive-keyring`, `ubuntu-keyring` or `distribution-gpg-keys` (for RPM-based distributions). `Repositories=`, `--repositories=` : Enable package repositories that are disabled by default. This can be used to enable the EPEL repos for CentOS or different components of the Debian/Kali/Ubuntu repositories. ### [Output] Section `Format=`, `--format=`, `-t` : The image format type to generate. One of `directory` (for generating an OS image directly in a local directory), `tar` (similar, but a tarball of the OS image is generated), `cpio` (similar, but a cpio archive is generated), `disk` (a block device OS image with a GPT partition table), `uki` (a unified kernel image with the OS image in the `.initrd` PE section), `esp` (a disk image with only an ESP partition, bootloader and optionally a UKI), `oci` (a directory compatible with the OCI image specification), `sysext`, `confext`, `portable`, `addon` or `none` (the OS image is solely intended as a build image to produce another artifact). If the `disk` output format is used, the disk image is generated using **systemd-repart**. The repart partition definition files to use can be configured using the `RepartDirectories=` setting or via `mkosi.repart/`. When verity partitions are configured using **systemd-repart**'s `Verity=` setting, **mkosi** will automatically parse the verity hash partition's roothash from **systemd-repart**'s JSON output and include it in the kernel command line of every unified kernel image built by **mkosi**. If the `none` output format is used, the outputs from a previous build are not removed, but clean scripts (see `CleanScripts=`) are still executed. This allows rerunning a build script (see `BuildScripts=`) without removing the results of a previous build. `ManifestFormat=`, `--manifest-format=` : The manifest format type or types to generate. A comma-delimited list consisting of `json` (the standard JSON output format that describes the packages installed), `changelog` (a human-readable text format designed for diffing). By default no manifest is generated. `Output=`, `--output=`, `-o` : Name to use for the generated output image file or directory. Defaults to `image` or, if `ImageId=` is specified, it is used as the default output name, optionally suffixed with the version set with `ImageVersion=` or if a specific image is built from `mkosi.images`, the name of the image is preferred over `ImageId`. Note that this option does not allow configuring the output directory, use `OutputDirectory=` for that. Note that this only specifies the output prefix, depending on the specific output format, compression and image version used, the full output name might be `image_7.8.raw.xz`. `OutputExtension=`, `--output-extension=` : Use the specified extension for the output file. Defaults to the appropriate extension based on the output format. Only includes the file extension, not any compression extension which will be appended to this extension if compression is enabled. `CompressOutput=`, `--compress-output=` : Configure compression for the resulting image or archive. The argument can be either a boolean or a compression algorithm (**xz**, **zstd**). **zstd** compression is used by default, except CentOS and derivatives up to version 8, which default to **xz**, and OCI images, which default to **gzip**. Note that when applied to block device image types, compression means the image cannot be started directly but needs to be decompressed first. 
This also means that the `shell`, `boot`, `vm` verbs are not available when this option is used. Implied for `tar`, `cpio`, `uki`, `esp`, `oci` and `addon`.

`CompressLevel=`, `--compress-level=`
: Configure the compression level to use. Takes an integer. The possible values depend on the compression being used.

`OutputDirectory=`, `--output-directory=`, `-O`
: Path to a directory where to place all generated artifacts. If this is not specified and the directory `mkosi.output/` exists in the local directory, it is automatically used for this purpose.

`OutputMode=`, `--output-mode=`
: File system access mode used when creating the output image file. Takes an access mode in octal notation. If not set, uses the current system defaults.

`ImageVersion=`, `--image-version=`
: Configure the image version. This accepts any string, but it is recommended to specify a series of dot separated components. The version may also be configured by reading a `mkosi.version` file (in which case it may be conveniently managed via the `bump` verb or the `--auto-bump` option) or, if that file is executable, by running it and reading its stdout (see the **SCRIPTS** section below). When specified, the image version is included in the default output file name, i.e. instead of `image.raw` the default will be `image_0.1.raw` for version `0.1` of the image, and similar. The version is also passed via the `$IMAGE_VERSION` environment variable to any build scripts invoked (which may be useful to patch it into `/usr/lib/os-release` or similar, in particular the `IMAGE_VERSION=` field of it).

`ImageId=`, `--image-id=`
: Configure the image identifier. This accepts a freeform string that shall be used to identify the image with. If set, the default output file will be named after it (possibly suffixed with the version). The identifier is also passed via the `$IMAGE_ID` environment variable to any build scripts invoked. The image ID is automatically added to `/usr/lib/os-release`.

`SplitArtifacts=`, `--split-artifacts=`
: The artifact types to split out of the final image. A comma-delimited list consisting of `uki`, `kernel`, `initrd`, `os-release`, `pcrs`, `partitions`, `roothash`, `kernel-modules-initrd`, `repart-definitions` and `tar`. When building a bootable image `kernel` and `initrd` correspond to their artifact found in the image (or in the UKI), while `uki` copies out the entire UKI.

  If `pcrs` is specified, a JSON file containing the pre-calculated TPM2 digests is written out, according to the [UKI specification](https://uapi-group.org/specifications/specs/unified_kernel_image/#json-format-for-pcrsig), which is useful for offline signing.

  When building a disk image and `partitions` is specified, pass `--split=yes` to **systemd-repart** to have it write out split partition files for each configured partition. Read the [man](https://www.freedesktop.org/software/systemd/man/systemd-repart.html#--split=BOOL) page for more information. This is useful in A/B update scenarios where an existing disk image shall be augmented with a new version of a root or `/usr` partition along with its Verity partition and unified kernel.

  When `tar` is specified, the rootfs is additionally archived as a tar archive (compressed according to `CompressOutput=`).

  When `roothash` is specified and a dm-verity disk image is built, the dm-verity roothash is written out as a separate file, which is useful for offline signing.

  `kernel-modules-initrd` corresponds to the separate kernel modules initrd which mkosi appends to the main initrd.
This is primarily intended for debugging, as many initrd inspection tools don't properly handle multiple initrds appended to each other.

When `repart-definitions` is specified, a directory containing the used repart definition files is written to the output directory. If multiple directories are configured via `RepartDirectories=`, they are merged, with later directories taking priority over earlier ones when files with identical names exist.

By default `uki`, `kernel` and `initrd` are split out.

`RepartDirectories=`, `--repart-directory=`
: Paths to directories containing **systemd-repart** partition definition files that are used when **mkosi** invokes **systemd-repart** when building a disk image. If `mkosi.repart/` exists in the local directory, it will be used for this purpose as well. Note that **mkosi** invokes repart with `--root=` set to the root of the image, so any `CopyFiles=` source paths in partition definition files will be relative to the image root directory.

`SectorSize=`, `--sector-size=`
: Override the default sector size that **systemd-repart** uses when building a disk image.

`Overlay=`, `--overlay=`
: When used together with `BaseTrees=`, the output will consist only of changes to the specified base trees. Each base tree is attached as a lower layer in an overlayfs structure, and the output becomes the upper layer, initially empty. Thus files that are not modified compared to the base trees will not be present in the final output. This option may be used to create [systemd *system extensions*](https://uapi-group.org/specifications/specs/extension_image).

`Seed=`, `--seed=`
: Takes a UUID as argument or the special value `random`. Overrides the seed that **systemd-repart** uses when building a disk image. This is useful to achieve reproducible builds, where deterministic UUIDs and other partition metadata should be derived on each build. If not specified explicitly and the file `mkosi.seed` exists in the local directory, the UUID to use is read from it. Otherwise, a random UUID is used.

`CleanScripts=`, `--clean-script=`
: Takes a comma-separated list of paths to executables that are used as the clean scripts for this image. See the **SCRIPTS** section for more information.

### [Content] Section

`Packages=`, `--package=`, `-p`
: Install the specified distribution packages (i.e. RPM, deb, …) in the image. Takes a comma-separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined. Use `BuildPackages=` to specify packages that shall only be installed in an overlay that is mounted when the prepare scripts are executed with the `build` argument and when the build scripts are executed. The types and syntax of *package specifications* that are allowed depend on the package installer (e.g. **dnf** for RPM-based distros or **apt** for deb-based distros), but may include package names, package names with version and/or architecture, package name globs, package groups, and virtual provides, including file paths. See `PackageDirectories=` for information on how to make local packages available for installation with `Packages=`.
**Example**: when using a distro that uses **dnf**, the following configuration would install the **meson** package (in the latest version), the 32-bit version of the `libfdisk-devel` package, all available packages that start with the `git-` prefix, a **systemd** RPM from the local file system, one of the packages that provides `/usr/bin/ld`, the packages in the *Development Tools* group, and the package that contains the `mypy` python module.

```ini
Packages=meson
         libfdisk-devel.i686
         git-*
         /usr/bin/ld
         @development-tools
         python3dist(mypy)
```

`BuildPackages=`, `--build-package=`
: Similar to `Packages=`, but configures packages to install only in an overlay that is made available on top of the image to the prepare scripts when executed with the `build` argument and the build scripts. This option should be used to list packages containing header files, compilers, build systems, linkers and other build tools the `mkosi.build` scripts require to operate. Note that packages listed here will be absent in the final image.

`VolatilePackages=`, `--volatile-package=`
: Similar to `Packages=`, but packages configured with this setting are not cached when `Incremental=` is enabled and are installed after executing any build scripts. Specifically, this setting can be used to install packages that change often or which are built by a build script.

`PackageDirectories=`, `--package-directory=`
: Specify directories containing extra packages to be made available during the build. **mkosi** will create a local repository containing all packages in these directories and make it available when installing packages or running scripts. If the `mkosi.packages/` directory is found in the local directory it is also used for this purpose.

`VolatilePackageDirectories=`, `--volatile-package-directory=`
: Like `PackageDirectories=`, but any changes to the packages in these directories will not invalidate the cached images if `Incremental=` is enabled. Additionally, build scripts can add more packages to the local repository by placing the built packages in `$PACKAGEDIR`. The packages placed in `$PACKAGEDIR` are shared between all image builds and thus available for installation in all images using `VolatilePackages=`.

`WithRecommends=`, `--with-recommends=`
: Configures whether to install recommended or weak dependencies, depending on how they are named by the used package manager, or not. By default, recommended packages are not installed. This is only used for package managers that support the concept, which are currently **apt**, **dnf** and **zypper**.

`WithDocs=`, `--with-docs=`
: Include documentation in the image. Enabled by default. When disabled, if the underlying distribution package manager supports it, documentation is not included in the image. The `$WITH_DOCS` environment variable passed to the `mkosi.build` scripts is set to `0` or `1` depending on whether this option is enabled or disabled.

`BaseTrees=`, `--base-tree=`
: Takes a comma-separated list of paths to use as base trees. When used, these base trees are each copied into the OS tree and form the base distribution instead of installing the distribution from scratch. Only extra packages are installed on top of the ones already installed in the base trees. Note that for this to work properly, the base image still needs to contain the package manager metadata by setting `CleanPackageMetadata=no` (see `CleanPackageMetadata=`).

  Instead of a directory, a tar file or a disk image may be provided. In this case it is unpacked into the OS tree.
This mode of operation allows setting permissions and file ownership explicitly, in particular for projects stored in a version control system such as **git** which retain full file ownership and access mode metadata for committed files. `SkeletonTrees=`, `--skeleton-tree=` : Takes a comma-separated list of colon-separated path pairs. The first path of each pair refers to a directory to copy into the OS tree before invoking the package manager. The second path of each pair refers to the target directory inside the image. If the second path is not provided, the directory is copied on top of the root directory of the image. The second path is always interpreted as an absolute path. Use this to insert files and directories into the OS tree before the package manager installs any packages. If the `mkosi.skeleton/` directory is found in the local directory it is also used for this purpose with the root directory as target (also see the **FILES** section below). Note that skeleton trees are cached and any changes to skeleton trees after a cached image has been built (when using `Incremental=`) are only applied when the cached image is rebuilt (by using `-ff` or running `mkosi -f clean`). As with the base tree logic above, instead of a directory, a tar file may be provided too. `mkosi.skeleton.tar` will be automatically used if found in the local directory. To add extra package manager configuration files such as extra repositories, use `SandboxTrees=` as **mkosi** invokes the package managers from outside the image and not inside so any package manager configuration files provided via `SkeletonTrees=` won't take effect when **mkosi** invokes a package manager to install packages. `ExtraTrees=`, `--extra-tree=` : Takes a comma-separated list of colon-separated path pairs. The first path of each pair refers to a directory to copy from the host into the image. The second path of each pair refers to the target directory inside the image. If the second path is not provided, the directory is copied on top of the root directory of the image. The second path is always interpreted as an absolute path. Use this to override any default configuration files shipped with the distribution. If the `mkosi.extra/` directory is found in the local directory it is also used for this purpose with the root directory as target (also see the **FILES** section below). As with the base tree logic above, instead of a directory, a tar file may be provided too. `mkosi.extra.tar` will be automatically used if found in the local directory. `RemovePackages=`, `--remove-package=` : Takes a comma-separated list of package specifications for removal, in the same format as `Packages=`. The removal will be performed as one of the last steps. This step is skipped if `CleanPackageMetadata=no` is used. `RemoveFiles=`, `--remove-files=` : Takes a comma-separated list of globs. Files in the image matching the globs will be purged at the end. `CleanPackageMetadata=`, `--clean-package-metadata=` : Enable/disable removal of package manager databases and repository metadata at the end of installation. Can be specified as `true`, `false`, or `auto` (the default). With `auto`, package manager databases and repository metadata will be removed if the respective package manager executable is *not* present at the end of the installation. `SourceDateEpoch=`, `--source-date-epoch=` : Takes a timestamp in seconds since the UNIX epoch as argument. File modification times of all files will be clamped to this value. 
The variable is also propagated to **systemd-repart** and scripts executed by **mkosi**. If not set explicitly, `SOURCE_DATE_EPOCH` from `--environment=` and from the host environment are tried in that order. This is useful to make builds reproducible. See [SOURCE_DATE_EPOCH](https://reproducible-builds.org/specs/source-date-epoch/) for more information.

`SyncScripts=`, `--sync-script=`
: Takes a comma-separated list of paths to executables that are used as the sync scripts for this image. See the **SCRIPTS** section for more information.

`PrepareScripts=`, `--prepare-script=`
: Takes a comma-separated list of paths to executables that are used as the prepare scripts for this image. See the **SCRIPTS** section for more information.

`BuildScripts=`, `--build-script=`
: Takes a comma-separated list of paths to executables that are used as the build scripts for this image. See the **SCRIPTS** section for more information.

`PostInstallationScripts=`, `--postinst-script=`
: Takes a comma-separated list of paths to executables that are used as the post-installation scripts for this image. See the **SCRIPTS** section for more information.

`FinalizeScripts=`, `--finalize-script=`
: Takes a comma-separated list of paths to executables that are used as the finalize scripts for this image. See the **SCRIPTS** section for more information.

`PostOutputScripts=`, `--postoutput-script=`
: Takes a comma-separated list of paths to executables that are used as the post output scripts for this image. See the **SCRIPTS** section for more information.

`Bootable=`, `--bootable=`
: Takes a boolean or `auto`. Enables or disables generation of a bootable image. If enabled, **mkosi** will install an EFI bootloader, and add an ESP partition when the disk image output is used. If the selected EFI bootloader (see `Bootloader=`) is not installed or no kernel images can be found, the build will fail. `auto` behaves as if the option was enabled, but the build won't fail if either no kernel images or the selected EFI bootloader can't be found. If disabled, no bootloader will be installed even if found inside the image, no unified kernel images will be generated and no ESP partition will be added to the image if the disk output format is used.

  If the `esp` output format is used, an ESP partition and bootloader are always added, and the `Bootable=` option only controls whether a UKI is added to the ESP partition or not.

`Bootloader=`, `--bootloader=`
: Takes one of `none`, `systemd-boot`, `uki`, `grub`, `systemd-boot-signed`, `uki-signed` or `grub-signed`. Defaults to `systemd-boot`.

  If set to `none`, no EFI bootloader will be installed into the image. If set to `systemd-boot`, **systemd-boot** will be installed and for each installed kernel, a UKI will be generated and stored in `EFI/Linux` in the ESP. If set to `uki`, a single UKI will be generated for the latest installed kernel (the one with the highest version) which is installed to `EFI/BOOT/BOOTX64.EFI` in the ESP. If set to `grub`, for each installed kernel, a UKI will be generated and stored in `EFI/Linux` in the ESP. For each generated UKI, a menu entry is appended to the grub configuration in `grub/grub.cfg` in the ESP which chainloads into the UKI. A shim grub.cfg is also written to `EFI/<distribution>/grub.cfg` in the ESP, which loads `grub/grub.cfg` in the ESP, for compatibility with signed versions of grub which load the grub configuration from this location.

  The `signed` variants will only install pre-signed EFI binaries shipped by the distribution.
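For illustration, a UEFI-bootable disk image that boots via grub with locally generated UKIs could be configured along these lines (a minimal sketch, values are illustrative):

```ini
[Output]
Format=disk

[Content]
Bootable=yes
Bootloader=grub
```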
Kernels need to be placed into the root filesystem (for example using `ExtraTrees=`) under `/usr/lib/modules/$version`, named `vmlinux` or `vmlinuz`. The `$version` is as produced by Kbuild's `kernelversion` make target.

Note: When using `systemd-boot` or `systemd-boot-signed`, `mkosi` expects the `systemd-boot` EFI binaries to be present in the image. Depending on your distribution, these may be packaged separately. For example, Debian-based images will need `systemd-boot-efi`.

`BiosBootloader=`, `--bios-bootloader=`
: Takes one of `none` or `grub`. Defaults to `none`. If set to `none`, no BIOS bootloader will be installed. If set to `grub`, grub is installed as the BIOS boot loader if a bootable image is requested with the `Bootable=` option. If no repart partition definition files are configured, **mkosi** will add a grub BIOS boot partition and an EFI system partition to the default partition definition files.

  Note that this option is not mutually exclusive with `Bootloader=`. It is possible to have an image that is both bootable on UEFI and BIOS by configuring both `Bootloader=` and `BiosBootloader=`.

  The grub BIOS boot partition should have UUID `21686148-6449-6e6f-744e-656564454649` and should be at least 1MB. Even if no EFI bootloader is installed, we still need an ESP for BIOS boot as that's where we store the kernel, initrd and grub modules.

`ShimBootloader=`, `--shim-bootloader=`
: Takes one of `none`, `unsigned`, or `signed`. Defaults to `none`. If set to `none`, shim and MokManager will not be installed to the ESP. If set to `unsigned`, **mkosi** will search for unsigned shim and MokManager EFI binaries and install them. If `SecureBoot=` is enabled, **mkosi** will sign the unsigned EFI binaries before installing them. If set to `signed`, **mkosi** will search for signed EFI binaries and install those. Even if `SecureBoot=` is enabled, **mkosi** won't sign these binaries again.

  Note that this option only takes effect when an image that is bootable on UEFI firmware is requested using other options (`Bootable=`, `Bootloader=`).

  Note that when this option is enabled, **mkosi** will only install already signed bootloader binaries, kernel image files and unified kernel images, as self-signed binaries would not be accepted by the signed version of shim.

`UnifiedKernelImages=`, `--unified-kernel-images=`
: Specifies whether to use unified kernel images or not when `Bootloader=` is set to `systemd-boot` or `grub`. Takes one of `none`, `unsigned`, `signed` or `auto`. Defaults to `auto`. If `unsigned` or `signed`, unified kernel images are always used and the build will fail if any components required to build unified kernel images are missing. If set to `auto`, unified kernel images will be used if all necessary components are available. Otherwise Type 1 entries as defined by the Boot Loader Specification will be used instead. If disabled, Type 1 entries will always be used.

  If `Bootloader=` is set to one of the signed variants, a pre-built UKI will be searched for and the build will fail if it cannot be found, unless `UnifiedKernelImages=` is set to `unsigned`, in which case the UKI will be built locally. This is useful when combined with the runtime `Firmware=` option set to `custom` so that the local signing key is enrolled in the UEFI db.

`UnifiedKernelImageFormat=`, `--unified-kernel-image-format=`
: Takes a filename without any path components to specify the format that unified kernel images should be installed as.
This may include both the regular specifiers (see **Specifiers**) and special delayed specifiers that are expanded during the installation of the files, which are described below. The default format for this parameter is `&e-&k`, with `-&h` being appended if `roothash=` or `usrhash=` is found on the kernel command line, and `+&c` if `/etc/kernel/tries` is found in the image.

The following specifiers may be used:

| Specifier | Value                                              |
|-----------|----------------------------------------------------|
| `&&`      | `&` character                                      |
| `&e`      | Entry Token                                        |
| `&k`      | Kernel version                                     |
| `&h`      | `roothash=` or `usrhash=` value of kernel argument |
| `&c`      | Number of tries used for boot attempt counting     |

`UnifiedKernelImageProfiles=`, `--uki-profile=`
: Build additional UKI profiles. Takes a comma-separated list of paths to UKI profile config files. This option may be used multiple times in which case each config gets built into a corresponding UKI profile. Config files in the `mkosi.uki-profiles/` directory are automatically picked up. All configured UKI profiles are added as additional UKI profiles to each UKI built by **mkosi**. See the documentation for the `UKIProfile` section for information on which settings can be configured in UKI profile config files.

`Initrds=`, `--initrd=`
: Use user-provided initrd(s). Takes a comma-separated list of paths to initrd files. This option may be used multiple times in which case the initrd lists are combined. If no initrds are specified and a bootable image is requested, **mkosi** will automatically build a default initrd. **mkosi** will also look for initrds in a subdirectory `io.mkosi.initrd` of the artifact directory (see `$ARTIFACTDIR` in the section **ENVIRONMENT VARIABLES**). Any initrds found there are appended to the user-provided initrd(s) and any default initrd built by mkosi.

`InitrdProfiles=`, `--initrd-profile=`
: Set the profiles to enable for the default initrd. Takes a comma-delimited list of profiles. By default, all profiles are disabled.

  The `lvm` profile enables support for LVM. The `network` profile enables support for network via **systemd-networkd**. The `nfs` profile enables support for NFS; it requires networking in the initrd, using the `network` profile or some other custom method. The `pkcs11` profile enables support for PKCS#11. The `plymouth` profile provides a graphical interface at boot (animation and password prompt). The `raid` profile enables support for RAID arrays.

`InitrdPackages=`, `--initrd-package=`
: Extra packages to install into the default initrd. Takes a comma-separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined.

`InitrdVolatilePackages=`, `--initrd-volatile-package=`
: Similar to `VolatilePackages=`, except it applies to the default initrd.

`Devicetrees=`, `--devicetrees=`
: Comma-separated list of devicetree patterns for automatic hardware-based selection. Patterns are glob expressions. **mkosi** searches for devicetree files in standard locations relative to `/usr/lib/modules/<version>/dtb/`, `/usr/lib/firmware/<version>/device-tree/`, and `/usr/lib/linux-image-<version>/`. For UKI builds, multiple matches enable automatic hardware-based selection using the `.dtbauto` sections. Type 1 boot entries require exactly one match.

  Example: `Devicetrees=rockchip/*,imx.*` would include all Rockchip devicetrees and any IMX devicetrees.

`Splash=`, `--splash=`
: When set, the boot splash for any unified kernel image built by **mkosi** will be picked up from the given path inside the image.
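As an illustration, a board-specific image could combine the two preceding settings as follows (both the path and the pattern are hypothetical):

```ini
[Content]
Devicetrees=rockchip/*
Splash=/usr/share/pixmaps/splash.bmp
```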
`MicrocodeHost=`, `--microcode-host=`
: When set to true, only include microcode for the host's CPU in the image.

`KernelCommandLine=`, `--kernel-command-line=`
: Use the specified kernel command line when building images. If the root or usr partition are created with verity enabled, `roothash=` or `usrhash=` respectively are automatically added to the kernel command line and `root=` or `mount.usr=` should not be added. Otherwise, if the value of this setting contains the literals `root=PARTUUID` or `mount.usr=PARTUUID`, these are replaced with the partition UUID of the root or usr partition respectively. For example, `root=PARTUUID` would be replaced with `root=PARTUUID=58c7d0b2-d224-4834-a16f-e036322e88f7` where `58c7d0b2-d224-4834-a16f-e036322e88f7` is the partition UUID of the root partition.

`KernelModules=`, `--kernel-modules=`
: Takes a list of glob patterns that specify which kernel modules to include in the image. Each argument may be prefixed with a dash (`-`), to *exclude* matching modules. The arguments are evaluated in order, the last positive or negative matching pattern determines the result. The modules that were last matched by a positive pattern are included in the image, as well as their module and firmware dependencies. The `.ko` suffix and compression suffix are ignored during matching.

  Globs are matched against module paths relative to `/usr/lib/modules/<version>/`, e.g. the module at `/usr/lib/modules/<version>/kernel/foo/bar.ko.xz` becomes `kernel/foo/bar` for matching purposes.

  Globs beginning with "/" are treated specially. The glob is *first* matched against a path relative to `/usr/lib/modules/<version>/kernel`, and only then against `/usr/lib/modules/<version>/`. This is a convenience, since usually only the in-tree modules under `kernel/` are of interest. For example, the module at `/usr/lib/modules/<version>/kernel/foo/bar.ko.xz` can be matched by either `/foo/bar` or `/kernel/foo/bar`.

  The glob patterns may include just the basename (e.g. `loop`), which must match the basename of the module, the relative path (e.g. `block/loop`), which must match the final components of the module path up to the basename, or an absolute path (e.g. `/drivers/block/loop`), which must match the full path to the module. When suffixed with `/`, the pattern will match all modules underneath that directory. The patterns may include shell-style globs (`*`, `?`, `[…-…]`).

  If the special value `default` is used, the default kernel modules defined in the **mkosi-initrd** configuration are included as well. If the special value `host` is used, the currently loaded modules on the host system are included as well.

`KernelModulesInitrd=`, `--kernel-modules-initrd=`
: Boolean value, enabled (true) by default. If enabled, when building a bootable image, **mkosi** will generate an extra initrd for each unified kernel image it assembles. This initrd contains only modules and possibly firmware, and is then appended to the base initrd to form the final initrd file. This keeps the base initrd kernel-independent, and only augments it with the necessary kernel-version specific modules when the UKI is assembled.

  If disabled, no extra initrd is generated. Note that the kernel modules will still **not** be included in the base initrd, which remains kernel-independent. Instead, it is assumed the user provides the necessary modules, if any, via an additional custom initrd.

`KernelInitrdModules=`, `--kernel-modules-initrd-include=`
: Like `KernelModules=`, but specifies the kernel modules to include in the initrd.
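To illustrate the matching rules for `KernelModules=` described above, the following hypothetical configuration starts from the **mkosi-initrd** defaults, drops everything under `kernel/drivers/net/`, and then re-includes a single driver (the last matching pattern wins):

```ini
[Content]
KernelModules=default
              -/drivers/net/
              /drivers/net/virtio_net
```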
`FirmwareFiles=`, `--firmware-files=`
: Takes a list of glob patterns that specify which firmware files to include in the image. The patterns are interpreted in the same way as in the `KernelModules=` setting, except that the paths are relative to `/usr/lib/firmware/`. The compression suffix is ignored and must not be included in the pattern. Firmware dependencies of kernel modules installed in the image are automatically included.

  Example: `FirmwareFiles=cxgb4/bcm8483.bin` or `FirmwareFiles=bcm8483.*` would both cause `/usr/lib/firmware/cxgb4/bcm8483.bin.xz` to be included, even if not listed by a module.

`Locale=`, `--locale=`, `LocaleMessages=`, `--locale-messages=`, `Keymap=`, `--keymap=`, `Timezone=`, `--timezone=`, `Hostname=`, `--hostname=`, `RootShell=`, `--root-shell=`
: These settings correspond to the identically named **systemd-firstboot** options. See **systemd-firstboot**(1) for more information. Additionally, where applicable, the corresponding systemd credentials for these settings are written to `/usr/lib/credstore`, so that they apply even if only `/usr` is shipped in the image.

`RootPassword=`, `--root-password=`
: Set the system root password. If this option is not used, but a `mkosi.rootpw` file is found in the local directory, the password is automatically read from it, or, if the file is executable, it is run as a script and stdout is read instead (see the **SCRIPTS** section below). If the password starts with `hashed:`, it is treated as an already hashed root password. The root password is also stored in `/usr/lib/credstore` under the appropriate systemd credential so that it applies even if only `/usr` is shipped in the image. To create an unlocked account without any password use `hashed:` without a hash.

`Autologin=`, `--autologin=`, `-a`
: Enable autologin for the `root` user on `/dev/pts/0` (nspawn), `/dev/tty1` and `/dev/hvc0`.

`MakeInitrd=`, `--make-initrd=`
: Add `/etc/initrd-release` and `/init` to the image so that it can be used as an initramfs.

`Ssh=`, `--ssh=`
: Specifies whether to install an **sshd** socket unit and matching service in the final image. Takes one of `always`, `never`, `auto` or `runtime`. Defaults to `auto`. If set to `auto` and `sshd` is present in the image and the generator binary `systemd-ssh-generator` is not present, or if set to `always`, mkosi will install **sshd** units in the final image that expose SSH over VSock. If set to `never`, mkosi will not install these units. If the `runtime` value is used, mkosi will also not install any units but abort starting `mkosi vm` if no SSH credentials are configured.

  When building with this option and running the image using `mkosi vm`, the `mkosi ssh` command can be used to connect to the container/VM via SSH. Note that you still have to make sure openssh is installed in the image to make `mkosi ssh` behave correctly. Run `mkosi genkey` to automatically generate an X.509 certificate and private key to be used by **mkosi** to enable SSH access to any virtual machines via `mkosi ssh`. To access images booted using `mkosi boot`, use **machinectl**.

  Starting with systemd v256, **systemd-ssh-generator**(8) will automatically provide `sshd` over VSock when running inside a VM. So if you're using a recent version of systemd inside a VM, this option is generally not needed.
You still need openssh installed in the image, and the default setting of `--vsock=auto` is enough to ensure a VSock is available inside the VM.

Note: if the image distro uses SELinux, mkosi's sshd service will be denied access to the VSock, resulting in failure to connect to it from the host. You will need to either disable SELinux enforcement, or create a custom policy module (e.g. with `audit2allow`).

`SELinuxRelabel=`, `--selinux-relabel=`
: Specifies whether to relabel files to match the image's SELinux policy. Takes a boolean value or `auto`. Defaults to `auto`. If disabled, files will not be relabeled. If enabled, an SELinux policy has to be installed in the image and **setfiles** has to be available to relabel files. If any errors occur during **setfiles**, the build will fail. If set to `auto`, files will be relabeled if mkosi is not building a directory image, an SELinux policy is installed in the image and **setfiles** is available; any errors that occur during **setfiles** will be ignored in that case.

  Note that when running unprivileged, **setfiles** will fail to set any labels that are not in the host's SELinux policy. To ensure **setfiles** succeeds without errors, make sure to run **mkosi** as root or build from a host system with the same SELinux policy as the image you're building.

`MachineId=`, `--machine-id=`
: Takes a UUID or the special value `random`. Sets the machine ID of the image to the specified UUID. If set to `random`, a random UUID will be written to `/etc/machine-id`. If not specified explicitly and the file `mkosi.machine-id` exists in the local directory, the UUID to use is read from it. Otherwise, `uninitialized` will be written to `/etc/machine-id`.

### [Validation] Section

`SecureBoot=`, `--secure-boot=`
: Sign **systemd-boot** (if it is not signed yet) and any generated unified kernel images for UEFI SecureBoot.

`SecureBootAutoEnroll=`, `--secure-boot-auto-enroll=`
: Set up automatic enrollment of the secure boot keys in virtual machines as documented in **systemd-boot**(7) if `SecureBoot=` is used. Note that **systemd-boot** will only do automatic secure boot key enrollment in virtual machines starting from systemd v253. To do auto enrollment on systemd v252 or on bare metal machines, write a **systemd-boot** configuration file to `/efi/loader/loader.conf` using an extra tree with `secure-boot-enroll force` or `secure-boot-enroll manual` in it. Auto enrollment is not supported on systemd versions older than v252. Defaults to `yes`.

`SecureBootKey=`, `--secure-boot-key=`
: Path to the PEM file containing the secret key for signing the UEFI kernel image if `SecureBoot=` is used, and PCR signatures when `SignExpectedPcr=` is also used. When `SecureBootKeySource=` is specified, the input type depends on the source.

`SecureBootCertificate=`, `--secure-boot-certificate=`
: Path to the X.509 file containing the certificate for the signed UEFI kernel image, if `SecureBoot=` is used.

`SecureBootSignTool=`, `--secure-boot-sign-tool=`
: Tool to use to sign secure boot PE binaries. Takes one of `systemd-sbsign`, `sbsign` or `auto`. Defaults to `auto`. If set to `auto`, either **systemd-sbsign** or **sbsign** are used if available, with **systemd-sbsign** being preferred.

`Verity=`, `--verity=`
: Whether to enforce or disable verity for extension images. Takes one of `signed`, `hash`, `defer`, `auto` or a boolean value.
If set to `signed`, a verity key and certificate must be present and the build will fail if we don't detect any verity partitions in the disk image produced by **systemd-repart**. If disabled, verity partitions will be excluded from the extension images produced by **systemd-repart**. If set to `hash`, **mkosi** configures **systemd-repart** to create a verity hash partition, but no signature partition. If set to `defer`, space for the verity signature partition will be allocated, but it will not be populated yet. If set to `auto` and a verity key and certificate are present, **mkosi** will pass them to **systemd-repart** and expects the generated disk image to contain verity partitions, but the build won't fail if no verity partitions are found in the disk image produced by **systemd-repart**.

  Note that explicitly disabling verity signature and/or hash is not yet implemented for the `disk` output and only works for extension images at the moment.

`VerityKey=`, `--verity-key=`
: Path to the PEM file containing the secret key for signing the verity signature, if a verity signature partition is added with **systemd-repart**. When `VerityKeySource=` is specified, the input type depends on the source.

`VerityCertificate=`, `--verity-certificate=`
: Path to the X.509 file containing the certificate for signing the verity signature, if a verity signature partition is added with **systemd-repart**.

`SignExpectedPcr=`, `--sign-expected-pcr=`
: Measure the components of the unified kernel image (UKI) using **systemd-measure** and embed the PCR signature into the unified kernel image. This option takes a boolean value or the special value `auto` (the default), which is equivalent to a true value if the **systemd-measure** binary is in `PATH`. Depends on `SecureBoot=` being enabled and on the key from `SecureBootKey=`.

`SignExpectedPcrKey=`, `--sign-expected-pcr-key=`
: Path to the PEM file containing the secret key for signing the expected PCR signatures. When `SignExpectedPcrKeySource=` is specified, the input type depends on the source.

`SignExpectedPcrCertificate=`, `--sign-expected-pcr-certificate=`
: Path to the X.509 file containing the certificate for signing the expected PCR signatures.

`SecureBootKeySource=`, `--secure-boot-key-source=`, `VerityKeySource=`, `--verity-key-source=`, `SignExpectedPcrKeySource=`, `--sign-expected-key-source=`
: The source of the corresponding private key, to support OpenSSL engines and providers, e.g. `--secure-boot-key-source=engine:pkcs11` or `--secure-boot-key-source=provider:pkcs11`.

`SecureBootCertificateSource=`, `--secure-boot-certificate-source=`, `VerityCertificateSource=`, `--verity-certificate-source=`, `SignExpectedPcrCertificateSource=`, `--sign-expected-certificate-source=`
: The source of the corresponding certificate, to support OpenSSL providers, e.g. `--secure-boot-certificate-source=provider:pkcs11`. Note that engines are not supported.

`Passphrase=`, `--passphrase=`
: Specify the path to a file containing the passphrase to use for LUKS encryption. It should contain the passphrase literally, and not end in a newline character (i.e. in the same format as **cryptsetup** and `/etc/crypttab` expect the passphrase files). The file must have an access mode of 0600 or less.

  Note that this setting on its own does not enable any encryption. You also have to add one or more partition definition files to `mkosi.repart/` with `Encrypt=key-file` to add encrypted partitions to the image.
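For illustration, a minimal sketch of such a partition definition follows. The file name and filesystem choice here are arbitrary examples; see **repart.d**(5) for the full format:

```ini
# mkosi.repart/10-root.conf (hypothetical example)
[Partition]
Type=root
Format=ext4
CopyFiles=/
# Encrypt the partition using the passphrase file supplied via Passphrase=
Encrypt=key-file
```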
`Checksum=`, `--checksum=`
: Generate a `SHA256SUMS` file of all generated artifacts after the build is complete.

`Sign=`, `--sign=`
: Sign the generated `SHA256SUMS` using **gpg** after completion.

`OpenPGPTool=`, `--openpgp-tool=`
: OpenPGP implementation to use for signing. `gpg` is the default. Selecting a value different from the default will use the given Stateless OpenPGP (SOP) tool for signing the `SHA256SUMS` file. Example choices are `sqop` and `rsop`, but any implementation from https://www.openpgp.org/about/sop/ that can be installed locally will work.

`Key=`, `--key=`
: Select the **gpg** key to use for signing `SHA256SUMS`. This key must be already present in the **gpg** keyring.

### [Build] Section

`ToolsTree=`, `--tools-tree=`
: If specified, programs executed by **mkosi** to build and boot an image are looked up inside the given tree instead of in the host system. Use this option to make image builds more reproducible by always using the same versions of programs to build the final image instead of whatever version is installed on the host system. If this option is not used, but the `mkosi.tools/` directory is found in the local directory, it is automatically used for this purpose with the root directory as target. The tools tree directory is kept between repeated image builds unless cleaned by calling `mkosi clean -f`.

  Note that binaries found in any of the paths configured with `ExtraSearchPaths=` will be executed with `/usr/` from the tools tree instead of from the host. If the host distribution or release does not match the tools tree distribution or release respectively, this might result in failures when trying to execute binaries from any of the extra search paths.

  If set to `yes`, **mkosi** will automatically add an extra tools tree image and use it as the tools tree. This image can be further configured using the settings below or with `mkosi.tools.conf`, which can either be a file or directory containing extra configuration for the default tools tree. See the **TOOLS TREE** section for further details.

`ToolsTreeDistribution=`, `--tools-tree-distribution=`
: Set the distribution to use for the default tools tree. Defaults to the host distribution, except for Ubuntu, which defaults to Debian, and RHEL, CentOS, Alma and Rocky, which default to Fedora, or `custom` if the distribution of the host is not a supported distribution.

`ToolsTreeRelease=`, `--tools-tree-release=`
: Set the distribution release to use for the default tools tree. By default, the hardcoded default release in **mkosi** for the distribution is used.

`ToolsTreeProfiles=`, `--tools-tree-profile=`
: Set the profiles to enable for the default tools tree. Takes a comma-delimited list consisting of `devel`, `misc`, `package-manager` and `runtime`. By default, all profiles except `devel` are enabled. The `devel` profile contains tools required to build (C/C++) projects. The `misc` profile contains various useful tools that are handy to have available in scripts. The `package-manager` profile contains package managers and related tools other than those native to the tools tree distribution. The `runtime` profile contains the tools required to boot images in a systemd-nspawn container or in a virtual machine.

`ToolsTreeMirror=`, `--tools-tree-mirror=`
: Set the mirror to use for the default tools tree. By default, the default mirror for the tools tree distribution is used.

`ToolsTreeRepositories=`, `--tools-tree-repository=`
: Same as `Repositories=` but for the default tools tree.
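To illustrate how these settings combine, a hypothetical `[Build]` section that pins the default tools tree to a specific distribution and release might look as follows (the distribution and release values are only examples):

```ini
[Build]
ToolsTree=yes
ToolsTreeDistribution=fedora
ToolsTreeRelease=41
```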
`ToolsTreeSandboxTrees=`, `--tools-tree-sandbox-tree=`
: Same as `SandboxTrees=` but for the default tools tree.

`ToolsTreePackages=`, `--tools-tree-package=`
: Extra packages to install into the default tools tree. Takes a comma-separated list of package specifications. This option may be used multiple times, in which case the specified package lists are combined.

`ToolsTreePackageDirectories=`, `--tools-tree-package-directory=`
: Same as `PackageDirectories=`, but for the default tools tree.

`ToolsTreeCertificates=`, `--tools-tree-certificates=`
: Specify whether to use certificates and keys from the tools tree. Enabled by default. If enabled, `/etc/pki/ca-trust`, `/etc/pki/tls`, `/etc/ssl`, `/etc/ca-certificates`, and `/var/lib/ca-certificates` from the tools tree are used. Otherwise, these directories are picked up from the host.

`ExtraSearchPaths=`, `--extra-search-path=`
: List of colon-separated paths to look for tools in, before using the regular `$PATH` search path.

`Incremental=`, `--incremental=`, `-i`
: Takes either `strict` or a boolean value as its argument. Enables incremental build mode. In this mode, a copy of the OS image is created immediately after all OS packages are installed and the prepare scripts have executed, but before the `mkosi.build` scripts are invoked (or anything that happens after it). On subsequent invocations of **mkosi** with the `-i` switch this cached image may be used to skip the OS package installation, thus drastically speeding up repetitive build times. Note that while there is some rudimentary cache invalidation, it is definitely not perfect. In order to force a rebuild of the cached image, combine `-i` with `-ff` to ensure the cached image is first removed and then re-created.

  If set to `strict`, the build fails if a previously built cached image does not exist.

`CacheOnly=`, `--cache-only=`
: Takes one of `auto`, `metadata`, `always` or `never`. Defaults to `auto`. If `always`, the package manager is instructed not to contact the network. This provides a minimal level of reproducibility, as long as the package cache is already fully populated. If set to `metadata`, the package manager can still download packages, but we won't sync the repository metadata. If set to `auto`, the repository metadata is synced unless we have a cached image (see `Incremental=`) and packages can be downloaded during the build. If set to `never`, repository metadata is always synced and packages can be downloaded during the build.

`SandboxTrees=`, `--sandbox-tree=`
: Takes a comma-separated list of colon-separated path pairs. The first path of each pair refers to a directory to copy into the mkosi sandbox before executing a tool. The second path of each pair refers to the target directory inside the sandbox. If the second path is not provided, the directory is copied on top of the root directory of the sandbox. The second path is always interpreted as an absolute path. If the `mkosi.sandbox/` directory is found in the local directory, it is used for this purpose with the root directory as target (also see the **FILES** section below).

  **mkosi** will look for the package manager configuration and related files in the configured sandbox trees. Unless specified otherwise, it will use the configuration files from their canonical locations in `/usr` or `/etc` in the sandbox trees. For example, it will look for `/etc/dnf/dnf.conf` in the sandbox trees if **dnf** is used to install packages.
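As an example, assuming a local directory `my-sandbox-tree/` that contains a custom dnf configuration at `etc/dnf/dnf.conf` (a hypothetical layout), the tree can be copied over the sandbox root like this:

```ini
[Build]
SandboxTrees=my-sandbox-tree/
```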
`WorkspaceDirectory=`, `--workspace-directory=`
: Path to a directory where to store data required temporarily while building the image. This directory should have enough space to store the full OS image, though in most modes the actually used disk space is smaller. If not specified, a subdirectory of `$XDG_CACHE_HOME` (if set), `$CACHE_DIRECTORY` (if set), `$HOME/.cache` (if set) or `/var/tmp` is used.

  The data in this directory is removed automatically after each build. It's safe to manually remove the contents of this directory should an **mkosi** invocation be aborted abnormally (for example, due to reboot/power failure).

`CacheDirectory=`, `--cache-directory=`
: Takes a path to a directory to use as the incremental cache directory for the incremental images produced when the `Incremental=` option is enabled. If this option is not used, but a `mkosi.cache/` directory is found in the local directory, it is automatically used for this purpose.

`CacheKey=`, `--cache-key=`
: Specifies the subdirectory within the cache directory where to store the cached image. This may include both the regular specifiers (see **Specifiers**) and special delayed specifiers that are expanded after config parsing has finished, instead of during config parsing, which are described below.

  The default format for this parameter is `&d~&r~&a~&I`.

  The following specifiers may be used:

  | Specifier | Value                                              |
  |-----------|----------------------------------------------------|
  | `&&`      | `&` character                                      |
  | `&d`      | `Distribution=`                                    |
  | `&r`      | `Release=`                                         |
  | `&a`      | `Architecture=`                                    |
  | `&i`      | `ImageId=`                                         |
  | `&v`      | `ImageVersion=`                                    |
  | `&I`      | Subimage name within mkosi.images/ or `main`       |

  Note that all images within a build must have a unique cache key.

`PackageCacheDirectory=`, `--package-cache-dir=`
: Takes a path to a directory to use as the package cache directory for the distribution package manager used. If unset, but a `mkosi.pkgcache/` directory is found in the local directory, it is automatically used for this purpose; otherwise a suitable directory in the user's home directory or system is used.

`BuildDirectory=`, `--build-directory=`
: Takes a path to a directory to use as the build directory for build systems that support out-of-tree builds (such as Meson). The directory used this way is shared between repeated builds, and allows the build system to reuse artifacts (such as object files, executables, …) generated on previous invocations. The build scripts can find the path to this directory in the `$BUILDDIR` environment variable. This directory is mounted into the image's root directory when **mkosi-chroot** is invoked during execution of the build scripts. If this option is not specified, but a directory `mkosi.builddir/` exists in the local directory, it is automatically used for this purpose (also see the **FILES** section below).

`BuildKey=`, `--build-key=`
: Specifies the subdirectory within the build directory where to store incremental build artifacts. This may include both the regular specifiers (see **Specifiers**) and special delayed specifiers that are expanded after config parsing has finished, instead of during config parsing, which are the same delayed specifiers that are supported by `CacheKey=`.

  The default format for this parameter is `&d~&r~&a`.

  To disable usage of a build subdirectory completely, assign a literal `-` to this setting.

`UseSubvolumes=`, `--use-subvolumes=`
: Takes a boolean or `auto`. Enables or disables use of btrfs subvolumes for directory tree outputs.
If enabled, **mkosi** will create the root directory as a btrfs subvolume and use btrfs subvolume snapshots where possible to copy base or cached trees, which is much faster than doing a recursive copy. If explicitly enabled and `btrfs` is not installed or subvolumes cannot be created, an error is raised. If `auto`, missing **btrfs** or failures to create subvolumes are ignored.

`RepartOffline=`, `--repart-offline=`
: Specifies whether to build disk images offline, i.e. without using loopback devices. Enabled by default. When enabled, **systemd-repart** will not use loopback devices to build disk images. When disabled, **systemd-repart** will always use loopback devices to build disk images.

  Note that when using `RepartOffline=no`, **mkosi** cannot run unprivileged and the image build has to be done as the root user outside of any containers and with loopback devices available on the host system.

  There are currently two known scenarios where `RepartOffline=no` has to be used. The first is when using `Subvolumes=` in a repart partition definition file, as subvolumes cannot be created without using loopback devices. The second is when creating a system with SELinux and an XFS root partition. Because **mkfs.xfs** does not support populating an XFS filesystem with extended attributes, loopback devices have to be used to ensure the SELinux extended attributes end up in the generated XFS filesystem.

`History=`, `--history=`
: Takes a boolean. If enabled, **mkosi** will write the configuration provided via the CLI for the latest build to the `.mkosi-private` subdirectory in the directory from which it was invoked. These arguments are then reused as long as the image is not rebuilt, to avoid having to specify them over and over again.

  To give an example of why this is useful: if you run `mkosi -O my-custom-output-dir -f` followed by `mkosi vm`, **mkosi** will fail saying the image hasn't been built yet. If you run `mkosi -O my-custom-output-dir --history=yes -f` followed by `mkosi vm`, it will boot the image built in the previous step as expected.

`BuildSources=`, `--build-sources=`
: Takes a comma-separated list of colon-separated path pairs. The first path of each pair refers to a directory to mount from the host. The second path of each pair refers to the directory where the source directory should be mounted when running scripts. Every target path is prefixed with `/work/src` and all build sources are sorted lexicographically by their target before mounting, so that top level paths are mounted first. If not configured explicitly, the current working directory is mounted to `/work/src`.

`BuildSourcesEphemeral=`, `--build-sources-ephemeral=`
: Takes a boolean or the special value `buildcache`. Disabled by default. Configures whether changes to the source directories (i.e. the working directory and any directories configured using `BuildSources=`) are persisted. If enabled, all source directories will be reset to their original state every time after running all scripts of a specific type (except sync scripts).

  💥💣💥 If set to `buildcache`, the overlay is not discarded when running build scripts, but saved to the build directory, configured via `BuildDirectory=`, and will be reused on subsequent runs. The overlay is still discarded for all other scripts. This option can be used to implement more advanced caching of builds, but can lead to unexpected states of the source directory. When using this option, a build directory must be configured.
💥💣💥

`Environment=`, `--environment=`
: Adds variables to the environment that package managers and the prepare/build/postinstall/finalize scripts are executed with. Takes a space-separated list of variable assignments or just variable names. In the latter case, the values of those variables will be passed through from the environment in which **mkosi** was invoked. This option may be specified more than once, in which case all listed variables will be set. If the same variable is set twice, the later setting overrides the earlier one.

`EnvironmentFiles=`, `--env-file=`
: Takes a comma-separated list of paths to files that contain environment variable definitions to be added to the scripting environment. Uses `mkosi.env` if it is found in the local directory. The variables are first read from `mkosi.env` if it exists, then from the given list of files and then from the `Environment=` settings.

`WithTests=`, `--with-tests=`, `-T`
: If set to false (or when the command-line option is used), the `$WITH_TESTS` environment variable is set to `0` when the `mkosi.build` scripts are invoked. This is supposed to be used by the build scripts to bypass any unit or integration tests that are normally run during the source build process. Note that this option has no effect unless the `mkosi.build` build scripts honor it.

`WithNetwork=`, `--with-network=`
: When true, enables network connectivity while the build scripts `mkosi.build` are invoked. By default, the build scripts run with networking turned off. The `$WITH_NETWORK` environment variable is passed to the `mkosi.build` build scripts indicating whether the build is done with or without network.

`ProxyUrl=`, `--proxy-url=`
: Configure a proxy to be used for all outgoing network connections. The various tools that **mkosi** invokes and that support proxy configuration are configured to use this proxy. **mkosi** also sets various well-known environment variables to specify the proxy to use for any programs it invokes that may need internet access.

`ProxyExclude=`, `--proxy-exclude=`
: Configure hostnames for which requests should not go through the proxy. Takes a comma-separated list of hostnames.

`ProxyPeerCertificate=`, `--proxy-peer-certificate=`
: Configure a file containing certificates used to verify the proxy. Defaults to the system-wide certificate store. Currently, setting a proxy peer certificate is only supported when **dnf** or **dnf5** is used to build the image.

`ProxyClientCertificate=`, `--proxy-client-certificate=`
: Configure a file containing the certificate used to authenticate the client with the proxy. Currently, setting a proxy client certificate is only supported when **dnf** or **dnf5** is used to build the image.

`ProxyClientKey=`, `--proxy-client-key=`
: Configure a file containing the private key used to authenticate the client with the proxy. Defaults to the proxy client certificate if one is provided. Currently, setting a proxy client key is only supported when **dnf** or **dnf5** is used to build the image.

### [Runtime] Section (previously known as the [Host] section)

`NSpawnSettings=`, `--settings=`
: Specifies a `.nspawn` settings file for **systemd-nspawn** to use in the `boot` and `shell` verbs, and to place next to the generated image file. This is useful to configure the **systemd-nspawn** environment when the image is run. If this setting is not used but an `mkosi.nspawn` file is found in the local directory, it is automatically used for this purpose.
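As a sketch, a hypothetical `mkosi.nspawn` file that grants the container an extra capability could look like this (see **systemd.nspawn**(5) for all supported settings):

```ini
# mkosi.nspawn (hypothetical example)
[Exec]
Capability=CAP_NET_ADMIN
```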
`VirtualMachineMonitor=`, `--vmm=`
: Configures the virtual machine monitor to use. Takes one of `qemu` or `vmspawn`. Defaults to `qemu`.

  When set to `qemu`, the image is booted with **qemu**. Most output formats can be booted in **qemu**. Any arguments specified after the verb are appended to the **qemu** invocation and are interpreted as extra **qemu** command line arguments.

  When set to `vmspawn`, **systemd-vmspawn** is used to boot up the image. **vmspawn** only supports disk and directory type images. Any arguments specified after the verb are appended to the **systemd-vmspawn** invocation and are interpreted as extra vmspawn options and extra kernel command line arguments.

`Console=`, `--console=`
: Configures how to set up the console of the VM. Takes one of `interactive`, `read-only`, `native`, or `gui`. Defaults to `interactive`. `interactive` provides an interactive terminal interface to the VM. `read-only` is similar, but is strictly read-only, i.e. it does not accept any input from the user. `native` also provides a TTY-based interface, but uses **qemu**'s native implementation (which means the **qemu** monitor is available). `gui` shows the **qemu** graphical UI.

`CPUs=`, `--cpus=`
: Configures the number of CPU cores to assign to the guest when booting a virtual machine. Defaults to `2`. When set to `0`, the number of CPUs available to the **mkosi** process will be used.

`RAM=`, `--ram=`
: Configures the amount of RAM assigned to the guest when booting a virtual machine. Defaults to `2G`.

`MaxMem=`, `--maxmem=`
: Configures the maximum amount of memory the guest may deploy in total (RAM + hotplug memory devices). Defaults to the amount of RAM configured.

`KVM=`, `--kvm=`
: Configures whether KVM acceleration should be used when booting a virtual machine. Takes a boolean value or `auto`. Defaults to `auto`.

`CXL=`, `--cxl=`
: Configures whether CXL devices are enabled for a given machine. Only valid if the architecture supports CXL. Takes a boolean value. Defaults to `false`.

`VSock=`, `--vsock=`
: Configures whether to provision a vsock when booting a virtual machine. Takes a boolean value or `auto`. Defaults to `auto`.

`VSockCID=`, `--vsock-cid=`
: Configures the vsock connection ID to use when booting a virtual machine. Takes a number in the interval `[3, 0xFFFFFFFF)` or `hash` or `auto`. Defaults to `auto`. When set to `hash`, the connection ID will be derived from the full path to the image. When set to `auto`, **mkosi** will try to find a free connection ID automatically. Otherwise, the provided number will be used as is.

`TPM=`, `--tpm=`
: Configures whether to use a virtual TPM when booting a virtual machine. Takes a boolean value or `auto`. Defaults to `auto`.

`Removable=`, `--removable=`
: Configures whether to attach the image as a removable device when booting a virtual machine. Takes a boolean value. Defaults to `no`.

`Firmware=`, `--firmware=`
: Configures the virtual machine firmware to use. Takes one of `uefi`, `uefi-secure-boot`, `bios`, `linux`, `linux-noinitrd` or `auto`. Defaults to `auto`. When set to `uefi`, the OVMF firmware without secure boot support is used. When set to `uefi-secure-boot`, the OVMF firmware with secure boot support is used. When set to `bios`, the default SeaBIOS firmware is used. When set to `linux`, direct kernel boot is used. See the `Linux=` option for more details on which kernel image is used with direct kernel boot. `linux-noinitrd` is identical to `linux` except that no initrd is used.
When set to `auto`, `uefi-secure-boot` is used if possible and `linux` otherwise.

`FirmwareVariables=`, `--firmware-variables=`
: Configures the path to the virtual machine firmware variables file to use. Currently, this option is only taken into account when the `uefi` or `uefi-secure-boot` firmware is used. If not specified, **mkosi** will search for the default variables file and use that instead.

  When set to `microsoft`, a firmware variables file with the Microsoft secure boot certificates already enrolled will be used.

  When set to `microsoft-mok`, a firmware variables file with the Microsoft secure boot certificates already enrolled will be extended with a `MokList` variable containing the secure boot certificate from `SecureBootCertificate=`. This is intended to be used together with shim binaries signed by the distribution and locally signed EFI binaries.

  When set to `custom`, the secure boot certificate from `SecureBootCertificate=` will be enrolled into the default firmware variables file.

  `virt-fw-vars` from the [virt-firmware](https://gitlab.com/kraxel/virt-firmware) project can be used to customize OVMF variable files.

`Linux=`, `--linux=`
: Set the kernel image to use for **qemu** direct kernel boot. If not specified, **mkosi** will use the kernel provided via the command line (`-kernel` option) or the latest kernel that was installed into the image (or fail if no kernel was installed into the image). Note that when the `cpio` output format is used, direct kernel boot is used regardless of the configured firmware. Depending on the configured firmware, **qemu** might boot the kernel directly or via the configured firmware.

  This setting may include both the regular specifiers (see **Specifiers**) and special delayed specifiers that are expanded after config parsing has finished, instead of during config parsing, which are described below.

  The following specifiers may be used:

  | Specifier | Value                                              |
  |-----------|----------------------------------------------------|
  | `&&`      | `&` character                                      |
  | `&b`      | The final build directory (including subdirectory) |

`Drives=`, `--drive=`
: Add a drive. Takes a colon-delimited string of format `<id>:<size>[:<directory>[:<options>[:<file-id>[:<flags>]]]]`.

  `id` specifies the ID assigned to the drive. This can be used as the `drive=` property in various **qemu** devices. `size` specifies the size of the drive. This takes a size in bytes. Additionally, the suffixes `K`, `M` and `G` can be used to specify a size in kilobytes, megabytes and gigabytes respectively. `directory` optionally specifies the directory in which to create the file backing the drive. If unset, the file will be created under `/var/tmp`. `options` optionally specifies extra comma-delimited properties which are passed verbatim to **qemu**'s `-blockdev` option. `file-id` specifies the ID of the file backing the drive. If unset, this defaults to the drive ID. Drives with the same file ID will share the backing file. The directory and size of the file will be determined from the first drive with a given file ID. `flags` takes a comma-separated list of drive flags, which currently only supports `persist`. `persist` determines whether the drive will be persisted across **qemu** invocations.

  The files backing the drives will be created with the schema `<directory>/mkosi-drive-<machine>-<file-id>`. You can skip values by setting them to the empty string; specifying e.g. `myfs:1G::::persist` will create a persistent drive under `/var/tmp/mkosi-drive-main-myfs`.
**Example usage:**

```ini
[Runtime]
Drives=btrfs:10G ext4:20G
QemuArgs=-device nvme,serial=btrfs,drive=btrfs -device nvme,serial=ext4,drive=ext4
```

`QemuArgs=`
: Space-delimited list of additional arguments to pass when invoking **qemu**.

`Ephemeral=`, `--ephemeral=`
: When used with the `shell`, `boot`, or `vm` verbs, this option runs the specified verb on a temporary snapshot of the output image that is removed immediately when the container terminates. Taking the temporary snapshot is more efficient on file systems that support reflinks natively (**btrfs** or **xfs**) than on more traditional file systems that do not (ext4).

`Credentials=`, `--credential=`
: Set credentials to be passed to **systemd-nspawn** or the virtual machine respectively when `mkosi shell/boot` or `mkosi vm` are used. This option takes a space-separated list of values which can be either key=value pairs or paths. If a path is provided and it is a file, the credential name will be the name of the file. If the file is executable, the credential value will be the output of executing the file. Otherwise, the credential value will be the contents of the file. If the path is a directory, the same logic applies to each file in the directory.

  Note that values will only be treated as paths if they do not contain the delimiter (`=`).

`KernelCommandLineExtra=`, `--kernel-command-line-extra=`
: Set extra kernel command line entries that are appended to the kernel command line at runtime when booting the image. When booting in a container, these are passed as extra arguments to systemd. When booting in a VM, these are appended to the kernel command line via the SMBIOS `io.systemd.stub.kernel-cmdline-extra` OEM string. This will only be picked up by **systemd-boot** and **systemd-stub** versions newer than or equal to v254.

`RuntimeTrees=`, `--runtime-tree=`
: Takes a colon-separated pair of paths. The first path refers to a directory to mount into any machine (container or VM) started by mkosi. The second path refers to the target directory inside the machine. If the second path is not provided, the directory is mounted at `/root/src` in the machine. If the second path is relative, it is interpreted relative to `/root/src` in the machine.

  For each mounted directory, the uid and gid of the user running mkosi are mapped to the root user in the machine. This means that all the files and directories will appear as if they're owned by root in the machine, and all new files and directories created by root in the machine in these directories will be owned by the user running mkosi on the host.

  Note that when using `mkosi vm` with this feature, systemd v254 or newer has to be installed in the image.

`RuntimeSize=`, `--runtime-size=`
: If specified, disk images are grown to the specified size when they're booted with `mkosi boot` or `mkosi vm`. Takes a size in bytes. Additionally, the suffixes `K`, `M` and `G` can be used to specify a size in kilobytes, megabytes and gigabytes respectively.

`RuntimeNetwork=`, `--runtime-network=`
: Takes one of `user`, `interface` or `none`. Defaults to `user`. Specifies the networking to set up when booting the image. `user` sets up usermode networking. `interface` sets up a virtual network connection between the host and the image. This translates to a veth interface for `mkosi shell` and `mkosi boot` and a tap interface for `mkosi vm` and `mkosi vmspawn`.

  Note that when using `interface`, **mkosi** does not automatically configure the host interface.
It is expected that a recent version of **systemd-networkd** is running on the host, which will automatically configure the host interface of the link.

`RuntimeBuildSources=`, `--runtime-build-sources=`
: Mount the build sources configured with `BuildSources=` and the build directory (if one is configured) to the same locations in `/work` that they were mounted to when running the build script when using `mkosi boot` or `mkosi vm`.

`BindUser=`, `--bind-user=`
: Bind the home directory of the current user into the container/VM. Takes a boolean. Disabled by default.

`UnitProperties=`, `--unit-property=`
: Configure systemd unit properties to add to the systemd scopes allocated when using `mkosi boot` or `mkosi vm`. These are passed directly to the `--property=` options of **systemd-nspawn** and **systemd-run** respectively.

`SshKey=`, `--ssh-key=`
: Path to the X.509 private key in PEM format to use to connect to a virtual machine started with `mkosi vm` and built with the `Ssh=` option enabled (or with **systemd-ssh-generator** installed) via the `mkosi ssh` command. If not configured and `mkosi.key` exists in the working directory, it will automatically be used for this purpose. Run `mkosi genkey` to automatically generate a key in `mkosi.key`.

`SshCertificate=`, `--ssh-certificate=`
: Path to the X.509 certificate in PEM format to provision as the SSH public key in virtual machines started with `mkosi vm`. If not configured and `mkosi.crt` exists in the working directory, it will automatically be used for this purpose. Run `mkosi genkey` to automatically generate a certificate in `mkosi.crt`.

`Machine=`, `--machine=`
: Specify the machine name to use when booting the image. Can also be used to refer to a specific image when SSH-ing into an image (e.g. `mkosi --image=myimage ssh`).

  Note that `Ephemeral=` has to be enabled to start multiple instances of the same image.

`Register=`, `--register=`
: Takes a boolean value or `auto`. Specifies whether to register the VM/container with systemd-machined. If enabled, mkosi will fail if it can't register the VM/container with systemd-machined. If disabled, mkosi will not register the VM/container with systemd-machined. If `auto`, mkosi will register the VM/container with systemd-machined if it is available. Defaults to `auto`.

`ForwardJournal=`, `--forward-journal=`
: Specify the path to which journal logs from containers and virtual machines should be forwarded. If the path has the `.journal` extension, it is interpreted as a file to which the journal should be written. Otherwise, the path is interpreted as a directory to which the journal should be written. Note that systemd v256 or newer is required in the virtual machine for log forwarding to work.

  Note that if a path with the `.journal` extension is given, the journal size is limited to `4G`. Configure an output directory instead of a file if your workload produces more than `4G` worth of journal data.

`StorageTargetMode=`, `--storage-target-mode=`
: Specifies whether the `serve` verb should start **systemd-storagetm** to serve disk images over NVMe-TCP. Takes a boolean value or `auto`. If enabled, systemd-storagetm is always started and mkosi will fail if it cannot start systemd-storagetm. If disabled, systemd-storagetm is never started. If `auto`, systemd-storagetm will be started if a disk image is being built, the systemd-storagetm binary is found and `mkosi serve` is being invoked as the root user.
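To tie several of the above settings together, a hypothetical `[Runtime]` section that forwards journal logs to a directory and uses the key and certificate generated by `mkosi genkey` might look like this (the `logs/` path is only an example):

```ini
[Runtime]
ForwardJournal=logs/
SshKey=mkosi.key
SshCertificate=mkosi.crt
```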
`SysupdateDirectory=`, `--sysupdate-directory=`
: Path to a directory containing systemd-sysupdate transfer definition files that are used by `mkosi sysupdate`. If `mkosi.sysupdate/` exists in the local directory, it will be used for this purpose as well.

  Note that `mkosi sysupdate` invokes `systemd-sysupdate` with `--transfer-source=` set to the **mkosi** output directory. To make use of this in a transfer definition file, set `PathRelativeTo=explicit` to have the `Path=` setting for the transfer source be interpreted relative to the **mkosi** output directory. Generally, configuring `PathRelativeTo=explicit` and `Path=/` for the transfer source is sufficient for the match pattern to be interpreted relative to the **mkosi** output directory.

### [Match] Section

`Profiles=`
: Matches against the configured profiles.

`Distribution=`
: Matches against the configured distribution.

`Release=`
: Matches against the configured distribution release. If this condition is used and no distribution has been explicitly configured yet, the host distribution and release are used.

`Architecture=`
: Matches against the configured architecture. If this condition is used and no architecture has been explicitly configured yet, the host architecture is used.

: `Architecture=uefi` can be used to match against any architecture that supports UEFI.

`Repositories=`
: Matches against repositories enabled with the `Repositories=` setting. Takes a single repository name.

`PathExists=`
: This condition is satisfied if the given path exists. Relative paths are interpreted relative to the parent directory of the config file that the condition is read from.

`ImageId=`
: Matches against the configured image ID, supporting globs. If this condition is used and no image ID has been explicitly configured yet, this condition fails.

`ImageVersion=`
: Matches against the configured image version. Image versions can be prepended by the operators `==`, `!=`, `>=`, `<=`, `<`, `>` for rich version comparisons according to the UAPI group version format specification. If no operator is prepended, the equality operator is assumed by default. If this condition is used and no image version has been explicitly configured yet, this condition fails.

`Bootable=`
: Matches against the configured value for the `Bootable=` feature. Takes a boolean value or `auto`.

`Format=`
: Matches against the configured value for the `Format=` option. Takes an output format (see the `Format=` option).

`SystemdVersion=`
: Matches against the systemd version on the host (as reported by `systemctl --version`). Values can be prepended by the operators `==`, `!=`, `>=`, `<=`, `<`, `>` for rich version comparisons according to the UAPI group version format specification. If no operator is prepended, the equality operator is assumed by default.

`BuildSources=`
: Takes a build source target path (see `BuildSources=`). This match is satisfied if any of the configured build sources uses this target path. For example, if we have a `mkosi.conf` file containing:

  ```ini
  [Build]
  BuildSources=../abc/qed:kernel
  ```

  and a drop-in containing:

  ```ini
  [Match]
  BuildSources=kernel
  ```

  the drop-in will be included.

  Any absolute paths passed to this setting are interpreted relative to the current working directory.

`HostArchitecture=`
: Matches against the host's native architecture. See the `Architecture=` setting for a list of possible values.

`ToolsTreeDistribution=`
: Matches against the configured tools tree distribution.
`ToolsTreeRelease=`
: Matches against the configured tools tree release.

`Environment=`
: Matches against a specific key/value pair configured with `Environment=`. If no value is provided, the match checks whether the given key is in the environment, regardless of which value it has.

`Image=`
: Matches against the current (sub)image name. The name of a subimage is its name in `mkosi.images/` (without any `.conf` suffix). The name of the top level image is `main`. The main use case is to allow having a shared config that can be included by both the top level image and subimages, by gating the universal settings behind an `Image=main` match.

This table shows which matchers support globs and rich comparisons, and the default value that is matched against if no value has been configured at the time the config file is read:

| Matcher                  | Globs | Rich Comparisons | Default                                                                                 |
|--------------------------|-------|------------------|-----------------------------------------------------------------------------------------|
| `Profiles=`              | no    | no               | match fails                                                                             |
| `Distribution=`          | no    | no               | match host distribution                                                                 |
| `Release=`               | no    | no               | match host release                                                                      |
| `Architecture=`          | no    | no               | match host architecture                                                                 |
| `PathExists=`            | no    | no               | n/a                                                                                     |
| `ImageId=`               | yes   | no               | match fails                                                                             |
| `ImageVersion=`          | no    | yes              | match fails                                                                             |
| `Bootable=`              | no    | no               | match auto feature                                                                      |
| `Format=`                | no    | no               | match default format                                                                    |
| `SystemdVersion=`        | no    | yes              | n/a                                                                                     |
| `BuildSources=`          | no    | no               | match fails                                                                             |
| `HostArchitecture=`      | no    | no               | n/a                                                                                     |
| `ToolsTreeDistribution=` | no    | no               | match the fallback tools tree distribution (see `ToolsTreeDistribution=` in `[Build]`)  |
| `ToolsTreeRelease=`      | no    | no               | match default tools tree release                                                        |
| `Environment=`           | no    | no               | n/a                                                                                     |
| `Image=`                 | no    | no               | n/a                                                                                     |

### [Include]

`Include=`, `--include=`, `-I`
: Include extra configuration from the given file or directory. The extra configuration is included immediately after parsing the setting, except when used on the command line, in which case the extra configuration is included after parsing all command line arguments. Note that each path containing extra configuration is only parsed once, even if included more than once with `Include=`.

  The builtin configs for the **mkosi** default initrd, default tools tree, default virtual machine image and default UKI addon can be included by including the literal value `mkosi-initrd`, `mkosi-tools`, `mkosi-vm` or `mkosi-addon` respectively.

  Note: Include names starting with either of the literals `mkosi-` or `contrib-` are reserved for use by **mkosi** itself.

### [Config] Section

`Profiles=`, `--profile=`
: Select the given profiles. A profile is a configuration file or directory in the `mkosi.profiles/` directory. The configuration files and directories of each profile are included after parsing the `mkosi.conf.d/*.conf` drop-in configuration.

`Dependencies=`, `--dependency=`
: The images that this image depends on, specified as a comma-separated list. All images configured in this option will be built before this image. When this setting is specified for the "main" image, it specifies which subimages should be built. See the **BUILDING MULTIPLE IMAGES** section for more information.

`MinimumVersion=`, `--minimum-version=`
: The minimum **mkosi** version required to build this configuration. If specified multiple times, the highest specified version is used.
The minimum version can also be specified as a git commit hash when prefixed with `commit:`, in which case mkosi must be executed from a git checkout and the specified git commit hash must be an ancestor of the currently checked out git commit in the repository that mkosi is being executed from.

`ConfigureScripts=`, `--configure-script=`
: Takes a comma-separated list of paths to executables that are used as the configure scripts for this image. See the **SCRIPTS** section for more information.

`PassEnvironment=`, `--pass-environment=`
: Takes a list of environment variable names separated by spaces. When building multiple images, pass the listed environment variables to each individual subimage as if they were "universal" settings. See the **BUILDING MULTIPLE IMAGES** section for more information.

### [UKIProfile] Section

The `UKIProfile` section can be used in UKI profile config files, which are passed to the `UnifiedKernelImageProfiles=` setting. The following settings can be specified in the `UKIProfile` section:

`Profile=`
: The contents of the `.profile` section of the UKI profile. Takes a list of key/value pairs separated by `=`. The `ID=` key must be specified. See the UKI [specification](https://uapi-group.org/specifications/specs/unified_kernel_image/#multi-profile-ukis) for a full list of possible keys.

`Cmdline=`
: Extra kernel command line options for the UKI profile. Takes a space delimited list of extra kernel command line arguments. Note that the final `.cmdline` section will be the combination of the base `.cmdline` section and the extra kernel command line arguments specified with this setting.

`SignExpectedPcr=`
: Sign expected PCR measurements for this UKI profile. Takes a boolean. Enabled by default.

## Specifiers

The current value of various settings can be accessed when parsing configuration files by using specifiers. To write a literal `%` character in a configuration file without treating it as a specifier, use `%%`. The following specifiers are understood:

| Setting            | Specifier |
|--------------------|-----------|
| `Distribution=`    | `%d`      |
| `Release=`         | `%r`      |
| `Architecture=`    | `%a`      |
| `Format=`          | `%t`      |
| `Output=`          | `%o`      |
| `OutputDirectory=` | `%O`      |
| `ImageId=`         | `%i`      |
| `ImageVersion=`    | `%v`      |

There are also specifiers that are independent of settings:

| Specifier | Value                                          |
|-----------|------------------------------------------------|
| `%C`      | Parent directory of current config file        |
| `%P`      | Current working directory                      |
| `%D`      | Directory that **mkosi** was invoked in        |
| `%I`      | Name of the current subimage in `mkosi.images` |

Finally, there are specifiers that are derived from a setting:

| Specifier | Value                                                 |
|-----------|-------------------------------------------------------|
| `%F`      | The default filesystem of the configured distribution |

Note that the current working directory changes as **mkosi** parses its configuration. Specifically, each time **mkosi** parses a directory containing a `mkosi.conf` file, **mkosi** changes its working directory to that directory.

Note that the directory that **mkosi** was invoked in is influenced by the `--directory=` command line argument.
The following table shows example values for the directory specifiers listed above:

|      | `$D/mkosi.conf` | `$D/mkosi.conf.d/abc/abc.conf` | `$D/mkosi.conf.d/abc/mkosi.conf` |
|------|-----------------|--------------------------------|----------------------------------|
| `%C` | `$D`            | `$D/mkosi.conf.d`              | `$D/mkosi.conf.d/abc`            |
| `%P` | `$D`            | `$D`                           | `$D/mkosi.conf.d/abc`            |
| `%D` | `$D`            | `$D`                           | `$D`                             |

## Supported distributions

Images may be created containing installations of the following distributions:

* *Fedora Linux*
* *Debian*
* *Kali Linux*
* *Ubuntu*
* *Arch Linux*
* *openSUSE*
* *Mageia*
* *CentOS*
* *RHEL*
* *RHEL UBI*
* *OpenMandriva*
* *Rocky Linux*
* *Alma Linux*
* *Azure Linux*
* *postmarketOS*
* *None* (**Requires the user to provide a pre-built rootfs**)

In theory, any distribution may be used on the host for building images containing any other distribution, as long as the necessary tools are available. Specifically, any distribution that packages **apt** may be used to build *Debian*, *Kali* or *Ubuntu* images. Any distribution that packages **dnf** may be used to build images for any of the RPM-based distributions. Any distribution that packages **pacman** may be used to build *Arch Linux* images. Any distribution that packages **zypper** may be used to build *openSUSE* images.

Other distributions and build automation tools for embedded Linux systems, such as Buildroot, OpenEmbedded and Yocto Project, may be used by selecting the `custom` distribution and populating the rootfs via a combination of base trees, skeleton trees, and prepare scripts.

Currently, *Fedora Linux* packages all relevant tools as of Fedora 28.

Note that when not using a custom mirror, `RHEL` images can only be built from a host system with a `RHEL` subscription (established using e.g. `subscription-manager`).

# EXECUTION FLOW

Execution flow for `mkosi build`. Default values/calls are shown in parentheses. When building with `--incremental=yes`, **mkosi** creates a cache of the distribution installation if one does not already exist and replaces the distribution installation in consecutive runs with data from the cached one.

1. Parse CLI options
1. Parse configuration files
1. Run configure scripts (`mkosi.configure`)
1. If we're not running as root, unshare the user namespace and map the subuid range configured in `/etc/subuid` and `/etc/subgid` into it.
1. Unshare the mount namespace
1. Remount the following directories read-only if they exist:
   - `/usr`
   - `/etc`
   - `/opt`
   - `/srv`
   - `/boot`
   - `/efi`
   - `/media`
   - `/mnt`

Then, for each image, we execute the following steps:

1. Copy sandbox trees into the workspace
1. Sync the package manager repository metadata
1. Run sync scripts (`mkosi.sync`)
1. Copy base trees (`--base-tree=`) into the image
1. Reuse a cached image if one is available
1. Copy a snapshot of the package manager repository metadata into the image
1. Copy skeleton trees (`mkosi.skeleton`) into image
1. Install distribution and packages into image
1. Run prepare scripts on image with the `final` argument (`mkosi.prepare`)
1. Install build packages in overlay if any build scripts are configured
1. Run prepare scripts on overlay with the `build` argument if any build scripts are configured (`mkosi.prepare`)
1. Cache the image if configured (`--incremental=yes`)
1. Run build scripts on image + overlay if any build scripts are configured (`mkosi.build`)
1. Finalize the build if the output format `none` is configured
1. Copy the build scripts' outputs into the image
1. Copy the extra trees into the image (`mkosi.extra`)
1. Run post-install scripts (`mkosi.postinst`)
1. Write config files required for `Ssh=`, `Autologin=` and `MakeInitrd=`
1. Install systemd-boot and configure secure boot if configured (`--secure-boot=yes`)
1. Run **systemd-sysusers**
1. Run **systemd-tmpfiles**
1. Run `systemctl preset-all`
1. Run **depmod**
1. Run **systemd-firstboot**
1. Run **systemd-hwdb**
1. Remove packages and files (`RemovePackages=`, `RemoveFiles=`)
1. Run SELinux relabel if an SELinux policy is installed
1. Run finalize scripts (`mkosi.finalize`)
1. Generate unified kernel image if configured to do so
1. Generate final output format
1. Run post-output scripts (`mkosi.postoutput`)

# SCRIPTS

To allow for image customization that cannot be implemented using **mkosi**'s builtin features, **mkosi** supports running scripts at various points during the image build process that can customize the image as needed. Scripts are executed on the host system as root (either real root or root within the user namespace that **mkosi** created when running unprivileged) with a customized environment to simplify modifying the image. For each script, the configured build sources (`BuildSources=`) are mounted into the current working directory before running the script in the current working directory. `$SRCDIR` is set to point to the current working directory.

The following scripts are supported:

* If **`mkosi.configure`** (`ConfigureScripts=`) exists, it is executed before building the image. This script may be used to dynamically modify the configuration. It receives the configuration serialized as JSON on stdin and should output the modified configuration serialized as JSON on stdout. Note that this script only runs when building or booting the image (`build`, `vm`, `boot` and `shell` verbs). If a default tools tree is configured, it will be built before running the configure scripts and the configure scripts will run with the tools tree available. This also means that the modifications made by configure scripts will not be visible in the `summary` output.

* If **`mkosi.sync`** (`SyncScripts=`) exists, it is executed before the image is built. This script may be used to update various sources that are used to build the image. One use case is to run `git pull` on various source repositories before building the image. Specifically, the `BuildSourcesEphemeral=` setting does not apply to sync scripts, which means sync scripts can be used to update build sources even if `BuildSourcesEphemeral=` is enabled.

* If **`mkosi.prepare`** (`PrepareScripts=`) exists, it is first called with the `final` argument, right after the software packages are installed. It is called a second time with the `build` command line parameter, right after the build packages are installed and the build overlay is mounted on top of the image's root directory. This script has network access and may be used to install packages from other sources than the distro's package manager (e.g. **pip**, **npm**, ...), after all software packages are installed but before the image is cached (if incremental mode is enabled). In contrast to a general purpose installation, it is safe to install packages to the system (`pip install`, `npm install -g`) instead of in `$SRCDIR` itself, because the build image is only used for a single project and can easily be thrown away and rebuilt, so there's no risk of conflicting dependencies and no risk of polluting the host system.
* If **`mkosi.build`** (`BuildScripts=`) exists, it is executed with the build overlay mounted on top of the image's root directory. When running the build script, `$DESTDIR` points to a directory where the script should place any files it generates that should end up in the image. Note that **make**-, **automake**-, and **meson**-based build systems generally honor `$DESTDIR`, thus making it very natural to build *source* trees from the build script. After running the build script, the contents of `$DESTDIR` are copied into the image.

* If **`mkosi.postinst`** (`PostInstallationScripts=`) exists, it is executed after the (optional) build tree and extra trees have been installed. This script may be used to alter the images without any restrictions, after all software packages and built sources have been installed.

* If **`mkosi.finalize`** (`FinalizeScripts=`) exists, it is executed as the last step of preparing an image.

* If **`mkosi.postoutput`** (`PostOutputScripts=`) exists, it is executed right after all the output files have been generated, before they are finally moved into the output directory. This can be used to generate additional or alternative outputs, e.g. `SHA256FILES` or SBOM manifests.

* If **`mkosi.clean`** (`CleanScripts=`) exists, it is executed right after the outputs of a previous build have been cleaned up. A clean script can clean up any outputs that **mkosi** does not know about (e.g. artifacts from `SplitArtifacts=partitions` or RPMs built in a build script). Note that this script does not use the tools tree even if one is configured.

* If **`mkosi.version`** exists and is executable, it is run during configuration parsing and populates `ImageVersion=` with the output on stdout. This can be used for external version tracking, e.g. with `git describe` or `date '+%Y-%m-%d'`. Note that this script is executed on the host system without any sandboxing.

* If **`mkosi.rootpw`** exists and is executable, it is run during configuration parsing and populates `RootPassword=` with the output on stdout. This can be used to randomly generate a password; it can be remembered by outputting it to stderr or by reading `$MKOSI_CONFIG` in another script (e.g. `mkosi.postoutput`). Note that this script is executed on the host system without any sandboxing.

If a script uses the `.chroot` extension, **mkosi** will chroot into the image using **mkosi-chroot** (see below) before executing the script. For example, if `mkosi.postinst.chroot` exists, **mkosi** will chroot into the image and execute it as the post-installation script.

Instead of a single file script, **mkosi** will also read all files in lexicographical order from appropriately named `.d` directories, e.g. all files in a `mkosi.build.d` directory would be used as build scripts. This is supported by

* `mkosi.sync.d`,
* `mkosi.prepare.d`,
* `mkosi.build.d`,
* `mkosi.postinst.d`,
* `mkosi.finalize.d`,
* `mkosi.postoutput.d`, and
* `mkosi.clean.d`.

This can be combined with the `.chroot` extension, e.g. `mkosi.build.d/01-foo.sh` would be run without chrooting into the image and `mkosi.build.d/02-bar.sh.chroot` would be run after chrooting into the image first.

Scripts executed by **mkosi** receive the following environment variables:

* `$ARCHITECTURE` contains the architecture from the `Architecture=` setting. If `Architecture=` is not set, it will contain the native architecture of the host machine. See the documentation of `Architecture=` for possible values for this variable.
* `$QEMU_ARCHITECTURE` contains the architecture from `$ARCHITECTURE` in the format used by **qemu**. Useful for finding the qemu binary (`qemu-system-$QEMU_ARCHITECTURE`).

* `$EFI_ARCHITECTURE` contains the architecture from `$ARCHITECTURE` in the format used by **UEFI**. It is unset on architectures that do not support **UEFI**.

* `$DISTRIBUTION` contains the distribution from the `Distribution=` setting.

* `$RELEASE` contains the release from the `Release=` setting.

* `$DISTRIBUTION_ARCHITECTURE` contains the architecture from `$ARCHITECTURE` in the format used by the configured distribution.

* `$PROFILES` contains the profiles from the `Profiles=` setting as a comma-delimited string.

* `$CACHED` is set to `1` if a cached image is available, `0` otherwise.

* `$CHROOT_SCRIPT` contains the path to the running script relative to the image root directory. The primary use case for this variable is in combination with the **mkosi-chroot** script. See the description of **mkosi-chroot** below for more information.

* `$SRCDIR` contains the path to the directory **mkosi** was invoked from, with any configured build sources mounted on top. `$CHROOT_SRCDIR` contains the value that `$SRCDIR` will have after invoking **mkosi-chroot**.

* `$BUILDDIR` is only defined if `mkosi.builddir` exists and points to the build directory to use. This is useful for all build systems that support out-of-tree builds to reuse already built artifacts from previous runs. `$CHROOT_BUILDDIR` contains the value that `$BUILDDIR` will have after invoking **mkosi-chroot**.

* `$DESTDIR` is a directory into which any installed software generated by a build script may be placed. This variable is only set when executing a build script. `$CHROOT_DESTDIR` contains the value that `$DESTDIR` will have after invoking **mkosi-chroot**.

* `$OUTPUTDIR` points to the staging directory used to store build artifacts generated during the build. `$CHROOT_OUTPUTDIR` contains the value that `$OUTPUTDIR` will have after invoking **mkosi-chroot**.

* `$PACKAGEDIR` points to the directory containing the local package repository. Build scripts can add more packages to the local repository by writing the packages to `$PACKAGEDIR`.

* `$ARTIFACTDIR` points to the directory that is used to pass around build artifacts generated during the build and make them available for use by mkosi. This is similar to `$PACKAGEDIR`, but is meant for artifacts that may not be packages understood by the package manager, e.g. initrds created by other initrd generators than mkosi. Build scripts can add more artifacts to the directory by placing them in `$ARTIFACTDIR`. Files in this directory are only available for the current build and are not copied out like the contents of `$OUTPUTDIR`.

  **mkosi** will also use certain subdirectories of an artifacts directory to automatically use their contents at certain steps. Currently, the following two subdirectories in the artifact directory are used by mkosi:

  - `io.mkosi.microcode`: All files in this directory are used as microcode files, i.e. they are prepended to the initrds in lexicographical order.
  - `io.mkosi.initrd`: All files in this directory are used as initrds and joined in lexicographical order.

  It is recommended that users of `$ARTIFACTDIR` put things for their own use in a similar namespaced directory, e.g. `local.my.namespace`.

* `$BUILDROOT` is the root directory of the image being built, optionally with the build overlay mounted on top depending on the script that's being executed.
* `$WITH_DOCS` is either `0` or `1` depending on whether a build without or with installed documentation was requested (`WithDocs=yes`). A build script should suppress installation of any package documentation to `$DESTDIR` in case `$WITH_DOCS` is set to `0`.

* `$WITH_TESTS` is either `0` or `1` depending on whether a build without or with running the test suite was requested (`WithTests=no`). A build script should avoid running any unit or integration tests in case `$WITH_TESTS` is `0`.

* `$WITH_NETWORK` is either `0` or `1` depending on whether a build without or with networking is being executed (`WithNetwork=no`). A build script should avoid any network communication in case `$WITH_NETWORK` is `0`.

* `$SOURCE_DATE_EPOCH` is defined if requested (`SourceDateEpoch=TIMESTAMP`, `Environment=SOURCE_DATE_EPOCH=TIMESTAMP` or the host environment variable `$SOURCE_DATE_EPOCH`). This is useful to make builds reproducible. See [SOURCE_DATE_EPOCH](https://reproducible-builds.org/specs/source-date-epoch/) for more information.

* `$MKOSI_UID` and `$MKOSI_GID` are the UID and GID, respectively, of the user that invoked mkosi.

* `$MKOSI_CONFIG` is a file containing a JSON summary of the settings of the current image. This file can be parsed inside scripts to gain access to all settings for the current image.

* `$IMAGE_ID` contains the identifier from the `ImageId=` or `--image-id=` setting.

* `$IMAGE_VERSION` contains the version from the `ImageVersion=` or `--image-version=` setting.

* `$MKOSI_DEBUG` is either `0` or `1` depending on whether debugging output is enabled.

Consult this table for which script receives which environment variables:

| Variable                    | `configure` | `sync` | `prepare` | `build` | `postinst` | `finalize` | `postoutput` | `clean` |
|-----------------------------|:-----------:|:------:|:---------:|:-------:|:----------:|:----------:|:------------:|:-------:|
| `ARCHITECTURE`              | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `ARTIFACTDIR`               |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `BUILDDIR`                  |             |        |           | ✓       | ✓          | ✓          |              |         |
| `BUILDROOT`                 |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `CACHED`                    |             | ✓      |           |         |            |            |              |         |
| `CHROOT_BUILDDIR`           |             |        |           | ✓       |            |            |              |         |
| `CHROOT_DESTDIR`            |             |        |           | ✓       |            |            |              |         |
| `CHROOT_OUTPUTDIR`          |             |        |           |         | ✓          | ✓          |              |         |
| `CHROOT_SCRIPT`             |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `CHROOT_SRCDIR`             |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `MKOSI_DEBUG`               | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `DESTDIR`                   |             |        |           | ✓       |            |            |              |         |
| `DISTRIBUTION`              | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `DISTRIBUTION_ARCHITECTURE` | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `EFI_ARCHITECTURE`          | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `IMAGE_ID`                  | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `IMAGE_VERSION`             | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `MKOSI_CONFIG`              |             | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `MKOSI_GID`                 | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `MKOSI_UID`                 | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `OUTPUTDIR`                 |             |        |           |         | ✓          | ✓          | ✓            | ✓       |
| `PACKAGEDIR`                |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `PROFILES`                  | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          |              | ✓       |
| `QEMU_ARCHITECTURE`         | ✓           |        |           |         |            |            |              |         |
| `RELEASE`                   | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `SOURCE_DATE_EPOCH`         |             |        | ✓         | ✓       | ✓          | ✓          |              | ✓       |
| `SRCDIR`                    | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `WITH_DOCS`                 |             |        | ✓         | ✓       |            |            |              |         |
| `WITH_NETWORK`              |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `WITH_TESTS`                |             |        | ✓         | ✓       |            |            |              |         |
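To illustrate how these variables fit together, the following is a minimal sketch of a build script for a hypothetical **meson**-based project; the project itself and its use of **meson** are assumptions, not something **mkosi** requires:

```sh
#!/bin/sh
# mkosi.build (sketch): build out-of-tree when $BUILDDIR is available so
# that incremental builds can reuse compiled objects, honor $WITH_TESTS,
# and install into $DESTDIR so the results end up in the image.
set -e

builddir="${BUILDDIR:-$(mktemp -d)}"

# Only configure the build directory on the first run.
if [ ! -f "$builddir/build.ninja" ]; then
    meson setup "$builddir" "$SRCDIR"
fi

meson compile -C "$builddir"

if [ "$WITH_TESTS" = "1" ]; then
    meson test -C "$builddir"
fi

meson install -C "$builddir" --destdir "$DESTDIR"
```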
Additionally, when a script is executed, a few scripts are made available via `$PATH` to simplify common use cases.

* **mkosi-chroot**: This script will chroot into the image and execute the given command. On top of chrooting into the image, it will also mount various files and directories (`$SRCDIR`, `$DESTDIR`, `$BUILDDIR`, `$OUTPUTDIR`, `$CHROOT_SCRIPT`) into the image and modify the corresponding environment variables to point to the locations inside the image. It will also mount APIVFS filesystems (`/proc`, `/dev`, ...) to make sure scripts and tools executed inside the chroot work properly. It also propagates `/etc/resolv.conf` from the host into the chroot if requested so that DNS resolution works inside the chroot. After the mkosi-chroot command exits, various mount points are cleaned up.

  For example, to invoke **ls** inside of the image, use the following:

  ```sh
  mkosi-chroot ls ...
  ```

  To execute the entire script inside the image, add a `.chroot` suffix to the name (`mkosi.build.chroot` instead of `mkosi.build`, etc.).

* For all of the supported package managers (**dnf**, **rpm**, **apt**, **dpkg**, **pacman**, **zypper**), scripts of the same name are put into `$PATH` that make sure these commands operate on the image's root directory with the configuration supplied by the user instead of on the host system. This means that from a script, you can do e.g. `dnf install vim` to install vim into the image.

  Additionally, `mkosi-install`, `mkosi-reinstall`, `mkosi-upgrade` and `mkosi-remove` will invoke the corresponding operation of the package manager being used to build the image.

* **git** is automatically invoked with `safe.directory=*` to avoid permissions errors when running as the root user in a user namespace.

* **useradd** and **groupadd** are automatically invoked with `--root=$BUILDROOT` when executed outside of the image.

When scripts are executed, any directories that are still writable are also made read-only (`/home`, `/var`, `/root`, ...) and only the minimal set of directories that need to be writable remain writable. This is to ensure that scripts can't mess with the host system when **mkosi** is running as root.

Note that when executing scripts, all source directories are made ephemeral which means all changes made to source directories while running scripts are thrown away after the scripts finish executing. Use the output, build or cache directories if you need to persist data between builds.
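As a small example of the package manager wrappers in action, a hypothetical prepare script (the package name is illustrative) could stay distribution-agnostic like this:

```sh
#!/bin/sh
# mkosi.prepare (sketch): mkosi-install dispatches to whichever package
# manager is used for the image (dnf, apt, pacman, zypper, ...), so the
# same script works regardless of the target distribution.
set -e
mkosi-install vim
```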
# FILES

To make it easy to build images for development versions of your projects, **mkosi** can read configuration data from the local directory, under the assumption that it is invoked from a *source* tree. Specifically, the following files are used if they exist in the local directory:

* The **`mkosi.skeleton/`** directory or **`mkosi.skeleton.tar`** archive may be used to insert files into the image. The files are copied *before* the distribution packages are installed into the image. This allows creation of files that need to be provided early, for example to configure the package manager or set systemd presets.

  When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive.

* The **`mkosi.extra/`** directory or **`mkosi.extra.tar`** archive may be used to insert additional files into the image, on top of what the distribution includes in its packages. They are similar to `mkosi.skeleton/` and `mkosi.skeleton.tar`, but the files are copied into the directory tree of the image *after* the OS was installed.

  When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive.

* The **`mkosi.sandbox/`** directory or **`mkosi.sandbox.tar`** archive may be used to configure the package manager without the files being inserted into the image. If the files should be included in the image, `mkosi.skeleton/` and `mkosi.skeleton.tar` should be used instead.

  When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive.

* The **`mkosi.nspawn`** nspawn settings file will be copied into the same place as the output image file, if it exists. This is useful since nspawn looks for settings files next to image files it boots, for additional container runtime settings.

* The **`mkosi.cache/`** directory, if it exists, is automatically used as package download cache, in order to speed up repeated runs of the tool.

* The **`mkosi.builddir/`** directory, if it exists, is automatically used as out-of-tree build directory, if the build commands in the `mkosi.build` scripts support it. Specifically, this directory will be mounted into the build container, and the `$BUILDDIR` environment variable will be set to it when the build scripts are invoked. A build script may then use this directory as build directory, for **automake**-style or **ninja**-style out-of-tree builds. This speeds up builds considerably, in particular when **mkosi** is used in incremental mode (`-i`): not only the image and build overlay, but also the build tree is reused between subsequent invocations. Note that if this directory does not exist the `$BUILDDIR` environment variable is not set, and it is up to the build scripts to decide whether to do an in-tree or an out-of-tree build, and which build directory to use.

* The **`mkosi.rootpw`** file can be used to provide the password for the root user of the image. If the password is prefixed with `hashed:` it is treated as an already hashed root password. The password may optionally be followed by a newline character which is implicitly removed. The file must have an access mode of 0600 or less. If this file does not exist, the distribution's default root password is set (which usually means access to the root user is blocked).

* The **`mkosi.passphrase`** file provides the passphrase to use when LUKS encryption is selected. It should contain the passphrase literally, and not end in a newline character (i.e. in the same format as **cryptsetup** and `/etc/crypttab` expect the passphrase files). The file must have an access mode of 0600 or less.

* The **`mkosi.crt`** and **`mkosi.key`** files contain an X.509 certificate and PEM private key to use when signing is required (UEFI SecureBoot, verity, ...).

* The **`mkosi.output/`** directory is used to store all build artifacts.

* The **`mkosi.credentials/`** directory is used as a source of extra credentials similar to the `Credentials=` option. For each file in the directory, the filename will be used as the credential name and the file contents become the credential value, or, if the file is executable, **mkosi** will execute the file and the command's output to stdout will be used as the credential value. Output to stderr will be ignored. Credentials configured with `Credentials=` take precedence over files in `mkosi.credentials`.

* The **`mkosi.repart/`** directory is used as the source for **systemd-repart** partition definition files which are passed to **systemd-repart** when building a disk image.
  If it does not exist and the `RepartDirectories=` setting is not configured, **mkosi** will default to the following partition definition files:

  `00-esp.conf` (if we're building a bootable image):

  ```ini
  [Partition]
  Type=esp
  Format=vfat
  CopyFiles=/boot:/
  CopyFiles=/efi:/
  SizeMinBytes=512M
  SizeMaxBytes=512M
  ```

  `05-bios.conf` (if we're building a BIOS bootable image):

  ```ini
  [Partition]
  # UUID of the grub BIOS boot partition which grub needs on GPT to
  # embed itself into.
  Type=21686148-6449-6e6f-744e-656564454649
  SizeMinBytes=1M
  SizeMaxBytes=1M
  ```

  `10-root.conf`

  ```ini
  [Partition]
  Type=root
  Format=
  CopyFiles=/
  Minimize=guess
  ```

  Note that if either `mkosi.repart/` is found or `RepartDirectories=` is used, we will not use any of the default partition definitions.

All these files are optional.

Note that the location of all these files may also be configured during invocation via command line switches, and as settings in `mkosi.conf`, in case the default settings are not acceptable for a project.

# CACHING

**mkosi** supports three different caches for speeding up repetitive re-building of images. Specifically:

1. The package cache of the distribution package manager may be cached between builds. This is configured with the `--cache-directory=` option or the `mkosi.cache/` directory. This form of caching relies on the distribution's package manager, and caches distribution packages (RPM, deb, …) after they are downloaded, but before they are unpacked.

2. If the incremental build mode is enabled with `--incremental=yes`, cached copies of the final image and build overlay are made immediately before the build sources are copied in (for the build overlay) or the artifacts generated by `mkosi.build` are copied in (in case of the final image). This form of caching allows bypassing the time-consuming package unpacking step of the distribution package managers, but is only effective if the list of packages to use remains stable while the build sources and scripts change regularly. Note that this cache requires manual flushing: whenever the package list is modified the cached images need to be explicitly removed before the next re-build, using the `-f` switch.

3. Finally, between multiple builds the build artifact directory may be shared, using the `mkosi.builddir/` directory. This directory allows build systems such as Meson to reuse already compiled sources from a previous build, thus speeding up the build process of a `mkosi.build` build script.

The package cache and incremental mode are unconditionally useful. The final cache only applies to uses of **mkosi** with a source tree and build script. When all three are enabled together turn-around times for complete image builds are minimal, as only changed source files need to be recompiled.

# TOOLS TREES

Tools trees are a secondary image that mkosi can use to build the actual images. This is useful to make image builds more reproducible, but also allows using newer tooling that is not yet available in the host distribution running mkosi.

Tools trees can be provided via the `ToolsTree=` option or the `mkosi.tools` directory, or built automatically by mkosi if `ToolsTree=yes` is set. For most use cases the default tools trees are sufficient, and using a tools tree is recommended.
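For example, assuming the usual mapping between settings and command line switches, the default tools tree can be requested for a single build from the command line:

```sh
# Build the image using an automatically built default tools tree;
# equivalent to setting ToolsTree=yes in mkosi.conf.
mkosi --tools-tree=yes build
```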
Fully custom tools trees can be built like any other mkosi image, but mkosi provides a builtin include providing the default tools tree packages: ```bash mkosi --include=mkosi-tools --format=directory ``` Tools trees, including default tools trees, can be further customized via the different `ToolsTree*=` variables as well as the `mkosi.tools.conf` configuration file or directory. The output format for tools trees cannot currently be changed via configuration files. The following table shows for which distributions default tools tree packages are defined and which packages are included in those default tools trees: | | Fedora | CentOS | Debian | Kali | Ubuntu | Arch | openSUSE | postmarketOS | |-------------------------|:------:|:------:|:------:|:----:|:------:|:----:|:--------:|:------------:| | `acl` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `apt` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | `archlinux-keyring` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | `attr` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `bash` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `btrfs-progs` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `ca-certificates` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `coreutils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `cpio` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `createrepo_c` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `curl` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `debian-keyring` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | ✓ | | `diffutils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `distribution-gpg-keys` | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ | ✓ | | `dnf` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `dosfstools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `e2fsprogs` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `edk2-ovmf` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `erofs-utils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `findutils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `git` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `grep` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `grub-tools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | `jq` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `kali-archive-keyring` | | | | ✓ | | | | | | `kmod` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `less` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `mtools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `nano` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `opensc` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `openssh` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `openssl` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `pkcs11-provider` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `perf` | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ | ✓ | | `sed` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `pacman` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | ✓ | | `p11-kit` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `policycoreutils` | ✓ | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ | | `qemu` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `sbsigntools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `socat` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `squashfs-tools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `strace` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `swtpm` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `systemd` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `ukify` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `tar` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `ubuntu-keyring` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | | `util-linux` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `virtiofsd` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `virt-firmware` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `xfsprogs` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `xz` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `zstd` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | `zypper` | ✓ | | ✓ | ✓ | ✓ | ✓ | ✓ | | # BUILDING MULTIPLE IMAGES If the `mkosi.images/` directory exists, **mkosi** will load individual subimage configurations from it and build each of them. 
Image configurations can be either directories containing **mkosi** configuration files or regular files with the `.conf` extension.

When image configurations are found in `mkosi.images/`, **mkosi** will build the images specified in the `Dependencies=` setting of the main image and all of their dependencies (or all of them if no images were explicitly configured using `Dependencies=` in the main image configuration). To add dependencies between subimages, the `Dependencies=` setting can be used as well. Subimages are always built before the main image.

When images are defined, **mkosi** will first read the main image configuration (configuration outside of the `mkosi.images/` directory), followed by the image specific configuration.

Several "multiversal" settings apply to the default tools tree and to the main image and cannot be configured separately outside of the main image:

- `RepositoryKeyCheck=`
- `RepositoryKeyFetch=`
- `SourceDateEpoch=`
- `CacheOnly=`
- `WorkspaceDirectory=`
- `PackageCacheDirectory=`
- `BuildSources=`
- `BuildSourcesEphemeral=`
- `ProxyClientCertificate=`
- `ProxyClientKey=`
- `ProxyExclude=`
- `ProxyPeerCertificate=`
- `ProxyUrl=`

Several "universal" settings apply to the main image and all its subimages and cannot be configured separately in subimages. The following settings are universal and cannot be configured in subimages:

- `Architecture=`
- `BuildDirectory=`
- `CacheDirectory=`
- `Distribution=`
- `ExtraSearchPaths=`
- `Incremental=`
- `LocalMirror=`
- `Mirror=`
- `OutputDirectory=`
- `OutputMode=`
- `PackageDirectories=`
- `Release=`
- `RepartOffline=`
- `Repositories=`
- `SandboxTrees=`
- `ToolsTree=`
- `ToolsTreeCertificates=`
- `UseSubvolumes=`
- `SecureBootCertificate=`
- `SecureBootCertificateSource=`
- `SecureBootKey=`
- `SecureBootKeySource=`
- `VerityCertificate=`
- `VerityCertificateSource=`
- `VerityKey=`
- `VerityKeySource=`
- `VolatilePackageDirectories=`
- `WithNetwork=`
- `WithTests=`

There are also settings which are passed down to subimages but can be overridden. For these settings, values configured explicitly in the subimage will take priority over values configured on the CLI or in the main image config. Currently the following settings are passed down to subimages but can be overridden:

- `Profiles=`
- `ImageId=`
- `ImageVersion=`
- `SectorSize=`
- `CacheKey=`
- `BuildKey=`
- `CompressLevel=`
- `SignExpectedPcrKey=`
- `SignExpectedPcrKeySource=`
- `SignExpectedPcrCertificate=`
- `SignExpectedPcrCertificateSource=`

Additionally, there are various settings that can only be configured in the main image but which are not passed down to subimages:

- `MinimumVersion=`
- `PassEnvironment=`
- `ToolsTreeDistribution=`
- `ToolsTreeRelease=`
- `ToolsTreeProfiles=`
- `ToolsTreeMirror=`
- `ToolsTreeRepositories=`
- `ToolsTreeSandboxTrees=`
- `ToolsTreePackages=`
- `ToolsTreePackageDirectories=`
- `History=`
- Every setting in the `[Runtime]` section

Images can refer to outputs of images they depend on. Specifically, for the following options, **mkosi** will only check whether the inputs exist just before building the image:

- `BaseTrees=`
- `ExtraTrees=`
- `Initrds=`

To refer to outputs of an image's dependencies, simply configure any of these options with a relative path to the output to use in the output directory of the dependency. Or use the `%O` specifier to refer to the output directory.

A good example of how to build multiple images can be found in the [systemd](https://github.com/systemd/systemd/tree/main/mkosi/mkosi.images) repository.
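As a minimal sketch (all names are illustrative), a main image can consume the output of an `initrd` subimage like this:

```sh
# Layout:
#
#   mkosi.conf             - main image, with Dependencies=initrd and,
#                            for example, Initrds=%O/initrd in [Content]
#   mkosi.images/initrd/   - subimage, built before the main image
#
# A single invocation builds the subimage and then the main image:
mkosi build
```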
# ENVIRONMENT VARIABLES

* `$MKOSI_LESS` overrides options for **less** when it is invoked by **mkosi** to page output.

* `$MKOSI_DNF` can be used to override the executable used as **dnf**. This is particularly useful to select between **dnf** and **dnf5**.

* `$EPEL_MIRROR` can be used to override the default mirror location used for the epel repositories when `Mirror=` is used. By default **mkosi** looks for the epel repositories in the `fedora` subdirectory of the parent directory of the mirror specified in `Mirror=`. For example if the mirror is set to `https://mirror.net/centos-stream` **mkosi** will look for the epel repositories in `https://mirror.net/fedora/epel`.

* `$SYSEXT_SCOPE` and `$CONFEXT_SCOPE` can be used to override the default value of the respective `extension-release` file when building a sysext or confext. By default the value is set to `initrd system portable`.

# EXAMPLES

Create and run a raw *GPT* image with *ext4*, as `image.raw`:

```console
# mkosi -p systemd -i boot
```

Create and run a bootable *GPT* image, as `foobar.raw`:

```console
$ mkosi -d fedora -p kernel-core -p systemd -p systemd-boot -p udev -o foobar.raw
# mkosi --output foobar.raw boot
$ mkosi --output foobar.raw vm
```

Create and run a *Fedora Linux* image in a plain directory:

```console
# mkosi --distribution fedora --format directory boot
```

Create a compressed image `image.raw.xz` with SSH installed and add a checksum file:

```console
$ mkosi --distribution fedora --format disk --checksum=yes --compress-output=yes --package=openssh-clients
```

Inside the source directory of an **automake**-based project, configure **mkosi** so that simply invoking **mkosi** without any parameters builds an OS image containing a built version of the project in its current state:

```console
$ cat >mkosi.conf <<EOF
...
EOF
$ cat >mkosi.build <<EOF
...
EOF
```

On systems where AppArmor restricts unprivileged user namespaces (e.g. recent Ubuntu releases), an AppArmor profile for the **mkosi** binary is needed to allow **mkosi** to create user namespaces:

```
abi <abi/4.0>,
include <tunables/global>

/resolved/path/to/mkosi flags=(default_allow) {
  userns,
}
```

# FREQUENTLY ASKED QUESTIONS (FAQ)

- Why does `mkosi vm` with KVM not work on Debian/Kali/Ubuntu?

  While other distributions are OK with allowing access to `/dev/kvm`, on Debian/Kali/Ubuntu this is only allowed for users in the `kvm` group. Because **mkosi** unshares a user namespace when running unprivileged, it loses access to the `kvm` group even if the calling user was a member, so by the time we start **qemu** we don't have access to `/dev/kvm` anymore. As a workaround, you can change the permissions of the device nodes to `0666` which is sufficient to make KVM work unprivileged. To persist these settings across reboots, copy `/usr/lib/tmpfiles.d/static-nodes-permissions.conf` to `/etc/tmpfiles.d/static-nodes-permissions.conf` and change the mode of `/dev/kvm` from `0660` to `0666`.

- How do I add a regular user to an image?

  You can use the following snippet in a post-installation script:

  ```sh
  useradd --create-home --user-group $USER --password "$(openssl passwd -stdin -6 <$USER_PASSWORD_FILE)"
  ```

  Note that from systemd v256 onwards, if enabled, **systemd-homed-firstboot.service** will prompt to create a regular user on first boot if there are no regular users.

- Why do I see failures to chown files when building images?

  When not running as root, your user is not able to change ownership of files to arbitrary owners. Various distributions still ship files in their packages that are not owned by the root user.
  When not running as root, mkosi maps the current user to root when invoking package managers, which means that changing ownership to root will work but changing ownership to any other user or group will fail. Note that chown calls are only suppressed when running package managers, but not when running scripts. If this is required, e.g. for a build script, you can set the `$MKOSI_CHROOT_SUPPRESS_CHOWN` variable to a true value (`1`, `yes`, `true`) to suppress chown calls in **mkosi-chroot** and `.chroot` scripts.

  If this behavior causes applications running in your image to misbehave, you can consider running **mkosi** as root which avoids this problem. Alternatively, if running **mkosi** as root is not desired, you can use `unshare --map-auto --map-current-user --setuid 0 --setgid 0` to become root in a user namespace with more than one user assuming the UID/GID mappings in `/etc/subuid` and `/etc/subgid` are configured correctly. Note that running mkosi as root or with `unshare` means that all output files produced by **mkosi** will not be owned by your current user anymore.

  Note that for systemd services that need directories in `/var` owned by the service user and group, an alternative to shipping these directories in packages or creating them via systemd-tmpfiles is to use `StateDirectory=`, `CacheDirectory=` or `LogsDirectory=` in the service file, which instructs systemd to create the directory when it first starts the service. Alternatively, the `z` or `Z` directives for `systemd-tmpfiles` can be used to chown various directories and files to their owning user when the system first boots up.

- Why does `portablectl inspect <image>`/`systemd-dissect <image>` say my portable service isn't one?

  `systemd-dissect` and `portablectl inspect` check for `PORTABLE_PREFIXES=` in `os-release` and if the key is missing, will fail to recognise a portable service as one, showing ✗ under *Use as* in the case of `systemd-dissect` or `n/a` under *Portable Service* for `portablectl`. Since there is no good default to set for this key and the generated portable service images will still attach properly, even when the key is not set, **mkosi** doesn't set one. You can set `PORTABLE_PREFIXES=` in the `os-release` file yourself in a postinst script.

# REFERENCES

* [Primary mkosi git repository on GitHub](https://github.com/systemd/mkosi/)
* [mkosi — A Tool for Generating OS Images](https://0pointer.net/blog/mkosi-a-tool-for-generating-os-images.html) introductory blog post by Lennart Poettering
* [The mkosi OS generation tool](https://lwn.net/Articles/726655/) story on LWN

# SEE ALSO

**systemd-nspawn**(1), **systemd-repart**(8), **dnf**(8)

% mkosi.news(7)
%
%

# mkosi Changelog

## v26

- Extra options to commands invoked by mkosi (e.g. when using `mkosi boot` or `mkosi shell`) should now be delimited from regular options using `--`. Options passed after the verb without using the `--` delimiter are now interpreted as regular mkosi options.
- Boolean options specified on the command line now always expect a boolean argument. For example, `--repository-key-check` needs to become `--repository-key-check=yes`. The reason for this change is to remove ambiguity when parsing e.g. `--repository-key-check build` where `build` would be interpreted as the argument for `--repository-key-check` whereas now it'll be properly interpreted as the verb.
- Teach `--verity` a new `hash` value, which skips the verity signature partition for extension and portable images. To align the possible values, `yes` is renamed to `signed`.
- Teach `--verity` a new `defer` value, which defers creation of the verity signature partition for disk, extension and portable images (but still allocates space for it). This is useful to implement offline signing of the verity roothash.
- A new way to specify modules included in the initrd has been added. The new default is that modules need to be specified to be included. Previous separate Include and Exclude options that take regexps are now deprecated and are replaced by a single option that takes a list of positive and negative globs. The new options are `KernelModules=` and `KernelInitrdModules=`.
- A new way to specify firmware files included in the initrd has been added. Previous separate Include and Exclude options that take regexps are now deprecated and are replaced by a single option that takes a list of positive and negative globs. The new option is `FirmwareFiles=`.
- The `RuntimeScratch=` option has been dropped. Use `RuntimeSize=` instead to grow the image before booting it.
- The `CDROM=` option has been dropped.
- Unknown settings now generate a warning message instead of failing the image build. This allows configs to work across a wider range of mkosi versions while still taking advantage of newer settings. `MinimumVersion=` can still be used to enforce a minimum version of mkosi that knows all required settings.

## v25

- Instead of using bubblewrap, sandboxing is now done with a new tool `mkosi-sandbox`. This tool has a public API and can be used independently of mkosi.
- Image builds are now done in a user namespace with a single user when running unprivileged instead of using newuidmap/newgidmap. When running unprivileged, all files and directories in the image will be owned by the invoking user (and by root inside any produced archives). Any attempt to chown files to other users in scripts will fail unless the new environment variable `$MKOSI_CHROOT_SUPPRESS_CHOWN` is set to a true value.
- `mkosi` does not drop privileges anymore to the invoking user when running as root for various steps.
- A new `cat-config` verb will show all configuration files that were included for each configured image.
- Added support for Azure Linux
- Added support for Kali Linux
- If `mkosi.version` is executable, we now execute it and read the version from stdout.
- Added `--wipe-build-dir` to wipe the build directory before rebuilding the image.
- Introduced `RepositoryKeyFetch=` to control whether to fetch distribution GPG keys remotely. This setting is **disabled** by default for security reasons except when building rpm-based or Arch Linux images on Ubuntu.
- We now handle `SIGHUP` gracefully
- Universal settings that take a collection of values cannot be appended to anymore in subimages. Usage of package manager trees in subimages will have to be moved to the top level image. Similarly, repositories will have to be enabled in the top level image.
- Repository metadata is not copied into images anymore.
- Repository metadata from base trees is not used anymore.
- Package manager trees are now named sandbox trees.
- Package manager trees (sandbox trees) do not use the skeleton trees as their default anymore if unset.
- Note to packagers: The manual pages have been moved to resources/man and now include man pages for mkosi-initrd and mkosi-sandbox as well.
- `InitrdInclude=` was removed.
  If you're using `InitrdInclude=`, please build your initrd via a subimage in `mkosi.images` containing `Include=mkosi-initrd` and any customizations you wish to add and use the `Initrds=` setting to use it as the initrd for the main image instead of the default initrd.
- Added `History=` to have mkosi save the config used to build the image and reuse it when verbs such as `qemu`, `boot`, … are invoked without `-f`.
- Introduced new `[Build]` section and moved various settings to it.
- Moved `Include=` to `[Include]` section
- Added `sysupdate` verb as a wrapper around `systemd-sysupdate` which invokes it with definitions from `mkosi.sysupdate`.
- Added `RuntimeHome=` to mount the current home directory to `/root` when running a command that boots the image
- More directories aside from `/etc` and `/usr` are now picked up from sandbox trees (formerly known as package manager trees).
- Profile configuration from `mkosi.profiles` is now parsed after `mkosi.conf.d` instead of before it. To set defaults for use in `mkosi.conf.d` based on the configured profile, use an early dropin in `mkosi.conf.d` that matches on the configured profile instead.
- `Profile=` is renamed to `Profiles=` and takes a comma-separated list of profiles now. Scripts now receive `$PROFILES` with a space-separated list of profiles instead of `$PROFILE`. The `%p` specifier for profiles is removed.
- Multiple sync, prepare, build, postinst, finalize, postoutput and clean scripts are now picked up from `mkosi.$SCRIPT.d`.
- `run0` is now automatically used to escalate privileges for commands that need it, like the `burn` verb.
- `/usr/share/keyrings` and `/usr/share/distribution-gpg-keys` are no longer automatically picked up from the tools tree when `ToolsTreeCertificates=` is set, since they aren't certificates; use a sandbox tree instead. This allows one to override `SignedBy=` keys for APT repositories.
- The `agetty.autologin` and `login.noauth` credentials are no longer set unconditionally.
- Access to the output directory in build scripts was removed. To put artifacts from the build directory into the output directory, copy them from the build directory to the output directory in a post-installation script which does have access to the build directory and the output directory.
- `BuildDirectory=` is no longer available in `PrepareScripts=`. If you need to acquire some files for the build process place them somewhere sensible within `$BUILDROOT` so that they can be cached when building incrementally.
- When using a tools tree and a relaxed sandbox is used to run a command (qemu, nspawn, ...), we now keep all entries from `$PATH` outside of `/usr` intact. Note that this may cause issues if a `$PATH` entry contains binaries linked against libraries in `/usr` from the host.
- Introduced a new specifier `%I` which resolves to the name of the current subimage when used in a config under `mkosi.images/`. This differs from `%o` as it is always the name of the config file without extension (or the name of the directory).
- If `/dev/fuse` is found in the host context, it is made available in the sandbox context too.
- Added a `sandbox` verb to run a command within a relaxed mkosi sandbox (the same sandbox that `mkosi vm`, `mkosi boot`, ... run in).
- OpenSSL providers are now supported as key sources for the various key settings if a recent enough systemd version (257 or newer) is used.
- Added support for loading X.509 certificates from OpenSSL providers if a recent enough systemd version (257 or newer) is used.
- Added `ToolsTreePackageDirectories=`
- Added `--kernel-image=` to `mkosi-initrd` to specify the kernel image to use when building a UKI.
- Setting a collection-based setting to the empty string via the CLI and then appending to the same setting will now override the settings coming from configuration files, whereas previously the CLI values would be appended to the values from configuration files.
- The `mkosi-initrd` default config now includes various extra kernel modules by default.
- The `coredumpctl` and `journalctl` verbs will now always operate on the image, even if `ForwardJournal=` is configured.
- Bumped default Fedora release to `41`.
- Added `addon` output format to build UKI addons.
- Renamed `[Host]` section to `[Runtime]` section.
- Renamed various settings from `[Host]`.
- Binaries coming from `ExtraSearchPaths=` are now executed with the tools tree mounted if one is configured (unlike before where the tools tree was not mounted). This means that any binaries coming from `ExtraSearchPaths=` have to be linked against libraries from the tools tree (or have to be statically linked). Alternatively, the tools tree distribution and release have to match the host.
- Binaries from `ExtraSearchPaths=` are not used anymore when building the default tools tree.
- Dropped support for `pesign` as a secure boot signing tool.
- Added support for `systemd-sbsign` as a secure boot signing tool.
- Added `--register=` to control whether to register containers and VMs with systemd-machined or not.
- `mkosi.profiles` is now parsed in subimages as well.
- `mkosi-initrd` now uses `dnf5` on systems where it is the default.
- Added various packages to the default tools tree.
- Dropped support for Ubuntu Focal.
- Added `Devicetree=` setting for configuring bootloader device trees
- Added systemd-machined registration using varlink for `mkosi qemu` VMs, which includes the vsock CID so that `ssh vsock/<cid>` or `ssh machine/<machine>` will work on systems running `systemd-machined` 257 or newer.
- Bumped CentOS Stream default release to 10.
- mkosi now manages the pacman keyring itself so `/etc/pacman.d/gnupg` from the host is not used anymore and mkosi will run `pacman-key --init` and `pacman-key --populate` itself.
- Added `ToolsTreeRelease=` match
- mkosi now enforces that images built with `Overlay=yes` only add files on top of the base tree(s) and don't overwrite any existing files or directories.
- Added a `mkosi-addon` tool and accompanying kernel-install plugin that allows building PE addons to extend a vendor-provided unified kernel image.
- Added `systemd-boot-signed`, `uki-signed` and `grub-signed` variants for the `Bootloader=` option which instruct mkosi to only install pre-signed EFI binaries.
- `mkosi.profiles` is now parsed in configuration included with `Include=`.
- Any initrds configured with `Initrds=` are now used as fallback when booting with qemu direct kernel boot (`--firmware=linux`) if no split initrd was produced by the image build.
- mkosi now makes a greater effort to ensure the crypto-policies are configured to allow GPG keys from older distributions.
- We don't pick up pre-signed bootloader binaries anymore when `ShimBootloader=signed` is configured. To force usage of pre-signed EFI binaries, use the new `systemd-boot-signed`, `uki-signed` and `grub-signed` variants for the `Bootloader=` option.
- Added a new constant `microsoft-mok` for the `FirmwareVariables=` option.
If specified, a firmware variables file with the Microsoft keys enrolled will be extended to include a `MokList` entry that trusts the certificate configured with `SecureBootCertificate=` and passed to `qemu`. - We now use `mkosi.pkgcache` as the package cache directory if the directory exists. - `BuildSourcesEphemeral=` learned a new variant `buildcache` in which case the overlay will be cached in the build directory configured with `BuildDirectory=`. ## v24 - The default kernel command line of `console=ttyS0` (or equivalent for other architectures) has been removed. The required `console=` argument to have the kernel output to the serial console has to be added manually from `v24` onwards. - Support for installing local packages located in directories in `BuildSources=` was dropped. Instead, the packages can be made available for installation via `PackageManagerTrees=`. - Configuration parsing was reworked to remove the need for the `@` specifier and to streamline building multiple images with `mkosi.images/`. If you were building multiple images with `mkosi.images/`, you'll need to adapt your configuration to the rework. Read the **Building multiple images** section in the documentation for more information. - mkosi has gained the option to generate completion scripts for bash, fish and zsh. Packagers should generate the scripts during packaging and ship them in the appropriate places. - Added support for CentOS Stream 10. - mkosi now installs a separate `mkosi-initrd` script that can be used to build initramfs images intended for use on the local system. - We do not automatically append `centos-stream` or `fedora` anymore to CentOS (and derivatives) and Fedora mirrors specified with `Mirror=` as not all mirrors store the repository metadata under these subdirectories. Users are now required to add these subdirectories themselves in `Mirror=`. If the EPEL repositories are enabled for CentOS Stream (and derivatives) and `Mirror=` is used, we look for the EPEL repositories in `../fedora` relative to the mirror specified in `Mirror=`. - We now support compressed tar archives wherever we already accept tar archives as input. - We now always rerun the build if `Format=none` and don't remove previous outputs in that case (unless `--force` is specified). This allows using `mkosi -t none` to rerun the build scripts without removing the previous image. This can then be combined with `RuntimeBuildSources=yes` to make the build script outputs available in a booted container or virtual machine so they can be installed without having to rebuild the image. - We now use `virtconsole` to provide the serial console when booting with `qemu`. - `root=PARTUUID` and `mount.usr=PARTUUID` on the kernel command line are now automatically extended with the actual PARTUUID of the corresponding partition. - All available OpenSUSE repositories are now supported and can be enabled with `Repositories=`. - Building OpenSUSE `aarch64` images is now supported - `mkosi dependencies` was beefed up to handle more scenarios properly - The default list of kernel modules that are always added to the initramfs was extended with various virtualization modules. - Added a `Repositories=` match. - Cached images are now invalidated if packages specified via `PackageDirectories=` change. - Added `VolatilePackageDirectories=` which can be used to provide local packages that do not invalidate cached images. - `mkosi.pkgmngr` is now used as the default path for `PackageManagerTrees=`. 
- The package directory that build scripts can use to make built packages available for installation (`$PACKAGEDIR`) is now shared between all image builds. This means that packages built in earlier images and stored in `$PACKAGEDIR` become available for installation in all subsequent image builds.
- The default tools tree distribution is now chosen based on the host distribution instead of the target distribution.
- mkosi can now be invoked from the initramfs.

## v23.1

- Respin due to git tag mismatch

## v23

- Added `CleanScripts=` to allow running custom cleanup code whenever mkosi cleans up the output directory. This allows cleaning up extra outputs produced by e.g. a build script that mkosi doesn't know about.
- Added `ConfigureScripts=` to allow dynamically modifying the mkosi configuration. Each configure script receives the current config as JSON on stdin and should output the new config as JSON on stdout.
- When building a UKI, we don't measure for the TPM SHA1 PCR bank anymore.
- All keys in the mkosi config JSON output are now in pascal case, except for credentials and environments, where the keys encode names of credentials and environment variables and are therefore case sensitive.
- Added various settings to allow running mkosi behind a proxy.
- Various fixes to kernel module filtering that should result in fewer modules being pulled into the default initrd when `KernelModulesExclude=` or `KernelModulesInitrdExclude=` are used.
- Added `ToolsTreeDistribution=` match.
- Removed `vmspawn` verb and replaced it with `VirtualMachineMonitor=`.
- New specifiers for various directories were added. `%D` resolves to the directory that mkosi was invoked in, `%P` to the current working directory, and `%C` to the parent directory of the config file.
- Added `ForwardJournal=` to have systemd inside a container/VM forward its journal to the specified file or directory.
- Systemd scopes are now allocated for qemu, swtpm, virtiofsd and systemd-journal-remote if available.
- The `mkosi qemu` virtual machine is now registered with systemd-machined if available.
- Added new `oci` output format
- Runtime trees without a target are now mounted to `/root/src` instead of a subdirectory of it (to have the same behaviour as `BuildSources=`).
- Added `RuntimeBuildSources=` to mount build and source directories when booting the image with `mkosi nspawn` or `mkosi qemu`.
- Introduced `--append` to allow command line settings to be parsed after parsing configuration files.
- `distribution-release` is not installed by default anymore on OpenSUSE.
- Setting `QemuSmp=` to `0` will now make qemu use all available CPUs.
- Free page reporting and discard request processing are now enabled by default in VMs spawned by `mkosi qemu`.
- Added `ToolsTreeCertificates=` to allow configuring whether to use certificates and keys from the tools tree (if one is used) or the host.
- Added `never` for `CacheOnly=` to specify that repository metadata should always be refreshed.
- Renamed the `none` option for `CacheOnly=` to `auto`.
- Added `ProxyExclude=` to configure hostnames for which requests should not go through the configured proxy.
- The default tools tree is now reused on incremental builds.
- Added `VolatilePackages=` and `InitrdVolatilePackages=` to configure packages that should be installed after executing build scripts and which should not be cached when using `Incremental=`.
- `PackageDirectories=` now has an associated default path `mkosi.packages`.
- `reprepro` is now used to generate local apt repositories.
- Support for BSD tar/cpio was dropped.
- When both `ExtraSearchPaths=` and `ToolsTree=` are used, mkosi will now prefer running a binary found in `ExtraSearchPaths=` without the tools tree over running the binary from the tools tree. If a binary is not found in `ExtraSearchPaths=`, the tools tree is used instead.
- An artifact directory is now made available when running scripts which can be used to pass around data between different scripts. mkosi will also look for microcode and initrds in the artifact directory under the `io.mkosi.microcode` and `io.mkosi.initrd` subdirectories.
- Added `Environment=` match setting to check for environment variables defined with the `Environment=` setting.
- The `basesystem` package is now always installed in Fedora and CentOS images instead of the `filesystem` package.
- The `qemu`, `shell` and `boot` verbs do not automatically build the image anymore unless `--force` is specified.
- `SplitArtifacts=` is now supported for the portable, sysext and confext outputs.
- The `WithDocs=` option was implemented for pacman-based distributions.
- The default Fedora release was bumped to 40.
- `QemuSwtpm=` can now be used with `QemuFirmware=` set to `linux` or `bios`.
- Added `UnitProperties=` to allow configuring properties on the scopes generated by `systemd-nspawn` and `systemd-run`.
- mkosi now only builds a single default tools tree per build using the settings from the last regular image that we'll build.
- Configure scripts are now only executed for verbs which imply an image build and are executed with the tools tree instead of without it.
- `$QEMU_ARCHITECTURE` is now set for configure scripts to easily allow scripts to figure out which qemu binary will be used to run qemu.
- A file ID can now be specified for `QemuDrives=`. This allows adding multiple qemu drives that are backed by the same file.
- mkosi doesn't fail anymore if images already exist when running `mkosi build`.
- Image names from `mkosi.images/` are now preferred over the specified image ID when determining the output filename to use for an image.
- `--include` now has a shorthand option `-I`.
- The `WITH_NETWORK` environment variable is now passed to build and finalize scripts.
- We now clamp mtimes to the specified source date epoch timestamp instead of resetting all mtimes. This means that we won't touch any mtimes that are already older than the given source date epoch timestamp.
- Removed support for CentOS 8 Stream as it is now EOL.
- The `coredumpctl` and `journalctl` verbs now operate on the path specified in `ForwardJournal=` if one is set.
- Added `UnifiedKernelImageFormat=` setting to allow configuring the naming of unified kernel images generated by mkosi.
- The `versionlock` plugin is now enabled by default for dnf with a noop configuration.
- `Repositories=` is now implemented for zypper.
- `KernelModulesInclude=` and `KernelModulesInitrdInclude=` now take the special values `host` and `default` to include the host's loaded modules and the default kernel modules defined in `mkosi-initrd` respectively.
- `KernelModulesIncludeHost=` and `KernelModulesInitrdIncludeHost=` are now deprecated.
- Added `mkosi dependencies` to output the list of packages required by mkosi to build and boot images.

## v22

- We'll now try to delete btrfs subvolumes with `btrfs subvolume delete` first before falling back to recursively deleting the directory.
- The invoking user is now always mapped to `root` when running sync scripts.
  This fixes an issue where we would fail when a package manager tree or skeleton tree contained a `/usr` directory as we would not have permissions to run mount in the sandbox.
- We now use qemu's official firmware descriptions to find EDK2/OVMF UEFI firmware. Additionally, `QemuFirmware=uefi` now boots without SecureBoot support, and `QemuFirmware=uefi-secure-boot` was introduced to boot with SecureBoot support. By default we will still boot with SecureBoot support if `QemuFirmware=auto`.
- Added support for `QemuFirmwareVariables=custom` and `QemuFirmwareVariables=microsoft` to use OVMF/EDK2 variables with either the user's custom keys enrolled or with the Microsoft keys enrolled.
- Added `UnifiedKernelImages=` to control whether we generate unified kernel images or not.
- `Bootloader=grub` will now generate a grub EFI image and install it. If `SecureBoot=` is enabled and `ShimBootloader=` is not set to `signed`, the grub EFI image will be signed for SecureBoot.
- `ShimBootloader=signed` will now also instruct mkosi to look for and install already signed grub, systemd-boot, kernel and UKI binaries.
- We now build grub images with a fixed set of modules and don't copy any grub modules to the ESP anymore.
- The configuration is now made available as a JSON file to all mkosi scripts via the `$MKOSI_CONFIG` environment variable.
- `$PROFILE` is now set for all mkosi scripts containing the value of `Profile=` if it is set.

## v21

- We now handle unmerged-usr systems correctly
- Builtin configs (`mkosi-initrd`, `mkosi-tools`) can now be included using `Include=` (e.g. `Include=mkosi-initrd`)
- The kernel-install plugin now uses the builtin `mkosi-initrd` config so there's no need anymore to copy the full `mkosi-initrd` config into `/usr/lib/mkosi-initrd`.
- We don't require a build anymore for the `journalctl` and `coredumpctl` verbs.
- `mkosi ssh` works again when used with `ToolsTree=default`
- We now use `.zst` instead of `.zstd` for compressed split artifacts produced by `systemd-repart`.
- `systemd-repart` uses a persistent temporary directory again for assembling images instead of a tmpfs.
- Added `MicrocodeHost=` setting to only include the CPU-specific microcode for the current host system.
- The kernel-install plugin now only includes the CPU-specific microcode
- Introduced `PackageCacheDirectory=` to set the directory for package manager caches. This setting defaults to a suitable location in the system or user directory depending on how mkosi is invoked. `CacheDirectory=` is only used for incremental cached images now.
- Repository metadata is now synced once at the start of each image build and never during an image build. Each image includes a snapshot of the repository metadata in the canonical locations in `/var` so that incremental images and extension images can reuse the same snapshot. When building an image intended to be used with `BaseTrees=`, disable `CleanPackageMetadata=` to make sure the repository metadata in `/var` is not cleaned up, otherwise any extension images using this image as their base tree will not be able to install additional packages.
- Implemented `CacheOnly=metadata`. Note that in the JSON output, the value of `CacheOnly=` will now be a string instead of a boolean.
- Added `CompressLevel=` to set the compression level to use.
- Dropped experimental Gentoo support.
- Added `TriggerMatch=` to specify multiple match sections of which only one should be satisfied.
- Added `jq`, `attr`, `acl`, `git`, `sed`, `grep` and `findutils` to the default tools tree.
- Added `mkosi-install`, `mkosi-upgrade`, `mkosi-remove` and `mkosi-reinstall` scripts which allow writing scripts that are independent of the package manager being used to build the image.
- We now expand specifiers in `Match` section values
- Made GPG key handling for Fedora rawhide more robust
- If systemd-repart 256 or newer is available, mkosi will instruct it to generate `/etc/fstab` and `/etc/crypttab` for the image if any partition definitions contain the corresponding settings (`MountPoint=` and `EncryptedVolume=`).
- `bash` is now started in the debug shell instead of `sh`.
- The default release for Ubuntu is now `noble`.
- Ubuntu is now used as the default tools tree distribution for Ubuntu instead of Debian.
- Added `mkosi vmspawn` which boots the image with `systemd-vmspawn`. Note that `systemd-vmspawn` is experimental and its interface may still change. As such `mkosi vmspawn` is also considered experimental. Note that `systemd-vmspawn` version `256` or newer is required.
- Added `SyncScripts=` which can be used to update various build sources before starting the image build.
- The `DISTRIBUTION=` and `RELEASE=` environment variables are now set when running scripts.
- Added `ToolsTreeRepositories=` and `ToolsTreePackageManagerTrees=`.
- Added `RuntimeNetwork=` to configure the networking used when booting the image.
- Added `SecureBootKeySource=` and `VerityKeySource=` to support signing images with OpenSSL engines. Note that these settings require various systemd tools to be version `256` or newer.
- We don't clean up package manager metadata anymore unless explicitly requested with `CleanPackageMetadata=yes` when building `directory` and `tar` images.

## v20.2

- Fixed a bug in signing unsigned shim EFI binaries.
- We now build an early microcode initrd in the mkosi kernel-install plugin.
- Added `PackageDirectories=` to allow providing extra packages to be made available during the build.
- Fixed issue where `KernelModulesIncludeHost=` was including unnecessary modules
- Fixed `--mirror` specification for CentOS (and variants) and Fedora. Previously a subdirectory within the mirror had to be specified which prevented using CentOS and EPEL repositories from the same mirror. Now only the URL has to be specified.
- We now mount package manager cache directories when running scripts on the host so that any packages installed in scripts are properly cached.
- We don't download filelists on Fedora anymore
- Nested build sources don't cause errors anymore when trying to install packages.
- We don't try to build the same tools tree more than once anymore when building multiple images.
- We now create the `/etc/mtab` compatibility symlink in mkosi's sandbox.
- We now always hash the root password ourselves instead of leaving it to `systemd-firstboot`.
- `/srv` and `/mnt` are not mounted read-only anymore during builds.
- Fixed a crash when running mkosi in a directory with fewer than two parent directories.
- Implemented `RepositoryKeyCheck=` for apt-based distributions.

## v20.1

- `BuildSources=` are now mounted when we install packages so local packages can be made available in the sandbox.
- Fixed check to see if we're running as root which makes sure we don't do shared mounts when running as root.
- The extension release file is now actually written when building system or configuration extensions.
- The nspawn settings are copied to the output directory again.
- Incremental caching is now skipped when `Overlay=` is enabled as this combination isn't supported.
- The SELinux relabel check is more granular and now checks for all required files instead of just whether there's a policy configured.
- `qemu-system-xxx` binaries are now preferred over the generic `qemu` and `qemu-kvm` binaries.
- Grub tools from the tools tree are now used to install grub instead of grub tools from the image itself. The grub tools were added to the default tools trees as well.
- The pacman keyring in tools trees is now only populated from the Arch Linux keyring (and not the Debian/Ubuntu ones anymore).
- `gpg` is allowed to access `/run/pcscd/pcscd.comm` on the host if it exists to allow interaction with smartcards.

## v20

- The current working directory is not mounted unconditionally to `/work/src` anymore. Instead, the default value for `BuildSources=` now mounts the current working directory to `/work/src`. This means that the current working directory is no longer implicitly included when `BuildSources=` is explicitly configured.
- Assigning the empty string to a setting that takes a list of values now overrides any configured default value as well.
- The github action does not build and install systemd from source anymore. Instead, `ToolsTree=default` can be used to make sure a recent version of systemd is used to do the image build.
- Added `EnvironmentFiles=` to read environment variables from environment files.
- We drastically reduced how much of the host system we expose to scripts. Aside from `/usr`, a few directories in `/etc`, `/tmp`, `/var/tmp` and various directories configured in mkosi settings, all host directories are hidden from scripts, package managers and other tools executed by mkosi.
- Added `RuntimeScratch=` to automatically mount a directory with extra scratch space into mkosi-spawned containers and virtual machines.
- Package manager trees can now be used to configure every tool invoked by mkosi while building an image that reads config files from `/etc` or `/usr`.
- Added `SELinuxRelabel=` to specify whether to relabel selinux files or not.
- Many fixes to tools trees were made and tools trees are now covered by CI. Some combinations aren't possible yet but we're actively working to make these possible.
- `mkosi qemu` now supports direct kernel boots of `s390x` and `powerpc` images.
- Added `HostArchitecture=` match to match against the host architecture.
- We don't use the user's SSH public/private keypair anymore for `mkosi ssh` but instead use a separate key pair which can be generated by `mkosi genkey`. Users using `mkosi ssh` will have to run `mkosi genkey` once to generate the necessary files to keep `mkosi ssh` working.
- We don't automatically set `--offline=no` anymore when we detect the `Subvolumes=` setting is used in a `systemd-repart` partition definition file. Instead, use the new `RepartOffline=` option to explicitly disable running `systemd-repart` in offline mode.
- During the image build we now install UKIs/kernels/initrds to `/boot` instead of `/efi`. While this will generally not be noticeable, users with custom systemd-repart ESP partition definitions will need to add `CopyFiles=/boot:/` along with the usual `CopyFiles=/efi:/` to their ESP partition definitions. By installing UKIs/kernels/initrds to `/boot`, it becomes possible to use `/boot` to populate an XBOOTLDR partition which wasn't possible before. Note that this is also safe to do before `v20` so `CopyFiles=/boot:/` can unconditionally be added to any ESP partition definition files.
- Added `QemuFirmwareVariables=` to allow specifying a custom OVMF variables file to use.
- Added `MinimumVersion=` to allow specifying the minimum required mkosi version to build an image.
- Added support for Arch Linux's debug repositories.
- Merged the mkosi-initrd project into mkosi itself. mkosi-initrd is now used to build the default
  initrd.
- Implemented mkosi-initrd for all supported distributions.
- Added `ShimBootloader=` to support installing shim to the ESP.
- Added sysext, confext and portable output formats. These will produce signed disk images that can be
  used as sysexts, confexts and portable services respectively.
- Added `QemuVsockConnectionId=` to configure how to allocate the vsock connection ID when
  `QemuVsock=` is enabled.
- Added documentation on how to build sysexts with mkosi.
- Global systemd user presets are now also configured.
- Implemented `WithDocs=` for `apt`.
- On supported package managers, locale data for other locales is now stripped if the locale is
  explicitly configured using `Locale=`.
- All `rpm` plugins are now disabled when building images.
- Added `KernelModulesIncludeHost=` and `KernelModulesInitrdIncludeHost=` to only include modules
  loaded on the host system in the image/initrd respectively.
- Implemented `RemovePackages=` for Arch Linux.
- Added `useradd` and `groupadd` scripts to configure these binaries to operate on the image during
  builds instead of on the host.
- Added microcode support. If installed into the image, an early microcode initrd will automatically
  be built and prepended to the initrd.
- A passwordless root account may now be created by specifying `hashed:`.
- The `Autologin=` feature was extended with support for `arm64`, `s390x` and `powerpc` architectures.
- Added `SecureBootAutoEnroll=` to control automatic enrollment of secure boot keys separately from
  signing `systemd-boot` and generated UKIs.
- `ImageVersion=` is no longer automatically appended to the output files; instead it is automatically
  appended to `Output=` if not specified, which results in the `%o` specifier being equivalent to `%i`
  or `%i_%v` depending on whether `ImageVersion=` is specified.

## v19

- Support for RHEL was added!
- Added `journalctl` and `coredumpctl` verbs for running the respective tools on built directory or
  disk images.
- Added a `burn` verb to write the output image to a block device.
- Added a new `esp` output format, which is largely similar to the existing `uki` output format but
  wraps it in a disk image with only an ESP.
- `Presets` were renamed to `Images`. `mkosi.images/` is now used instead of `mkosi.presets/`, the
  `Presets=` setting was renamed to `Images=` and the `Presets` section was merged into the `Config`
  section. The old names can still be used for backwards compatibility.
- Added profiles to support building variants of the same image in one repository. Profiles can be
  defined in `mkosi.profiles/` and one can be selected using the new `Profile=` setting.
- mkosi will now parse `mkosi.local.conf` before any other config files if it exists.
- Added a kernel-install plugin. This is only shipped in source tree and not included in the Python
  module.
- Added a `--json` option to get the output of `mkosi summary` as JSON.
- Added shorthand `-a` for `--autologin`.
- Added a `--debug-workspace` option to not remove the workspace directory after a build. This is
  useful to inspect the workspace after failing builds.
  As a consequence, the default workspace directory prefix has been changed from `.mkosi-tmp` to
  `mkosi-workspace`.
- Scripts with the `.chroot` extension are now executed in the image automatically.
- Added an `rpm` helper script to have `rpm` automatically operate on the image when running scripts.
- Added an `mkosi-as-caller` helper script that can be used in scripts to run commands as the user
  invoking mkosi.
- `mkosi-chroot` will now start a shell if no arguments are specified.
- Added `WithRecommends=` to configure whether to install recommended packages by default or not where
  this is supported. It is disabled by default.
- Added a `ToolsTreeMirror=` setting for configuring the mirror to use for the default tools tree.
- `WithDocs=` is now enabled by default.
- Added `BuildSourcesEphemeral=` to make source directories ephemeral when running scripts. This means
  any changes made to source directories while running scripts will be undone after the scripts have
  finished executing.
- Added `QemuDrives=` to have mkosi create extra qemu drives and pass them to qemu when using the
  `qemu` verb.
- Added a `BuildSources=` match to match against configured build source targets.
- `PackageManagerTrees=` was moved to the `Distribution` section.
- We now automatically configure the qemu firmware, kernel cmdline and initrd based on what type of
  kernel is passed by the user via `-kernel` or `QemuKernel=`.
- The mkosi repository itself now ships configuration to build basic bootable images that can be used
  to test mkosi.
- Added support for enabling `updates-testing` repositories for Fedora.
- GPG keys for CentOS, Fedora, Alma and Rocky are now looked up locally first before fetching them
  remotely.
- Signatures are not required for local packages on Arch anymore.
- Packages on opensuse are now always downloaded in advance of installation when using zypper.
- The tar output is now reproducible.
- We now make sure `git` can be executed from mkosi scripts without running into permission errors.
- We don't create subdirectories beneath the configured cache directory anymore.
- Workspace directories are now created outside of any source directories. mkosi will either use
  `XDG_CACHE_HOME`, `$HOME/.cache` or `/var/tmp` depending on the situation.
- Added the environment variable `MKOSI_DNF` to override which dnf to use for building images (`dnf`
  or `dnf5`).
- The rootfs can now be modified when running build scripts (with all changes thrown away after the
  last build script has been executed).
- mkosi now fails if configuration specified via the CLI does not apply to any image (because it is
  overridden).
- Added a new doc on building rpms from source with mkosi (`docs/building-rpms-from-source.md`).
- `/etc/resolv.conf` will now only be mounted for scripts when they are run with network access.

## v18

- `$SCRIPT` was renamed to `$CHROOT_SCRIPT`. `$SCRIPT` can still be used but is considered deprecated.
- Added a `RuntimeTrees=` setting to mount directories when booting images via `mkosi boot`,
  `mkosi shell` or `mkosi qemu`. The directories are mounted with a uid map that maps the user
  invoking mkosi to the root user so that all files in the directory appear as if owned by the root
  user in the container or virtual machine and any new files created in the directories are owned by
  the user invoking mkosi. To make this work in VMs, we use `VirtioFS` via `virtiofsd`. Note that this
  requires systemd v254 or newer to be installed in the image.
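  A minimal sketch of how this might be used (the section name, paths and drop-in file name are
  assumptions for illustration):

  ```sh
  # Hypothetical: expose a host checkout at /srv/src inside the booted image.
  cat > mkosi.conf.d/10-runtime.conf <<'EOF'
  [Host]
  RuntimeTrees=/home/user/src:/srv/src
  EOF
  mkosi boot
  ```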
- Added support for booting directory images with `mkosi qemu` via `VirtioFS`. When `CONFIG_VIRTIOFS`
  and `CONFIG_VIRTIO_PCI` are builtin modules, no initramfs is required to make this work.
- Added `Include=` or `--include` to include extra configuration files or directories.
- Added support for specifiers to access the current value of certain settings during configuration
  file parsing.
- `mkosi` will now exit with an error when no configuration was provided.
- Multiple scripts of the same type are now supported.
- Custom distributions are now supported via the new `custom` distribution. When using `custom` as the
  distribution, the rootfs must be provided via base trees, skeleton trees or prepare scripts.
- We now use local GPG keys for rpm based distributions if the `distribution-gpg-keys` package is
  installed on the host.
- Added `RuntimeSize=` to grow the image to a specific size before booting it when using `mkosi boot`
  or `mkosi qemu`.
- We now set `MKOSI_UID` and `MKOSI_GID` when running scripts, which are set to the uid and gid of the
  user invoking mkosi respectively. These can be used to run commands as the user that invoked mkosi.
- Added an `Architecture=` match.
- Initrds specified with `Initrds=` are now used for grub menuentries as well.
- `ImageId=` and `ImageVersion=` are now written to os-release as `IMAGE_ID` and `IMAGE_VERSION` if
  provided.
- We pass command line arguments passed to the `build` verb to the build script again.
- We added support for the "RHEL Universal Base Image" distribution.

## v17.1

- Fixed a bug where `--autologin` was broken when used in combination with a tools tree when using a
  packaged version of mkosi.

## v17

- Added `ToolsTreePackages=` to add extra packages to the default tools tree.
- Added a `SystemdVersion=` match to match on the host's systemd version.
- Added a `Format=` match to match on the configured output format.
- `Presets=` can now be configured in global configuration files to select which presets to build.
- UKIs can now be booted using direct linux boot.
- We don't try to make images UEFI bootable anymore on architectures that do not support UEFI.
- Fixed `--help` to show all options again.
- We now warn when settings are configured in the wrong section.

## v16

- `mkosi.version` is now picked up from preset and dropin directories as well, following the usual
  config precedence logic.
- Removed the "first assignment wins" logic from configuration parsing. Settings parsed later will now
  override earlier values.
- Removed the `!` operator for lists. Instead, assign the empty string to the list to remove all
  previous values.
- Added support for configuring custom default values for settings by prefixing their name in the
  configuration file with `@`.
- Added `QemuCdrom=` to attach the image to the virtual machine as a CD-ROM instead of a block device.
- Added `SectorSize=` to set the sector size of the disk images built by systemd-repart.
- Added back grub support (BIOS/UEFI). Note that we don't install grub on UEFI yet but we do add the
  necessary configuration and partitions.
- Added a `Bootloader=` option to configure which EFI bootloader to install. Added a `uki` option to
  install just the UKI without systemd-boot and `grub` to generate grub configuration to chainload
  into the built UKIs.
- Added `BiosBootloader=` to configure whether grub for BIOS gets installed or not.
- Added `QemuFirmware=` to select which qemu firmware to use (OVMF, SeaBIOS or direct kernel boot).
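  For example (a sketch; the `linux` value selecting direct kernel boot is an assumption here):

  ```sh
  # Hypothetical drop-in forcing direct kernel boot instead of OVMF/SeaBIOS.
  cat > mkosi.conf.d/10-qemu.conf <<'EOF'
  [Host]
  QemuFirmware=linux
  EOF
  mkosi qemu
  ```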
- Added `QemuKernel=` to specify the kernel that should be used with direct kernel boot.
- `/var/lib/dbus/machine-id` is now removed if it was added by a package manager postinstall script.
- The manifest is not generated by default anymore. Use `ManifestFormat=json` to make sure the
  manifest is generated.
- Added `SourceDateEpoch=` to enable more reproducible image builds.
- Added `Seed=` to set the seed passed to systemd-repart.
- Updated the default Fedora release to Fedora 39.
- If `ToolsTree=` is set to `default`, mkosi will now build a default tools tree containing all the
  necessary tools to build images. The distribution and release to use can be configured with
  `ToolsTreeDistribution=` and `ToolsTreeRelease=` or are determined automatically based on the image
  being built.
- Added a `uki` output format. This is similar to `cpio`, except the cpio is packaged up as a UKI with
  a kernel image and stub picked up from the rootfs.

## v15.1

- The man page can be generated from the markdown file via `tools/make-man-page.sh`.
- Fixed an issue where not all packages and data files were included in the generated Python package.
- mkosi doesn't try to unshare the network namespace anymore when it doesn't have `CAP_NET_ADMIN`.
- Fixed an issue when the workspace was located in `/tmp`.
- Don't try to run `timedatectl` or `ssh-add` when they're not installed.

## v15

- Migrated to systemd-repart. Many options are dropped in favor of specifying them directly in repart
  partition definition files:
  - Format=gpt_xxx options are replaced with a single "disk" option. The filesystem to use can now be
    specified with repart's Format= option
  - Format=plain_squashfs (Can be reproduced by a single repart squashfs root partition combined with
    SplitArtifacts=yes)
  - Verity= (Replaced by repart's Verity= options)
  - Encrypt= (Replaced by repart's Encrypt= option)
  - RootSize=, HomeSize=, VarSize=, TmpSize=, ESPSize=, SwapSize=, SrvSize= (Replaced by repart's size
    options)
  - UsrOnly= (replaced with `CopyFiles=/:/usr` in a usr partition definition)
  - OutputSplitRoot=, OutputSplitVerity= (Replaced by repart's SplitName= option)
  - OutputSplitKernel= (UKI is now always written to its own output file)
  - GPTFirstLBA (Removed, no equivalent in repart)
  - ReadOnly= (Replaced by repart's ReadOnly= option per partition)
  - Minimize= (Replaced by repart's Minimize= option per partition)
  - CompressFs= (No equivalent in repart, can be replicated by replacing mkfs. in $PATH with a script
    that adds the necessary command line option)
  - MkSquashfs= (Can be replaced with a script in $PATH that invokes the correct binary)

  We also removed the WithoutUnifiedKernelImages= switch as building unified kernel images is trivial
  and fast these days.
- Support for --qemu-boot was dropped.
- Support for --use-host-repositories was dropped, use --repository-directory instead.
- `RepositoryDirectory` was removed, use `PackageManagerTrees=` or `SkeletonTrees=` instead.
- `--repositories` is now only usable on Debian/RPM based distros and can only be used to enable
  additional repositories. Specifically, it cannot be used on Arch Linux anymore to add new
  repositories.
- The `_epel` distributions were removed. Use `--repositories=epel` instead to enable the EPEL
  repository.
- Removed `-stream` from CentOS release specifiers. Instead of specifying `8-stream`, you now just
  specify `8`.
- Removed the default kernel command line arguments `rhgb`, `selinux=0` and `audit=0`.
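  Images that still want them can add them back explicitly, e.g. (a sketch; the section placement of
  `KernelCommandLine=` is an assumption):

  ```sh
  cat > mkosi.conf.d/10-cmdline.conf <<'EOF'
  [Content]
  KernelCommandLine=rhgb selinux=0 audit=0
  EOF
  ```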
- Dropped --all and --all-directory, as this functionality is better implemented by using a build
  system.
- mkosi now builds images without needing root privileges.
- Removed the `--no-chown`, `--idmap` and `--nspawn-keep-unit` options as they were made obsolete by
  moving to rootless builds.
- Removed `--source-file-transfer`, `--source-file-transfer-final`, `--source-resolve-symlinks` and
  `--source-resolve-symlinks-final` in favor of always mounting the source directory into the build
  image. `--source-file-transfer-final` might be reimplemented in the future using virtiofsd.
- Dropped the `--include-dir` option. Its usage can be replaced by using `--incremental` and reading
  includes from the cached build image tree.
- Removed `--machine-id` in favor of shipping images without a machine ID at all.
- Removed `--skip-final-phase` as we only have a single phase now.
- The post install script is only called for the final image now and not for the build image anymore.
  Use the prepare script instead.
- The `--ssh-key`, `--ssh-agent`, `--ssh-port` and `--ssh-timeout` options were dropped as the SSH
  support was reimplemented using VSock. `mkosi ssh` can only be used with images booted with
  `mkosi qemu`. Use `machinectl` to access images booted with `mkosi boot`. Use --extra-tree or
  --credential with the `.ssh.authorized_keys.root` credentials as alternatives for provisioning the
  public key inside the image.
- Only configuration files matching `*.conf` are parsed in dropin directories now.
- Removed `--qemu-headless`; we now start qemu in the terminal by default and configure the serial
  console at runtime. Use the new `--qemu-gui` option to start qemu in its graphical interface.
- Removed `--netdev`. It can be replaced by manually installing systemd-networkd, putting a network
  file in the image and enabling systemd-networkd.
- If `mkosi.extra/` or `mkosi.skeleton/` exist, they are now always used instead of only when no
  explicit extra/skeleton trees are defined.
- mkosi doesn't install any default packages anymore aside from packages required by the distro or the
  base filesystem layout package if there are no required packages. In practice, this means systemd
  and other basic tools have to be installed explicitly from now on.
- Removed `--base-packages` as it's not needed anymore since we don't install any packages by default
  anymore aside from the base filesystem layout package.
- Removed the `--qcow2` option in favor of supporting only raw disk images as the disk image output
  format.
- Removed the `--bmap` option as it can be trivially added manually by utilizing a finalize script.
- The `never` value for `--with-network` was spun off into its own custom option `--cache-only`.
- `--bootable` now defaults to `auto`. When set to `auto`, mkosi will generate a bootable image only
  if all the necessary packages are installed. Documentation was added in docs/bootable.md on how a
  bootable image can be generated on mainstream distros.
- The RPM db is no longer rebuilt in bdb format on CentOS Stream 8. To be able to install packages on
  a CentOS Stream 8 image with a RPM db in sqlite format, rewrite the db in bdb format using
  `rpm --rebuilddb --define _db_backend bdb`.
- Repositories are now only written to /etc/apt/sources.list if apt is installed in the image.
- Removed the dependency on `debootstrap` to build Ubuntu or Debian images.
- Apt now uses the keyring from the host instead of the keyring from the image.
  This means `debian-archive-keyring` or `ubuntu-archive-keyring` are now required to be installed to
  build Debian or Ubuntu images respectively.
- `--base-image` is split into `--base-tree` and `--overlay`.
- Removed `--cache-initrd`; instead, use a prebuilt initrd with `Initrds=` to avoid rebuilding the
  initrd all the time.
- Disk images are now resized to 8G when booted to give some disk space to play around with in the
  booted image.
- Removed the `--install-directory=` option. This was originally added for caching the installation
  results, but this doesn't work properly as it might result in leftover files in the install
  directory from a previous installation; we would have to empty the directory before reusing it,
  which invalidates the caching, so the option was removed.
- Build scripts are now executed on the host. See the `SCRIPTS` section in the manual for more
  information. Existing build scripts will need to be updated to make sure they keep working.
  Specifically, most paths in scripts will need to be prefixed with $BUILDROOT to have them operate on
  the image instead of on the host system. To ensure the host system cannot be modified in any way,
  most host directories are mounted read-only when running a script. Alternatively to making the
  script run on the host, the script can also still be executed in the image itself by putting the
  following snippet at the top of the script:

  ```sh
  if [ "$container" != "mkosi" ]; then
      exec mkosi-chroot "$SCRIPT" "$@"
  fi
  ```
- Removed the `--tar-strip-selinux-context=` option. We now label all files properly if selinux is
  enabled and if users don't want the labels, they can simply exclude them when extracting the
  archive.
- Gentoo is now marked as experimental and unsupported and there's no guarantee at all that it will
  work. Issues related to gentoo will generally not receive attention from core maintainers. All
  gentoo specific hacks outside of the gentoo implementation module have been removed.
- A verb `documentation` has been added. Calling mkosi with this verb will show the documentation.
  This is useful when running mkosi during development to always have the documentation in the correct
  version available. By default it will try several ways to output the documentation, but a specific
  option can be chosen with the `--doc-format` option. Distro packagers are encouraged to add a file
  `mkosi.1` into the `mkosi/resources` directory of the Python package, if it is missing, as well as
  install it in the appropriate search path for man pages. The man page can be generated from the
  markdown file `mkosi/resources/mkosi.md`, e.g. via `pandoc -t man -s -o mkosi.1 mkosi.md`.
- BuildSources= now takes source:target pairs which specify the source directory and where to mount it
  relative to the top level source directory when running scripts.
  (e.g. BuildSources=../my-project:my-project)

## v14

- Support for Clear Linux was dropped. See https://github.com/systemd/mkosi/pull/1037 for more
  information.
- Support for Photon was dropped. See https://github.com/systemd/mkosi/pull/1048 for more information.
- The Arch kernel/bootloader pacman hooks were removed. For anyone that still wants to use them, they
  can be found [here](https://github.com/systemd/mkosi/tree/v13/mkosi/resources/arch).
- mkosi now creates `distro~release` subdirectories inside the build, cache and output directories for
  each `distro~release` combination that is built.
  This allows building for multiple distros without throwing away the results of a previous distro
  build every time.
- The preferred names for mkosi configuration files and directories are now `mkosi.conf` and
  `mkosi.conf.d/` respectively. The old names (`mkosi.default` and `mkosi.default.d`) have been
  removed from the docs but are still supported for backwards compatibility.
- `plain_squashfs` type images will now also be named with a `.raw` suffix.
- `tar` type images will now respect the `--compress` option.
- Pacman's `SigLevel` option was changed to use the same default value as used on Arch, which is
  `SigLevel = Required DatabaseOptional`. If this results in keyring errors, you need to update the
  keyring by running `pacman-key --populate archlinux`.
- Support for CentOS 7 was dropped. If you still need to support CentOS 7, we recommend using any
  mkosi version up to 13.
- Support for BIOS/grub was dropped because EFI hardware is widely available, legacy BIOS systems do
  not support the feature set needed to fully verify a boot chain from firmware to userland, and it
  had become bothersome to maintain for little use. To generate BIOS images you can use any version of
  mkosi up to mkosi 13 or the new `--bios-size` option. This can be used to add a BIOS boot partition
  of the specified size on which `grub` (or any other bootloader) can be installed with the help of
  mkosi's script support (depending on your needs most likely `mkosi.postinst` or `mkosi.finalize`).
  This method can also be used for other EFI bootloaders that mkosi intentionally does not support.
- mkosi now unconditionally copies the kernel, initrd and kernel cmdline from the image that were
  previously only copied out for Qemu boot.
- mkosi now runs apt and dpkg on the host. As such, we now require apt and dpkg to be installed on the
  host along with debootstrap in order to be able to build debian/ubuntu images.
- Split dm-verity artifacts default names have been changed to match what `systemd` and other tools
  expect: `image.root.raw`, `image.root.verity`, `image.root.roothash`, `image.root.roothash.p7s`
  (same for `usr` variants).
- `mkosi` will again default to the same OS release as the host system when the host system uses the
  same distribution as the image that's being built.
- By default, `mkosi` will now change the owner of newly created directories to `SUDO_UID` or
  `PKEXEC_UID` if defined, unless `--no-chown` is used.
- If `systemd-nspawn` v252 or newer is used, bind-mounted directories with `systemd-nspawn` will use
  the new `rootidmap` option so files and directories created from within the container will be owned
  by the actual directory owner on the host.

## v13

- The `--network-veth` option has been renamed to `--netdev`. The old name made sense with virtual
  ethernet devices, but when booting images with qemu a TUN/TAP device is used instead.
- The network config file installed by mkosi when the `--netdev` (previously `--network-veth`) option
  is used (formerly `/etc/systemd/network/80-mkosi-network-veth.network` in the image) now only
  matches network interfaces using the `virtio_net` driver. Please make sure you weren't relying on
  this file to configure any network interfaces other than the tun/tap virtio-net interface created by
  mkosi when booting the image in QEMU with the `--netdev` option.
- mkosi no longer defaults to the same OS release as the host system when the host system uses the
  same distribution as the image that's being built.
  Instead, when no release is specified, mkosi will now always default to the default version embedded
  in mkosi itself.
- `mkosi` will now use the `pacman` keyring from the host when building Arch images. This means that
  users will, on top of installing `archlinux-keyring`, also have to run `pacman-key --init` and
  `pacman-key --populate archlinux` on the host system to be able to build Arch images. Also, unless
  the package manager is configured to do it automatically, the host keyring will have to be updated
  after `archlinux-keyring` updates by running `pacman-key --populate archlinux` and
  `pacman-key --updatedb`.
- Direct qemu linux boot is now supported with `BootProtocols=linux`. When enabled, the kernel image,
  initrd, and cmdline will be extracted from the image and passed to `qemu` by `mkosi qemu` to
  directly boot into the kernel image without a bootloader. This can be used to boot for example s390x
  images in `qemu`.
- The initrd will now always be rebuilt after the extra trees and build artifacts have been installed
  into the image.
- The github action has been migrated to Ubuntu Jammy. To migrate any jobs using the action, add
  `runs-on: ubuntu-22.04` to the job config.
- All images are now configured by default with the `C.UTF-8` locale.
- The new `--repository-directory` option can be used to configure a directory with extra repository
  files to be used by the package manager when building an image. Note that this option is currently
  only supported for `pacman` and `dnf`-based distros.
- The option `--skeleton-tree` is now supported on Debian-based distros.
- Removed `--hostname` as it's trivial to configure using systemd-firstboot.
- Removed the default locale configuration as it's trivial to configure using systemd-firstboot and
  systemd provides a default locale as well.

## v12

- Fix handling of baselayout in Gentoo installations.

## v11

- Support for Rocky Linux, Alma Linux, and Gentoo has been added!
- A new `ManifestFormat=` option can be used to generate "manifest" files that describe what packages
  were installed. With `json`, a JSON file that shows the names and versions of all installed packages
  will be created. With `changelog`, a longer human-readable file that shows package descriptions and
  changelogs will be generated. This latter format should be considered experimental and likely to
  change in later versions.
- A new `RemovePackages=` option can be used to uninstall packages after the build and finalize
  scripts have been done. This is useful for the case where packages are required by the build
  scripts, or pulled in as dependencies for scriptlets of other packages, but are not necessary in the
  final image.
- A new `BaseImage=` option can be used to build "system extensions" a.k.a. "sysexts" — partial images
  which are mounted on top of an existing system to provide additional files under `/usr/`. See the
  [systemd-sysext man page](https://www.freedesktop.org/software/systemd/man/systemd-sysext.html) for
  more information.
- A new `CleanPackageMetadata=` option can be used to force or disable the removal of package manager
  files. When this option is not used, they are removed when the package manager is not installed in
  the final image.
- A new `UseHostRepositories=` option instructs mkosi to use repository configuration from the host
  system, instead of the internal list.
- A new `SshAgent=` option configures the path to the ssh agent.
- A new `SshPort=` option overrides the port used for ssh.
- The `Verity=` setting supports a new value `signed`.
  When set, verity data will be signed and the result inserted as an additional partition in the
  image. See https://systemd.io/DISCOVERABLE_PARTITIONS for details about signed disk images. This
  information is used by `systemd-nspawn`, `systemd-dissect`, `systemd-sysext`, `systemd-portabled`
  and `systemd`'s `RootImage=` setting (among others) to cryptographically validate the image file
  systems before use.
- The `--build-environment=` option was renamed to `--environment=` and extended to cover *all*
  invoked scripts, not just `mkosi.build`. The old name is still understood.
- With `--with-network=never`, `dnf` is called with `--cacheonly`, so that the package lists are not
  refreshed. This gives a degree of reproducibility when doing repeated installs with the same package
  set (and also makes installs significantly faster).
- The `--debug=` option gained a new value `disk` to show information about disk sizes and partition
  allocations.
- Some sections and settings have been renamed for clarity: [Packages] is now [Content]; `Password=`,
  `PasswordIsHashed=`, and `Autologin=` are now in [Content]. The old names are still supported, but
  not documented.
- When `--prepare-script=`/`--build-script=`/`--finalize-script=` is used with an empty argument, the
  corresponding script will not be called.
- Python 3.7 is the minimum supported version.
- Note to packagers: the Python `cryptography` module is needed for signing of verity data.

## v10

- Minimum supported Python version is now 3.7.
- Automatic configuration of the network for Arch Linux was removed to bring different distros more in
  line with each other. To add it back, add a postinstall script to configure your network manager of
  choice.
- The `--default` option was changed to not affect the search location of `mkosi.default.d/`. mkosi
  now always searches for `mkosi.default.d/` in the working directory.
- `quiet` was dropped from the default kernel command line.
- `--source-file-transfer` and `--source-file-transfer-final` now accept an empty value as the
  argument, which can be used to override a previous setting.
- A new command `mkosi serve` can be used to serve build artifacts using a small embedded HTTP server.
  This is useful for `machinectl pull-raw …` and `machinectl pull-tar …`.
- A new command `mkosi genkey` can be used to generate secure boot keys for use with mkosi's
  `--secure-boot` options. The number of days the keys should remain valid can be specified via
  `--secure-boot-valid-days=` and their CN via `--secure-boot-common-name=`.
- When booting images with `qemu`, firmware that supports Secure Boot will be used if available.
- The `--source-resolve-symlinks` and `--source-resolve-symlinks-final` options were added to control
  how symlinks in the build sources are handled when `--source-file-transfer[-final]=copy-all` is
  used.
- The `--build-environment=` option was added to set variables for the build script.
- The `--usr-only` option was added to build images that comprise only the `/usr/` directory, instead
  of the whole root file system. This is useful for stateless systems where `/etc/` and `/var/` are
  populated by `systemd-tmpfiles`/`systemd-sysusers` and related calls at boot, or systems that are
  originally shipped without a root file system, but where `systemd-repart` adds one on the first
  boot.
- Support for "image versions" has been added. The version number can be set with `--version-number=`.
  It is included in the default output filename and passed as `$IMAGE_VERSION` to the build script.
  In addition, `mkosi bump` can be used to increase the version number by one, and `--auto-bump` can
  be used to increase it automatically after successful builds.
- Support for "image identifiers" has been added. The id can be set with `--image-id=` and is passed
  to the build script as `$IMAGE_ID`.
- The list of packages to install can be configured with `--base-packages=`. With
  `--base-packages=no`, only packages specified with `--packages=` will be installed. With
  `--base-packages=conditional`, various packages will be installed "conditionally", i.e. only if some
  other package is otherwise pulled in. For example, `systemd-udev` may be installed only if `systemd`
  is listed in `--packages=`.
- A CPIO output format has been added. This is useful for kernel initramfs images.
- Output compression can be configured with `--compress-fs=` and `--compress-output=`, and support for
  `zstd` has been added.
- The `--ssh-key=` option was added to control the ssh key used to connect to the image.
- The `--remove-files=` option was added to remove files from the generated images.
- Inline comments are now allowed in config files (anything from `#` until the end of the line will be
  ignored).
- The development branch was renamed from `master` to `main`.

## v9

### Highlighted Changes

- The mkosi Github action now defaults to the current release of mkosi instead of the tip of the
  master branch.
- Add a `ssh` verb and accompanying `--ssh` option. The latter sets up SSH keys for direct SSH access
  into a booted image, whereas the former can be used to start an SSH connection to the image.
- Allow for distribution specific `mkosi.*` files in subdirectories of `mkosi.default.d/`. These files
  are only processed if a subdirectory named after the target distribution of the image is found in
  `mkosi.default.d/`.
- The summary of used options for the image is now only printed when building the image for the first
  time or when the `summary` verb is used.
- All of mkosi's output, except for the build script, will now go to stderr. There was no clear policy
  on this before and this choice makes it easier to use images generated and booted via mkosi with
  language servers using stdin and stdout for communication.
- `--source-file-transfer` now defaults to `copy-git-others` to also include untracked files.
- [black](https://github.com/psf/black) is now used as a code style and conformance with it is checked
  in CI.
- Add a new `--ephemeral` option to boot into a temporary snapshot of the image that will be thrown
  away on shutdown.
- Add a new option `--network-veth` to set up a virtual Ethernet link between the host and the image
  for usage with nspawn or QEMU.
- Add a new `--autologin` option to automatically log into the root account upon boot of the image.
  This is useful when using mkosi for boot tests.
- Add a new `--hostonly` option to generate host specific initrds. This is useful when using mkosi for
  boot tests.
- Add a new `--install-directory` option and special directory `mkosi.installdir/` that will be used
  as `$DESTDIR` for the build script, so that the contents of this directory can be shared between
  builds.
- Add a new `--include-directory` option and special directory `mkosi.includedir/` that will be
  mounted at `/usr/include` during the build. This way header files installed during the build can be
  made available to the host system, which is useful for usage with language servers.
- Add a new `--source-file-transfer-final` option to complement `--source-file-transfer`.
  It does the same as `--source-file-transfer` does for the build image, but for the final one.
- Add a new `--tar-strip-selinux-context` option to remove SELinux xattrs. This is useful when an
  image with a target distribution not using SELinux is generated on a host that is using it.
- Document the `--no-chown` option. Using this option, artifacts generated by mkosi are not chowned to
  the user invoking mkosi when it is invoked via sudo. It has been with us for a while, but hasn't
  been documented until now.

### Fixed Issues

- [#506](https://github.com/systemd/mkosi/issues/506)
- [#559](https://github.com/systemd/mkosi/issues/559)
- [#561](https://github.com/systemd/mkosi/issues/561)
- [#562](https://github.com/systemd/mkosi/issues/562)
- [#575](https://github.com/systemd/mkosi/issues/575)
- [#580](https://github.com/systemd/mkosi/issues/580)
- [#593](https://github.com/systemd/mkosi/issues/593)

### Authors

- Daan De Meyer
- Joerg Behrmann
- Luca Boccassi
- Peter Hutterer
- ValdikSS

mkosi-26/mkosi/resources/mkosi-addon/000077500000000000000000000000001512054777600200045ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-addon/mkosi.conf000066400000000000000000000020461512054777600217770ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Distribution] Distribution=custom [Output] Output=addon Format=addon ManifestFormat= SplitArtifacts= [Content] Bootable=no # Needs to be available for the addon stub, but don't want it in the initrd ExtraTrees=/usr/lib/systemd/boot/efi:/usr/lib/systemd/boot/efi RemoveFiles=/usr/lib/systemd/boot/efi/ RemoveFiles= # Including kernel images in the initrd is generally not useful. # This also stops mkosi from extracting the kernel image out of the image as a separate output. /usr/lib/modules/*/vmlinuz* /usr/lib/modules/*/vmlinux* /usr/lib/modules/*/System.map # This is an addon so drop all modules files as these would override the ones from the base image. /usr/lib/modules/*/modules.* # Arch Linux specific file. /usr/lib/modules/*/pkgbase # Drop microcode directories explicitly as these are not dropped by the kernel modules processing # logic. /usr/lib/firmware/intel-ucode /usr/lib/firmware/amd-ucode mkosi-26/mkosi/resources/mkosi-initrd/000077500000000000000000000000001512054777600202105ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf000066400000000000000000000062021512054777600222010ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Output] Output=initrd Format=cpio ManifestFormat= SplitArtifacts= [Content] Bootable=no MakeInitrd=yes CleanPackageMetadata=yes Packages= systemd # sine qua non udev bash # for emergency logins less # this makes 'systemctl' much nicer to use ;) gzip # For compressed keymap unpacking by loadkeys RemoveFiles= # we don't need this after the binary catalogs have been built /usr/lib/systemd/catalog /etc/udev/hwdb.d /usr/lib/udev/hwdb.d # this is not needed by anything updated in the last 20 years /etc/services # Including kernel images in the initrd is generally not useful. # This also stops mkosi from extracting the kernel image out of the image as a separate output. /usr/lib/modules/*/vmlinuz* /usr/lib/modules/*/vmlinux* /usr/lib/modules/*/System.map /var/cache /var/log # Configure locale explicitly so that all other locale data is stripped on distros whose package manager supports it. Locale=C.UTF-8 WithDocs=no # Make sure various core modules are always included in the initrd. 
KernelModules= ahci amd_atl amd_ctl amd-pmc amd64_edac atkbd autofs4 binfmt_misc btrfs cdrom cfg80211 configfs crc-ccitt dm-crypt dm-integrity dm-mod dm-multipath dm-raid dm-verity dmi-sysfs drm_buddy drm_display_helper edac_mce_amd efi-pstore efivarfs erofs evdev ext4 hid-generic i2c-algo-bit i2c_hid_acpi i2c-mux i2c-smbus i8042 intel-gtt intel_pmc_ssram_telemetry intel_rapl_common intel-uncore-frequency-common intel-vsec kvm libphy loop mdio_devres mei mxm-wmi nvme nvmet-tcp overlay parport pmt_telemetry qemu_fw_cfg raid[0-9]* rapl scsi_mod sd_mod serio sg skx_edac_common snd-intel-dspcfg snd-soc-hda-codec squashfs thunderbolt_net tpm_tis ttm typec_ucsi ucsi_acpi usbhid usb-storage uvc vfat video videobuf2-v4l2 videobuf2-vmalloc virtio_balloon virtio_blk virtio_console virtio_dma_buf virtio_mmio virtio_net virtio_pci virtio_scsi virtio-rng virtiofs vmd vmw_vsock_virtio_transport vsock watchdog wmi wmi-bmof x_tables xfs xhci-pci-renesas /fs/nls/ crypto/ -/drivers/crypto/ # exclude specialized hardware ccp_crypto # AMD crypto accelerator available in ryzen zram # for early/systemd-zram-setup nfnetlink # for firewalld.service mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/000077500000000000000000000000001512054777600225005ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/arch.conf000066400000000000000000000016001512054777600242610ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= btrfs-progs e2fsprogs xfsprogs dosfstools # Various libraries that are dlopen'ed by systemd libfido2 libseccomp tpm2-tss procps-ng util-linux RemoveFiles= # Arch Linux doesn't split their gcc-libs package so we manually remove # unneeded stuff here to make sure it doesn't end up in the initrd. /usr/lib/libgfortran.so* /usr/lib/libgo.so* /usr/lib/libgomp.so* /usr/lib/libgphobos.so* /usr/lib/libobjc.so* /usr/lib/libgdruntime.so* # Remove all files that are only required for development. 
/usr/lib/*.a /usr/include/* /usr/share/i18n/* /usr/share/hwdata/* /usr/share/iana-etc/* /usr/share/locale/* mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/arm.conf000066400000000000000000000003621512054777600241270ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=arm64 [Content] # Needed on ARM boards like RPIs to get USB and sdcards working KernelModules= /drivers/mmc/host/ brcmutil udc-core mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/azure-centos-fedora.conf000066400000000000000000000006521512054777600272270ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|fedora Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel Distribution=|azure [Content] Packages= # Various libraries that are dlopen'ed by systemd libseccomp tpm2-tss # File system checkers for supported root file systems e2fsprogs xfsprogs dosfstools procps-ng mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/azure.conf000066400000000000000000000002051512054777600244720ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=azure [Content] Packages= btrfs-progs util-linux mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/centos/000077500000000000000000000000001512054777600237735ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/centos/mkosi.conf000066400000000000000000000005011512054777600257600ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel [Content] Packages= libfido2 util-linux RemovePackages= # Various packages pull in shadow-utils to create users, we can remove it afterwards shadow-utils mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/centos/mkosi.conf.d/000077500000000000000000000000001512054777600262635ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/centos/mkosi.conf.d/epel.conf000066400000000000000000000001611512054777600300550ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Repositories=epel [Content] Packages= btrfs-progs mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/debian-kali-ubuntu/000077500000000000000000000000001512054777600261605ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf000066400000000000000000000012201512054777600301440ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|kali Distribution=|ubuntu [Content] Packages= kmod # Not pulled in as a dependency on Debian/Ubuntu dmsetup # Not pulled in as a dependency on Debian/Ubuntu libcryptsetup12 libseccomp2 # xfsprogs pulls in python on Debian (???) and XFS generally # isn't used on Debian so we don't install xfsprogs. 
btrfs-progs e2fsprogs dosfstools procps util-linux # Various libraries that are dlopen'ed by systemd libfido2-1 RemoveFiles= /usr/share/locale/* mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/000077500000000000000000000000001512054777600304505ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/dpkg.conf000066400000000000000000000004641512054777600322500ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye Release=!bookworm [TriggerMatch] Distribution=ubuntu Release=!jammy Release=!noble Release=!oracular [Content] RemovePackages= # Needs perl >= 5.40.0-8 and dash >= 0.5.12-7 to drop this dpkg mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/libtss.conf000066400000000000000000000004231512054777600326160ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|kali Distribution=|ubuntu [Content] Packages= ^libtss2-esys-[0-9.]+-0(t64)?$ ^libtss2-mu[0-9.-]+(t64)?$ ^libtss2-rc0(t64)?$ ^libtss2-tcti-device0(t64)?$ systemd-cryptsetup.conf000066400000000000000000000004001512054777600351420ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye Release=!bookworm [TriggerMatch] Distribution=ubuntu Release=!jammy Release=!noble [TriggerMatch] Distribution=kali [Content] Packages=systemd-cryptsetup systemd-repart.conf000066400000000000000000000003361512054777600342250ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye Release=!bookworm [TriggerMatch] Distribution=ubuntu [TriggerMatch] Distribution=kali [Content] Packages=systemd-repart mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/fedora-stable.conf000066400000000000000000000003731512054777600260620ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora Release=|40 Release=|41 Release=|42 [Content] RemovePackages= # Various packages pull in shadow-utils to create users, we can remove it afterwards shadow-utils mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/fedora.conf000066400000000000000000000002341512054777600246060ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= btrfs-progs libfido2 util-linux-core mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/opensuse.conf000066400000000000000000000016551512054777600252170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= patterns-base-minimal_base # Various packages added as dependencies. If they are not explicitly installed, the zypper inner # logic picks the busybox-package variant, which adds also busybox in the initrd. 
diffutils grep xz # Various libraries that are dlopen'ed by systemd libfido2-1 libseccomp2 libtss2-esys0 libtss2-mu0 libtss2-rc0 libtss2-tcti-device0 # File system checkers for supported root file systems btrfsprogs e2fsprogs xfsprogs dosfstools procps util-linux RemovePackages= # Various packages pull in shadow to create users, we can remove it afterwards shadow sysuser-shadow RemoveFiles= /usr/share/locale/* /usr/etc/services mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/postmarketos.conf000066400000000000000000000002271512054777600261030ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=postmarketos [Content] Packages= kbd libseccomp util-linux mkosi-26/mkosi/resources/mkosi-initrd/mkosi.conf.d/stub.conf000066400000000000000000000001651512054777600243260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Format=uki Distribution=!arch [Content] Packages=systemd-boot mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/000077500000000000000000000000001512054777600224545ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/000077500000000000000000000000001512054777600232655ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/000077500000000000000000000000001512054777600240335ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/000077500000000000000000000000001512054777600255235ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system-preset/000077500000000000000000000000001512054777600303475ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system-preset/99-mkosi.preset000066400000000000000000000002111512054777600331460ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Make sure that services are disabled by default (primarily for Debian/Ubuntu). disable * mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system/000077500000000000000000000000001512054777600270475ustar00rootroot00000000000000systemd-cryptsetup@.service.d/000077500000000000000000000000001512054777600346615ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/systemcredential.conf000066400000000000000000000004501512054777600376410ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system/systemd-cryptsetup@.service.d[Service] ImportCredential=cryptsetup.* # Compat with older systemd versions that don't support ImportCredential=. 
LoadCredential=cryptsetup.passphrase LoadCredential=cryptsetup.fido2-pin LoadCredential=cryptsetup.tpm2-pin LoadCredential=cryptsetup.luks2-pin LoadCredential=cryptsetup.pkcs11-pin mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/000077500000000000000000000000001512054777600247765ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/000077500000000000000000000000001512054777600263525ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-dm.rules000066400000000000000000000004161512054777600326540ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-only # Copied from https://github.com/dracutdevs/dracut/blob/059/modules.d/90dm/11-dm.rules SUBSYSTEM!="block", GOTO="dm_end" KERNEL!="dm-[0-9]*", GOTO="dm_end" ACTION!="add|change", GOTO="dm_end" OPTIONS+="db_persist" LABEL="dm_end" mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/000077500000000000000000000000001512054777600231545ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/lvm/000077500000000000000000000000001512054777600237525ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/lvm/mkosi.conf000066400000000000000000000001061512054777600257400ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Content] Packages=lvm2 mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/network/000077500000000000000000000000001512054777600246455ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/network/mkosi.conf.d/000077500000000000000000000000001512054777600271355ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/network/mkosi.conf.d/systemd-container.conf000066400000000000000000000001571512054777600334570ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=!arch [Content] Packages=systemd-container mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/network/mkosi.conf.d/systemd-networkd.conf000066400000000000000000000002071512054777600333260ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|fedora Distribution=|opensuse [Content] Packages=systemd-networkd mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/network/mkosi.conf.d/systemd-resolved.conf000066400000000000000000000001561512054777600333170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=!arch [Content] Packages=systemd-resolved mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/network/mkosi.postinst000077500000000000000000000003051512054777600275750ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: LGPL-2.1-or-later mkdir -p "$BUILDROOT/usr/lib/systemd/network/" ln -sf 89-ethernet.network.example "$BUILDROOT/usr/lib/systemd/network/89-ethernet.network" mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/nfs/000077500000000000000000000000001512054777600237425ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/nfs/mkosi.conf000066400000000000000000000001731512054777600257340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Content] KernelModules= fs/nfs/ net/sunrpc/ nfs_acl 
mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/nfs/mkosi.conf.d/000077500000000000000000000000001512054777600262325ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/nfs/mkosi.conf.d/arch.conf000066400000000000000000000001461512054777600300170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages=nfs-utils mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/nfs/mkosi.conf.d/debian-ubuntu.conf000066400000000000000000000001771512054777600316500ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|ubuntu [Content] Packages=nfs-common mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/nfs/mkosi.conf.d/fedora.conf000066400000000000000000000002051512054777600303360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= nfs-utils libnfsidmap mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/nfs/mkosi.conf.d/opensuse.conf000066400000000000000000000002111512054777600307340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= nfs-client libnfsidmap1 mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/pkcs11/000077500000000000000000000000001512054777600242565ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/pkcs11/mkosi.conf000066400000000000000000000001111512054777600262400ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Content] Packages=p11-kit mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/plymouth/000077500000000000000000000000001512054777600250355ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/plymouth/mkosi.conf000066400000000000000000000001121512054777600270200ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Content] Packages=plymouth mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/plymouth/mkosi.conf.d/000077500000000000000000000000001512054777600273255ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/plymouth/mkosi.conf.d/debian-ubuntu.conf000066400000000000000000000002311512054777600327320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|ubuntu [Content] Packages= kbd plymouth-themes mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/plymouth/mkosi.conf.d/debian.conf000066400000000000000000000003051512054777600314140ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=debian [Content] Packages= # plymouth default theme in debian is "emerald", shipped by desktop-base desktop-base mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/plymouth/mkosi.conf.d/fedora.conf000066400000000000000000000002351512054777600314340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= abattis-cantarell-fonts plymouth-system-theme mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/plymouth/mkosi.conf.d/opensuse.conf000066400000000000000000000003131512054777600320320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= cantarell-fonts distribution-logos-openSUSE-Tumbleweed plymouth-branding-openSUSE 
mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/plymouth/mkosi.conf.d/ubuntu.conf000066400000000000000000000002021512054777600315100ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=ubuntu [Content] Packages= plymouth-theme-ubuntu-text mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/raid/000077500000000000000000000000001512054777600240735ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/raid/mkosi.conf000066400000000000000000000001071512054777600260620ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Content] Packages=mdadm mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/raid/mkosi.extra/000077500000000000000000000000001512054777600263375ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/raid/mkosi.extra/usr/000077500000000000000000000000001512054777600271505ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/raid/mkosi.extra/usr/lib/000077500000000000000000000000001512054777600277165ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/raid/mkosi.extra/usr/lib/udev/000077500000000000000000000000001512054777600306615ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/raid/mkosi.extra/usr/lib/udev/rules.d/000077500000000000000000000000001512054777600322355ustar00rootroot0000000000000070-mkosi-initrd-md.rules000066400000000000000000000017531512054777600364730ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-initrd/mkosi.profiles/raid/mkosi.extra/usr/lib/udev/rules.d# SPDX-License-Identifier: GPL-2.0-only # Copied from https://github.com/dracutdevs/dracut/blob/059/modules.d/90mdraid/59-persistent-storage-md.rules SUBSYSTEM!="block", GOTO="md_end" ACTION!="add|change", GOTO="md_end" # Also don't process disks that are slated to be a multipath device ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="md_end" KERNEL!="md[0-9]*|md_d[0-9]*|md/*", KERNEL!="md*", GOTO="md_end" # partitions have no md/{array_state,metadata_version} ENV{DEVTYPE}=="partition", GOTO="md_ignore_state" # container devices have a metadata version of e.g. 'external:ddf' and # never leave state 'inactive' ATTR{md/metadata_version}=="external:[A-Za-z]*", ATTR{md/array_state}=="inactive", GOTO="md_ignore_state" TEST!="md/array_state", GOTO="md_end" ATTR{md/array_state}=="|clear|inactive", GOTO="md_end" LABEL="md_ignore_state" IMPORT{program}="/sbin/mdadm --detail --export $devnode" IMPORT{builtin}="blkid" OPTIONS+="link_priority=100" OPTIONS+="watch" OPTIONS+="db_persist" LABEL="md_end" mkosi-26/mkosi/resources/mkosi-obs/000077500000000000000000000000001512054777600175025ustar00rootroot00000000000000mkosi-26/mkosi/resources/mkosi-obs/mkosi.build000077500000000000000000000266411512054777600216610ustar00rootroot00000000000000#!/bin/bash # SPDX-License-Identifier: LGPL-2.1-or-later # # Second stage of build: # - signed hashes are in cpio archive in SOURCES/ together with artifacts from previous build # - if there are PCR policies to attach, do so and then prepare the hashes of the UKIs themselves # - if not, attach the signatures to the UKI(s) with pesign # - remove shasums of previous artifacts, given we are re-creating them after this step # - place artifacts from previous builds and signed UKI in output directory set -e if [ ! 
if [ ! -f /usr/src/packages/SOURCES/hashes.cpio.rsasign.sig ]; then
    exit 0
fi

echo "Signed files to be attached:"
cpio -t
    "${pcrs}.new"
    mv "${pcrs}.new" "$pcrs"
    cp "$pcrs" "${pcrs}.sig"
    rm -f "$SIG"
done < <(find hashes/pcrs -type f -name '*.sig')
rm -rf hashes/pcrs

mkdir -p "$nss_db"
certutil -N -d sql:"$nss_db" --empty-password

# Fourth step: now that the JSON blob is rebuilt, merge it in the UKI
while read -r PCRS; do
    uki="${PCRS%.pcrs.sig}.efi"
    ukify --json=short --pcrsig "@$PCRS" --join-pcrsig "$uki" --output "$uki.attached" build
    mv "$uki.attached" "$uki"
    mkdir -p hashes/ukis
    pesign --force -n sql:"$nss_db" -i "$uki" -E "hashes/ukis/$(basename "$uki")"
done < <(find "$OUTPUTDIR" -type f -name '*.pcrs.sig')
rm -f "$OUTPUTDIR"/*.pcrs*

# Fifth step: finalize any DDI by attaching the verity roothash signatures
while read -r SIG; do
    test -f "/usr/src/packages/SOURCES/$(basename "${SIG%roothash.sig}repart.tar")" || continue
    PATH=/usr/lib/systemd/:$PATH systemd-keyutil --help | grep -q 'pkcs7' || break
    PATH=/usr/lib/systemd/:$PATH systemd-keyutil --certificate /usr/src/packages/SOURCES/_projectcert.crt --output "${SIG%sig}p7s" --signature "$SIG" pkcs7
    mkdir -p mkosi.repart
    tar xf "/usr/src/packages/SOURCES/$(basename "${SIG%roothash.sig}repart.tar")" -C mkosi.repart
    rm -f "$OUTPUTDIR/$(basename "${SIG%roothash.sig}repart.tar")"
    if [ -f "$OUTPUTDIR/$(basename "${SIG%roothash.sig}raw.zst")" ] || [ -f "$OUTPUTDIR/$(basename "${SIG%roothash.sig}img.zst")" ]; then
        recompress=1
        unzstd --force "$OUTPUTDIR/$(basename "${SIG%roothash.sig}")"raw*.zst
        rm -f "$OUTPUTDIR/$(basename "${SIG%roothash.sig}")"raw*.zst
    else
        recompress=0
    fi
    ARGS=(
        --join-signature "$(cat "${SIG%.sig}"):${SIG%sig}p7s"
        --certificate /usr/src/packages/SOURCES/_projectcert.crt
        --definitions mkosi.repart
        --dry-run=no
        "$OUTPUTDIR/$(basename "${SIG%roothash.sig}")"raw*
    )
    # sd-repart might be configured to create labels based on os-release fields. Due to the
    # mkosi sandbox we cannot use --image, so we need to provide the os-release file manually
    # via --root
    if [ -f "/usr/src/packages/SOURCES/$(basename "${SIG%roothash.sig}osrelease")" ]; then
        mkdir -p mkosi.repart/usr/lib
        cp "/usr/src/packages/SOURCES/$(basename "${SIG%roothash.sig}osrelease")" mkosi.repart/usr/lib/os-release
        ARGS+=(--root="$PWD"/mkosi.repart)
    fi
    if jq -r '.SplitArtifacts[]' "$MKOSI_CONFIG" | grep -q partitions; then
        # Need to recreate the split artifact and compress it if needed
        ARGS+=(--split=yes)
        split=1
    fi
    systemd-repart "${ARGS[@]}"
    rm -rf mkosi.repart
    if ((recompress)); then
        zstd --force "$OUTPUTDIR/$(basename "${SIG%roothash.sig}")"raw*
        rm -f "$OUTPUTDIR/$(basename "${SIG%roothash.sig}raw")" "$OUTPUTDIR/$(basename "${SIG%roothash.sig}raw.img")"
        if ((split)); then
            zstd --force "$OUTPUTDIR/$(basename "${SIG%roothash.sig}")"*-verity-sig.*.raw
            # sd-repart will split out all partitions again
            rm -f "$OUTPUTDIR/$(basename "${SIG%roothash.sig}")"usr-*.raw \
                "$OUTPUTDIR/$(basename "${SIG%roothash.sig}")"root-*.raw \
                "$OUTPUTDIR/$(basename "${SIG%roothash.sig}")"esp.raw
        fi
    fi
    # Do not publish the roothash here, as importctl and friends will mistake it for the roothash
    # of the .raw image
    rm -f "$OUTPUTDIR/$(basename "${SIG%.sig}")"
done < <(find hashes/roothashes -type f -name '*.sig')
rm -rf hashes/roothashes

# Sixth step: prepare EFI authvars for self-enrollment
while read -r SIG; do
    varname="${SIG%.auth.sig}"
    auth="${varname}.auth"
    esl="${varname}.esl"
    p7s="${varname}.p7s"
    PATH=/usr/lib/systemd/:$PATH systemd-keyutil --certificate /usr/src/packages/SOURCES/_projectcert.crt --output "$p7s" --signature "$SIG" pkcs7
    rm -f "$auth"
    sign-efi-sig-list -t "$(date -d "@${SOURCE_DATE_EPOCH:-0}" "+%Y-%m-%d %H:%M:%S")" -i "$p7s" "$varname" "$esl" "$auth"
    rm -f "$esl" "$SIG" "$p7s"
done < <(find hashes/authvars -type f -name '*.auth.sig')

declare -a AUTHVARS
mapfile -t AUTHVARS < <(find hashes/authvars -type f -name "*.auth")
if (( ${#AUTHVARS[@]} > 0 )); then
    for ddi in "$OUTPUTDIR"/*.raw*; do
        test -f "$ddi" || continue
        # Skip over split artifacts, if any
        [[ "$ddi" =~ \.usr- ]] && continue
        [[ "$ddi" =~ \.root- ]] && continue
        [[ "$ddi" =~ -verity ]] && continue
        if [[ $ddi == *.zst ]]; then
            unzstd --force "${ddi}"
            recompress=1
        else
            recompress=0
        fi
        offset="$(systemd-repart --json=short "${ddi%.zst}" | jq -r '.[] | select(.type == "esp") | .offset')"
        if [ -z "$offset" ] || [ "$offset" = "null" ]; then
            if [[ $ddi == *.zst ]]; then
                rm -f "${ddi%.zst}"
            fi
            continue
        fi
        mmd -D s -i "${ddi%.zst}@@${offset}" ::loader ::loader/keys ::loader/keys/auto || true
        for authvar in "${AUTHVARS[@]}"; do
            test -f "$authvar" || continue
            mcopy -o -i "${ddi%.zst}@@${offset}" "$authvar" "::loader/keys/auto/$(basename "$authvar")"
        done
        if ((recompress)); then
            zstd --force "${ddi%.zst}"
            rm -f "${ddi%.zst}"
        fi
    done
fi
rm -rf hashes/authvars

# Final step: if there are any hashes staged, prepare for the next stage
rmdir --ignore-fail-on-non-empty hashes
if [ -d hashes ]; then
    pushd hashes
    find . -type f | cpio -H newc -o >"$OUTPUTDIR/hashes.cpio.rsasign"
    popd
    cp /usr/src/packages/SOURCES/mkosi.conf "$OUTPUTDIR"
    echo "Staging the following files for signing:"
    cpio -t <"$OUTPUTDIR/hashes.cpio.rsasign"
fi
rm -rf hashes "$nss_db"

mkosi-26/mkosi/resources/mkosi-obs/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Output]
SplitArtifacts=pcrs,roothash,os-release,repart-definitions
CompressOutput=zstd

[Validation]
SecureBoot=no
SignExpectedPcr=no
Verity=defer
Checksum=yes

mkosi-26/mkosi/resources/mkosi-obs/mkosi.conf.d/main.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Image=main

[Build]
History=no
SandboxTrees=/usr/src/packages/SOURCES:/usr/src/packages/SOURCES
BuildSources=/usr/src/packages/OTHER:/usr/src/packages/OTHER
ToolsTree=
CacheDirectory=
Incremental=no
WithNetwork=never

[Distribution]
RepositoryKeyCheck=no
LocalMirror=file:///.build.binaries/

[Output]
OutputDirectory=

[Validation]
SignExpectedPcrCertificate=/usr/src/packages/SOURCES/_projectcert.crt
VerityCertificate=/usr/src/packages/SOURCES/_projectcert.crt

mkosi-26/mkosi/resources/mkosi-obs/mkosi.postoutput
#!/bin/bash
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# End of first stage of build:
# - built UKI is in $OUTPUTDIR
# - get PCR policy digests if any, or PE hash(es) with pesign
# - pack them up in a cpio as OBS expects and store them in OTHER/
# - create minimal recipe for second stage that will continue from here

set -e

declare -a UKIS
mapfile -t UKIS < <(find "$OUTPUTDIR" -type f -name "*.efi" -printf '%P\n')
declare -a KERNELS
mapfile -t KERNELS < <(find "$OUTPUTDIR" -type f -name "vmlinu*" -printf '%P\n')
declare -a ROOTHASHES
mapfile -t ROOTHASHES < <(find "$OUTPUTDIR" -type f -name "*.roothash" -printf '%P\n')
declare -a DDIS
mapfile -t DDIS < <(find "$OUTPUTDIR" -type f -name "*.raw*")

if ((${#UKIS[@]} == 0)) && ((${#KERNELS[@]} == 0)) && ((${#ROOTHASHES[@]} == 0)) && ((${#DDIS[@]} == 0)); then
    echo "No unsigned files found, exiting"
    exit 0
fi

nss_db="$PWD/nss-db"
# certutil will fail if it's called twice
rm -rf "$nss_db"
mkdir -p "$nss_db" hashes
certutil -N -d sql:"$nss_db" --empty-password

# When a single build has multiple images, postoutput is called for each image,
# so make sure the hashes.cpio from the previous stages gets its content preserved
if [ -f /usr/src/packages/OTHER/hashes.cpio.rsasign ]; then
    pushd hashes
    cpio -idm
        "hashes/pcrs/${f}/${pol}"
    done < <(jq -r 'to_entries[] | .value[].pol' <"${OUTPUTDIR}/${f%.efi}.pcrs")
else
    mkdir -p "$(dirname "hashes/ukis/$f")"
    pesign --force -n sql:"$nss_db" -i "${OUTPUTDIR}/${f}" -E "hashes/ukis/$f"
fi
done

for f in "${KERNELS[@]}"; do
    test -f "${OUTPUTDIR}/${f}" || continue
    mkdir -p "$(dirname "hashes/kernels/$f")"
    pesign --force -n sql:"$nss_db" -i "${OUTPUTDIR}/${f}" -E "hashes/kernels/$f"
done

repart_dir="$(jq -r '.RepartDirectories[-1]' "$MKOSI_CONFIG")"
if [ -z "$repart_dir" ] || [ "$repart_dir" = "null" ]; then
    output="$(jq -r '.Output' "$MKOSI_CONFIG")"
    if [ -n "$output" ] && [ "$output" != "null" ] && [ -d "$OUTPUTDIR/$output.repart.d" ]; then
        repart_dir="$OUTPUTDIR/$output.repart.d"
    fi
fi

for f in "${ROOTHASHES[@]}"; do
    test -f "${OUTPUTDIR}/${f}" || continue
    mkdir -p hashes/roothashes
    cp "${OUTPUTDIR}/$f" hashes/roothashes/
    # If we have a DDI to operate on, we need the repart definitions, so save the configs across to
    # the next stage
    if [ -n "$repart_dir" ] && [ "$repart_dir" != "null" ]; then
        pushd "$repart_dir" 2>/dev/null
        tar cf "$OUTPUTDIR/${f%roothash}repart.tar" ./*
        popd 2>/dev/null
    fi
done

# Handle bootloaders separately from UKIs
found_esp=0
for ddi in "${DDIS[@]}"; do
    test -f "$ddi" || continue
    # Skip over split artifacts, if any
    [[ "$ddi" =~ \.usr- ]] && continue
    [[ "$ddi" =~ \.root- ]] && continue
    [[ "$ddi" =~ -verity ]] && continue
    if [[ $ddi == *.zst ]]; then
        unzstd --force "${ddi}"
    fi
    offset="$(systemd-repart --json=short "${ddi%.zst}" | jq -r '.[] | select(.type == "esp") | .offset')"
    if [ -z "$offset" ] || [ "$offset" = "null" ]; then
        if [[ $ddi == *.zst ]]; then
            rm -f "${ddi%.zst}"
        fi
        continue
    fi
    rm -rf EFI
    mcopy -s -i "${ddi%.zst}@@${offset}" ::EFI EFI || true
    found_esp=1
    # UKIs are handled separately
    rm -rf EFI/Linux
    while read -r BOOTLOADER; do
        mkdir -p "hashes/bootloaders/$(basename "${ddi%.zst}")/$(dirname "$BOOTLOADER")"
        pesign --force -n sql:"$nss_db" -i "$BOOTLOADER" -E "hashes/bootloaders/$(basename "${ddi%.zst}")/$BOOTLOADER"
    done < <(find EFI -type f -iname '*.efi')
    if [[ $ddi == *.zst ]]; then
        rm -f "${ddi%.zst}"
    fi
    rm -rf EFI
done

# If there is an ESP prepare the authvars for self-enrollment
if [ "$found_esp" -eq 1 ]; then
    mkdir -p hashes/authvars
    pushd hashes/authvars 2>/dev/null
    # Same as the GUID used by bootctl
    guid=a5c059a1-94e4-4aa7-87b5-ab155c2bf072
    cert-to-efi-sig-list -g "$guid" /usr/src/packages/SOURCES/_projectcert.crt db.esl
    cp db.esl KEK.esl
    cp db.esl PK.esl
    # If there are any additional certs, concatenate them
    for cert in /usr/src/packages/SOURCES/*/mkosi.uefi.db/*.crt; do
        test -f "$cert" || continue
        cert-to-efi-sig-list -g "$guid" "$cert" tmp.esl
        cat tmp.esl >>db.esl
        rm -f tmp.esl
    done
    for cert in /usr/src/packages/SOURCES/*/mkosi.uefi.KEK/*.crt; do
        test -f "$cert" || continue
        cert-to-efi-sig-list -g "$guid" "$cert" tmp.esl
        cat tmp.esl >>KEK.esl
        rm -f tmp.esl
    done
    for i in *.esl; do
        sign-efi-sig-list -o -g "$guid" -t "$(date -d "@${SOURCE_DATE_EPOCH:-0}" "+%Y-%m-%d %H:%M:%S")" "${i%.esl}" "$i" "${i%.esl}.auth"
    done
    popd 2>/dev/null
fi

# Pack everything into a CPIO archive and place it where OBS expects it
pushd hashes
find . -type f | cpio -H newc -o >"$OUTPUTDIR/hashes.cpio.rsasign"
popd
rm -rf hashes "$nss_db"

echo "Staging the following files for signing:"
cpio -t <"$OUTPUTDIR/hashes.cpio.rsasign"

# The second stage will not do a full rebuild, but only attach signatures to the existing UKI
# Remember whether we need to split out the verity signature partition, as it's generated later
if jq -r '.SplitArtifacts[]' "$MKOSI_CONFIG" | grep -q partitions; then
    split="SplitArtifacts=partitions"
fi
cat >"$OUTPUTDIR/mkosi.conf" <
"$BUILDROOT/usr/share/p11-kit/modules/opensc.module"

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/debug/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Content]
Packages=
        gdb

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/debug/mkosi.conf.d/arch.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=arch

[Content]
Packages=
        perf

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/debug/mkosi.conf.d/centos-fedora.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|centos
Distribution=|alma
Distribution=|rocky
Distribution=|rhel
Distribution=|fedora

[Content]
Packages=
        perf

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/debug/mkosi.conf.d/debian-kali.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|debian
Distribution=|kali

[Content]
Packages=
        linux-perf

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/debug/mkosi.conf.d/opensuse.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=opensuse

[Content]
Packages=
        perf

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/devel/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Content]
Packages=
        autoconf
        automake
        binutils
        bison
        bzip2
        cmake
        flex
        gcc
        gettext
        groff
        libtool
        m4
        make
        meson
        patch
        pkgconf

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/gui/mkosi.conf.d/arch.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=arch

[Content]
Packages=
        pipewire
        pipewire-audio
        qemu-audio-pipewire
        qemu-hw-display-virtio-gpu
        qemu-hw-display-virtio-gpu-gl
        qemu-hw-display-virtio-vga
        qemu-hw-display-virtio-vga-gl
        qemu-ui-opengl
        qemu-ui-sdl
mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/gui/mkosi.conf.d/debian-ubuntu-kali.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|debian
Distribution=|ubuntu
Distribution=|kali

[Content]
Packages=
        pipewire-bin
        qemu-system-gui

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/gui/mkosi.conf.d/fedora.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=fedora

[Content]
Packages=
        pipewire
        qemu-audio-pipewire
        qemu-device-display-virtio-gpu
        qemu-device-display-virtio-gpu-gl
        qemu-device-display-virtio-vga
        qemu-device-display-virtio-vga-gl
        qemu-ui-opengl
        qemu-ui-sdl

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/misc/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Content]
Packages=
        acl
        attr
        bash
        diffutils
        file
        findutils
        gawk
        grep
        jq
        less
        nano
        sed
        strace
        which

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/misc/mkosi.conf.d/arch.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=arch

[Content]
Packages=
        base
        man
        man-pages
        git

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/misc/mkosi.conf.d/azure-centos-fedora.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|centos
Distribution=|alma
Distribution=|rocky
Distribution=|rhel
Distribution=|fedora
Distribution=|azure

[Content]
Packages=
        git-core
        man
        man-pages

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/misc/mkosi.conf.d/debian-kali-ubuntu.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|debian
Distribution=|kali
Distribution=|ubuntu

[Content]
Packages=
        git-core
        libnss-resolve
        libnss-myhostname
        man
        manpages
        systemd-coredump

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/misc/mkosi.conf.d/opensuse.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=opensuse

[Content]
Packages=
        git-core
        man
        man-pages
        patterns-base-minimal_base
        perf
        systemd-coredump

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/misc/mkosi.conf.d/postmarketos.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=postmarketos

[Content]
Packages=
        git
        man-db
        man-pages

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/package-manager/mkosi.conf.d/arch.conf
# SPDX-License-Identifier: LGPL-2.1-or-later
[Match]
Distribution=arch

[Content]
Packages=
        apt
        debian-archive-keyring
        createrepo_c
        distribution-gpg-keys
        dnf5
        ubuntu-keyring
        zypper

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/package-manager/mkosi.conf.d/centos-epel-packages-9.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|centos
Distribution=|alma
Distribution=|rocky
Distribution=|rhel
Repositories=epel
Release=9

[Content]
Packages=
        apt
        archlinux-keyring
        debian-keyring
        distribution-gpg-keys
        pacman
        ubu-keyring

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/package-manager/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|debian
Distribution=|kali
Distribution=|ubuntu

[Content]
Packages=
        ?exact-name(distribution-gpg-keys)
        ?exact-name(kali-archive-keyring)
        archlinux-keyring
        createrepo-c
        debian-archive-keyring
        dnf
        makepkg
        pacman-package-manager
        zypper

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/package-manager/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/ubuntu-keyring.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|!debian
Release=|!bookworm

[Content]
Packages=
        ubuntu-keyring

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/package-manager/mkosi.conf.d/fedora.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=fedora

[Content]
Packages=
        apk
        apt
        apt-utils
        archlinux-keyring
        debian-keyring
        pacman
        ubu-keyring
        zypper

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/package-manager/mkosi.conf.d/opensuse.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=opensuse

[Content]
Packages=
        dnf5
        dnf5-plugins

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/package-manager/mkosi.conf.d/postmarketos.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=postmarketos

[Content]
Packages=
        apt
        pacman
        debian-archive-keyring
        ubuntu-archive-keyring

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Content]
Packages=
        socat
        swtpm

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/arch.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=arch

[Content]
Packages=
        edk2-ovmf
        openssh
        qemu-base
        shadow
        virt-firmware
        virtiofsd

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/azure-centos-fedora/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|centos
Distribution=|alma
Distribution=|rocky
Distribution=|rhel
Distribution=|fedora
Distribution=|azure

[Content]
Packages=
        openssh-clients
        qemu-kvm-core
        shadow-utils
        swtpm-tools
        systemd-container
        systemd-journal-remote
        virt-firmware
        virtiofsd

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/edk2-ovmf.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=|x86-64
Architecture=|arm64

[Content]
Packages=
        edk2-ovmf

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/azure.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=azure

[Content]
Packages=
        qemu-system-aarch64-core
        qemu-system-s390x-core

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|debian
Distribution=|kali
Distribution=|ubuntu

[Content]
Packages=
        openssh-client
        ovmf
        qemu-efi-aarch64
        qemu-system
        swtpm-tools
        systemd-container
        systemd-journal-remote
        uidmap

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/python3-virt-firmware.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[TriggerMatch]
Distribution=debian
Release=!bookworm
Release=!bullseye

[TriggerMatch]
Distribution=ubuntu
Release=!jammy

[TriggerMatch]
Distribution=kali

[Content]
Packages=
        python3-virt-firmware

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/virtiofsd.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[TriggerMatch]
Distribution=debian
Release=!bullseye

[TriggerMatch]
Distribution=ubuntu
Release=!jammy

[TriggerMatch]
Distribution=kali

[Match]
Architecture=|x86-64
Architecture=|arm64
Architecture=|ppc64-le
Architecture=|s390x

[Content]
Packages=
        virtiofsd

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/fedora.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=fedora

[Content]
Packages=
        qemu-system-aarch64-core
        qemu-system-ppc-core
        qemu-system-s390x-core

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/opensuse/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=opensuse

[Content]
Packages=
        openssh-clients
        qemu-headless
        shadow
        systemd-container
        systemd-journal-remote
        virt-firmware
        virtiofsd

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/opensuse/mkosi.conf.d/efi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=uefi

[Content]
Packages=
        ovmf
        qemu-ipxe
        qemu-ovmf-x86_64
        qemu-uefi-aarch64

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/postmarketos/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=postmarketos

[Content]
Packages=
        mesa-dri-gallium
        mesa-egl
        mesa-gl
        openssh-client
        openssh-keygen
        qemu-audio-alsa
        qemu-audio-pa
        qemu-audio-pipewire
        qemu-audio-sdl
        qemu-hw-display-virtio-gpu
        qemu-hw-display-virtio-gpu-gl
        qemu-hw-display-virtio-gpu-pci
        qemu-hw-display-virtio-gpu-pci-gl
        qemu-hw-display-virtio-vga
        qemu-hw-display-virtio-vga-gl
        qemu-hw-usb-host
        qemu-hw-usb-redirect
        qemu-ui-opengl
        qemu-ui-sdl
        qemu-modules
        virtiofsd

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/postmarketos/mkosi.conf.d/arm64.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=arm64

[Content]
Packages=
        aavmf
        qemu-system-aarch64
        qemu-system-arm

mkosi-26/mkosi/resources/mkosi-tools/mkosi.profiles/runtime/mkosi.conf.d/postmarketos/mkosi.conf.d/x86-64.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=x86-64

[Content]
Packages=
        ovmf
        qemu-system-i386
        qemu-system-x86_64

mkosi-26/mkosi/resources/mkosi-tools/mkosi.skeleton/etc/resolv.conf

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Content]
Bootable=yes
Packages=
        bash
        dbus-broker
        diffutils
        gawk
        grep
        gzip
        kmod
        less
        nano
        sed
        strace
        systemd
        udev
        util-linux
mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/arch.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=arch

[Content]
Packages=
        dbus-broker-units
        iproute
        iputils
        linux
        polkit
        tpm2-tss

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/azure-centos-fedora/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|alma
Distribution=|rocky
Distribution=|centos
Distribution=|fedora
Distribution=|azure

[Content]
Packages=
        iproute
        iputils
        kernel
        polkit
        systemd-resolved
        tpm2-tss
        util-linux

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/networkd.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|fedora
Distribution=|azure

[Content]
Packages=
        systemd-networkd

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/uefi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=uefi

[Content]
Packages=
        systemd-boot

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/bootloader.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=!uefi

[Content]
# Non-UEFI arches are not yet set up for bootloader support,
# but can still do direct boot in qemu
BiosBootloader=none
Bootloader=none

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|debian
Distribution=|kali
Distribution=|ubuntu

[Content]
Packages=
        iproute2
        iputils-ping
        login
        polkitd
        systemd-coredump
        systemd-sysv
        tpm2-tools
        tzdata

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/arm64.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=arm64
Distribution=debian

[Content]
# The non-cloud arm64 kernel does not enable various options
# that are needed for our qemu builds, like generic TPM support
Packages=
        linux-image-cloud-arm64

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/kernel-generic.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=|!arm64
Distribution=|ubuntu
Distribution=|kali

[Content]
Packages=
        linux-image-generic
mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/systemd-boot-signed.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[TriggerMatch]
Distribution=debian
Release=!bullseye
Release=!bookworm

[Match]
Architecture=|x86-64
Architecture=|arm64

[Content]
Packages=
        systemd-boot-efi-signed

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/systemd-boot.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[TriggerMatch]
Distribution=debian
Release=!bullseye

[TriggerMatch]
Distribution=ubuntu
Release=!jammy

[TriggerMatch]
Distribution=kali

[Match]
Architecture=uefi

[Content]
Packages=
        systemd-boot
        systemd-boot-efi

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/systemd-resolved.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[TriggerMatch]
Distribution=debian
Release=!bullseye

[TriggerMatch]
Distribution=ubuntu
Release=!jammy

[TriggerMatch]
Distribution=kali

[Content]
Packages=systemd-resolved

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/opensuse.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=opensuse

[Content]
Packages=
        iproute
        iputils
        kernel-default
        libtss2-tcti-device0
        patterns-base-minimal_base
        polkit
        strace
        systemd-boot
        systemd-network
        tpm2.0-tools
        # Various packages are added as dependencies. If they are not explicitly installed, zypper's
        # inner logic picks the busybox variant of the package, which also pulls busybox into the image.
        xz

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/postmarketos.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=postmarketos

[Content]
Packages=
        agetty  # required for *getty@ services
        iproute2
        iputils
        kmod  # mkosi is incompatible with busybox depmod
        linux-virt
        musl-utils
        postmarketos-base
        postmarketos-base-systemd
        systemd-boot
        systemd-networkd
        systemd-udevd
        tpm2-tools

mkosi-26/mkosi/resources/mkosi-vm/mkosi.conf.d/ubuntu.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=ubuntu

[Distribution]
Repositories=universe

mkosi-26/mkosi/resources/pandoc/md2man.lua
-- SPDX-License-Identifier: LGPL-2.1-or-later

-- Modify the way pandoc transforms markdown to man pages

-- Convert `code` syntax to **code** in man page output
function Code(elem)
    -- Returns the content as a Strong (bold) element
    return pandoc.Strong(elem.text)
end

mkosi-26/mkosi/resources/repart/definitions/confext-unsigned.repart.d/10-root.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root
Format=erofs
CopyFiles=/etc/
Minimize=best

mkosi-26/mkosi/resources/repart/definitions/confext.repart.d/10-root.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root
Format=erofs
CopyFiles=/etc/
Verity=data
VerityMatchKey=root
Minimize=best

mkosi-26/mkosi/resources/repart/definitions/confext.repart.d/20-root-verity.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root-verity
Verity=hash
VerityMatchKey=root
Minimize=best

mkosi-26/mkosi/resources/repart/definitions/confext.repart.d/30-root-verity-sig.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root-verity-sig
Verity=signature
VerityMatchKey=root

mkosi-26/mkosi/resources/repart/definitions/portable-unsigned.repart.d/10-root.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root
Format=erofs
CopyFiles=/
Minimize=best

mkosi-26/mkosi/resources/repart/definitions/portable.repart.d/10-root.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root
Format=erofs
CopyFiles=/
Verity=data
VerityMatchKey=root
Minimize=best

mkosi-26/mkosi/resources/repart/definitions/portable.repart.d/20-root-verity.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root-verity
Verity=hash
VerityMatchKey=root
Minimize=best

mkosi-26/mkosi/resources/repart/definitions/portable.repart.d/30-root-verity-sig.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root-verity-sig
Verity=signature
VerityMatchKey=root

mkosi-26/mkosi/resources/repart/definitions/sysext-unsigned.repart.d/10-root.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root
Format=erofs
CopyFiles=/opt/
CopyFiles=/usr/
Minimize=best

mkosi-26/mkosi/resources/repart/definitions/sysext.repart.d/10-root.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root
Format=erofs
CopyFiles=/opt/
CopyFiles=/usr/
Verity=data
VerityMatchKey=root
Minimize=best
mkosi-26/mkosi/resources/repart/definitions/sysext.repart.d/20-root-verity.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root-verity
Verity=hash
VerityMatchKey=root
Minimize=best

mkosi-26/mkosi/resources/repart/definitions/sysext.repart.d/30-root-verity-sig.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root-verity-sig
Verity=signature
VerityMatchKey=root

mkosi-26/mkosi/resources/tmpfiles.d/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later
d %C/mkosi - - - 90d

mkosi-26/mkosi/run.py
# SPDX-License-Identifier: LGPL-2.1-or-later

import contextlib
import errno
import logging
import os
import queue
import shlex
import shutil
import signal
import subprocess
import sys
import threading
import uuid
from collections.abc import Awaitable, Collection, Iterator, Mapping, Sequence
from contextlib import AbstractContextManager
from pathlib import Path
from types import TracebackType
from typing import TYPE_CHECKING, Any, Callable, Generic, NoReturn, Optional, Protocol, TypeVar

import mkosi.sandbox
from mkosi.log import ARG_DEBUG, ARG_DEBUG_SANDBOX, ARG_DEBUG_SHELL, die
from mkosi.sandbox import SD_LISTEN_FDS_START, acquire_privileges, joinpath, umask
from mkosi.util import _FILE, PathString, flatten, one_zero, resource_path, unique

# These types are only generic during type checking and not at runtime, leading
# to a TypeError during compilation.
# Let's be as strict as we can with the description for the usage we have.
if TYPE_CHECKING:
    CompletedProcess = subprocess.CompletedProcess[str]
    Popen = subprocess.Popen[str]
else:
    CompletedProcess = subprocess.CompletedProcess
    Popen = subprocess.Popen

T = TypeVar("T")


def ensure_exc_info() -> tuple[type[BaseException], BaseException, TracebackType]:
    exctype, exc, tb = sys.exc_info()
    assert exctype
    assert exc
    assert tb
    return (exctype, exc, tb)


@contextlib.contextmanager
def uncaught_exception_handler(exit: Callable[[int], NoReturn] = sys.exit) -> Iterator[None]:
    rc = 0
    try:
        yield
    except SystemExit as e:
        rc = e.code if isinstance(e.code, int) else 1
        if ARG_DEBUG.get():
            sys.excepthook(*ensure_exc_info())
    except KeyboardInterrupt:
        rc = 1
        if ARG_DEBUG.get():
            sys.excepthook(*ensure_exc_info())
        else:
            logging.error("Interrupted")
    except subprocess.CalledProcessError as e:
        # We always log when subprocess.CalledProcessError is raised, so we don't log again here.
        rc = e.returncode
        # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those.
        # Failures from self come from the forks we spawn to build images in a user namespace. We've already
        # done all the logging for those failures so we don't log stacktraces for those either.
        if (
            ARG_DEBUG.get()
            and e.cmd
            and str(e.cmd[0]) not in ("self", "ssh", "systemd-nspawn")
            and "qemu-system" not in str(e.cmd[0])
        ):
            sys.excepthook(*ensure_exc_info())
    except BaseException:
        sys.excepthook(*ensure_exc_info())
        rc = 1
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
        exit(rc)


def fork_and_wait(target: Callable[..., None], *args: Any, **kwargs: Any) -> None:
    pid = os.fork()
    if pid == 0:
        with uncaught_exception_handler(exit=os._exit):
            target(*args, **kwargs)

    try:
        _, status = os.waitpid(pid, 0)
    except KeyboardInterrupt:
        os.kill(pid, signal.SIGINT)
        _, status = os.waitpid(pid, 0)
    except BaseException:
        os.kill(pid, signal.SIGTERM)
        _, status = os.waitpid(pid, 0)

    rc = os.waitstatus_to_exitcode(status)
    if rc != 0:
        raise subprocess.CalledProcessError(rc, ["self"])
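# Hypothetical usage sketch, not part of run.py: it assumes the mkosi package is
# importable (e.g. mkosi installed via pip) so that "mkosi.run" resolves; the
# step() function is made up for illustration. fork_and_wait() runs a callable in
# a forked child and converts a non-zero child exit into
# subprocess.CalledProcessError, which is how mkosi isolates per-image build steps.
import subprocess

from mkosi.run import fork_and_wait


def step() -> None:
    print("running in the child process")


try:
    fork_and_wait(step)
except subprocess.CalledProcessError as e:
    print(f"child failed with exit code {e.returncode}")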
def log_process_failure(sandbox: Sequence[str], cmdline: Sequence[str], returncode: int) -> None:
    if -returncode in (signal.SIGINT, signal.SIGTERM):
        logging.error(f"Interrupted by {signal.Signals(-returncode).name} signal")
    elif returncode < 0:
        logging.error(
            f'"{shlex.join(["mkosi-sandbox", *sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}"'
            f" was killed by {signal.Signals(-returncode).name} signal."
        )
    elif returncode == 127 and cmdline[0] != "mkosi":
        # Anything invoked beneath /work is a script that we mount into place (so we know it exists). If one
        # of these scripts fails with exit code 127, it's either because the script interpreter was not
        # installed or because one of the commands in the script failed with exit code 127.
        if cmdline[0].startswith("/work"):
            logging.error(f"{cmdline[0]} failed with non-zero exit code 127")
            logging.info(
                "(Maybe a program was not found or the script interpreter (e.g. bash) is not installed?)"
            )
        else:
            logging.error(f"{cmdline[0]} not found.")
    else:
        logging.error(
            f'"{shlex.join(["mkosi-sandbox", *sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}"'
            f" returned non-zero exit code {returncode}."
        )


def run(
    cmdline: Sequence[PathString],
    check: bool = True,
    stdin: _FILE = None,
    stdout: _FILE = None,
    stderr: _FILE = None,
    input: Optional[str] = None,
    env: Mapping[str, str] = {},
    log: bool = True,
    success_exit_status: Sequence[int] = (0,),
    setup: Sequence[PathString] = (),
    sandbox: AbstractContextManager[Sequence[PathString]] = contextlib.nullcontext([]),
) -> CompletedProcess:
    if input is not None:
        assert stdin is None  # stdin and input cannot be specified together
        stdin = subprocess.PIPE

    with spawn(
        cmdline,
        check=check,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        env=env,
        log=log,
        success_exit_status=success_exit_status,
        setup=setup,
        sandbox=sandbox,
    ) as process:
        out, err = process.communicate(input)

    return CompletedProcess(cmdline, process.returncode, out, err)
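# Hypothetical usage sketch, not part of run.py, assuming mkosi.run is importable:
# run() is a thin wrapper around spawn() that waits for completion and returns a
# CompletedProcess, so capturing output looks like plain subprocess usage.
import subprocess

from mkosi.run import run

# Processes run with text=True, so stdout is a str when piped.
result = run(["uname", "-r"], stdout=subprocess.PIPE)
print(result.stdout.strip())

# check=False turns a non-zero exit status into a return value instead of a
# raised subprocess.CalledProcessError.
result = run(["false"], check=False)
print(result.returncode)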
@contextlib.contextmanager
def spawn(
    cmdline: Sequence[PathString],
    check: bool = True,
    stdin: _FILE = None,
    stdout: _FILE = None,
    stderr: _FILE = None,
    user: Optional[int] = None,
    group: Optional[int] = None,
    pass_fds: Collection[int] = (),
    env: Mapping[str, str] = {},
    log: bool = True,
    preexec: Optional[Callable[[], None]] = None,
    success_exit_status: Sequence[int] = (0,),
    setup: Sequence[PathString] = (),
    sandbox: AbstractContextManager[Sequence[PathString]] = contextlib.nullcontext([]),
) -> Iterator[Popen]:
    cmd = [os.fspath(x) for x in cmdline]

    if ARG_DEBUG.get():
        logging.info(f"+ {shlex.join(cmd)}")

    if not stdout and not stderr:
        # Unless explicit redirection is done, print all subprocess output on stderr, since we do so as well
        # for mkosi's own output.
        stdout = sys.stderr

    if stdin is None:
        stdin = subprocess.DEVNULL

    env = {
        "PATH": os.environ["PATH"],
        "TERM": os.getenv("TERM", "vt220"),
        "LANG": "C.UTF-8",
        **{k: v for k, v in env.items() if k != "LANG" and not k.startswith("LC_")},
    }

    if "TMPDIR" in os.environ:
        env["TMPDIR"] = os.environ["TMPDIR"]

    for e in ("SYSTEMD_LOG_LEVEL", "SYSTEMD_LOG_LOCATION"):
        if e in os.environ:
            env[e] = os.environ[e]

    if "HOME" not in env:
        env["HOME"] = "/"

    with contextlib.ExitStack() as stack:
        sbx = [os.fspath(x) for x in stack.enter_context(sandbox)]

        apply_sandbox_in_preexec = not setup and not ARG_DEBUG_SANDBOX.get()

        def _preexec() -> None:
            if preexec:
                preexec()

            if sbx and apply_sandbox_in_preexec:
                # The env passed to subprocess.Popen() replaces the environment wholesale so any
                # modifications made by mkosi-sandbox would be overridden if we used that. Hence process the
                # environment in the preexec function.
                os.environ.clear()
                os.environ.update(env)

                try:
                    mkosi.sandbox.main(sbx)
                except Exception:
                    sys.excepthook(*ensure_exc_info())
                    os._exit(1)

        prefix = []
        if sbx and not apply_sandbox_in_preexec:
            module = stack.enter_context(resource_path(sys.modules[__package__ or __name__]))
            prefix = [
                *(["strace", "--detach-on=execve", "--string-limit=256"] if ARG_DEBUG_SANDBOX.get() else []),
                sys.executable, "-SI", os.fspath(module / "sandbox.py"),
                *sbx,
            ]  # fmt: skip

        try:
            proc = subprocess.Popen(
                [*setup, *prefix, *cmdline],
                stdin=stdin,
                stdout=stdout,
                stderr=stderr,
                text=True,
                user=user,
                group=group,
                # Python closes file descriptors after calling the preexec function. Hence we need to tell it
                # to keep the packed file descriptors intact instead of the original ones if --pack-fds is
                # used.
                pass_fds=(
                    range(SD_LISTEN_FDS_START, SD_LISTEN_FDS_START + len(pass_fds))
                    if apply_sandbox_in_preexec and "--pack-fds" in sbx
                    else pass_fds
                ),
                env=env if not sbx or not apply_sandbox_in_preexec else None,
                preexec_fn=_preexec,
            )
        except FileNotFoundError as e:
            die(f"{e.filename} not found.")

        try:
            yield proc
            proc.wait()
        except KeyboardInterrupt:
            proc.send_signal(signal.SIGINT)
            raise
        except BaseException:
            proc.terminate()
            raise
        finally:
            # Make sure any SIGINT/SIGTERM signal we sent is actually processed.
            proc.send_signal(signal.SIGCONT)
            returncode = proc.wait()

            if check and returncode is not None and returncode not in success_exit_status:
                if log:
                    log_process_failure(sbx, cmd, returncode)
                if ARG_DEBUG_SHELL.get():
                    # --suspend will freeze the debug shell with no way to unfreeze it so strip it from the
                    # sandbox if it's there.
                    if "--suspend" in prefix:
                        prefix.remove("--suspend")
                    if "--suspend" in sbx:
                        sbx.remove("--suspend")

                    subprocess.run(
                        [*setup, *prefix, "bash"],
                        check=False,
                        stdin=sys.stdin,
                        text=True,
                        user=user,
                        group=group,
                        env=env if not sbx or not apply_sandbox_in_preexec else None,
                        preexec_fn=_preexec,
                    )
                raise subprocess.CalledProcessError(returncode, cmdline)
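# Hypothetical usage sketch, not part of run.py, assuming mkosi.run is importable:
# spawn() is a context manager yielding a subprocess.Popen, useful when the
# caller needs to stream output or signal the process while it runs.
import subprocess

from mkosi.run import spawn

with spawn(["seq", "1", "3"], stdout=subprocess.PIPE) as process:
    assert process.stdout is not None
    for line in process.stdout:
        print("got:", line.strip())
# On context exit spawn() waits for the process and, because check defaults to
# True, raises subprocess.CalledProcessError on a non-zero exit status.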
def finalize_path(
    root: Optional[Path] = None,
    extra: Sequence[Path] = (),
    prefix_usr: bool = False,
    relaxed: bool = False,
) -> str:
    root = root or Path("/")
    path = [os.fspath(p) for p in extra]

    if relaxed:
        path += [
            s
            for s in os.environ["PATH"].split(":")
            if s in ("/usr/bin", "/usr/sbin") or not s.startswith("/usr")
        ]

        # Make sure that /usr/bin and /usr/sbin are always in $PATH.
        path += [s for s in ("/usr/bin", "/usr/sbin") if s not in path]
    else:
        path += ["/usr/bin", "/usr/sbin"]

    if prefix_usr:
        path = [os.fspath(root / s.lstrip("/")) if s in ("/usr/bin", "/usr/sbin") else s for s in path]

    return ":".join(unique(path))


def find_binary(
    *names: PathString,
    root: Optional[Path] = None,
    extra: Sequence[Path] = (),
) -> Optional[Path]:
    root = root or Path("/")
    path = finalize_path(root=root, extra=extra, prefix_usr=True)

    for name in names:
        if any(Path(name).is_relative_to(d) for d in extra):
            pass
        elif Path(name).is_absolute():
            name = root / Path(name).relative_to("/")
        elif "/" in str(name):
            name = root / name

        if binary := shutil.which(name, path=path):
            if root != Path("/") and not Path(binary).is_relative_to(root):
                return Path(binary)
            else:
                return Path("/") / Path(binary).relative_to(root)

    return None


class AsyncioThread(threading.Thread, Generic[T]):
    """
    The default threading.Thread() is not interruptible, so we make our own version by using the concurrency
    feature in python that is interruptible, namely asyncio.

    Additionally, we store any exception that the coroutine raises and re-raise it in join() if no other
    exception was raised before.
    """

    def __init__(
        self, target: Callable[[queue.SimpleQueue[T]], Awaitable[Any]], *args: Any, **kwargs: Any
    ) -> None:
        import asyncio

        self.target = target
        self.loop: queue.SimpleQueue[asyncio.AbstractEventLoop] = queue.SimpleQueue()
        self.exc: queue.SimpleQueue[BaseException] = queue.SimpleQueue()
        self.queue: queue.SimpleQueue[T] = queue.SimpleQueue()
        self.messages: list[T] = []
        super().__init__(*args, **kwargs)

    def run(self) -> None:
        import asyncio

        async def wrapper() -> None:
            self.loop.put(asyncio.get_running_loop())
            await self.target(self.queue)

        try:
            asyncio.run(wrapper())
        except asyncio.CancelledError:
            pass
        except BaseException as e:
            self.exc.put(e)

    def process(self) -> list[T]:
        while not self.queue.empty():
            self.messages += [self.queue.get()]

        return self.messages

    def wait_for(self, expected: T) -> None:
        while (message := self.queue.get()) != expected:
            self.messages += [message]

    def cancel(self) -> None:
        import asyncio.tasks

        loop = self.loop.get()
        for task in asyncio.tasks.all_tasks(loop):
            loop.call_soon_threadsafe(task.cancel)

    def __enter__(self) -> "AsyncioThread[T]":
        self.start()
        return self

    def __exit__(
        self,
        type: Optional[type[BaseException]],
        value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        self.cancel()
        self.join()
        self.process()

        if type is None:
            try:
                raise self.exc.get_nowait()
            except queue.Empty:
                pass


class SandboxProtocol(Protocol):
    def __call__(
        self,
        *,
        options: Sequence[PathString] = (),
    ) -> AbstractContextManager[list[PathString]]: ...


def nosandbox(
    *,
    options: Sequence[PathString] = (),
) -> AbstractContextManager[list[PathString]]:
    return contextlib.nullcontext([])


def workdir(path: Path, sandbox: Optional[SandboxProtocol] = None) -> str:
    subdir = "/" if sandbox and sandbox == nosandbox else "/work"
    return joinpath(subdir, os.fspath(path))
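# Hypothetical usage sketch, not part of run.py, assuming mkosi.run is importable:
# AsyncioThread runs a coroutine in a background thread and forwards messages
# through a queue; the coroutine is cancelled automatically when the context
# manager exits, and any stored exception is re-raised on the caller's side.
import asyncio
import queue

from mkosi.run import AsyncioThread


async def produce(q: "queue.SimpleQueue[str]") -> None:
    q.put("ready")
    await asyncio.sleep(3600)  # simulate a long-running background task


with AsyncioThread(produce) as thread:
    thread.wait_for("ready")  # block until the coroutine reports readiness
# __exit__() cancels the coroutine, joins the thread, drains the queue and
# re-raises any exception the coroutine stored.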
""" return flatten( ("--symlink", Path(root) / "etc" / f, f"/etc/{f}") for f in ("passwd", "group", "shadow", "gshadow") ) def network_options(*, network: bool) -> list[PathString]: return [ "--setenv", "SYSTEMD_OFFLINE", one_zero(network), *(["--unshare-net"] if not network else []), ] # fmt: skip @contextlib.contextmanager def vartmpdir() -> Iterator[Path]: # We want to use an empty subdirectory in the host's temporary directory as the sandbox's /var/tmp. d = Path(os.getenv("TMPDIR", "/var/tmp")) / f"mkosi-var-tmp-{uuid.uuid4().hex[:16]}" d.mkdir() try: yield d finally: # A directory that's used as an overlayfs workdir will contain a "work" subdirectory after the # overlayfs is unmounted. This "work" subdirectory will have permissions 000 and as such can't be # opened or searched unless the user has the CAP_DAC_OVERRIDE capability. shutil.rmtree() will try to # search the "work" subdirectory to remove anything in it which will fail with a permission error. To # circumvent this, if the work directory exists and is not empty, let's fork off a subprocess where # we acquire extra privileges and then invoke shutil.rmtree(). If the work directory exists but is # empty, let's just delete the "work" subdirectory first and then invoke shutil.rmtree(). Deleting # the subdirectory when it is empty is not a problem because deleting a subdirectory depends on the # permissions of the parent directory and not the directory itself. try: if (p := d / "work").exists(): p.rmdir() except OSError as e: if e.errno == errno.ENOTEMPTY: def remove() -> None: acquire_privileges() shutil.rmtree(d) fork_and_wait(remove) else: raise else: shutil.rmtree(d) @contextlib.contextmanager def sandbox_cmd( *, network: bool = False, devices: bool = False, scripts: Optional[Path] = None, tools: Path = Path("/"), relaxed: bool = False, overlay: Optional[Path] = None, options: Sequence[PathString] = (), extra: Sequence[Path] = (), ) -> Iterator[list[PathString]]: assert not (overlay and relaxed) with contextlib.ExitStack() as stack: module = stack.enter_context(resource_path(sys.modules[__package__ or __name__])) cmdline: list[PathString] = [ "--proc", "/proc", # We mounted a subdirectory of TMPDIR to /var/tmp so we unset TMPDIR so that /tmp or /var/tmp are # used instead. 
"--unsetenv", "TMPDIR", *network_options(network=network), ] # fmt: skip for d in ("usr", "opt"): if not (tools / d).exists(): continue if overlay and (overlay / d).exists(): cmdline += [ "--overlay-lowerdir", tools / d, "--overlay-lowerdir", overlay / d, "--overlay", Path("/") / d, ] # fmt: skip else: cmdline += ["--ro-bind", tools / d, Path("/") / d] for d in ("bin", "sbin", "lib", "lib32", "lib64"): if (p := tools / d).is_symlink(): cmdline += ["--symlink", p.readlink(), Path("/") / p.relative_to(tools)] elif p.is_dir(): cmdline += ["--ro-bind", p, Path("/") / p.relative_to(tools)] if (tools / "nix/store").exists(): cmdline += ["--bind", tools / "nix/store", "/nix/store"] if relaxed: for p in Path("/").iterdir(): if p not in ( Path("/proc"), Path("/usr"), Path("/opt"), Path("/nix"), Path("/bin"), Path("/sbin"), Path("/lib"), Path("/lib32"), Path("/lib64"), Path("/etc"), ): if p.is_symlink(): cmdline += ["--symlink", p.readlink(), p] else: cmdline += ["--bind", p, p] cmdline += ["--ro-bind", tools / "etc", "/etc"] if tools != Path("/"): for f in ("passwd", "group", "shadow", "gshadow", "nsswitch.conf", "machine-id"): if Path(f"/etc/{f}").exists() and (tools / "etc" / f).exists(): cmdline += ["--ro-bind", f"/etc/{f}", f"/etc/{f}"] else: cmdline += [ "--dir", "/var/tmp", "--dir", "/var/log", "--unshare-ipc", # apivfs_script_cmd() and chroot_script_cmd() are executed from within the sandbox, but they # still use sandbox.py, so we make sure it is available inside the sandbox so it can be # executed there as well. "--ro-bind", module / "sandbox.py", "/sandbox.py", ] # fmt: skip if devices: cmdline += ["--bind", "/sys", "/sys", "--bind", "/dev", "/dev"] else: cmdline += ["--dev", "/dev"] # If we're using /usr from a tools tree, we have to use /etc/alternatives and /etc/ld.so.cache # from the tools tree as well if they exists since those are directly related to /usr. 
for p in (Path("etc/alternatives"), Path("etc/ld.so.cache")): if (tools / p).exists(): cmdline += ["--ro-bind", tools / p, Path("/") / p] if network and (p := Path("/run/systemd/resolve")).exists(): cmdline += ["--ro-bind", p, p] if network and (p := Path("/etc/resolv.conf")).exists(): cmdline += ["--ro-bind", p, p] path = finalize_path( root=tools, extra=[Path("/scripts"), *extra] if scripts else extra, relaxed=relaxed, ) cmdline += ["--setenv", "PATH", path] if scripts: cmdline += ["--ro-bind", scripts, "/scripts"] tmp: Optional[Path] if not overlay and not relaxed: tmp = stack.enter_context(vartmpdir()) yield [ *cmdline, "--bind", tmp, "/var/tmp", "--dir", "/etc", "--dir", "/var", "--dir", "/tmp", "--dir", "/run", *options, ] # fmt: skip return if overlay and (overlay / "etc").exists(): cmdline += ["--ro-bind", overlay / "etc", "/etc"] else: cmdline += ["--dir", "/etc"] for d in ("srv", "media", "mnt", "var", "run", "tmp"): tmp = None if d not in ("run", "tmp"): with umask(~0o755): tmp = stack.enter_context(vartmpdir()) if overlay and (overlay / d).exists(): work = None if tmp: with umask(~0o755): work = stack.enter_context(vartmpdir()) cmdline += [ "--overlay-lowerdir", overlay / d, "--overlay-upperdir", tmp or "tmpfs", *(["--overlay-workdir", os.fspath(work)] if work else []), "--overlay", Path("/") / d, ] # fmt: skip elif not relaxed: if tmp: cmdline += ["--bind", tmp, Path("/") / d] else: cmdline += ["--dir", Path("/") / d] # If we put an overlayfs on /var, and /var/tmp is not in the sandbox tree, make sure /var/tmp is a # bind mount of a regular empty directory instead of the overlays so tools like systemd-repart can # use the underlying filesystem features from btrfs when using /var/tmp. if overlay and not (overlay / "var/tmp").exists(): tmp = stack.enter_context(vartmpdir()) cmdline += ["--bind", tmp, "/var/tmp"] yield [*cmdline, *options] def apivfs_options(*, root: Path = Path("/buildroot")) -> list[PathString]: return [ "--tmpfs", root / "run", "--tmpfs", root / "tmp", "--proc", root / "proc", "--dev", root / "dev", # Nudge gpg to create its sockets in /run by making sure /run/user/0 exists. "--dir", root / "run/user/0", # Make sure anything running in the root directory thinks it's in a container. $container can't # always be accessed so we write /run/host/container-manager as well which is always accessible. "--write", "mkosi", root / "run/host/container-manager", ] # fmt: skip def chroot_options() -> list[PathString]: return [ # Let's always run as (fake) root when we chroot inside the image as tools executed within the image # could have builtin assumptions about files being owned by root. "--become-root", # Unshare IPC namespace so any tests that exercise IPC related features don't fail with permission # errors as --become-root implies unsharing a user namespace which won't have access to the parent's # IPC namespace anymore. "--unshare-ipc", "--setenv", "container", "mkosi", "--setenv", "HOME", "/", "--setenv", "PATH", "/usr/bin:/usr/sbin", "--setenv", "BUILDROOT", "/", ] # fmt: skip @contextlib.contextmanager def chroot_cmd( *, root: Callable[[PathString], list[str]], network: bool = False, options: Sequence[PathString] = (), ) -> Iterator[list[PathString]]: with vartmpdir() as dir: cmdline: list[PathString] = [ *root("/"), # We mounted a subdirectory of TMPDIR to /var/tmp so we unset TMPDIR so that /tmp or /var/tmp are # used instead. 
"--unsetenv", "TMPDIR", *network_options(network=network), *apivfs_options(root=Path("/")), *chroot_options(), ] # fmt: skip if network: for p in (Path("/etc/resolv.conf"), Path("/run/systemd/resolve")): if p.exists(): cmdline += ["--ro-bind", p, p] yield [*cmdline, "--bind", dir, "/var/tmp", *options] def finalize_interpreter(tools: bool) -> str: if tools: return "python3" exe = sys.executable if Path(exe).is_relative_to("/usr"): return exe return "python3" def glob_in_sandbox( *globs: str, sandbox: AbstractContextManager[Sequence[PathString]] = contextlib.nullcontext([]), ) -> list[Path]: return [ Path(s) for s in run( [ "bash", "-c", rf"shopt -s nullglob && printf '%s\n' {' '.join(globs)} | xargs -r readlink -f", ], sandbox=sandbox, stdout=subprocess.PIPE, ) .stdout.strip() .splitlines() ] def exists_in_sandbox( path: PathString, sandbox: AbstractContextManager[Sequence[PathString]] = contextlib.nullcontext([]), ) -> bool: return ( run( ["bash", "-c", rf"test -e {path}"], sandbox=sandbox, check=False, ).returncode == 0 ) mkosi-26/mkosi/sandbox.py000077500000000000000000001241231512054777600156030ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later """ This is a standalone implementation of sandboxing which is used by mkosi. Note that this is invoked many times while building the image and as a result, the performance of this script has a substantial impact on the performance of mkosi itself. To keep the runtime of this script to a minimum, please don't import any extra modules if it can be avoided. """ import ctypes import os import sys import warnings # noqa: F401 (loaded lazily by os.execvp() which happens too late) __version__ = "26" # The following constants are taken from the Linux kernel headers. AT_EMPTY_PATH = 0x1000 AT_FDCWD = -100 AT_NO_AUTOMOUNT = 0x800 AT_RECURSIVE = 0x8000 AT_SYMLINK_NOFOLLOW = 0x100 BTRFS_IOC_SNAP_CREATE_V2 = 0x50009417 BTRFS_IOC_SUBVOL_CREATE_V2 = 0x50009418 BTRFS_IOC_SNAP_DESTROY_V2 = 0x5000943F BTRFS_SUBVOL_NAME_MAX = 4039 BTRFS_SUPER_MAGIC = 0x9123683E CAP_CHOWN = 0 CAP_DAC_OVERRIDE = 1 CAP_DAC_READ_SEARCH = 2 CAP_FOWNER = 3 CAP_FSETID = 4 CAP_SETGID = 6 CAP_SETUID = 7 CAP_SETPCAP = 8 CAP_NET_BIND_SERVICE = 10 CAP_NET_ADMIN = 12 CAP_SYS_CHROOT = 18 CAP_SYS_PTRACE = 19 CAP_SYS_ADMIN = 21 CAP_SYS_RESOURCE = 24 CAP_SETFCAP = 31 CLONE_NEWIPC = 0x08000000 CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x00020000 CLONE_NEWUSER = 0x10000000 EBADF = 9 ENAMETOOLONG = 36 EPERM = 1 ENOENT = 2 ENOSYS = 38 F_DUPFD = 0 F_GETFD = 1 FD_CLOEXEC = 1 FS_IOC_GETFLAGS = 0x80086601 FS_IOC_SETFLAGS = 0x40086602 FS_NOCOW_FL = 0x00800000 LINUX_CAPABILITY_U32S_3 = 2 LINUX_CAPABILITY_VERSION_3 = 0x20080522 MNT_DETACH = 2 MOUNT_ATTR_RDONLY = 0x00000001 MOUNT_ATTR_NOSUID = 0x00000002 MOUNT_ATTR_NODEV = 0x00000004 MOUNT_ATTR_NOEXEC = 0x00000008 MOUNT_ATTR_SIZE_VER0 = 32 MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 MS_BIND = 4096 MS_MOVE = 8192 MS_REC = 16384 MS_SHARED = 1 << 20 MS_SLAVE = 1 << 19 NR_mount_setattr = 442 NR_move_mount = 429 NR_open_tree = 428 OPEN_TREE_CLOEXEC = os.O_CLOEXEC OPEN_TREE_CLONE = 1 OVERLAYFS_SUPER_MAGIC = 0x794C7630 PR_CAP_AMBIENT = 47 PR_CAP_AMBIENT_IS_SET = 1 PR_CAP_AMBIENT_RAISE = 2 PR_CAP_AMBIENT_LOWER = 3 PR_CAPBSET_DROP = 24 # These definitions are taken from the libseccomp headers SCMP_ACT_ALLOW = 0x7FFF0000 SCMP_ACT_ERRNO = 0x00050000 SD_LISTEN_FDS_START = 3 SIGSTOP = 19 class btrfs_ioctl_vol_args_v2(ctypes.Structure): _fields_ = [ ("fd", ctypes.c_int64), ("transid", ctypes.c_uint64), ("flags", ctypes.c_uint64), ("unused", ctypes.c_uint64 * 
4), ("name", ctypes.c_char * (BTRFS_SUBVOL_NAME_MAX + 1)), ] class mount_attr(ctypes.Structure): _fields_ = [ ("attr_set", ctypes.c_uint64), ("attr_clr", ctypes.c_uint64), ("propagation", ctypes.c_uint64), ("userns_fd", ctypes.c_uint64), ] class cap_user_header_t(ctypes.Structure): # __user_cap_header_struct _fields_ = [ ("version", ctypes.c_uint32), ("pid", ctypes.c_int), ] class cap_user_data_t(ctypes.Structure): # __user_cap_data_struct _fields_ = [ ("effective", ctypes.c_uint32), ("permitted", ctypes.c_uint32), ("inheritable", ctypes.c_uint32), ] libc = ctypes.CDLL(None, use_errno=True) libc.syscall.restype = ctypes.c_long libc.unshare.argtypes = (ctypes.c_int,) libc.statfs.argtypes = (ctypes.c_char_p, ctypes.c_void_p) libc.eventfd.argtypes = (ctypes.c_int, ctypes.c_int) libc.mount.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p) libc.pivot_root.argtypes = (ctypes.c_char_p, ctypes.c_char_p) libc.umount2.argtypes = (ctypes.c_char_p, ctypes.c_int) libc.capget.argtypes = (ctypes.c_void_p, ctypes.c_void_p) libc.capset.argtypes = (ctypes.c_void_p, ctypes.c_void_p) libc.fcntl.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_int) def terminal_is_dumb() -> bool: return not sys.stdout.isatty() or not sys.stderr.isatty() or os.getenv("TERM", "") == "dumb" # fmt: off ANSI_BOLD = "\033[0;1;39m" if not terminal_is_dumb() else "" ANSI_BLUE = "\033[0;1;34m" if not terminal_is_dumb() else "" ANSI_GRAY = "\033[0;38;5;245m" if not terminal_is_dumb() else "" ANSI_RED = "\033[31;1m" if not terminal_is_dumb() else "" ANSI_YELLOW = "\033[33;1m" if not terminal_is_dumb() else "" ANSI_RESET = "\033[0m" if not terminal_is_dumb() else "" # fmt: on ENOSYS_MSG = f"""\ {ANSI_RED}mkosi was unable to invoke the {{syscall}}() system call.{ANSI_RESET} This probably means either the system call is not implemented by the running kernel version ({{kver}}) or the system call is prohibited via seccomp if mkosi is being executed inside a containerized environment.\ """ def is_main() -> bool: return __name__ == "__main__" def oserror(syscall: str, filename: str = "", errno: int = 0) -> None: errno = abs(errno) or ctypes.get_errno() if errno == ENOSYS and is_main(): print(ENOSYS_MSG.format(syscall=syscall, kver=os.uname().version), file=sys.stderr) raise OSError(ctypes.get_errno(), os.strerror(errno), filename or None) def unshare(flags: int) -> None: if libc.unshare(flags) < 0: oserror("unshare") def statfs(path: str) -> int: # struct statfs is 120 bytes, which equals 15 longs. Since we only care about the first field # and the first field is of type long, we avoid declaring the full struct by just passing an # array of 15 longs as the output argument. buffer = (ctypes.c_long * 15)() if libc.statfs(path.encode(), ctypes.byref(buffer)) < 0: oserror("statfs", path) return int(buffer[0]) def mount(src: str, dst: str, type: str, flags: int, options: str) -> None: srcb = src.encode() if src else None typeb = type.encode() if type else None optionsb = options.encode() if options else None if libc.mount(srcb, dst.encode(), typeb, flags, optionsb) < 0: oserror("mount", dst) def umount2(path: str, flags: int = 0) -> None: if libc.umount2(path.encode(), flags) < 0: oserror("umount2", path) def capability_mask(capabilities: list[int]) -> int: mask = 0 for cap in capabilities: mask |= 1 << cap return mask def drop_capabilities(*, keep: list[int]) -> None: # First, fetch the permitted capabilities and AND them # with the ones with we want to keep to get the final list # of capabilities. 
header = cap_user_header_t(LINUX_CAPABILITY_VERSION_3, 0) payload = (cap_user_data_t * LINUX_CAPABILITY_U32S_3)() if libc.capget(ctypes.addressof(header), ctypes.byref(payload)) < 0: oserror("capget") permitted = payload[1].permitted << 32 | payload[0].permitted permitted &= capability_mask(keep) # Next, drop unwanted capabilities from the bounding set as # later we'll drop the capability that lets us do so (CAP_SETPCAP). with open("/proc/sys/kernel/cap_last_cap", "rb") as f: last_cap = int(f.read()) libc.prctl.argtypes = (ctypes.c_int, ctypes.c_ulong) for cap in range(ctypes.sizeof(ctypes.c_uint64) * 8): if cap > last_cap: break if not (permitted & (1 << cap)) and libc.prctl(PR_CAPBSET_DROP, cap) < 0: oserror("prctl") # Now, modify the permitted, effective and inheritable # capability sets with capset(). payload[0].permitted = permitted payload[1].permitted = permitted >> 32 payload[0].effective = permitted payload[1].effective = permitted >> 32 payload[0].inheritable = permitted payload[1].inheritable = permitted >> 32 if libc.capset(ctypes.addressof(header), ctypes.byref(payload)) < 0: oserror("capset") # Finally, modify the ambient set using the associated pcrtl()'s. libc.prctl.argtypes = (ctypes.c_int, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong) for cap in range(ctypes.sizeof(ctypes.c_uint64) * 8): if cap > last_cap: break if permitted & (1 << cap): if libc.prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap, 0, 0) < 0: oserror("prctl") else: r = libc.prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_IS_SET, cap, 0, 0) if r < 0: oserror("prctl") if r > 0 and libc.prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_LOWER, cap, 0, 0) < 0: oserror("prctl") def have_effective_cap(capability: int) -> bool: with open("/proc/self/status", "rb") as f: for line in f.readlines(): if line.startswith(b"CapEff:"): return (int(line[7:], 16) & (1 << capability)) != 0 return False def seccomp_suppress(*, chown: bool = False, sync: bool = False) -> None: """ There's still a few files and directories left in distributions in /usr and /etc that are not owned by root. This causes package managers to fail to install the corresponding packages when run from a single uid user namespace. Unfortunately, non-root users can only create files owned by their own uid. To still allow non-root users to build images, if requested we install a seccomp filter that makes calls to chown() and friends a noop. 
""" if not chown and not sync: return libseccomp = ctypes.CDLL("libseccomp.so.2") if libseccomp is None: raise FileNotFoundError("libseccomp.so.2") libseccomp.seccomp_init.argtypes = (ctypes.c_uint32,) libseccomp.seccomp_init.restype = ctypes.c_void_p libseccomp.seccomp_release.argtypes = (ctypes.c_void_p,) libseccomp.seccomp_release.restype = None libseccomp.seccomp_syscall_resolve_name.argtypes = (ctypes.c_char_p,) libseccomp.seccomp_rule_add_exact.argtypes = ( ctypes.c_void_p, ctypes.c_uint32, ctypes.c_int, ctypes.c_uint, ) libseccomp.seccomp_load.argtypes = (ctypes.c_void_p,) seccomp = libseccomp.seccomp_init(SCMP_ACT_ALLOW) suppress = [] if chown: suppress += [ b"chown", b"chown32", b"fchown", b"fchown32", b"fchownat", b"lchown", b"lchown32", ] if sync: suppress += [ b"fdatasync", b"fsync", b"msync", b"sync", b"sync_file_range", b"sync_file_range2", b"syncfs", ] try: for syscall in suppress: id = libseccomp.seccomp_syscall_resolve_name(syscall) if id < 0: continue r = libseccomp.seccomp_rule_add_exact(seccomp, SCMP_ACT_ERRNO, id, 0) if r < 0: oserror("seccomp_rule_add_exact", errno=r) r = libseccomp.seccomp_load(seccomp) if r < 0: oserror("seccomp_load", errno=r) finally: libseccomp.seccomp_release(seccomp) def lsattr(path: str) -> int: attr = ctypes.c_int() r = 0 fd = os.open(path, os.O_CLOEXEC | os.O_RDONLY) libc.ioctl.argtypes = (ctypes.c_int, ctypes.c_long, ctypes.c_void_p) if libc.ioctl(fd, FS_IOC_GETFLAGS, ctypes.byref(attr)) < 0: r = ctypes.get_errno() os.close(fd) if r != 0: raise OSError(r, os.strerror(r), path) return attr.value def chattr(path: str, attr: int) -> None: cattr = ctypes.c_int(attr) fd = os.open(path, os.O_CLOEXEC | os.O_RDONLY) r = 0 libc.ioctl.argtypes = (ctypes.c_int, ctypes.c_long, ctypes.c_void_p) if libc.ioctl(fd, FS_IOC_SETFLAGS, ctypes.byref(cattr)) < 0: r = ctypes.get_errno() os.close(fd) if r != 0: raise OSError(r, os.strerror(r), path) def validate_subvol_name(name: str) -> None: if len(name) > BTRFS_SUBVOL_NAME_MAX: raise OSError(ENAMETOOLONG, os.strerror(ENAMETOOLONG), name) def btrfs_subvol_ioctl(path: str, cmd: int, src_fd: int = 0) -> None: parent = os.path.dirname(path) name = os.path.basename(path) validate_subvol_name(name) fd = os.open(parent, os.O_CLOEXEC | os.O_RDONLY | os.O_DIRECTORY) try: args = btrfs_ioctl_vol_args_v2(fd=src_fd, name=name.encode()) libc.ioctl.argtypes = (ctypes.c_int, ctypes.c_long, ctypes.c_void_p) if libc.ioctl(fd, cmd, ctypes.byref(args)) < 0: oserror("ioctl", path) finally: os.close(fd) def btrfs_subvol_create(path: str) -> None: btrfs_subvol_ioctl(path, BTRFS_IOC_SUBVOL_CREATE_V2) def btrfs_subvol_snapshot(src: str, dst: str) -> None: src_fd = os.open(src, os.O_CLOEXEC | os.O_RDONLY | os.O_DIRECTORY) try: btrfs_subvol_ioctl(dst, BTRFS_IOC_SNAP_CREATE_V2, src_fd) finally: os.close(src_fd) def btrfs_subvol_delete(path: str) -> None: btrfs_subvol_ioctl(path, BTRFS_IOC_SNAP_DESTROY_V2) def join_new_session_keyring() -> None: libkeyutils = ctypes.CDLL("libkeyutils.so.1") if libkeyutils is None: raise FileNotFoundError("libkeyutils.so.1") libkeyutils.keyctl_join_session_keyring.argtypes = (ctypes.c_char_p,) libkeyutils.keyctl_join_session_keyring.restype = ctypes.c_int32 keyring = libkeyutils.keyctl_join_session_keyring(None) if keyring == -1: oserror("keyctl") def mount_rbind(src: str, dst: str, attrs: int = 0) -> None: """ When using the old mount syscall to do a recursive bind mount, mount options are not applied recursively. 
Because we want to do recursive read-only bind mounts in some cases, we use the new mount API for that which does allow recursively changing mount options when doing bind mounts. """ flags = AT_NO_AUTOMOUNT | AT_RECURSIVE | AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE try: libc.open_tree.argtypes = (ctypes.c_int, ctypes.c_char_p, ctypes.c_uint) fd = libc.open_tree(AT_FDCWD, src.encode(), flags) except AttributeError: libc.syscall.argtypes = (ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint) fd = libc.syscall(NR_open_tree, AT_FDCWD, src.encode(), flags) if fd < 0: oserror("open_tree", src) try: attr = mount_attr() attr.attr_set = attrs flags = AT_EMPTY_PATH | AT_RECURSIVE try: libc.mount_setattr.argtypes = ( ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, ) r = libc.mount_setattr(fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0) except AttributeError: libc.syscall.argtypes = ( ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, ) r = libc.syscall(NR_mount_setattr, fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0) if r < 0: oserror("mount_setattr", src) try: libc.move_mount.argtypes = ( ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ) r = libc.move_mount(fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH) except AttributeError: libc.syscall.argtypes = ( ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ) r = libc.syscall(NR_move_mount, fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH) if r < 0: oserror("move_mount", dst) finally: os.close(fd) class umask: def __init__(self, mask: int) -> None: self.mask = mask def __enter__(self) -> None: self.mask = os.umask(self.mask) def __exit__(self, *args: object, **kwargs: object) -> None: os.umask(self.mask) def become_user(uid: int, gid: int) -> None: """ This function implements the required dance to unshare a user namespace and map the current user to itself or to root within it. The kernel only allows a process running outside of the unshared user namespace to write the necessary uid and gid mappings, so we fork off a child process, make it wait until the parent process has unshared a user namespace, and then writes the necessary uid and gid mappings. """ ppid = os.getpid() event = libc.eventfd(0, 0) if event < 0: oserror("eventfd") pid = os.fork() if pid == 0: try: os.read(event, ctypes.sizeof(ctypes.c_uint64)) os.close(event) with open(f"/proc/{ppid}/setgroups", "wb") as f: f.write(b"deny\n") with open(f"/proc/{ppid}/gid_map", "wb") as f: f.write(f"{gid} {os.getgid()} 1\n".encode()) with open(f"/proc/{ppid}/uid_map", "wb") as f: f.write(f"{uid} {os.getuid()} 1\n".encode()) except OSError as e: os._exit(e.errno or 1) except BaseException: os._exit(1) else: os._exit(0) try: unshare(CLONE_NEWUSER) except OSError as e: if e.errno == EPERM and is_main(): print(UNSHARE_EPERM_MSG, file=sys.stderr) raise finally: os.write(event, ctypes.c_uint64(1)) os.close(event) _, status = os.waitpid(pid, 0) rc = os.waitstatus_to_exitcode(status) if rc != 0: raise OSError(rc, os.strerror(rc)) def acquire_privileges(*, become_root: bool = False, network: bool = False) -> bool: if have_effective_cap(CAP_SYS_ADMIN) and (os.getuid() == 0 or not become_root): return False if become_root: become_user(0, 0) else: become_user(os.getuid(), os.getgid()) # When unsharing a user namespace, the process user has a full set of capabilities in the new user # namespace. 
This allows the process to do mounts after unsharing a mount namespace for example. However, # these capabilities are lost again when the user executes a subprocess. As we also want subprocesses # invoked by the user to be able to mount stuff, we make sure the capabilities we are interested in are # inherited across execve() by adding all the these capabilities to the inherited and ambient capability # sets, which makes sure that they are passed down to subprocesses, regardless if we're uid 0 in the user # namespace or not. caps = [ CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_DAC_READ_SEARCH, CAP_FOWNER, CAP_FSETID, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_SYS_CHROOT, CAP_SYS_PTRACE, CAP_SYS_ADMIN, CAP_SYS_RESOURCE, CAP_SETFCAP, ] if network: # If we're unsharing the network namespace, we want CAP_NET_BIND_SERVICE and CAP_NET_ADMIN as well. caps += [ CAP_NET_BIND_SERVICE, CAP_NET_ADMIN, ] drop_capabilities(keep=caps) return True def userns_has_single_user() -> bool: try: with open("/proc/self/uid_map", "rb") as f: lines = f.readlines() except FileNotFoundError: return False return len(lines) == 1 and int(lines[0].split()[-1]) == 1 def chase(root: str, path: str) -> str: if root == "/": return os.path.realpath(path) cwd = os.getcwd() fd = os.open("/", os.O_CLOEXEC | os.O_PATH | os.O_DIRECTORY) try: os.chroot(root) os.chdir("/") return joinpath(root, os.path.realpath(path)) finally: os.fchdir(fd) os.close(fd) os.chroot(".") os.chdir(cwd) def splitpath(path: str) -> tuple[str, ...]: return tuple(p for p in path.split("/") if p) def joinpath(path: str, *paths: str) -> str: return os.path.join(path, *[p.lstrip("/") for p in paths]) def is_relative_to(one: str, two: str) -> bool: return os.path.commonpath((one, two)) == two def pack_file_descriptors() -> int: fds = [] with os.scandir("/proc/self/fd") as it: for e in it: if not e.is_symlink() and (e.is_file() or e.is_dir()): continue try: fd = int(e.name) except ValueError: continue if fd < SD_LISTEN_FDS_START: continue fds.append(fd) # os.scandir() either opens a file descriptor to the given path or dups the given file descriptor. Either # way, there will be an extra file descriptor in the fds array that's not valid anymore now, so find out # which one and drop it. # # Also, since python 3.14, importing ctypes can result in an extra file descriptor being opened by # libffi for handling closures, which we don't use. This file descriptor will have O_CLOEXEC set, and # since we immediately call execvp() after calling pack_file_descriptors(), we should be OK to close all # file descriptors marked with FD_CLOEXEC here already. def process_fd(fd: int) -> bool: flags = libc.fcntl(fd, F_GETFD, 0) if flags < 0: return False if flags & FD_CLOEXEC: os.close(fd) return False return True fds = sorted(fd for fd in fds if process_fd(fd)) # The following is a reimplementation of pack_fds() in systemd. 
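# --- Illustrative aside (not part of mkosi): the packing below implements the
# systemd socket-activation contract: inherited descriptors end up contiguous
# from SD_LISTEN_FDS_START (3) and are advertised via $LISTEN_FDS/$LISTEN_PID,
# which main() exports further down. A consumer sketch under that assumption:
#
#     if os.environ.get("LISTEN_PID") == str(os.getpid()):
#         nfds = int(os.environ.get("LISTEN_FDS", "0"))
#         fds = list(range(SD_LISTEN_FDS_START, SD_LISTEN_FDS_START + nfds))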
if len(fds) == 0: return 0 start = 0 while True: restart_from = -1 for i in range(start, len(fds)): if fds[i] == SD_LISTEN_FDS_START + i: continue nfd = libc.fcntl(fds[i], F_DUPFD, SD_LISTEN_FDS_START + i) if nfd < 0: oserror("fcntl") try: os.close(fds[i]) except OSError as e: if e.errno != EBADF: raise fds[i] = nfd if nfd != (SD_LISTEN_FDS_START + i) and restart_from < 0: restart_from = i if restart_from < 0: break start = restart_from assert fds[0] == SD_LISTEN_FDS_START return len(fds) class FSOperation: def __init__(self, dst: str, *, relative: bool = False) -> None: self.dst = dst self.relative = relative def execute(self, oldroot: str, newroot: str) -> None: raise NotImplementedError() @classmethod def optimize(cls, fsops: list["FSOperation"]) -> list["FSOperation"]: binds: dict[BindOperation, None] = {} rest = [] for fsop in fsops: if isinstance(fsop, BindOperation): binds[fsop] = None else: rest.append(fsop) # Drop all bind mounts that are mounted from beneath another bind mount to the same # location within the new rootfs. optimized = [ m for m in binds if not any( m != n and m.readonly == n.readonly and m.required == n.required and m.relative == n.relative and is_relative_to(m.src, n.src) and is_relative_to(m.dst, n.dst) and os.path.relpath(m.src, n.src) == os.path.relpath(m.dst, n.dst) for n in binds ) ] # Make sure bind mounts override other operations on the same destination by appending them # to the rest and depending on python's stable sort behavior. Additionally, relative operations # always go last. return sorted([*rest, *optimized], key=lambda fsop: (fsop.relative, splitpath(fsop.dst))) class BindOperation(FSOperation): def __init__(self, src: str, dst: str, *, readonly: bool, required: bool, relative: bool) -> None: self.src = src self.readonly = readonly self.required = required super().__init__(dst, relative=relative) def __hash__(self) -> int: return hash((splitpath(self.src), splitpath(self.dst), self.readonly, self.required)) def __eq__(self, other: object) -> bool: return isinstance(other, BindOperation) and self.__hash__() == other.__hash__() def execute(self, oldroot: str, newroot: str) -> None: src = chase(newroot if self.relative else oldroot, self.src) if not os.path.exists(src) and not self.required: return # If we're mounting a file on top of a symlink, mount directly on top of the symlink instead of # resolving it. dst = joinpath(newroot, self.dst) if not os.path.isdir(src) and os.path.islink(dst): return mount_rbind(src, dst, attrs=MOUNT_ATTR_RDONLY if self.readonly else 0) dst = chase(newroot, self.dst) if not os.path.exists(dst): isfile = os.path.isfile(src) with umask(~0o755): os.makedirs(os.path.dirname(dst), exist_ok=True) with umask(~0o644 if isfile else ~0o755): if isfile: os.close(os.open(dst, os.O_CREAT | os.O_CLOEXEC | os.O_EXCL)) else: os.mkdir(dst) mount_rbind(src, dst, attrs=MOUNT_ATTR_RDONLY if self.readonly else 0) class ProcOperation(FSOperation): def execute(self, oldroot: str, newroot: str) -> None: dst = chase(newroot, self.dst) with umask(~0o755): os.makedirs(dst, exist_ok=True) mount_rbind(joinpath(oldroot, "proc"), dst) class DevOperation(FSOperation): def __init__(self, ttyname: str, dst: str) -> None: self.ttyname = ttyname super().__init__(dst) def execute(self, oldroot: str, newroot: str) -> None: # We don't put actual devices in /dev, just the API stuff in there that all manner of # things depend on, like /dev/null. 
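# --- Illustrative aside (not part of mkosi): the redundancy test in
# FSOperation.optimize() above drops a bind that an enclosing bind of the
# same kind already provides. With hypothetical paths:
#
#     outer: /srv/tools/usr     -> /usr
#     inner: /srv/tools/usr/bin -> /usr/bin
#
# inner is skipped because both of its paths are relative to the outer pair
# and os.path.relpath(inner.src, outer.src) equals
# os.path.relpath(inner.dst, outer.dst) ("bin" on both sides), so mounting
# the outer bind alone yields the identical tree.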
dst = chase(newroot, self.dst) with umask(~0o755): os.makedirs(dst, exist_ok=True) # Note that the mode is crucial here. If the default mode (1777) is used, trying to access # /dev/null fails with EACCESS for unknown reasons. mount("tmpfs", dst, "tmpfs", 0, "mode=0755") for node in ("null", "zero", "full", "random", "urandom", "tty", "fuse"): nsrc = joinpath(oldroot, "dev", node) if node == "fuse" and not os.path.exists(nsrc): continue ndst = joinpath(dst, node) os.close(os.open(ndst, os.O_CREAT | os.O_CLOEXEC | os.O_EXCL)) mount(nsrc, ndst, "", MS_BIND, "") for i, node in enumerate(("stdin", "stdout", "stderr")): os.symlink(f"/proc/self/fd/{i}", joinpath(dst, node)) os.symlink("/proc/self/fd", joinpath(dst, "fd")) os.symlink("/proc/kcore", joinpath(dst, "core")) with umask(~0o1777): os.mkdir(joinpath(dst, "shm"), mode=0o1777) with umask(~0o755): os.mkdir(joinpath(dst, "pts")) mount("devpts", joinpath(dst, "pts"), "devpts", 0, "newinstance,ptmxmode=0666,mode=620") os.symlink("pts/ptmx", joinpath(dst, "ptmx")) if self.ttyname: os.close(os.open(joinpath(dst, "console"), os.O_CREAT | os.O_CLOEXEC | os.O_EXCL)) mount(joinpath(oldroot, self.ttyname), joinpath(dst, "console"), "", MS_BIND, "") class TmpfsOperation(FSOperation): def execute(self, oldroot: str, newroot: str) -> None: dst = chase(newroot, self.dst) with umask(~0o755): os.makedirs(dst, exist_ok=True) options = "" if any(dst.endswith(suffix) for suffix in ("/tmp", "/var/tmp")) else "mode=0755" mount("tmpfs", dst, "tmpfs", 0, options) class DirOperation(FSOperation): def execute(self, oldroot: str, newroot: str) -> None: dst = chase(newroot, self.dst) with umask(~0o755): os.makedirs(os.path.dirname(dst), exist_ok=True) mode = 0o1777 if any(dst.endswith(suffix) for suffix in ("/tmp", "/var/tmp")) else 0o755 if not os.path.exists(dst): with umask(~mode): os.mkdir(dst, mode=mode) class SymlinkOperation(FSOperation): def __init__(self, src: str, dst: str) -> None: self.src = src super().__init__(dst) def execute(self, oldroot: str, newroot: str) -> None: dst = joinpath(newroot, self.dst) try: return os.symlink(self.src, dst) except FileExistsError: if os.path.islink(dst) and os.readlink(dst) == self.src: return if os.path.isdir(dst): raise # If the target already exists and is not a directory, create the symlink somewhere else and mount # it over the existing file or symlink. os.symlink(self.src, "/symlink") mount_rbind("/symlink", dst) os.unlink("/symlink") class WriteOperation(FSOperation): def __init__(self, data: str, dst: str) -> None: self.data = data super().__init__(dst) def execute(self, oldroot: str, newroot: str) -> None: dst = chase(newroot, self.dst) with umask(~0o755): os.makedirs(os.path.dirname(dst), exist_ok=True) with open(dst, "wb") as f: f.write(self.data.encode()) class OverlayOperation(FSOperation): def __init__(self, lowerdirs: tuple[str, ...], upperdir: str, workdir: str, dst: str) -> None: self.lowerdirs = lowerdirs self.upperdir = upperdir self.workdir = workdir super().__init__(dst) # This supports being used as a context manager so we can reuse the logic for mount_overlay() # in mounts.py. 
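# --- Illustrative aside (not part of mkosi): used as a context manager, the
# operation mounts on enter and unmounts on exit. A minimal usage sketch with
# hypothetical paths (requires the privileges mkosi acquires elsewhere):
#
#     with OverlayOperation(("/lower1", "/lower2"), "/upper", "/work", "/mnt"):
#         ...  # /mnt now shows /upper stacked over the lower directories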
def __enter__(self) -> None: self.execute("/", "/") def __exit__(self, *args: object, **kwargs: object) -> None: umount2(self.dst) def execute(self, oldroot: str, newroot: str) -> None: lowerdirs = tuple(chase(oldroot, p) for p in self.lowerdirs) upperdir = ( chase(oldroot, self.upperdir) if self.upperdir and self.upperdir != "tmpfs" else self.upperdir ) workdir = chase(oldroot, self.workdir) if self.workdir else None dst = chase(newroot, self.dst) for p in lowerdirs: if not os.path.exists(p): oserror("mount", p, ENOENT) if upperdir and upperdir != "tmpfs" and upperdir != dst and not os.path.exists(upperdir): oserror("mount", upperdir, ENOENT) if workdir and not os.path.exists(workdir): oserror("mount", workdir, ENOENT) with umask(~0o755): os.makedirs(os.path.dirname(dst), exist_ok=True) mode = 0o1777 if any(dst.endswith(suffix) for suffix in ("/tmp", "/var/tmp")) else 0o755 if not os.path.exists(dst): with umask(~mode): os.mkdir(dst, mode=mode) options = [ f"lowerdir={':'.join(lowerdirs)}", "userxattr", # Disable the inodes index and metacopy (only copy metadata upwards if possible) # options. If these are enabled (e.g., if the kernel enables them by default), # the mount will fail if the upper directory has been earlier used with a different # lower directory, such as with a build overlay that was generated on top of a # different temporary root. # See https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html#sharing-and-copying-layers # and https://github.com/systemd/mkosi/issues/1841. "index=off", "metacopy=off", ] if upperdir and upperdir == "tmpfs": mount("tmpfs", dst, "tmpfs", 0, "mode=0755") with umask(~mode): os.mkdir(f"{dst}/upper", mode=mode) with umask(~0o755): os.mkdir(f"{dst}/work") options += [f"upperdir={dst}/upper", f"workdir={dst}/work"] else: if upperdir: options += [f"upperdir={upperdir}"] if workdir: options += [f"workdir={workdir}"] mount("overlayfs", dst, "overlay", 0, ",".join(options)) ANSI_HIGHLIGHT = "\x1b[0;1;39m" if os.isatty(2) else "" ANSI_NORMAL = "\x1b[0m" if os.isatty(2) else "" HELP = f"""\ mkosi-sandbox [OPTIONS...] COMMAND [ARGUMENTS...] 
{ANSI_HIGHLIGHT}Run the specified command in a custom sandbox.{ANSI_NORMAL} -h --help Show this help --version Show package version --tmpfs DST Mount a new tmpfs on DST --dev DST Mount dev on DST --proc DST Mount procfs on DST --dir DST Create a new directory at DST --bind SRC DST Bind mount the host path SRC to DST --bind-try SRC DST Bind mount the host path SRC to DST if it exists --ro-bind SRC DST Bind mount the host path SRC to DST read-only --ro-bind-try SRC DST Bind mount the host path SRC to DST read-only if it exists --symlink SRC DST Create a symlink at DST pointing to SRC --write DATA DST Write DATA to DST --overlay-lowerdir DIR Add a lower directory for the next overlayfs mount --overlay-upperdir DIR Set the upper directory for the next overlayfs mount --overlay-workdir DIR Set the working directory for the next overlayfs mount --overlay DST Mount an overlay filesystem at DST --unsetenv NAME Unset the environment variable with name NAME --setenv NAME VALUE Set the environment variable with name NAME to VALUE --chdir DIR Change the working directory in the sandbox to DIR --same-dir Change the working directory in the sandbox to $PWD --become-root Map the current user/group to root:root in the sandbox --suppress-chown Make chown() syscalls in the sandbox a noop --suppress-sync Make sync() syscalls in the sandbox a noop --unshare-net Unshare the network namespace if possible --unshare-ipc Unshare the IPC namespace if possible --suspend Stop process before execve() See the mkosi-sandbox(1) man page for details.\ """ UNSHARE_EPERM_MSG = f"""\ {ANSI_RED}mkosi was forbidden to unshare namespaces{ANSI_RESET}. This probably means your distribution has restricted unprivileged user namespaces. Please consult the REQUIREMENTS section of the mkosi man page, e.g. via "mkosi documentation", for workarounds.\ """ def main(argv: list[str] = sys.argv[1:]) -> None: # We don't use argparse as it takes +- 10ms to import and since this is primarily for internal # use, it's not necessary to have amazing UX for this CLI interface so it's trivial to write # ourselves. 
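# --- Illustrative aside (not part of mkosi): the options documented in HELP
# above compose left to right on the command line, e.g. this hypothetical
# invocation:
#
#     python3 -m mkosi.sandbox \
#         --ro-bind /usr /usr --symlink usr/bin /bin --symlink usr/lib /lib \
#         --dev /dev --proc /proc --tmpfs /tmp --become-root -- id
#
# Everything after "--" is executed inside the sandbox; without a command,
# main() below falls back to "bash".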
argv = list(reversed(argv)) fsops: list[FSOperation] = [] setenv = [] unsetenv = [] lowerdirs = [] upperdir = "" workdir = "" chdir = None become_root = suppress_chown = suppress_sync = unshare_net = unshare_ipc = suspend = pack_fds = False try: ttyname = os.ttyname(2) if os.isatty(2) else "" except FileNotFoundError: ttyname = "" while argv: arg = argv.pop() if arg == "--": break if arg in ("-h", "--help"): print(HELP, file=sys.stderr) sys.exit(0) elif arg == "--version": print(__version__, file=sys.stderr) sys.exit(0) if arg == "--tmpfs": fsops.append(TmpfsOperation(os.path.abspath(argv.pop()))) elif arg == "--dev": fsops.append(DevOperation(ttyname, os.path.abspath(argv.pop()))) elif arg == "--proc": fsops.append(ProcOperation(os.path.abspath(argv.pop()))) elif arg == "--dir": fsops.append(DirOperation(os.path.abspath(argv.pop()))) elif arg in ("--bind", "--ro-bind", "--bind-try", "--ro-bind-try"): readonly = arg.startswith("--ro") required = not arg.endswith("-try") src = argv.pop() fsops.append( BindOperation( os.path.abspath(src.removeprefix("+")), os.path.abspath(argv.pop()), readonly=readonly, required=required, relative=src.startswith("+"), ) ) elif arg == "--symlink": fsops.append(SymlinkOperation(argv.pop(), os.path.abspath(argv.pop()))) elif arg == "--write": fsops.append(WriteOperation(argv.pop(), os.path.abspath(argv.pop()))) elif arg == "--overlay-lowerdir": lowerdirs.append(os.path.abspath(argv.pop())) elif arg == "--overlay-upperdir": upperdir = argv.pop() if upperdir != "tmpfs": upperdir = os.path.abspath(upperdir) elif arg == "--overlay-workdir": workdir = os.path.abspath(argv.pop()) elif arg == "--overlay": fsops.append( OverlayOperation( tuple(reversed(lowerdirs)), upperdir, workdir, os.path.abspath(argv.pop()), ) ) upperdir = "" workdir = "" lowerdirs = [] elif arg == "--unsetenv": unsetenv.append(argv.pop()) elif arg == "--setenv": setenv.append((argv.pop(), argv.pop())) elif arg == "--chdir": chdir = os.path.abspath(argv.pop()) elif arg == "--same-dir": chdir = os.getcwd() elif arg == "--become-root": become_root = True elif arg == "--suppress-chown": suppress_chown = True elif arg == "--suppress-sync": suppress_sync = True elif arg == "--unshare-net": unshare_net = True elif arg == "--unshare-ipc": unshare_ipc = True elif arg == "--suspend": suspend = True elif arg == "--pack-fds": pack_fds = True elif arg.startswith("-"): raise ValueError(f"Unrecognized option {arg}") else: argv.append(arg) break if argv: if not is_main(): raise ValueError(f"A command line to execute can only be provided if {__name__} is executed") argv.reverse() else: argv = ["bash"] if is_main() else [] # Make sure all destination paths are absolute. for fsop in fsops: if fsop.dst[0] != "/": raise ValueError(f"{fsop.dst} is not an absolute path") fsops = FSOperation.optimize(fsops) for k, v in setenv: os.environ[k] = v for e in unsetenv: if e in os.environ: del os.environ[e] namespaces = CLONE_NEWNS if unshare_net and have_effective_cap(CAP_NET_ADMIN): namespaces |= CLONE_NEWNET if unshare_ipc: namespaces |= CLONE_NEWIPC userns = acquire_privileges(become_root=become_root, network=bool(namespaces & CLONE_NEWNET)) seccomp_suppress( # If we're root in a user namespace with a single user, we're still not going to be able to # chown() stuff, so check for that and apply the seccomp filter as well in that case. 
chown=suppress_chown and (userns or userns_has_single_user()), sync=suppress_sync, ) try: unshare(namespaces) except OSError as e: # This can happen here as well as in become_user, it depends on exactly # how the userns restrictions are implemented. if e.errno == EPERM and is_main(): print(UNSHARE_EPERM_MSG, file=sys.stderr) raise # If we unshared the user namespace the mount propagation of root is changed to slave automatically. if not userns: mount("", "/", "", MS_SLAVE | MS_REC, "") # We need a workspace to setup the sandbox, the easiest way to do this in a tmpfs, since it's # automatically cleaned up. We need a mountpoint to put the workspace on and it can't be root, # so let's use /tmp which is almost guaranteed to exist. mount("tmpfs", "/tmp", "tmpfs", 0, "") os.chdir("/tmp") with umask(~0o755): # This is where we set up the sandbox rootfs os.mkdir("newroot") # This is the old rootfs which is used as the source for mounts in the new rootfs. os.mkdir("oldroot") # Make sure that newroot is a mountpoint. mount("newroot", "newroot", "", MS_BIND | MS_REC, "") # Make the workspace in /tmp / and put the old rootfs in oldroot. if libc.pivot_root(b".", b"oldroot") < 0: # pivot_root() can fail in the initramfs since / isn't a mountpoint there, so let's fall # back to MS_MOVE if that's the case. # First we move the old rootfs to oldroot. mount("/", "oldroot", "", MS_BIND | MS_REC, "") # Then we move the workspace (/tmp) to /. mount(".", "/", "", MS_MOVE, "") # chroot and chdir to fully make the workspace the new root. os.chroot(".") os.chdir(".") # When we use MS_MOVE we have to unmount oldroot/tmp manually to reveal the original /tmp # again as it might contain stuff that we want to mount into the sandbox. umount2("oldroot/tmp", MNT_DETACH) for fsop in fsops: fsop.execute("oldroot", "newroot") # Now that we're done setting up the sandbox let's pivot root into newroot to make it the new # root. We use the pivot_root(".", ".") process described in the pivot_root() man page. os.chdir("newroot") # We're guaranteed to have / be a mount when we get here, so pivot_root() won't fail anymore, # even if we're in the initramfs. if libc.pivot_root(b".", b".") < 0: oserror("pivot_root") # As documented in the pivot_root() man page, this will unmount the old rootfs. umount2(".", MNT_DETACH) # Avoid surprises by making sure the sandbox's mount propagation is shared. This doesn't # actually mean mounts get propagated into the host. Instead, a new mount propagation peer # group is set up. mount("", ".", "", MS_SHARED | MS_REC, "") if chdir: os.chdir(chdir) if pack_fds: nfds = pack_file_descriptors() if nfds > 0: os.environ["LISTEN_FDS"] = str(nfds) os.environ["LISTEN_PID"] = str(os.getpid()) if suspend: os.kill(os.getpid(), SIGSTOP) if is_main(): try: os.execvp(argv[0], argv) except OSError as e: # Let's return a recognizable error when the binary we're going to execute is not found. # We use 127 as that's the exit code used by shells when a program to execute is not found. 
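# --- Illustrative aside (not part of mkosi): exit code 127 mirrors shell
# behavior for "command not found", so a hypothetical caller can distinguish
# a missing binary from the command's own failure codes without parsing
# stderr:
#
#     if subprocess.run([...]).returncode == 127:
#         ...  # the binary itself was not found inside the sandbox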
if e.errno == ENOENT: sys.exit(127) raise if is_main(): main() mkosi-26/mkosi/sysupdate.py000066400000000000000000000100671512054777600161640ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import os import subprocess import sys import tempfile from pathlib import Path from mkosi.config import Args, ArtifactOutput, Config, OutputFormat from mkosi.log import die from mkosi.run import run, workdir from mkosi.user import become_root_cmd from mkosi.util import PathString, flatten def run_sysupdate(args: Args, config: Config) -> None: if not config.sysupdate_dir: die( "No sysupdate definitions directory specified", hint="Specify a directory containing systemd-sysupdate transfer definitions with " "SysupdateDirectory=", ) if not (sysupdate := config.find_binary("systemd-sysupdate", "/usr/lib/systemd/systemd-sysupdate")): die("Could not find systemd-sysupdate") with contextlib.ExitStack() as stack: if config.tools() != Path("/"): # We explicitly run this without a sandbox, because / has to be the original root mountpoint for # bootctl --print-root-device to work properly. blockdev = run(["bootctl", "--print-root-device"], stdout=subprocess.PIPE).stdout.strip() tmp = stack.enter_context(tempfile.TemporaryDirectory()) # If /run/systemd/volatile-root exists, systemd skips its root block device detection logic and # uses whatever block device /run/systemd/volatile-root points to instead. Let's make use of that # when using a tools tree as in that case the block device detection logic doesn't work properly. (Path(tmp) / "volatile-root").symlink_to(blockdev) else: tmp = None if ( config.output_format == OutputFormat.disk and ArtifactOutput.partitions not in config.split_artifacts ): old = {p for p in config.output_dir_or_cwd().iterdir() if p.is_file()} # If we didn't generate split partitions as part of the image build, let's do it now. run( [ "systemd-repart", "--no-pager", "--pretty=no", "--split=yes", *([f"--definitions={workdir(d)}" for d in config.repart_dirs]), workdir(config.output_dir_or_cwd() / config.output_with_format), ], sandbox=config.sandbox( options=[ "--bind", config.output_dir_or_cwd(), workdir(config.output_dir_or_cwd()), *flatten(["--ro-bind", os.fspath(d), workdir(d)] for d in config.repart_dirs), ], ), ) # fmt: skip for p in config.output_dir_or_cwd().iterdir(): if p not in old and p.is_file(): stack.callback(p.unlink) cmd: list[PathString] = [ sysupdate, "--definitions", config.sysupdate_dir, "--transfer-source", config.output_dir_or_cwd(), *args.cmdline, ] # fmt: skip run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.finalize_environment(), log=False, sandbox=config.sandbox( devices=True, network=True, relaxed=True, options=[ *(["--bind", "/boot", "/boot"] if Path("/boot").exists() else []), *(["--bind", "/efi", "/efi"] if Path("/efi").exists() else []), *( [ # Make sure systemd-sysupdate parses os-release from the host and not the tools # tree. 
"--bind", "/usr/lib/os-release", "/usr/lib/os-release", "--bind", tmp, "/run/systemd", ] if tmp else [] ), "--same-dir", ], ), setup=become_root_cmd(), ) # fmt: skip mkosi-26/mkosi/tree.py000066400000000000000000000172261512054777600151060ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import errno import logging import os import shutil import subprocess import tempfile from collections.abc import Iterator from pathlib import Path from mkosi.config import ConfigFeature from mkosi.log import die from mkosi.run import SandboxProtocol, nosandbox, run, workdir from mkosi.sandbox import ( BTRFS_SUPER_MAGIC, FS_NOCOW_FL, OVERLAYFS_SUPER_MAGIC, btrfs_subvol_create, btrfs_subvol_delete, btrfs_subvol_snapshot, chattr, lsattr, statfs, ) from mkosi.util import PathString, flatten from mkosi.versioncomp import GenericVersion def is_subvolume(path: Path) -> bool: return path.is_dir() and path.stat().st_ino == 256 and statfs(os.fspath(path)) == BTRFS_SUPER_MAGIC def cp_version(*, sandbox: SandboxProtocol = nosandbox) -> GenericVersion: return GenericVersion( run( ["cp", "--version"], sandbox=sandbox(), stdout=subprocess.PIPE, ) .stdout.splitlines()[0] .split()[3] ) def make_tree(path: Path, *, use_subvolumes: ConfigFeature = ConfigFeature.disabled) -> Path: path = path.absolute() if statfs(os.fspath(path.parent)) != BTRFS_SUPER_MAGIC: if use_subvolumes == ConfigFeature.enabled: die(f"Subvolumes requested but {path} is not located on a btrfs filesystem") path.mkdir() return path if use_subvolumes != ConfigFeature.disabled: try: btrfs_subvol_create(os.fspath(path)) return path except OSError as e: if use_subvolumes == ConfigFeature.enabled: raise e if e.errno != errno.ENOTTY: logging.debug(f"Failed to create subvolume {path}, using a regular directory: {e}") path.mkdir() return path @contextlib.contextmanager def preserve_target_directories_stat(src: Path, dst: Path) -> Iterator[None]: dirs = [p for d in src.glob("**/") if (dst / (p := d.relative_to(src))).exists()] with tempfile.TemporaryDirectory() as tmp: for d in dirs: (tmp / d).mkdir(exist_ok=True) shutil.copystat(dst / d, tmp / d) yield for d in dirs: shutil.copystat(tmp / d, dst / d) def maybe_make_nocow(path: Path) -> None: try: chattr(os.fspath(path), lsattr(os.fspath(path)) | FS_NOCOW_FL) except OSError as e: if e.errno not in (errno.ENOTTY, errno.EOPNOTSUPP, errno.EINVAL): raise def tree_has_selinux_xattr(path: Path) -> bool: return any( "security.selinux" in os.listxattr(p, follow_symlinks=False) for p in (path, *path.rglob("*")) ) def copy_tree( src: Path, dst: Path, *, preserve: bool = True, dereference: bool = False, use_subvolumes: ConfigFeature = ConfigFeature.disabled, sandbox: SandboxProtocol = nosandbox, ) -> Path: src = src.absolute() dst = dst.absolute() options: list[PathString] = [ "--ro-bind", src, workdir(src, sandbox), "--bind", dst.parent, workdir(dst.parent, sandbox), ] # fmt: skip attrs = "mode,links" if preserve: attrs += ",timestamps,ownership" # Trying to copy selinux xattrs to overlayfs fails with "Operation not supported" in containers. 
if statfs(os.fspath(dst.parent)) != OVERLAYFS_SUPER_MAGIC or not tree_has_selinux_xattr(src): attrs += ",xattr" def copy() -> None: if src.is_file(): try: attr = lsattr(os.fspath(src)) except OSError: attr = 0 if attr & FS_NOCOW_FL: fdst = dst / src.name if dst.is_dir() else dst fdst.touch() maybe_make_nocow(fdst) cmdline: list[PathString] = [ "cp", "--recursive", "--dereference" if dereference else "--no-dereference", f"--preserve={attrs}", "--reflink=auto", "--copy-contents", workdir(src, sandbox), workdir(dst, sandbox), ] if dst.exists() and dst.is_dir() and any(dst.iterdir()) and cp_version(sandbox=sandbox) >= "9.5": cmdline += ["--keep-directory-symlink"] # If the source and destination are both directories, we want to merge the source directory with the # destination directory. If the source if a file and the destination is a directory, we want to copy # the source inside the directory. if src.is_dir(): cmdline += ["--no-target-directory"] run(cmdline, sandbox=sandbox(options=options)) # Subvolumes always have inode 256 so we can use that to check if a directory is a subvolume. if ( use_subvolumes == ConfigFeature.disabled or not preserve or not is_subvolume(src) or statfs(os.fspath(dst.parent)) != BTRFS_SUPER_MAGIC or (dst.exists() and (not dst.is_dir() or any(dst.iterdir()))) ): with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext(): copy() return dst # btrfs can't snapshot to an existing directory so make sure the destination does not exist. if dst.exists(): dst.rmdir() try: btrfs_subvol_snapshot(os.fspath(src), os.fspath(dst)) except OSError as e: if use_subvolumes == ConfigFeature.enabled: raise e if e.errno != errno.ENOTTY: logging.debug(f"Failed to snapshot subvolume {src} to {dst}, falling back to copying: {e}") with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext(): copy() return dst def rmtree(*paths: Path, sandbox: SandboxProtocol = nosandbox) -> None: if not paths: return paths = tuple(p.absolute() for p in paths) # Silence and ignore failures since when not running as root, this will fail with a permission error # unless the btrfs filesystem is mounted with user_subvol_rm_allowed. for p in sorted({p for p in paths if not p.is_symlink() and p.exists() and is_subvolume(p)}): try: # Make sure the subvolume is writable which is required to be able to remove it. os.chmod(p, 755) btrfs_subvol_delete(os.fspath(p)) except OSError as e: if e.errno in (errno.EPERM, errno.EACCES): logging.debug( f"Could not delete subvolume {p} due to permission issues.\n" "Consider adding user_subvol_rm_allowed to your btrfs filesystem mount options." 
) else: logging.debug(f"Failed to delete subvolume {p}: {e}") filtered = sorted({p for p in paths if p.exists() or p.is_symlink()}) if filtered: run( ["rm", "-rf", "--", *(workdir(p, sandbox) for p in filtered)], sandbox=sandbox( options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in filtered), ), ) def move_tree( src: Path, dst: Path, *, use_subvolumes: ConfigFeature = ConfigFeature.disabled, sandbox: SandboxProtocol = nosandbox, ) -> Path: src = src.absolute() dst = dst.absolute() if src == dst: return dst if dst.is_dir(): dst = dst / src.name try: src.rename(dst) except OSError as e: if e.errno != errno.EXDEV: raise e logging.info( f"Could not rename {src} to {dst} as they are located on different devices, " "falling back to copying" ) copy_tree(src, dst, use_subvolumes=use_subvolumes, sandbox=sandbox) rmtree(src, sandbox=sandbox) return dst mkosi-26/mkosi/user.py000066400000000000000000000131021512054777600151120ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import fcntl import os import pwd import tempfile from pathlib import Path from mkosi.log import die from mkosi.run import find_binary, spawn from mkosi.sandbox import CLONE_NEWUSER, unshare from mkosi.util import flock, parents_below SUBRANGE = 65536 class INVOKING_USER: @classmethod def is_regular_user(cls, uid: int) -> bool: return uid >= 1000 @classmethod def cache_dir(cls) -> Path: if (env := os.getenv("XDG_CACHE_HOME")) or (env := os.getenv("CACHE_DIRECTORY")): cache = Path(env) elif cls.is_regular_user(os.getuid()) and Path.home() != Path("/"): cache = Path.home() / ".cache" else: cache = Path("/var/cache") return cache @classmethod def runtime_dir(cls) -> Path: if (env := os.getenv("XDG_RUNTIME_DIR")) or (env := os.getenv("RUNTIME_DIRECTORY")): d = Path(env) elif cls.is_regular_user(os.getuid()): d = Path(f"/run/user/{os.getuid()}") else: d = Path("/run") return d @classmethod def tmpfiles_dir(cls) -> Path: config = Path(os.getenv("XDG_CONFIG_HOME", Path.home() / ".config")) if config in (Path("/"), Path("/root")): return Path("/etc/tmpfiles.d") return config / "user-tmpfiles.d" @classmethod def chown(cls, path: Path) -> None: # If we created a file/directory in a parent directory owned by a regular user, make sure the path # and any parent directories are owned by the invoking user as well. if q := next((parent for parent in path.parents if cls.is_regular_user(parent.stat().st_uid)), None): st = q.stat() os.chown(path, st.st_uid, st.st_gid) for parent in parents_below(path, q): os.chown(parent, st.st_uid, st.st_gid) def read_subrange(path: Path) -> int: if not path.exists(): die(f"{path} does not exist, cannot allocate subuid/subgid user namespace") uid = str(os.getuid()) try: user = pwd.getpwuid(os.getuid()).pw_name except KeyError: user = None for line in path.read_text().splitlines(): name, start, count = line.split(":") if name == uid or name == user: break else: die(f"No mapping found for {user or uid} in {path}") if int(count) < SUBRANGE: die( f"subuid/subgid range length must be at least {SUBRANGE}, " f"got {count} for {user or uid} from line '{line}'" ) return int(start) def become_root_in_subuid_range() -> None: """ Set up a new user namespace mapping using /etc/subuid and /etc/subgid. The current user is mapped to root and the current process becomes the root user in the new user namespace. The other IDs will be mapped through. 
""" if os.getuid() == 0: return subuid = read_subrange(Path("/etc/subuid")) subgid = read_subrange(Path("/etc/subgid")) pid = os.getpid() with tempfile.NamedTemporaryFile(prefix="mkosi-uidmap-lock-") as lockfile: lock = Path(lockfile.name) # We map the private UID range configured in /etc/subuid and /etc/subgid into the user namespace # using newuidmap and newgidmap. On top of that, we also make sure to map in the user running mkosi # to root so that we can access files and directories from the current user from within the user # namespace. newuidmap = [ "flock", "--exclusive", "--close", lock, "newuidmap", pid, 0, os.getuid(), 1, 1, subuid + 1, SUBRANGE - 1, ] # fmt: skip newgidmap = [ "flock", "--exclusive", "--close", lock, "newgidmap", pid, 0, os.getgid(), 1, 1, subgid + 1, SUBRANGE - 1, ] # fmt: skip # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid # mapping to the process in the user namespace. The mapping can only be assigned after the user # namespace has been unshared. To make this work, we first lock a temporary file, then spawn the # newuidmap and newgidmap processes, which we execute using flock so they don't execute before they # can get a lock on the same temporary file, then we unshare the user namespace and finally we unlock # the temporary file, which allows the newuidmap and newgidmap processes to execute. we then wait for # the processes to finish before continuing. with ( flock(lock) as fd, spawn([str(x) for x in newuidmap]) as uidmap, spawn([str(x) for x in newgidmap]) as gidmap, ): unshare(CLONE_NEWUSER) fcntl.flock(fd, fcntl.LOCK_UN) uidmap.wait() gidmap.wait() os.setresuid(0, 0, 0) os.setresgid(0, 0, 0) os.setgroups([0]) def become_root_in_subuid_range_cmd() -> list[str]: if os.getuid() == 0: return [] subuid = read_subrange(Path("/etc/subuid")) subgid = read_subrange(Path("/etc/subgid")) cmd = [ "unshare", "--setuid", "0", "--setgid", "0", "--map-users", f"0:{os.getuid()}:1", "--map-users", f"1:{subuid + 1}:{SUBRANGE - 1}", "--map-groups", f"0:{os.getgid()}:1", "--map-groups", f"1:{subgid + 1}:{SUBRANGE - 1}", "--keep-caps", ] # fmt: skip return cmd def become_root_cmd() -> list[str]: if os.getuid() == 0: return [] return ["run0"] if find_binary("run0") and Path("/run/systemd/system").exists() else ["sudo"] mkosi-26/mkosi/util.py000066400000000000000000000157361512054777600151300ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import ast import contextlib import copy import enum import errno import fcntl import functools import hashlib import importlib import importlib.resources import itertools import logging import os import re import resource import stat import tempfile from collections.abc import Hashable, Iterable, Iterator, Mapping, Sequence from pathlib import Path from types import ModuleType from typing import IO, Any, Callable, Optional, Protocol, TypeVar, Union from mkosi.log import die from mkosi.resources import as_file T = TypeVar("T") V = TypeVar("V") S = TypeVar("S", bound=Hashable) # Borrowed from https://github.com/python/typeshed/blob/3d14016085aed8bcf0cf67e9e5a70790ce1ad8ea/stdlib/3/subprocess.pyi#L24 _FILE = Union[None, int, IO[Any]] PathString = Union[Path, str] # Borrowed from # https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L19 # and # https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L224 _T_co = TypeVar("_T_co", covariant=True) class 
SupportsRead(Protocol[_T_co]): def read(self, __length: int = ...) -> _T_co: ... def dictify(f: Callable[..., Iterator[tuple[T, V]]]) -> Callable[..., dict[T, V]]: def wrapper(*args: Any, **kwargs: Any) -> dict[T, V]: return dict(f(*args, **kwargs)) return functools.update_wrapper(wrapper, f) def tuplify(f: Callable[..., Iterable[T]]) -> Callable[..., tuple[T, ...]]: def wrapper(*args: Any, **kwargs: Any) -> tuple[T, ...]: return tuple(f(*args, **kwargs)) return functools.update_wrapper(wrapper, f) def one_zero(b: bool) -> str: return "1" if b else "0" def is_power_of_2(x: int) -> bool: return x > 0 and (x & x - 1 == 0) def round_up(x: int, blocksize: int = 4096) -> int: return (x + blocksize - 1) // blocksize * blocksize def startswith(s: str, prefix: str) -> Optional[str]: if s.startswith(prefix): return s.removeprefix(prefix) return None @dictify def read_env_file(path: PathString) -> Iterator[tuple[str, str]]: with Path(path).open() as f: for line_number, line in enumerate(f, start=1): line = line.rstrip() if not line or line.startswith("#"): continue if m := re.match(r"([a-zA-Z_][a-zA-Z_0-9]+)=(.*)", line): name, val = m.groups() if val and val[0] in "\"'": val = ast.literal_eval(val) yield name, val else: logging.info(f"{path}:{line_number}: bad line {line!r}") def format_rlimit(rlimit: int) -> str: limits = resource.getrlimit(rlimit) soft = "infinity" if limits[0] == resource.RLIM_INFINITY else str(limits[0]) hard = "infinity" if limits[1] == resource.RLIM_INFINITY else str(limits[1]) return f"{soft}:{hard}" def flatten(lists: Iterable[Iterable[T]]) -> list[T]: """Flatten a sequence of sequences into a single list.""" return list(itertools.chain.from_iterable(lists)) @contextlib.contextmanager def chdir(directory: PathString) -> Iterator[None]: old = Path.cwd() if old == directory: yield return try: os.chdir(directory) yield finally: os.chdir(old) def make_executable(*paths: Path) -> None: for path in paths: st = path.stat() os.chmod(path, st.st_mode | stat.S_IEXEC) @contextlib.contextmanager def flock(path: Path, flags: int = fcntl.LOCK_EX) -> Iterator[int]: fd = os.open(path, os.O_CLOEXEC | os.O_RDONLY) try: fcntl.fcntl(fd, fcntl.FD_CLOEXEC) logging.debug(f"Acquiring lock on {path}") fcntl.flock(fd, flags) logging.debug(f"Acquired lock on {path}") yield fd finally: os.close(fd) @contextlib.contextmanager def flock_or_die(path: Path, flags: int = fcntl.LOCK_EX) -> Iterator[Path]: try: with flock(path, flags | fcntl.LOCK_NB): yield path except OSError as e: if e.errno != errno.EWOULDBLOCK: raise e die( f"Cannot lock {path} as it is locked by another process", hint="Maybe another mkosi process is still using it? Use Ephemeral=yes to enable booting " "multiple instances of the same image", ) @contextlib.contextmanager def scopedenv(env: Mapping[str, Any]) -> Iterator[None]: old = copy.deepcopy(os.environ) os.environ |= env # python caches the default temporary directory so when we might modify TMPDIR we have to make sure it # gets recalculated (see https://docs.python.org/3/library/tempfile.html#tempfile.tempdir). tempfile.tempdir = None try: yield finally: os.environ = old tempfile.tempdir = None class StrEnum(enum.Enum): def __str__(self) -> str: assert isinstance(self.value, str) return self.value # Used by enum.auto() to get the next value. 
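# --- Illustrative aside (not part of mkosi): together with __str__ above,
# the hook below makes enum.auto() members stringify with dashes instead of
# underscores, e.g. for a hypothetical enum:
#
#     class _Example(StrEnum):
#         disk_image = enum.auto()
#
#     str(_Example.disk_image) == "disk-image"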
@staticmethod def _generate_next_value_(name: str, start: int, count: int, last_values: Sequence[str]) -> str: return name.replace("_", "-") @classmethod def values(cls) -> list[str]: return list(s.replace("_", "-") for s in map(str, cls.__members__)) @classmethod def choices(cls) -> list[str]: return [*cls.values(), ""] def parents_below(path: Path, below: Path) -> list[Path]: parents = list(path.parents) return parents[: parents.index(below)] @contextlib.contextmanager def resource_path(mod: ModuleType) -> Iterator[Path]: t = importlib.resources.files(mod) with as_file(t) as p: # Make sure any temporary directory that the resources are unpacked in is accessible to the invoking # user so that any commands executed as the invoking user can access files within it. if ( p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and stat.S_IMODE(p.parent.stat().st_mode) == 0o700 ): p.parent.chmod(0o755) yield p def hash_file(path: Path) -> str: # TODO Replace with hashlib.file_digest after dropping support for Python 3.10. h = hashlib.sha256() b = bytearray(16 * 1024**2) mv = memoryview(b) with path.open("rb", buffering=0) as f: while n := f.readinto(mv): h.update(mv[:n]) return h.hexdigest() def try_or(fn: Callable[..., T], exception: type[Exception], default: T) -> T: try: return fn() except exception: return default def groupby(seq: Sequence[T], key: Callable[[T], S]) -> list[tuple[S, list[T]]]: grouped: dict[S, list[T]] = {} for i in seq: k = key(i) if k not in grouped: grouped[k] = [] grouped[k].append(i) return [(key, group) for key, group in grouped.items()] def unique(seq: Sequence[T]) -> list[T]: return list(dict.fromkeys(seq)) def mandatory_variable(name: str) -> str: try: return os.environ[name] except KeyError: die(f"${name} must be set in the environment") mkosi-26/mkosi/versioncomp.py000066400000000000000000000157071512054777600165150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import functools import itertools import string from typing import Final @functools.total_ordering class GenericVersion: # These constants follow the convention of the return value of rpmdev-vercmp that are followed # by systemd-analyze compare-versions when called with only two arguments (without a comparison # operator), recreated in the compare_versions method. _EQUAL: Final[int] = 0 _RIGHT_SMALLER: Final[int] = 1 _LEFT_SMALLER: Final[int] = -1 def __init__(self, version: str): self._version = version @classmethod def compare_versions(cls, v1: str, v2: str) -> int: """Implements comparison according to UAPI Group Version Format Specification""" def rstrip_invalid_version_chars(s: str) -> str: valid_version_chars = {*string.ascii_letters, *string.digits, "~", "-", "^", "."} for i, c in enumerate(s): if c in valid_version_chars: return s[i:] return "" def digit_prefix(s: str) -> str: return "".join(itertools.takewhile(lambda c: c in string.digits, s)) def letter_prefix(s: str) -> str: return "".join(itertools.takewhile(lambda c: c in string.ascii_letters, s)) while True: # Any characters which are outside of the set of listed above (a-z, A-Z, 0-9, -, ., ~, # ^) are skipped in both strings. In particular, this means that non-ASCII characters # that are Unicode digits or letters are skipped too. v1 = rstrip_invalid_version_chars(v1) v2 = rstrip_invalid_version_chars(v2) # If the remaining part of one of strings starts with "~": if other remaining part does # not start with ~, the string with ~ compares lower. Otherwise, both tilde characters # are skipped. 
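# --- Illustrative aside (not part of mkosi): this tilde rule is what makes
# pre-releases sort before releases, e.g.
#
#     GenericVersion("1.0~rc1") < GenericVersion("1.0")  # True
#
# because a remaining "~" compares lower than anything else, including the
# end of the other string.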
if v1.startswith("~") and v2.startswith("~"): v1 = v1.removeprefix("~") v2 = v2.removeprefix("~") elif v1.startswith("~"): return cls._LEFT_SMALLER elif v2.startswith("~"): return cls._RIGHT_SMALLER # If one of the strings has ended: if the other string hasn’t, the string that has # remaining characters compares higher. Otherwise, the strings compare equal. if not v1 and not v2: return cls._EQUAL elif not v1 and v2: return cls._LEFT_SMALLER elif v1 and not v2: return cls._RIGHT_SMALLER # If the remaining part of one of strings starts with "-": if the other remaining part # does not start with -, the string with - compares lower. Otherwise, both minus # characters are skipped. if v1.startswith("-") and v2.startswith("-"): v1 = v1.removeprefix("-") v2 = v2.removeprefix("-") elif v1.startswith("-"): return cls._LEFT_SMALLER elif v2.startswith("-"): return cls._RIGHT_SMALLER # If the remaining part of one of strings starts with "^": if the other remaining part # does not start with ^, the string with ^ compares higher. Otherwise, both caret # characters are skipped. if v1.startswith("^") and v2.startswith("^"): v1 = v1.removeprefix("^") v2 = v2.removeprefix("^") elif v1.startswith("^"): # TODO: bug? return cls._LEFT_SMALLER # cls._RIGHT_SMALLER elif v2.startswith("^"): return cls._RIGHT_SMALLER # cls._LEFT_SMALLER # If the remaining part of one of strings starts with ".": if the other remaining part # does not start with ., the string with . compares lower. Otherwise, both dot # characters are skipped. if v1.startswith(".") and v2.startswith("."): v1 = v1.removeprefix(".") v2 = v2.removeprefix(".") elif v1.startswith("."): return cls._LEFT_SMALLER elif v2.startswith("."): return cls._RIGHT_SMALLER # If either of the remaining parts starts with a digit: numerical prefixes are compared # numerically. Any leading zeroes are skipped. The numerical prefixes (until the first # non-digit character) are evaluated as numbers. If one of the prefixes is empty, it # evaluates as 0. If the numbers are different, the string with the bigger number # compares higher. Otherwise, the comparison continues at the following characters at # point 1. v1_digit_prefix = digit_prefix(v1) v2_digit_prefix = digit_prefix(v2) if v1_digit_prefix or v2_digit_prefix: v1_digits = int(v1_digit_prefix) if v1_digit_prefix else 0 v2_digits = int(v2_digit_prefix) if v2_digit_prefix else 0 if v1_digits < v2_digits: return cls._LEFT_SMALLER elif v1_digits > v2_digits: return cls._RIGHT_SMALLER v1 = v1.removeprefix(v1_digit_prefix) v2 = v2.removeprefix(v2_digit_prefix) continue # Leading alphabetical prefixes are compared alphabetically. The substrings are # compared letter-by-letter. If both letters are the same, the comparison continues # with the next letter. Capital letters compare lower than lower-case letters (A < # a). When the end of one substring has been reached (a non-letter character or the end # of the whole string), if the other substring has remaining letters, it compares # higher. Otherwise, the comparison continues at the following characters at point 1. 
v1_letter_prefix = letter_prefix(v1) v2_letter_prefix = letter_prefix(v2) if v1_letter_prefix < v2_letter_prefix: return cls._LEFT_SMALLER elif v1_letter_prefix > v2_letter_prefix: return cls._RIGHT_SMALLER v1 = v1.removeprefix(v1_letter_prefix) v2 = v2.removeprefix(v2_letter_prefix) def __eq__(self, other: object) -> bool: if isinstance(other, (str, int)): other = GenericVersion(str(other)) elif not isinstance(other, GenericVersion): return False return self.compare_versions(self._version, other._version) == self._EQUAL def __lt__(self, other: object) -> bool: if isinstance(other, (str, int)): other = GenericVersion(str(other)) elif not isinstance(other, GenericVersion): return False return self.compare_versions(self._version, other._version) == self._LEFT_SMALLER def __str__(self) -> str: return self._version def __repr__(self) -> str: return f"GenericVersion('{self._version}')" mkosi-26/mkosi/vmspawn.py000066400000000000000000000102501512054777600156300ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import getpass import os import sys from pathlib import Path from mkosi.config import ( Args, Config, Firmware, Network, OutputFormat, yes_no, ) from mkosi.log import die from mkosi.qemu import ( copy_ephemeral, finalize_credentials, finalize_firmware, finalize_initrd, finalize_kernel_command_line_extra, finalize_register, ) from mkosi.run import run from mkosi.util import PathString def run_vmspawn(args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.esp, OutputFormat.directory): die(f"{config.output_format} images cannot be booted in systemd-vmspawn") if config.firmware == Firmware.bios: die("systemd-vmspawn cannot boot BIOS firmware images") if config.firmware_variables and config.firmware_variables != Path("microsoft"): die("mkosi vmspawn does not support FirmwareVariables=") kernel = config.expand_linux_specifiers() if config.linux else None firmware = finalize_firmware(config, kernel) if not kernel and firmware.is_linux(): kernel = config.output_dir_or_cwd() / config.output_split_kernel if not kernel.exists(): die( f"Kernel or UKI not found at {kernel}", hint="Please install a kernel in the image or provide a --linux argument to mkosi vmspawn", ) cmdline: list[PathString] = [ "systemd-vmspawn", "--cpus", str(config.cpus or os.cpu_count()), "--ram", str(config.ram), "--kvm", config.kvm.to_tristate(), "--vsock", config.vsock.to_tristate(), "--tpm", config.tpm.to_tristate(), "--secure-boot", yes_no(config.secure_boot), "--register", yes_no(finalize_register(config)), "--console", str(config.console), ] # fmt: skip if config.runtime_size: cmdline += ["--grow-image", str(config.runtime_size)] if config.bind_user: cmdline += ["--bind-user", getpass.getuser(), "--bind-user-group=wheel"] if config.runtime_network == Network.user: cmdline += ["--network-user-mode"] elif config.runtime_network == Network.interface: cmdline += ["--network-tap"] with contextlib.ExitStack() as stack: for f in finalize_credentials(config, stack).iterdir(): cmdline += [f"--load-credential={f.name}:{f}"] fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output)) if config.runtime_build_sources: for t in config.build_sources: src, dst = t.with_prefix("/work/src") cmdline += ["--bind", f"{src}:{dst}"] if config.build_dir: cmdline += ["--bind", f"{config.build_subdir}:/work/build"] for tree in config.runtime_trees: target = Path("/root/src") / (tree.target or "") cmdline += ["--bind", 
f"{tree.source}:{target}"] if kernel: cmdline += ["--linux", kernel] if firmware != Firmware.linux_noinitrd and ( initrd := stack.enter_context(finalize_initrd(config)) ): cmdline += ["--initrd", initrd] if config.output_format == OutputFormat.directory: cmdline += ["--directory", fname] owner = os.stat(fname).st_uid if owner != 0: cmdline += [f"--private-users={str(owner)}"] else: cmdline += ["--image", fname] if config.forward_journal: cmdline += ["--forward-journal", config.forward_journal] cmdline += [*args.cmdline, *finalize_kernel_command_line_extra(config)] env = os.environ.copy() if config.qemu_args: env["SYSTEMD_VMSPAWN_QEMU_EXTRA"] = " ".join(config.qemu_args) run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=env | config.finalize_environment(), log=False, sandbox=config.sandbox( network=True, devices=True, relaxed=True, options=["--same-dir"], ), ) mkosi-26/pyproject.toml000066400000000000000000000036251512054777600153650ustar00rootroot00000000000000[build-system] requires = ["setuptools", "setuptools-scm"] build-backend = "setuptools.build_meta" [project] name = "mkosi" authors = [ {name = "mkosi contributors", email = "systemd-devel@lists.freedesktop.org"}, ] version = "26" description = "Build Bespoke OS Images" readme = "README.md" requires-python = ">=3.9" license = {text = "LGPL-2.1-or-later"} [project.optional-dependencies] bootable = [ "pefile >= 2021.9.3", ] [project.scripts] mkosi = "mkosi.__main__:main" mkosi-initrd = "mkosi.initrd:main" mkosi-sandbox = "mkosi.sandbox:main" mkosi-addon = "mkosi.addon:main" [tool.setuptools] packages = [ "mkosi", "mkosi.distribution", "mkosi.installer", "mkosi.resources", ] [tool.setuptools.package-data] "mkosi.resources" = [ "completion.*", "man/*", "mkosi-addon/**/*", "mkosi-initrd/**/*", "mkosi-obs/**/*", "mkosi-tools/**/*", "mkosi-vm/**/*", "repart/**/*", "tmpfiles.d/*", "pandoc/*", ] [tool.isort] profile = "black" include_trailing_comma = true multi_line_output = 3 py_version = "39" [tool.pyright] pythonVersion = "3.9" [tool.mypy] python_version = 3.9 # belonging to --strict warn_unused_configs = true disallow_any_generics = true disallow_subclassing_any = true disallow_untyped_calls = true disallow_untyped_defs = true disallow_untyped_decorators = true disallow_incomplete_defs = true check_untyped_defs = true no_implicit_optional = true warn_redundant_casts = true warn_unused_ignores = false warn_return_any = true no_implicit_reexport = true # extra options not in --strict pretty = true show_error_codes = true show_column_numbers = true warn_unreachable = true strict_equality = true scripts_are_modules = true [tool.ruff] target-version = "py39" line-length = 109 lint.select = ["E", "F", "I", "UP", "W291"] [tool.pytest.ini_options] markers = [ "integration: mark a test as an integration test." 
] addopts = "-m \"not integration\"" mkosi-26/tests/000077500000000000000000000000001512054777600136055ustar00rootroot00000000000000mkosi-26/tests/.gitignore000066400000000000000000000000071512054777600155720ustar00rootroot00000000000000/*.pyc mkosi-26/tests/__init__.py000066400000000000000000000127271512054777600157270ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import dataclasses import os import subprocess import sys import uuid from collections.abc import Iterator, Mapping, Sequence from pathlib import Path from types import TracebackType from typing import Any, Optional import pytest from mkosi.distribution import Distribution from mkosi.run import CompletedProcess, fork_and_wait, run from mkosi.sandbox import acquire_privileges from mkosi.tree import rmtree from mkosi.user import INVOKING_USER from mkosi.util import _FILE, PathString @dataclasses.dataclass(frozen=True) class ImageConfig: distribution: Distribution release: str debug_shell: bool class Image: def __init__(self, config: ImageConfig) -> None: self.config = config def __enter__(self) -> "Image": if (cache := INVOKING_USER.cache_dir() / "mkosi") and os.access(cache, os.W_OK): tmpdir = cache else: tmpdir = Path("/var/tmp") self.output_dir = Path(os.getenv("TMPDIR", tmpdir)) / uuid.uuid4().hex[:16] return self def __exit__( self, type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: def clean() -> None: acquire_privileges() rmtree(self.output_dir) fork_and_wait(clean) def mkosi( self, verb: str, options: Sequence[PathString] = (), args: Sequence[str] = (), stdin: _FILE = None, check: bool = True, env: Mapping[str, str] = {}, ) -> CompletedProcess: return run( [ "python3", "-m", "mkosi", "--debug", *options, verb, *args, ], check=check, stdin=stdin, stdout=sys.stdout, env=os.environ | env, ) # fmt: skip def build( self, options: Sequence[PathString] = (), args: Sequence[str] = (), env: Mapping[str, str] = {}, ) -> CompletedProcess: kcl = [ "loglevel=6", "systemd.log_level=debug", "udev.log_level=info", "systemd.show_status=false", "systemd.journald.forward_to_console", "systemd.journald.max_level_console=info", "systemd.firstboot=no", "systemd.unit=mkosi-check-and-shutdown.service", ] opt: list[PathString] = [ "--distribution", str(self.config.distribution), "--release", self.config.release, *(f"--kernel-command-line={i}" for i in kcl), "--force", "--incremental=strict", "--output-directory", self.output_dir, *(["--debug-shell"] if self.config.debug_shell else []), *options, ] # fmt: skip self.mkosi("summary", opt, env=env) return self.mkosi( "build", opt, args, stdin=sys.stdin if sys.stdin.isatty() else None, env=env, ) def boot(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: result = self.mkosi( "boot", [ "--runtime-build-sources=no", "--ephemeral=yes", "--register=no", *options, ], args, stdin=sys.stdin if sys.stdin.isatty() else None, check=False, ) if result.returncode != 123: raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) return result def vm(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: need_hyperv_workaround = os.uname().machine == "x86_64" result = self.mkosi( "vm", [ "--runtime-build-sources=no", "--vsock=yes", # TODO: Drop once both Hyper-V bugs are fixed in Github Actions. 
*(["--qemu-args=-cpu max,pcid=off"] if need_hyperv_workaround else []), "--ram=2G", "--ephemeral=yes", "--register=no", *options, ], args, stdin=sys.stdin if sys.stdin.isatty() else None, check=False, ) if result.returncode != 123: raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) return result @pytest.fixture(scope="session", autouse=True) def suspend_capture_stdin(pytestconfig: Any) -> Iterator[None]: """ When --capture=no (or -s) is specified, pytest will still intercept stdin. Let's explicitly make it not capture stdin when --capture=no is specified so we can debug image boot failures by logging into the emergency shell. """ capmanager: Any = pytestconfig.pluginmanager.getplugin("capturemanager") if pytestconfig.getoption("capture") == "no": capmanager.suspend_global_capture(in_=True) yield if pytestconfig.getoption("capture") == "no": capmanager.resume_global_capture() @contextlib.contextmanager def ci_group(s: str) -> Iterator[None]: github_actions = os.getenv("GITHUB_ACTIONS") if github_actions: print(f"\n::group::{s}", flush=True) try: yield finally: if github_actions: print("\n::endgroup::", flush=True) mkosi-26/tests/conftest.py000066400000000000000000000033631512054777600160110ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterator from typing import Any, cast import pytest import mkosi.resources from mkosi.config import parse_config from mkosi.distribution import Distribution, detect_distribution from mkosi.log import log_setup from mkosi.util import resource_path from . import ImageConfig, ci_group def pytest_addoption(parser: Any) -> None: parser.addoption( "-D", "--distribution", metavar="DISTRIBUTION", help="Run the integration tests for the given distribution.", default=detect_distribution()[0], type=Distribution, choices=[Distribution(d) for d in Distribution.values()], ) parser.addoption( "-R", "--release", metavar="RELEASE", help="Run the integration tests for the given release.", ) parser.addoption( "--debug-shell", help="Pass --debug-shell when running mkosi", action="store_true", ) @pytest.fixture(scope="session") def config(request: Any) -> ImageConfig: distribution = cast(Distribution, request.config.getoption("--distribution")) with resource_path(mkosi.resources) as resources: release = cast( str, request.config.getoption("--release") or parse_config(["-d", str(distribution)], resources=resources)[2][0].release, ) return ImageConfig( distribution=distribution, release=release, debug_shell=request.config.getoption("--debug-shell"), ) @pytest.fixture(autouse=True) def ci_sections(request: Any) -> Iterator[None]: with ci_group(request.node.name): yield @pytest.fixture(scope="session", autouse=True) def logging() -> None: log_setup() mkosi-26/tests/test_boot.py000066400000000000000000000060261512054777600161650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import subprocess import pytest from mkosi.config import Bootloader, Firmware, OutputFormat from mkosi.distribution import Distribution from mkosi.qemu import find_virtiofsd from mkosi.run import find_binary, run from mkosi.sandbox import userns_has_single_user from mkosi.versioncomp import GenericVersion from . 
import Image, ImageConfig pytestmark = pytest.mark.integration def have_vmspawn() -> bool: return find_binary("systemd-vmspawn") is not None and ( GenericVersion(run(["systemd-vmspawn", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 256 ) @pytest.mark.parametrize("format", [f for f in OutputFormat if not f.is_extension_image()]) def test_format(config: ImageConfig, format: OutputFormat) -> None: with Image(config) as image: if image.config.distribution == Distribution.rhel_ubi and format in ( OutputFormat.esp, OutputFormat.uki, ): pytest.skip("Cannot build RHEL-UBI images with format 'esp' or 'uki'") image.build(options=["--format", str(format)]) if format in (OutputFormat.disk, OutputFormat.directory) and os.getuid() == 0: # systemd-resolved is enabled by default in Arch/Debian/Ubuntu (systemd default preset) but fails # to start in a systemd-nspawn container with --private-users so we mask it out here to avoid CI # failures. # FIXME: Remove when Arch/Debian/Ubuntu ship systemd v253 args = ["systemd.mask=systemd-resolved.service"] if format == OutputFormat.directory else [] image.boot(args=args) if format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp): pytest.skip("Default image is too large to be able to boot in CPIO/UKI/ESP format") if image.config.distribution == Distribution.rhel_ubi: return if format in (OutputFormat.tar, OutputFormat.oci, OutputFormat.none, OutputFormat.portable): return if format == OutputFormat.directory: if not find_virtiofsd(): pytest.skip("virtiofsd is not installed, cannot boot from directory output") if userns_has_single_user(): pytest.skip("Running in user namespace with single user, cannot boot from directory") return image.vm() if have_vmspawn() and format in (OutputFormat.disk, OutputFormat.directory): image.vm(options=["--vmm=vmspawn"]) if format != OutputFormat.disk: return image.vm(["--firmware=bios"]) @pytest.mark.parametrize("bootloader", Bootloader) def test_bootloader(config: ImageConfig, bootloader: Bootloader) -> None: if config.distribution == Distribution.rhel_ubi or bootloader.is_signed(): return firmware = Firmware.linux if bootloader == Bootloader.none else Firmware.auto with Image(config) as image: image.build(["--format=disk", "--bootloader", str(bootloader)]) image.vm(["--firmware", str(firmware)]) mkosi-26/tests/test_config.py000066400000000000000000001273771512054777600165040ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import itertools import logging import operator import os from pathlib import Path import pytest import mkosi.resources from mkosi import expand_kernel_specifiers from mkosi.config import ( Architecture, ArtifactOutput, Compression, Config, ConfigFeature, ConfigTree, OutputFormat, Verb, config_parse_bytes, in_box, parse_config, parse_ini, ) from mkosi.distribution import Distribution, detect_distribution from mkosi.util import chdir, resource_path def test_compression_enum_creation() -> None: assert Compression["none"] == Compression.none assert Compression["zstd"] == Compression.zstd assert Compression["zst"] == Compression.zstd assert Compression["xz"] == Compression.xz assert Compression["bz2"] == Compression.bz2 assert Compression["gz"] == Compression.gz assert Compression["lz4"] == Compression.lz4 assert Compression["lzma"] == Compression.lzma def test_compression_enum_bool() -> None: assert not bool(Compression.none) assert bool(Compression.zstd) assert bool(Compression.xz) assert bool(Compression.bz2) assert bool(Compression.gz) assert 
bool(Compression.lz4) assert bool(Compression.lzma) def test_compression_enum_str() -> None: assert str(Compression.none) == "none" assert str(Compression.zstd) == "zstd" assert str(Compression.zst) == "zstd" assert str(Compression.xz) == "xz" assert str(Compression.bz2) == "bz2" assert str(Compression.gz) == "gz" assert str(Compression.lz4) == "lz4" assert str(Compression.lzma) == "lzma" def test_parse_ini(tmp_path: Path) -> None: p = tmp_path / "ini" p.write_text( """\ [MySection] Value=abc Other=def ALLCAPS=txt # Comment [EmptySection] [AnotherSection] EmptyValue= Multiline=abc def qed ord """ ) g = parse_ini(p) assert next(g) == ("MySection", "Value", "abc") assert next(g) == ("MySection", "Other", "def") assert next(g) == ("MySection", "ALLCAPS", "txt") assert next(g) == ("MySection", "", "") assert next(g) == ("EmptySection", "", "") assert next(g) == ("AnotherSection", "EmptyValue", "") assert next(g) == ("AnotherSection", "Multiline", "abc\ndef\nqed\nord") def test_parse_config(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=ubuntu Architecture=arm64 Repositories=epel,epel-next [Config] Profiles=abc [Build] Environment=MY_KEY=MY_VALUE [Output] Format=cpio ImageId=base [Runtime] Credentials=my.cred=my.value """ ) with chdir(d): _, _, [config] = parse_config() assert config.distribution == Distribution.ubuntu assert config.architecture == Architecture.arm64 assert config.profiles == ["abc"] assert config.output_format == OutputFormat.cpio assert config.image_id == "base" with chdir(d): _, _, [config] = parse_config( [ "--distribution", "fedora", "--environment", "MY_KEY=CLI_VALUE", "--credential", "my.cred=cli.value", "--repositories", "universe", ] ) # fmt: skip # Values from the CLI should take priority. assert config.distribution == Distribution.fedora assert config.environment["MY_KEY"] == "CLI_VALUE" assert config.credentials["my.cred"] == "cli.value" assert config.repositories == ["epel", "epel-next", "universe"] with chdir(d): _, _, [config] = parse_config( [ "--distribution", "", "--environment", "", "--credential", "", "--repositories", "", ] ) # fmt: skip # Empty values on the CLIs resets non-collection based settings to their defaults and collection based # settings to empty collections. assert config.distribution == (detect_distribution()[0] or Distribution.custom) assert "MY_KEY" not in config.environment assert "my.cred" not in config.credentials assert config.repositories == [] (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/d1.conf").write_text( """\ [Distribution] Distribution=debian [Config] Profiles=qed def [Output] ImageId=00-dropin ImageVersion=0 @Output=abc """ ) with chdir(d): _, _, [config] = parse_config(["--profile", "last"]) # Setting a value explicitly in a dropin should override the default from mkosi.conf. assert config.distribution == Distribution.debian # Lists should be merged by appending the new values to the existing values. Any values from the CLI # should be appended to the values from the configuration files. assert config.profiles == ["abc", "qed", "def", "last"] assert config.output_format == OutputFormat.cpio assert config.image_id == "00-dropin" assert config.image_version == "0" # '@' specifier should be automatically dropped. assert config.output == "abc" (d / "mkosi.version").write_text("1.2.3") (d / "mkosi.conf.d/d2.conf").write_text( """\ [Content] Packages= [Output] ImageId= """ ) with chdir(d): _, _, [config] = parse_config() # Test that empty assignment resets settings. 
assert config.packages == [] assert config.image_id is None # mkosi.version should only be used if no version is set explicitly. assert config.image_version == "0" (d / "mkosi.conf.d/d1.conf").unlink() with chdir(d): _, _, [config] = parse_config() # ImageVersion= is not set explicitly anymore, so now the version from mkosi.version should be used. assert config.image_version == "1.2.3" (d / "abc").mkdir() (d / "abc/mkosi.conf").write_text( """\ [Content] BuildPackages=abc [Runtime] CXL=yes """ ) (d / "abc/mkosi.conf.d").mkdir() (d / "abc/mkosi.conf.d/abc.conf").write_text( """\ [Output] SplitArtifacts=yes """ ) with chdir(d): _, _, [config] = parse_config() assert not config.cxl assert config.split_artifacts == ArtifactOutput.compat_no() # Passing the directory should include both the main config file and the dropin. _, _, [config] = parse_config(["--include", os.fspath(d / "abc")] * 2) assert config.cxl assert config.split_artifacts == ArtifactOutput.compat_yes() # The same extra config should not be parsed more than once. assert config.build_packages == ["abc"] # Passing the main config file should not include the dropin. _, _, [config] = parse_config(["--include", os.fspath(d / "abc/mkosi.conf")]) assert config.cxl assert config.split_artifacts == ArtifactOutput.compat_no() (d / "mkosi.images").mkdir() (d / "mkosi.images/one.conf").write_text( """\ [Content] Packages=one """ ) (d / "mkosi.images/two").mkdir() (d / "mkosi.images/two/mkosi.skeleton").mkdir() (d / "mkosi.images/two/mkosi.conf").write_text( """ [Content] Packages=two [Output] ImageVersion=4.5.6 """ ) with chdir(d): _, _, [one, two, config] = parse_config( ["--package", "qed", "--build-package", "def", "--repositories", "cli"] ) # Universal settings should always come from the main image. assert one.distribution == config.distribution assert two.distribution == config.distribution assert one.release == config.release assert two.release == config.release # Non-universal settings should not be passed to the subimages. assert one.packages == ["one"] assert two.packages == ["two"] assert one.build_packages == [] assert two.build_packages == [] # But should apply to the main image of course. assert config.packages == ["qed"] assert config.build_packages == ["def"] # Inherited settings should be passed down to subimages but overridable by subimages. assert one.image_version == "1.2.3" assert two.image_version == "4.5.6" # Default values from subimages for universal settings should not be picked up. assert len(one.sandbox_trees) == 0 assert len(two.sandbox_trees) == 0 with chdir(d): _, _, [one, two, config] = parse_config(["--image-version", "7.8.9"]) # Inherited settings specified on the CLI should not override subimages that configure the setting # explicitly. 
assert config.image_version == "7.8.9" assert one.image_version == "7.8.9" assert two.image_version == "4.5.6" def test_parse_includes_once(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Content] BuildPackages=abc """ ) (d / "abc.conf").write_text( """\ [Content] BuildPackages=def """ ) with chdir(d): _, _, [config] = parse_config(["--include", "abc.conf", "--include", "abc.conf"]) assert config.build_packages == ["abc", "def"] (d / "mkosi.images").mkdir() for n in ("one", "two"): (d / "mkosi.images" / f"{n}.conf").write_text( """\ [Config] Include=abc.conf """ ) with chdir(d): _, _, [one, two, config] = parse_config([]) assert one.build_packages == ["def"] assert two.build_packages == ["def"] def test_profiles(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.profiles").mkdir() (d / "mkosi.profiles/profile.conf").write_text( """\ [Distribution] Distribution=fedora [Runtime] KVM=yes """ ) (d / "mkosi.conf").write_text( """\ [Config] Profiles=profile """ ) (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/abc.conf").write_text( """\ [Distribution] Distribution=debian """ ) with chdir(d): _, _, [config] = parse_config() assert config.profiles == ["profile"] # The profile should override mkosi.conf.d/ assert config.distribution == Distribution.fedora assert config.kvm == ConfigFeature.enabled (d / "mkosi.conf").unlink() with chdir(d): _, _, [config] = parse_config(["--profile", "profile"]) assert config.profiles == ["profile"] # The profile should override mkosi.conf.d/ assert config.distribution == Distribution.fedora assert config.kvm == ConfigFeature.enabled (d / "mkosi.conf").write_text( """\ [Config] Profiles=profile,abc """ ) (d / "mkosi.profiles/abc.conf").write_text( """\ [Match] Profiles=abc [Distribution] Distribution=opensuse """ ) with chdir(d): _, _, [config] = parse_config() assert config.profiles == ["profile", "abc"] assert config.distribution == Distribution.opensuse # Check that mkosi.profiles/ is parsed in subimages as well. (d / "mkosi.images/subimage/mkosi.profiles").mkdir(parents=True) (d / "mkosi.images/subimage/mkosi.profiles/abc.conf").write_text( """ [Build] Environment=Image=%I """ ) with chdir(d): _, _, [subimage, config] = parse_config() assert subimage.environment["Image"] == "subimage" def test_override_default(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Build] Environment=MY_KEY=MY_VALUE ToolsTree=yes """ ) with chdir(d): _, _, [config] = parse_config(["--tools-tree", "", "--environment", ""]) assert config.tools_tree is None assert "MY_KEY" not in config.environment (d / "mkosi.tools.conf").touch() (d / "mkosi.local.conf").write_text( """\ [Build] ToolsTree= """ ) with chdir(d): _, _, [config] = parse_config([]) assert config.tools_tree is None def test_local_config(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.local.conf").write_text( """\ [Distribution] Distribution=debian [Content] WithTests=yes Environment=FOO=override Environment=BAZ=normal """ ) with chdir(d): _, _, [config] = parse_config() assert config.distribution == Distribution.debian (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=fedora [Content] WithTests=no Environment=FOO=normal Environment=BAR=normal """ ) with chdir(d): _, _, [config] = parse_config() # Local config should take precedence over non-local config. 
assert config.distribution == Distribution.debian assert config.with_tests with chdir(d): _, _, [config] = parse_config(["--distribution", "fedora", "-T"]) assert config.distribution == Distribution.fedora assert not config.with_tests (d / "mkosi.local/mkosi.conf.d").mkdir(parents=True) (d / "mkosi.local/mkosi.conf.d/10-test.conf").write_text( """\ [Content] Environment=BAR=override Environment=BAZ=override """ ) with chdir(d): _, _, [config] = parse_config() assert config.environment == {"FOO": "override", "BAR": "override", "BAZ": "override"} def test_parse_load_verb(tmp_path: Path) -> None: with chdir(tmp_path): assert parse_config(["build"])[0].verb == Verb.build assert parse_config(["clean"])[0].verb == Verb.clean assert parse_config(["genkey"])[0].verb == Verb.genkey assert parse_config(["bump"])[0].verb == Verb.bump assert parse_config(["serve"])[0].verb == Verb.serve assert parse_config(["build"])[0].verb == Verb.build assert parse_config(["shell"])[0].verb == Verb.shell assert parse_config(["boot"])[0].verb == Verb.boot assert parse_config(["qemu"])[0].verb == Verb.qemu assert parse_config(["vm"])[0].verb == Verb.vm assert parse_config(["journalctl"])[0].verb == Verb.journalctl assert parse_config(["coredumpctl"])[0].verb == Verb.coredumpctl with pytest.raises(SystemExit): parse_config(["invalid"]) def test_os_distribution(tmp_path: Path) -> None: with chdir(tmp_path): for dist in Distribution: _, _, [config] = parse_config(["-d", dist.value]) assert config.distribution == dist with pytest.raises(tuple((argparse.ArgumentError, SystemExit))): parse_config(["-d", "invalidDistro"]) with pytest.raises(tuple((argparse.ArgumentError, SystemExit))): parse_config(["-d"]) for dist in Distribution: Path("mkosi.conf").write_text(f"[Distribution]\nDistribution={dist}") _, _, [config] = parse_config() assert config.distribution == dist def test_parse_config_files_filter(tmp_path: Path) -> None: with chdir(tmp_path): confd = Path("mkosi.conf.d") confd.mkdir() (confd / "10-file.conf").write_text("[Content]\nPackages=yes") (confd / "20-file.noconf").write_text("[Content]\nPackages=nope") _, _, [config] = parse_config() assert config.packages == ["yes"] def test_compression(tmp_path: Path) -> None: with chdir(tmp_path): _, _, [config] = parse_config(["--format", "disk", "--compress-output", "False"]) assert config.compress_output == Compression.none def test_match_only(tmp_path: Path) -> None: with chdir(tmp_path): Path("mkosi.conf").write_text( """\ [Match] Format=|directory Format=|disk """ ) Path("mkosi.conf.d").mkdir() Path("mkosi.conf.d/10-abc.conf").write_text( """\ [Output] ImageId=abcde """ ) _, _, [config] = parse_config(["--format", "tar"]) assert config.image_id != "abcde" def test_match_multiple(tmp_path: Path) -> None: with chdir(tmp_path): Path("mkosi.conf").write_text( """\ [Match] Format=|disk Format=|directory [Match] Architecture=|x86-64 Architecture=|arm64 [Output] ImageId=abcde """ ) # Both sections are not matched, so image ID should not be "abcde". _, _, [config] = parse_config(["--format", "tar", "--architecture", "s390x"]) assert config.image_id != "abcde" # Only a single section is matched, so image ID should not be "abcde". _, _, [config] = parse_config(["--format", "disk", "--architecture", "s390x"]) assert config.image_id != "abcde" # Both sections are matched, so image ID should be "abcde". 
_, _, [config] = parse_config(["--format", "disk", "--architecture", "x86-64"]) assert config.image_id == "abcde" Path("mkosi.conf").write_text( """\ [TriggerMatch] Format=disk Architecture=x86-64 [TriggerMatch] Format=directory Architecture=arm64 [Output] ImageId=abcde """ ) # Both sections are not matched, so image ID should not be "abcde". _, _, [config] = parse_config(["--format", "tar", "--architecture", "s390x"]) assert config.image_id != "abcde" # The first section is matched, so image ID should be "abcde". _, _, [config] = parse_config(["--format", "disk", "--architecture", "x86-64"]) assert config.image_id == "abcde" # The second section is matched, so image ID should be "abcde". _, _, [config] = parse_config(["--format", "directory", "--architecture", "arm64"]) assert config.image_id == "abcde" # Parts of all section are matched, but none is matched fully, so image ID should not be "abcde". _, _, [config] = parse_config(["--format", "disk", "--architecture", "arm64"]) assert config.image_id != "abcde" Path("mkosi.conf").write_text( """\ [TriggerMatch] Format=|disk Format=|directory [TriggerMatch] Format=directory Architecture=arm64 [Output] ImageId=abcde """ ) # The first section is matched, so image ID should be "abcde". _, _, [config] = parse_config(["--format", "disk"]) assert config.image_id == "abcde" Path("mkosi.conf").write_text( """\ [TriggerMatch] Format=|disk Format=|directory Architecture=x86-64 [TriggerMatch] Format=directory Architecture=arm64 [Output] ImageId=abcde """ ) # No sections are matched, so image ID should be not "abcde". _, _, [config] = parse_config(["--format", "disk", "--architecture=arm64"]) assert config.image_id != "abcde" # Mixing both [Match] and [TriggerMatch] Path("mkosi.conf").write_text( """\ [Match] Format=disk [TriggerMatch] Architecture=arm64 [TriggerMatch] Architecture=x86-64 [Output] ImageId=abcde """ ) # Match and first TriggerMatch sections match _, _, [config] = parse_config(["--format", "disk", "--architecture=arm64"]) assert config.image_id == "abcde" # Match section matches, but no TriggerMatch section matches _, _, [config] = parse_config(["--format", "disk", "--architecture=s390x"]) assert config.image_id != "abcde" # Second TriggerMatch section matches, but the Match section does not _, _, [config] = parse_config(["--format", "tar", "--architecture=x86-64"]) assert config.image_id != "abcde" def test_match_empty(tmp_path: Path) -> None: with chdir(tmp_path): Path("mkosi.conf").write_text( """\ [Match] Profiles= [Build] Environment=ABC=QED """ ) _, _, [config] = parse_config([]) assert config.environment.get("ABC") == "QED" _, _, [config] = parse_config(["--profile", "profile"]) assert config.environment.get("ABC") is None @pytest.mark.parametrize( "dist1,dist2", itertools.combinations_with_replacement([Distribution.debian, Distribution.opensuse], 2), ) def test_match_distribution(tmp_path: Path, dist1: Distribution, dist2: Distribution) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution={dist1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] Distribution={dist1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] Distribution={dist2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] Distribution=|{dist1} Distribution=|{dist2} [Content] Packages=testpkg3 """ ) _, _, [conf] = parse_config() assert 
"testpkg1" in conf.packages if dist1 == dist2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages @pytest.mark.parametrize("release1,release2", itertools.combinations_with_replacement([36, 37], 2)) def test_match_release(tmp_path: Path, release1: int, release2: int) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution=fedora Release={release1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] Release={release1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] Release={release2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] Release=|{release1} Release=|{release2} [Content] Packages=testpkg3 """ ) _, _, [conf] = parse_config() assert "testpkg1" in conf.packages if release1 == release2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages def test_match_build_sources(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] BuildSources=kernel BuildSources=/kernel [Output] Output=abc """ ) with chdir(d): _, _, [config] = parse_config(["--build-sources", ".:kernel"]) assert config.output == "abc" def test_match_repositories(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] Repositories=epel [Content] Output=qed """ ) with chdir(d): _, _, [config] = parse_config(["--repositories", "epel,epel-next"]) assert config.output == "qed" def test_match_architecture(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] Architecture=uefi [Content] Output=qed """ ) with chdir(d): _, _, [config] = parse_config(["--architecture", "arm64"]) assert config.output == "qed" @pytest.mark.parametrize("image1,image2", itertools.combinations_with_replacement(["image_a", "image_b"], 2)) def test_match_imageid(tmp_path: Path, image1: str, image2: str) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution=fedora [Output] ImageId={image1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] ImageId={image1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] ImageId={image2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] ImageId=|{image1} ImageId=|{image2} [Content] Packages=testpkg3 """ ) child4 = Path("mkosi.conf.d/child4.conf") child4.write_text( """\ [Match] ImageId=image* [Content] Packages=testpkg4 """ ) _, _, [conf] = parse_config() assert "testpkg1" in conf.packages if image1 == image2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages assert "testpkg4" in conf.packages @pytest.mark.parametrize( "op,version", itertools.product( ["", "==", "<", ">", "<=", ">="], [122, 123], ), ) def test_match_imageversion(tmp_path: Path, op: str, version: str) -> None: opfunc = { "==": operator.eq, "!=": operator.ne, "<": operator.lt, "<=": operator.le, ">": operator.gt, ">=": operator.ge, }.get(op, operator.eq) with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( """\ [Output] ImageId=testimage ImageVersion=123 """ ) Path("mkosi.conf.d").mkdir() child1 = 
Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] ImageVersion={op}{version} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] ImageVersion=<200 ImageVersion={op}{version} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] ImageVersion=>9000 ImageVersion={op}{version} [Content] Packages=testpkg3 """ ) _, _, [conf] = parse_config() assert ("testpkg1" in conf.packages) == opfunc(123, version) assert ("testpkg2" in conf.packages) == opfunc(123, version) assert "testpkg3" not in conf.packages def test_match_environment(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] Environment=MYENV=abc [Content] ImageId=matched """ ) with chdir(d): _, _, [conf] = parse_config(["--environment", "MYENV=abc"]) assert conf.image_id == "matched" _, _, [conf] = parse_config(["--environment", "MYENV=bad"]) assert conf.image_id != "matched" _, _, [conf] = parse_config(["--environment", "MYEN=abc"]) assert conf.image_id != "matched" _, _, [conf] = parse_config(["--environment", "MYEN=bad"]) assert conf.image_id != "matched" (d / "mkosi.conf").write_text( """\ [Match] Environment=MYENV [Content] ImageId=matched """ ) with chdir(d): _, _, [conf] = parse_config(["--environment", "MYENV=abc"]) assert conf.image_id == "matched" _, _, [conf] = parse_config(["--environment", "MYENV=bad"]) assert conf.image_id == "matched" _, _, [conf] = parse_config(["--environment", "MYEN=abc"]) assert conf.image_id != "matched" def test_paths_with_default_factory(tmp_path: Path) -> None: """ If both paths= and default_factory= are defined, default_factory= should not be used when at least one of the files/directories from paths= has been found. 
""" with chdir(tmp_path): Path("mkosi.sandbox.tar").touch() _, _, [config] = parse_config() assert config.sandbox_trees == [ ConfigTree(Path.cwd() / "mkosi.sandbox.tar", None), ] @pytest.mark.parametrize( "sections,args,warning_count", [ (["Output"], [], 0), (["Content"], [], 1), (["Content", "Output"], [], 1), (["Output", "Content"], [], 1), (["Output", "Content", "Distribution"], [], 2), (["Content"], ["--image-id=testimage"], 1), ], ) def test_wrong_section_warning( tmp_path: Path, caplog: pytest.LogCaptureFixture, sections: list[str], args: list[str], warning_count: int, ) -> None: with chdir(tmp_path): # Create a config with ImageId in the wrong section, # and sometimes in the correct section Path("mkosi.conf").write_text( "\n".join( f"""\ [{section}] ImageId=testimage """ for section in sections ) ) with caplog.at_level(logging.WARNING): # Parse the config, with --image-id sometimes given on the command line parse_config(args) assert len(caplog.records) == warning_count def test_config_parse_bytes() -> None: assert config_parse_bytes(None) is None assert config_parse_bytes("1") == 4096 assert config_parse_bytes("8000") == 8192 assert config_parse_bytes("8K") == 8192 assert config_parse_bytes("4097") == 8192 assert config_parse_bytes("1M") == 1024**2 assert config_parse_bytes("1.9M") == 1994752 assert config_parse_bytes("1G") == 1024**3 assert config_parse_bytes("7.3G") == 7838318592 with pytest.raises(SystemExit): config_parse_bytes("-1") with pytest.raises(SystemExit): config_parse_bytes("-2K") with pytest.raises(SystemExit): config_parse_bytes("-3M") with pytest.raises(SystemExit): config_parse_bytes("-4G") def test_specifiers(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=ubuntu Release=lunar Architecture=arm64 [Output] ImageId=my-image-id ImageVersion=1.2.3 OutputDirectory=abcde Output=test [Build] Environment=Distribution=%d Release=%r Architecture=%a Image=%I ImageId=%i ImageVersion=%v OutputDirectory=%O Output=%o ConfigRootDirectory=%D ConfigRootConfdir=%C ConfigRootPwd=%P Filesystem=%F """ ) (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/abc.conf").write_text( """\ [Build] Environment=ConfigAbcDirectory=%D ConfigAbcConfdir=%C ConfigAbcPwd=%P """ ) (d / "mkosi.conf.d/qed").mkdir() (d / "mkosi.conf.d/qed/mkosi.conf").write_text( """ [Build] Environment=ConfigQedDirectory=%D ConfigQedConfdir=%C ConfigQedPwd=%P """ ) (d / "mkosi.images").mkdir() (d / "mkosi.images/subimage.conf").write_text( """ [Build] Environment=Image=%I """ ) with chdir(d): _, _, [subimage, config] = parse_config() expected = { "Distribution": "ubuntu", "Release": "lunar", "Architecture": "arm64", "Image": "main", "ImageId": "my-image-id", "ImageVersion": "1.2.3", "OutputDirectory": os.fspath(Path.cwd() / "abcde"), "Output": "test", "ConfigRootDirectory": os.fspath(d), "ConfigRootConfdir": os.fspath(d), "ConfigRootPwd": os.fspath(d), "ConfigAbcDirectory": os.fspath(d), "ConfigAbcConfdir": os.fspath(d / "mkosi.conf.d"), "ConfigAbcPwd": os.fspath(d), "ConfigQedDirectory": os.fspath(d), "ConfigQedConfdir": os.fspath(d / "mkosi.conf.d/qed"), "ConfigQedPwd": os.fspath(d / "mkosi.conf.d/qed"), "Filesystem": "ext4", } assert {k: v for k, v in config.environment.items() if k in expected} == expected assert subimage.environment["Image"] == "subimage" def test_kernel_specifiers(tmp_path: Path) -> None: kver = "13.0.8-5.10.0-1057-oem" # taken from reporter of #1638 token = "MySystemImage" roothash = "67e893261799236dcf20529115ba9fae4fd7c2269e1e658d42269503e5760d38" 
def test_expand_kernel_specifiers(text: str) -> str: return expand_kernel_specifiers( text, kver=kver, token=token, roothash=roothash, ) assert test_expand_kernel_specifiers("&&") == "&" assert test_expand_kernel_specifiers("&k") == kver assert test_expand_kernel_specifiers("&e") == token assert test_expand_kernel_specifiers("&h") == roothash assert test_expand_kernel_specifiers("Image_1.0.3") == "Image_1.0.3" assert test_expand_kernel_specifiers("Image+&h-&k-&e") == f"Image+{roothash}-{kver}-{token}" def test_output_id_version(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """ [Output] ImageId=output ImageVersion=1.2.3 """ ) with chdir(d): _, _, [config] = parse_config() assert config.output == "output_1.2.3" def test_deterministic() -> None: assert Config.default() == Config.default() def test_environment(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Config] PassEnvironment=PassThisEnv [Build] Environment=TestValue2=300 TestValue3=400 PassThisEnv=abc EnvironmentFiles=other.env """ ) (d / "mkosi.env").write_text( """\ TestValue1=90 TestValue4=99 """ ) (d / "other.env").write_text( """\ TestValue1=100 TestValue2=200 """ ) (d / "mkosi.images").mkdir() (d / "mkosi.images/sub.conf").touch() with chdir(d): _, _, [sub, config] = parse_config() expected = { "TestValue1": "100", # from other.env "TestValue2": "300", # from mkosi.conf "TestValue3": "400", # from mkosi.conf "TestValue4": "99", # from mkosi.env } # Only check values for keys from expected, as config.environment contains other items as well assert {k: config.finalize_environment()[k] for k in expected.keys()} == expected assert config.environment_files == [Path.cwd() / "mkosi.env", Path.cwd() / "other.env"] assert sub.environment["PassThisEnv"] == "abc" assert "TestValue2" not in sub.environment def test_proxy(tmp_path: Path) -> None: d = tmp_path # Verify environment variables are set correctly when GIT_CONFIG_COUNT is not set (d / "mkosi.conf").write_text( """\ [Build] ProxyUrl=http://proxy:8080 """ ) with chdir(d): _, _, [config] = parse_config() expected = { "GIT_CONFIG_COUNT": "2", "GIT_CONFIG_KEY_0": "http.proxy", "GIT_CONFIG_VALUE_0": "http://proxy:8080", "GIT_CONFIG_KEY_1": "https.proxy", "GIT_CONFIG_VALUE_1": "http://proxy:8080", } # Only check values for keys from expected, as config.environment contains other items as well assert {k: config.finalize_environment()[k] for k in expected.keys()} == expected (d / "mkosi.conf").write_text( """\ [Build] ProxyUrl=http://proxy:8080 Environment=GIT_CONFIG_COUNT=1 GIT_CONFIG_KEY_0=user.name GIT_CONFIG_VALUE_0=bob """ ) with chdir(d): _, _, [config] = parse_config() expected = { "GIT_CONFIG_COUNT": "3", "GIT_CONFIG_KEY_0": "user.name", "GIT_CONFIG_VALUE_0": "bob", "GIT_CONFIG_KEY_1": "http.proxy", "GIT_CONFIG_VALUE_1": "http://proxy:8080", "GIT_CONFIG_KEY_2": "https.proxy", "GIT_CONFIG_VALUE_2": "http://proxy:8080", } # Only check values for keys from expected, as config.environment contains other items as well assert {k: config.finalize_environment()[k] for k in expected.keys()} == expected def test_mkosi_version_executable(tmp_path: Path) -> None: d = tmp_path version = d / "mkosi.version" version.write_text("#!/bin/sh\necho '1.2.3'\n") with chdir(d): with pytest.raises(SystemExit) as error: _, _, [config] = parse_config() assert error.type is SystemExit assert error.value.code != 0 version.chmod(0o755) with chdir(d): _, _, [config] = parse_config() assert config.image_version == "1.2.3" def test_split_artifacts(tmp_path: Path) -> 
None: d = tmp_path (d / "mkosi.conf").write_text( """ [Output] SplitArtifacts=uki """ ) with chdir(d): _, _, [config] = parse_config() assert config.split_artifacts == [ArtifactOutput.uki] (d / "mkosi.conf").write_text( """ [Output] SplitArtifacts=uki SplitArtifacts=kernel SplitArtifacts=initrd """ ) with chdir(d): _, _, [config] = parse_config() assert config.split_artifacts == [ ArtifactOutput.uki, ArtifactOutput.kernel, ArtifactOutput.initrd, ] def test_split_artifacts_compat(tmp_path: Path) -> None: d = tmp_path with chdir(d): _, _, [config] = parse_config() assert config.split_artifacts == ArtifactOutput.compat_no() (d / "mkosi.conf").write_text( """ [Output] SplitArtifacts=yes """ ) with chdir(d): _, _, [config] = parse_config() assert config.split_artifacts == ArtifactOutput.compat_yes() def test_cli_collection_reset(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """ [Content] Packages=abc """ ) with chdir(d): _, _, [config] = parse_config(["--package", ""]) assert config.packages == [] _, _, [config] = parse_config(["--package", "", "--package", "foo"]) assert config.packages == ["foo"] _, _, [config] = parse_config(["--package", "foo", "--package", "", "--package", "bar"]) assert config.packages == ["bar"] _, _, [config] = parse_config(["--package", "foo", "--package", ""]) assert config.packages == [] def test_tools(tmp_path: Path) -> None: d = tmp_path argv = ["--tools-tree=default"] if in_box(): pytest.skip("Cannot run test_tools() test within mkosi box environment") with resource_path(mkosi.resources) as resources, chdir(d): _, tools, _ = parse_config(argv, resources=resources) assert tools host = detect_distribution()[0] if host: assert tools.distribution == ( host.installer.default_tools_tree_distribution() or tools.distribution ) (d / "mkosi.tools.conf").write_text( f""" [Content] PackageDirectories={d} """ ) _, tools, _ = parse_config(argv, resources=resources) assert tools assert tools.package_directories == [Path(d)] _, tools, _ = parse_config( argv + ["--tools-tree-distribution=arch", "--tools-tree-package-directory=/tmp"], resources=resources, ) assert tools assert tools.distribution == Distribution.arch assert tools.package_directories == [Path(d), Path("/tmp")] _, tools, _ = parse_config(argv + ["--tools-tree-package-directory="], resources=resources) assert tools assert tools.package_directories == [] (d / "mkosi.conf").write_text( """ [Build] ToolsTreeDistribution=arch """ ) _, tools, _ = parse_config(argv, resources=resources) assert tools assert tools.distribution == Distribution.arch def test_subdir(tmp_path: Path) -> None: d = tmp_path with chdir(d): (d / "mkosi").mkdir() (d / "mkosi/mkosi.conf").write_text( """ [Output] Output=qed """ ) _, _, [config] = parse_config() assert config.output == "qed" os.chdir(d) (d / "mkosi.conf").write_text( """ [Output] Output=abc """ ) _, _, [config] = parse_config() assert config.output == "abc" def test_assert(tmp_path: Path) -> None: d = tmp_path with chdir(d): (d / "mkosi.conf").write_text( """ [Assert] ImageId=abcde """ ) with pytest.raises(SystemExit): parse_config() # Does not raise, i.e. parses successfully, but we don't care for the content. 
parse_config(["--image-id", "abcde"]) (d / "mkosi.conf").write_text( """ [Assert] ImageId=abcde [Assert] Environment=ABC=QED """ ) with pytest.raises(SystemExit): parse_config([]) with pytest.raises(SystemExit): parse_config(["--image-id", "abcde"]) with pytest.raises(SystemExit): parse_config(["--environment", "ABC=QED"]) parse_config(["--image-id", "abcde", "--environment", "ABC=QED"]) (d / "mkosi.conf").write_text( """ [TriggerAssert] ImageId=abcde [TriggerAssert] Environment=ABC=QED """ ) with pytest.raises(SystemExit): parse_config() parse_config(["--image-id", "abcde"]) parse_config(["--environment", "ABC=QED"]) (d / "mkosi.conf").write_text( """ [Assert] ImageId=abcde [TriggerAssert] Environment=ABC=QED [TriggerAssert] Environment=DEF=QEE """ ) with pytest.raises(SystemExit): parse_config() parse_config(["--image-id", "abcde", "--environment", "ABC=QED"]) parse_config(["--image-id", "abcde", "--environment", "DEF=QEE"]) mkosi-26/tests/test_extension.py000066400000000000000000000016411512054777600172340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from pathlib import Path import pytest from mkosi.config import OutputFormat from . import Image, ImageConfig pytestmark = pytest.mark.integration @pytest.mark.parametrize("format", [f for f in OutputFormat if f.is_extension_image()]) def test_extension(config: ImageConfig, format: OutputFormat) -> None: with Image(config) as image: image.build(["--clean-package-metadata=no", "--format=directory"]) with Image(image.config) as sysext: sysext.build( [ "--directory", "", "--incremental=no", "--base-tree", Path(image.output_dir) / "image", "--overlay=yes", "--selinux-relabel=no", "--package=lsof", f"--format={format}", ] ) # fmt: skip mkosi-26/tests/test_initrd.py000066400000000000000000000205741512054777600165170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import os import subprocess import tempfile import textwrap from collections.abc import Iterator from pathlib import Path import pytest from mkosi.run import run from mkosi.sandbox import umask from mkosi.tree import copy_tree from mkosi.util import PathString from . import Image, ImageConfig pytestmark = pytest.mark.integration @contextlib.contextmanager def mount(what: PathString, where: PathString) -> Iterator[Path]: where = Path(where) if not where.exists(): with umask(~0o755): where.mkdir(parents=True) run(["mount", "--no-mtab", what, where]) try: yield where finally: run(["umount", "--no-mtab", where]) @pytest.fixture(scope="module") def passphrase() -> Iterator[Path]: # We can't use tmp_path fixture because pytest creates it in a nested directory we can't access using our # unprivileged user. # TODO: Use delete_on_close=False and close() instead of flush() when we require Python 3.12 or newer. 
with tempfile.NamedTemporaryFile(prefix="mkosi.passphrase", mode="w") as passphrase: passphrase.write("mkosi") passphrase.flush() st = Path.cwd().stat() os.fchown(passphrase.fileno(), st.st_uid, st.st_gid) os.fchmod(passphrase.fileno(), 0o600) yield Path(passphrase.name) def test_initrd(config: ImageConfig) -> None: with Image(config) as image: image.build(options=["--format=disk"]) image.vm() @pytest.mark.skipif(os.getuid() != 0, reason="mkosi-initrd LVM test can only be executed as root") def test_initrd_lvm(config: ImageConfig) -> None: with Image(config) as image, contextlib.ExitStack() as stack: image.build(["--format=disk"]) lvm = Path(image.output_dir) / "lvm.raw" lvm.touch() os.truncate(lvm, 5000 * 1024**2) lodev = run( ["losetup", "--show", "--find", "--partscan", lvm], stdout=subprocess.PIPE ).stdout.strip() stack.callback(lambda: run(["losetup", "--detach", lodev])) run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable") run(["lvm", "pvcreate", "--devicesfile", "", f"{lodev}p1"]) run(["lvm", "pvs", "--devicesfile", ""]) run(["lvm", "vgcreate", "--devicesfile", "", "-An", "vg_mkosi", f"{lodev}p1"]) run(["lvm", "vgchange", "--devicesfile", "", "-ay", "vg_mkosi"]) run(["lvm", "vgs", "--devicesfile", ""]) stack.callback(lambda: run(["lvm", "vgchange", "--devicesfile", "", "-an", "vg_mkosi"])) run(["lvm", "lvcreate", "--devicesfile", "", "-An", "-l", "100%FREE", "-n", "lv0", "vg_mkosi"]) run(["lvm", "lvs", "--devicesfile", ""]) run(["udevadm", "wait", "--timeout=30", "/dev/vg_mkosi/lv0"]) run([f"mkfs.{image.config.distribution.installer.filesystem()}", "-L", "root", "/dev/vg_mkosi/lv0"]) src = Path(stack.enter_context(tempfile.TemporaryDirectory())) run(["systemd-dissect", "--mount", "--mkdir", Path(image.output_dir) / "image.raw", src]) stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", src])) dst = Path(stack.enter_context(tempfile.TemporaryDirectory())) stack.enter_context(mount(Path("/dev/vg_mkosi/lv0"), dst)) copy_tree(src, dst) stack.close() lvm.rename(Path(image.output_dir) / "image.raw") image.vm( [ "--firmware=linux", # LVM confuses systemd-repart so we mask it for this test. "--kernel-command-line-extra=systemd.mask=systemd-repart.service", "--kernel-command-line-extra=root=LABEL=root", ] ) def test_initrd_luks(config: ImageConfig, passphrase: Path) -> None: with tempfile.TemporaryDirectory() as repartd: st = Path.cwd().stat() os.chown(repartd, st.st_uid, st.st_gid) (Path(repartd) / "00-esp.conf").write_text( textwrap.dedent( """\ [Partition] Type=esp Format=vfat CopyFiles=/boot:/ CopyFiles=/efi:/ SizeMinBytes=1G SizeMaxBytes=1G """ ) ) (Path(repartd) / "05-bios.conf").write_text( textwrap.dedent( """\ [Partition] # UUID of the grub BIOS boot partition which grubs needs on GPT to # embed itself into. 
Type=21686148-6449-6e6f-744e-656564454649 SizeMinBytes=1M SizeMaxBytes=1M """ ) ) (Path(repartd) / "10-root.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=root Format={config.distribution.installer.filesystem()} Minimize=guess Encrypt=key-file CopyFiles=/ """ ) ) with Image(config) as image: image.build(["--repart-directory", repartd, "--passphrase", passphrase, "--format=disk"]) image.vm(["--credential=cryptsetup.passphrase=mkosi"]) @pytest.mark.skipif(os.getuid() != 0, reason="mkosi-initrd LUKS+LVM test can only be executed as root") def test_initrd_luks_lvm(config: ImageConfig, passphrase: Path) -> None: with Image(config) as image, contextlib.ExitStack() as stack: image.build(["--format=disk"]) lvm = Path(image.output_dir) / "lvm.raw" lvm.touch() os.truncate(lvm, 5000 * 1024**2) lodev = run( ["losetup", "--show", "--find", "--partscan", lvm], stdout=subprocess.PIPE ).stdout.strip() stack.callback(lambda: run(["losetup", "--detach", lodev])) run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable") run( [ "cryptsetup", "--key-file", passphrase, "--use-random", "--pbkdf", "pbkdf2", "--pbkdf-force-iterations", "1000", "luksFormat", f"{lodev}p1", ] ) # fmt: skip run(["cryptsetup", "--key-file", passphrase, "luksOpen", f"{lodev}p1", "lvm_root"]) stack.callback(lambda: run(["cryptsetup", "close", "lvm_root"])) luks_uuid = run(["cryptsetup", "luksUUID", f"{lodev}p1"], stdout=subprocess.PIPE).stdout.strip() run(["lvm", "pvcreate", "--devicesfile", "", "/dev/mapper/lvm_root"]) run(["lvm", "pvs", "--devicesfile", ""]) run(["lvm", "vgcreate", "--devicesfile", "", "-An", "vg_mkosi", "/dev/mapper/lvm_root"]) run(["lvm", "vgchange", "--devicesfile", "", "-ay", "vg_mkosi"]) run(["lvm", "vgs", "--devicesfile", ""]) stack.callback(lambda: run(["lvm", "vgchange", "--devicesfile", "", "-an", "vg_mkosi"])) run(["lvm", "lvcreate", "--devicesfile", "", "-An", "-l", "100%FREE", "-n", "lv0", "vg_mkosi"]) run(["lvm", "lvs", "--devicesfile", ""]) run(["udevadm", "wait", "--timeout=30", "/dev/vg_mkosi/lv0"]) run([f"mkfs.{image.config.distribution.installer.filesystem()}", "-L", "root", "/dev/vg_mkosi/lv0"]) src = Path(stack.enter_context(tempfile.TemporaryDirectory())) run(["systemd-dissect", "--mount", "--mkdir", Path(image.output_dir) / "image.raw", src]) stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", src])) dst = Path(stack.enter_context(tempfile.TemporaryDirectory())) stack.enter_context(mount(Path("/dev/vg_mkosi/lv0"), dst)) copy_tree(src, dst) stack.close() lvm.rename(Path(image.output_dir) / "image.raw") image.vm( [ "--format=disk", "--credential=cryptsetup.passphrase=mkosi", "--firmware=linux", "--kernel-command-line-extra=root=LABEL=root", f"--kernel-command-line-extra=rd.luks.uuid={luks_uuid}", ] ) def test_initrd_size(config: ImageConfig) -> None: with Image(config) as image: image.build() # Set a reasonably high limit to avoid having to bump it every single time by # small amounts. 100M should do. 
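# Spelled out: 100 MiB is 100 * 1024**2 = 104857600 bytes, which is exactly what
# the limit below evaluates to.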
maxsize = 1024**2 * 100 assert (Path(image.output_dir) / "image.initrd").stat().st_size <= maxsize mkosi-26/tests/test_json.py000066400000000000000000000462271512054777600162020ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import textwrap import uuid from pathlib import Path from typing import Optional import pytest from mkosi.config import ( Architecture, Args, ArtifactOutput, BiosBootloader, Bootloader, BuildSourcesEphemeral, Cacheonly, CertificateSource, CertificateSourceType, Compression, Config, ConfigFeature, ConfigTree, ConsoleMode, DocFormat, Drive, DriveFlag, Firmware, Incremental, InitrdProfile, KeySource, KeySourceType, ManifestFormat, Network, OutputFormat, SecureBootSignTool, ShimBootloader, Ssh, UKIProfile, UnifiedKernelImage, Verb, Verity, Vmm, VsockCID, dump_json, ) from mkosi.distribution import Distribution @pytest.mark.parametrize("path", [None, "/baz/qux"]) def test_args(path: Optional[Path]) -> None: dump = textwrap.dedent( f"""\ {{ "AutoBump": false, "Cmdline": [ "foo", "bar" ], "Debug": false, "DebugSandbox": false, "DebugShell": false, "DebugWorkspace": false, "Directory": {f'"{os.fspath(path)}"' if path is not None else "null"}, "DocFormat": "auto", "Force": 9001, "GenkeyCommonName": "test", "GenkeyValidDays": "100", "Json": false, "Pager": true, "RerunBuildScripts": true, "Verb": "build", "WipeBuildDir": true }} """ ) args = Args( auto_bump=False, cmdline=["foo", "bar"], debug=False, debug_sandbox=False, debug_shell=False, debug_workspace=False, directory=Path(path) if path is not None else None, doc_format=DocFormat.auto, force=9001, genkey_common_name="test", genkey_valid_days="100", json=False, pager=True, rerun_build_scripts=True, verb=Verb.build, wipe_build_dir=True, ) assert dump_json(args.to_dict()) == dump.rstrip() assert Args.from_json(dump) == args def test_config() -> None: dump = textwrap.dedent( """\ { "Architecture": "ia64", "Autologin": false, "BaseTrees": [ "/hello/world" ], "BindUser": true, "BiosBootloader": "none", "Bootable": "disabled", "Bootloader": "grub", "BuildDirectory": "abc", "BuildKey": "abc", "BuildPackages": [ "pkg1", "pkg2" ], "BuildScripts": [ "/path/to/buildscript" ], "BuildSources": [ { "Source": "/qux", "Target": "/frob" } ], "BuildSourcesEphemeral": "yes", "BuildSubdirectory": "abc/abc", "CPUs": 2, "CXL": false, "CacheDirectory": "/is/this/the/cachedir", "CacheKey": "qed", "CacheOnly": "always", "Checksum": false, "CleanPackageMetadata": "auto", "CleanScripts": [ "/clean" ], "CompressLevel": 3, "CompressOutput": "bz2", "ConfigureScripts": [ "/configure" ], "Console": "gui", "Credentials": { "credkey": "credval" }, "Dependencies": [ "dep1" ], "Devicetrees": [ "freescale/imx8mm-verdin-nonwifi-dev.dtb" ], "Distribution": "fedora", "Drives": [ { "Directory": "/foo/bar", "FileId": "red", "Flags": [], "Id": "abc", "Options": "abc,qed", "Size": 200 }, { "Directory": null, "FileId": "wcd", "Flags": [], "Id": "abc", "Options": "", "Size": 200 }, { "Directory": null, "FileId": "bla", "Flags": [ "persist" ], "Id": "abc", "Options": "", "Size": 200 } ], "Environment": { "BAR": "BAR", "Qux": "Qux", "foo": "foo" }, "EnvironmentFiles": [], "Ephemeral": true, "ExtraSearchPaths": [], "ExtraTrees": [], "Files": [], "FinalizeScripts": [], "Firmware": "linux", "FirmwareExclude": [ "brcm/" ], "FirmwareFiles": [ "ath3k-1" ], "FirmwareVariables": "/foo/bar", "Format": "uki", "ForwardJournal": "/mkosi.journal", "History": true, "Hostname": null, "Image": "main", "ImageId": "myimage", "ImageVersion": "5", 
"Incremental": "no", "InitrdPackages": [ "clevis" ], "InitrdProfiles": [ "lvm" ], "InitrdVolatilePackages": [ "abc" ], "Initrds": [ "/efi/initrd1", "/efi/initrd2" ], "KVM": "auto", "KernelCommandLine": [], "KernelCommandLineExtra": [ "look", "im", "on", "the", "kernel", "command", "line" ], "KernelInitrdModules": [], "KernelModules": [ "loop" ], "KernelModulesExclude": [ "nvidia" ], "KernelModulesIncludeHost": true, "KernelModulesInitrd": true, "KernelModulesInitrdExclude": [], "KernelModulesInitrdIncludeHost": true, "Key": null, "Keymap": "wow, so much keymap", "Linux": null, "LocalMirror": null, "Locale": "en_C.UTF-8", "LocaleMessages": "", "Machine": "machine", "MachineId": "b58253b0-cc92-4a34-8782-bcd99b20d07f", "MakeInitrd": false, "ManifestFormat": [ "json", "changelog" ], "MaxMem": 123, "MicrocodeHost": true, "MinimumVersion": "123", "Mirror": null, "NSpawnSettings": null, "OpenPGPTool": "gpg", "Output": "outfile", "OutputDirectory": "/your/output/here", "OutputExtension": "raw", "OutputMode": 83, "Overlay": true, "PackageCacheDirectory": "/a/b/c", "PackageDirectories": [], "Packages": [], "PassEnvironment": [ "abc" ], "Passphrase": null, "PostInstallationScripts": [ "/bar/qux" ], "PostOutputScripts": [ "/foo/src" ], "PrepareScripts": [ "/run/foo" ], "Profiles": [ "profile" ], "ProxyClientCertificate": "/my/client/cert", "ProxyClientKey": "/my/client/key", "ProxyExclude": [ "www.example.com" ], "ProxyPeerCertificate": "/my/peer/cert", "ProxyUrl": "https://my/proxy", "QemuArgs": [], "RAM": 123, "Register": "enabled", "Release": "53", "Removable": false, "RemoveFiles": [], "RemovePackages": [ "all" ], "RepartDirectories": [], "RepartOffline": true, "Repositories": [], "RepositoryKeyCheck": false, "RepositoryKeyFetch": true, "RootPassword": [ "test1234", false ], "RootShell": "/bin/tcsh", "RuntimeBuildSources": true, "RuntimeNetwork": "interface", "RuntimeSize": 8589934592, "RuntimeTrees": [ { "Source": "/foo/bar", "Target": "/baz" }, { "Source": "/bar/baz", "Target": "/qux" } ], "SELinuxRelabel": "disabled", "SandboxTrees": [ { "Source": "/foo/bar", "Target": null } ], "SectorSize": null, "SecureBoot": true, "SecureBootAutoEnroll": true, "SecureBootCertificate": null, "SecureBootCertificateSource": { "Source": "", "Type": "file" }, "SecureBootKey": "/path/to/keyfile", "SecureBootKeySource": { "Source": "", "Type": "file" }, "SecureBootSignTool": "systemd-sbsign", "Seed": "7496d7d8-7f08-4a2b-96c6-ec8c43791b60", "ShimBootloader": "none", "Sign": false, "SignExpectedPcr": "disabled", "SignExpectedPcrCertificate": "/my/cert", "SignExpectedPcrCertificateSource": { "Source": "", "Type": "file" }, "SignExpectedPcrKey": "/my/key", "SignExpectedPcrKeySource": { "Source": "", "Type": "file" }, "SkeletonTrees": [ { "Source": "/foo/bar", "Target": "/" }, { "Source": "/bar/baz", "Target": "/qux" } ], "Snapshot": "snapshot", "SourceDateEpoch": 12345, "Splash": "/splash", "SplitArtifacts": [ "uki", "kernel" ], "Ssh": "auto", "SshCertificate": "/path/to/cert", "SshKey": null, "StorageTargetMode": "enabled", "SyncScripts": [ "/sync" ], "SysupdateDirectory": "/sysupdate", "TPM": "auto", "Timezone": null, "ToolsTree": null, "ToolsTreeCertificates": true, "UnifiedKernelImageFormat": "myuki", "UnifiedKernelImageProfiles": [ { "Cmdline": [ "key=value" ], "Profile": { "key": "value" }, "SignExpectedPcr": true } ], "UnifiedKernelImages": "auto", "UnitProperties": [ "PROPERTY=VALUE" ], "UseSubvolumes": "auto", "VSock": "enabled", "VSockCID": -2, "Verity": "signed", "VerityCertificate": "/path/to/cert", 
"VerityCertificateSource": { "Source": "", "Type": "file" }, "VerityKey": null, "VerityKeySource": { "Source": "", "Type": "file" }, "VirtualMachineMonitor": "qemu", "VolatilePackageDirectories": [ "def" ], "VolatilePackages": [ "abc" ], "WithDocs": true, "WithNetwork": false, "WithRecommends": true, "WithTests": true, "WorkspaceDirectory": "/cwd" } """ ) args = Config( architecture=Architecture.ia64, autologin=False, base_trees=[Path("/hello/world")], bind_user=True, bios_bootloader=BiosBootloader.none, bootable=ConfigFeature.disabled, bootloader=Bootloader.grub, build_dir=Path("abc"), build_key="abc", build_packages=["pkg1", "pkg2"], build_scripts=[Path("/path/to/buildscript")], build_sources_ephemeral=BuildSourcesEphemeral.yes, build_sources=[ConfigTree(Path("/qux"), Path("/frob"))], cache_dir=Path("/is/this/the/cachedir"), cache_key="qed", cacheonly=Cacheonly.always, checksum=False, clean_package_metadata=ConfigFeature.auto, clean_scripts=[Path("/clean")], compress_level=3, compress_output=Compression.bz2, configure_scripts=[Path("/configure")], console=ConsoleMode.gui, cpus=2, credentials={"credkey": "credval"}, dependencies=["dep1"], distribution=Distribution.fedora, drives=[ Drive("abc", 200, Path("/foo/bar"), "abc,qed", "red", []), Drive("abc", 200, None, "", "wcd", []), Drive("abc", 200, None, "", "bla", [DriveFlag.persist]), ], environment_files=[], environment={"foo": "foo", "BAR": "BAR", "Qux": "Qux"}, ephemeral=True, extra_search_paths=[], extra_trees=[], files=[], finalize_scripts=[], firmware_exclude=["brcm/"], firmware_include=["ath3k-1"], firmware_variables=Path("/foo/bar"), firmware=Firmware.linux, forward_journal=Path("/mkosi.journal"), history=True, hostname=None, image_id="myimage", image_version="5", image="main", incremental=Incremental.no, initrd_packages=["clevis"], initrd_profiles=[str(InitrdProfile.lvm)], initrd_volatile_packages=["abc"], initrds=[Path("/efi/initrd1"), Path("/efi/initrd2")], kernel_command_line_extra=["look", "im", "on", "the", "kernel", "command", "line"], kernel_command_line=[], kernel_modules_exclude=["nvidia"], kernel_modules_include_host=True, kernel_modules_include=["loop"], kernel_modules_initrd_exclude=[], kernel_modules_initrd_include_host=True, kernel_modules_initrd_include=[], kernel_modules_initrd=True, key=None, keymap="wow, so much keymap", kvm=ConfigFeature.auto, cxl=False, linux=None, local_mirror=None, locale_messages="", locale="en_C.UTF-8", machine_id=uuid.UUID("b58253b0cc924a348782bcd99b20d07f"), machine="machine", make_initrd=False, manifest_format=[ManifestFormat.json, ManifestFormat.changelog], maxmem=123, microcode_host=True, devicetrees=["freescale/imx8mm-verdin-nonwifi-dev.dtb"], minimum_version="123", mirror=None, nspawn_settings=None, openpgp_tool="gpg", output_dir=Path("/your/output/here"), output_extension="raw", output_format=OutputFormat.uki, output_mode=0o123, output="outfile", overlay=True, package_cache_dir=Path("/a/b/c"), package_directories=[], packages=[], pass_environment=["abc"], passphrase=None, postinst_scripts=[Path("/bar/qux")], postoutput_scripts=[Path("/foo/src")], prepare_scripts=[Path("/run/foo")], profiles=["profile"], proxy_client_certificate=Path("/my/client/cert"), proxy_client_key=Path("/my/client/key"), proxy_exclude=["www.example.com"], proxy_peer_certificate=Path("/my/peer/cert"), proxy_url="https://my/proxy", qemu_args=[], ram=123, register=ConfigFeature.enabled, release="53", removable=False, remove_files=[], remove_packages=["all"], repart_dirs=[], repart_offline=True, repositories=[], 
repository_key_check=False, repository_key_fetch=True, root_password=("test1234", False), root_shell="/bin/tcsh", runtime_build_sources=True, runtime_network=Network.interface, runtime_size=8589934592, runtime_trees=[ ConfigTree(Path("/foo/bar"), Path("/baz")), ConfigTree(Path("/bar/baz"), Path("/qux")), ], sandbox_trees=[ConfigTree(Path("/foo/bar"), None)], sector_size=None, secure_boot_auto_enroll=True, secure_boot_certificate_source=CertificateSource(type=CertificateSourceType.file), secure_boot_certificate=None, secure_boot_key_source=KeySource(type=KeySourceType.file), secure_boot_key=Path("/path/to/keyfile"), secure_boot_sign_tool=SecureBootSignTool.systemd_sbsign, secure_boot=True, seed=uuid.UUID("7496d7d8-7f08-4a2b-96c6-ec8c43791b60"), selinux_relabel=ConfigFeature.disabled, shim_bootloader=ShimBootloader.none, sign_expected_pcr_certificate_source=CertificateSource(type=CertificateSourceType.file), sign_expected_pcr_certificate=Path("/my/cert"), sign_expected_pcr_key_source=KeySource(type=KeySourceType.file), sign_expected_pcr_key=Path("/my/key"), sign_expected_pcr=ConfigFeature.disabled, sign=False, skeleton_trees=[ConfigTree(Path("/foo/bar"), Path("/")), ConfigTree(Path("/bar/baz"), Path("/qux"))], snapshot="snapshot", source_date_epoch=12345, splash=Path("/splash"), split_artifacts=[ArtifactOutput.uki, ArtifactOutput.kernel], ssh_certificate=Path("/path/to/cert"), ssh_key=None, ssh=Ssh.auto, storage_target_mode=ConfigFeature.enabled, sync_scripts=[Path("/sync")], sysupdate_dir=Path("/sysupdate"), timezone=None, tools_tree_certificates=True, tools_tree=None, tpm=ConfigFeature.auto, unified_kernel_image_format="myuki", unified_kernel_image_profiles=[ UKIProfile( profile={"key": "value"}, cmdline=["key=value"], sign_expected_pcr=True, ) ], unified_kernel_images=UnifiedKernelImage.auto, unit_properties=["PROPERTY=VALUE"], use_subvolumes=ConfigFeature.auto, verity_certificate_source=CertificateSource(type=CertificateSourceType.file), verity_certificate=Path("/path/to/cert"), verity_key_source=KeySource(type=KeySourceType.file), verity_key=None, verity=Verity.signed, vmm=Vmm.qemu, volatile_package_directories=[Path("def")], volatile_packages=["abc"], vsock_cid=VsockCID.hash, vsock=ConfigFeature.enabled, with_docs=True, with_network=False, with_recommends=True, with_tests=True, workspace_dir=Path("/cwd"), ) assert dump_json(args.to_dict()) == dump.rstrip() assert Config.from_json(dump) == args mkosi-26/tests/test_kmod.py000066400000000000000000000073041512054777600161540ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from mkosi import kmod def test_globs_match_module() -> None: assert kmod.globs_match_module("drivers/ata/ahci.ko.xz", ["ahci"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko.xz.2", ["ahci"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko.xz", ["ata"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko.xz", ["drivers"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko.xz", ["/drivers"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko.xz", ["/drivers"]) assert not kmod.globs_match_module("drivers/ata/ahci-2.ko.xz", ["ahci"]) assert not kmod.globs_match_module("drivers/ata/ahci2.ko.zst", ["ahci"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["ata/*"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko.xz", ["/ata/*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["drivers/*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["/drivers/*"]) assert not 
kmod.globs_match_module("drivers/ata/ahci.ko", ["ahci/*"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko", ["bahci*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko.zst", ["ahci*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko.xz", ["ahc*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["ah*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["ata/"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["drivers/"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["drivers/ata/"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["-ahci", "*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko", ["-ahci", "*", "ahciahci"]) assert kmod.globs_match_module("drivers/ata/ahci.ko.xz", ["-ahci", "*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko.zst", ["-ahci", "*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko.gz", ["-ahci", "*"]) assert kmod.globs_match_module("drivers/ata/ahci.ko.gz", ["-ahci", "drivers/"]) assert kmod.globs_match_module("drivers/ata/ahci.ko.gz", ["-ahci", "ata/"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko.gz", ["-ahci", "ata/ata/"]) assert kmod.globs_match_module("drivers/ata/ahci.ko.gz", ["-ahci", "drivers/ata/"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko", ["*", "-ahci"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko", ["ahci", "-*"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko.zst", ["-*"]) assert not kmod.globs_match_module("drivers/ata/ahci.ko.xz", ["-*"]) # absolute glob behavior unchanged when paths are relative to /lib/module/ assert kmod.globs_match_module("kernel/drivers/ata/ahci.ko", ["drivers/*"]) assert kmod.globs_match_module("kernel/drivers/ata/ahci.ko", ["/drivers/*"]) assert not kmod.globs_match_module("kernel/drivers/ata/ahci.ko.xz", ["/ata/*"]) # absolute globs match both relative to kernel/ and module_dir root assert kmod.globs_match_module("kernel/drivers/ata/ahci.ko.xz", ["/drivers/ata/ahci"]) assert kmod.globs_match_module("kernel/drivers/ata/ahci.ko.xz", ["/kernel/drivers/ata/ahci"]) def test_normalize_module_glob() -> None: assert kmod.normalize_module_glob("raid[0-9]") == "raid[0-9]" assert kmod.normalize_module_glob("raid[0_9]") == "raid[0_9]" assert kmod.normalize_module_glob("raid[0_9]a_z") == "raid[0_9]a-z" assert kmod.normalize_module_glob("0_9") == "0-9" assert kmod.normalize_module_glob("[0_9") == "[0_9" assert kmod.normalize_module_glob("0_9]") == "0-9]" assert kmod.normalize_module_glob("raid[0_9]a_z[a_c]") == "raid[0_9]a-z[a_c]" mkosi-26/tests/test_signing.py000066400000000000000000000045721512054777600166640ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import tempfile from pathlib import Path import pytest from mkosi.run import find_binary, run from . 
import Image, ImageConfig pytestmark = pytest.mark.integration def test_signing_checksums_with_sop(config: ImageConfig) -> None: if find_binary("sqop") is None: pytest.skip("Need 'sqop' binary to perform sop tests.") with tempfile.TemporaryDirectory() as path, Image(config) as image: tmp_path = Path(path) signing_key = tmp_path / "signing-key.pgp" signing_cert = tmp_path / "signing-cert.pgp" # create a brand new signing key with open(signing_key, "wb") as o: run(cmdline=["sqop", "generate-key", "--signing-only", "Test"], stdout=o) # extract public key (certificate) with open(signing_key, "rb") as i, open(signing_cert, "wb") as o: run(cmdline=["sqop", "extract-cert"], stdin=i, stdout=o) image.build( options=["--checksum=true", "--openpgp-tool=sqop", "--sign=true", f"--key={signing_key}"] ) signed_file = image.output_dir / "image.SHA256SUMS" signature = image.output_dir / "image.SHA256SUMS.gpg" with open(signed_file, "rb") as i: run(cmdline=["sqop", "verify", signature, signing_cert], stdin=i) def test_signing_checksums_with_gpg(config: ImageConfig) -> None: with tempfile.TemporaryDirectory() as path, Image(config) as image: tmp_path = Path(path) signing_key = "mkosi-test@example.org" signing_cert = tmp_path / "signing-cert.pgp" gnupghome = tmp_path / ".gnupg" gnupghome.mkdir() env = dict(GNUPGHOME=os.fspath(gnupghome)) # create a brand new signing key run( cmdline=["gpg", "--quick-gen-key", "--batch", "--passphrase", "", signing_key], env=env, ) # export public key (certificate) with open(signing_cert, "wb") as o: run( cmdline=["gpg", "--export", signing_key], env=env, stdout=o, ) image.build(options=["--checksum=true", "--sign=true", f"--key={signing_key}"], env=env) signed_file = image.output_dir / "image.SHA256SUMS" signature = image.output_dir / "image.SHA256SUMS.gpg" run(cmdline=["gpg", "--verify", signature, signed_file], env=env) mkosi-26/tests/test_util.py000066400000000000000000000024141512054777600161740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from pathlib import Path import pytest from mkosi.util import parents_below def test_parents_below_basic() -> None: path = Path("/a/b/c/d/e") below = Path("/a/b") assert parents_below(path, below) == [Path("/a/b/c/d"), Path("/a/b/c")] def test_parents_below_root() -> None: path = Path("/a/b/c") below = Path("/") assert parents_below(path, below) == [Path("/a/b"), Path("/a")] def test_parents_below_direct_child() -> None: path = Path("/a/b/c") below = Path("/a/b") assert parents_below(path, below) == [] def test_parents_below_relative_paths() -> None: path = Path("a/b/c/d") below = Path("a/b") assert parents_below(path, below) == [Path("a/b/c")] def test_parents_below_same_path_raises() -> None: path = Path("/a/b/c") below = Path("/a/b/c") with pytest.raises(ValueError): parents_below(path, below) def test_parents_below_not_parent_raises() -> None: path = Path("/a/b/c") below = Path("/x/y/z") with pytest.raises(ValueError): parents_below(path, below) def test_parents_below_below_is_child_raises() -> None: path = Path("/a/b") below = Path("/a/b/c") with pytest.raises(ValueError): parents_below(path, below) mkosi-26/tests/test_versioncomp.py000066400000000000000000000223711512054777600175670ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import itertools import pytest from mkosi.versioncomp import GenericVersion def test_conversion() -> None: assert GenericVersion("1") < 2 assert GenericVersion("1") < "2" assert GenericVersion("2") > 1 assert GenericVersion("2") > "1" assert 
GenericVersion("1") == "1" def test_generic_version_systemd() -> None: """Same as the first block of systemd/test/test-compare-versions.sh""" assert GenericVersion("1") < GenericVersion("2") assert GenericVersion("1") <= GenericVersion("2") assert GenericVersion("1") != GenericVersion("2") assert not (GenericVersion("1") > GenericVersion("2")) assert not (GenericVersion("1") == GenericVersion("2")) assert not (GenericVersion("1") >= GenericVersion("2")) assert GenericVersion.compare_versions("1", "2") == -1 assert GenericVersion.compare_versions("2", "2") == 0 assert GenericVersion.compare_versions("2", "1") == 1 def test_generic_version_spec() -> None: """Examples from the uapi group version format spec""" assert GenericVersion("11") == GenericVersion("11") assert GenericVersion("systemd-123") == GenericVersion("systemd-123") assert GenericVersion("bar-123") < GenericVersion("foo-123") assert GenericVersion("123a") > GenericVersion("123") assert GenericVersion("123.a") > GenericVersion("123") assert GenericVersion("123.a") < GenericVersion("123.b") assert GenericVersion("123a") > GenericVersion("123.a") assert GenericVersion("11α") == GenericVersion("11β") assert GenericVersion("A") < GenericVersion("a") assert GenericVersion("") < GenericVersion("0") assert GenericVersion("0.") > GenericVersion("0") assert GenericVersion("0.0") > GenericVersion("0") assert GenericVersion("0") > GenericVersion("~") assert GenericVersion("") > GenericVersion("~") assert GenericVersion("1_") == GenericVersion("1") assert GenericVersion("_1") == GenericVersion("1") assert GenericVersion("1_") < GenericVersion("1.2") assert GenericVersion("1_2_3") > GenericVersion("1.3.3") assert GenericVersion("1+") == GenericVersion("1") assert GenericVersion("+1") == GenericVersion("1") assert GenericVersion("1+") < GenericVersion("1.2") assert GenericVersion("1+2+3") > GenericVersion("1.3.3") @pytest.mark.parametrize( "s1,s2", itertools.combinations_with_replacement( enumerate( [ GenericVersion("122.1"), GenericVersion("123~rc1-1"), GenericVersion("123"), GenericVersion("123-a"), GenericVersion("123-a.1"), GenericVersion("123-1"), GenericVersion("123-1.1"), GenericVersion("123^post1"), GenericVersion("123.a-1"), GenericVersion("123.1-1"), GenericVersion("123a-1"), GenericVersion("124-1"), ], ), 2, ), ) def test_generic_version_strverscmp_improved_doc( s1: tuple[int, GenericVersion], s2: tuple[int, GenericVersion], ) -> None: """Example from the doc string of strverscmp_improved. strverscmp_improved can be found in systemd/src/fundamental/string-util-fundamental.c """ i1, v1 = s1 i2, v2 = s2 assert (v1 == v2) == (i1 == i2) assert (v1 < v2) == (i1 < i2) assert (v1 <= v2) == (i1 <= i2) assert (v1 > v2) == (i1 > i2) assert (v1 >= v2) == (i1 >= i2) assert (v1 != v2) == (i1 != i2) def RPMVERCMP(a: str, b: str, expected: int) -> None: assert (GenericVersion(a) > GenericVersion(b)) - (GenericVersion(a) < GenericVersion(b)) == expected def test_generic_version_rpmvercmp() -> None: EQUAL = 0 RIGHT_SMALLER = 1 LEFT_SMALLER = -1 # Tests copied from rpm's rpmio test suite, under the LGPL license: # https://github.com/rpm-software-management/rpm/blob/master/tests/rpmvercmp.at. # The original form is retained as much as possible for easy comparisons and updates. 
RPMVERCMP("1.0", "1.0", EQUAL) RPMVERCMP("1.0", "2.0", LEFT_SMALLER) RPMVERCMP("2.0", "1.0", RIGHT_SMALLER) RPMVERCMP("2.0.1", "2.0.1", EQUAL) RPMVERCMP("2.0", "2.0.1", LEFT_SMALLER) RPMVERCMP("2.0.1", "2.0", RIGHT_SMALLER) RPMVERCMP("2.0.1a", "2.0.1a", EQUAL) RPMVERCMP("2.0.1a", "2.0.1", RIGHT_SMALLER) RPMVERCMP("2.0.1", "2.0.1a", LEFT_SMALLER) RPMVERCMP("5.5p1", "5.5p1", EQUAL) RPMVERCMP("5.5p1", "5.5p2", LEFT_SMALLER) RPMVERCMP("5.5p2", "5.5p1", RIGHT_SMALLER) RPMVERCMP("5.5p10", "5.5p10", EQUAL) RPMVERCMP("5.5p1", "5.5p10", LEFT_SMALLER) RPMVERCMP("5.5p10", "5.5p1", RIGHT_SMALLER) RPMVERCMP("10xyz", "10.1xyz", RIGHT_SMALLER) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("10.1xyz", "10xyz", LEFT_SMALLER) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("xyz10", "xyz10", EQUAL) RPMVERCMP("xyz10", "xyz10.1", LEFT_SMALLER) RPMVERCMP("xyz10.1", "xyz10", RIGHT_SMALLER) RPMVERCMP("xyz.4", "xyz.4", EQUAL) RPMVERCMP("xyz.4", "8", LEFT_SMALLER) RPMVERCMP("8", "xyz.4", RIGHT_SMALLER) RPMVERCMP("xyz.4", "2", LEFT_SMALLER) RPMVERCMP("2", "xyz.4", RIGHT_SMALLER) RPMVERCMP("5.5p2", "5.6p1", LEFT_SMALLER) RPMVERCMP("5.6p1", "5.5p2", RIGHT_SMALLER) RPMVERCMP("5.6p1", "6.5p1", LEFT_SMALLER) RPMVERCMP("6.5p1", "5.6p1", RIGHT_SMALLER) RPMVERCMP("6.0.rc1", "6.0", RIGHT_SMALLER) RPMVERCMP("6.0", "6.0.rc1", LEFT_SMALLER) RPMVERCMP("10b2", "10a1", RIGHT_SMALLER) RPMVERCMP("10a2", "10b2", LEFT_SMALLER) RPMVERCMP("1.0aa", "1.0aa", EQUAL) RPMVERCMP("1.0a", "1.0aa", LEFT_SMALLER) RPMVERCMP("1.0aa", "1.0a", RIGHT_SMALLER) RPMVERCMP("10.0001", "10.0001", EQUAL) RPMVERCMP("10.0001", "10.1", EQUAL) RPMVERCMP("10.1", "10.0001", EQUAL) RPMVERCMP("10.0001", "10.0039", LEFT_SMALLER) RPMVERCMP("10.0039", "10.0001", RIGHT_SMALLER) RPMVERCMP("4.999.9", "5.0", LEFT_SMALLER) RPMVERCMP("5.0", "4.999.9", RIGHT_SMALLER) RPMVERCMP("20101121", "20101121", EQUAL) RPMVERCMP("20101121", "20101122", LEFT_SMALLER) RPMVERCMP("20101122", "20101121", RIGHT_SMALLER) RPMVERCMP("2_0", "2_0", EQUAL) RPMVERCMP("2.0", "2_0", LEFT_SMALLER) # Note: in rpm those compare equal RPMVERCMP("2_0", "2.0", RIGHT_SMALLER) # Note: in rpm those compare equal # RhBug:178798 case */ RPMVERCMP("a", "a", EQUAL) RPMVERCMP("a+", "a+", EQUAL) RPMVERCMP("a+", "a_", EQUAL) RPMVERCMP("a_", "a+", EQUAL) RPMVERCMP("+a", "+a", EQUAL) RPMVERCMP("+a", "_a", EQUAL) RPMVERCMP("_a", "+a", EQUAL) RPMVERCMP("+_", "+_", EQUAL) RPMVERCMP("_+", "+_", EQUAL) RPMVERCMP("_+", "_+", EQUAL) RPMVERCMP("+", "_", EQUAL) RPMVERCMP("_", "+", EQUAL) # Basic testcases for tilde sorting RPMVERCMP("1.0~rc1", "1.0~rc1", EQUAL) RPMVERCMP("1.0~rc1", "1.0", LEFT_SMALLER) RPMVERCMP("1.0", "1.0~rc1", RIGHT_SMALLER) RPMVERCMP("1.0~rc1", "1.0~rc2", LEFT_SMALLER) RPMVERCMP("1.0~rc2", "1.0~rc1", RIGHT_SMALLER) RPMVERCMP("1.0~rc1~git123", "1.0~rc1~git123", EQUAL) RPMVERCMP("1.0~rc1~git123", "1.0~rc1", LEFT_SMALLER) RPMVERCMP("1.0~rc1", "1.0~rc1~git123", RIGHT_SMALLER) # Basic testcases for caret sorting RPMVERCMP("1.0^", "1.0^", EQUAL) RPMVERCMP("1.0^", "1.0", RIGHT_SMALLER) RPMVERCMP("1.0", "1.0^", LEFT_SMALLER) RPMVERCMP("1.0^git1", "1.0^git1", EQUAL) RPMVERCMP("1.0^git1", "1.0", RIGHT_SMALLER) RPMVERCMP("1.0", "1.0^git1", LEFT_SMALLER) RPMVERCMP("1.0^git1", "1.0^git2", LEFT_SMALLER) RPMVERCMP("1.0^git2", "1.0^git1", RIGHT_SMALLER) RPMVERCMP("1.0^git1", "1.01", LEFT_SMALLER) RPMVERCMP("1.01", "1.0^git1", RIGHT_SMALLER) RPMVERCMP("1.0^20160101", "1.0^20160101", EQUAL) RPMVERCMP("1.0^20160101", "1.0.1", LEFT_SMALLER) RPMVERCMP("1.0.1", "1.0^20160101", RIGHT_SMALLER) 
RPMVERCMP("1.0^20160101^git1", "1.0^20160101^git1", EQUAL) RPMVERCMP("1.0^20160102", "1.0^20160101^git1", RIGHT_SMALLER) RPMVERCMP("1.0^20160101^git1", "1.0^20160102", LEFT_SMALLER) # Basic testcases for tilde and caret sorting */ RPMVERCMP("1.0~rc1^git1", "1.0~rc1^git1", EQUAL) RPMVERCMP("1.0~rc1^git1", "1.0~rc1", RIGHT_SMALLER) RPMVERCMP("1.0~rc1", "1.0~rc1^git1", LEFT_SMALLER) RPMVERCMP("1.0^git1~pre", "1.0^git1~pre", EQUAL) RPMVERCMP("1.0^git1", "1.0^git1~pre", RIGHT_SMALLER) RPMVERCMP("1.0^git1~pre", "1.0^git1", LEFT_SMALLER) # These are included here to document current, arguably buggy behaviors # for reference purposes and for easy checking against unintended # behavior changes. */ print("/* RPM version comparison oddities */") # RhBug:811992 case RPMVERCMP("1b.fc17", "1b.fc17", EQUAL) RPMVERCMP("1b.fc17", "1.fc17", RIGHT_SMALLER) # Note: this is reversed from rpm's vercmp, WAT! */ RPMVERCMP("1.fc17", "1b.fc17", LEFT_SMALLER) RPMVERCMP("1g.fc17", "1g.fc17", EQUAL) RPMVERCMP("1g.fc17", "1.fc17", RIGHT_SMALLER) RPMVERCMP("1.fc17", "1g.fc17", LEFT_SMALLER) # Non-ascii characters are considered equal so these are all the same, eh… */ RPMVERCMP("1.1.α", "1.1.α", EQUAL) RPMVERCMP("1.1.α", "1.1.β", EQUAL) RPMVERCMP("1.1.β", "1.1.α", EQUAL) RPMVERCMP("1.1.αα", "1.1.α", EQUAL) RPMVERCMP("1.1.α", "1.1.ββ", EQUAL) RPMVERCMP("1.1.ββ", "1.1.αα", EQUAL) mkosi-26/tools/000077500000000000000000000000001512054777600136035ustar00rootroot00000000000000mkosi-26/tools/do-a-release.sh000077500000000000000000000013131512054777600163760ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later if [ -z "$1" ] ; then echo "Version number not specified." exit 1 fi VERSION="$1" if ! git diff-index --quiet HEAD; then echo "Repo has modified files." exit 1 fi sed -r -i "s/^version = \".*\"$/version = \"$VERSION\"/" pyproject.toml sed -r -i "s/^__version__ = \".*\"$/__version__ = \"$VERSION\"/" mkosi/sandbox.py git add -p pyproject.toml mkosi git commit -m "Release $VERSION" git tag -s "v$VERSION" -m "mkosi $VERSION" VERSION_MAJOR=${VERSION%%.*} VERSION="$((VERSION_MAJOR + 1))~devel" sed -r -i "s/^__version__ = \".*\"$/__version__ = \"$VERSION\"/" mkosi/sandbox.py git add -p mkosi git commit -m "Bump version to $VERSION" mkosi-26/tools/generate-zipapp.sh000077500000000000000000000004261512054777600172370ustar00rootroot00000000000000#!/bin/bash BUILDDIR=$(mktemp -d -q) cleanup() { rm -rf "$BUILDDIR" } trap cleanup EXIT mkdir -p builddir cp -r mkosi "${BUILDDIR}/" python3 -m zipapp \ -p "/usr/bin/env python3" \ -o builddir/mkosi \ -m mkosi.__main__:main \ "$BUILDDIR" mkosi-26/tools/make-man-page.sh000077500000000000000000000004731512054777600165460ustar00rootroot00000000000000#!/bin/sh # SPDX-License-Identifier: LGPL-2.1-or-later set -ex MD_DIR=mkosi/resources/man OUTPUT_DIR=mkosi/resources/man for mdfile in "$MD_DIR"/*.?.md; do pandoc \ --lua-filter=mkosi/resources/pandoc/md2man.lua \ -s -t man \ -o "${OUTPUT_DIR}/$(basename "${mdfile}" .md)" \ "${mdfile}" done