debian-changelog-0.2.14/.cargo_vcs_info.json0000644000000001360000000000100142670ustar { "git": { "sha1": "ee15ccbe3bd235d7116b140cae2435a9214c474b" }, "path_in_vcs": "" }debian-changelog-0.2.14/.codespellrc000064400000000000000000000000461046102023000153570ustar 00000000000000[codespell] ignore-words-list = crate debian-changelog-0.2.14/.github/CODEOWNERS000064400000000000000000000000121046102023000160030ustar 00000000000000* @jelmer debian-changelog-0.2.14/.github/FUNDING.yml000064400000000000000000000000171046102023000162320ustar 00000000000000github: jelmer debian-changelog-0.2.14/.github/dependabot.yml000064400000000000000000000006251046102023000172520ustar 00000000000000# Please see the documentation for all configuration options: # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates version: 2 updates: - package-ecosystem: "cargo" directory: "/" schedule: interval: "weekly" rebase-strategy: "disabled" - package-ecosystem: "github-actions" directory: "/" schedule: interval: weekly debian-changelog-0.2.14/.github/workflows/auto-merge.yaml000064400000000000000000000011341046102023000214040ustar 00000000000000name: Dependabot auto-merge on: pull_request_target permissions: pull-requests: write contents: write jobs: dependabot: runs-on: ubuntu-latest if: ${{ github.actor == 'dependabot[bot]' }} steps: - name: Dependabot metadata id: metadata uses: dependabot/fetch-metadata@v2 with: github-token: "${{ secrets.GITHUB_TOKEN }}" - name: Enable auto-merge for Dependabot PRs run: gh pr merge --auto --squash "$PR_URL" env: PR_URL: ${{github.event.pull_request.html_url}} GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} debian-changelog-0.2.14/.github/workflows/disperse.yml000064400000000000000000000002741046102023000210200ustar 00000000000000--- name: Disperse configuration "on": - push jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: jelmer/action-disperse-validate@v2 
debian-changelog-0.2.14/.github/workflows/rust.yml000064400000000000000000000015621046102023000202000ustar 00000000000000--- name: Rust "on": push: pull_request: env: CARGO_TERM_COLOR: always jobs: build: runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest, macos-latest, windows-latest] fail-fast: false steps: - uses: actions/checkout@v6 - name: Build run: cargo build --verbose env: RUSTFLAGS: -Dwarnings - name: Run tests run: cargo test --verbose env: RUSTFLAGS: -Dwarnings minimal-versions: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - name: Install Rust toolchains run: | rustup toolchain install stable - name: Install cargo-minimal-versions run: cargo install cargo-minimal-versions cargo-hack - name: Run minimal versions test run: cargo +stable minimal-versions test --all-features debian-changelog-0.2.14/.gitignore000064400000000000000000000000331046102023000150430ustar 00000000000000/target *~ .testrepository debian-changelog-0.2.14/.testr.conf000064400000000000000000000001631046102023000151450ustar 00000000000000[DEFAULT] test_command=cargo subunit $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list debian-changelog-0.2.14/CODE_OF_CONDUCT.md000064400000000000000000000125451046102023000156650ustar 00000000000000 # Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance, race, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 
## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [INSERT CONTACT METHOD]. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. 
Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at [https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations debian-changelog-0.2.14/Cargo.lock0000644000000401740000000000100122500ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 4 [[package]] name = "adler2" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aho-corasick" version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] [[package]] name = "android_system_properties" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ "libc", ] [[package]] name = "autocfg" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "bitflags" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "bumpalo" version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "cc" version = "1.2.49" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" dependencies = [ "find-msvc-tools", "shlex", ] [[package]] name = "cfg-if" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "chrono" version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", "windows-link", ] [[package]] name = "core-foundation-sys" version = "0.8.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "countme" version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7704b5fdd17b18ae31c4c1da5a2e0305a2bf17b5249300a9ee9ed7b72114c636" [[package]] name = "crc32fast" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] [[package]] name = "debian-changelog" version = "0.2.14" dependencies = [ "chrono", "debversion", "flate2", "lazy-regex", "log", "maplit", "rowan", "tempfile", "textwrap", "whoami", ] [[package]] name = "debversion" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4f5cc9ce1d5067bee8060dd75208525dd0133ffea0b2960fef64ab85d58c4c5" dependencies = [ "chrono", "lazy-regex", "num-bigint", ] [[package]] name = "errno" version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", "windows-sys", ] [[package]] name = "fastrand" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "find-msvc-tools" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" [[package]] name = "flate2" version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" dependencies = [ "crc32fast", "miniz_oxide", ] [[package]] name = "getrandom" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "libc", "r-efi", "wasip2", ] [[package]] name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "iana-time-zone" version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "log", "wasm-bindgen", "windows-core", ] [[package]] name = "iana-time-zone-haiku" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" dependencies = [ "cc", ] [[package]] name = "js-sys" version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] name = "lazy-regex" version = "3.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "191898e17ddee19e60bccb3945aa02339e81edd4a8c50e21fd4d48cdecda7b29" dependencies = [ "lazy-regex-proc_macros", "once_cell", "regex", ] [[package]] name = "lazy-regex-proc_macros" version = "3.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c35dc8b0da83d1a9507e12122c80dea71a9c7c613014347392483a83ea593e04" dependencies = [ "proc-macro2", "quote", "regex", "syn", ] [[package]] name = "libc" version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "libredox" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ "bitflags", "libc", "redox_syscall", ] [[package]] name = "linux-raw-sys" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "log" version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "maplit" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] name = "memchr" version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "miniz_oxide" version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", "simd-adler32", ] [[package]] name = "num-bigint" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", ] [[package]] name = "num-integer" version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ "num-traits", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "proc-macro2" version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] [[package]] name = "r-efi" version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "redox_syscall" version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ "bitflags", ] [[package]] name = "regex" version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "rowan" version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "417a3a9f582e349834051b8a10c8d71ca88da4211e4093528e36b9845f6b5f21" dependencies = [ "countme", "hashbrown", "rustc-hash", "text-size", ] [[package]] name = "rustc-hash" version = "1.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", "windows-sys", ] [[package]] name = "rustversion" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "simd-adler32" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "smawk" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" [[package]] name = "syn" version = "2.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tempfile" version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand", "getrandom", "once_cell", "rustix", "windows-sys", ] [[package]] name = "text-size" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f18aa187839b2bdb1ad2fa35ead8c4c2976b64e4363c386d45ac0f7ee85c9233" [[package]] name = "textwrap" version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" dependencies = [ "smawk", "unicode-linebreak", "unicode-width", ] [[package]] name = "unicode-ident" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-linebreak" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-width" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" [[package]] name = "wasip2" version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ "wit-bindgen", ] [[package]] name = "wasite" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", "quote", "syn", 
"wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] [[package]] name = "whoami" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d" dependencies = [ "libredox", "wasite", ] [[package]] name = "windows-core" version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", "windows-strings", ] [[package]] name = "windows-implement" version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-interface" version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] [[package]] name = "windows-sys" 
version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ "windows-link", ] [[package]] name = "wit-bindgen" version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" debian-changelog-0.2.14/Cargo.toml0000644000000034500000000000100122670ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" name = "debian-changelog" version = "0.2.14" authors = ["Jelmer Vernooij "] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "Parser for Debian changelog files" readme = "README.md" license = "Apache-2.0" repository = "https://github.com/jelmer/debian-changelog-rs" [features] default = ["chrono"] [lib] name = "debian_changelog" path = "src/lib.rs" [[example]] name = "build" path = "examples/build.rs" required-features = ["chrono"] [[example]] name = "dch" path = "examples/dch.rs" required-features = ["chrono"] [[example]] name = "simple" path = "examples/simple.rs" [[test]] name = "builder_tests" path = "tests/builder_tests.rs" [[test]] name = "display_tests" path = "tests/display_tests.rs" [[test]] name = "parse_type" path = "tests/parse_type.rs" [dependencies.chrono] version = "0.4.42" optional = true [dependencies.debversion] version = ">=0.4.6, <0.6" [dependencies.lazy-regex] version = ">=3, <4" [dependencies.log] version = "0.4" 
[dependencies.rowan] version = "0.16.1" [dependencies.textwrap] version = "0.16.2" [dependencies.whoami] version = ">=1.5, <2" default-features = false [dev-dependencies.flate2] version = "1.1" [dev-dependencies.maplit] version = "1.0.2" [dev-dependencies.tempfile] version = "3.23" debian-changelog-0.2.14/Cargo.toml.orig000064400000000000000000000013241046102023000157460ustar 00000000000000[package] name = "debian-changelog" repository = "https://github.com/jelmer/debian-changelog-rs" description = "Parser for Debian changelog files" version = "0.2.14" edition = "2021" license = "Apache-2.0" readme = "README.md" authors = [ "Jelmer Vernooij ",] [dependencies] chrono = { version = "0.4.42", optional = true } debversion = ">=0.4.6, <0.6" lazy-regex = ">=3, <4" log = "0.4" rowan = "0.16.1" textwrap = "0.16.2" whoami = { version = ">=1.5, <2", default-features = false } [features] default = ["chrono"] [dev-dependencies] flate2 = "1.1" maplit = "1.0.2" tempfile = "3.23" [[example]] name = "build" required-features = ["chrono"] [[example]] name = "dch" required-features = ["chrono"] debian-changelog-0.2.14/LICENSE000064400000000000000000000261361046102023000140740ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. debian-changelog-0.2.14/README.md000064400000000000000000000030231046102023000143340ustar 00000000000000Debian Changelog parser ======================= This crate provides a parser for debian/changelog files, as described in the Debian policy, [section 4.4](https://www.debian.org/doc/debian-policy/ch-source.html#debian-changelog-debian-changelog). The parser builds a CST. It is lossless - i.e. preserves formatting, and allows editing and partial parsing. 
Example: ```rust use std::io::Read; fn main() -> Result<(), Box> { let file = std::fs::File::open("/usr/share/doc/rustc/changelog.Debian.gz")?; let mut gz = flate2::read::GzDecoder::new(file); let mut contents = String::new(); gz.read_to_string(&mut contents)?; let changelog: debian_changelog::ChangeLog = contents.parse()?; for entry in changelog.entries() { println!( "{}: {}", entry.package().unwrap(), entry.version().unwrap().to_string() ); } Ok(()) } ``` Or to update an existing changelog file: ```rust use std::io::Read; fn main() -> Result<(), Box> { let file = std::fs::File::open("debian/changelog")?; let mut contents = String::new(); file.read_to_string(&mut contents)?; let mut changelog: debian_changelog::ChangeLog = contents.parse()?; changelog.try_auto_add_change( &["* Make a change"], ( "Jelmer Vernooij".to_string(), "jelmer@debian.org".to_string(), ), None, None, )?; std::fs::write("debian/changelog", changelog.to_string())?; Ok(()) } ``` debian-changelog-0.2.14/disperse.toml000064400000000000000000000000531046102023000155700ustar 00000000000000tag-name = "v$VERSION" release-timeout = 5 debian-changelog-0.2.14/examples/build.rs000064400000000000000000000014451046102023000163460ustar 00000000000000//! A simple example of generate a Debian changelog file. use debian_changelog::{ChangeLog, Urgency}; fn main() { let mut changelog = ChangeLog::new(); // Note that most of these are optional and fall back to sensible defaults. 
changelog .new_entry() .package("example".to_string()) .version("0.1.0".parse().unwrap()) .distribution("unstable".to_string()) .urgency(Urgency::Low) .maintainer(("John Doe".to_string(), "john@example.com".to_string())) .datetime(chrono::DateTime::parse_from_rfc3339("2018-01-01T00:00:00+00:00").unwrap()) .change_line("* This is a change".to_string()) .finish(); // You can also use changelog.try_auto_add_change(), which behaves similarly to "dch" println!("{}", changelog); } debian-changelog-0.2.14/examples/dch.rs000064400000000000000000000012641046102023000160040ustar 00000000000000//! A simple example of making a change to a changelog file use std::io::Read; fn main() -> Result<(), Box> { let file = std::fs::File::open("/usr/share/doc/rustc/changelog.Debian.gz")?; let mut gz = flate2::read::GzDecoder::new(file); let mut contents = String::new(); gz.read_to_string(&mut contents)?; let mut changelog: debian_changelog::ChangeLog = contents.parse()?; changelog.try_auto_add_change( &["* Make a change"], ( "Jelmer Vernooij".to_string(), "jelmer@debian.org".to_string(), ), None::, None, )?; changelog.write(std::io::stdout())?; Ok(()) } debian-changelog-0.2.14/examples/simple.rs000064400000000000000000000010351046102023000165330ustar 00000000000000//! A simple example of parsing a Debian changelog. use std::io::Read; fn main() -> Result<(), Box> { let file = std::fs::File::open("/usr/share/doc/rustc/changelog.Debian.gz")?; let mut gz = flate2::read::GzDecoder::new(file); let mut contents = String::new(); gz.read_to_string(&mut contents)?; let changelog: debian_changelog::ChangeLog = contents.parse()?; for entry in changelog.iter() { println!("{}: {}", entry.package().unwrap(), entry.version().unwrap()); } Ok(()) } debian-changelog-0.2.14/src/changes.rs000064400000000000000000000417721046102023000156370ustar 00000000000000//! Functions to parse the changes from a changelog entry. 
use lazy_regex::regex_captures; // A specific section in a changelog entry, e.g.: // // ``` // [ Joe Example] // * Foo, bar // + Blah // * Foo // * Foo // ``` #[derive(Default, Debug, PartialEq, Eq)] struct Section<'a> { // Title of the section, if any title: Option<&'a str>, // Line numbers of the section linenos: Vec, // List of changes in the section changes: Vec>, } /// Return the different sections from a set of changelog entries. /// /// # Arguments /// * `changes`: list of changes from a changelog entry /// /// # Returns /// /// An iterator over tuples with: /// (author, list of line numbers, list of list of (lineno, line) tuples fn changes_sections<'a>( changes: impl Iterator, ) -> impl Iterator> { let mut ret: Vec> = vec![]; let mut section = Section::<'a>::default(); let mut change = Vec::<(usize, &'a str)>::new(); let mut saw_empty = false; for (i, line) in changes.enumerate() { if line.is_empty() && i == 0 { // Skip the first line continue; } if line.is_empty() { section.linenos.push(i); saw_empty = true; continue; } // Check for author section header if let Some(author) = extract_author_name(line) { if !change.is_empty() { section.changes.push(change); change = Vec::new(); } if !section.changes.is_empty() { ret.push(section); } section = Section { title: Some(author), linenos: vec![i], changes: vec![], }; saw_empty = false; } else if !line.starts_with("* ") { change.push((i, line)); section.linenos.push(i); saw_empty = false; } else { // Starting a new bullet point // If we saw an empty line and we're in a titled section, start a new anonymous section if saw_empty && section.title.is_some() && !change.is_empty() { section.changes.push(change); change = Vec::new(); ret.push(section); section = Section { title: None, linenos: vec![], changes: vec![], }; } if !change.is_empty() { section.changes.push(change); } change = vec![(i, line)]; section.linenos.push(i); saw_empty = false; } } if !change.is_empty() { section.changes.push(change); } if 
!section.changes.is_empty() { ret.push(section); } ret.into_iter() } /// Iterate over changes by author /// /// # Arguments /// * `changes`: list of changes from a changelog entry /// /// # Returns /// An iterator over tuples with: /// (author, list of line numbers, list of lines) pub fn changes_by_author<'a>( changes: impl Iterator, ) -> impl Iterator, Vec, Vec<&'a str>)> { changes_sections(changes).map(|section| { let mut all_linenos = Vec::new(); let mut all_lines = Vec::new(); for change_entry in section.changes { for (lineno, line) in change_entry { all_linenos.push(lineno); all_lines.push(line); } } (section.title, all_linenos, all_lines) }) } #[cfg(test)] mod changes_sections_tests { #[test] fn test_simple() { let iter = super::changes_sections(vec!["", "* Change 1", "* Change 2", " rest", ""].into_iter()); assert_eq!( vec![super::Section { title: None, linenos: vec![1, 2, 3, 4], changes: vec![ (vec![(1, "* Change 1")]), (vec![(2, "* Change 2"), (3, " rest")]) ] }], iter.collect::>() ); } #[test] fn test_with_header() { assert_eq!( vec![ super::Section { title: Some("Author 1"), linenos: vec![1, 2, 3], changes: vec![(vec![(2, "* Change 1")])] }, super::Section { title: Some("Author 2"), linenos: vec![4, 5, 6, 7], changes: vec![(vec![(5, "* Change 2"), (6, " rest")])] }, ], super::changes_sections( vec![ "", "[ Author 1 ]", "* Change 1", "", "[ Author 2 ]", "* Change 2", " rest", "", ] .into_iter() ) .collect::>() ); } } /// Strip a changelog message like debcommit does. /// /// Takes a list of changes from a changelog entry and applies a transformation /// so the message is well formatted for a commit message. /// /// # Arguments: /// * `changes` - a list of lines from the changelog entry /// /// # Returns /// Another list of lines with blank lines stripped from the start and the /// spaces the start of the lines split if there is only one logical entry. 
pub fn strip_for_commit_message(mut changes: Vec<&str>) -> Vec<&str> { if changes.is_empty() { return vec![]; } while let Some(last) = changes.last() { if last.trim().is_empty() { changes.pop(); } else { break; } } while let Some(first) = changes.first() { if first.trim().is_empty() { changes.remove(0); } else { break; } } let changes = changes .into_iter() .map(|mut line| loop { if line.starts_with(" ") { line = &line[2..]; } else if line.starts_with('\t') { line = &line[1..]; } else { break line; } }) .collect::>(); // Drop bullet points let bullet_points_dropped = changes .iter() .map(|line| { let line = line.trim_start(); if line.starts_with("* ") || line.starts_with("+ ") || line.starts_with("- ") { line[1..].trim_start() } else { line } }) .collect::>(); if bullet_points_dropped.len() == 1 { bullet_points_dropped } else { changes } } #[cfg(test)] mod strip_for_commit_message_tests { #[test] fn test_no_changes() { assert_eq!(super::strip_for_commit_message(vec![]), Vec::<&str>::new()); } #[test] fn test_empty_changes() { assert_eq!( super::strip_for_commit_message(vec![""]), Vec::<&str>::new() ); } #[test] fn test_removes_leading_whitespace() { assert_eq!( super::strip_for_commit_message(vec!["foo", "bar", "\tbaz", " bang"]), vec!["foo", "bar", "baz", " bang"] ); } #[test] fn test_removes_star_if_one() { assert_eq!(super::strip_for_commit_message(vec!["* foo"]), vec!["foo"]); assert_eq!( super::strip_for_commit_message(vec!["\t* foo"]), vec!["foo"] ); assert_eq!(super::strip_for_commit_message(vec!["+ foo"]), vec!["foo"]); assert_eq!(super::strip_for_commit_message(vec!["- foo"]), vec!["foo"]); assert_eq!(super::strip_for_commit_message(vec!["* foo"]), vec!["foo"]); assert_eq!( super::strip_for_commit_message(vec!["* foo", " bar"]), vec!["* foo", " bar"] ); } #[test] fn test_leaves_start_if_multiple() { assert_eq!( super::strip_for_commit_message(vec!["* foo", "* bar"]), vec!["* foo", "* bar"] ); assert_eq!( super::strip_for_commit_message(vec!["* foo", "+ 
bar"]), vec!["* foo", "+ bar"] ); assert_eq!( super::strip_for_commit_message(vec!["* foo", "bar", "* baz"]), vec!["* foo", "bar", "* baz"] ); } } /// Format a section title. pub fn format_section_title(title: &str) -> String { format!("[ {} ]", title) } #[cfg(test)] mod format_section_title_tests { #[test] fn test() { assert_eq!(super::format_section_title("foo"), "[ foo ]"); } } /// Extract the author name from an author section header. /// /// Returns `Some(author)` if the line is an author section header, /// or `None` if it's not. /// /// # Example /// /// ``` /// assert_eq!(debian_changelog::changes::extract_author_name("[ Alice ]"), Some("Alice")); /// assert_eq!(debian_changelog::changes::extract_author_name(" [ Bob Smith ] "), Some("Bob Smith")); /// assert_eq!(debian_changelog::changes::extract_author_name("* Change line"), None); /// ``` pub fn extract_author_name(line: &str) -> Option<&str> { regex_captures!(r"^\s*\[\s*(.*?)\s*\]\s*$", line).map(|(_, author)| author) } #[cfg(test)] mod extract_author_name_tests { #[test] fn test() { assert_eq!(super::extract_author_name("[ Alice ]"), Some("Alice")); assert_eq!(super::extract_author_name(" [ Bob ] "), Some("Bob")); assert_eq!( super::extract_author_name("[ Multi Word Name ]"), Some("Multi Word Name") ); assert_eq!(super::extract_author_name("* Change line"), None); assert_eq!(super::extract_author_name("Regular text"), None); assert_eq!(super::extract_author_name(""), None); } } /// Add a change to the list of changes, attributed to a specific author. /// /// This will add a new section for the author if there are no sections yet. /// /// Returns an error if text rewrapping fails. 
/// /// # Example /// /// ``` /// let mut changes = vec![]; /// debian_changelog::changes::try_add_change_for_author(&mut changes, "Author 1", vec!["* Change 1"], None); /// assert_eq!(changes, vec!["* Change 1"]); /// ``` pub fn try_add_change_for_author( changes: &mut Vec, author_name: &str, change: Vec<&str>, default_author: Option<(String, String)>, ) -> Result<(), crate::textwrap::Error> { let by_author = changes_by_author(changes.iter().map(|s| s.as_str())).collect::>(); // There are no per author sections yet, so attribute current changes to changelog entry author if by_author.iter().all(|(a, _, _)| a.is_none()) { if let Some((default_name, _default_email)) = default_author { if author_name != default_name.as_str() { if !changes.is_empty() { changes.insert(0, format_section_title(default_name.as_str())); if !changes.last().unwrap().is_empty() { changes.push("".to_string()); } } changes.push(format_section_title(author_name)); } } } else if let Some(last_section) = by_author.last().as_ref() { // There is a last section, so add a new section only if it is not for the same author if last_section.0 != Some(author_name) { changes.push("".to_string()); changes.push(format_section_title(author_name)); } } changes.extend( crate::textwrap::try_rewrap_changes(change.into_iter())? .iter() .map(|s| s.to_string()), ); Ok(()) } /// Add a change to the list of changes, attributed to a specific author. /// /// This will add a new section for the author if there are no sections yet. /// /// # Deprecated /// /// This function panics on errors. Use [`try_add_change_for_author`] instead for proper error handling. /// /// # Panics /// /// Panics if text rewrapping fails. 
/// /// # Example /// /// ``` /// let mut changes = vec![]; /// debian_changelog::changes::add_change_for_author(&mut changes, "Author 1", vec!["* Change 1"], None); /// assert_eq!(changes, vec!["* Change 1"]); /// ``` #[deprecated( since = "0.2.10", note = "Use try_add_change_for_author for proper error handling" )] pub fn add_change_for_author( changes: &mut Vec, author_name: &str, change: Vec<&str>, default_author: Option<(String, String)>, ) { try_add_change_for_author(changes, author_name, change, default_author).unwrap() } #[cfg(test)] mod add_change_for_author_tests { use super::*; #[test] fn test_matches_default() { let mut changes = vec![]; try_add_change_for_author( &mut changes, "Author 1", vec!["* Change 1"], Some(("Author 1".to_string(), "jelmer@debian.org".to_string())), ) .unwrap(); assert_eq!(changes, vec!["* Change 1"]); } #[test] fn test_not_matches_default() { let mut changes = vec![]; try_add_change_for_author( &mut changes, "Author 1", vec!["* Change 1"], Some(( "Default Author".to_string(), "jelmer@debian.org".to_string(), )), ) .unwrap(); assert_eq!(changes, vec!["[ Author 1 ]", "* Change 1"]); } } /// Find additional authors from a changelog entry pub fn find_extra_authors<'a>(changes: &'a [&'a str]) -> std::collections::HashSet<&'a str> { changes_by_author(changes.iter().copied()) .filter_map(|(author, _, _)| author) .collect::>() } #[test] fn test_find_extra_authors() { assert_eq!( find_extra_authors(&["[ Author 1 ]", "* Change 1"]), maplit::hashset! {"Author 1"} ); assert_eq!( find_extra_authors(&["[ Author 1 ]", "[ Author 2 ]", "* Change 1"]), maplit::hashset! {"Author 2"} ); assert_eq!( find_extra_authors(&["[ Author 1 ]", "[ Author 2 ]", "* Change 1", "* Change 2"]), maplit::hashset! {"Author 2"} ); assert_eq!( find_extra_authors(&["[ Author 1 ]", "* Change 1", "[ Author 2 ]", "* Change 2"]), maplit::hashset! {"Author 1", "Author 2"} ); assert_eq!( find_extra_authors(&["* Change 1", "* Change 2",]), maplit::hashset! 
{} ); } /// Find authors that are thanked in a changelog entry pub fn find_thanks<'a>(changes: &'a [&'a str]) -> std::collections::HashSet<&'a str> { let regex = lazy_regex::regex!( r"[tT]hank(?:(?:s)|(?:you))(?:\s*to)?((?:\s+(?:(?:\w\.)|(?:\w+(?:-\w+)*)))+(?:\s+<[^@>]+@[^@>]+>)?)" ); changes_by_author(changes.iter().copied()) .flat_map(|(_, _, lines)| { lines.into_iter().map(|line| { regex .captures_iter(line) .map(|m| m.get(1).unwrap().as_str().trim()) }) }) .flatten() .collect::>() } #[test] fn test_find_thanks() { assert_eq!(find_thanks(&[]), maplit::hashset! {}); assert_eq!(find_thanks(&["* Do foo", "* Do bar"]), maplit::hashset! {}); assert_eq!( find_thanks(&["* Thanks to A. Hacker"]), maplit::hashset! {"A. Hacker"} ); assert_eq!( find_thanks(&["* Thanks to James A. Hacker"]), maplit::hashset! {"James A. Hacker"} ); assert_eq!( find_thanks(&["* Thankyou to B. Hacker"]), maplit::hashset! {"B. Hacker"} ); assert_eq!( find_thanks(&["* thanks to A. Hacker"]), maplit::hashset! {"A. Hacker"} ); assert_eq!( find_thanks(&["* thankyou to B. Hacker"]), maplit::hashset! {"B. Hacker"} ); assert_eq!( find_thanks(&["* Thanks A. Hacker"]), maplit::hashset! {"A. Hacker"} ); assert_eq!( find_thanks(&["* Thankyou B. Hacker"]), maplit::hashset! {"B. Hacker"} ); assert_eq!( find_thanks(&["* Thanks to Mark A. Super-Hacker"]), maplit::hashset! {"Mark A. Super-Hacker"} ); assert_eq!( find_thanks(&["* Thanks to A. Hacker "]), maplit::hashset! {"A. Hacker "} ); assert_eq!( find_thanks(&["* Thanks to Adeodato Simó"]), maplit::hashset! {"Adeodato Simó"} ); } /// Check if all lines in a changelog entry are prefixed with a sha. /// /// This is generally done by gbp-dch(1). 
pub fn all_sha_prefixed(changes: &[&str]) -> bool { changes_sections(changes.iter().cloned()) .flat_map(|section| { section .changes .into_iter() .flat_map(|ls| ls.into_iter().map(|(_, l)| l)) }) .all(|line| lazy_regex::regex_is_match!(r"^\* \[[0-9a-f]{7}\] ", line)) } #[test] fn test_all_sha_prefixed() { assert!(all_sha_prefixed(&[ "* [a1b2c3d] foo", "* [a1b2c3d] bar", "* [a1b2c3d] baz", ])); assert!(!all_sha_prefixed(&[ "* [a1b2c3d] foo", "* bar", "* [a1b2c3d] baz", ])); } debian-changelog-0.2.14/src/lex.rs000064400000000000000000000232611046102023000150100ustar 00000000000000use crate::SyntaxKind; use std::iter::Peekable; use std::str::Chars; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] enum LineType { Header, Body, Footer, } pub struct Lexer<'a> { input: Peekable>, line_type: Option, } impl<'a> Lexer<'a> { pub fn new(input: &'a str) -> Self { Lexer { input: input.chars().peekable(), line_type: None, } } fn is_whitespace(c: char) -> bool { c == ' ' || c == '\t' } fn is_newline(c: char) -> bool { c == '\n' || c == '\r' } fn is_valid_identifier_char(c: char) -> bool { c.is_ascii_alphanumeric() || c == '-' || c == '.' 
} fn read_while(&mut self, predicate: F) -> String where F: Fn(char) -> bool, { let mut result = String::new(); while let Some(&c) = self.input.peek() { if predicate(c) { result.push(c); self.input.next(); } else { break; } } result } fn read_while_n(&mut self, n: usize, predicate: F) -> String where F: Fn(char) -> bool, { let mut result = String::new(); while let Some(&c) = self.input.peek() { if predicate(c) { result.push(c); self.input.next(); if result.len() >= n { break; } } else { break; } } result } fn next_token(&mut self) -> Option<(SyntaxKind, String)> { if let Some(&c) = self.input.peek() { match (c, self.line_type) { (c, None) | (c, Some(LineType::Header)) if Self::is_valid_identifier_char(c) => { let identifier = self.read_while(Self::is_valid_identifier_char); self.line_type = Some(LineType::Header); Some((SyntaxKind::IDENTIFIER, identifier)) } (c, None) if Self::is_whitespace(c) => { let mut indent = self.read_while_n(2, |c| c == ' '); if indent.len() == 1 { let dashes = self.read_while(|c| c == '-' || c == ' '); indent.push_str(dashes.as_str()); self.line_type = Some(LineType::Footer); } else { self.line_type = Some(LineType::Body); } Some((SyntaxKind::INDENT, indent)) } ('#', None) => { let comment = self.read_while(|c| !Self::is_newline(c)); let n = self.input.next(); if let Some(n) = n { Some((SyntaxKind::COMMENT, comment + &n.to_string())) } else { Some((SyntaxKind::COMMENT, comment)) } } (c, _) if Self::is_newline(c) => { self.input.next(); self.line_type = None; Some((SyntaxKind::NEWLINE, String::from(c))) } (';', Some(LineType::Header)) => Some(( SyntaxKind::SEMICOLON, String::from(self.input.next().unwrap()), )), ('(', Some(LineType::Header)) => { let version = self .read_while(|c| c != ')' && c != ';' && c != ' ' && !Self::is_newline(c)); let n = self.input.next(); if n == Some(')') { Some((SyntaxKind::VERSION, version + &n.unwrap().to_string())) } else if let Some(n) = n { Some((SyntaxKind::ERROR, version + &n.to_string())) } else { 
Some((SyntaxKind::ERROR, version)) } } ('=', Some(LineType::Header)) => { Some((SyntaxKind::EQUALS, String::from(self.input.next().unwrap()))) } (_, Some(LineType::Body)) => { let detail = self.read_while(|c| !Self::is_newline(c)); Some((SyntaxKind::DETAIL, detail)) } (c, _) if Self::is_whitespace(c) => { let ws = self.read_while(Self::is_whitespace); Some((SyntaxKind::WHITESPACE, ws)) } ('<', Some(LineType::Footer)) => { let email = self.read_while(|c| c != '>' && c != ' ' && !Self::is_newline(c)); let n = self.input.next(); if n == Some('>') { Some((SyntaxKind::EMAIL, email + &n.unwrap().to_string())) } else if let Some(n) = n { Some((SyntaxKind::ERROR, email + &n.to_string())) } else { Some((SyntaxKind::ERROR, email)) } } (c, Some(LineType::Footer)) if !Self::is_whitespace(c) && !Self::is_newline(c) => { let identifier = self.read_while(|c| c != ' ' && c != '<' && !Self::is_newline(c)); Some((SyntaxKind::TEXT, identifier)) } (_, _) => { self.input.next(); Some((SyntaxKind::ERROR, String::from(c))) } } } else { None } } } impl Iterator for Lexer<'_> { type Item = (crate::SyntaxKind, String); fn next(&mut self) -> Option { self.next_token() } } pub(crate) fn lex(input: &str) -> Vec<(SyntaxKind, String)> { let mut lexer = Lexer::new(input); lexer.by_ref().collect::>() } #[cfg(test)] mod tests { use crate::SyntaxKind::*; #[test] fn test_empty() { assert_eq!(super::lex(""), vec![]); } #[test] fn test_simple() { assert_eq!( super::lex( r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. 
-- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0000 # Oh, and here is a comment "# ) .iter() .map(|(kind, text)| (*kind, text.as_str())) .collect::>(), vec![ (IDENTIFIER, "breezy"), (WHITESPACE, " "), (VERSION, "(3.3.4-1)"), (WHITESPACE, " "), (IDENTIFIER, "unstable"), (SEMICOLON, ";"), (WHITESPACE, " "), (IDENTIFIER, "urgency"), (EQUALS, "="), (IDENTIFIER, "low"), (NEWLINE, "\n"), (NEWLINE, "\n"), (INDENT, " "), (DETAIL, "* New upstream release."), (NEWLINE, "\n"), (NEWLINE, "\n"), (INDENT, " -- "), (TEXT, "Jelmer"), (WHITESPACE, " "), (TEXT, "Vernooij"), (WHITESPACE, " "), (EMAIL, ""), (WHITESPACE, " "), (TEXT, "Mon,"), (WHITESPACE, " "), (TEXT, "04"), (WHITESPACE, " "), (TEXT, "Sep"), (WHITESPACE, " "), (TEXT, "2023"), (WHITESPACE, " "), (TEXT, "18:13:45"), (WHITESPACE, " "), (TEXT, "-0000"), (NEWLINE, "\n"), (NEWLINE, "\n"), (COMMENT, "# Oh, and here is a comment\n"), ] ); } #[test] fn test_email_edge_cases() { // Test email without closing > assert_eq!( super::lex(" -- Name >(), vec![ (INDENT, " -- "), (TEXT, "Name"), (WHITESPACE, " "), (ERROR, " assert_eq!( super::lex(" -- Name x") .iter() .map(|(kind, text)| (*kind, text.as_str())) .collect::>(), vec![ (INDENT, " -- "), (TEXT, "Name"), (WHITESPACE, " "), (EMAIL, ""), (TEXT, "x"), ] ); } #[test] fn test_comment_without_newline() { assert_eq!( super::lex("# Comment without newline") .iter() .map(|(kind, text)| (*kind, text.as_str())) .collect::>(), vec![(COMMENT, "# Comment without newline")] ); } #[test] fn test_footer_text_parsing() { // Test footer line with various characters assert_eq!( super::lex(" -- Name123-test") .iter() .map(|(kind, text)| (*kind, text.as_str())) .collect::>(), vec![(INDENT, " -- "), (TEXT, "Name123-test"),] ); } #[test] fn test_whitespace_handling() { // Test various whitespace scenarios - when at start of line with detail context, it becomes INDENT assert_eq!( super::lex(" \t ") .iter() .map(|(kind, text)| (*kind, text.as_str())) .collect::>(), vec![(INDENT, " "), (DETAIL, "\t ")] 
); } } debian-changelog-0.2.14/src/lib.rs000064400000000000000000002755061046102023000150010ustar 00000000000000#![deny(missing_docs)] //! A lossless parser for Debian changelog files. //! //! See https://manpages.debian.org/bookworm/dpkg-dev/deb-changelog.5.en.html //! //! For its format specification, see [Debian Policy](https://www.debian.org/doc/debian-policy/ch-source.html#debian-changelog-debian-changelog). //! //! Example: //! //! ```rust //! use std::io::Read; //! let contents = r#"rustc (1.70.0+dfsg1-1) unstable; urgency=medium //! //! * Upload to unstable //! //! -- Jelmer Vernooij Wed, 20 Sep 2023 20:18:40 +0200 //! "#; //! let changelog: debian_changelog::ChangeLog = contents.parse().unwrap(); //! assert_eq!( //! vec![("rustc".to_string(), "1.70.0+dfsg1-1".parse().unwrap())], //! changelog.iter().map( //! |e| (e.package().unwrap(), e.version().unwrap())) //! .collect::>()); //! ``` mod lex; mod parse; use lazy_regex::regex_captures; pub mod changes; pub mod textwrap; use crate::parse::{SyntaxNode, SyntaxToken}; use debversion::Version; use rowan::ast::AstNode; pub use crate::changes::changes_by_author; pub use crate::parse::{ ChangeLog, Entry, EntryBody, EntryFooter, EntryHeader, Error, IntoTimestamp, Maintainer, MetadataEntry, MetadataKey, MetadataValue, Parse, ParseError, Timestamp, Urgency, }; /// Represents a logical change within a changelog entry. /// /// This struct wraps specific DETAIL tokens within an Entry's syntax tree /// and provides methods to manipulate them while maintaining the AST structure. #[derive(Debug, Clone)] pub struct Change { /// The parent entry containing this change entry: Entry, /// The author of the change (if attributed) author: Option, /// Line numbers in the original entry where this change appears line_numbers: Vec, /// The actual change lines as tokens in the syntax tree detail_tokens: Vec, } impl Change { /// Create a new Change instance. 
pub(crate) fn new( entry: Entry, author: Option, line_numbers: Vec, detail_tokens: Vec, ) -> Self { Self { entry, author, line_numbers, detail_tokens, } } /// Get the author of this change. pub fn author(&self) -> Option<&str> { self.author.as_deref() } /// Get the line numbers in the original entry where this change appears. pub fn line_numbers(&self) -> &[usize] { &self.line_numbers } /// Get the lines of this change. pub fn lines(&self) -> Vec { self.detail_tokens .iter() .map(|token| token.text().to_string()) .collect() } /// Get the package name this change belongs to. pub fn package(&self) -> Option { self.entry.package() } /// Get the version this change belongs to, returning an error if the version string is invalid. /// /// Returns: /// - `Some(Ok(version))` if a valid version is found /// - `Some(Err(err))` if a version token exists but cannot be parsed /// - `None` if no version token is present pub fn try_version(&self) -> Option> { self.entry.try_version() } /// Get the version this change belongs to. /// /// Note: This method silently returns `None` if the version string is invalid. /// Consider using [`try_version`](Self::try_version) instead to handle parsing errors properly. pub fn version(&self) -> Option { self.try_version().and_then(|r| r.ok()) } /// Check if this change is attributed to a specific author. pub fn is_attributed(&self) -> bool { self.author.is_some() } /// Get a reference to the parent entry. pub fn entry(&self) -> &Entry { &self.entry } /// Get the line number (0-indexed) where this change starts. /// /// Returns the line number of the first detail token, or None if the change has no tokens. pub fn line(&self) -> Option { self.detail_tokens.first().map(|token| { parse::line_col_at_offset(self.entry.syntax(), token.text_range().start()).0 }) } /// Get the column number (0-indexed, in bytes) where this change starts. /// /// Returns the column number of the first detail token, or None if the change has no tokens. 
pub fn column(&self) -> Option<usize> {
    // Column of the first DETAIL token, or None when the change has no tokens.
    self.detail_tokens.first().map(|token| {
        parse::line_col_at_offset(self.entry.syntax(), token.text_range().start()).1
    })
}

/// Get both line and column (0-indexed) where this change starts.
///
/// Returns (line, column) where column is measured in bytes from the start of the line,
/// or None if the change has no tokens.
pub fn line_col(&self) -> Option<(usize, usize)> {
    self.detail_tokens
        .first()
        .map(|token| parse::line_col_at_offset(self.entry.syntax(), token.text_range().start()))
}

/// Remove this change from its parent entry.
///
/// This removes all DETAIL tokens (ENTRY_BODY nodes) associated with this change
/// from the syntax tree. If this removes the last change in an author section,
/// the empty section header will also be removed.
pub fn remove(self) {
    // Store info we'll need after removal
    let author = self.author.clone();

    // Collect the parent ENTRY_BODY nodes that contain our detail tokens
    let mut body_nodes_to_remove = Vec::new();
    for token in &self.detail_tokens {
        if let Some(parent) = token.parent() {
            if parent.kind() == SyntaxKind::ENTRY_BODY {
                // Check if we haven't already marked this node for removal
                if !body_nodes_to_remove
                    .iter()
                    .any(|n: &SyntaxNode| n == &parent)
                {
                    body_nodes_to_remove.push(parent);
                }
            }
        }
    }

    // Find the section header node if this is an attributed change
    // and capture its index BEFORE we remove any nodes
    let section_header_index = if author.is_some() && !body_nodes_to_remove.is_empty() {
        Self::find_section_header_for_changes(&self.entry, &body_nodes_to_remove)
            .map(|node| node.index())
    } else {
        None
    };

    // Remove the ENTRY_BODY nodes from the entry's syntax tree
    // We need to remove from highest index to lowest to avoid index shifting issues
    let mut sorted_nodes = body_nodes_to_remove;
    sorted_nodes.sort_by_key(|n| std::cmp::Reverse(n.index()));

    // Track which indices to remove (ENTRY_BODY nodes and trailing EMPTY_LINE nodes)
    let mut indices_to_remove = Vec::new();
    let children: Vec<_> = self.entry.syntax().children().collect();
    for body_node in &sorted_nodes {
        let index = body_node.index();
        indices_to_remove.push(index);
        // Remove trailing EMPTY_LINE if it exists and would create consecutive blanks
        if Self::should_remove_trailing_empty(&children, index) {
            indices_to_remove.push(index + 1);
        }
    }

    // Sort indices in reverse order and remove duplicates
    indices_to_remove.sort_by_key(|&i| std::cmp::Reverse(i));
    indices_to_remove.dedup();

    // Remove the nodes
    for index in indices_to_remove {
        self.entry
            .syntax()
            .splice_children(index..index + 1, vec![]);
    }

    // Check if section is now empty and remove header if needed
    // After removing bullets, we need to adjust the header index based on how many
    // nodes were removed before it
    if let Some(original_header_idx) = section_header_index {
        // Count how many nodes we removed that were before the header
        // NOTE(review): `n.index()` is queried here on nodes that were already
        // detached by splice_children above — confirm rowan keeps the pre-removal
        // index stable on detached nodes, otherwise this count could be wrong.
        let nodes_removed_before_header = sorted_nodes
            .iter()
            .filter(|n| n.index() < original_header_idx)
            .count();
        // Adjust the header index
        let adjusted_header_idx = original_header_idx - nodes_removed_before_header;
        Self::remove_section_header_if_empty_at_index(&self.entry, adjusted_header_idx);
    }
}

/// Check if a node is a section header (e.g., "[ Author Name ]")
fn is_section_header(node: &SyntaxNode) -> bool {
    if node.kind() != SyntaxKind::ENTRY_BODY {
        return false;
    }
    // A DETAIL token that parses as "[ Name ]" marks an author section header.
    for token in node.descendants_with_tokens() {
        if let Some(token) = token.as_token() {
            if token.kind() == SyntaxKind::DETAIL
                && crate::changes::extract_author_name(token.text()).is_some()
            {
                return true;
            }
        }
    }
    false
}

/// Check if the trailing EMPTY_LINE after an entry should be removed
/// Returns true if removing it would prevent consecutive blank lines
fn should_remove_trailing_empty(children: &[SyntaxNode], entry_index: usize) -> bool {
    // Check if there's a trailing EMPTY_LINE
    let has_trailing_empty = children
        .get(entry_index + 1)
        .is_some_and(|n| n.kind() == SyntaxKind::EMPTY_LINE);
    if !has_trailing_empty {
        return false;
    }
    // Remove if there's already an EMPTY_LINE before (would create consecutive blanks)
    let has_preceding_empty = entry_index > 0
        && children
            .get(entry_index - 1)
            .is_some_and(|n| n.kind() == SyntaxKind::EMPTY_LINE);
    if has_preceding_empty {
        return true;
    }
    // Remove if what follows would create consecutive blanks or be a section header
    match children.get(entry_index + 2) {
        Some(node) if node.kind() == SyntaxKind::EMPTY_LINE => true,
        Some(node) if Self::is_section_header(node) => true,
        _ => false,
    }
}

/// Check if the preceding EMPTY_LINE before a section header should be removed
/// Preserves the blank line if it's the first one after the entry header
fn should_remove_preceding_empty(children: &[SyntaxNode], header_index: usize) -> bool {
    if header_index == 0 {
        return false;
    }
    // Check if there's a preceding EMPTY_LINE
    let has_preceding_empty = children
        .get(header_index - 1)
        .is_some_and(|n| n.kind() == SyntaxKind::EMPTY_LINE);
    if !has_preceding_empty {
        return false;
    }
    // Don't remove if it's the first blank line after the entry header
    let is_first_blank_after_header = header_index >= 2
        && children
            .get(header_index - 2)
            .is_some_and(|n| n.kind() == SyntaxKind::ENTRY_HEADER);
    !is_first_blank_after_header
}

/// Find the section header that precedes the given change nodes
fn find_section_header_for_changes(
    entry: &Entry,
    change_nodes: &[SyntaxNode],
) -> Option<SyntaxNode> {
    if change_nodes.is_empty() {
        return None;
    }
    let first_change_index = change_nodes.iter().map(|n| n.index()).min().unwrap();
    // Scan all headers before the first change node; the last one seen wins.
    let mut header_node = None;
    for child in entry.syntax().children() {
        for token_or_node in child.children_with_tokens() {
            let Some(token) = token_or_node.as_token() else {
                continue;
            };
            if token.kind() != SyntaxKind::DETAIL {
                continue;
            }
            let Some(parent) = token.parent() else {
                continue;
            };
            if parent.kind() != SyntaxKind::ENTRY_BODY {
                continue;
            }
            let parent_index = parent.index();
            if parent_index >= first_change_index {
                continue;
            }
            if crate::changes::extract_author_name(token.text()).is_some() {
                header_node = Some(parent);
            }
        }
    }
    header_node
}

/// Remove a section header if its section is now empty
fn remove_section_header_if_empty_at_index(entry: &Entry, header_index: usize) {
    // Check if there are any bullet points after this header and before the next header
    let mut has_bullets_in_section = false;
    'outer: for child in entry.syntax().children() {
        for token_or_node in child.children_with_tokens() {
            let Some(token) = token_or_node.as_token() else {
                continue;
            };
            if token.kind() != SyntaxKind::DETAIL {
                continue;
            }
            let Some(parent) = token.parent() else {
                continue;
            };
            if parent.kind() != SyntaxKind::ENTRY_BODY {
                continue;
            }
            let parent_index = parent.index();
            if parent_index <= header_index {
                continue;
            }
            let text = token.text();
            // If we hit another section header, stop searching
            if crate::changes::extract_author_name(text).is_some() {
                break 'outer;
            }
            // If we find a bullet point, section is not empty
            if text.starts_with("* ") {
                has_bullets_in_section = true;
                break 'outer;
            }
        }
    }

    // Remove the header if section is empty
    if !has_bullets_in_section {
        let children: Vec<_> = entry.syntax().children().collect();
        // Determine if we should also remove the preceding EMPTY_LINE
        // (but preserve the blank line right after the entry header)
        let start_index = if Self::should_remove_preceding_empty(&children, header_index) {
            header_index - 1
        } else {
            header_index
        };
        // Important: rowan's splice_children iterates and detaches nodes in order.
        // When a node is detached, it changes the tree immediately, which can cause
        // the iteration to skip nodes. Removing in reverse order avoids this issue.
        for idx in (start_index..=header_index).rev() {
            entry.syntax().splice_children(idx..idx + 1, vec![]);
        }
    }
}

/// Replace this change with new lines.
///
/// This removes the current change lines and replaces them with the provided lines.
///
/// # Arguments
/// * `new_lines` - The new change lines to replace with (e.g., `["* Updated feature"]`)
pub fn replace_with(&self, new_lines: Vec<&str>) {
    use rowan::GreenNodeBuilder;

    // Find the first ENTRY_BODY node to determine insertion point
    let first_body_node = self
        .detail_tokens
        .first()
        .and_then(|token| token.parent())
        .filter(|parent| parent.kind() == SyntaxKind::ENTRY_BODY);

    if let Some(_first_node) = first_body_node {
        // Collect all ENTRY_BODY nodes to remove
        let mut body_nodes_to_remove = Vec::new();
        for token in &self.detail_tokens {
            if let Some(parent) = token.parent() {
                if parent.kind() == SyntaxKind::ENTRY_BODY
                    && !body_nodes_to_remove
                        .iter()
                        .any(|n: &SyntaxNode| n == &parent)
                {
                    body_nodes_to_remove.push(parent);
                }
            }
        }

        // Build replacement nodes
        let mut new_nodes = Vec::new();
        for line in new_lines {
            let mut builder = GreenNodeBuilder::new();
            builder.start_node(SyntaxKind::ENTRY_BODY.into());
            if !line.is_empty() {
                // NOTE(review): Debian changelog detail lines conventionally use a
                // two-space indent; confirm this INDENT literal survived extraction
                // intact in the original source.
                builder.token(SyntaxKind::INDENT.into(), " ");
                builder.token(SyntaxKind::DETAIL.into(), line);
            }
            builder.token(SyntaxKind::NEWLINE.into(), "\n");
            builder.finish_node();
            let syntax = SyntaxNode::new_root_mut(builder.finish());
            new_nodes.push(syntax.into());
        }

        // Remove old nodes and insert new ones
        // We need to remove from highest index to lowest to avoid index shifting issues
        let mut sorted_nodes = body_nodes_to_remove.clone();
        sorted_nodes.sort_by_key(|n| std::cmp::Reverse(n.index()));
        for (i, node) in sorted_nodes.iter().enumerate() {
            let idx = node.index();
            if i == 0 {
                // For the first removal, insert the new nodes
                self.entry
                    .syntax()
                    .splice_children(idx..idx + 1, new_nodes.clone());
            } else {
                // For subsequent removals, just remove
                self.entry.syntax().splice_children(idx..idx + 1, vec![]);
            }
        }
    }
}

/// Replace a specific line in this change by index.
///
/// # Arguments
/// * `index` - The zero-based index of the line to replace
/// * `new_text` - The new text for the line
///
/// # Returns
/// * `Ok(())` if the line was replaced successfully
/// * `Err(Error)` if the index is out of bounds
///
/// # Examples
/// ```
/// use debian_changelog::{ChangeLog, iter_changes_by_author};
///
/// let changelog_text = r#"blah (1.0-1) unstable; urgency=low
///
///   * First change
///   * Second change
///
///  -- Author <author@example.com>  Mon, 01 Jan 2024 00:00:00 +0000
/// "#;
///
/// let changelog = ChangeLog::read_relaxed(changelog_text.as_bytes()).unwrap();
/// let changes = iter_changes_by_author(&changelog);
/// changes[0].replace_line(0, "* Updated first change").unwrap();
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn replace_line(&self, index: usize, new_text: &str) -> Result<(), Error> {
    if index >= self.detail_tokens.len() {
        return Err(Error::Io(std::io::Error::new(
            std::io::ErrorKind::InvalidInput,
            format!(
                "Line index {} out of bounds (0..{})",
                index,
                self.detail_tokens.len()
            ),
        )));
    }
    // Rewrite the whole change with only the requested line swapped out.
    let mut new_lines = self.lines();
    new_lines[index] = new_text.to_string();
    self.replace_with(new_lines.iter().map(|s| s.as_str()).collect());
    Ok(())
}

/// Update lines in this change that match a predicate.
///
/// This method finds all lines that match the predicate function and replaces
/// them with the result of the updater function.
///
/// # Arguments
/// * `predicate` - A function that returns true for lines that should be updated
/// * `updater` - A function that takes the old line text and returns the new line text
///
/// # Returns
/// The number of lines that were updated
///
/// # Examples
/// ```
/// use debian_changelog::{ChangeLog, iter_changes_by_author};
///
/// let changelog_text = r#"blah (1.0-1) unstable; urgency=low
///
///   * First change
///   * Second change
///   * Third change
///
///  -- Author <author@example.com>  Mon, 01 Jan 2024 00:00:00 +0000
/// "#;
///
/// let changelog = ChangeLog::read_relaxed(changelog_text.as_bytes()).unwrap();
/// let changes = iter_changes_by_author(&changelog);
///
/// // Update lines containing "First" or "Second"
/// let count = changes[0].update_lines(
///     |line| line.contains("First") || line.contains("Second"),
///     |line| format!("{} (updated)", line)
/// );
/// assert_eq!(count, 2);
/// ```
pub fn update_lines<F, G>(&self, predicate: F, updater: G) -> usize
where
    F: Fn(&str) -> bool,
    G: Fn(&str) -> String,
{
    let mut new_lines = self.lines();
    let mut update_count = 0;
    for line in &mut new_lines {
        if predicate(line) {
            *line = updater(line);
            update_count += 1;
        }
    }
    // Only touch the syntax tree when at least one line actually changed.
    if update_count > 0 {
        self.replace_with(new_lines.iter().map(|s| s.as_str()).collect());
    }
    update_count
}

/// Split this change into individual bullet points.
///
/// Each bullet point (line starting with "* ") and its continuation lines
/// (indented lines that follow) become a separate Change object.
///
/// # Returns
/// A vector of Change objects, one per bullet point. Each Change contains:
/// - The same entry and author as the parent
/// - Subset of line_numbers for that specific bullet
/// - Subset of detail_tokens for that bullet and its continuation lines
///
/// # Examples
/// ```
/// use debian_changelog::{ChangeLog, iter_changes_by_author};
///
/// let changelog_text = r#"blah (1.0-1) unstable; urgency=low
///
///   * First change
///   * Second change
///     with continuation
///
///  -- Author <author@example.com>  Mon, 01 Jan 2024 00:00:00 +0000
/// "#;
///
/// let changelog = ChangeLog::read_relaxed(changelog_text.as_bytes()).unwrap();
/// let changes = iter_changes_by_author(&changelog);
/// let bullets = changes[0].split_into_bullets();
/// assert_eq!(bullets.len(), 2);
/// assert_eq!(bullets[0].lines(), vec!["* First change"]);
/// assert_eq!(bullets[1].lines(), vec!["* Second change", "  with continuation"]);
/// ```
pub fn split_into_bullets(&self) -> Vec<Change> {
    let mut result = Vec::new();
    let mut current_bullet_tokens = Vec::new();
    let mut current_bullet_line_numbers = Vec::new();

    for (i, token) in self.detail_tokens.iter().enumerate() {
        let text = token.text();
        let line_number = self.line_numbers.get(i).copied().unwrap_or(0);

        // Check if this is a new bullet point (starts with "* ")
        if text.starts_with("* ") {
            // If we have a previous bullet, save it
            if !current_bullet_tokens.is_empty() {
                result.push(Change::new(
                    self.entry.clone(),
                    self.author.clone(),
                    current_bullet_line_numbers.clone(),
                    current_bullet_tokens.clone(),
                ));
                current_bullet_tokens.clear();
                current_bullet_line_numbers.clear();
            }
            // Start a new bullet
            current_bullet_tokens.push(token.clone());
            current_bullet_line_numbers.push(line_number);
        } else {
            // This is a continuation line, add to current bullet
            current_bullet_tokens.push(token.clone());
            current_bullet_line_numbers.push(line_number);
        }
    }

    // Don't forget the last bullet
    if !current_bullet_tokens.is_empty() {
        result.push(Change::new(
            self.entry.clone(),
            self.author.clone(),
            current_bullet_line_numbers,
            current_bullet_tokens,
        ));
    }
    result
}
}

/// Let's start with defining all kinds of tokens and
/// composite nodes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[allow(non_camel_case_types)]
#[repr(u16)]
#[allow(missing_docs)]
pub enum SyntaxKind {
    IDENTIFIER = 0,
    INDENT,
    TEXT,
    WHITESPACE,
    VERSION,   // "(3.3.4-1)"
    SEMICOLON, // ";"
    EQUALS,    // "="
    DETAIL,    // "* New upstream release."
    NEWLINE,   // newlines are explicit
    ERROR,     // as well as errors
    COMMENT,   // "#"
    // composite nodes
    ROOT,  // The entire file
    ENTRY, // A single entry
    ENTRY_HEADER,
    ENTRY_FOOTER,
    METADATA,
    METADATA_ENTRY,
    METADATA_KEY,
    METADATA_VALUE,
    ENTRY_BODY,
    DISTRIBUTIONS,
    EMPTY_LINE,
    TIMESTAMP,
    MAINTAINER,
    EMAIL,
}

/// Convert our `SyntaxKind` into the rowan `SyntaxKind`.
impl From<SyntaxKind> for rowan::SyntaxKind {
    fn from(kind: SyntaxKind) -> Self {
        // repr(u16) makes this cast lossless.
        Self(kind as u16)
    }
}

/// Parse an identity string
///
/// # Arguments
/// * `s` - The string to parse
///
/// # Returns
/// A tuple with name and email address
pub fn parseaddr(s: &str) -> (Option<&str>, &str) {
    // Matches "Full Name <email>"; anything else is treated as a bare address.
    if let Some((_, name, email)) = regex_captures!(r"^(.*)\s+<(.*)>$", s) {
        if name.is_empty() {
            (None, email)
        } else {
            (Some(name), email)
        }
    } else {
        (None, s)
    }
}

/// Get the maintainer information from the environment.
pub fn get_maintainer_from_env( get_env: impl Fn(&str) -> Option, ) -> Option<(String, String)> { use std::io::BufRead; let mut debemail = get_env("DEBEMAIL"); let mut debfullname = get_env("DEBFULLNAME"); // Split email and name if let Some(email) = debemail.as_ref() { let (parsed_name, parsed_email) = parseaddr(email); if let Some(parsed_name) = parsed_name { if debfullname.is_none() { debfullname = Some(parsed_name.to_string()); } } debemail = Some(parsed_email.to_string()); } if debfullname.is_none() || debemail.is_none() { if let Some(email) = get_env("EMAIL") { let (parsed_name, parsed_email) = parseaddr(email.as_str()); if let Some(parsed_name) = parsed_name { if debfullname.is_none() { debfullname = Some(parsed_name.to_string()); } } debemail = Some(parsed_email.to_string()); } } // Get maintainer's name let maintainer = if let Some(m) = debfullname { Some(m.trim().to_string()) } else if let Some(m) = get_env("NAME") { Some(m.trim().to_string()) } else { Some(whoami::realname()) }; // Get maintainer's mail address let email_address = if let Some(email) = debemail { Some(email) } else if let Some(email) = get_env("EMAIL") { Some(email) } else { // Read /etc/mailname or use hostname let mut addr: Option = None; if let Ok(mailname_file) = std::fs::File::open("/etc/mailname") { let mut reader = std::io::BufReader::new(mailname_file); if let Ok(line) = reader.fill_buf() { if !line.is_empty() { addr = Some(String::from_utf8_lossy(line).trim().to_string()); } } } if addr.is_none() { match whoami::fallible::hostname() { Ok(hostname) => { addr = Some(hostname); } Err(e) => { log::debug!("Failed to get hostname: {}", e); addr = None; } } } addr.map(|hostname| format!("{}@{}", whoami::username(), hostname)) }; if let (Some(maintainer), Some(email_address)) = (maintainer, email_address) { Some((maintainer, email_address)) } else { None } } /// Get the maintainer information in the same manner as dch. 
///
/// This function gets the information about the current user for
/// the maintainer field using environment variables or gecos
/// information as appropriate.
///
/// It uses the same algorithm as dch to get the information, namely
/// DEBEMAIL, DEBFULLNAME, EMAIL, NAME, /etc/mailname and gecos.
///
/// # Returns
///
/// a tuple of the full name, email pair as strings.
/// Either of the pair may be None if that value couldn't
/// be determined.
pub fn get_maintainer() -> Option<(String, String)> {
    // Thin wrapper over get_maintainer_from_env backed by the process environment.
    get_maintainer_from_env(|s| std::env::var(s).ok())
}

#[cfg(test)]
mod get_maintainer_from_env_tests {
    use super::*;

    #[test]
    fn test_normal() {
        // Just exercise the environment-backed path; the result depends on the host.
        get_maintainer();
    }

    #[test]
    fn test_deb_vars() {
        let mut d = std::collections::HashMap::new();
        d.insert("DEBFULLNAME".to_string(), "Jelmer".to_string());
        d.insert("DEBEMAIL".to_string(), "jelmer@example.com".to_string());
        let t = get_maintainer_from_env(|s| d.get(s).cloned());
        assert_eq!(
            Some(("Jelmer".to_string(), "jelmer@example.com".to_string())),
            t
        );
    }

    #[test]
    fn test_email_var() {
        let mut d = std::collections::HashMap::new();
        d.insert("NAME".to_string(), "Jelmer".to_string());
        d.insert("EMAIL".to_string(), "foo@example.com".to_string());
        let t = get_maintainer_from_env(|s| d.get(s).cloned());
        assert_eq!(
            Some(("Jelmer".to_string(), "foo@example.com".to_string())),
            t
        );
    }
}

/// Simple representation of an identity.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Identity {
    /// Name of the maintainer
    pub name: String,
    /// Email address of the maintainer
    pub email: String,
}

impl Identity {
    /// Create a new identity.
    pub fn new(name: String, email: String) -> Self {
        Self { name, email }
    }

    /// Get the maintainer information from the environment.
pub fn from_env() -> Option { get_maintainer().map(|(name, email)| Self { name, email }) } } impl From<(String, String)> for Identity { fn from((name, email): (String, String)) -> Self { Self { name, email } } } impl std::fmt::Display for Identity { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{} <{}>", self.name, self.email) } } /// Constant for the unreleased distribution name pub const UNRELEASED: &str = "UNRELEASED"; /// Prefix for unreleased distribution variants const UNRELEASED_PREFIX: &str = "UNRELEASED-"; /// Check if the given distribution marks an unreleased entry. pub fn distribution_is_unreleased(distribution: &str) -> bool { distribution == UNRELEASED || distribution.starts_with(UNRELEASED_PREFIX) } /// Check if any of the given distributions marks an unreleased entry. pub fn distributions_is_unreleased(distributions: &[&str]) -> bool { distributions.iter().any(|x| distribution_is_unreleased(x)) } #[test] fn test_distributions_is_unreleased() { assert!(distributions_is_unreleased(&["UNRELEASED"])); assert!(distributions_is_unreleased(&[ "UNRELEASED-1", "UNRELEASED-2" ])); assert!(distributions_is_unreleased(&["UNRELEASED", "UNRELEASED-2"])); assert!(!distributions_is_unreleased(&["stable"])); } /// Check whether this is a traditional inaugural release pub fn is_unreleased_inaugural(cl: &ChangeLog) -> bool { let mut entries = cl.iter(); if let Some(entry) = entries.next() { if entry.is_unreleased() == Some(false) { return false; } let changes = entry.change_lines().collect::>(); if changes.len() > 1 || !changes[0].starts_with("* Initial release") { return false; } entries.next().is_none() } else { false } } #[cfg(test)] mod is_unreleased_inaugural_tests { use super::*; #[test] fn test_empty() { assert!(!is_unreleased_inaugural(&ChangeLog::new())); } #[test] fn test_unreleased_inaugural() { let mut cl = ChangeLog::new(); cl.new_entry() .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into())) 
            .distribution(UNRELEASED.to_string())
            .version("1.0.0".parse().unwrap())
            .change_line("* Initial release".to_string())
            .finish();
        assert!(is_unreleased_inaugural(&cl));
    }

    #[test]
    fn test_not_unreleased_inaugural() {
        let mut cl = ChangeLog::new();
        cl.new_entry()
            .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into()))
            .distributions(vec!["unstable".to_string()])
            .version("1.0.0".parse().unwrap())
            .change_line("* Initial release".to_string())
            .finish();
        assert_eq!(cl.iter().next().unwrap().is_unreleased(), Some(false)); // Not unreleased
        assert!(!is_unreleased_inaugural(&cl));

        cl.new_entry()
            .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into()))
            .distribution(UNRELEASED.to_string())
            .version("1.0.1".parse().unwrap())
            .change_line("* Some change".to_string())
            .finish();
        // Not inaugural: there is more than one entry now.
        assert!(!is_unreleased_inaugural(&cl));
    }
}

// Distribution used when no previous entry provides one.
const DEFAULT_DISTRIBUTION: &[&str] = &[UNRELEASED];

/// Create a release for a changelog file.
///
/// # Arguments
/// * `cl` - The changelog to release
/// * `distribution` - The distribution to release to. If None, the distribution
///   of the previous entry is used.
/// * `timestamp` - The timestamp to use for the release. If None, the current time is used (requires chrono feature).
/// * `maintainer` - The maintainer to use for the release. If None, the maintainer
///   is extracted from the environment.
///
/// # Returns
/// Whether a release was created.
///
/// # Panics
/// Panics if timestamp is None and the chrono feature is not enabled.
pub fn release(
    cl: &mut ChangeLog,
    distribution: Option<Vec<String>>,
    // NOTE(review): the generic argument of this parameter was stripped by the
    // text extraction; reconstructed as `impl IntoTimestamp` based on the
    // `ts.into_timestamp()` call below — confirm against the crate's trait.
    timestamp: Option<impl IntoTimestamp>,
    maintainer: Option<(String, String)>,
) -> bool {
    let mut entries = cl.iter();
    let mut first_entry = entries.next().unwrap();
    let second_entry = entries.next();
    let distribution = distribution.unwrap_or_else(|| {
        // Inherit from previous entry
        second_entry
            .and_then(|e| e.distributions())
            .unwrap_or_else(|| {
                DEFAULT_DISTRIBUTION
                    .iter()
                    .map(|s| s.to_string())
                    .collect::<Vec<_>>()
            })
    });
    // NOTE(review): this branch fires when the first entry is already released
    // (`is_unreleased() == Some(false)`), which reads as the inverse of the
    // doc comment above ("Whether a release was created"). The unit tests in
    // this file assert exactly this behavior, so it is left unchanged —
    // confirm the intended semantics before flipping the condition.
    if first_entry.is_unreleased() == Some(false) {
        take_uploadership(&mut first_entry, maintainer);
        first_entry.set_distributions(distribution);
        let timestamp_str = if let Some(ts) = timestamp {
            ts.into_timestamp()
        } else {
            #[cfg(feature = "chrono")]
            {
                chrono::offset::Utc::now().into_timestamp()
            }
            #[cfg(not(feature = "chrono"))]
            {
                panic!("timestamp is required when chrono feature is disabled");
            }
        };
        first_entry.set_timestamp(timestamp_str);
        true
    } else {
        false
    }
}

/// Take uploadership of a changelog entry, but attribute contributors.
///
/// # Arguments
/// * `entry` - Changelog entry to modify
/// * `maintainer` - Tuple with (name, email) of maintainer to take ownership
pub fn take_uploadership(entry: &mut Entry, maintainer: Option<(String, String)>) {
    let (maintainer_name, maintainer_email) = if let Some(m) = maintainer {
        m
    } else {
        get_maintainer().unwrap()
    };
    // If someone else currently owns the entry and the changes already use
    // "[ Author ]" sections, record the current maintainer as a section title.
    if let (Some(current_maintainer), Some(current_email)) = (entry.maintainer(), entry.email()) {
        if current_maintainer != maintainer_name || current_email != maintainer_email {
            if let Some(first_line) = entry.change_lines().next() {
                if first_line.starts_with("[ ") {
                    entry.prepend_change_line(
                        crate::changes::format_section_title(current_maintainer.as_str()).as_str(),
                    );
                }
            }
        }
    }
    entry.set_maintainer((maintainer_name, maintainer_email));
}

/// Update changelog with commit messages from commits
pub fn gbp_dch(path: &std::path::Path) -> std::result::Result<(), std::io::Error> {
    // Run the "gbp dch" command with working copy at `path`
    let output =
std::process::Command::new("gbp")
        .arg("dch")
        .arg("--ignore-branch")
        .current_dir(path)
        .output()?;
    if !output.status.success() {
        // Surface gbp's stderr so the caller can see why dch failed.
        return Err(std::io::Error::other(format!(
            "gbp dch failed: {}",
            String::from_utf8_lossy(&output.stderr)
        )));
    }
    Ok(())
}

/// Iterator over changelog entries grouped by author (maintainer).
///
/// This function returns an iterator that groups changelog entries by their maintainer
/// (author), similar to debmutate.changelog functionality.
///
/// # Arguments
/// * `changelog` - The changelog to iterate over
///
/// # Returns
/// An iterator over tuples of (author_name, author_email, Vec<Entry>)
pub fn iter_entries_by_author(
    changelog: &ChangeLog,
) -> impl Iterator<Item = (String, String, Vec<Entry>)> + '_ {
    use std::collections::BTreeMap;

    // BTreeMap gives a deterministic (sorted-by-name/email) grouping order.
    let mut grouped: BTreeMap<(String, String), Vec<Entry>> = BTreeMap::new();
    for entry in changelog.iter() {
        let maintainer_name = entry.maintainer().unwrap_or_else(|| "Unknown".to_string());
        let maintainer_email = entry
            .email()
            .unwrap_or_else(|| "unknown@unknown".to_string());
        let key = (maintainer_name, maintainer_email);
        grouped.entry(key).or_default().push(entry);
    }
    grouped
        .into_iter()
        .map(|((name, email), entries)| (name, email, entries))
}

/// Iterator over all changes across all entries, grouped by author.
///
/// This function iterates through all entries in a changelog and returns changes
/// grouped by their attributed authors, including those in author sections like [ Author Name ].
/// /// # Arguments /// * `changelog` - The changelog to iterate over /// /// # Returns /// A vector of Change objects that can be manipulated or filtered pub fn iter_changes_by_author(changelog: &ChangeLog) -> Vec { let mut result = Vec::new(); for entry in changelog.iter() { let changes: Vec = entry.change_lines().map(|s| s.to_string()).collect(); // Collect all DETAIL tokens from the entry with their text let all_detail_tokens: Vec = entry .syntax() .children() .flat_map(|n| { n.children_with_tokens() .filter_map(|it| it.as_token().cloned()) .filter(|token| token.kind() == SyntaxKind::DETAIL) }) .collect(); // Track which tokens have been used to avoid matching duplicates to the same token let mut token_index = 0; for (author, linenos, lines) in crate::changes::changes_by_author(changes.iter().map(|s| s.as_str())) { let author_name = author.map(|s| s.to_string()); // Extract the specific DETAIL tokens for this change by matching text content // We iterate through tokens in order to handle duplicate lines correctly let detail_tokens: Vec = lines .iter() .filter_map(|line_text| { // Find the next token matching this line's text while token_index < all_detail_tokens.len() { let token = &all_detail_tokens[token_index]; token_index += 1; if token.text() == *line_text { return Some(token.clone()); } } None }) .collect(); let change = Change::new(entry.clone(), author_name, linenos, detail_tokens); result.push(change); } } result } #[cfg(test)] mod tests { use super::*; #[test] fn test_parseaddr() { assert_eq!( (Some("Jelmer"), "jelmer@jelmer.uk"), parseaddr("Jelmer ") ); assert_eq!((None, "jelmer@jelmer.uk"), parseaddr("jelmer@jelmer.uk")); } #[test] fn test_parseaddr_empty() { assert_eq!((None, ""), parseaddr("")); } #[test] #[cfg(feature = "chrono")] fn test_release_already_released() { use crate::parse::ChangeLog; let mut changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. 
-- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let result = release( &mut changelog, Some(vec!["unstable".to_string()]), None::, None, ); // The function returns true if the entry is NOT unreleased (already released) assert!(result); } #[test] #[cfg(feature = "chrono")] fn test_release_unreleased() { use crate::parse::ChangeLog; let mut changelog: ChangeLog = r#"breezy (3.3.4-1) UNRELEASED; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let result = release( &mut changelog, Some(vec!["unstable".to_string()]), None::, Some(("Test User".to_string(), "test@example.com".to_string())), ); // The function returns false if the entry is unreleased assert!(!result); } #[test] fn test_take_uploadership_same_maintainer() { use crate::parse::ChangeLog; let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Test User Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let mut entries: Vec = changelog.into_iter().collect(); take_uploadership( &mut entries[0], Some(("Test User".to_string(), "test@example.com".to_string())), ); // Should not add author section when maintainer is the same assert!(!entries[0].to_string().contains("[ Test User ]")); } #[test] fn test_take_uploadership_different_maintainer() { use crate::parse::ChangeLog; let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. 
-- Original User Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let mut entries: Vec = changelog.into_iter().collect(); take_uploadership( &mut entries[0], Some(("New User".to_string(), "new@example.com".to_string())), ); // The take_uploadership function updates the maintainer in the footer assert!(entries[0] .to_string() .contains("New User ")); assert_eq!(entries[0].email(), Some("new@example.com".to_string())); } #[test] fn test_identity_display() { let identity = Identity { name: "Test User".to_string(), email: "test@example.com".to_string(), }; assert_eq!(identity.to_string(), "Test User "); let identity_empty_name = Identity { name: "".to_string(), email: "test@example.com".to_string(), }; assert_eq!(identity_empty_name.to_string(), " "); } #[test] fn test_gbp_dch_failure() { // Test with invalid path that would cause gbp dch to fail let result = gbp_dch(std::path::Path::new("/nonexistent/path")); assert!(result.is_err()); } #[test] fn test_iter_entries_by_author() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 breezy (3.3.3-1) unstable; urgency=low * Bug fix release. -- Jelmer Vernooij Sun, 03 Sep 2023 17:12:30 -0500 breezy (3.3.2-1) unstable; urgency=low * Another release. 
-- Jane Doe Sat, 02 Sep 2023 16:11:15 -0500 "# .parse() .unwrap(); let authors: Vec<(String, String, Vec)> = iter_entries_by_author(&changelog).collect(); assert_eq!(authors.len(), 2); assert_eq!(authors[0].0, "Jane Doe"); assert_eq!(authors[0].1, "jane@example.com"); assert_eq!(authors[0].2.len(), 1); assert_eq!(authors[1].0, "Jelmer Vernooij"); assert_eq!(authors[1].1, "jelmer@debian.org"); assert_eq!(authors[1].2.len(), 2); } #[test] fn test_iter_changes_by_author() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Author 1 ] * Change by Author 1 [ Author 2 ] * Change by Author 2 * Unattributed change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 3); // First change attributed to Author 1 assert_eq!(changes[0].author(), Some("Author 1")); assert_eq!(changes[0].package(), Some("breezy".to_string())); assert_eq!(changes[0].lines(), vec!["* Change by Author 1"]); // Second change attributed to Author 2 assert_eq!(changes[1].author(), Some("Author 2")); assert_eq!(changes[1].package(), Some("breezy".to_string())); assert_eq!(changes[1].lines(), vec!["* Change by Author 2"]); // Third change unattributed assert_eq!(changes[2].author(), None); assert_eq!(changes[2].package(), Some("breezy".to_string())); assert_eq!(changes[2].lines(), vec!["* Unattributed change"]); } #[test] fn test_change_remove() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Author 1 ] * Change by Author 1 [ Author 2 ] * Change by Author 2 * Unattributed change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 3); // Remove the second change (Author 2) changes[1].clone().remove(); // Re-read the changes let remaining_changes = iter_changes_by_author(&changelog); // The Author 2 section header remains but with no changes, // so it will show up as an 
empty change for Author 2, // followed by the unattributed change assert_eq!(remaining_changes.len(), 2); // Should have Author 1 and Author 2 (but with no lines) assert_eq!(remaining_changes[0].author(), Some("Author 1")); assert_eq!(remaining_changes[0].lines(), vec!["* Change by Author 1"]); // Author 2's section header remains but the change is removed assert_eq!(remaining_changes[1].author(), Some("Author 2")); assert_eq!(remaining_changes[1].lines(), vec!["* Unattributed change"]); } #[test] fn test_change_replace_with() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Author 1 ] * Change by Author 1 [ Author 2 ] * Change by Author 2 -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 2); // Replace Author 2's change changes[1].replace_with(vec!["* Updated change by Author 2", "* Another line"]); // Re-read the changes let updated_changes = iter_changes_by_author(&changelog); assert_eq!(updated_changes.len(), 2); // Author 1's change should be unchanged assert_eq!(updated_changes[0].author(), Some("Author 1")); assert_eq!(updated_changes[0].lines(), vec!["* Change by Author 1"]); // Author 2's change should be replaced assert_eq!(updated_changes[1].author(), Some("Author 2")); assert_eq!( updated_changes[1].lines(), vec!["* Updated change by Author 2", "* Another line"] ); } #[test] fn test_change_replace_with_single_line() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * Old change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 1); // Replace with a new single line changes[0].replace_with(vec!["* New change"]); // Re-read the changes let updated_changes = iter_changes_by_author(&changelog); assert_eq!(updated_changes.len(), 1); assert_eq!(updated_changes[0].lines(), vec!["* New change"]); } #[test] fn 
test_change_accessors() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Alice ] * Change by Alice -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 1); let change = &changes[0]; // Test all accessors assert_eq!(change.author(), Some("Alice")); assert_eq!(change.package(), Some("breezy".to_string())); assert_eq!( change.version().map(|v| v.to_string()), Some("3.3.4-1".to_string()) ); assert_eq!(change.is_attributed(), true); assert_eq!(change.lines(), vec!["* Change by Alice"]); // Test entry accessor assert_eq!(change.entry().package(), Some("breezy".to_string())); } #[test] fn test_change_unattributed_accessors() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * Unattributed change -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 1); let change = &changes[0]; assert_eq!(change.author(), None); assert_eq!(change.is_attributed(), false); } #[test] fn test_replace_single_line_with_multiple() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * Single line change -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); changes[0].replace_with(vec!["* First line", "* Second line", "* Third line"]); let updated = iter_changes_by_author(&changelog); assert_eq!( updated[0].lines(), vec!["* First line", "* Second line", "* Third line"] ); } #[test] fn test_replace_multiple_lines_with_single() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First line * Second line * Third line -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes[0].lines().len(), 3); changes[0].replace_with(vec!["* Single replacement line"]); let updated = iter_changes_by_author(&changelog); 
assert_eq!(updated[0].lines(), vec!["* Single replacement line"]); } #[test] fn test_split_into_bullets_single_line() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change * Second change * Third change -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 1); // Split the single Change into individual bullets let bullets = changes[0].split_into_bullets(); assert_eq!(bullets.len(), 3); assert_eq!(bullets[0].lines(), vec!["* First change"]); assert_eq!(bullets[1].lines(), vec!["* Second change"]); assert_eq!(bullets[2].lines(), vec!["* Third change"]); // Each bullet should have the same package and version for bullet in &bullets { assert_eq!(bullet.package(), Some("breezy".to_string())); assert_eq!( bullet.version().map(|v| v.to_string()), Some("3.3.4-1".to_string()) ); } } #[test] fn test_split_into_bullets_with_continuations() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change with a continuation line * Second change with multiple continuation lines * Third change -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 1); let bullets = changes[0].split_into_bullets(); assert_eq!(bullets.len(), 3); assert_eq!( bullets[0].lines(), vec!["* First change", " with a continuation line"] ); assert_eq!( bullets[1].lines(), vec!["* Second change", " with multiple", " continuation lines"] ); assert_eq!(bullets[2].lines(), vec!["* Third change"]); } #[test] fn test_split_into_bullets_mixed() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * Single line bullet * Multi-line bullet with continuation * Another single line -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); let bullets = changes[0].split_into_bullets(); assert_eq!(bullets.len(), 3); 
assert_eq!(bullets[0].lines(), vec!["* Single line bullet"]); assert_eq!( bullets[1].lines(), vec!["* Multi-line bullet", " with continuation"] ); assert_eq!(bullets[2].lines(), vec!["* Another single line"]); } #[test] fn test_split_into_bullets_with_author() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Alice ] * Change by Alice * Another change by Alice [ Bob ] * Change by Bob -- Maintainer Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 2); // Split Alice's changes let alice_bullets = changes[0].split_into_bullets(); assert_eq!(alice_bullets.len(), 2); assert_eq!(alice_bullets[0].lines(), vec!["* Change by Alice"]); assert_eq!(alice_bullets[1].lines(), vec!["* Another change by Alice"]); // Both bullets should preserve the author for bullet in &alice_bullets { assert_eq!(bullet.author(), Some("Alice")); } // Split Bob's changes let bob_bullets = changes[1].split_into_bullets(); assert_eq!(bob_bullets.len(), 1); assert_eq!(bob_bullets[0].lines(), vec!["* Change by Bob"]); assert_eq!(bob_bullets[0].author(), Some("Bob")); } #[test] fn test_split_into_bullets_single_bullet() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * Single bullet point -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); let bullets = changes[0].split_into_bullets(); assert_eq!(bullets.len(), 1); assert_eq!(bullets[0].lines(), vec!["* Single bullet point"]); } #[test] fn test_split_into_bullets_and_remove() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change * Duplicate change * Duplicate change * Last change -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); let bullets = changes[0].split_into_bullets(); assert_eq!(bullets.len(), 4); // Remove the second duplicate (index 2) bullets[2].clone().remove(); // 
Re-read and verify let updated_changes = iter_changes_by_author(&changelog); let updated_bullets = updated_changes[0].split_into_bullets(); assert_eq!(updated_bullets.len(), 3); assert_eq!(updated_bullets[0].lines(), vec!["* First change"]); assert_eq!(updated_bullets[1].lines(), vec!["* Duplicate change"]); assert_eq!(updated_bullets[2].lines(), vec!["* Last change"]); } #[test] fn test_split_into_bullets_preserves_line_numbers() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change * Second change * Third change -- Bob Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); let bullets = changes[0].split_into_bullets(); // Each bullet should have distinct line numbers assert_eq!(bullets.len(), 3); assert_eq!(bullets[0].line_numbers().len(), 1); assert_eq!(bullets[1].line_numbers().len(), 1); assert_eq!(bullets[2].line_numbers().len(), 1); // Line numbers should be in ascending order assert!(bullets[0].line_numbers()[0] < bullets[1].line_numbers()[0]); assert!(bullets[1].line_numbers()[0] < bullets[2].line_numbers()[0]); } #[test] fn test_split_and_remove_from_multi_author_entry() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Alice ] * Change 1 by Alice * Change 2 by Alice * Change 3 by Alice [ Bob ] * Change 1 by Bob * Change 2 by Bob [ Charlie ] * Change 1 by Charlie * Unattributed change -- Maintainer Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 4); // Alice, Bob, Charlie, Unattributed // Split Alice's changes and remove the second one let alice_bullets = changes[0].split_into_bullets(); assert_eq!(alice_bullets.len(), 3); alice_bullets[1].clone().remove(); // Remove "Change 2 by Alice" // Re-read and verify let updated_changes = iter_changes_by_author(&changelog); assert_eq!(updated_changes.len(), 4); // Alice should now have 2 changes let updated_alice_bullets = 
updated_changes[0].split_into_bullets(); assert_eq!(updated_alice_bullets.len(), 2); assert_eq!( updated_alice_bullets[0].lines(), vec!["* Change 1 by Alice"] ); assert_eq!( updated_alice_bullets[1].lines(), vec!["* Change 3 by Alice"] ); assert_eq!(updated_alice_bullets[0].author(), Some("Alice")); // Bob should be unchanged let bob_bullets = updated_changes[1].split_into_bullets(); assert_eq!(bob_bullets.len(), 2); assert_eq!(bob_bullets[0].lines(), vec!["* Change 1 by Bob"]); assert_eq!(bob_bullets[1].lines(), vec!["* Change 2 by Bob"]); // Charlie should be unchanged let charlie_bullets = updated_changes[2].split_into_bullets(); assert_eq!(charlie_bullets.len(), 1); assert_eq!(charlie_bullets[0].lines(), vec!["* Change 1 by Charlie"]); // Unattributed should be unchanged let unattributed_bullets = updated_changes[3].split_into_bullets(); assert_eq!(unattributed_bullets.len(), 1); assert_eq!( unattributed_bullets[0].lines(), vec!["* Unattributed change"] ); } #[test] fn test_remove_multiple_bullets_from_different_authors() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Alice ] * Alice change 1 * Alice change 2 * Alice change 3 [ Bob ] * Bob change 1 * Bob change 2 * Bob change 3 -- Maintainer Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 2); // Remove Alice's first and third changes let alice_bullets = changes[0].split_into_bullets(); alice_bullets[0].clone().remove(); alice_bullets[2].clone().remove(); // Remove Bob's second change let bob_bullets = changes[1].split_into_bullets(); bob_bullets[1].clone().remove(); // Re-read and verify let updated_changes = iter_changes_by_author(&changelog); let updated_alice = updated_changes[0].split_into_bullets(); assert_eq!(updated_alice.len(), 1); assert_eq!(updated_alice[0].lines(), vec!["* Alice change 2"]); let updated_bob = updated_changes[1].split_into_bullets(); assert_eq!(updated_bob.len(), 2); 
assert_eq!(updated_bob[0].lines(), vec!["* Bob change 1"]); assert_eq!(updated_bob[1].lines(), vec!["* Bob change 3"]); } #[test] fn test_remove_bullet_with_continuation_from_multi_author() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Alice ] * Simple change by Alice [ Bob ] * Multi-line change by Bob with a continuation line and another continuation * Simple change by Bob [ Charlie ] * Change by Charlie -- Maintainer Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 3); // Remove Bob's multi-line change let bob_bullets = changes[1].split_into_bullets(); assert_eq!(bob_bullets.len(), 2); assert_eq!( bob_bullets[0].lines(), vec![ "* Multi-line change by Bob", " with a continuation line", " and another continuation" ] ); bob_bullets[0].clone().remove(); // Re-read and verify let updated_changes = iter_changes_by_author(&changelog); // Alice unchanged let alice_bullets = updated_changes[0].split_into_bullets(); assert_eq!(alice_bullets.len(), 1); assert_eq!(alice_bullets[0].lines(), vec!["* Simple change by Alice"]); // Bob now has only the simple change let updated_bob = updated_changes[1].split_into_bullets(); assert_eq!(updated_bob.len(), 1); assert_eq!(updated_bob[0].lines(), vec!["* Simple change by Bob"]); // Charlie unchanged let charlie_bullets = updated_changes[2].split_into_bullets(); assert_eq!(charlie_bullets.len(), 1); assert_eq!(charlie_bullets[0].lines(), vec!["* Change by Charlie"]); } #[test] fn test_remove_all_bullets_from_one_author_section() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Alice ] * Change 1 by Alice * Change 2 by Alice [ Bob ] * Change 1 by Bob -- Maintainer Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 2); // Remove all of Alice's changes let alice_bullets = changes[0].split_into_bullets(); for bullet in 
alice_bullets {
        bullet.remove();
    }

    // NOTE(review): as elsewhere in this file, the raw-string changelog
    // fixtures below are reconstructed: the received source had newlines
    // collapsed and `<e-mail>` spans stripped, so line breaks, blank-line
    // placement and addresses follow the deb-changelog(5) format and the
    // e-mails asserted earlier. TODO: confirm against the upstream crate.

    // Re-read and verify
    let updated_changes = iter_changes_by_author(&changelog);

    // Alice's section header remains but with no changes
    // Bob's section follows with its change, so only Bob's change remains
    assert_eq!(updated_changes.len(), 1);
    assert_eq!(updated_changes[0].author(), Some("Bob"));
    let bob_bullets = updated_changes[0].split_into_bullets();
    assert_eq!(bob_bullets.len(), 1);
    assert_eq!(bob_bullets[0].lines(), vec!["* Change 1 by Bob"]);

    // Verify the section header was removed from the changelog text
    let changelog_text = changelog.to_string();
    assert!(
        !changelog_text.contains("[ Alice ]"),
        "Alice's empty section header should be removed"
    );
}

#[test]
fn test_remove_section_header_with_multiple_sections() {
    let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low

  [ Alice ]
  * Alice's first section change

  [ Bob ]
  * Bob's change

  [ Alice ]
  * Alice's second section change 1
  * Alice's second section change 2

 -- Maintainer <maintainer@example.com>  Mon, 04 Sep 2023 18:13:45 -0500
"#
    .parse()
    .unwrap();

    let changes = iter_changes_by_author(&changelog);
    assert_eq!(changes.len(), 3);

    // Remove all changes from the second Alice section only
    let alice_second = &changes[2];
    assert_eq!(alice_second.author(), Some("Alice"));
    let alice_second_bullets = alice_second.split_into_bullets();
    assert_eq!(alice_second_bullets.len(), 2);

    // Remove all bullets from the second Alice section
    for bullet in alice_second_bullets {
        bullet.remove();
    }

    // Re-read and verify
    let updated_changes = iter_changes_by_author(&changelog);

    // Should now only have Alice's first section and Bob's section
    assert_eq!(updated_changes.len(), 2);
    assert_eq!(updated_changes[0].author(), Some("Alice"));
    assert_eq!(updated_changes[1].author(), Some("Bob"));

    // Verify the first Alice section is intact
    let alice_first = updated_changes[0].split_into_bullets();
    assert_eq!(alice_first.len(), 1);
    assert_eq!(
        alice_first[0].lines(),
        vec!["* Alice's first section change"]
    );

    // Verify the changelog text - second Alice header should be gone
    let changelog_text = changelog.to_string();
    let alice_header_count = changelog_text.matches("[ Alice ]").count();
    assert_eq!(
        alice_header_count, 1,
        "Should only have one Alice section header remaining"
    );
}

#[test]
fn test_remove_duplicate_from_specific_author() {
    let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low

  [ Alice ]
  * New upstream release
  * Fix typo in documentation
  * New upstream release

  [ Bob ]
  * New upstream release
  * Update dependencies

 -- Maintainer <maintainer@example.com>  Mon, 04 Sep 2023 18:13:45 -0500
"#
    .parse()
    .unwrap();

    let changes = iter_changes_by_author(&changelog);
    assert_eq!(changes.len(), 2);

    // Find and remove duplicate "New upstream release" from Alice
    let alice_bullets = changes[0].split_into_bullets();
    assert_eq!(alice_bullets.len(), 3);

    // Verify the order before removal
    assert_eq!(alice_bullets[0].lines(), vec!["* New upstream release"]);
    assert_eq!(
        alice_bullets[1].lines(),
        vec!["* Fix typo in documentation"]
    );
    assert_eq!(alice_bullets[2].lines(), vec!["* New upstream release"]);

    // Remove the duplicate (third item)
    alice_bullets[2].clone().remove();

    // Re-read and verify
    let updated_changes = iter_changes_by_author(&changelog);

    // Alice should now have 2 changes (first "New upstream release" and "Fix typo")
    let updated_alice = updated_changes[0].split_into_bullets();
    assert_eq!(updated_alice.len(), 2);
    assert_eq!(updated_alice[0].lines(), vec!["* New upstream release"]);
    assert_eq!(
        updated_alice[1].lines(),
        vec!["* Fix typo in documentation"]
    );

    // Bob should be unchanged
    let bob_bullets = updated_changes[1].split_into_bullets();
    assert_eq!(bob_bullets.len(), 2);
    assert_eq!(bob_bullets[0].lines(), vec!["* New upstream release"]);
    assert_eq!(bob_bullets[1].lines(), vec!["* Update dependencies"]);
}

#[test]
fn test_remove_empty_section_headers_and_blank_lines() {
    // Test that when all bullets are removed from a section, the section header
    // and its preceding blank line are also removed
    let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low

  [ Alice ]
  * Change 1 by Alice
  * Change 2 by Alice

  [ Bob ]
  * Change 1 by Bob

 -- Maintainer <maintainer@example.com>  Mon, 04 Sep 2023 18:13:45 -0500
"#
    .parse()
    .unwrap();

    let changes = iter_changes_by_author(&changelog);
    assert_eq!(changes.len(), 2);

    // Remove all of Alice's changes
    let alice_bullets = changes[0].split_into_bullets();
    for bullet in alice_bullets {
        bullet.remove();
    }

    // Verify Alice's section is completely gone
    let updated_changes = iter_changes_by_author(&changelog);
    assert_eq!(updated_changes.len(), 1);
    assert_eq!(updated_changes[0].author(), Some("Bob"));

    // Verify the changelog text has no Alice header or extra blank lines
    let changelog_text = changelog.to_string();
    assert!(!changelog_text.contains("[ Alice ]"));

    // Count blank lines before signature - should be exactly 1
    let lines: Vec<&str> = changelog_text.lines().collect();
    let sig_idx = lines.iter().position(|l| l.starts_with(" --")).unwrap();
    let mut blank_count = 0;
    for i in (0..sig_idx).rev() {
        if lines[i].trim().is_empty() {
            blank_count += 1;
        } else {
            break;
        }
    }
    assert_eq!(
        blank_count, 1,
        "Should have exactly 1 blank line before signature"
    );
}

#[test]
fn test_remove_first_entry_before_author_section() {
    // Test that when removing the first entry before an author section,
    // the extra newline is properly removed
    let changelog: ChangeLog = r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * Team upload.

  [ Jelmer Vernooij ]
  * blah

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000

lintian-brush (0.1-1) unstable; urgency=medium

  * Initial release. (Closes: #XXXXXX)

 -- Jelmer Vernooij <jelmer@debian.org>  Sun, 28 Oct 2018 00:09:52 +0000
"#
    .parse()
    .unwrap();

    let changes = iter_changes_by_author(&changelog);

    // Find and remove the "Team upload" entry (should be the first one, unattributed)
    let team_upload_change = changes
        .iter()
        .find(|c| c.lines().iter().any(|l| l.contains("Team upload")))
        .unwrap();
    team_upload_change.clone().remove();

    let expected = r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  [ Jelmer Vernooij ]
  * blah

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000

lintian-brush (0.1-1) unstable; urgency=medium

  * Initial release. (Closes: #XXXXXX)

 -- Jelmer Vernooij <jelmer@debian.org>  Sun, 28 Oct 2018 00:09:52 +0000
"#;
    assert_eq!(changelog.to_string(), expected);
}

// Helper function for remove tests to reduce repetition
// Splits changes into individual bullets before applying filter
fn test_remove_change(input: &str, change_filter: impl Fn(&Change) -> bool, expected: &str) {
    let changelog: ChangeLog = input.parse().unwrap();
    let changes = iter_changes_by_author(&changelog);

    // Split all changes into individual bullets
    let mut all_bullets = Vec::new();
    for change in changes {
        all_bullets.extend(change.split_into_bullets());
    }

    let change = all_bullets.iter().find(|c| change_filter(c)).unwrap();
    change.clone().remove();

    assert_eq!(changelog.to_string(), expected);
}

#[test]
fn test_remove_entry_followed_by_regular_bullet() {
    // Empty line should be preserved when followed by a regular bullet, not a section header
    test_remove_change(
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * First change.

  * Second change.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
        |c| c.lines().iter().any(|l| l.contains("First change")),
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * Second change.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
    );
}

#[test]
fn test_remove_entry_not_followed_by_empty_line() {
    // No trailing empty line to remove
    test_remove_change(
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * First change.
  * Second change.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
        |c| c.lines().iter().any(|l| l.contains("First change")),
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * Second change.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
    );
}

#[test]
fn test_remove_only_entry() {
    // Empty line before footer should be preserved
    test_remove_change(
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * Only change.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
        |c| c.lines().iter().any(|l| l.contains("Only change")),
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
    );
}

#[test]
fn test_remove_middle_entry_between_bullets() {
    // Empty lines around remaining bullets should be preserved
    test_remove_change(
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * First change.

  * Middle change.

  * Last change.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
        |c| c.lines().iter().any(|l| l.contains("Middle change")),
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * First change.

  * Last change.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
    );
}

#[test]
fn test_remove_entry_before_multiple_section_headers() {
    // Empty line before first section header should be removed
    test_remove_change(
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * Team upload.

  [ Author One ]
  * Change by author one.

  [ Author Two ]
  * Change by author two.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
        |c| c.lines().iter().any(|l| l.contains("Team upload")),
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  [ Author One ]
  * Change by author one.

  [ Author Two ]
  * Change by author two.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
    );
}

#[test]
fn test_remove_first_of_two_section_headers() {
    // Empty line before remaining section should be preserved
    test_remove_change(
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  [ Author One ]
  * Change by author one.

  [ Author Two ]
  * Change by author two.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
        |c| c.author() == Some("Author One"),
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  [ Author Two ]
  * Change by author two.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
    );
}

#[test]
fn test_remove_last_entry_no_empty_line_follows() {
    // Edge case: last entry with no trailing empty before footer
    test_remove_change(
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * First change.
  * Last change.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
        |c| c.lines().iter().any(|l| l.contains("Last change")),
        r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * First change.

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#,
    );
}

#[test]
fn test_remove_first_unattributed_before_section_exact() {
    // Exact reproduction of the lintian-brush test case
    // Using the exact sequence: iter_changes_by_author -> split_into_bullets -> remove
    let changelog: ChangeLog = r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  * Team upload.

  [ Jelmer Vernooij ]
  * blah

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#
    .parse()
    .unwrap();

    // Exact sequence from lintian-brush: iter_changes_by_author -> split_into_bullets -> remove
    let changes = iter_changes_by_author(&changelog);
    let team_upload_change = changes
        .iter()
        .find(|c| c.author().is_none() && c.lines().iter().any(|l| l.contains("Team upload")))
        .unwrap();
    let bullets = team_upload_change.split_into_bullets();
    bullets[0].clone().remove();

    let result = changelog.to_string();

    // Should have exactly one blank line after header, not two
    let expected = r#"lintian-brush (0.1-2) UNRELEASED; urgency=medium

  [ Jelmer Vernooij ]
  * blah

 -- Jelmer Vernooij <jelmer@debian.org>  Fri, 23 Nov 2018 14:00:02 +0000
"#;
    assert_eq!(result, expected);
}

#[test]
fn test_replace_with_preserves_first_blank_line() {
    // Test that replace_with preserves the blank line after the entry header
    // This reproduces the issue from debian-changelog-line-too-long/subitem test
    let changelog: ChangeLog = r#"blah (2.6.0) unstable; urgency=medium

  * New upstream release.
  * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to
    somebody who fixed it.

 -- Joe Example <joe@example.com>  Mon, 26 Feb 2018 11:31:48 -0800
"#
    .parse()
    .unwrap();

    let changes = iter_changes_by_author(&changelog);

    // Replace all changes with wrapped version
    changes[0].replace_with(vec![
        "* New upstream release.",
        "* Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to",
        "  somebody who fixed it.",
    ]);

    let result = changelog.to_string();

    // The blank line after the header should be preserved
    let expected = r#"blah (2.6.0) unstable; urgency=medium

  * New upstream release.
  * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to
    somebody who fixed it.
-- Joe Example Mon, 26 Feb 2018 11:31:48 -0800 "#; assert_eq!(result, expected); } #[test] fn test_parse_serialize_preserves_blank_line() { // Test that simply parsing and serializing preserves the blank line let input = r#"blah (2.6.0) unstable; urgency=medium * New upstream release. * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to somebody who fixed it. -- Joe Example Mon, 26 Feb 2018 11:31:48 -0800 "#; let changelog: ChangeLog = input.parse().unwrap(); let output = changelog.to_string(); assert_eq!(output, input, "Parse/serialize should not modify changelog"); } #[test] fn test_replace_with_first_entry_preserves_blank() { // Simulate what a Rust line-too-long fixer would do: // Replace the changes in the first entry with wrapped versions let changelog: ChangeLog = r#"blah (2.6.0) unstable; urgency=medium * New upstream release. * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to somebody who fixed it. -- Joe Example Mon, 26 Feb 2018 11:31:48 -0800 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 1); // Replace with wrapped version (what the fixer would do) changes[0].replace_with(vec![ "* New upstream release.", " * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to", " somebody who fixed it.", ]); let result = changelog.to_string(); // The blank line after header MUST be preserved let expected = r#"blah (2.6.0) unstable; urgency=medium * New upstream release. * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to somebody who fixed it. -- Joe Example Mon, 26 Feb 2018 11:31:48 -0800 "#; assert_eq!(result, expected); } #[test] fn test_pop_append_preserves_first_blank() { // Test the exact pattern used by the Rust line-too-long fixer: // pop all lines, then append wrapped ones let changelog: ChangeLog = r#"blah (2.6.0) unstable; urgency=medium * New upstream release. * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). 
Thanks to somebody who fixed it. -- Joe Example Mon, 26 Feb 2018 11:31:48 -0800 "# .parse() .unwrap(); let entry = changelog.iter().next().unwrap(); // Pop all change lines (simulating the fixer) while entry.pop_change_line().is_some() {} // Append wrapped lines entry.append_change_line("* New upstream release."); entry.append_change_line( " * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to", ); entry.append_change_line(" somebody who fixed it."); let result = changelog.to_string(); // The blank line after header MUST be preserved let expected = r#"blah (2.6.0) unstable; urgency=medium * New upstream release. * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to somebody who fixed it. -- Joe Example Mon, 26 Feb 2018 11:31:48 -0800 "#; assert_eq!(result, expected); } #[test] fn test_replace_line() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change * Second change * Third change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 1); // Replace the second line changes[0] .replace_line(1, "* Updated second change") .unwrap(); // Re-read and verify let updated_changes = iter_changes_by_author(&changelog); assert_eq!( updated_changes[0].lines(), vec![ "* First change", "* Updated second change", "* Third change" ] ); } #[test] fn test_replace_line_out_of_bounds() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); assert_eq!(changes.len(), 1); // Try to replace a line that doesn't exist let result = changes[0].replace_line(5, "* Updated"); assert!(result.is_err()); } #[test] fn test_update_lines() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change * Second change * Third change -- Jelmer Vernooij Mon, 04 Sep 2023 
18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); // Update lines containing "First" or "Second" let count = changes[0].update_lines( |line| line.contains("First") || line.contains("Second"), |line| format!("{} (updated)", line), ); assert_eq!(count, 2); // Verify the changes let updated_changes = iter_changes_by_author(&changelog); assert_eq!( updated_changes[0].lines(), vec![ "* First change (updated)", "* Second change (updated)", "* Third change" ] ); } #[test] fn test_update_lines_no_matches() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change * Second change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); // Update lines that don't exist let count = changes[0].update_lines( |line| line.contains("NonExistent"), |line| format!("{} (updated)", line), ); assert_eq!(count, 0); // Verify nothing changed let updated_changes = iter_changes_by_author(&changelog); assert_eq!( updated_changes[0].lines(), vec!["* First change", "* Second change"] ); } #[test] fn test_update_lines_with_continuation() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change with continuation line * Second change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); // Update the continuation line let count = changes[0].update_lines( |line| line.contains("continuation"), |line| line.replace("continuation", "updated"), ); assert_eq!(count, 1); // Verify the changes let updated_changes = iter_changes_by_author(&changelog); assert_eq!( updated_changes[0].lines(), vec!["* First change", " with updated line", "* Second change"] ); } #[test] fn test_add_bullet() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) 
.version("1.0.0".parse().unwrap()) .finish(); // Add bullets - always prepends "* " automatically entry.add_bullet("First change"); entry.add_bullet("Second change"); entry.add_bullet("Third change"); let lines: Vec<_> = entry.change_lines().collect(); assert_eq!(lines.len(), 3); assert_eq!(lines[0], "* First change"); assert_eq!(lines[1], "* Second change"); assert_eq!(lines[2], "* Third change"); } #[test] fn test_add_bullet_empty_entry() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .finish(); entry.add_bullet("Only bullet"); let lines: Vec<_> = entry.change_lines().collect(); assert_eq!(lines.len(), 1); assert_eq!(lines[0], "* Only bullet"); } #[test] fn test_add_bullet_long_text() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .finish(); // Add a bullet with text that's too long and should be wrapped entry.add_bullet("This is a very long line that exceeds the 78 column limit and should be automatically wrapped to multiple lines with proper indentation"); let lines: Vec<_> = entry.change_lines().collect(); // Should be wrapped into multiple lines assert!(lines.len() > 1); // First line should start with "* " assert!(lines[0].starts_with("* ")); // Continuation lines should start with " " (two spaces) for line in &lines[1..] 
{ assert!(line.starts_with(" ")); } // No line should exceed 78 characters for line in &lines { assert!(line.len() <= 78, "Line exceeds 78 chars: {}", line); } } #[test] fn test_add_bullet_preserves_closes() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .finish(); // Add a bullet with "Closes: #" that should not be broken entry.add_bullet("Fix a very important bug that was causing problems (Closes: #123456)"); let lines: Vec<_> = entry.change_lines().collect(); let text = lines.join(" "); // "Closes: #123456" should not be split across lines assert!(text.contains("Closes: #123456")); } #[test] fn test_add_bullet_multiple_closes() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .finish(); // Add bullet with multiple bug references entry.add_bullet("Fix several bugs (Closes: #123456, #789012)"); let lines: Vec<_> = entry.change_lines().collect(); let text = lines.join(" "); assert!(text.contains("Closes: #123456")); assert!(text.contains("#789012")); } #[test] fn test_add_bullet_preserves_lp() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .finish(); // Add bullet with Launchpad bug reference entry.add_bullet("Fix bug (LP: #123456)"); let lines: Vec<_> = entry.change_lines().collect(); let text = lines.join(" "); // "LP: #123456" should not be split assert!(text.contains("LP: #123456")); } #[test] fn test_add_bullet_with_existing_bullets() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) 
.distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .change_line("* Existing change".to_string()) .finish(); // Add more bullets entry.add_bullet("New change"); let lines: Vec<_> = entry.change_lines().collect(); assert_eq!(lines.len(), 2); assert_eq!(lines[0], "* Existing change"); assert_eq!(lines[1], "* New change"); } #[test] fn test_add_bullet_special_characters() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .finish(); entry.add_bullet("Fix issue with \"quotes\" and 'apostrophes'"); entry.add_bullet("Handle paths like /usr/bin/foo"); entry.add_bullet("Support $VARIABLES and ${EXPANSIONS}"); let lines: Vec<_> = entry.change_lines().collect(); assert_eq!(lines.len(), 3); assert!(lines[0].contains("\"quotes\"")); assert!(lines[1].contains("/usr/bin/foo")); assert!(lines[2].contains("$VARIABLES")); } #[test] fn test_add_bullet_empty_string() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .finish(); // Empty string gets filtered out by textwrap - this is expected behavior entry.add_bullet(""); let lines: Vec<_> = entry.change_lines().collect(); // textwrap filters out empty strings, so no line is added assert_eq!(lines.len(), 0); } #[test] fn test_add_bullet_url() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .finish(); // Long URL should not be broken entry.add_bullet("Update documentation at https://www.example.com/very/long/path/to/documentation/page.html"); let lines: Vec<_> = entry.change_lines().collect(); let text = lines.join(" "); 
assert!(text.contains("https://www.example.com")); } #[test] fn test_add_bullet_mixed_with_manual_changes() { let mut changelog = ChangeLog::new(); let entry = changelog .new_entry() .maintainer(("Test User".into(), "test@example.com".into())) .distribution("unstable".to_string()) .version("1.0.0".parse().unwrap()) .finish(); // Mix add_bullet with manual append_change_line entry.add_bullet("First bullet"); entry.append_change_line(" Manual continuation line"); entry.add_bullet("Second bullet"); let lines: Vec<_> = entry.change_lines().collect(); assert_eq!(lines.len(), 3); assert_eq!(lines[0], "* First bullet"); assert_eq!(lines[1], " Manual continuation line"); assert_eq!(lines[2], "* Second bullet"); } #[test] fn test_replace_line_with_continuation() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * First change with continuation line * Second change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); // Replace the continuation line changes[0] .replace_line(1, " with updated continuation") .unwrap(); let updated_changes = iter_changes_by_author(&changelog); assert_eq!( updated_changes[0].lines(), vec![ "* First change", " with updated continuation", "* Second change" ] ); } #[test] fn test_change_line_col() { let changelog: ChangeLog = r#"foo (1.0-1) unstable; urgency=low * First change * Second change -- Maintainer Mon, 01 Jan 2024 12:00:00 +0000 bar (2.0-1) experimental; urgency=high [ Alice ] * Alice's change * Alice's second change [ Bob ] * Bob's change -- Another Tue, 02 Jan 2024 13:00:00 +0000 "# .parse() .unwrap(); let changes = iter_changes_by_author(&changelog); // Total: 1 unattributed (first entry) + Alice + Bob = 3 changes assert_eq!(changes.len(), 3); // First change (unattributed) should be at line 2 (0-indexed) assert_eq!(changes[0].line(), Some(2)); assert_eq!(changes[0].column(), Some(2)); // After " " assert_eq!(changes[0].line_col(), Some((2, 2))); 
assert_eq!(changes[0].lines().len(), 2); // Two bullets in first entry // Alice's changes - starts at line 10 (after " [ Alice ]" on line 9) assert_eq!(changes[1].line(), Some(10)); assert_eq!(changes[1].column(), Some(2)); // After " " assert_eq!(changes[1].lines().len(), 2); // Two bullets // Bob's changes - starts at line 14 (after blank line and " [ Bob ]" on line 13) assert_eq!(changes[2].line(), Some(14)); assert_eq!(changes[2].column(), Some(2)); // After " " assert_eq!(changes[2].lines().len(), 1); // One bullet } } debian-changelog-0.2.14/src/parse.rs000064400000000000000000003447541046102023000153470ustar 00000000000000use crate::lex::lex; use crate::SyntaxKind; use crate::SyntaxKind::*; #[cfg(feature = "chrono")] use chrono::{DateTime, FixedOffset}; use debversion::Version; use rowan::ast::AstNode; use std::str::FromStr; /// Trait for types that can be converted to a timestamp string /// /// This trait allows both chrono DateTime types and plain strings to be used /// as timestamps in the changelog API. 
pub trait IntoTimestamp { /// Convert this value into a timestamp string in Debian changelog format fn into_timestamp(self) -> String; } impl IntoTimestamp for String { fn into_timestamp(self) -> String { self } } impl IntoTimestamp for &str { fn into_timestamp(self) -> String { self.to_string() } } #[cfg(feature = "chrono")] impl IntoTimestamp for DateTime where Tz::Offset: std::fmt::Display, { fn into_timestamp(self) -> String { const CHANGELOG_TIME_FORMAT: &str = "%a, %d %b %Y %H:%M:%S %z"; self.format(CHANGELOG_TIME_FORMAT).to_string() } } #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, PartialOrd, Ord)] /// Urgency of the changes in the changelog entry pub enum Urgency { #[default] /// Low urgency Low, /// Medium urgency Medium, /// High urgency High, /// Emergency urgency Emergency, /// Critical urgency Critical, } impl std::fmt::Display for Urgency { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { Urgency::Low => f.write_str("low"), Urgency::Medium => f.write_str("medium"), Urgency::High => f.write_str("high"), Urgency::Emergency => f.write_str("emergency"), Urgency::Critical => f.write_str("critical"), } } } impl FromStr for Urgency { type Err = ParseError; fn from_str(s: &str) -> Result { match s.to_lowercase().as_str() { "low" => Ok(Urgency::Low), "medium" => Ok(Urgency::Medium), "high" => Ok(Urgency::High), "emergency" => Ok(Urgency::Emergency), "critical" => Ok(Urgency::Critical), _ => Err(ParseError(vec![format!("invalid urgency: {}", s)])), } } } #[derive(Debug)] /// Error while reading a changelog file. 
pub enum Error { /// I/O Error Io(std::io::Error), /// Parsing error Parse(ParseError), } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match &self { Error::Io(e) => write!(f, "IO error: {}", e), Error::Parse(e) => write!(f, "Parse error: {}", e), } } } impl From for Error { fn from(e: std::io::Error) -> Self { Error::Io(e) } } impl std::error::Error for Error {} #[derive(Debug, Clone, PartialEq, Eq, Hash)] /// Error while parsing pub struct ParseError(Vec); impl std::fmt::Display for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { for err in &self.0 { writeln!(f, "{}", err)?; } Ok(()) } } impl std::error::Error for ParseError {} impl From for Error { fn from(e: ParseError) -> Self { Error::Parse(e) } } /// Second, implementing the `Language` trait teaches rowan to convert between /// these two SyntaxKind types, allowing for a nicer SyntaxNode API where /// "kinds" are values from our `enum SyntaxKind`, instead of plain u16 values. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum Lang {} impl rowan::Language for Lang { type Kind = SyntaxKind; fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind { unsafe { std::mem::transmute::(raw.0) } } fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind { kind.into() } } /// GreenNode is an immutable tree, which is cheap to change, /// but doesn't contain offsets and parent pointers. use rowan::{GreenNode, GreenToken}; /// You can construct GreenNodes by hand, but a builder /// is helpful for top-down parsers: it maintains a stack /// of currently in-progress nodes use rowan::GreenNodeBuilder; /// The result of parsing: a syntax tree and a collection of errors. /// /// This type is designed to be stored in Salsa databases as it contains /// the thread-safe `GreenNode` instead of the non-thread-safe `SyntaxNode`. 
#[derive(Debug)] pub struct Parse { green: GreenNode, errors: Vec, _ty: std::marker::PhantomData, } // The T parameter is only used as a phantom type, so we can implement Clone and PartialEq // without requiring T to implement them impl Clone for Parse { fn clone(&self) -> Self { Parse { green: self.green.clone(), errors: self.errors.clone(), _ty: std::marker::PhantomData, } } } impl PartialEq for Parse { fn eq(&self, other: &Self) -> bool { self.green == other.green && self.errors == other.errors } } impl Eq for Parse {} // Implement Send + Sync since GreenNode is thread-safe unsafe impl Send for Parse {} unsafe impl Sync for Parse {} impl Parse { /// Create a new Parse result from a GreenNode and errors pub fn new(green: GreenNode, errors: Vec) -> Self { Parse { green, errors, _ty: std::marker::PhantomData, } } /// Get the green node (thread-safe representation) pub fn green(&self) -> &GreenNode { &self.green } /// Get the syntax errors pub fn errors(&self) -> &[String] { &self.errors } /// Check if there are any errors pub fn ok(&self) -> bool { self.errors.is_empty() } /// Convert to a Result, returning the tree if there are no errors pub fn to_result(self) -> Result where T: AstNode, { if self.errors.is_empty() { let node = SyntaxNode::new_root(self.green); Ok(T::cast(node).expect("root node has wrong type")) } else { Err(ParseError(self.errors)) } } /// Convert to a Result, returning a mutable tree if there are no errors pub fn to_mut_result(self) -> Result where T: AstNode, { if self.errors.is_empty() { let node = SyntaxNode::new_root_mut(self.green); Ok(T::cast(node).expect("root node has wrong type")) } else { Err(ParseError(self.errors)) } } /// Get the parsed syntax tree, panicking if there are errors pub fn tree(&self) -> T where T: AstNode, { assert!( self.errors.is_empty(), "tried to get tree with errors: {:?}", self.errors ); let node = SyntaxNode::new_root(self.green.clone()); T::cast(node).expect("root node has wrong type") } /// Get the syntax 
node pub fn syntax_node(&self) -> SyntaxNode { SyntaxNode::new_root(self.green.clone()) } /// Get a mutable parsed syntax tree, panicking if there are errors pub fn tree_mut(&self) -> T where T: AstNode, { assert!( self.errors.is_empty(), "tried to get tree with errors: {:?}", self.errors ); let node = SyntaxNode::new_root_mut(self.green.clone()); T::cast(node).expect("root node has wrong type") } } fn parse(text: &str) -> Parse { struct Parser { /// input tokens, including whitespace, /// in *reverse* order. tokens: Vec<(SyntaxKind, String)>, /// the in-progress tree. builder: GreenNodeBuilder<'static>, /// the list of syntax errors we've accumulated /// so far. errors: Vec, } impl Parser { fn error(&mut self, msg: String) { self.builder.start_node(ERROR.into()); if self.current().is_some() { self.bump(); } self.errors.push(msg); self.builder.finish_node(); } fn parse_entry_header(&mut self) { self.builder.start_node(ENTRY_HEADER.into()); self.expect(IDENTIFIER); self.skip_ws(); if self.current() == Some(NEWLINE) { self.bump(); self.builder.finish_node(); return; } self.expect(VERSION); self.skip_ws(); self.builder.start_node(DISTRIBUTIONS.into()); loop { match self.current() { Some(IDENTIFIER) => self.bump(), Some(NEWLINE) => { self.bump(); self.builder.finish_node(); self.builder.finish_node(); return; } Some(SEMICOLON) => { break; } _ => { self.error("expected distribution or semicolon".to_string()); break; } } self.skip_ws(); } self.builder.finish_node(); self.skip_ws(); self.builder.start_node(METADATA.into()); if self.current() == Some(SEMICOLON) { self.bump(); loop { self.skip_ws(); if self.current() == Some(NEWLINE) { break; } self.builder.start_node(METADATA_ENTRY.into()); if self.current() == Some(IDENTIFIER) { self.builder.start_node(METADATA_KEY.into()); self.bump(); self.builder.finish_node(); } else { self.error("expected metadata key".to_string()); self.builder.finish_node(); break; } if self.current() == Some(EQUALS) { self.bump(); } else { 
self.error("expected equals".to_string()); self.builder.finish_node(); break; } if self.current() == Some(IDENTIFIER) { self.builder.start_node(METADATA_VALUE.into()); self.bump(); // Handle old-style metadata values that may contain spaces and multiple tokens // e.g., "closes=53715 56047 56607" loop { match (self.current(), self.next()) { // Stop if we see a new key=value pattern (IDENTIFIER followed by EQUALS) (Some(WHITESPACE), Some(IDENTIFIER)) => { // Look further ahead to see if there's an EQUALS after the identifier // If there is, this is a new metadata entry, so stop here // Otherwise, consume the whitespace and identifier as part of the value if self.tokens.len() >= 3 { if let Some((kind, _)) = self.tokens.get(self.tokens.len() - 3) { if *kind == EQUALS { break; // Next token starts a new metadata entry } } } self.bump(); // consume whitespace } (Some(WHITESPACE), _) => self.bump(), (Some(IDENTIFIER), _) => self.bump(), _ => break, } } self.builder.finish_node(); } else { self.error("expected metadata value".to_string()); self.builder.finish_node(); break; } self.builder.finish_node(); // Skip comma separators (old-style format) self.skip_ws(); if self.current() == Some(ERROR) { // Peek at the token text to see if it's a comma if let Some((_, text)) = self.tokens.last() { if text == "," { self.bump(); // consume the comma continue; } } } } } else if self.current() == Some(NEWLINE) { } else { self.error("expected semicolon or newline".to_string()); } self.builder.finish_node(); self.expect(NEWLINE); self.builder.finish_node(); } fn parse_entry(&mut self) { self.builder.start_node(ENTRY.into()); self.parse_entry_header(); loop { match self .tokens .last() .map(|(kind, token)| (kind, token.as_str())) { None => { // End of file - entry without footer is valid break; } // empty line Some((NEWLINE, _)) => { self.builder.start_node(EMPTY_LINE.into()); self.bump(); self.builder.finish_node(); } // details Some((INDENT, " ")) => { self.parse_entry_detail(); } // 
footer Some((INDENT, " -- ")) => { self.parse_entry_footer(); break; } _ => break, } } self.builder.finish_node(); } pub fn parse_entry_detail(&mut self) { self.builder.start_node(ENTRY_BODY.into()); self.expect(INDENT); match self.current() { Some(DETAIL) => { self.bump(); } Some(NEWLINE) => {} _ => { self.error("expected detail".to_string()); } } self.expect(NEWLINE); self.builder.finish_node(); } pub fn parse_entry_footer(&mut self) { self.builder.start_node(ENTRY_FOOTER.into()); if self.current() != Some(INDENT) { self.error("expected indent".to_string()); } else { let dashes = &self.tokens.last().unwrap().1; if dashes != " -- " { self.error("expected --".to_string()); } else { self.bump(); } } self.builder.start_node(MAINTAINER.into()); while self.current() == Some(TEXT) || (self.current() == Some(WHITESPACE) && self.next() != Some(EMAIL)) { self.bump(); } self.builder.finish_node(); if self.current().is_some() && self.current() != Some(NEWLINE) { self.expect(WHITESPACE); } if self.current().is_some() && self.current() != Some(NEWLINE) { self.expect(EMAIL); } if self.tokens.last().map(|(k, t)| (*k, t.as_str())) == Some((WHITESPACE, " ")) { self.bump(); } else if self.current() == Some(WHITESPACE) { self.error("expected two spaces".to_string()); } else if self.current() == Some(NEWLINE) { self.bump(); self.builder.finish_node(); return; } else { self.error(format!("expected whitespace, got {:?}", self.current())); } self.builder.start_node(TIMESTAMP.into()); loop { if self.current() != Some(TEXT) && self.current() != Some(WHITESPACE) { break; } self.bump(); } self.builder.finish_node(); self.expect(NEWLINE); self.builder.finish_node(); } fn parse(mut self) -> Parse { self.builder.start_node(ROOT.into()); loop { match self.current() { None => break, Some(NEWLINE) => { self.builder.start_node(EMPTY_LINE.into()); self.bump(); self.builder.finish_node(); } Some(COMMENT) => { self.bump(); } Some(IDENTIFIER) => { self.parse_entry(); } t => { 
self.error(format!("unexpected token {:?}", t)); break; } } } // Close the root node. self.builder.finish_node(); // Turn the builder into a GreenNode Parse::new(self.builder.finish(), self.errors) } /// Advance one token, adding it to the current branch of the tree builder. fn bump(&mut self) { let (kind, text) = self.tokens.pop().unwrap(); self.builder.token(kind.into(), text.as_str()); } /// Peek at the first unprocessed token fn current(&self) -> Option { self.tokens.last().map(|(kind, _)| *kind) } fn next(&self) -> Option { self.tokens .get(self.tokens.len() - 2) .map(|(kind, _)| *kind) } fn expect(&mut self, expected: SyntaxKind) { if self.current() != Some(expected) { self.error(format!("expected {:?}, got {:?}", expected, self.current())); } else { self.bump(); } } fn skip_ws(&mut self) { while self.current() == Some(WHITESPACE) { self.bump() } } } let mut tokens = lex(text); tokens.reverse(); Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new(), } .parse() } // To work with the parse results we need a view into the // green tree - the Syntax tree. // It is also immutable, like a GreenNode, // but it contains parent pointers, offsets, and // has identity semantics. pub type SyntaxNode = rowan::SyntaxNode; #[allow(unused)] pub type SyntaxToken = rowan::SyntaxToken; #[allow(unused)] type SyntaxElement = rowan::NodeOrToken; /// Calculate line and column (both 0-indexed) for the given offset in the tree. /// Column is measured in bytes from the start of the line. 
pub(crate) fn line_col_at_offset(node: &SyntaxNode, offset: rowan::TextSize) -> (usize, usize) { let root = node.ancestors().last().unwrap_or_else(|| node.clone()); let mut line = 0; let mut last_newline_offset = rowan::TextSize::from(0); for element in root.preorder_with_tokens() { if let rowan::WalkEvent::Enter(rowan::NodeOrToken::Token(token)) = element { if token.text_range().start() >= offset { break; } // Count newlines and track position of last one for (idx, _) in token.text().match_indices('\n') { line += 1; last_newline_offset = token.text_range().start() + rowan::TextSize::from((idx + 1) as u32); } } } let column: usize = (offset - last_newline_offset).into(); (line, column) } macro_rules! ast_node { ($ast:ident, $kind:ident) => { #[derive(Debug, Clone, PartialEq, Eq, Hash)] #[repr(transparent)] /// A node in the changelog syntax tree. pub struct $ast(SyntaxNode); impl AstNode for $ast { type Language = Lang; fn can_cast(kind: SyntaxKind) -> bool { kind == $kind } fn cast(syntax: SyntaxNode) -> Option { if Self::can_cast(syntax.kind()) { Some(Self(syntax)) } else { None } } fn syntax(&self) -> &SyntaxNode { &self.0 } } impl $ast { #[allow(dead_code)] fn replace_root(&mut self, new_root: SyntaxNode) { self.0 = Self::cast(new_root).unwrap().0; } /// Get the line number (0-indexed) where this node starts. pub fn line(&self) -> usize { line_col_at_offset(&self.0, self.0.text_range().start()).0 } /// Get the column number (0-indexed, in bytes) where this node starts. pub fn column(&self) -> usize { line_col_at_offset(&self.0, self.0.text_range().start()).1 } /// Get both line and column (0-indexed) where this node starts. /// Returns (line, column) where column is measured in bytes from the start of the line. 
pub fn line_col(&self) -> (usize, usize) { line_col_at_offset(&self.0, self.0.text_range().start()) } } impl std::fmt::Display for $ast { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.write_str(self.0.text().to_string().as_str()) } } }; } ast_node!(ChangeLog, ROOT); ast_node!(Entry, ENTRY); ast_node!(EntryHeader, ENTRY_HEADER); ast_node!(EntryBody, ENTRY_BODY); ast_node!(EntryFooter, ENTRY_FOOTER); ast_node!(Maintainer, MAINTAINER); ast_node!(Timestamp, TIMESTAMP); ast_node!(MetadataEntry, METADATA_ENTRY); ast_node!(MetadataKey, METADATA_KEY); ast_node!(MetadataValue, METADATA_VALUE); impl MetadataEntry { /// Returns the key of the metadata entry. pub fn key(&self) -> Option { self.0 .children() .find_map(MetadataKey::cast) .map(|k| k.to_string()) } /// Returns the value of the metadata entry. pub fn value(&self) -> Option { self.0 .children() .find_map(MetadataValue::cast) .map(|k| k.to_string()) } /// Sets the value of the metadata entry. pub fn set_value(&mut self, value: &str) { let node = self .0 .children_with_tokens() .find(|it| it.kind() == METADATA_VALUE); let mut builder = GreenNodeBuilder::new(); builder.start_node(METADATA_VALUE.into()); builder.token(IDENTIFIER.into(), value); builder.finish_node(); let root = SyntaxNode::new_root_mut(builder.finish()); let range = if let Some(node) = node { node.index()..node.index() + 1 } else { let count = self.0.children().count(); count..count }; self.0.splice_children(range, vec![root.into()]); } } /// A builder for a changelog entry. 
pub struct EntryBuilder { root: SyntaxNode, package: Option, version: Option, distributions: Option>, urgency: Option, maintainer: Option<(String, String)>, timestamp_string: Option, change_lines: Vec, } impl EntryBuilder { /// Set the package name #[must_use] pub fn package(mut self, package: String) -> Self { self.package = Some(package); self } /// Set the package version #[must_use] pub fn version(mut self, version: Version) -> Self { self.version = Some(version); self } /// Set the distribution(s) #[must_use] pub fn distributions(mut self, distributions: Vec) -> Self { self.distributions = Some(distributions); self } #[must_use] pub fn distribution(mut self, distribution: String) -> Self { self.distributions .get_or_insert_with(Vec::new) .push(distribution); self } #[must_use] pub fn urgency(mut self, urgency: Urgency) -> Self { self.urgency = Some(urgency); self } #[must_use] pub fn maintainer(mut self, maintainer: (String, String)) -> Self { self.maintainer = Some(maintainer); self } /// Set the timestamp (accepts chrono::DateTime or String) #[must_use] pub fn datetime(mut self, timestamp: impl IntoTimestamp) -> Self { self.timestamp_string = Some(timestamp.into_timestamp()); self } #[must_use] pub fn change_line(mut self, line: String) -> Self { self.change_lines.push(line); self } pub fn verify(&self) -> Result<(), String> { if self.package.is_none() { return Err("package is required".to_string()); } if self.version.is_none() { return Err("version is required".to_string()); } match self.distributions { None => { return Err("at least one distribution is required".to_string()); } Some(ref distributions) => { if distributions.is_empty() { return Err("at least one distribution is required".to_string()); } } } if self.change_lines.is_empty() { return Err("at least one change line is required".to_string()); } Ok(()) } fn metadata(&self) -> impl Iterator { let mut ret = vec![]; if let Some(urgency) = self.urgency.as_ref() { ret.push(("urgency".to_string(), 
urgency.to_string())); } ret.into_iter() } pub fn finish(self) -> Entry { if self.root.children().find_map(Entry::cast).is_some() { let mut builder = GreenNodeBuilder::new(); builder.start_node(EMPTY_LINE.into()); builder.token(NEWLINE.into(), "\n"); builder.finish_node(); let syntax = SyntaxNode::new_root_mut(builder.finish()); self.root.splice_children(0..0, vec![syntax.into()]); } let mut builder = GreenNodeBuilder::new(); builder.start_node(ENTRY.into()); builder.start_node(ENTRY_HEADER.into()); if let Some(package) = self.package.as_ref() { builder.token(IDENTIFIER.into(), package.as_str()); } if let Some(version) = self.version.as_ref() { builder.token(WHITESPACE.into(), " "); builder.token(VERSION.into(), format!("({})", version).as_str()); } if let Some(distributions) = self.distributions.as_ref() { builder.token(WHITESPACE.into(), " "); builder.start_node(DISTRIBUTIONS.into()); let mut it = distributions.iter().peekable(); while it.peek().is_some() { builder.token(IDENTIFIER.into(), it.next().unwrap()); if it.peek().is_some() { builder.token(WHITESPACE.into(), " "); } } builder.finish_node(); // DISTRIBUTIONS } let mut metadata = self.metadata().peekable(); if metadata.peek().is_some() { builder.token(SEMICOLON.into(), ";"); builder.token(WHITESPACE.into(), " "); builder.start_node(METADATA.into()); for (key, value) in metadata { builder.start_node(METADATA_ENTRY.into()); builder.start_node(METADATA_KEY.into()); builder.token(IDENTIFIER.into(), key.as_str()); builder.finish_node(); // METADATA_KEY builder.token(EQUALS.into(), "="); builder.start_node(METADATA_VALUE.into()); builder.token(METADATA_VALUE.into(), value.as_str()); builder.finish_node(); // METADATA_VALUE builder.finish_node(); // METADATA_ENTRY } builder.finish_node(); // METADATA } builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // ENTRY_HEADER builder.start_node(EMPTY_LINE.into()); builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // EMPTY_LINE for line in 
self.change_lines { builder.start_node(ENTRY_BODY.into()); builder.token(INDENT.into(), " "); builder.token(DETAIL.into(), line.as_str()); builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // ENTRY_BODY } builder.start_node(EMPTY_LINE.into()); builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // EMPTY_LINE builder.start_node(ENTRY_FOOTER.into()); builder.token(INDENT.into(), " -- "); if let Some(maintainer) = self.maintainer.as_ref() { builder.start_node(MAINTAINER.into()); let mut it = maintainer.0.split(' ').peekable(); while let Some(p) = it.next() { builder.token(TEXT.into(), p); if it.peek().is_some() { builder.token(WHITESPACE.into(), " "); } } builder.finish_node(); // MAINTAINER } if let Some(maintainer) = self.maintainer.as_ref() { builder.token(WHITESPACE.into(), " "); builder.token(EMAIL.into(), format!("<{}>", maintainer.1).as_str()); } if let Some(timestamp) = self.timestamp_string.as_ref() { builder.token(WHITESPACE.into(), " "); builder.start_node(TIMESTAMP.into()); let mut it = timestamp.split(' ').peekable(); while let Some(p) = it.next() { builder.token(TEXT.into(), p); if it.peek().is_some() { builder.token(WHITESPACE.into(), " "); } } builder.finish_node(); // TIMESTAMP } builder.token(NEWLINE.into(), "\n"); builder.finish_node(); // ENTRY_FOOTER builder.finish_node(); // ENTRY let syntax = SyntaxNode::new_root_mut(builder.finish()); self.root.splice_children(0..0, vec![syntax.clone().into()]); Entry(syntax) } } impl IntoIterator for ChangeLog { type Item = Entry; type IntoIter = std::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { // TODO: This is inefficient self.iter().collect::>().into_iter() } } fn replay(builder: &mut GreenNodeBuilder, node: SyntaxNode) { builder.start_node(node.kind().into()); for child in node.children_with_tokens() { match child { SyntaxElement::Node(n) => replay(builder, n), SyntaxElement::Token(t) => { builder.token(t.kind().into(), t.text()); } } } builder.finish_node(); } impl FromIterator 
for ChangeLog { fn from_iter>(iter: T) -> Self { let mut builder = GreenNodeBuilder::new(); builder.start_node(ROOT.into()); for entry in iter { replay(&mut builder, entry.0.clone()); } builder.finish_node(); ChangeLog(SyntaxNode::new_root_mut(builder.finish())) } } impl ChangeLog { /// Create a new, empty changelog. pub fn new() -> ChangeLog { let mut builder = GreenNodeBuilder::new(); builder.start_node(ROOT.into()); builder.finish_node(); let syntax = SyntaxNode::new_root_mut(builder.finish()); ChangeLog(syntax) } /// Parse changelog text, returning a Parse result pub fn parse(text: &str) -> Parse { parse(text) } /// Returns an iterator over all entries in the changelog file. pub fn iter(&self) -> impl Iterator + '_ { self.0.children().filter_map(Entry::cast) } /// Returns an iterator over all entries in the changelog file. #[deprecated(since = "0.2.0", note = "use `iter` instead")] pub fn entries(&self) -> impl Iterator + '_ { self.iter() } /// Create a new, empty entry. pub fn new_empty_entry(&mut self) -> EntryBuilder { EntryBuilder { root: self.0.clone(), package: None, version: None, distributions: None, urgency: None, maintainer: None, timestamp_string: None, change_lines: vec![], } } fn first_valid_entry(&self) -> Option { self.iter().find(|entry| { entry.package().is_some() && entry.header().is_some() && entry.footer().is_some() }) } /// Return a builder for a new entry. 
pub fn new_entry(&mut self) -> EntryBuilder { let base_entry = self.first_valid_entry(); let package = base_entry .as_ref() .and_then(|first_entry| first_entry.package()); let mut version = base_entry .as_ref() .and_then(|first_entry| first_entry.version()); if let Some(version) = version.as_mut() { version.increment_debian(); } EntryBuilder { root: if self.0.is_mutable() { self.0.clone() } else { self.0.clone_for_update() }, package, version, distributions: Some(vec![crate::UNRELEASED.into()]), urgency: Some(Urgency::default()), maintainer: crate::get_maintainer(), #[cfg(feature = "chrono")] timestamp_string: Some(chrono::Utc::now().into_timestamp()), #[cfg(not(feature = "chrono"))] timestamp_string: None, change_lines: vec![], } } /// Add a change to the changelog. /// /// This will update the current changelog entry if it is considered /// unreleased. Otherwise, a new entry will be created. /// /// If there is an existing entry, the change will be added to the end of /// the entry. If the previous change was attributed to another author, /// a new section line ("[ Author Name ]") will be added as well. /// /// # Arguments /// * `change` - The change to add, e.g. &["* Fix a bug"] /// * `author` - The author of the change, e.g. ("John Doe", "john@example") /// /// # Errors /// /// Returns an error if text rewrapping fails. 
pub fn try_auto_add_change( &mut self, change: &[&str], author: (String, String), datetime: Option, urgency: Option, ) -> Result { match self.first_valid_entry() { Some(entry) if entry.is_unreleased() != Some(false) => { // Add to existing entry entry.try_add_change_for_author(change, author)?; // TODO: set timestamp to std::cmp::max(entry.timestamp(), datetime) // TODO: set urgency to std::cmp::max(entry.urgency(), urgency) Ok(entry) } Some(_entry) => { // Create new entry let mut builder = self.new_entry(); builder = builder.maintainer(author); if let Some(datetime) = datetime { builder = builder.datetime(datetime); } if let Some(urgency) = urgency { builder = builder.urgency(urgency); } for change in change { builder = builder.change_line(change.to_string()); } Ok(builder.finish()) } None => { panic!("No existing entries found in changelog"); } } } /// Automatically add a change to the changelog /// /// If there is an existing entry, the change will be added to the end of /// the entry. If the previous change was attributed to another author, /// a new section line ("[ Author Name ]") will be added as well. /// /// # Deprecated /// /// This function panics on errors. Use [`ChangeLog::try_auto_add_change`] instead for proper error handling. /// /// # Panics /// /// Panics if text rewrapping fails. /// /// # Arguments /// * `change` - The change to add, e.g. &["* Fix a bug"] /// * `author` - The author of the change, e.g. ("John Doe", "john@example") #[cfg(feature = "chrono")] #[deprecated( since = "0.2.10", note = "Use try_auto_add_change for proper error handling" )] pub fn auto_add_change( &mut self, change: &[&str], author: (String, String), datetime: Option>, urgency: Option, ) -> Entry { self.try_auto_add_change(change, author, datetime, urgency) .unwrap() } /// Pop the first entry from the changelog. 
pub fn pop_first(&mut self) -> Option { let mut it = self.iter(); if let Some(entry) = it.next() { // Drop trailing newlines while let Some(sibling) = entry.0.next_sibling() { if sibling.kind() == EMPTY_LINE { sibling.detach(); } else { break; } } entry.0.detach(); Some(entry) } else { None } } /// Read a changelog file from a path pub fn read_path(path: impl AsRef) -> Result { let mut file = std::fs::File::open(path)?; Self::read(&mut file) } /// Read a changelog file from a reader pub fn read(mut r: R) -> Result { let mut buf = String::new(); r.read_to_string(&mut buf)?; Ok(buf.parse()?) } /// Read a changelog file from a reader, allowing for syntax errors pub fn read_relaxed(mut r: R) -> Result { let mut buf = String::new(); r.read_to_string(&mut buf)?; let parsed = parse(&buf); // For relaxed parsing, we ignore errors and return the tree anyway let node = SyntaxNode::new_root_mut(parsed.green().clone()); Ok(ChangeLog::cast(node).expect("root node has wrong type")) } /// Write the changelog to a writer pub fn write(&self, mut w: W) -> Result<(), Error> { let buf = self.to_string(); w.write_all(buf.as_bytes())?; Ok(()) } /// Write the changelog to a path pub fn write_to_path(&self, p: &std::path::Path) -> Result<(), Error> { let f = std::fs::File::create(p)?; self.write(f)?; Ok(()) } /// Iterator over entries grouped by their maintainer (author). /// /// Returns an iterator over tuples of (maintainer_name, maintainer_email, Vec) /// where entries with the same maintainer are grouped together. pub fn iter_by_author(&self) -> impl Iterator)> + '_ { crate::iter_entries_by_author(self) } /// Get all unique authors across all entries in the changelog. /// /// This includes both maintainers from entry footers and authors from [ Author Name ] sections. 
    /// Collect the union of footer maintainers and `[ Author Name ]` section
    /// authors across every entry.
    pub fn get_all_authors(&self) -> std::collections::HashSet {
        let mut authors = std::collections::HashSet::new();
        // Add maintainers from entry footers
        for entry in self.iter() {
            if let Some(identity) = entry.get_maintainer_identity() {
                authors.insert(identity);
            }
        }
        // Add authors from change sections
        for entry in self.iter() {
            for author_name in entry.get_authors() {
                // Create identity with empty email since we only have names from change sections
                authors.insert(crate::Identity::new(author_name, "".to_string()));
            }
        }
        authors
    }
}

impl Default for ChangeLog {
    fn default() -> Self {
        Self::new()
    }
}

impl FromStr for ChangeLog {
    type Err = ParseError;

    fn from_str(s: &str) -> Result {
        // Strict parse: surfaces any accumulated parse errors.
        ChangeLog::parse(s).to_mut_result()
    }
}

impl FromStr for Entry {
    type Err = ParseError;

    /// Parse a single changelog entry; errors if the text contains zero or
    /// more than one entry.
    fn from_str(s: &str) -> Result {
        let cl: ChangeLog = s.parse()?;
        let mut entries = cl.iter();
        let entry = entries
            .next()
            .ok_or_else(|| ParseError(vec!["no entries found".to_string()]))?;
        if entries.next().is_some() {
            return Err(ParseError(vec!["multiple entries found".to_string()]));
        }
        Ok(entry)
    }
}

impl EntryHeader {
    /// Returns the version of the entry, returning an error if the version string is invalid.
    ///
    /// Returns:
    /// - `Some(Ok(version))` if a valid version is found
    /// - `Some(Err(err))` if a version token exists but cannot be parsed
    /// - `None` if no version token is present
    pub fn try_version(&self) -> Option> {
        self.0.children_with_tokens().find_map(|it| {
            if let Some(token) = it.as_token() {
                if token.kind() == VERSION {
                    // Strip the surrounding parentheses of the "(x.y.z)" token.
                    let text = token.text()[1..token.text().len() - 1].to_string();
                    return Some(text.parse());
                }
            }
            None
        })
    }

    /// Returns the version of the entry.
    ///
    /// Note: This method silently returns `None` if the version string is invalid.
    /// Consider using [`try_version`](Self::try_version) instead to handle parsing errors properly.
    pub fn version(&self) -> Option {
        self.try_version().and_then(|r| r.ok())
    }

    /// Returns the package name of the entry.
    /// Returns the first IDENTIFIER token of the header, i.e. the source
    /// package name.
    pub fn package(&self) -> Option {
        self.0.children_with_tokens().find_map(|it| {
            if let Some(token) = it.as_token() {
                if token.kind() == IDENTIFIER {
                    return Some(token.text().to_string());
                }
            }
            None
        })
    }

    /// Returns the distributions of the entry.
    pub fn distributions(&self) -> Option> {
        let node = self.0.children().find(|it| it.kind() == DISTRIBUTIONS);
        node.map(|node| {
            node.children_with_tokens()
                .filter_map(|it| {
                    if let Some(token) = it.as_token() {
                        if token.kind() == IDENTIFIER {
                            return Some(token.text().to_string());
                        }
                    }
                    None
                })
                .collect::>()
        })
    }

    /// Set distributions for the entry.
    ///
    /// Replaces an existing DISTRIBUTIONS node, or inserts a new one at the
    /// best available anchor (after VERSION, before METADATA, or at the end).
    pub fn set_distributions(&mut self, _distributions: Vec) {
        let node = self
            .0
            .children_with_tokens()
            .find(|it| it.kind() == DISTRIBUTIONS);
        // Build the replacement DISTRIBUTIONS green node up front.
        let mut builder = GreenNodeBuilder::new();
        builder.start_node(DISTRIBUTIONS.into());
        for (i, distribution) in _distributions.iter().enumerate() {
            if i > 0 {
                builder.token(WHITESPACE.into(), " ");
            }
            builder.token(IDENTIFIER.into(), distribution);
        }
        builder.finish_node();
        let (range, green) = if let Some(node) = node {
            // Replace the existing node in place.
            (
                node.index()..node.index() + 1,
                vec![builder.finish().into()],
            )
        } else if let Some(version) = self
            .0
            .children_with_tokens()
            .find(|it| it.kind() == VERSION)
        {
            // NOTE(review): this range REPLACES the VERSION token rather than
            // inserting after it — `version.index() + 1..version.index() + 1`
            // looks intended. Verify against tests before relying on this path.
            (
                version.index()..version.index() + 1,
                vec![
                    GreenToken::new(WHITESPACE.into(), " ").into(),
                    builder.finish().into(),
                ],
            )
        } else if let Some(metadata) = self
            .0
            .children_with_tokens()
            .find(|it| it.kind() == METADATA)
        {
            // Insert just before the metadata block (index - 1 — presumably to
            // land before the separating token; TODO confirm).
            (
                metadata.index() - 1..metadata.index() - 1,
                vec![
                    GreenToken::new(WHITESPACE.into(), " ").into(),
                    builder.finish().into(),
                ],
            )
        } else {
            // No anchor found: append at the end of the header's children.
            (
                self.0.children().count()..self.0.children().count(),
                vec![
                    GreenToken::new(WHITESPACE.into(), " ").into(),
                    builder.finish().into(),
                ],
            )
        };
        let new_root = SyntaxNode::new_root_mut(self.0.green().splice_children(range, green));
        self.replace_root(new_root);
    }

    /// Set the version for the entry.
    /// Replace (or insert) the "(version)" token in the header.
    pub fn set_version(&mut self, version: &Version) {
        // Find the version token
        let node = self
            .0
            .children_with_tokens()
            .find(|it| it.kind() == VERSION);
        let (range, green) = if let Some(token) = node {
            // Replace the existing token in place.
            (
                token.index()..token.index() + 1,
                vec![GreenToken::new(VERSION.into(), &format!("({})", version)).into()],
            )
        } else {
            // No VERSION token yet: insert one right after the package name
            // (IDENTIFIER), or at position 1 if none is found.
            let index = self
                .0
                .children_with_tokens()
                .position(|it| it.kind() == IDENTIFIER)
                .unwrap_or(0);
            (
                index + 1..index + 1,
                vec![
                    GreenToken::new(WHITESPACE.into(), " ").into(),
                    GreenToken::new(VERSION.into(), &format!("({})", version)).into(),
                ],
            )
        };
        let new_root = SyntaxNode::new_root_mut(self.0.green().splice_children(range, green));
        self.replace_root(new_root);
    }

    /// Set the package name for the entry.
    pub fn set_package(&mut self, package: String) {
        let node = self
            .0
            .children_with_tokens()
            .find(|it| it.kind() == IDENTIFIER);
        let new_root = if let Some(token) = node {
            // Replace the existing package identifier.
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                token.index()..token.index() + 1,
                vec![GreenToken::new(IDENTIFIER.into(), &package).into()],
            ))
        } else {
            // Header had no package yet: prepend "name " at the start.
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                0..0,
                vec![
                    GreenToken::new(IDENTIFIER.into(), &package).into(),
                    GreenToken::new(WHITESPACE.into(), " ").into(),
                ],
            ))
        };
        self.replace_root(new_root);
    }

    /// Set extra metadata for the entry.
    /// Set a `key=value` pair in the header's "; key=value" metadata section,
    /// updating an existing entry, appending to an existing METADATA node, or
    /// creating the METADATA node from scratch.
    pub fn set_metadata(&mut self, key: &str, value: &str) {
        // Find the appropriate metadata node
        if let Some(mut node) = self
            .metadata_nodes()
            .find(|it| it.key().map(|k| k == key).unwrap_or(false))
        {
            // Key already present: just rewrite its value.
            node.set_value(value);
        } else if let Some(metadata) = self
            .0
            .children_with_tokens()
            .find(|it| it.kind() == METADATA)
        {
            // METADATA section exists but not this key: build a new
            // METADATA_ENTRY and splice it onto the end of the section.
            let mut builder = GreenNodeBuilder::new();
            builder.start_node(METADATA_ENTRY.into());
            builder.start_node(METADATA_KEY.into());
            builder.token(IDENTIFIER.into(), key);
            builder.finish_node();
            builder.token(EQUALS.into(), "=");
            builder.start_node(METADATA_VALUE.into());
            builder.token(IDENTIFIER.into(), value);
            builder.finish_node();
            builder.finish_node();
            let metadata = metadata.as_node().unwrap();
            let count = metadata.children_with_tokens().count();
            self.0.splice_children(
                metadata.index()..metadata.index() + 1,
                vec![SyntaxNode::new_root_mut(metadata.green().splice_children(
                    count..count,
                    vec![
                        GreenToken::new(WHITESPACE.into(), " ").into(),
                        builder.finish().into(),
                    ],
                ))
                .into()],
            );
        } else {
            // No METADATA section at all: build "; key=value" wholesale.
            let mut builder = GreenNodeBuilder::new();
            builder.start_node(METADATA.into());
            builder.token(SEMICOLON.into(), ";");
            builder.token(WHITESPACE.into(), " ");
            builder.start_node(METADATA_ENTRY.into());
            builder.start_node(METADATA_KEY.into());
            builder.token(IDENTIFIER.into(), key);
            builder.finish_node();
            builder.token(EQUALS.into(), "=");
            builder.start_node(METADATA_VALUE.into());
            builder.token(IDENTIFIER.into(), value);
            builder.finish_node();
            builder.finish_node();
            let new_root = SyntaxNode::new_root_mut(builder.finish());
            // Add either just after DISTRIBUTIONS
            if let Some(distributions) = self
                .0
                .children_with_tokens()
                .find(|it| it.kind() == DISTRIBUTIONS)
            {
                self.0.splice_children(
                    distributions.index() + 1..distributions.index() + 1,
                    vec![new_root.into()],
                );
            } else if let Some(nl) = self
                .0
                .children_with_tokens()
                .find(|it| it.kind() == NEWLINE)
            {
                // Just before the newline
                self.0
                    .splice_children(nl.index()..nl.index(), vec![new_root.into()]);
            } else {
                // Degenerate header with no newline: append at the end.
                let count = self.0.children_with_tokens().count();
                self.0.splice_children(count..count, vec![new_root.into()]);
            }
        }
    }

    /// Returns an iterator over the metadata entry AST nodes.
    pub fn metadata_nodes(&self) -> impl Iterator + '_ {
        let node = self.0.children().find(|it| it.kind() == METADATA);
        node.into_iter().flat_map(|node| {
            node.children_with_tokens()
                .filter_map(|it| MetadataEntry::cast(it.into_node()?))
        })
    }

    /// Returns an iterator over the metadata key-value pairs.
    pub fn metadata(&self) -> impl Iterator + '_ {
        self.metadata_nodes().filter_map(|entry| {
            if let (Some(key), Some(value)) = (entry.key(), entry.value()) {
                Some((key, value))
            } else {
                None
            }
        })
    }

    /// Returns the urgency of the entry.
    pub fn urgency(&self) -> Option {
        for (key, value) in self.metadata() {
            if key.as_str() == "urgency" {
                // NOTE(review): `.unwrap()` panics on an unparseable urgency
                // value — consider `.ok()` / propagating an error instead.
                return Some(value.parse().unwrap());
            }
        }
        None
    }
}

impl EntryFooter {
    /// Returns the email address of the maintainer from the footer.
    pub fn email(&self) -> Option {
        self.0.children_with_tokens().find_map(|it| {
            if let Some(token) = it.as_token() {
                let text = token.text();
                if token.kind() == EMAIL {
                    // Strip the surrounding angle brackets of "<addr>".
                    return Some(text[1..text.len() - 1].to_string());
                }
            }
            None
        })
    }

    /// Returns the maintainer name from the footer.
    pub fn maintainer(&self) -> Option {
        self.0
            .children()
            .find_map(Maintainer::cast)
            .map(|m| m.text())
            // An empty maintainer token is treated as absent.
            .filter(|s| !s.is_empty())
    }

    /// Set the maintainer for the entry.
    /// Replace (or insert) the MAINTAINER token in the footer line.
    pub fn set_maintainer(&mut self, maintainer: String) {
        let node = self
            .0
            .children_with_tokens()
            .find(|it| it.kind() == MAINTAINER);
        let new_root = if let Some(node) = node {
            // Replace the existing maintainer token.
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                node.index()..node.index() + 1,
                vec![GreenToken::new(MAINTAINER.into(), &maintainer).into()],
            ))
        } else if let Some(node) = self.0.children_with_tokens().find(|it| it.kind() == INDENT) {
            // Footer exists without a maintainer: insert after the " -- " indent.
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                node.index() + 1..node.index() + 1,
                vec![GreenToken::new(MAINTAINER.into(), &maintainer).into()],
            ))
        } else {
            // Empty footer: create the " -- Name" prefix from scratch.
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                0..0,
                vec![
                    GreenToken::new(INDENT.into(), " -- ").into(),
                    GreenToken::new(MAINTAINER.into(), &maintainer).into(),
                ],
            ))
        };
        self.replace_root(new_root);
    }

    /// Set email for the entry.
    ///
    /// The address is always stored wrapped in angle brackets ("<addr>").
    pub fn set_email(&mut self, _email: String) {
        let node = self.0.children_with_tokens().find(|it| it.kind() == EMAIL);
        let new_root = if let Some(node) = node {
            // Replace the existing email token.
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                node.index()..node.index() + 1,
                vec![GreenToken::new(EMAIL.into(), &format!("<{}>", _email)).into()],
            ))
        } else if let Some(node) = self
            .0
            .children_with_tokens()
            .find(|it| it.kind() == MAINTAINER)
        {
            // Insert right after the maintainer name.
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                node.index() + 1..node.index() + 1,
                vec![GreenToken::new(EMAIL.into(), &format!("<{}>", _email)).into()],
            ))
        } else if let Some(node) = self.0.children_with_tokens().find(|it| it.kind() == INDENT) {
            // Footer with only an indent: add an empty maintainer placeholder
            // so the token layout stays "indent maintainer email".
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                node.index() + 1..node.index() + 1,
                vec![
                    GreenToken::new(MAINTAINER.into(), "").into(),
                    GreenToken::new(WHITESPACE.into(), " ").into(),
                    GreenToken::new(EMAIL.into(), &format!("<{}>", _email)).into(),
                ],
            ))
        } else {
            // Empty footer: create the full prefix from scratch.
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                0..0,
                vec![
                    GreenToken::new(INDENT.into(), " -- ").into(),
                    GreenToken::new(MAINTAINER.into(), "").into(),
                    GreenToken::new(WHITESPACE.into(), " ").into(),
                    GreenToken::new(EMAIL.into(), &format!("<{}>", _email)).into(),
                ],
            ))
        };
        self.replace_root(new_root);
    }

    /// Returns the timestamp from the footer.
    pub fn timestamp(&self) -> Option {
        self.0
            .children()
            .find_map(Timestamp::cast)
            .map(|m| m.text())
    }

    /// Set timestamp for the entry.
    ///
    /// NOTE(review): `×tamp` below is HTML-entity corruption of `&timestamp`
    /// (`&times;` was interpreted in transit) — restore `&timestamp` from VCS.
    pub fn set_timestamp(&mut self, timestamp: String) {
        let node = self
            .0
            .children_with_tokens()
            .find(|it| it.kind() == TIMESTAMP);
        let new_root = if let Some(node) = node {
            // Replace the existing timestamp token.
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                node.index()..node.index() + 1,
                vec![GreenToken::new(TIMESTAMP.into(), ×tamp).into()],
            ))
        } else if let Some(node) = self.0.children_with_tokens().find(|it| it.kind() == INDENT) {
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                node.index() + 1..node.index() + 1,
                vec![GreenToken::new(TIMESTAMP.into(), ×tamp).into()],
            ))
        } else if let Some(node) = self.0.children_with_tokens().find(|it| it.kind() == EMAIL) {
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                node.index() + 1..node.index() + 1,
                vec![GreenToken::new(TIMESTAMP.into(), ×tamp).into()],
            ))
        } else {
            SyntaxNode::new_root_mut(self.0.green().splice_children(
                0..0,
                vec![
                    GreenToken::new(INDENT.into(), " -- ").into(),
                    GreenToken::new(TIMESTAMP.into(), ×tamp).into(),
                ],
            ))
        };
        self.replace_root(new_root);
    }
}

impl EntryBody {
    /// Concatenated DETAIL token text of one body line.
    fn text(&self) -> String {
        self.0
            .children_with_tokens()
            .filter_map(|it| {
                if let Some(token) = it.as_token() {
                    if token.kind() == DETAIL {
                        return Some(token.text().to_string());
                    }
                }
                None
            })
            .collect::>()
            .concat()
    }
}

impl Timestamp {
    /// Raw text of the timestamp node.
    fn text(&self) -> String {
        self.0.text().to_string()
    }
}

impl Maintainer {
    /// Raw text of the maintainer node.
    fn text(&self) -> String {
        self.0.text().to_string()
    }
}

impl Entry {
    /// Returns the header AST node of the entry.
    pub fn header(&self) -> Option {
        self.0.children().find_map(EntryHeader::cast)
    }

    /// Returns the body AST node of the entry.
    pub fn body(&self) -> Option {
        self.0.children().find_map(EntryBody::cast)
    }

    /// Returns the footer AST node of the entry.
    pub fn footer(&self) -> Option {
        self.0.children().find_map(EntryFooter::cast)
    }

    /// Return the package name of the entry.
    pub fn package(&self) -> Option {
        self.header().and_then(|h| h.package())
    }

    /// Set the package name of the entry.
    pub fn set_package(&mut self, package: String) {
        if let Some(mut header) = self.header() {
            // Mutate a detached copy of the header, then splice it back in
            // at its original position.
            let header_index = header.0.index();
            header.set_package(package);
            self.0
                .splice_children(header_index..header_index + 1, vec![header.0.into()]);
        } else {
            self.create_header().set_package(package);
        }
    }

    /// Returns the version of the entry, returning an error if the version string is invalid.
    ///
    /// Returns:
    /// - `Some(Ok(version))` if a valid version is found
    /// - `Some(Err(err))` if a version token exists but cannot be parsed
    /// - `None` if no version token is present or no header exists
    pub fn try_version(&self) -> Option> {
        self.header().and_then(|h| h.try_version())
    }

    /// Returns the version of the entry.
    ///
    /// Note: This method silently returns `None` if the version string is invalid.
    /// Consider using [`try_version`](Self::try_version) instead to handle parsing errors properly.
    pub fn version(&self) -> Option {
        self.try_version().and_then(|r| r.ok())
    }

    /// Set the version of the entry.
    pub fn set_version(&mut self, version: &Version) {
        if let Some(mut header) = self.header() {
            // Same splice-back pattern as set_package.
            let header_index = header.0.index();
            header.set_version(version);
            self.0
                .splice_children(header_index..header_index + 1, vec![header.0.into()]);
        } else {
            self.create_header().set_version(version);
        }
    }

    /// Return the distributions of the entry.
    pub fn distributions(&self) -> Option> {
        self.header().and_then(|h| h.distributions())
    }

    /// Set the distributions for the entry
    pub fn set_distributions(&mut self, distributions: Vec) {
        if let Some(mut header) = self.header() {
            // Mutate a detached copy of the header, then splice it back.
            let header_index = header.0.index();
            header.set_distributions(distributions);
            self.0
                .splice_children(header_index..header_index + 1, vec![header.0.into()]);
        } else {
            self.create_header().set_distributions(distributions);
        }
    }

    /// Returns the email address of the maintainer.
    pub fn email(&self) -> Option {
        self.footer().and_then(|f| f.email())
    }

    /// Returns the maintainer AST node.
    pub fn maintainer_node(&self) -> Option {
        self.footer()
            .and_then(|f| f.0.children().find_map(Maintainer::cast))
    }

    /// Returns the name of the maintainer.
    pub fn maintainer(&self) -> Option {
        self.footer().and_then(|f| f.maintainer())
    }

    /// Set the maintainer of the entry.
    ///
    /// The tuple is (name, email).
    pub fn set_maintainer(&mut self, maintainer: (String, String)) {
        if let Some(mut footer) = self.footer() {
            let footer_index = footer.0.index();
            footer.set_maintainer(maintainer.0);
            footer.set_email(maintainer.1);
            self.0
                .splice_children(footer_index..footer_index + 1, vec![footer.0.into()]);
        } else {
            // create_footer attaches the node itself, so no splice needed here.
            let mut footer = self.create_footer();
            footer.set_maintainer(maintainer.0);
            footer.set_email(maintainer.1);
        }
    }

    /// Returns the timestamp AST node.
    pub fn timestamp_node(&self) -> Option {
        self.footer()
            .and_then(|f| f.0.children().find_map(Timestamp::cast))
    }

    /// Returns the timestamp of the entry, as the raw string.
    pub fn timestamp(&self) -> Option {
        self.footer().and_then(|f| f.timestamp())
    }

    /// Set the timestamp of the entry.
    pub fn set_timestamp(&mut self, timestamp: String) {
        if let Some(mut footer) = self.footer() {
            let footer_index = footer.0.index();
            footer.set_timestamp(timestamp);
            self.0
                .splice_children(footer_index..footer_index + 1, vec![footer.0.into()]);
        } else {
            self.create_footer().set_timestamp(timestamp);
        }
    }

    /// Set the datetime of the entry.
    #[cfg(feature = "chrono")]
    pub fn set_datetime(&mut self, datetime: DateTime) {
        // RFC-2822-style format mandated for debian/changelog footers.
        self.set_timestamp(format!("{}", datetime.format("%a, %d %b %Y %H:%M:%S %z")));
    }

    /// Returns the datetime of the entry.
    #[cfg(feature = "chrono")]
    pub fn datetime(&self) -> Option> {
        // Silently yields None for unparseable timestamps.
        self.timestamp().and_then(|ts| parse_time_string(&ts).ok())
    }

    /// Returns the urgency of the entry.
    pub fn urgency(&self) -> Option {
        self.header().and_then(|h| h.urgency())
    }

    /// Create and attach an empty header node at the start of the entry.
    fn create_header(&self) -> EntryHeader {
        let mut builder = GreenNodeBuilder::new();
        builder.start_node(ENTRY_HEADER.into());
        builder.token(NEWLINE.into(), "\n");
        builder.finish_node();
        let syntax = SyntaxNode::new_root_mut(builder.finish());
        self.0.splice_children(0..0, vec![syntax.into()]);
        // Return the freshly attached (first) child as a mutable handle.
        EntryHeader(self.0.children().next().unwrap().clone_for_update())
    }

    /// Create and attach an empty footer node at the end of the entry.
    fn create_footer(&self) -> EntryFooter {
        let mut builder = GreenNodeBuilder::new();
        builder.start_node(ENTRY_FOOTER.into());
        builder.token(NEWLINE.into(), "\n");
        builder.finish_node();
        let syntax = SyntaxNode::new_root_mut(builder.finish());
        let count = self.0.children().count();
        self.0.splice_children(count..count, vec![syntax.into()]);
        EntryFooter(self.0.children().last().unwrap().clone_for_update())
    }

    /// Set the urgency of the entry.
    pub fn set_urgency(&mut self, urgency: Urgency) {
        self.set_metadata("urgency", urgency.to_string().as_str());
    }

    /// Set a metadata key-value pair for the entry.
    pub fn set_metadata(&mut self, key: &str, value: &str) {
        if let Some(mut header) = self.header() {
            let header_index = header.0.index();
            header.set_metadata(key, value);
            self.0
                .splice_children(header_index..header_index + 1, vec![header.0.into()]);
        } else {
            self.create_header().set_metadata(key, value);
        }
    }

    /// Add a change for the specified author
    ///
    /// If the author is not the same as the current maintainer, a new
    /// section will be created for the author in the entry (e.g. "[ John Doe ]").
    ///
    /// Returns an error if text rewrapping fails.
    pub fn try_add_change_for_author(
        &self,
        change: &[&str],
        author: (String, String),
    ) -> Result<(), crate::textwrap::Error> {
        let changes_lines = self.change_lines().collect::>();
        let by_author = crate::changes::changes_by_author(changes_lines.iter().map(|s| s.as_str()))
            .collect::>();
        // There are no per author sections yet, so attribute current changes to changelog entry author
        if by_author.iter().all(|(a, _, _)| a.is_none()) {
            if let Some(maintainer_name) = self.maintainer() {
                if author.0 != maintainer_name {
                    // Wrap the existing unattributed changes in the
                    // maintainer's "[ Name ]" section, then open one for the
                    // new author.
                    self.prepend_change_line(
                        crate::changes::format_section_title(maintainer_name.as_str()).as_str(),
                    );
                    // NOTE(review): `.unwrap()` assumes at least one change
                    // line exists after the prepend above — holds here, but
                    // fragile if the ordering changes.
                    if !self.change_lines().last().unwrap().is_empty() {
                        self.append_change_line("");
                    }
                    self.append_change_line(
                        crate::changes::format_section_title(author.0.as_str()).as_str(),
                    );
                }
            }
        } else if let Some(last_section) = by_author.last().as_ref() {
            // Sections exist: open a new one only when the last section
            // belongs to a different author.
            if last_section.0 != Some(author.0.as_str()) {
                self.append_change_line("");
                self.append_change_line(
                    crate::changes::format_section_title(author.0.as_str()).as_str(),
                );
            }
        }
        // Drop a trailing blank line so the new change lands flush.
        if let Some(last) = self.change_lines().last() {
            if last.trim().is_empty() {
                self.pop_change_line();
            }
        }
        for line in crate::textwrap::try_rewrap_changes(change.iter().copied())? {
            self.append_change_line(line.as_ref());
        }
        Ok(())
    }

    /// Add a change for the specified author
    ///
    /// If the author is not the same as the current maintainer, a new
    /// section will be created for the author in the entry (e.g. "[ John Doe ]").
    ///
    /// # Deprecated
    ///
    /// This function panics on errors. Use [`Entry::try_add_change_for_author`] instead for proper error handling.
    ///
    /// # Panics
    ///
    /// Panics if text rewrapping fails.
    #[deprecated(
        since = "0.2.10",
        note = "Use try_add_change_for_author for proper error handling"
    )]
    pub fn add_change_for_author(&self, change: &[&str], author: (String, String)) {
        // Thin panicking wrapper kept for backward compatibility.
        self.try_add_change_for_author(change, author).unwrap()
    }

    /// Prepend a change line to the entry
    ///
    /// NOTE(review): whitespace inside the INDENT string literals below may
    /// have been collapsed in transit — changelog detail lines are normally
    /// indented with TWO spaces; verify against VCS.
    pub fn prepend_change_line(&self, line: &str) {
        let mut builder = GreenNodeBuilder::new();
        builder.start_node(ENTRY_BODY.into());
        if !line.is_empty() {
            builder.token(INDENT.into(), " ");
            builder.token(DETAIL.into(), line);
        }
        builder.token(NEWLINE.into(), "\n");
        builder.finish_node();
        // Insert just after the header
        let mut it = self.0.children();
        let header = it.find(|n| n.kind() == ENTRY_HEADER);
        // Prefer the EMPTY_LINE after the header (keeps the mandatory blank
        // line first); fall back to the header itself, then to position 0.
        let previous_line = it.find(|n| n.kind() == EMPTY_LINE).or(header);
        let index = previous_line.map_or(0, |l| l.index() + 1);
        let syntax = SyntaxNode::new_root_mut(builder.finish());
        self.0.splice_children(index..index, vec![syntax.into()]);
    }

    /// Pop the last change line from the entry
    pub fn pop_change_line(&self) -> Option {
        // Find the last child of type ENTRY_BODY
        let last_child = self.0.children().filter(|n| n.kind() == ENTRY_BODY).last();
        if let Some(last_child) = last_child {
            // Capture the DETAIL text before detaching the node.
            let text = last_child.children_with_tokens().find_map(|it| {
                if let Some(token) = it.as_token() {
                    if token.kind() == DETAIL {
                        return Some(token.text().to_string());
                    }
                }
                None
            });
            self.0
                .splice_children(last_child.index()..last_child.index() + 1, vec![]);
            text
        } else {
            None
        }
    }

    /// Append a line to the changelog entry
    pub fn append_change_line(&self, line: &str) {
        let mut builder = GreenNodeBuilder::new();
        builder.start_node(ENTRY_BODY.into());
        if !line.is_empty() {
            builder.token(INDENT.into(), " ");
            builder.token(DETAIL.into(), line);
        }
        builder.token(NEWLINE.into(), "\n");
        builder.finish_node();
        // Find the last child of type ENTRY_BODY
        let last_child = self
            .0
            .children()
            .filter(|n| n.kind() == ENTRY_BODY)
            .last()
            .unwrap_or_else(|| {
                // No ENTRY_BODY nodes exist. Insert after the EMPTY_LINE that follows
                // the ENTRY_HEADER (if it exists), to preserve required blank line.
                let children: Vec<_> = self.0.children().collect();
                if children.len() >= 2
                    && children[0].kind() == ENTRY_HEADER
                    && children[1].kind() == EMPTY_LINE
                {
                    children[1].clone()
                } else {
                    children[0].clone()
                }
            });
        let syntax = SyntaxNode::new_root_mut(builder.finish()).into();
        self.0
            .splice_children(last_child.index() + 1..last_child.index() + 1, vec![syntax]);
    }

    /// Add a bullet point to the changelog entry.
    ///
    /// This is a convenience method that appends a bullet point line to the entry.
    /// Always prepends "* " to the text and wraps the text to 78 columns if needed.
    ///
    /// # Arguments
    /// * `text` - The text of the bullet point (without the "* " prefix)
    ///
    /// # Examples
    /// ```
    /// use debian_changelog::ChangeLog;
    ///
    /// let mut changelog = ChangeLog::new();
    /// let entry = changelog.new_entry()
    ///     .maintainer(("Author".into(), "author@example.com".into()))
    ///     .distribution("unstable".to_string())
    ///     .version("1.0.0".parse().unwrap())
    ///     .finish();
    ///
    /// entry.add_bullet("First change");
    /// entry.add_bullet("Second change");
    ///
    /// let lines: Vec<_> = entry.change_lines().collect();
    /// assert_eq!(lines[0], "* First change");
    /// assert_eq!(lines[1], "* Second change");
    /// ```
    pub fn add_bullet(&self, text: &str) {
        // Wrap the text with "* " prefix
        let wrapped = crate::textwrap::textwrap(
            text,
            Some(crate::textwrap::DEFAULT_WIDTH),
            Some(crate::textwrap::INITIAL_INDENT),
            Some(" "),
        );
        // Append each wrapped line
        for line in wrapped {
            self.append_change_line(&line);
        }
    }

    /// Returns the changes of the entry.
    /// Yields the entry's body lines (empty lines included) with leading and
    /// trailing blank lines stripped.
    pub fn change_lines(&self) -> impl Iterator + '_ {
        let mut lines = self
            .0
            .children()
            .filter_map(|n| {
                if let Some(ref change) = EntryBody::cast(n.clone()) {
                    Some(change.text())
                } else if n.kind() == EMPTY_LINE {
                    // Interior blank lines are significant (section breaks).
                    Some("".to_string())
                } else {
                    None
                }
            })
            .collect::>();
        // Trim trailing blanks…
        while let Some(last) = lines.last() {
            if last.is_empty() {
                lines.pop();
            } else {
                break;
            }
        }
        // …and leading blanks.
        lines.into_iter().skip_while(|it| it.is_empty())
    }

    /// Ensure that the first line of the entry is the specified line
    ///
    /// If the first line is not the specified line, it will be prepended to the entry.
    pub fn ensure_first_line(&self, line: &str) {
        let first_line = self.change_lines().next().map(|it| it.trim().to_string());
        if first_line != Some(line.to_string()) {
            self.prepend_change_line(line);
        }
    }

    /// Return whether the entry is marked as being unreleased
    ///
    /// Combines two signals: the distributions field (e.g. "UNRELEASED") and
    /// a missing footer identity. `None` means "cannot tell".
    pub fn is_unreleased(&self) -> Option {
        let distro_is_unreleased = self.distributions().as_ref().map(|ds| {
            let ds = ds.iter().map(|d| d.as_str()).collect::>();
            crate::distributions_is_unreleased(ds.as_slice())
        });
        // A footer with neither maintainer nor email implies unreleased;
        // otherwise this signal is inconclusive (never Some(false)).
        let footer_is_unreleased = if self.maintainer().is_none() && self.email().is_none() {
            Some(true)
        } else {
            None
        };
        match (distro_is_unreleased, footer_is_unreleased) {
            (Some(true), _) => Some(true),
            (_, Some(true)) => Some(true),
            (Some(false), _) => Some(false),
            // NOTE(review): unreachable in practice — footer_is_unreleased is
            // only ever Some(true) or None above.
            (_, Some(false)) => Some(false),
            _ => None,
        }
    }

    /// Iterator over changes in this entry grouped by author.
    ///
    /// Returns a vector of tuples (author_name, line_numbers, change_lines)
    /// where author_name is Some for attributed changes or None for changes without attribution.
    pub fn iter_changes_by_author(&self) -> Vec<(Option, Vec, Vec)> {
        let changes: Vec = self.change_lines().map(|s| s.to_string()).collect();
        crate::changes::changes_by_author(changes.iter().map(|s| s.as_str()))
            .map(|(author, linenos, lines)| {
                // Convert the borrowed grouping result into owned data.
                let author_name = author.map(|s| s.to_string());
                let change_lines = lines.into_iter().map(|s| s.to_string()).collect();
                (author_name, linenos, change_lines)
            })
            .collect()
    }

    /// Get all authors mentioned in this entry's changes.
    ///
    /// This includes authors from [ Author Name ] sections in the change text,
    /// but not the main maintainer/uploader from the entry footer.
    pub fn get_authors(&self) -> std::collections::HashSet {
        let changes: Vec = self.change_lines().map(|s| s.to_string()).collect();
        let change_strs: Vec<&str> = changes.iter().map(|s| s.as_str()).collect();
        crate::changes::find_extra_authors(&change_strs)
            .into_iter()
            .map(|s| s.to_string())
            .collect()
    }

    /// Get the maintainer information as an Identity struct.
    ///
    /// Returns the maintainer name and email from the entry footer if available.
    pub fn get_maintainer_identity(&self) -> Option {
        // Both parts must be present to form an Identity.
        if let (Some(name), Some(email)) = (self.maintainer(), self.email()) {
            Some(crate::Identity::new(name, email))
        } else {
            None
        }
    }

    /// Add changes for a specific author to this entry.
    ///
    /// This will add an author section (e.g., `[ Author Name ]`) if needed,
    /// and append the changes under that section. If this is the first attributed
    /// change and there are existing unattributed changes, they will be wrapped
    /// in the maintainer's section.
    ///
    /// # Arguments
    /// * `author_name` - The name of the author to attribute the changes to
    /// * `changes` - A list of change lines to add (e.g., `["* Fixed bug"]`)
    ///
    /// # Example
    /// ```
    /// use debian_changelog::Entry;
    /// let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low
    ///
    ///   * Existing change
    ///
    ///  -- Jelmer Vernooij <jelmer@debian.org>  Mon, 04 Sep 2023 18:13:45 -0500
    /// "#.parse().unwrap();
    ///
    /// entry.try_add_changes_for_author("Alice", vec!["* New feature"]);
    /// ```
    pub fn try_add_changes_for_author(
        &self,
        author_name: &str,
        changes: Vec<&str>,
    ) -> Result<(), crate::textwrap::Error> {
        let mut change_lines: Vec = self.change_lines().collect();
        let original_len = change_lines.len();
        let default_author = self.get_maintainer_identity().map(|id| (id.name, id.email));
        crate::changes::try_add_change_for_author(
            &mut change_lines,
            author_name,
            changes,
            default_author,
        )?;
        // The function modifies change_lines in place. We need to handle two cases:
        // 1. Lines were inserted at the beginning (when wrapping existing changes)
        // 2. Lines were appended at the end (normal case)
        if change_lines.len() > original_len {
            // New lines were added
            let original_changes: Vec<_> = self.change_lines().collect();
            // Check if lines were inserted at the start
            let inserted_at_start = original_len > 0 && change_lines[0] != original_changes[0];
            if inserted_at_start {
                // Lines were inserted at the beginning - we need to rebuild
                // This happens when converting unattributed changes to attributed ones
                while self.pop_change_line().is_some() {}
                for line in change_lines {
                    self.append_change_line(&line);
                }
            } else {
                // Lines were appended at the end - just append the new ones
                for line in change_lines.iter().skip(original_len) {
                    self.append_change_line(line);
                }
            }
        }
        Ok(())
    }

    /// Add changes for the specified author
    ///
    /// # Deprecated
    ///
    /// This function panics on errors. Use [`Entry::try_add_changes_for_author`] instead for proper error handling.
/// /// # Panics /// /// Panics if text rewrapping fails. #[deprecated( since = "0.2.10", note = "Use try_add_changes_for_author for proper error handling" )] pub fn add_changes_for_author(&self, author_name: &str, changes: Vec<&str>) { self.try_add_changes_for_author(author_name, changes) .unwrap() } } #[cfg(feature = "chrono")] const CHANGELOG_TIME_FORMAT: &str = "%a, %d %b %Y %H:%M:%S %z"; #[cfg(feature = "chrono")] fn parse_time_string(time_str: &str) -> Result, chrono::ParseError> { // First try parsing with day-of-week validation if let Ok(dt) = DateTime::parse_from_str(time_str, CHANGELOG_TIME_FORMAT) { return Ok(dt); } // If that fails, try parsing without day-of-week validation // This is more lenient for changelogs that have incorrect day-of-week values // Skip the day name (everything before the first comma and space) if let Some(after_comma) = time_str.split_once(", ") { DateTime::parse_from_str(after_comma.1, "%d %b %Y %H:%M:%S %z") } else { // If there's no comma, return the original error DateTime::parse_from_str(time_str, CHANGELOG_TIME_FORMAT) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_simple() { const CHANGELOG: &str = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 breezy (3.3.3-2) unstable; urgency=medium * Drop unnecessary dependency on python3-six. Closes: #1039011 * Drop dependency on cython3-dbg. 
Closes: #1040544 -- Jelmer Vernooij Sat, 24 Jun 2023 14:58:57 +0100 # Oh, and here is a comment "#; let parsed = parse(CHANGELOG); assert_eq!(parsed.errors(), &Vec::::new()); let node = parsed.syntax_node(); assert_eq!( format!("{:#?}", node), r###"ROOT@0..405 ENTRY@0..140 ENTRY_HEADER@0..39 IDENTIFIER@0..6 "breezy" WHITESPACE@6..7 " " VERSION@7..16 "(3.3.4-1)" WHITESPACE@16..17 " " DISTRIBUTIONS@17..25 IDENTIFIER@17..25 "unstable" METADATA@25..38 SEMICOLON@25..26 ";" WHITESPACE@26..27 " " METADATA_ENTRY@27..38 METADATA_KEY@27..34 IDENTIFIER@27..34 "urgency" EQUALS@34..35 "=" METADATA_VALUE@35..38 IDENTIFIER@35..38 "low" NEWLINE@38..39 "\n" EMPTY_LINE@39..40 NEWLINE@39..40 "\n" ENTRY_BODY@40..66 INDENT@40..42 " " DETAIL@42..65 "* New upstream release." NEWLINE@65..66 "\n" EMPTY_LINE@66..67 NEWLINE@66..67 "\n" ENTRY_FOOTER@67..140 INDENT@67..71 " -- " MAINTAINER@71..86 TEXT@71..77 "Jelmer" WHITESPACE@77..78 " " TEXT@78..86 "Vernooij" WHITESPACE@86..87 " " EMAIL@87..106 "" WHITESPACE@106..108 " " TIMESTAMP@108..139 TEXT@108..112 "Mon," WHITESPACE@112..113 " " TEXT@113..115 "04" WHITESPACE@115..116 " " TEXT@116..119 "Sep" WHITESPACE@119..120 " " TEXT@120..124 "2023" WHITESPACE@124..125 " " TEXT@125..133 "18:13:45" WHITESPACE@133..134 " " TEXT@134..139 "-0500" NEWLINE@139..140 "\n" EMPTY_LINE@140..141 NEWLINE@140..141 "\n" ENTRY@141..376 ENTRY_HEADER@141..183 IDENTIFIER@141..147 "breezy" WHITESPACE@147..148 " " VERSION@148..157 "(3.3.3-2)" WHITESPACE@157..158 " " DISTRIBUTIONS@158..166 IDENTIFIER@158..166 "unstable" METADATA@166..182 SEMICOLON@166..167 ";" WHITESPACE@167..168 " " METADATA_ENTRY@168..182 METADATA_KEY@168..175 IDENTIFIER@168..175 "urgency" EQUALS@175..176 "=" METADATA_VALUE@176..182 IDENTIFIER@176..182 "medium" NEWLINE@182..183 "\n" EMPTY_LINE@183..184 NEWLINE@183..184 "\n" ENTRY_BODY@184..249 INDENT@184..186 " " DETAIL@186..248 "* Drop unnecessary de ..." 
NEWLINE@248..249 "\n" ENTRY_BODY@249..302 INDENT@249..251 " " DETAIL@251..301 "* Drop dependency on ..." NEWLINE@301..302 "\n" EMPTY_LINE@302..303 NEWLINE@302..303 "\n" ENTRY_FOOTER@303..376 INDENT@303..307 " -- " MAINTAINER@307..322 TEXT@307..313 "Jelmer" WHITESPACE@313..314 " " TEXT@314..322 "Vernooij" WHITESPACE@322..323 " " EMAIL@323..342 "" WHITESPACE@342..344 " " TIMESTAMP@344..375 TEXT@344..348 "Sat," WHITESPACE@348..349 " " TEXT@349..351 "24" WHITESPACE@351..352 " " TEXT@352..355 "Jun" WHITESPACE@355..356 " " TEXT@356..360 "2023" WHITESPACE@360..361 " " TEXT@361..369 "14:58:57" WHITESPACE@369..370 " " TEXT@370..375 "+0100" NEWLINE@375..376 "\n" EMPTY_LINE@376..377 NEWLINE@376..377 "\n" COMMENT@377..405 "# Oh, and here is a c ..." "### ); let mut root = parsed.tree_mut(); let entries: Vec<_> = root.iter().collect(); assert_eq!(entries.len(), 2); let entry = &entries[0]; assert_eq!(entry.package(), Some("breezy".into())); assert_eq!(entry.version(), Some("3.3.4-1".parse().unwrap())); assert_eq!(entry.distributions(), Some(vec!["unstable".into()])); assert_eq!(entry.urgency(), Some(Urgency::Low)); assert_eq!(entry.maintainer(), Some("Jelmer Vernooij".into())); assert_eq!(entry.email(), Some("jelmer@debian.org".into())); assert_eq!( entry.timestamp(), Some("Mon, 04 Sep 2023 18:13:45 -0500".into()) ); #[cfg(feature = "chrono")] assert_eq!( entry.datetime(), Some("2023-09-04T18:13:45-05:00".parse().unwrap()) ); let changes_lines: Vec<_> = entry.change_lines().collect(); assert_eq!(changes_lines, vec!["* New upstream release.".to_string()]); assert_eq!(node.text(), CHANGELOG); let first = root.pop_first().unwrap(); assert_eq!(first.version(), Some("3.3.4-1".parse().unwrap())); assert_eq!( root.to_string(), r#"breezy (3.3.3-2) unstable; urgency=medium * Drop unnecessary dependency on python3-six. Closes: #1039011 * Drop dependency on cython3-dbg. 
Closes: #1040544 -- Jelmer Vernooij Sat, 24 Jun 2023 14:58:57 +0100 # Oh, and here is a comment "# ); } #[test] fn test_from_io_read() { let changelog = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#; let input = changelog.as_bytes(); let input = Box::new(std::io::Cursor::new(input)) as Box; let parsed = ChangeLog::read(input).unwrap(); assert_eq!(parsed.to_string(), changelog); } #[test] #[cfg(feature = "chrono")] fn test_new_entry() { let mut cl = ChangeLog::new(); cl.new_entry() .package("breezy".into()) .version("3.3.4-1".parse().unwrap()) .distributions(vec!["unstable".into()]) .urgency(Urgency::Low) .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into())) .change_line("* A change.".into()) .datetime("Mon, 04 Sep 2023 18:13:45 -0500") .finish(); assert_eq!( r###"breezy (3.3.4-1) unstable; urgency=low * A change. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "###, cl.to_string() ); assert!(!cl.iter().next().unwrap().is_unreleased().unwrap()); } #[test] #[cfg(feature = "chrono")] fn test_new_empty_default() { let mut cl = ChangeLog::new(); cl.new_entry() .package("breezy".into()) .version("3.3.4-1".parse().unwrap()) .maintainer(("Jelmer Vernooij".into(), "jelmer@debian.org".into())) .change_line("* A change.".into()) .datetime("Mon, 04 Sep 2023 18:13:45 -0500") .finish(); assert_eq!( r###"breezy (3.3.4-1) UNRELEASED; urgency=low * A change. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "###, cl.to_string() ); } #[test] fn test_new_empty_entry() { let mut cl = ChangeLog::new(); cl.new_empty_entry() .change_line("* A change.".into()) .finish(); assert_eq!( r###" * A change. -- "###, cl.to_string() ); assert_eq!(cl.iter().next().unwrap().is_unreleased(), Some(true)); } #[test] fn test_parse_invalid_line() { let text = r#"THIS IS NOT A PARSEABLE LINE lintian-brush (0.35) UNRELEASED; urgency=medium * Support updating templated debian/control files that use cdbs template. 
-- Joe Example Fri, 04 Oct 2019 02:36:13 +0000 "#; let cl = ChangeLog::read_relaxed(text.as_bytes()).unwrap(); let entry = cl.iter().nth(1).unwrap(); assert_eq!(entry.package(), Some("lintian-brush".into())); assert_eq!(entry.version(), Some("0.35".parse().unwrap())); assert_eq!(entry.urgency(), Some(Urgency::Medium)); assert_eq!(entry.maintainer(), Some("Joe Example".into())); assert_eq!(entry.email(), Some("joe@example.com".into())); assert_eq!(entry.distributions(), Some(vec!["UNRELEASED".into()])); #[cfg(feature = "chrono")] assert_eq!( entry.datetime(), Some("2019-10-04T02:36:13+00:00".parse().unwrap()) ); } #[cfg(test)] mod entry_manipulate_tests { use super::*; #[test] fn test_append_change_line() { let mut cl = ChangeLog::new(); let entry = cl .new_empty_entry() .change_line("* A change.".into()) .finish(); entry.append_change_line("* Another change."); assert_eq!( r###" * A change. * Another change. -- "###, cl.to_string() ); } #[test] fn test_prepend_change_line() { let mut cl = ChangeLog::new(); let entry = cl .new_empty_entry() .change_line("* A change.".into()) .finish(); entry.prepend_change_line("* Another change."); assert_eq!( r###" * Another change. * A change. -- "###, cl.to_string() ); assert_eq!(entry.maintainer(), None); assert_eq!(entry.email(), None); assert_eq!(entry.timestamp(), None); assert_eq!(entry.package(), None); assert_eq!(entry.version(), None); } } #[cfg(test)] mod auto_add_change_tests { #[test] fn test_unreleased_existing() { let text = r#"lintian-brush (0.35) unstable; urgency=medium * This line already existed. [ Jane Example ] * And this one has an existing author. 
-- "#; let mut cl = super::ChangeLog::read(text.as_bytes()).unwrap(); let entry = cl.iter().next().unwrap(); assert_eq!(entry.package(), Some("lintian-brush".into())); assert_eq!(entry.is_unreleased(), Some(true)); let entry = cl .try_auto_add_change( &["* And this one is new."], ("Joe Example".to_string(), "joe@example.com".to_string()), None::, None, ) .unwrap(); assert_eq!(cl.iter().count(), 1); assert_eq!(entry.package(), Some("lintian-brush".into())); assert_eq!(entry.is_unreleased(), Some(true)); assert_eq!( entry.change_lines().collect::>(), &[ "* This line already existed.", "", "[ Jane Example ]", "* And this one has an existing author.", "", "[ Joe Example ]", "* And this one is new.", ] ); } } #[test] fn test_ensure_first_line() { let text = r#"lintian-brush (0.35) unstable; urgency=medium * This line already existed. [ Jane Example ] * And this one has an existing author. -- "#; let cl = ChangeLog::read(text.as_bytes()).unwrap(); let entry = cl.iter().next().unwrap(); assert_eq!(entry.package(), Some("lintian-brush".into())); entry.ensure_first_line("* QA upload."); entry.ensure_first_line("* QA upload."); assert_eq!( r#"lintian-brush (0.35) unstable; urgency=medium * QA upload. * This line already existed. [ Jane Example ] * And this one has an existing author. -- "#, cl.to_string() ); } #[test] fn test_set_version() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_version(&"3.3.5-1".parse().unwrap()); assert_eq!( r#"breezy (3.3.5-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#, entry.to_string() ); } #[test] fn test_set_package() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. 
-- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_package("bzr".into()); assert_eq!( r#"bzr (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#, entry.to_string() ); } #[test] fn test_set_distributions() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_distributions(vec!["unstable".into(), "experimental".into()]); assert_eq!( r#"breezy (3.3.4-1) unstable experimental; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#, entry.to_string() ); } #[test] fn test_set_distributions_no_existing() { let mut entry: Entry = r#"breezy (3.3.4-1); urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_distributions(vec!["unstable".into()]); assert!(entry.to_string().contains("unstable")); } #[test] fn test_set_maintainer() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_maintainer(("Joe Example".into(), "joe@example.com".into())); assert_eq!( r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Joe Example Mon, 04 Sep 2023 18:13:45 -0500 "#, entry.to_string() ); } #[test] fn test_set_timestamp() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_timestamp("Mon, 04 Sep 2023 18:13:46 -0500".into()); assert_eq!( r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:46 -0500 "#, entry.to_string() ); } #[test] #[cfg(feature = "chrono")] fn test_set_datetime() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. 
-- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_datetime("2023-09-04T18:13:46-05:00".parse().unwrap()); assert_eq!( r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:46 -0500 "#, entry.to_string() ); } #[test] fn test_set_urgency() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_urgency(Urgency::Medium); assert_eq!( r#"breezy (3.3.4-1) unstable; urgency=medium * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#, entry.to_string() ); } #[test] fn test_set_metadata() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_metadata("foo", "bar"); assert_eq!( r#"breezy (3.3.4-1) unstable; urgency=low foo=bar * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#, entry.to_string() ); } #[test] fn test_set_metadata_replace_existing() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low foo=old * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_metadata("foo", "new"); assert_eq!( r#"breezy (3.3.4-1) unstable; urgency=low foo=new * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#, entry.to_string() ); } #[test] fn test_set_metadata_after_distributions() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable experimental; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_metadata("foo", "bar"); assert_eq!( r#"breezy (3.3.4-1) unstable experimental; urgency=low foo=bar * New upstream release. 
-- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#, entry.to_string() ); } #[test] fn test_add_change_for_author() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. [ Jelmer Vernooij ] * A change by the maintainer. -- Joe Example Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry .try_add_change_for_author( &["A change by the maintainer."], ("Jelmer Vernooij".into(), "jelmer@debian.org".into()), ) .unwrap(); } #[test] fn test_changelog_from_entry_iter() { let text = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "#; let entry: Entry = text.parse().unwrap(); let cl = std::iter::once(entry).collect::(); assert_eq!(cl.to_string(), text); } #[test] fn test_pop_change_line() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. * Fixed bug #123. * Added new feature. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); // Test popping existing lines assert_eq!( entry.pop_change_line(), Some("* Added new feature.".to_string()) ); assert_eq!( entry.pop_change_line(), Some("* Fixed bug #123.".to_string()) ); assert_eq!( entry.pop_change_line(), Some("* New upstream release.".to_string()) ); // Test popping from empty entry assert_eq!(entry.pop_change_line(), None); } #[test] fn test_pop_change_line_empty_entry() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); assert_eq!(entry.pop_change_line(), None); } #[test] fn test_pop_change_line_empty_string() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * Something -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.pop_change_line(); entry.append_change_line(""); // Empty lines don't have DETAIL tokens, so pop_change_line returns None assert_eq!(entry.pop_change_line(), None); } #[test] fn test_append_change_line() { let entry: 
Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.append_change_line("* Fixed bug #456."); assert_eq!( entry.to_string(), r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. * Fixed bug #456. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# ); } #[test] fn test_append_change_line_empty() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.append_change_line(""); let lines: Vec = entry.change_lines().collect(); // Empty lines are not returned by change_lines() assert_eq!(lines.len(), 1); assert_eq!(lines[0], "* New upstream release.".to_string()); } #[test] fn test_changelog_write_to_path() { use tempfile::NamedTempFile; let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let temp_file = NamedTempFile::new().unwrap(); let path = temp_file.path().to_path_buf(); changelog.write_to_path(&path).unwrap(); let contents = std::fs::read_to_string(&path).unwrap(); assert_eq!(contents, changelog.to_string()); } #[test] fn test_changelog_into_iter() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 breezy (3.3.3-1) unstable; urgency=low * Previous release. -- Jelmer Vernooij Mon, 03 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let entries: Vec = changelog.into_iter().collect(); assert_eq!(entries.len(), 2); } #[test] fn test_set_version_no_existing() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. 
-- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry.set_version(&"1.0.0".parse().unwrap()); assert!(entry.to_string().contains("(1.0.0)")); } #[test] fn test_entry_footer_set_email_edge_cases() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); // Test checking email through entry assert_eq!(entry.email(), Some("jelmer@debian.org".to_string())); } #[test] fn test_entry_footer_set_maintainer_edge_cases() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); // Test setting maintainer entry.set_maintainer(("New Maintainer".into(), "new@example.com".into())); assert!(entry .to_string() .contains("New Maintainer ")); } #[test] fn test_entry_footer_set_timestamp_edge_cases() { let mut entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij "# .parse() .unwrap(); // Test setting timestamp when it's missing entry.set_timestamp("Mon, 04 Sep 2023 18:13:45 -0500".into()); assert!(entry .to_string() .contains("Mon, 04 Sep 2023 18:13:45 -0500")); } #[test] fn test_parse_multiple_distributions_frozen_unstable() { // Test case for https://github.com/jelmer/debian-changelog-rs/issues/93 // The "at" package has entries with "frozen unstable" distributions from 1998 const CHANGELOG: &str = r#"at (3.1.8-10) frozen unstable; urgency=high * Suidunregister /usr/bin (closes: Bug#59421). 
-- Siggy Brentrup Mon, 3 Apr 2000 13:56:47 +0200 "#; let parsed = parse(CHANGELOG); assert_eq!(parsed.errors(), &Vec::::new()); let root = parsed.tree(); let entries: Vec<_> = root.iter().collect(); assert_eq!(entries.len(), 1); let entry = &entries[0]; assert_eq!(entry.package(), Some("at".into())); assert_eq!(entry.version(), Some("3.1.8-10".parse().unwrap())); assert_eq!( entry.distributions(), Some(vec!["frozen".into(), "unstable".into()]) ); } #[test] fn test_parse_old_metadata_format_with_comma() { // Test case for https://github.com/jelmer/debian-changelog-rs/issues/93 // The "at" package has old-style metadata with comma-separated values const CHANGELOG: &str = r#"at (3.1.8-9) frozen unstable; urgency=low, closes=53715 56047 56607 55560 55514 * Added SIGCHLD handler to release zombies (closes 53715 56047 56607) -- Siggy Brentrup Sun, 30 Jan 2000 22:00:46 +0100 "#; let parsed = parse(CHANGELOG); // This old format currently fails to parse if !parsed.errors().is_empty() { eprintln!("Parse errors: {:?}", parsed.errors()); } assert_eq!(parsed.errors(), &Vec::::new()); let root = parsed.tree(); let entries: Vec<_> = root.iter().collect(); assert_eq!(entries.len(), 1); let entry = &entries[0]; assert_eq!(entry.package(), Some("at".into())); assert_eq!(entry.version(), Some("3.1.8-9".parse().unwrap())); assert_eq!( entry.distributions(), Some(vec!["frozen".into(), "unstable".into()]) ); assert_eq!(entry.urgency(), Some(Urgency::Low)); // Verify we can access the "closes" metadata let header = entry.header().unwrap(); let metadata: Vec<(String, String)> = header.metadata().collect(); // Should have both urgency and closes assert_eq!(metadata.len(), 2); assert!(metadata.iter().any(|(k, v)| k == "urgency" && v == "low")); // Get the closes value and verify exact match let closes_value = metadata .iter() .find(|(k, _)| k == "closes") .map(|(_, v)| v) .expect("closes metadata should exist"); assert_eq!(closes_value, "53715 56047 56607 55560 55514"); } #[test] fn 
test_entry_iter_changes_by_author() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low [ Author 1 ] * Change by Author 1 [ Author 2 ] * Change by Author 2 * Another change by Author 2 * Unattributed change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let changes = entry.iter_changes_by_author(); assert_eq!(changes.len(), 3); assert_eq!(changes[0].0, Some("Author 1".to_string())); assert_eq!(changes[0].2, vec!["* Change by Author 1".to_string()]); assert_eq!(changes[1].0, Some("Author 2".to_string())); assert_eq!( changes[1].2, vec![ "* Change by Author 2".to_string(), "* Another change by Author 2".to_string() ] ); assert_eq!(changes[2].0, None); assert_eq!(changes[2].2, vec!["* Unattributed change".to_string()]); } #[test] fn test_entry_get_authors() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low [ Author 1 ] * Change by Author 1 [ Author 2 ] * Change by Author 2 -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let authors = entry.get_authors(); assert_eq!(authors.len(), 2); assert!(authors.contains("Author 1")); assert!(authors.contains("Author 2")); // Maintainer should not be in the authors from change sections assert!(!authors.contains("Jelmer Vernooij")); } #[test] fn test_entry_get_maintainer_identity() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); let identity = entry.get_maintainer_identity().unwrap(); assert_eq!(identity.name, "Jelmer Vernooij"); assert_eq!(identity.email, "jelmer@debian.org"); } #[test] fn test_entry_get_maintainer_identity_missing() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. 
"# .parse() .unwrap(); let identity = entry.get_maintainer_identity(); assert!(identity.is_none()); } #[test] fn test_changelog_iter_by_author() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low * New upstream release. -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 breezy (3.3.3-1) unstable; urgency=low * Bug fix release. -- Jane Doe Sun, 03 Sep 2023 17:12:30 -0500 breezy (3.3.2-1) unstable; urgency=low * Another release. -- Jelmer Vernooij Sat, 02 Sep 2023 16:11:15 -0500 "# .parse() .unwrap(); let authors: Vec<(String, String, Vec)> = changelog.iter_by_author().collect(); assert_eq!(authors.len(), 2); assert_eq!(authors[0].0, "Jane Doe"); assert_eq!(authors[0].1, "jane@example.com"); assert_eq!(authors[0].2.len(), 1); assert_eq!(authors[1].0, "Jelmer Vernooij"); assert_eq!(authors[1].1, "jelmer@debian.org"); assert_eq!(authors[1].2.len(), 2); } #[test] fn test_changelog_get_all_authors() { let changelog: ChangeLog = r#"breezy (3.3.4-1) unstable; urgency=low [ Contributor 1 ] * Contribution * Main change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 breezy (3.3.3-1) unstable; urgency=low * Bug fix release. 
-- Jane Doe Sun, 03 Sep 2023 17:12:30 -0500 "# .parse() .unwrap(); let authors = changelog.get_all_authors(); assert_eq!(authors.len(), 3); let author_names: std::collections::HashSet = authors .iter() .map(|identity| identity.name.clone()) .collect(); assert!(author_names.contains("Jelmer Vernooij")); assert!(author_names.contains("Jane Doe")); assert!(author_names.contains("Contributor 1")); } #[test] fn test_add_changes_for_author_no_existing_sections() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low * Existing change -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry .try_add_changes_for_author("Alice", vec!["* Alice's change"]) .unwrap(); let lines: Vec<_> = entry.change_lines().collect(); // Should have wrapped existing changes in maintainer's section assert!(lines.iter().any(|l| l.contains("[ Jelmer Vernooij ]"))); // Should have added Alice's section assert!(lines.iter().any(|l| l.contains("[ Alice ]"))); // Should have both changes assert!(lines.iter().any(|l| l.contains("Existing change"))); assert!(lines.iter().any(|l| l.contains("Alice's change"))); } #[test] fn test_add_changes_for_author_with_existing_sections() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low [ Author 1 ] * Change by Author 1 -- Jelmer Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry .try_add_changes_for_author("Alice", vec!["* Alice's new change"]) .unwrap(); let lines: Vec<_> = entry.change_lines().collect(); // Should have Author 1's section assert!(lines.iter().any(|l| l.contains("[ Author 1 ]"))); // Should have added Alice's section assert!(lines.iter().any(|l| l.contains("[ Alice ]"))); // Should have both changes assert!(lines.iter().any(|l| l.contains("Change by Author 1"))); assert!(lines.iter().any(|l| l.contains("Alice's new change"))); } #[test] fn test_add_changes_for_author_same_author() { let entry: Entry = r#"breezy (3.3.4-1) unstable; urgency=low [ Alice ] * First change -- Jelmer 
Vernooij Mon, 04 Sep 2023 18:13:45 -0500 "# .parse() .unwrap(); entry .try_add_changes_for_author("Alice", vec!["* Second change"]) .unwrap(); let lines: Vec<_> = entry.change_lines().collect(); // Should only have one Alice section (not duplicated) let alice_count = lines.iter().filter(|l| l.contains("[ Alice ]")).count(); assert_eq!(alice_count, 1); // Should have both changes assert!(lines.iter().any(|l| l.contains("First change"))); assert!(lines.iter().any(|l| l.contains("Second change"))); } #[test] fn test_datetime_with_incorrect_day_of_week() { // Test for bug: datetime() should parse leniently even when day-of-week doesn't match // This changelog entry has "Mon, 22 Mar 2011" but Mar 22, 2011 was actually Tuesday let entry: Entry = r#"blah (0.1-2) UNRELEASED; urgency=medium * New release. -- Jelmer Vernooij Mon, 22 Mar 2011 16:47:42 +0000 "# .parse() .unwrap(); // timestamp() should return just the date portion assert_eq!( entry.timestamp(), Some("Mon, 22 Mar 2011 16:47:42 +0000".into()) ); // datetime() should successfully parse the timestamp despite incorrect day-of-week let datetime = entry.datetime(); assert!( datetime.is_some(), "datetime() should not return None for timestamp with incorrect day-of-week" ); assert_eq!(datetime.unwrap().to_rfc3339(), "2011-03-22T16:47:42+00:00"); } #[test] fn test_line_col() { let text = r#"foo (1.0-1) unstable; urgency=low * First change -- Maintainer Mon, 01 Jan 2024 12:00:00 +0000 bar (2.0-1) experimental; urgency=high * Second change * Third change -- Another Tue, 02 Jan 2024 13:00:00 +0000 "#; let changelog = text.parse::().unwrap(); // Test changelog root position assert_eq!(changelog.line(), 0); assert_eq!(changelog.column(), 0); assert_eq!(changelog.line_col(), (0, 0)); // Test entry line numbers let entries: Vec<_> = changelog.iter().collect(); assert_eq!(entries.len(), 2); // First entry starts at line 0 assert_eq!(entries[0].line(), 0); assert_eq!(entries[0].column(), 0); assert_eq!(entries[0].line_col(), (0, 
0)); // Second entry starts at line 6 (after first entry and empty line) assert_eq!(entries[1].line(), 6); assert_eq!(entries[1].column(), 0); assert_eq!(entries[1].line_col(), (6, 0)); // Test entry components let header = entries[0].header().unwrap(); assert_eq!(header.line(), 0); assert_eq!(header.column(), 0); let body = entries[0].body().unwrap(); assert_eq!(body.line(), 2); // Body starts at first change line let footer = entries[0].footer().unwrap(); assert_eq!(footer.line(), 4); // Footer line // Test maintainer and timestamp nodes let maintainer = entries[0].maintainer_node().unwrap(); assert_eq!(maintainer.line(), 4); // On footer line let timestamp = entries[0].timestamp_node().unwrap(); assert_eq!(timestamp.line(), 4); // On footer line // Verify second entry components let header2 = entries[1].header().unwrap(); assert_eq!(header2.line(), 6); let footer2 = entries[1].footer().unwrap(); assert_eq!(footer2.line(), 11); } } debian-changelog-0.2.14/src/textwrap.rs000064400000000000000000000543641046102023000161060ustar 00000000000000//! Text wrapping functions //! //! These functions are used to wrap text for use in a changelog. //! The main function is `textwrap`, which takes a string and wraps it to a //! specified width, without breaking in between "Closes: #XXXXXX" fragments. 
use lazy_regex::regex_captures; use std::borrow::Cow; use textwrap::core::Word; /// Default width for text wrapping pub const DEFAULT_WIDTH: usize = 78; /// Initial indent for text wrapping pub const INITIAL_INDENT: &str = "* "; #[inline] fn can_break_word(line: &str, pos: usize) -> bool { // Don't break if we're not at a space if !line[pos..].starts_with(' ') { return false; } // Check if breaking here would split "Closes: #" or "LP: #" // We need to look at the context around this position // Pattern: "Closes: #" - don't break between "Closes:" and "#" // or between ":" and " #" if pos >= 7 && &line[pos.saturating_sub(8)..pos] == "Closes: " && line[pos..].starts_with(" #") { // Don't break right after "Closes: " if followed by "#" return false; } // Also check if we're right after "Closes:" (before the space) if pos >= 7 && line[pos.saturating_sub(7)..pos].ends_with("Closes:") { return false; } // Pattern: "LP: #" - don't break between "LP:" and "#" if pos >= 3 && &line[pos.saturating_sub(4)..pos] == "LP: " && line[pos..].starts_with(" #") { return false; } if pos >= 3 && line[pos.saturating_sub(3)..pos].ends_with("LP:") { return false; } true } #[cfg(test)] mod can_break_word_tests { #[test] fn test_can_break_word() { assert!(super::can_break_word("foo bar", 3)); assert!(!super::can_break_word("foo bar", 0)); assert!(!super::can_break_word("foo bar", 5)); } #[test] fn test_can_break_word_edge_cases() { // Test position at end of string assert!(!super::can_break_word("foo", 3)); // Test empty string assert!(!super::can_break_word("", 0)); } #[test] fn test_closes() { // Test "Closes: #" at the start of line assert!( !super::can_break_word("Closes: #123456", 6), "Should not break after 'Closes:'" ); assert!( !super::can_break_word("Closes: #123456", 7), "Should not break between 'Closes:' and '#'" ); assert!( super::can_break_word("Closes: #123456 foo", 15), "Should break after bug number" ); // Test "Closes: #" in the middle of line (the bug scenario) assert!( 
!super::can_break_word("Fix bug (Closes: #123456)", 16), "Should not break after 'Closes:' in middle of line" ); assert!( !super::can_break_word("Fix bug (Closes: #123456)", 17), "Should not break between 'Closes:' and '#' in middle" ); // Test that we can break before "(Closes:" assert!( super::can_break_word("Fix bug (Closes: #123456)", 7), "Should be able to break before '(Closes:'" ); } #[test] fn test_lp() { // Test "LP: #" pattern assert!( !super::can_break_word("LP: #123456", 2), "Should not break after 'LP:'" ); assert!( !super::can_break_word("LP: #123456", 3), "Should not break between 'LP:' and '#'" ); assert!( super::can_break_word("LP: #123456 foo", 11), "Should break after bug number" ); // Test "LP: #" in the middle of line assert!( !super::can_break_word("Fix bug (LP: #123456)", 12), "Should not break after 'LP:' in middle of line" ); assert!( !super::can_break_word("Fix bug (LP: #123456)", 13), "Should not break between 'LP:' and '#' in middle" ); } } fn find_words<'a>(line: &'a str) -> Box> + 'a> { let mut start = 0; let mut can_break = false; let mut char_indices = line.char_indices(); Box::new(std::iter::from_fn(move || { for (idx, ch) in char_indices.by_ref() { let word_finished = can_break && ch != ' '; can_break = can_break_word(&line[start..], idx - start); if word_finished { let word = Word::from(&line[start..idx]); start = idx; return Some(word); } } if start < line.len() { let word = Word::from(&line[start..]); start = line.len(); return Some(word); } None })) } #[cfg(test)] mod find_words_tests { use super::find_words; use textwrap::core::Word; use textwrap::WordSeparator; #[test] fn test_find_words() { let ws = WordSeparator::Custom(find_words); assert_eq!( vec![Word::from("foo")], ws.find_words("foo").collect::>() ); assert_eq!( vec![Word::from("foo "), Word::from("bar")], ws.find_words("foo bar").collect::>() ); } #[test] fn test_split_closes() { let ws = WordSeparator::Custom(find_words); assert_eq!( vec![ Word::from("This "), 
Word::from("test "), Word::from("Closes: #123456 "), Word::from("foo"), ], ws.find_words("This test Closes: #123456 foo") .collect::>() ); assert_eq!( vec![ Word::from("This "), Word::from("test "), Word::from("Closes: #123456"), ], ws.find_words("This test Closes: #123456") .collect::>() ); } } fn options<'a>( width: Option, initial_indent: Option<&'a str>, subsequent_indent: Option<&'a str>, ) -> textwrap::Options<'a> { let width = width.unwrap_or(DEFAULT_WIDTH); let mut options = textwrap::Options::new(width) .break_words(false) .word_splitter(textwrap::WordSplitter::NoHyphenation) .word_separator(textwrap::WordSeparator::Custom(find_words)); if let Some(initial_indent) = initial_indent { options = options.initial_indent(initial_indent); } if let Some(subsequent_indent) = subsequent_indent { options = options.subsequent_indent(subsequent_indent); } options } /// Wrap a string of text, without breaking in between "Closes: #XXXXXX" fragments pub fn textwrap<'a>( text: &'a str, width: Option, initial_indent: Option<&str>, subsequent_indent: Option<&str>, ) -> Vec> { let options = options(width, initial_indent, subsequent_indent); // Actual text wrapping using textwrap crate textwrap::wrap(text, options) } #[cfg(test)] mod textwrap_tests { #[test] fn test_wrap_closes() { assert_eq!( vec!["And", "this", "fixes", "something.", "Closes: #123456"], super::textwrap( "And this fixes something. 
Closes: #123456", Some(5), None, None ) ); } #[test] fn test_wrap() { let ws = textwrap::WordSeparator::Custom(super::find_words); let options = textwrap::Options::new(30) .break_words(false) .word_separator(ws); assert_eq!( vec!["This", "is", "a", "line", "that", "has", "been", "broken"], ws.find_words("This is a line that has been broken") .map(|w| w.to_string()) .collect::>() ); assert_eq!( vec!["This is a line that has been", "broken"], textwrap::wrap("This is a line that has been broken", options) ); assert_eq!( vec!["This is a line that has been", "broken"], super::textwrap("This is a line that has been broken", Some(30), None, None) ); } } /// Check if two lines can join fn can_join(line1: &str, line2: &str) -> bool { if line1.ends_with(':') { return false; } if let Some(first_char) = line2.chars().next() { if first_char.is_uppercase() { if line1.ends_with(']') || line1.ends_with('}') { return false; } if !line1.ends_with('.') { return false; } } } if line2.trim_start().starts_with('*') || line2.trim_start().starts_with('-') || line2.trim_start().starts_with('+') { return false; } // don't let lines with different indentation join let line1_indent = line1.len() - line1.trim_start_matches(' ').len(); let line2_indent = line2.len() - line2.trim_start_matches(' ').len(); if line1_indent != line2_indent { return false; } true } #[cfg(test)] mod can_join_tests { #[test] fn test_can_join() { assert!(super::can_join("This is a line.", "This is a line.")); assert!(super::can_join( "This is a line.", "This is a line. And this is another." )); assert!(!super::can_join( "This is a line.", "+ This is a submititem." )); assert!(!super::can_join( "This is a line introducing:", " * A list item." )); assert!(!super::can_join( " Lines with different indentation", " can not join." 
)); } #[test] fn test_can_join_edge_cases() { // Test line ending with bracket assert!(!super::can_join("Some text]", "Uppercase text")); assert!(!super::can_join("Some text}", "Uppercase text")); // Test line ending with period and uppercase next line assert!(super::can_join("End with period.", "Uppercase text")); // Test line not ending with period and uppercase next line assert!(!super::can_join("No period", "Uppercase text")); // Test line2 starting with bullet points assert!(!super::can_join("Some text", " * bullet")); assert!(!super::can_join("Some text", " - bullet")); assert!(!super::can_join("Some text", " + bullet")); // Test line1 ending with colon assert!(!super::can_join("Introduction:", "some text")); // Test same indentation assert!(super::can_join(" same indent", " can join")); // Test empty lines assert!(super::can_join("", "")); } } // Check if any lines are longer than the specified width fn any_long_lines(lines: &[&str], width: usize) -> bool { lines.iter().any(|line| line.len() > width) } #[derive(Debug, PartialEq)] /// Text wrapping error pub enum Error { /// Missing bullet point in a line MissingBulletPoint { /// Line with missing bullet point line: String, }, /// Unexpected indent in a line UnexpectedIndent { /// Line number lineno: usize, /// Line with unexpected indent line: String, /// Found indent indent: usize, }, } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { Error::MissingBulletPoint { line } => { write!(f, "Missing bullet point in line: {}", line) } Error::UnexpectedIndent { lineno, line, indent, } => write!( f, "Unexpected indent in line {}: {} (expected {} spaces)", lineno, line, indent ), } } } impl std::error::Error for Error {} // Rewrap lines from a list of changes // // E.g.: // // * This is a long line that needs to be wrapped // // => // // * This is a short line that // needs to be wrappd // fn rewrap_change<'a>(change: &[&'a str], width: Option) -> 
Result>, Error> { let width = width.unwrap_or(DEFAULT_WIDTH); assert!(width > 4); if change.is_empty() { return Ok(vec![]); } let mut initial_indent = match regex_captures!(r"^[ ]*[\+\-\*] ", change[0]) { Some(initial_indent) => initial_indent.to_string(), None => { return Err(Error::MissingBulletPoint { line: change[0].to_string(), }) } }; let prefix_len = initial_indent.len(); if !any_long_lines(change, width) { return Ok(change.iter().map(|line| (*line).into()).collect()); } let mut subsequent_indent = " ".repeat(prefix_len); let mut lines = vec![&change[0][prefix_len..]]; // Strip the leading indentation from continuation lines // Accept any indentation >= 0, to handle varying indentation levels for line in change[1..].iter() { if line.is_empty() { // Empty line lines.push(line); } else if line.starts_with(' ') { // Line with indentation - determine how much to strip let line_indent = line.len() - line.trim_start_matches(' ').len(); if line_indent >= prefix_len { // Strip the prefix indentation lines.push(&line[prefix_len..]); } else { // Less indentation than prefix - just use the line as-is lines.push(line); } } else { // No indentation - use line as-is lines.push(line); } } let mut ret: Vec> = Vec::new(); let mut todo = vec![lines.remove(0)]; for line in lines.into_iter() { if can_join(todo.last().unwrap(), line) { todo.push(line); } else { ret.extend( textwrap( todo.join(" ").as_str(), Some(width), Some(initial_indent.as_str()), Some(subsequent_indent.as_str()), ) .iter() .map(|s| Cow::Owned(s.to_string())), ); initial_indent = " ".repeat(prefix_len + line.len() - line.trim_start_matches(' ').len()); subsequent_indent = " ".repeat(initial_indent.len()); todo = vec![line.trim_start_matches(' ')]; } } ret.extend( textwrap( todo.join(" ").as_str(), Some(width), Some(initial_indent.as_str()), Some(subsequent_indent.as_str()), ) .iter() .map(|s| Cow::Owned(s.to_string())), ); Ok(ret) } /// Rewrap lines from an iterator of changes /// /// Returns a Result 
containing the rewrapped lines or an error if rewrapping fails. pub fn try_rewrap_changes<'a>( changes: impl Iterator, ) -> Result>, Error> { let mut change = Vec::new(); let mut indent_len: Option = None; let mut ret = vec![]; for line in changes { // Start of a new change if let Some(indent) = regex_captures!(r"^[ ]*[\+\-\*] ", line) { if !change.is_empty() { ret.extend(rewrap_change(change.as_slice(), None)?); } indent_len = Some(indent.len()); change = vec![line]; } else if let Some(_current_indent) = indent_len { // Continuation line - keep full line with indentation change.push(line); } else { if !change.is_empty() { ret.extend(rewrap_change(change.as_slice(), None)?); } ret.push(line.into()); change = vec![]; } } if !change.is_empty() { ret.extend(rewrap_change(change.as_slice(), None)?); } Ok(ret) } /// Rewrap lines from an iterator of changes /// /// # Deprecated /// /// This function panics on errors. Use [`try_rewrap_changes`] instead for proper error handling. /// /// # Panics /// /// Panics if rewrapping fails (e.g., due to invalid formatting). 
#[deprecated( since = "0.2.10", note = "Use try_rewrap_changes for proper error handling" )] pub fn rewrap_changes<'a>( changes: impl Iterator, ) -> impl Iterator> { try_rewrap_changes(changes).unwrap().into_iter() } #[cfg(test)] mod rewrap_tests { use super::rewrap_change; const LONG_LINE: &str = "This is a very long line that could have been broken and should have been broken but was not broken."; #[test] fn test_too_short() { assert_eq!(Vec::<&str>::new(), rewrap_change(&[][..], None).unwrap()); assert_eq!( vec!["* Foo bar"], rewrap_change(&["* Foo bar"][..], None).unwrap() ); assert_eq!( vec!["* Foo", " bar"], rewrap_change(&["* Foo", " bar"][..], None).unwrap() ); assert_eq!( vec![" * Beginning", " next line"], rewrap_change(&[" * Beginning", " next line"][..], None).unwrap() ); } #[test] fn test_no_initial() { let long = "x".repeat(100); assert_eq!( super::Error::MissingBulletPoint { line: long.clone() }, rewrap_change(&[&long], None).unwrap_err() ); } #[test] fn test_wrap() { assert_eq!( vec![ super::Cow::Borrowed( "* This is a very long line that could have been broken and should have been" ), " broken but was not broken.".into() ], rewrap_change(&[format!("* {}", LONG_LINE).as_str()][..], None).unwrap() ); assert_eq!(r###" * Build-Depend on libsdl1.2-dev, libsdl-ttf2.0-dev and libsdl-mixer1.2-dev instead of with the embedded version, add -lSDL_ttf to --with-py-libs in debian/rules and rebootstrap (Closes: #382202)"###.split('\n').collect::>(), rewrap_change(r###" * Build-Depend on libsdl1.2-dev, libsdl-ttf2.0-dev and libsdl-mixer1.2-dev instead of with the embedded version, add -lSDL_ttf to --with-py-libs in debian/rules and rebootstrap (Closes: #382202) "###.split('\n').collect::>().as_slice(), None).unwrap()); } #[test] fn test_no_join() { assert_eq!(r###" - Translators know why this sign has been put here: _Choices: ${FOO}, !Other[ You only have to translate Other, remove the exclamation mark and this comment between brackets] Currently text, newt, 
slang and gtk frontends support this feature."###.split('\n').collect::>(), rewrap_change(r###" - Translators know why this sign has been put here: _Choices: ${FOO}, !Other[ You only have to translate Other, remove the exclamation mark and this comment between brackets] Currently text, newt, slang and gtk frontends support this feature. "###.split('\n').collect::>().as_slice(), None).unwrap()); } } #[cfg(test)] mod rewrap_changes_tests { use super::try_rewrap_changes; /// Test that long unbreakable lines (e.g., URLs) don't cause errors #[test] fn test_long_url() { let changes = vec![ " * Fix bug", " https://www.example.com/this/is/a/very/long/url/that/can/not/be/broken/because/it/is/longer/than/80/characters.", ]; let result = try_rewrap_changes(changes.into_iter()); assert!(result.is_ok(), "Should handle long URLs without error"); let lines = result.unwrap(); assert_eq!( lines, vec![ " * Fix bug", " https://www.example.com/this/is/a/very/long/url/that/can/not/be/broken/because/it/is/longer/than/80/characters." ] ); } /// Test that continuation lines have proper 4-space indentation after wrapping #[test] fn test_continuation_indent() { let changes = vec![ " * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to somebody who fixed it.", " * Provide informative error message when unarchive fails because the bug is not archived.", ]; let result = try_rewrap_changes(changes.into_iter()); assert!(result.is_ok(), "Should wrap successfully"); let lines = result.unwrap(); assert_eq!( lines, vec![ " * Fix blocks/blockedby of archived bugs (Closes: #XXXXXXX). Thanks to", " somebody who fixed it.", " * Provide informative error message when unarchive fails because the bug is", " not archived." 
] ); } /// Test that "Closes: #" pattern stays together when wrapping #[test] fn test_closes_tag_not_broken() { let changes = vec![ " * Fix blocks/blockedby of archived bugs and more blah blah blah bl (Closes: #XXXXXXX).", ]; let result = try_rewrap_changes(changes.into_iter()); assert!(result.is_ok(), "Should wrap successfully"); let lines = result.unwrap(); assert_eq!( lines, vec![ " * Fix blocks/blockedby of archived bugs and more blah blah blah bl", " (Closes: #XXXXXXX)." ] ); } /// Test handling of complex nested indentation structures #[test] fn test_complex_nested_indentation() { let changes = vec![ " * Main change item", " - Sub-item with 4 spaces", " + Nested sub-item with 6 spaces", " More text in nested item", " - Another sub-item", ]; let result = try_rewrap_changes(changes.into_iter()); assert!(result.is_ok(), "Should handle nested indentation"); let lines = result.unwrap(); assert_eq!( lines, vec![ " * Main change item", " - Sub-item with 4 spaces", " + Nested sub-item with 6 spaces", " More text in nested item", " - Another sub-item", ] ); } /// Test handling of empty lines between changes #[test] fn test_empty_lines() { let changes = vec![" * First change", "", " * Second change"]; let result = try_rewrap_changes(changes.into_iter()); assert!(result.is_ok(), "Should handle empty lines"); let lines = result.unwrap(); assert_eq!(lines, vec![" * First change", "", " * Second change"]); } } debian-changelog-0.2.14/tests/builder_tests.rs000064400000000000000000000051241046102023000174410ustar 00000000000000use debian_changelog::ChangeLog; #[test] fn test_entry_builder_verify_missing_package() { let mut cl = ChangeLog::new(); let builder = cl .new_empty_entry() .version("1.0.0".parse().unwrap()) .distributions(vec!["unstable".to_string()]); // Missing package - verify should fail let result = builder.verify(); assert!(result.is_err()); assert_eq!(result.unwrap_err(), "package is required"); } #[test] fn test_entry_builder_verify_missing_version() { let 
mut cl = ChangeLog::new(); let builder = cl .new_empty_entry() .package("test".to_string()) .distributions(vec!["unstable".to_string()]); // Missing version - verify should fail let result = builder.verify(); assert!(result.is_err()); assert_eq!(result.unwrap_err(), "version is required"); } #[test] fn test_entry_builder_verify_missing_distributions() { let mut cl = ChangeLog::new(); let builder = cl .new_empty_entry() .package("test".to_string()) .version("1.0.0".parse().unwrap()); // Missing distributions - verify should fail let result = builder.verify(); assert!(result.is_err()); assert_eq!(result.unwrap_err(), "at least one distribution is required"); } #[test] fn test_entry_builder_verify_empty_distributions() { let mut cl = ChangeLog::new(); let builder = cl .new_empty_entry() .package("test".to_string()) .version("1.0.0".parse().unwrap()) .distributions(vec![]); // Empty distributions - verify should fail let result = builder.verify(); assert!(result.is_err()); assert_eq!(result.unwrap_err(), "at least one distribution is required"); } #[test] fn test_entry_builder_verify_missing_change_lines() { let mut cl = ChangeLog::new(); let builder = cl .new_empty_entry() .package("test".to_string()) .version("1.0.0".parse().unwrap()) .distributions(vec!["unstable".to_string()]); // Missing change lines - verify should fail let result = builder.verify(); assert!(result.is_err()); assert_eq!(result.unwrap_err(), "at least one change line is required"); } #[test] fn test_entry_builder_verify_success() { let mut cl = ChangeLog::new(); let builder = cl .new_empty_entry() .package("test".to_string()) .version("1.0.0".parse().unwrap()) .distributions(vec!["unstable".to_string()]) .change_line("* Initial release.".to_string()); // All required fields present - verify should succeed let result = builder.verify(); assert!(result.is_ok()); } debian-changelog-0.2.14/tests/display_tests.rs000064400000000000000000000021111046102023000174510ustar 00000000000000use 
debian_changelog::{ChangeLog, Error}; use std::io; #[test] fn test_error_display() { // Test IO error display let io_error = io::Error::new(io::ErrorKind::NotFound, "file not found"); let error = Error::Io(io_error); let display = format!("{}", error); assert!(display.contains("IO error")); assert!(display.contains("file not found")); // Test Parse error display by triggering a parse error let result: Result = "invalid changelog".parse(); assert!(result.is_err()); if let Err(parse_error) = result { let error = Error::Parse(parse_error); let display = format!("{}", error); assert!(display.contains("Parse error")); } } #[test] fn test_parse_error_from_invalid_input() { // Test that parsing invalid input produces errors with proper display let result: Result = "INVALID".parse(); assert!(result.is_err()); if let Err(error) = result { let display = format!("{}", error); // Should contain some error message assert!(!display.is_empty()); } } debian-changelog-0.2.14/tests/parse_type.rs000064400000000000000000000145411046102023000167470ustar 00000000000000use debian_changelog::{ChangeLog, Parse}; #[test] fn test_parse_clone() { let changelog_text = r#"test (1.0.0) unstable; urgency=low * Initial release. -- Test User Mon, 04 Sep 2023 18:13:45 -0500 "#; let parsed: Parse = ChangeLog::parse(changelog_text); let cloned = parsed.clone(); // Verify that clone creates an equal object assert_eq!(parsed, cloned); // Verify they have the same content assert_eq!(parsed.green(), cloned.green()); assert_eq!(parsed.errors(), cloned.errors()); } #[test] fn test_parse_partial_eq() { let changelog1 = r#"test (1.0.0) unstable; urgency=low * Initial release. -- Test User Mon, 04 Sep 2023 18:13:45 -0500 "#; let changelog2 = r#"test (2.0.0) unstable; urgency=low * New version. 
-- Test User Mon, 04 Sep 2023 18:13:45 -0500 "#; let parsed1 = ChangeLog::parse(changelog1); let parsed2 = ChangeLog::parse(changelog2); let parsed1_clone = parsed1.clone(); // Same content should be equal assert_eq!(parsed1, parsed1_clone); // Different content should not be equal assert_ne!(parsed1, parsed2); } #[test] fn test_parse_with_errors() { // Parse some invalid changelog let invalid_text = "this is not a valid changelog"; let parsed = ChangeLog::parse(invalid_text); // Should have errors assert!(!parsed.ok()); assert!(!parsed.errors().is_empty()); // Clone should preserve errors let cloned = parsed.clone(); assert_eq!(parsed.errors(), cloned.errors()); assert_eq!(parsed, cloned); } #[test] fn test_parse_errors_accessor() { let invalid_text = "INVALID"; let parsed = ChangeLog::parse(invalid_text); // Access errors let errors = parsed.errors(); assert!(!errors.is_empty()); assert!(errors[0].contains("expected") || errors[0].contains("VERSION")); } #[test] fn test_parse_send_sync() { fn assert_send_sync() {} assert_send_sync::>(); } #[test] fn test_parse_to_result_with_errors() { let invalid_text = "INVALID CHANGELOG"; let parsed = ChangeLog::parse(invalid_text); // to_result should return Err when there are errors let result = parsed.to_result(); assert!(result.is_err()); match result { Err(_) => { // Expected error } Ok(_) => panic!("Expected error but got Ok"), } } #[test] fn test_parse_to_mut_result_with_errors() { let invalid_text = "INVALID CHANGELOG"; let parsed = ChangeLog::parse(invalid_text); // to_mut_result should return Err when there are errors let result = parsed.to_mut_result(); assert!(result.is_err()); match result { Err(_) => { // Expected error } Ok(_) => panic!("Expected error but got Ok"), } } #[test] fn test_parse_tree_mut() { let changelog_text = r#"test (1.0.0) unstable; urgency=low * Initial release. 
-- Test User Mon, 04 Sep 2023 18:13:45 -0500 "#; let parsed = ChangeLog::parse(changelog_text); let tree = parsed.tree_mut(); // Should be able to get a mutable tree assert_eq!(tree.iter().count(), 1); // Verify the content let entry = tree.iter().next().unwrap(); assert_eq!(entry.package(), Some("test".to_string())); assert_eq!(entry.version().unwrap().to_string(), "1.0.0"); } #[test] #[should_panic(expected = "tried to get tree with errors")] fn test_parse_tree_panics_with_errors() { let invalid_text = "INVALID"; let parsed = ChangeLog::parse(invalid_text); // This should panic because there are errors let _tree = parsed.tree(); } #[test] #[should_panic(expected = "tried to get tree with errors")] fn test_parse_tree_mut_panics_with_errors() { let invalid_text = "INVALID"; let parsed = ChangeLog::parse(invalid_text); // This should panic because there are errors let _tree = parsed.tree_mut(); } #[test] fn test_parse_equality_with_same_errors() { // Two parses of the same invalid input should be equal let invalid_text = "INVALID CHANGELOG"; let parsed1 = ChangeLog::parse(invalid_text); let parsed2 = ChangeLog::parse(invalid_text); assert_eq!(parsed1, parsed2); } #[test] fn test_parse_inequality_different_errors() { // Different invalid inputs should produce different Parse objects let invalid1 = "INVALID1"; let invalid2 = "INVALID2 (different)"; let parsed1 = ChangeLog::parse(invalid1); let parsed2 = ChangeLog::parse(invalid2); // They should not be equal because they have different green nodes assert_ne!(parsed1, parsed2); } #[test] fn test_invalid_version_no_panic() { // Test with an invalid version string that should not panic let changelog_text = r#"test (2.0.37+cvs.JCW_PRE2_2037-1) unstable; urgency=low * Initial release. 
-- Test User Mon, 04 Sep 2023 18:13:45 -0500 "#; let parsed = ChangeLog::parse(changelog_text); // If parsing fails, that's okay - just shouldn't panic if !parsed.ok() { // Expected to have errors with relaxed parsing assert!(!parsed.errors().is_empty()); } else { // If it parses successfully, accessing the entry should also not panic if let Some(entry) = parsed.tree().iter().next() { // Accessing version should not panic - this is the critical test let version_result = entry.version(); assert_eq!(version_result, None, "Invalid version should return None"); // try_version should return Some(Err(...)) for invalid version strings let try_result = entry.try_version(); match try_result { Some(Err(err)) => { // Expected: version token exists but parsing failed assert!( err.to_string().contains("Invalid version string") || err.to_string().contains("2.0.37+cvs.JCW_PRE2_2037-1"), "Error should mention invalid version: {}", err ); } Some(Ok(_)) => { panic!("Expected parsing to fail for invalid version string"); } None => { panic!("Expected Some(Err(...)) because version token exists but is invalid"); } } } } }