indexmap-2.12.1/.cargo_vcs_info.json0000644000000001360000000000100127240ustar { "git": { "sha1": "cfad7589c88e298e97449afb838c6a4b08c58394" }, "path_in_vcs": "" }indexmap-2.12.1/.github/workflows/ci.yml000064400000000000000000000113731046102023000162340ustar 00000000000000on: push: branches: [ main ] pull_request: branches: [ main ] merge_group: name: CI env: CARGO_TERM_COLOR: always CARGO_INCREMENTAL: 0 jobs: tests: runs-on: ubuntu-latest strategy: matrix: include: - rust: 1.82.0 # MSRV features: - rust: stable features: arbitrary - rust: stable features: quickcheck - rust: stable features: rayon - rust: stable features: serde - rust: stable features: sval - rust: stable features: borsh - rust: stable features: std - rust: beta features: - rust: nightly bench: test build benchmarks steps: - uses: actions/checkout@v4 - name: Lock MSRV-compatible dependencies if: matrix.rust == '1.82.0' env: CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback # Note that this uses the runner's pre-installed stable cargo run: cargo generate-lockfile - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust }} - name: Tests run: | cargo build --verbose --features "${{ matrix.features }}" cargo doc --verbose --features "${{ matrix.features }}" cargo test --verbose --features "${{ matrix.features }}" cargo test --release --verbose --features "${{ matrix.features }}" - name: Tests (serde) if: matrix.features == 'serde' run: | cargo test --verbose -p test-serde - name: Tests (sval) if: matrix.features == 'sval' run: | cargo test --verbose -p test-sval - name: Test run benchmarks if: matrix.bench != '' run: cargo test -v --benches nostd_build: runs-on: ubuntu-latest strategy: matrix: include: - rust: 1.82.0 target: thumbv6m-none-eabi - rust: stable target: thumbv6m-none-eabi steps: - uses: actions/checkout@v4 - name: Lock MSRV-compatible dependencies if: matrix.rust == '1.82.0' env: CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback # Note that this uses the runner's pre-installed stable cargo run: cargo generate-lockfile - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust }} target: ${{ matrix.target }} - name: Tests run: | cargo build -vv --target=${{ matrix.target }} --no-default-features cargo build -v -p test-nostd --target=${{ matrix.target }} clippy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@beta with: components: clippy - run: cargo clippy --all-features miri: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@nightly with: components: miri, rust-src - uses: taiki-e/install-action@v2 with: tool: cargo-nextest if: github.event_name == 'merge_group' - run: cargo miri nextest run if: github.event_name == 'merge_group' - run: cargo miri test --doc minimal-versions: name: Check MSRV and minimal-versions runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@nightly - uses: dtolnay/rust-toolchain@1.82.0 # MSRV - uses: taiki-e/install-action@v2 with: tool: cargo-hack - name: Lock minimal direct dependencies run: cargo +nightly hack generate-lockfile --remove-dev-deps -Z direct-minimal-versions env: CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback - name: Build (nightly) run: cargo +nightly build --verbose --all-features - name: Build (MSRV) run: cargo build --verbose --features arbitrary,quickcheck,serde,sval,rayon # One job that "summarizes" the success state of this pipeline. 
This can then be added to branch # protection, rather than having to add each job separately. success: name: Success runs-on: ubuntu-latest needs: [tests, nostd_build, clippy, miri, minimal-versions] # Github branch protection is exceedingly silly and treats "jobs skipped because a dependency # failed" as success. So we have to do some contortions to ensure the job fails if any of its # dependencies fails. if: always() # make sure this is never "skipped" steps: # Manually check the status of all dependencies. `if: failure()` does not work. - name: check if any dependency failed run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}' indexmap-2.12.1/.gitignore000064400000000000000000000000221046102023000134760ustar 00000000000000target Cargo.lock indexmap-2.12.1/.rustfmt.toml000064400000000000000000000000211046102023000141640ustar 00000000000000edition = "2021" indexmap-2.12.1/Cargo.lock0000644000000147410000000000100107060ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "arbitrary" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" [[package]] name = "borsh" version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" dependencies = [ "cfg_aliases", ] [[package]] name = "cfg-if" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" [[package]] name = "cfg_aliases" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "crossbeam-deque" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "either" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "equivalent" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "fastrand" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "getrandom" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", "wasi", ] 
[[package]] name = "hashbrown" version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" [[package]] name = "indexmap" version = "2.12.1" dependencies = [ "arbitrary", "borsh", "equivalent", "fastrand", "fnv", "hashbrown", "itertools", "quickcheck", "rayon", "serde", "serde_core", "sval", ] [[package]] name = "itertools" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ "either", ] [[package]] name = "libc" version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "proc-macro2" version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] [[package]] name = "quickcheck" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "rand", ] [[package]] name = "quote" version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "rayon" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", ] [[package]] name = "serde" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", "serde_derive", ] [[package]] name = "serde_core" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "sval" version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d94c4464e595f0284970fd9c7e9013804d035d4a61ab74b113242c874c05814d" [[package]] name = "syn" version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", 
"unicode-ident", ] [[package]] name = "unicode-ident" version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" indexmap-2.12.1/Cargo.toml0000644000000057010000000000100107250ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.82" name = "indexmap" version = "2.12.1" build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "A hash table with consistent order and fast iteration." documentation = "https://docs.rs/indexmap/" readme = "README.md" keywords = [ "hashmap", "no_std", ] categories = [ "data-structures", "no-std", ] license = "Apache-2.0 OR MIT" repository = "https://github.com/indexmap-rs/indexmap" [package.metadata.release] allow-branch = ["main"] sign-tag = true tag-name = "{{version}}" [package.metadata.docs.rs] features = [ "arbitrary", "quickcheck", "serde", "borsh", "rayon", "sval", ] rustdoc-args = [ "--cfg", "docsrs", ] [features] default = ["std"] serde = [ "dep:serde_core", "dep:serde", ] std = [] test_debug = [] [lib] name = "indexmap" path = "src/lib.rs" bench = false [[test]] name = "equivalent_trait" path = "tests/equivalent_trait.rs" [[test]] name = "macros_full_path" path = "tests/macros_full_path.rs" [[test]] name = "quick" path = "tests/quick.rs" [[test]] name = "tests" path = "tests/tests.rs" [[bench]] name = "bench" path = "benches/bench.rs" [[bench]] name = "faststring" path = "benches/faststring.rs" [dependencies.arbitrary] version = "1.0" optional = true default-features = false [dependencies.borsh] version = "1.2" optional = true default-features = false [dependencies.equivalent] version = "1.0" default-features = false [dependencies.hashbrown] version = "0.16.1" default-features = false [dependencies.quickcheck] version = "1.0" optional = true default-features = false [dependencies.rayon] version = "1.9" optional = true [dependencies.serde_core] version = "1.0.220" optional = true default-features = false [dependencies.sval] version = "2" optional = true default-features = false [dev-dependencies.fastrand] version = "2" default-features = false [dev-dependencies.fnv] version = "1.0" [dev-dependencies.itertools] version = "0.14" [dev-dependencies.quickcheck] version = "1.0" default-features = false [dev-dependencies.serde] version = "1.0" features = ["derive"] default-features = false [target."cfg(any())".dependencies.serde] version = "1.0.220" optional = true default-features = false [lints.clippy] style = "allow" [lints.rust] private-bounds = "deny" private-interfaces = "deny" rust-2018-idioms = "warn" unnameable-types = "deny" unreachable-pub = "deny" unsafe-code = "deny" [profile.bench] debug = 2 indexmap-2.12.1/Cargo.toml.orig000064400000000000000000000043011046102023000144010ustar 00000000000000[package] name = "indexmap" 
edition = "2021" version = "2.12.1" documentation = "https://docs.rs/indexmap/" repository = "https://github.com/indexmap-rs/indexmap" license = "Apache-2.0 OR MIT" description = "A hash table with consistent order and fast iteration." keywords = ["hashmap", "no_std"] categories = ["data-structures", "no-std"] rust-version = "1.82" [lib] bench = false [dependencies] equivalent = { version = "1.0", default-features = false } hashbrown = { version = "0.16.1", default-features = false } arbitrary = { version = "1.0", optional = true, default-features = false } quickcheck = { version = "1.0", optional = true, default-features = false } serde_core = { version = "1.0.220", optional = true, default-features = false } rayon = { version = "1.9", optional = true } sval = { version = "2", optional = true, default-features = false } # deprecated: use borsh's "indexmap" feature instead. borsh = { version = "1.2", optional = true, default-features = false } # serde v1.0.220 is the first version that released with `serde_core`. # This is required to avoid conflict with other `serde` users which may require an older version. [target.'cfg(any())'.dependencies] serde = { version = "1.0.220", default-features = false, optional = true } [dev-dependencies] itertools = "0.14" fastrand = { version = "2", default-features = false } quickcheck = { version = "1.0", default-features = false } fnv = "1.0" serde = { version = "1.0", default-features = false, features = ["derive"] } [features] default = ["std"] std = [] serde = ["dep:serde_core", "dep:serde"] # for testing only, of course test_debug = [] [profile.bench] debug = true [package.metadata.release] allow-branch = ["main"] sign-tag = true tag-name = "{{version}}" [package.metadata.docs.rs] features = ["arbitrary", "quickcheck", "serde", "borsh", "rayon", "sval"] rustdoc-args = ["--cfg", "docsrs"] [workspace] members = ["test-nostd", "test-serde", "test-sval"] [lints.rust] private-bounds = "deny" private-interfaces = "deny" unnameable-types = "deny" unreachable-pub = "deny" # We *mostly* avoid unsafe code, but there are a few fine-grained cases allowed unsafe-code = "deny" rust-2018-idioms = "warn" [lints.clippy] style = "allow" indexmap-2.12.1/LICENSE-APACHE000064400000000000000000000251371046102023000134500ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. indexmap-2.12.1/LICENSE-MIT000064400000000000000000000020311046102023000131440ustar 00000000000000Copyright (c) 2016--2017 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. indexmap-2.12.1/README.md000064400000000000000000000054021046102023000127740ustar 00000000000000# indexmap [![build status](https://github.com/indexmap-rs/indexmap/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/indexmap-rs/indexmap/actions) [![crates.io](https://img.shields.io/crates/v/indexmap.svg)](https://crates.io/crates/indexmap) [![docs](https://docs.rs/indexmap/badge.svg)](https://docs.rs/indexmap) [![rustc](https://img.shields.io/badge/rust-1.82%2B-orange.svg)](https://img.shields.io/badge/rust-1.82%2B-orange.svg) A pure-Rust hash table which preserves (in a limited sense) insertion order. This crate implements compact map and set data-structures, where the iteration order of the keys is independent of their hash or value. It preserves insertion order (except after removals), and it allows lookup of entries by either hash table key or numerical index. Note: this crate was originally released under the name `ordermap`, but it was renamed to `indexmap` to better reflect its features. The [`ordermap`](https://crates.io/crates/ordermap) crate now exists as a wrapper over `indexmap` with stronger ordering properties. # Background This was inspired by Python 3.6's new dict implementation (which remembers the insertion order and is fast to iterate, and is compact in memory). Some of those features were translated to Rust, and some were not. The result was indexmap, a hash table that has the following properties: - Order is **independent of hash function** and hash values of keys. - Fast to iterate. - Indexed in compact space. - Preserves insertion order **as long** as you don't call `.remove()`, `.swap_remove()`, or other methods that explicitly change order. The alternate `.shift_remove()` does preserve relative order. - Uses hashbrown for the inner table, just like Rust's libstd `HashMap` does. ## Performance `IndexMap` derives a couple of performance facts directly from how it is constructed, which is roughly: > A raw hash table of key-value indices, and a vector of key-value pairs. - Iteration is very fast since it is on the dense key-values. - Removal is fast since it moves memory areas only in the table, and uses a single swap in the vector. - Lookup is fast-ish because the initial 7-bit hash lookup uses SIMD, and indices are densely stored. Lookup is also slow-ish since the actual key-value pairs are stored separately. (Visible when CPU cache size is the limiting factor.) - In practice, `IndexMap` has been tested out as the hashmap in rustc in [PR45282] and the performance was roughly on par across the whole workload. - If you want the properties of `IndexMap`, or its strongest performance points fit your workload, it might be the best hash table implementation. [PR45282]: https://github.com/rust-lang/rust/pull/45282 # Recent Changes See [RELEASES.md](https://github.com/indexmap-rs/indexmap/blob/main/RELEASES.md). indexmap-2.12.1/RELEASES.md000064400000000000000000000523151046102023000132470ustar 00000000000000# Releases ## 2.12.1 (2025-11-20) - Simplified a lot of internals using `hashbrown`'s new bucket API. ## 2.12.0 (2025-10-17) - **MSRV**: Rust 1.82.0 or later is now required. - Updated the `hashbrown` dependency to 0.16 alone. - Error types now implement `core::error::Error`.
- Added `pop_if` methods to `IndexMap` and `IndexSet`, similar to the method for `Vec` added in Rust 1.86. ## 2.11.4 (2025-09-18) - Updated the `hashbrown` dependency to a range allowing 0.15 or 0.16. ## 2.11.3 (2025-09-15) - Make the minimum `serde` version only apply when "serde" is enabled. ## 2.11.2 (2025-09-15) - Switched the "serde" feature to depend on `serde_core`, improving build parallelism in cases where other dependents have enabled "serde/derive". ## 2.11.1 (2025-09-08) - Added a `get_key_value_mut` method to `IndexMap`. - Removed the unnecessary `Ord` bound on `insert_sorted_by` methods. ## 2.11.0 (2025-08-22) - Added `insert_sorted_by` and `insert_sorted_by_key` methods to `IndexMap`, `IndexSet`, and `VacantEntry`, like customizable versions of `insert_sorted`. - Added `is_sorted`, `is_sorted_by`, and `is_sorted_by_key` methods to `IndexMap` and `IndexSet`, as well as their `Slice` counterparts. - Added `sort_by_key` and `sort_unstable_by_key` methods to `IndexMap` and `IndexSet`, as well as parallel counterparts. - Added `replace_index` methods to `IndexMap`, `IndexSet`, and `VacantEntry` to replace the key (or set value) at a given index. - Added optional `sval` serialization support. ## 2.10.0 (2025-06-26) - Added `extract_if` methods to `IndexMap` and `IndexSet`, similar to the methods for `HashMap` and `HashSet` with ranges like `Vec::extract_if`. - Added more `#[track_caller]` annotations to functions that may panic. ## 2.9.0 (2025-04-04) - Added a `get_disjoint_mut` method to `IndexMap`, matching Rust 1.86's `HashMap` method. - Added a `get_disjoint_indices_mut` method to `IndexMap` and `map::Slice`, matching Rust 1.86's `get_disjoint_mut` method on slices. - Deprecated the `borsh` feature in favor of their own `indexmap` feature, solving a cyclic dependency that occurred via `borsh-derive`. ## 2.8.0 (2025-03-10) - Added `indexmap_with_default!` and `indexset_with_default!` to be used with alternative hashers, especially when using the crate without `std`. - Implemented `PartialEq` between each `Slice` and `[]`/arrays. - Removed the internal `rustc-rayon` feature and dependency. ## 2.7.1 (2025-01-19) - Added `#[track_caller]` to functions that may panic. - Improved memory reservation for `insert_entry`. ## 2.7.0 (2024-11-30) - Added methods `Entry::insert_entry` and `VacantEntry::insert_entry`, returning an `OccupiedEntry` after insertion. ## 2.6.0 (2024-10-01) - Implemented `Clone` for `map::IntoIter` and `set::IntoIter`. - Updated the `hashbrown` dependency to version 0.15. ## 2.5.0 (2024-08-30) - Added an `insert_before` method to `IndexMap` and `IndexSet`, as an alternative to `shift_insert` with different behavior on existing entries. - Added `first_entry` and `last_entry` methods to `IndexMap`. - Added `From` implementations between `IndexedEntry` and `OccupiedEntry`. ## 2.4.0 (2024-08-13) - Added methods `IndexMap::append` and `IndexSet::append`, moving all items from one map or set into another, and leaving the original capacity for reuse. ## 2.3.0 (2024-07-31) - Added trait `MutableEntryKey` for opt-in mutable access to map entry keys. - Added method `MutableKeys::iter_mut2` for opt-in mutable iteration of map keys and values. ## 2.2.6 (2024-03-22) - Added trait `MutableValues` for opt-in mutable access to set values. ## 2.2.5 (2024-02-29) - Added optional `borsh` serialization support. ## 2.2.4 (2024-02-28) - Added an `insert_sorted` method on `IndexMap`, `IndexSet`, and `VacantEntry`. - Avoid hashing for lookups in single-entry maps. 
- Limit preallocated memory in `serde` deserializers. ## 2.2.3 (2024-02-11) - Added `move_index` and `swap_indices` methods to `IndexedEntry`, `OccupiedEntry`, and `RawOccupiedEntryMut`, functioning like the existing methods on `IndexMap`. - Added `shift_insert` methods on `VacantEntry` and `RawVacantEntryMut`, as well as `shift_insert_hashed_nocheck` on the latter, to insert the new entry at a particular index. - Added `shift_insert` methods on `IndexMap` and `IndexSet` to insert a new entry at a particular index, or else move an existing entry there. ## 2.2.2 (2024-01-31) - Added indexing methods to raw entries: `RawEntryBuilder::from_hash_full`, `RawEntryBuilder::index_from_hash`, and `RawEntryMut::index`. ## 2.2.1 (2024-01-28) - Corrected the signature of `RawOccupiedEntryMut::into_key(self) -> &'a mut K`. This is a breaking change from 2.2.0, but that version was published for less than a day and has now been yanked. ## 2.2.0 (2024-01-28) - The new `IndexMap::get_index_entry` method finds an entry by its index for in-place manipulation. - The `Keys` iterator now implements `Index` for quick access to the entry's key, compared to indexing the map to get the value. - The new `IndexMap::splice` and `IndexSet::splice` methods will drain the given range as an iterator, and then replace that range with entries from an input iterator. - The new trait `RawEntryApiV1` offers opt-in access to a raw entry API for `IndexMap`, corresponding to the unstable API on `HashSet` as of Rust 1.75. - Many `IndexMap` and `IndexSet` methods have relaxed their type constraints, e.g. removing `K: Hash` on methods that don't actually need to hash. - Removal methods `remove`, `remove_entry`, and `take` are now deprecated in favor of their `shift_` or `swap_` prefixed variants, which are more explicit about their effect on the index and order of remaining items. The deprecated methods will remain to guide drop-in replacements from `HashMap` and `HashSet` toward the prefixed methods. ## 2.1.0 (2023-10-31) - Empty slices can now be created with `map::Slice::{new, new_mut}` and `set::Slice::new`. In addition, `Slice::new`, `len`, and `is_empty` are now `const` functions on both types. - `IndexMap`, `IndexSet`, and their respective `Slice`s all have binary search methods for sorted data: map `binary_search_keys` and set `binary_search` for plain comparison, `binary_search_by` for custom comparators, `binary_search_by_key` for key extraction, and `partition_point` for boolean conditions (see the short example after the 1.7.0 entry below). ## 2.0.2 (2023-09-29) - The `hashbrown` dependency has been updated to version 0.14.1 to complete the support for Rust 1.63. ## 2.0.1 (2023-09-27) - **MSRV**: Rust 1.63.0 is now supported as well, pending publication of `hashbrown`'s relaxed MSRV (or use cargo `--ignore-rust-version`). ## 2.0.0 (2023-06-23) - **MSRV**: Rust 1.64.0 or later is now required. - The `"std"` feature is no longer auto-detected. It is included in the default feature set, or else can be enabled like any other Cargo feature. - The `"serde-1"` feature has been removed, leaving just the optional `"serde"` dependency to be enabled like a feature itself. - `IndexMap::get_index_mut` now returns `Option<(&K, &mut V)>`, changing the key part from `&mut K` to `&K`. There is also a new alternative `MutableKeys::get_index_mut2` to access the former behavior. - The new `map::Slice` and `set::Slice` offer a linear view of maps and sets, behaving a lot like normal `[(K, V)]` and `[T]` slices.
Notably, comparison traits like `Eq` only consider items in order, rather than hash lookups, and slices even implement `Hash`. - `IndexMap` and `IndexSet` now have `sort_by_cached_key` and `par_sort_by_cached_key` methods which perform stable sorts in place using a key extraction function. - `IndexMap` and `IndexSet` now have `reserve_exact`, `try_reserve`, and `try_reserve_exact` methods that correspond to the same methods on `Vec`. However, exactness only applies to the direct capacity for items, while the raw hash table still follows its own rules for capacity and load factor. - The `Equivalent` trait is now re-exported from the `equivalent` crate, intended as a common base to allow types to work with multiple map types. - The `hashbrown` dependency has been updated to version 0.14. - The `serde_seq` module has been moved from the crate root to below the `map` module. ## 1.9.3 (2023-03-24) - Bump the `rustc-rayon` dependency, for compiler use only. ## 1.9.2 (2022-11-17) - `IndexMap` and `IndexSet` both implement `arbitrary::Arbitrary<'_>` and `quickcheck::Arbitrary` if those optional dependency features are enabled. ## 1.9.1 (2022-06-21) - The MSRV now allows Rust 1.56.0 as well. However, currently `hashbrown` 0.12.1 requires 1.56.1, so users on 1.56.0 should downgrade that to 0.12.0 until there is a later published version relaxing its requirement. ## 1.9.0 (2022-06-16) - **MSRV**: Rust 1.56.1 or later is now required. - The `hashbrown` dependency has been updated to version 0.12. - `IterMut` and `ValuesMut` now implement `Debug`. - The new `IndexMap::shrink_to` and `IndexSet::shrink_to` methods shrink the capacity with a lower bound. - The new `IndexMap::move_index` and `IndexSet::move_index` methods change the position of an item from one index to another, shifting the items between to accommodate the move. ## 1.8.2 (2022-05-27) - Bump the `rustc-rayon` dependency, for compiler use only. ## 1.8.1 (2022-03-29) - The new `IndexSet::replace_full` will return the index of the item along with the replaced value, if any, by @zakcutner in PR [222]. [222]: https://github.com/indexmap-rs/indexmap/pull/222 ## 1.8.0 (2022-01-07) - The new `IndexMap::into_keys` and `IndexMap::into_values` will consume the map into keys or values, respectively, matching Rust 1.54's `HashMap` methods, by @taiki-e in PR [195]. - More of the iterator types implement `Debug`, `ExactSizeIterator`, and `FusedIterator`, by @cuviper in PR [196]. - `IndexMap` and `IndexSet` now implement rayon's `ParallelDrainRange`, by @cuviper in PR [197]. - `IndexMap::with_hasher` and `IndexSet::with_hasher` are now `const` functions, allowing static maps and sets, by @mwillsey in PR [203]. - `IndexMap` and `IndexSet` now implement `From` for arrays, matching Rust 1.56's implementation for `HashMap`, by @rouge8 in PR [205]. - `IndexMap` and `IndexSet` now have methods `sort_unstable_keys`, `sort_unstable_by`, `sorted_unstable_by`, and `par_*` equivalents, which sort in-place without preserving the order of equal items, by @bhgomes in PR [211]. [195]: https://github.com/indexmap-rs/indexmap/pull/195 [196]: https://github.com/indexmap-rs/indexmap/pull/196 [197]: https://github.com/indexmap-rs/indexmap/pull/197 [203]: https://github.com/indexmap-rs/indexmap/pull/203 [205]: https://github.com/indexmap-rs/indexmap/pull/205 [211]: https://github.com/indexmap-rs/indexmap/pull/211 ## 1.7.0 (2021-06-29) - **MSRV**: Rust 1.49 or later is now required. - The `hashbrown` dependency has been updated to version 0.11. 
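As a quick, hedged illustration of the index-oriented and binary-search APIs referenced in the 2.x entries above (a minimal sketch for orientation, not taken verbatim from the crate's documentation):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::new();
    map.insert("b", 2);
    map.insert("a", 1);
    map.insert("c", 3);

    // Entries keep insertion order and are addressable by position.
    assert_eq!(map.get_index(0), Some((&"b", &2)));
    assert_eq!(map.get_index_of("c"), Some(2));

    // After sorting by key, the 2.1.0 binary-search methods apply.
    map.sort_keys();
    assert_eq!(map.binary_search_keys(&"b"), Ok(1));

    // `swap_remove` is O(1) but moves the last entry into the hole,
    // while `shift_remove` preserves the relative order of the rest.
    map.swap_remove("a");
    assert_eq!(map.get_index(0), Some((&"c", &3)));
}
```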
## 1.6.2 (2021-03-05) - Fixed to match `std` behavior, `OccupiedEntry::key` now references the existing key in the map instead of the lookup key, by @cuviper in PR [170]. - The new `Entry::or_insert_with_key` matches Rust 1.50's `Entry` method, passing `&K` to the callback to create a value, by @cuviper in PR [175]. [170]: https://github.com/indexmap-rs/indexmap/pull/170 [175]: https://github.com/indexmap-rs/indexmap/pull/175 ## 1.6.1 (2020-12-14) - The new `serde_seq` module implements `IndexMap` serialization as a sequence to ensure order is preserved, by @cuviper in PR [158]. - New methods on maps and sets work like the `Vec`/slice methods by the same name: `truncate`, `split_off`, `first`, `first_mut`, `last`, `last_mut`, and `swap_indices`, by @cuviper in PR [160]. [158]: https://github.com/indexmap-rs/indexmap/pull/158 [160]: https://github.com/indexmap-rs/indexmap/pull/160 ## 1.6.0 (2020-09-05) - **MSRV**: Rust 1.36 or later is now required. - The `hashbrown` dependency has been updated to version 0.9. ## 1.5.2 (2020-09-01) - The new "std" feature will force the use of `std` for users that explicitly want the default `S = RandomState`, bypassing the autodetection added in 1.3.0, by @cuviper in PR [145]. [145]: https://github.com/indexmap-rs/indexmap/pull/145 ## 1.5.1 (2020-08-07) - Values can now be indexed by their `usize` position by @cuviper in PR [132]. - Some of the generic bounds have been relaxed to match `std` by @cuviper in PR [141]. - `drain` now accepts any `R: RangeBounds` by @cuviper in PR [142]. [132]: https://github.com/indexmap-rs/indexmap/pull/132 [141]: https://github.com/indexmap-rs/indexmap/pull/141 [142]: https://github.com/indexmap-rs/indexmap/pull/142 ## 1.5.0 (2020-07-17) - **MSRV**: Rust 1.32 or later is now required. - The inner hash table is now based on `hashbrown` by @cuviper in PR [131]. This also completes the method `reserve` and adds `shrink_to_fit`. - Add new methods `get_key_value`, `remove_entry`, `swap_remove_entry`, and `shift_remove_entry`, by @cuviper in PR [136] - `Clone::clone_from` reuses allocations by @cuviper in PR [125] - Add new method `reverse` by @linclelinkpart5 in PR [128] [125]: https://github.com/indexmap-rs/indexmap/pull/125 [128]: https://github.com/indexmap-rs/indexmap/pull/128 [131]: https://github.com/indexmap-rs/indexmap/pull/131 [136]: https://github.com/indexmap-rs/indexmap/pull/136 ## 1.4.0 (2020-06-01) - Add new method `get_index_of` by @Thermatrix in PR [115] and [120] - Fix build script rebuild-if-changed configuration to use "build.rs"; fixes issue [123]. Fix by @cuviper. - Dev-dependencies (rand and quickcheck) have been updated. The crate's tests now run using Rust 1.32 or later (MSRV for building the crate has not changed). by @kjeremy and @bluss [123]: https://github.com/indexmap-rs/indexmap/issues/123 [115]: https://github.com/indexmap-rs/indexmap/pull/115 [120]: https://github.com/indexmap-rs/indexmap/pull/120 ## 1.3.2 (2020-02-05) - Maintenance update to regenerate the published `Cargo.toml`. ## 1.3.1 (2020-01-15) - Maintenance update for formatting and `autocfg` 1.0. ## 1.3.0 (2019-10-18) - The deprecation messages in the previous version have been removed. (The methods have not otherwise changed.) Docs for removal methods have been improved. - From Rust 1.36, this crate supports being built **without std**, requiring `alloc` instead. This is enabled automatically when it is detected that `std` is not available. There is no crate feature to enable/disable to trigger this. 
The new build-dep `autocfg` enables this. ## 1.2.0 (2019-09-08) - Plain `.remove()` now has a deprecation message; it informs the user about picking one of the removal functions `swap_remove` and `shift_remove` which have different performance and order semantics. Plain `.remove()` will not be removed, the warning message and method will remain until further notice. - Add new method `shift_remove` for order preserving removal on the map, and `shift_take` for the corresponding operation on the set. - Add methods `swap_remove`, `swap_remove_entry` to `Entry`. - Fix indexset/indexmap to support full paths, like `indexmap::indexmap!()` - Internal improvements: fix warnings, deprecations and style lints ## 1.1.0 (2019-08-20) - Added optional feature `"rayon"` that adds parallel iterator support to `IndexMap` and `IndexSet` using Rayon. This includes all the regular iterators in parallel versions, and parallel sort. - Implemented `Clone` for `map::{Iter, Keys, Values}` and `set::{Difference, Intersection, Iter, SymmetricDifference, Union}` - Implemented `Debug` for `map::{Entry, IntoIter, Iter, Keys, Values}` and `set::{Difference, Intersection, IntoIter, Iter, SymmetricDifference, Union}` - Serde trait `IntoDeserializer` is implemented for `IndexMap` and `IndexSet`. - Minimum Rust version requirement increased to Rust 1.30 for development builds. ## 1.0.2 (2018-10-22) - The new methods `IndexMap::insert_full` and `IndexSet::insert_full` are both like `insert` with the index included in the return value. - The new method `Entry::and_modify` can be used to modify occupied entries, matching the new methods of `std` maps in Rust 1.26. - The new method `Entry::or_default` inserts a default value in unoccupied entries, matching the new methods of `std` maps in Rust 1.28. ## 1.0.1 (2018-03-24) - Document Rust version policy for the crate (see rustdoc) ## 1.0.0 (2018-03-11) - This is the 1.0 release for `indexmap`! (the crate and datastructure formerly known as “ordermap”) - `OccupiedEntry::insert` changed its signature, to use `&mut self` for the method receiver, matching the equivalent method for a standard `HashMap`. Thanks to @dtolnay for finding this bug. - The deprecated old names from ordermap were removed: `OrderMap`, `OrderSet`, `ordermap!{}`, `orderset!{}`. Use the new `IndexMap` etc names instead. ## 0.4.1 (2018-02-14) - Renamed crate to `indexmap`; the `ordermap` crate is now deprecated and the types `OrderMap/Set` now have a deprecation notice. ## 0.4.0 (2018-02-02) - This is the last release series for this `ordermap` under that name, because the crate is **going to be renamed** to `indexmap` (with types `IndexMap`, `IndexSet`) and no change in functionality! - The map and its associated structs moved into the `map` submodule of the crate, so that the map and set are symmetric + The iterators, `Entry` and other structs are now under `ordermap::map::` - Internally refactored `OrderMap` so that all the main algorithms (insertion, lookup, removal etc) that don't use the `S` parameter (the hasher) are compiled without depending on `S`, which reduces generics bloat. - `Entry` no longer has a type parameter `S`, which is just like the standard `HashMap`'s entry.
- Minimum Rust version requirement increased to Rust 1.18 ## 0.3.5 (2018-01-14) - Documentation improvements ## 0.3.4 (2018-01-04) - The `.retain()` methods for `OrderMap` and `OrderSet` now traverse the elements in order, and the retained elements **keep their order** - Added new methods `.sort_by()`, `.sort_keys()` to `OrderMap` and `.sort_by()`, `.sort()` to `OrderSet`. These methods allow you to sort the maps in place efficiently. ## 0.3.3 (2017-12-28) - Document insertion behaviour better by @lucab - Updated dependences (no feature changes) by @ignatenkobrain ## 0.3.2 (2017-11-25) - Add `OrderSet` by @cuviper! - `OrderMap::drain` is now (too) a double ended iterator. ## 0.3.1 (2017-11-19) - In all ordermap iterators, forward the `collect` method to the underlying iterator as well. - Add crates.io categories. ## 0.3.0 (2017-10-07) - The methods `get_pair`, `get_pair_index` were both replaced by `get_full` (and the same for the mutable case). - Method `swap_remove_pair` replaced by `swap_remove_full`. - Add trait `MutableKeys` for opt-in mutable key access. Mutable key access is only possible through the methods of this extension trait. - Add new trait `Equivalent` for key equivalence. This extends the `Borrow` trait mechanism for `OrderMap::get` in a backwards compatible way, just some minor type inference related issues may become apparent. See [#10] for more information. - Implement `Extend<(&K, &V)>` by @xfix. [#10]: https://github.com/indexmap-rs/indexmap/pull/10 ## 0.2.13 (2017-09-30) - Fix deserialization to support custom hashers by @Techcable. - Add methods `.index()` on the entry types by @garro95. ## 0.2.12 (2017-09-11) - Add methods `.with_hasher()`, `.hasher()`. ## 0.2.11 (2017-08-29) - Support `ExactSizeIterator` for the iterators. By @Binero. - Use `Box<[Pos]>` internally, saving a word in the `OrderMap` struct. - Serde support, with crate feature `"serde-1"`. By @xfix. ## 0.2.10 (2017-04-29) - Add iterator `.drain(..)` by @stevej. ## 0.2.9 (2017-03-26) - Add method `.is_empty()` by @overvenus. - Implement `PartialEq, Eq` by @overvenus. - Add method `.sorted_by()`. ## 0.2.8 (2017-03-01) - Add iterators `.values()` and `.values_mut()`. - Fix compatibility with 32-bit platforms. ## 0.2.7 (2016-11-02) - Add `.retain()`. ## 0.2.6 (2016-11-02) - Add `OccupiedEntry::remove_entry` and other minor entry methods, so that it now has all the features of `HashMap`'s entries. ## 0.2.5 (2016-10-31) - Improved `.pop()` slightly. ## 0.2.4 (2016-10-22) - Improved performance of `.insert()` ([#3]) by @pczarn. [#3]: https://github.com/indexmap-rs/indexmap/pull/3 ## 0.2.3 (2016-10-11) - Generalize `Entry` for now, so that it works on hashmaps with non-default hasher. However, there's a lingering compat issue since libstd `HashMap` does not parameterize its entries by the hasher (`S` typarm). - Special case some iterator methods like `.nth()`. ## 0.2.2 (2016-10-02) - Disable the verbose `Debug` impl by default. ## 0.2.1 (2016-10-02) - Fix doc links and clarify docs. ## 0.2.0 (2016-10-01) - Add more `HashMap` methods & compat with its API. - Experimental support for `.entry()` (the simplest parts of the API). - Add `.reserve()` (placeholder impl). - Add `.remove()` as synonym for `.swap_remove()`. - Changed `.insert()` to swap value if the entry already exists, and return `Option`. - Experimental support as an *indexed* hash map! Added methods `.get_index()`, `.get_index_mut()`, `.swap_remove_index()`, `.get_pair_index()`, `.get_pair_index_mut()`. 
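To make the `Equivalent` mechanism from the 0.3.0 entry above concrete (the trait is re-exported from the `equivalent` crate since 2.0.0), here is a small hedged sketch; the `Pair` and `PairRef` types are invented for illustration and are not part of the crate:

```rust
use indexmap::{Equivalent, IndexMap};

// An owned composite key...
#[derive(Hash, PartialEq, Eq)]
struct Pair(String, String);

// ...and a cheap borrowed form used only for lookups.
// Its derived hash matches `Pair`'s, since `String` hashes like `str`.
#[derive(Hash)]
struct PairRef<'a>(&'a str, &'a str);

impl Equivalent<Pair> for PairRef<'_> {
    fn equivalent(&self, key: &Pair) -> bool {
        self.0 == key.0 && self.1 == key.1
    }
}

fn main() {
    let mut map = IndexMap::new();
    map.insert(Pair("index".into(), "map".into()), 1);
    // Look up by the borrowed form without allocating a `Pair`.
    assert_eq!(map.get(&PairRef("index", "map")), Some(&1));
}
```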
## 0.1.2 (2016-09-19) - Implement the 32/32 split idea for `Pos` which improves cache utilization and lookup performance. ## 0.1.1 (2016-09-16) - Initial release. indexmap-2.12.1/benches/bench.rs000064400000000000000000000412251046102023000145540ustar 00000000000000#![feature(test)] extern crate test; use fnv::FnvHasher; use std::hash::BuildHasherDefault; use std::hash::Hash; use std::hint::black_box; use std::sync::LazyLock; type FnvBuilder = BuildHasherDefault; use test::Bencher; use indexmap::IndexMap; use std::collections::HashMap; /// Use a consistently seeded Rng for benchmark stability fn small_rng() -> fastrand::Rng { let seed = u64::from_le_bytes(*b"indexmap"); fastrand::Rng::with_seed(seed) } #[bench] fn new_hashmap(b: &mut Bencher) { b.iter(|| HashMap::::new()); } #[bench] fn new_indexmap(b: &mut Bencher) { b.iter(|| IndexMap::::new()); } #[bench] fn with_capacity_10e5_hashmap(b: &mut Bencher) { b.iter(|| HashMap::::with_capacity(10_000)); } #[bench] fn with_capacity_10e5_indexmap(b: &mut Bencher) { b.iter(|| IndexMap::::with_capacity(10_000)); } #[bench] fn insert_hashmap_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn insert_indexmap_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn insert_hashmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(x.to_string(), ()); } map }); } #[bench] fn insert_indexmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x.to_string(), ()); } map }); } #[bench] fn insert_hashmap_str_10_000(b: &mut Bencher) { let c = 10_000; let ss = Vec::from_iter((0..c).map(|x| x.to_string())); b.iter(|| { let mut map = HashMap::with_capacity(c); for key in &ss { map.insert(&key[..], ()); } map }); } #[bench] fn insert_indexmap_str_10_000(b: &mut Bencher) { let c = 10_000; let ss = Vec::from_iter((0..c).map(|x| x.to_string())); b.iter(|| { let mut map = IndexMap::with_capacity(c); for key in &ss { map.insert(&key[..], ()); } map }); } #[bench] fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) { let c = 10_000; let value = [0u64; 10]; b.iter(|| { let mut map = HashMap::with_capacity(c); for i in 0..c { map.insert(i, value); } map }); } #[bench] fn insert_indexmap_int_bigvalue_10_000(b: &mut Bencher) { let c = 10_000; let value = [0u64; 10]; b.iter(|| { let mut map = IndexMap::with_capacity(c); for i in 0..c { map.insert(i, value); } map }); } #[bench] fn insert_hashmap_100_000(b: &mut Bencher) { let c = 100_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn insert_indexmap_100_000(b: &mut Bencher) { let c = 100_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn insert_hashmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn insert_indexmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn entry_hashmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.entry(x).or_insert(()); } map }); } #[bench] fn 
entry_indexmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.entry(x).or_insert(()); } map }); } #[bench] fn iter_sum_hashmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let len = c - c / 10; for x in 0..len { map.insert(x, ()); } assert_eq!(map.len(), len); b.iter(|| map.keys().sum::()); } #[bench] fn iter_sum_indexmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let len = c - c / 10; for x in 0..len { map.insert(x, ()); } assert_eq!(map.len(), len); b.iter(|| map.keys().sum::()); } #[bench] fn iter_black_box_hashmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let len = c - c / 10; for x in 0..len { map.insert(x, ()); } assert_eq!(map.len(), len); b.iter(|| { for &key in map.keys() { black_box(key); } }); } #[bench] fn iter_black_box_indexmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let len = c - c / 10; for x in 0..len { map.insert(x, ()); } assert_eq!(map.len(), len); b.iter(|| { for &key in map.keys() { black_box(key); } }); } fn shuffled_keys(iter: I) -> Vec where I: IntoIterator, { let mut v = Vec::from_iter(iter); let mut rng = small_rng(); rng.shuffle(&mut v); v } #[bench] fn lookup_hashmap_10_000_exist(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key, 1); } b.iter(|| { let mut found = 0; for key in 5000..c { found += map.get(&key).is_some() as i32; } found }); } #[bench] fn lookup_hashmap_10_000_noexist(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key, 1); } b.iter(|| { let mut found = 0; for key in c..15000 { found += map.get(&key).is_some() as i32; } found }); } #[bench] fn lookup_indexmap_10_000_exist(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key, 1); } b.iter(|| { let mut found = 0; for key in 5000..c { found += map.get(&key).is_some() as i32; } found }); } #[bench] fn lookup_indexmap_10_000_noexist(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key, 1); } b.iter(|| { let mut found = 0; for key in c..15000 { found += map.get(&key).is_some() as i32; } found }); } // number of items to look up const LOOKUP_MAP_SIZE: u32 = 100_000_u32; const LOOKUP_SAMPLE_SIZE: u32 = 5000; const SORT_MAP_SIZE: usize = 10_000; // use (lazy) statics so that comparison benchmarks use the exact same inputs static KEYS: LazyLock> = LazyLock::new(|| shuffled_keys(0..LOOKUP_MAP_SIZE)); static HMAP_100K: LazyLock> = LazyLock::new(|| { let c = LOOKUP_MAP_SIZE; let mut map = HashMap::with_capacity(c as usize); let keys = &*KEYS; for &key in keys { map.insert(key, key); } map }); static IMAP_100K: LazyLock> = LazyLock::new(|| { let c = LOOKUP_MAP_SIZE; let mut map = IndexMap::with_capacity(c as usize); let keys = &*KEYS; for &key in keys { map.insert(key, key); } map }); static IMAP_SORT_U32: LazyLock> = LazyLock::new(|| { let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); for &key in &KEYS[..SORT_MAP_SIZE] { map.insert(key, key); } map }); static IMAP_SORT_S: LazyLock> = LazyLock::new(|| { let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); for &key in &KEYS[..SORT_MAP_SIZE] { map.insert(format!("{:^16x}", 
&key), String::new()); } map }); #[bench] fn lookup_hashmap_100_000_multi(b: &mut Bencher) { let map = &*HMAP_100K; b.iter(|| { let mut found = 0; for key in 0..LOOKUP_SAMPLE_SIZE { found += map.get(&key).is_some() as u32; } found }); } #[bench] fn lookup_indexmap_100_000_multi(b: &mut Bencher) { let map = &*IMAP_100K; b.iter(|| { let mut found = 0; for key in 0..LOOKUP_SAMPLE_SIZE { found += map.get(&key).is_some() as u32; } found }); } // inorder: Test looking up keys in the same order as they were inserted #[bench] fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) { let map = &*HMAP_100K; let keys = &*KEYS; b.iter(|| { let mut found = 0; for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { found += map.get(key).is_some() as u32; } found }); } #[bench] fn lookup_indexmap_100_000_inorder_multi(b: &mut Bencher) { let map = &*IMAP_100K; let keys = &*KEYS; b.iter(|| { let mut found = 0; for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { found += map.get(key).is_some() as u32; } found }); } #[bench] fn lookup_hashmap_100_000_single(b: &mut Bencher) { let map = &*HMAP_100K; let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); b.iter(|| { let key = iter.next().unwrap(); map.get(&key).is_some() }); } #[bench] fn lookup_indexmap_100_000_single(b: &mut Bencher) { let map = &*IMAP_100K; let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); b.iter(|| { let key = iter.next().unwrap(); map.get(&key).is_some() }); } const GROW_SIZE: usize = 100_000; type GrowKey = u32; // Test grow/resize without preallocation #[bench] fn grow_fnv_hashmap_100_000(b: &mut Bencher) { b.iter(|| { let mut map: HashMap<_, _, FnvBuilder> = HashMap::default(); for x in 0..GROW_SIZE { map.insert(x as GrowKey, x as GrowKey); } map }); } #[bench] fn grow_fnv_indexmap_100_000(b: &mut Bencher) { b.iter(|| { let mut map: IndexMap<_, _, FnvBuilder> = IndexMap::default(); for x in 0..GROW_SIZE { map.insert(x as GrowKey, x as GrowKey); } map }); } const MERGE: u64 = 10_000; #[bench] fn hashmap_merge_simple(b: &mut Bencher) { let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); b.iter(|| { let mut merged = first_map.clone(); merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); merged }); } #[bench] fn hashmap_merge_shuffle(b: &mut Bencher) { let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); let mut v = Vec::new(); let mut rng = small_rng(); b.iter(|| { let mut merged = first_map.clone(); v.extend(second_map.iter().map(|(&k, &v)| (k, v))); rng.shuffle(&mut v); merged.extend(v.drain(..)); merged }); } #[bench] fn indexmap_merge_simple(b: &mut Bencher) { let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); b.iter(|| { let mut merged = first_map.clone(); merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); merged }); } #[bench] fn indexmap_merge_shuffle(b: &mut Bencher) { let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); let mut v = Vec::new(); let mut rng = small_rng(); b.iter(|| { let mut merged = first_map.clone(); v.extend(second_map.iter().map(|(&k, &v)| (k, v))); rng.shuffle(&mut v); merged.extend(v.drain(..)); merged }); } #[bench] fn swap_remove_indexmap_100_000(b: &mut Bencher) { let map = IMAP_100K.clone(); let mut keys = 
Vec::from_iter(map.keys().copied()); let mut rng = small_rng(); rng.shuffle(&mut keys); b.iter(|| { let mut map = map.clone(); for key in &keys { map.swap_remove(key); } assert_eq!(map.len(), 0); map }); } #[bench] fn shift_remove_indexmap_100_000_few(b: &mut Bencher) { let map = IMAP_100K.clone(); let mut keys = Vec::from_iter(map.keys().copied()); let mut rng = small_rng(); rng.shuffle(&mut keys); keys.truncate(50); b.iter(|| { let mut map = map.clone(); for key in &keys { map.shift_remove(key); } assert_eq!(map.len(), IMAP_100K.len() - keys.len()); map }); } #[bench] fn shift_remove_indexmap_2_000_full(b: &mut Bencher) { let mut keys = KEYS[..2_000].to_vec(); let mut map = IndexMap::with_capacity(keys.len()); for &key in &keys { map.insert(key, key); } let mut rng = small_rng(); rng.shuffle(&mut keys); b.iter(|| { let mut map = map.clone(); for key in &keys { map.shift_remove(key); } assert_eq!(map.len(), 0); map }); } #[bench] fn pop_indexmap_100_000(b: &mut Bencher) { let map = IMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); while !map.is_empty() { map.pop(); } assert_eq!(map.len(), 0); map }); } #[bench] fn few_retain_indexmap_100_000(b: &mut Bencher) { let map = IMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 7 == 0); map }); } #[bench] fn few_retain_hashmap_100_000(b: &mut Bencher) { let map = HMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 7 == 0); map }); } #[bench] fn half_retain_indexmap_100_000(b: &mut Bencher) { let map = IMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 2 == 0); map }); } #[bench] fn half_retain_hashmap_100_000(b: &mut Bencher) { let map = HMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 2 == 0); map }); } #[bench] fn many_retain_indexmap_100_000(b: &mut Bencher) { let map = IMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 100 != 0); map }); } #[bench] fn many_retain_hashmap_100_000(b: &mut Bencher) { let map = HMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 100 != 0); map }); } // simple sort impl for comparison pub fn simple_sort(m: &mut IndexMap) { let mut ordered: Vec<_> = m.drain(..).collect(); ordered.sort_by(|left, right| left.0.cmp(&right.0)); m.extend(ordered); } #[bench] fn indexmap_sort_s(b: &mut Bencher) { let map = IMAP_SORT_S.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { let mut map = map.clone(); map.sort_keys(); map }); } #[bench] fn indexmap_simple_sort_s(b: &mut Bencher) { let map = IMAP_SORT_S.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { let mut map = map.clone(); simple_sort(&mut map); map }); } #[bench] fn indexmap_sort_u32(b: &mut Bencher) { let map = IMAP_SORT_U32.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { let mut map = map.clone(); map.sort_keys(); map }); } #[bench] fn indexmap_simple_sort_u32(b: &mut Bencher) { let map = IMAP_SORT_U32.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { let mut map = map.clone(); simple_sort(&mut map); map }); } // measure the fixed overhead of cloning in sort benchmarks #[bench] fn indexmap_clone_for_sort_s(b: &mut Bencher) { let map = IMAP_SORT_S.clone(); b.iter(|| map.clone()); } #[bench] fn indexmap_clone_for_sort_u32(b: &mut Bencher) { let map = IMAP_SORT_U32.clone(); b.iter(|| map.clone()); } 
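// Editorial note (not part of the original source): these benchmarks use the
// unstable `test` crate via `#![feature(test)]`, so they require a nightly
// toolchain, e.g. something along the lines of `cargo +nightly bench --bench bench`.
// The seeded `fastrand::Rng` from `small_rng()` and the `LazyLock` statics above
// exist so that the `HashMap` and `IndexMap` variants of each benchmark operate
// on identical inputs.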
indexmap-2.12.1/benches/faststring.rs000064400000000000000000000100551046102023000156560ustar 00000000000000#![feature(test)] extern crate test; use test::Bencher; use indexmap::IndexMap; use std::collections::HashMap; use std::hash::{Hash, Hasher}; use std::borrow::Borrow; use std::ops::Deref; /// Use a consistently seeded Rng for benchmark stability fn small_rng() -> fastrand::Rng { let seed = u64::from_le_bytes(*b"indexmap"); fastrand::Rng::with_seed(seed) } #[derive(PartialEq, Eq, Copy, Clone)] #[repr(transparent)] pub struct OneShot(pub T); impl Hash for OneShot { fn hash(&self, h: &mut H) { h.write(self.0.as_bytes()) } } impl<'a, S> From<&'a S> for &'a OneShot where S: AsRef, { #[allow(unsafe_code)] fn from(s: &'a S) -> Self { let s: &str = s.as_ref(); // SAFETY: OneShot is a `repr(transparent)` wrapper unsafe { &*(s as *const str as *const OneShot) } } } impl Hash for OneShot { fn hash(&self, h: &mut H) { h.write(self.0.as_bytes()) } } impl Borrow> for OneShot { fn borrow(&self) -> &OneShot { <&OneShot>::from(&self.0) } } impl Deref for OneShot { type Target = T; fn deref(&self) -> &T { &self.0 } } fn shuffled_keys(iter: I) -> Vec where I: IntoIterator, { let mut v = Vec::from_iter(iter); let mut rng = small_rng(); rng.shuffle(&mut v); v } #[bench] fn insert_hashmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(x.to_string(), ()); } map }); } #[bench] fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(OneShot(x.to_string()), ()); } map }); } #[bench] fn insert_indexmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x.to_string(), ()); } map }); } #[bench] fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key.to_string(), 1); } let lookups = (5000..c).map(|x| x.to_string()).collect::>(); b.iter(|| { let mut found = 0; for key in &lookups { found += map.get(key).is_some() as i32; } found }); } #[bench] fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(OneShot(key.to_string()), 1); } let lookups = (5000..c) .map(|x| OneShot(x.to_string())) .collect::>(); b.iter(|| { let mut found = 0; for key in &lookups { found += map.get(key).is_some() as i32; } found }); } #[bench] fn lookup_indexmap_10_000_exist_string(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key.to_string(), 1); } let lookups = (5000..c).map(|x| x.to_string()).collect::>(); b.iter(|| { let mut found = 0; for key in &lookups { found += map.get(key).is_some() as i32; } found }); } #[bench] fn lookup_indexmap_10_000_exist_string_oneshot(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(OneShot(key.to_string()), 1); } let lookups = (5000..c) .map(|x| OneShot(x.to_string())) .collect::>(); b.iter(|| { let mut found = 0; for key in &lookups { found += map.get(key).is_some() as i32; } found }); } indexmap-2.12.1/src/arbitrary.rs000064400000000000000000000042761046102023000146610ustar 00000000000000#[cfg(feature = "arbitrary")] 
#[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))] mod impl_arbitrary { use crate::{IndexMap, IndexSet}; use arbitrary::{Arbitrary, Result, Unstructured}; use core::hash::{BuildHasher, Hash}; impl<'a, K, V, S> Arbitrary<'a> for IndexMap where K: Arbitrary<'a> + Hash + Eq, V: Arbitrary<'a>, S: BuildHasher + Default, { fn arbitrary(u: &mut Unstructured<'a>) -> Result { u.arbitrary_iter()?.collect() } fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { u.arbitrary_take_rest_iter()?.collect() } } impl<'a, T, S> Arbitrary<'a> for IndexSet where T: Arbitrary<'a> + Hash + Eq, S: BuildHasher + Default, { fn arbitrary(u: &mut Unstructured<'a>) -> Result { u.arbitrary_iter()?.collect() } fn arbitrary_take_rest(u: Unstructured<'a>) -> Result { u.arbitrary_take_rest_iter()?.collect() } } } #[cfg(feature = "quickcheck")] #[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))] mod impl_quickcheck { use crate::{IndexMap, IndexSet}; use alloc::boxed::Box; use alloc::vec::Vec; use core::hash::{BuildHasher, Hash}; use quickcheck::{Arbitrary, Gen}; impl Arbitrary for IndexMap where K: Arbitrary + Hash + Eq, V: Arbitrary, S: BuildHasher + Default + Clone + 'static, { fn arbitrary(g: &mut Gen) -> Self { Self::from_iter(Vec::arbitrary(g)) } fn shrink(&self) -> Box> { let vec = Vec::from_iter(self.clone()); Box::new(vec.shrink().map(Self::from_iter)) } } impl Arbitrary for IndexSet where T: Arbitrary + Hash + Eq, S: BuildHasher + Default + Clone + 'static, { fn arbitrary(g: &mut Gen) -> Self { Self::from_iter(Vec::arbitrary(g)) } fn shrink(&self) -> Box> { let vec = Vec::from_iter(self.clone()); Box::new(vec.shrink().map(Self::from_iter)) } } } indexmap-2.12.1/src/borsh.rs000064400000000000000000000074241046102023000137750ustar 00000000000000#![cfg_attr(docsrs, doc(cfg(feature = "borsh")))] use alloc::vec::Vec; use core::hash::BuildHasher; use core::hash::Hash; use borsh::error::ERROR_ZST_FORBIDDEN; use borsh::io::{Error, ErrorKind, Read, Result, Write}; use borsh::{BorshDeserialize, BorshSerialize}; use crate::map::IndexMap; use crate::set::IndexSet; // NOTE: the real `#[deprecated]` attribute doesn't work for trait implementations, // but we can get close by mimicking the message style for documentation. ///
/// 👎 Deprecated: use borsh's `indexmap` feature instead.
impl<K, V, S> BorshSerialize for IndexMap<K, V, S>
where
    K: BorshSerialize,
    V: BorshSerialize,
{
    #[inline]
    fn serialize<W: Write>(&self, writer: &mut W) -> Result<()> {
        check_zst::<K>()?;

        let iterator = self.iter();

        u32::try_from(iterator.len())
            .map_err(|_| ErrorKind::InvalidData)?
            .serialize(writer)?;

        for (key, value) in iterator {
            key.serialize(writer)?;
            value.serialize(writer)?;
        }

        Ok(())
    }
}
/// 👎 Deprecated: use borsh's `indexmap` feature instead.
impl<K, V, S> BorshDeserialize for IndexMap<K, V, S>
where
    K: BorshDeserialize + Eq + Hash,
    V: BorshDeserialize,
    S: BuildHasher + Default,
{
    #[inline]
    fn deserialize_reader<R: Read>(reader: &mut R) -> Result<Self> {
        check_zst::<K>()?;
        let vec = <Vec<(K, V)>>::deserialize_reader(reader)?;
        Ok(vec.into_iter().collect::<IndexMap<K, V, S>>())
    }
}
/// 👎 Deprecated: use borsh's `indexmap` feature instead.
impl<T, S> BorshSerialize for IndexSet<T, S>
where
    T: BorshSerialize,
{
    #[inline]
    fn serialize<W: Write>(&self, writer: &mut W) -> Result<()> {
        check_zst::<T>()?;

        let iterator = self.iter();

        u32::try_from(iterator.len())
            .map_err(|_| ErrorKind::InvalidData)?
            .serialize(writer)?;

        for item in iterator {
            item.serialize(writer)?;
        }

        Ok(())
    }
}
/// 👎 Deprecated: use borsh's `indexmap` feature instead.
impl BorshDeserialize for IndexSet where T: BorshDeserialize + Eq + Hash, S: BuildHasher + Default, { #[inline] fn deserialize_reader(reader: &mut R) -> Result { check_zst::()?; let vec = >::deserialize_reader(reader)?; Ok(vec.into_iter().collect::>()) } } fn check_zst() -> Result<()> { if size_of::() == 0 { return Err(Error::new(ErrorKind::InvalidData, ERROR_ZST_FORBIDDEN)); } Ok(()) } #[cfg(test)] mod borsh_tests { use super::*; #[test] fn map_borsh_roundtrip() { let original_map: IndexMap = { let mut map = IndexMap::new(); map.insert(1, 2); map.insert(3, 4); map.insert(5, 6); map }; let serialized_map = borsh::to_vec(&original_map).unwrap(); let deserialized_map: IndexMap = BorshDeserialize::try_from_slice(&serialized_map).unwrap(); assert_eq!(original_map, deserialized_map); } #[test] fn set_borsh_roundtrip() { let original_map: IndexSet = [1, 2, 3, 4, 5, 6].into_iter().collect(); let serialized_map = borsh::to_vec(&original_map).unwrap(); let deserialized_map: IndexSet = BorshDeserialize::try_from_slice(&serialized_map).unwrap(); assert_eq!(original_map, deserialized_map); } } indexmap-2.12.1/src/lib.rs000064400000000000000000000222471046102023000134260ustar 00000000000000#![no_std] //! [`IndexMap`] is a hash table where the iteration order of the key-value //! pairs is independent of the hash values of the keys. //! //! [`IndexSet`] is a corresponding hash set using the same implementation and //! with similar properties. //! //! ### Highlights //! //! [`IndexMap`] and [`IndexSet`] are drop-in compatible with the std `HashMap` //! and `HashSet`, but they also have some features of note: //! //! - The ordering semantics (see their documentation for details) //! - Sorting methods and the [`.pop()`][IndexMap::pop] methods. //! - The [`Equivalent`] trait, which offers more flexible equality definitions //! between borrowed and owned versions of keys. //! - The [`MutableKeys`][map::MutableKeys] trait, which gives opt-in mutable //! access to map keys, and [`MutableValues`][set::MutableValues] for sets. //! //! ### Feature Flags //! //! To reduce the amount of compiled code in the crate by default, certain //! features are gated behind [feature flags]. These allow you to opt in to (or //! out of) functionality. Below is a list of the features available in this //! crate. //! //! * `std`: Enables features which require the Rust standard library. For more //! information see the section on [`no_std`]. //! * `rayon`: Enables parallel iteration and other parallel methods. //! * `serde`: Adds implementations for [`Serialize`] and [`Deserialize`] //! to [`IndexMap`] and [`IndexSet`]. Alternative implementations for //! (de)serializing [`IndexMap`] as an ordered sequence are available in the //! [`map::serde_seq`] module. //! * `arbitrary`: Adds implementations for the [`arbitrary::Arbitrary`] trait //! to [`IndexMap`] and [`IndexSet`]. //! * `quickcheck`: Adds implementations for the [`quickcheck::Arbitrary`] trait //! to [`IndexMap`] and [`IndexSet`]. //! * `borsh` (**deprecated**): Adds implementations for [`BorshSerialize`] and //! [`BorshDeserialize`] to [`IndexMap`] and [`IndexSet`]. Due to a cyclic //! dependency that arose between [`borsh`] and `indexmap`, `borsh v1.5.6` //! added an `indexmap` feature that should be used instead of enabling the //! feature here. //! //! _Note: only the `std` feature is enabled by default._ //! //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section //! [`no_std`]: #no-standard-library-targets //! 
[`Serialize`]: `::serde_core::Serialize` //! [`Deserialize`]: `::serde_core::Deserialize` //! [`BorshSerialize`]: `::borsh::BorshSerialize` //! [`BorshDeserialize`]: `::borsh::BorshDeserialize` //! [`borsh`]: `::borsh` //! [`arbitrary::Arbitrary`]: `::arbitrary::Arbitrary` //! [`quickcheck::Arbitrary`]: `::quickcheck::Arbitrary` //! //! ### Alternate Hashers //! //! [`IndexMap`] and [`IndexSet`] have a default hasher type //! [`S = RandomState`][std::hash::RandomState], //! just like the standard `HashMap` and `HashSet`, which is resistant to //! HashDoS attacks but not the most performant. Type aliases can make it easier //! to use alternate hashers: //! //! ``` //! use fnv::FnvBuildHasher; //! use indexmap::{IndexMap, IndexSet}; //! //! type FnvIndexMap = IndexMap; //! type FnvIndexSet = IndexSet; //! //! let std: IndexSet = (0..100).collect(); //! let fnv: FnvIndexSet = (0..100).collect(); //! assert_eq!(std, fnv); //! ``` //! //! ### Rust Version //! //! This version of indexmap requires Rust 1.82 or later. //! //! The indexmap 2.x release series will use a carefully considered version //! upgrade policy, where in a later 2.x version, we will raise the minimum //! required Rust version. //! //! ## No Standard Library Targets //! //! This crate supports being built without `std`, requiring `alloc` instead. //! This is chosen by disabling the default "std" cargo feature, by adding //! `default-features = false` to your dependency specification. //! //! - Creating maps and sets using [`new`][IndexMap::new] and //! [`with_capacity`][IndexMap::with_capacity] is unavailable without `std`. //! Use methods [`IndexMap::default`], [`with_hasher`][IndexMap::with_hasher], //! [`with_capacity_and_hasher`][IndexMap::with_capacity_and_hasher] instead. //! A no-std compatible hasher will be needed as well, for example //! from the crate `twox-hash`. //! - Macros [`indexmap!`] and [`indexset!`] are unavailable without `std`. Use //! the macros [`indexmap_with_default!`] and [`indexset_with_default!`] instead. #![cfg_attr(docsrs, feature(doc_cfg))] extern crate alloc; #[cfg(feature = "std")] #[macro_use] extern crate std; mod arbitrary; #[macro_use] mod macros; #[cfg(feature = "borsh")] mod borsh; #[cfg(feature = "serde")] mod serde; #[cfg(feature = "sval")] mod sval; mod util; pub mod map; pub mod set; // Placed after `map` and `set` so new `rayon` methods on the types // are documented after the "normal" methods. #[cfg(feature = "rayon")] mod rayon; pub use crate::map::IndexMap; pub use crate::set::IndexSet; pub use equivalent::Equivalent; // shared private items /// Hash value newtype. Not larger than usize, since anything larger /// isn't used for selecting position anyway. 
#[derive(Clone, Copy, Debug, PartialEq)] struct HashValue(usize); impl HashValue { #[inline(always)] fn get(self) -> u64 { self.0 as u64 } } #[derive(Copy, Debug)] struct Bucket { hash: HashValue, key: K, value: V, } impl Clone for Bucket where K: Clone, V: Clone, { fn clone(&self) -> Self { Bucket { hash: self.hash, key: self.key.clone(), value: self.value.clone(), } } fn clone_from(&mut self, other: &Self) { self.hash = other.hash; self.key.clone_from(&other.key); self.value.clone_from(&other.value); } } impl Bucket { // field accessors -- used for `f` instead of closures in `.map(f)` fn key_ref(&self) -> &K { &self.key } fn value_ref(&self) -> &V { &self.value } fn value_mut(&mut self) -> &mut V { &mut self.value } fn key(self) -> K { self.key } fn value(self) -> V { self.value } fn key_value(self) -> (K, V) { (self.key, self.value) } fn refs(&self) -> (&K, &V) { (&self.key, &self.value) } fn ref_mut(&mut self) -> (&K, &mut V) { (&self.key, &mut self.value) } fn muts(&mut self) -> (&mut K, &mut V) { (&mut self.key, &mut self.value) } } /// The error type for [`try_reserve`][IndexMap::try_reserve] methods. #[derive(Clone, PartialEq, Eq, Debug)] pub struct TryReserveError { kind: TryReserveErrorKind, } #[derive(Clone, PartialEq, Eq, Debug)] enum TryReserveErrorKind { // The standard library's kind is currently opaque to us, otherwise we could unify this. Std(alloc::collections::TryReserveError), CapacityOverflow, AllocError { layout: alloc::alloc::Layout }, } // These are not `From` so we don't expose them in our public API. impl TryReserveError { fn from_alloc(error: alloc::collections::TryReserveError) -> Self { Self { kind: TryReserveErrorKind::Std(error), } } fn from_hashbrown(error: hashbrown::TryReserveError) -> Self { Self { kind: match error { hashbrown::TryReserveError::CapacityOverflow => { TryReserveErrorKind::CapacityOverflow } hashbrown::TryReserveError::AllocError { layout } => { TryReserveErrorKind::AllocError { layout } } }, } } } impl core::fmt::Display for TryReserveError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let reason = match &self.kind { TryReserveErrorKind::Std(e) => return core::fmt::Display::fmt(e, f), TryReserveErrorKind::CapacityOverflow => { " because the computed capacity exceeded the collection's maximum" } TryReserveErrorKind::AllocError { .. } => { " because the memory allocator returned an error" } }; f.write_str("memory allocation failed")?; f.write_str(reason) } } impl core::error::Error for TryReserveError {} // NOTE: This is copied from the slice module in the std lib. /// The error type returned by [`get_disjoint_indices_mut`][`IndexMap::get_disjoint_indices_mut`]. /// /// It indicates one of two possible errors: /// - An index is out-of-bounds. /// - The same index appeared multiple times in the array. // (or different but overlapping indices when ranges are provided) #[derive(Debug, Clone, PartialEq, Eq)] pub enum GetDisjointMutError { /// An index provided was out-of-bounds for the slice. IndexOutOfBounds, /// Two indices provided were overlapping. 
OverlappingIndices, } impl core::fmt::Display for GetDisjointMutError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let msg = match self { GetDisjointMutError::IndexOutOfBounds => "an index is out of bounds", GetDisjointMutError::OverlappingIndices => "there were overlapping indices", }; core::fmt::Display::fmt(msg, f) } } impl core::error::Error for GetDisjointMutError {} indexmap-2.12.1/src/macros.rs000064400000000000000000000167131046102023000141450ustar 00000000000000/// Create an [`IndexMap`][crate::IndexMap] from a list of key-value pairs /// and a [`BuildHasherDefault`][core::hash::BuildHasherDefault]-wrapped custom hasher. /// /// ## Example /// /// ``` /// use indexmap::indexmap_with_default; /// use fnv::FnvHasher; /// /// let map = indexmap_with_default!{ /// FnvHasher; /// "a" => 1, /// "b" => 2, /// }; /// assert_eq!(map["a"], 1); /// assert_eq!(map["b"], 2); /// assert_eq!(map.get("c"), None); /// /// // "a" is the first key /// assert_eq!(map.keys().next(), Some(&"a")); /// ``` #[macro_export] macro_rules! indexmap_with_default { ($H:ty; $($key:expr => $value:expr,)+) => { $crate::indexmap_with_default!($H; $($key => $value),+) }; ($H:ty; $($key:expr => $value:expr),*) => {{ let builder = ::core::hash::BuildHasherDefault::<$H>::default(); const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]); #[allow(unused_mut)] // Specify your custom `H` (must implement Default + Hasher) as the hasher: let mut map = $crate::IndexMap::with_capacity_and_hasher(CAP, builder); $( map.insert($key, $value); )* map }}; } #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] #[macro_export] /// Create an [`IndexMap`][crate::IndexMap] from a list of key-value pairs /// /// ## Example /// /// ``` /// use indexmap::indexmap; /// /// let map = indexmap!{ /// "a" => 1, /// "b" => 2, /// }; /// assert_eq!(map["a"], 1); /// assert_eq!(map["b"], 2); /// assert_eq!(map.get("c"), None); /// /// // "a" is the first key /// assert_eq!(map.keys().next(), Some(&"a")); /// ``` macro_rules! indexmap { ($($key:expr => $value:expr,)+) => { $crate::indexmap!($($key => $value),+) }; ($($key:expr => $value:expr),*) => { { // Note: `stringify!($key)` is just here to consume the repetition, // but we throw away that string literal during constant evaluation. const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]); let mut map = $crate::IndexMap::with_capacity(CAP); $( map.insert($key, $value); )* map } }; } /// Create an [`IndexSet`][crate::IndexSet] from a list of values /// and a [`BuildHasherDefault`][core::hash::BuildHasherDefault]-wrapped custom hasher. /// /// ## Example /// /// ``` /// use indexmap::indexset_with_default; /// use fnv::FnvHasher; /// /// let set = indexset_with_default!{ /// FnvHasher; /// "a", /// "b", /// }; /// assert!(set.contains("a")); /// assert!(set.contains("b")); /// assert!(!set.contains("c")); /// /// // "a" is the first value /// assert_eq!(set.iter().next(), Some(&"a")); /// ``` #[macro_export] macro_rules! 
indexset_with_default { ($H:ty; $($value:expr,)+) => { $crate::indexset_with_default!($H; $($value),+) }; ($H:ty; $($value:expr),*) => {{ let builder = ::core::hash::BuildHasherDefault::<$H>::default(); const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]); #[allow(unused_mut)] // Specify your custom `H` (must implement Default + Hash) as the hasher: let mut set = $crate::IndexSet::with_capacity_and_hasher(CAP, builder); $( set.insert($value); )* set }}; } #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] #[macro_export] /// Create an [`IndexSet`][crate::IndexSet] from a list of values /// /// ## Example /// /// ``` /// use indexmap::indexset; /// /// let set = indexset!{ /// "a", /// "b", /// }; /// assert!(set.contains("a")); /// assert!(set.contains("b")); /// assert!(!set.contains("c")); /// /// // "a" is the first value /// assert_eq!(set.iter().next(), Some(&"a")); /// ``` macro_rules! indexset { ($($value:expr,)+) => { $crate::indexset!($($value),+) }; ($($value:expr),*) => { { // Note: `stringify!($value)` is just here to consume the repetition, // but we throw away that string literal during constant evaluation. const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]); let mut set = $crate::IndexSet::with_capacity(CAP); $( set.insert($value); )* set } }; } // generate all the Iterator methods by just forwarding to the underlying // self.iter and mapping its element. macro_rules! iterator_methods { // $map_elt is the mapping function from the underlying iterator's element // same mapping function for both options and iterators ($map_elt:expr) => { fn next(&mut self) -> Option { self.iter.next().map($map_elt) } fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } fn count(self) -> usize { self.iter.len() } fn nth(&mut self, n: usize) -> Option { self.iter.nth(n).map($map_elt) } fn last(mut self) -> Option { self.next_back() } fn collect(self) -> C where C: FromIterator, { // NB: forwarding this directly to standard iterators will // allow it to leverage unstable traits like `TrustedLen`. self.iter.map($map_elt).collect() } }; } macro_rules! double_ended_iterator_methods { // $map_elt is the mapping function from the underlying iterator's element // same mapping function for both options and iterators ($map_elt:expr) => { fn next_back(&mut self) -> Option { self.iter.next_back().map($map_elt) } fn nth_back(&mut self, n: usize) -> Option { self.iter.nth_back(n).map($map_elt) } }; } // generate `ParallelIterator` methods by just forwarding to the underlying // self.entries and mapping its elements. #[cfg(feature = "rayon")] macro_rules! parallel_iterator_methods { // $map_elt is the mapping function from the underlying iterator's element ($map_elt:expr) => { fn drive_unindexed(self, consumer: C) -> C::Result where C: UnindexedConsumer, { self.entries .into_par_iter() .map($map_elt) .drive_unindexed(consumer) } // NB: This allows indexed collection, e.g. directly into a `Vec`, but the // underlying iterator must really be indexed. We should remove this if we // start having tombstones that must be filtered out. fn opt_len(&self) -> Option { Some(self.entries.len()) } }; } // generate `IndexedParallelIterator` methods by just forwarding to the underlying // self.entries and mapping its elements. #[cfg(feature = "rayon")] macro_rules! 
indexed_parallel_iterator_methods { // $map_elt is the mapping function from the underlying iterator's element ($map_elt:expr) => { fn drive(self, consumer: C) -> C::Result where C: Consumer, { self.entries.into_par_iter().map($map_elt).drive(consumer) } fn len(&self) -> usize { self.entries.len() } fn with_producer(self, callback: CB) -> CB::Output where CB: ProducerCallback, { self.entries .into_par_iter() .map($map_elt) .with_producer(callback) } }; } indexmap-2.12.1/src/map/core/entry.rs000064400000000000000000000602641046102023000155270ustar 00000000000000use super::{equivalent, get_hash, Bucket, IndexMapCore}; use crate::HashValue; use core::cmp::Ordering; use core::{fmt, mem}; /// Entry for an existing key-value pair in an [`IndexMap`][crate::IndexMap] /// or a vacant location to insert one. pub enum Entry<'a, K, V> { /// Existing slot with equivalent key. Occupied(OccupiedEntry<'a, K, V>), /// Vacant slot (no equivalent key in the map). Vacant(VacantEntry<'a, K, V>), } impl<'a, K, V> Entry<'a, K, V> { pub(crate) fn new(map: &'a mut IndexMapCore, hash: HashValue, key: K) -> Self where K: Eq, { let eq = equivalent(&key, &map.entries); match map.indices.find_entry(hash.get(), eq) { Ok(entry) => Entry::Occupied(OccupiedEntry { bucket: entry.bucket_index(), index: *entry.get(), map, }), Err(_) => Entry::Vacant(VacantEntry { map, hash, key }), } } /// Return the index where the key-value pair exists or will be inserted. pub fn index(&self) -> usize { match self { Entry::Occupied(entry) => entry.index, Entry::Vacant(entry) => entry.index(), } } /// Sets the value of the entry (after inserting if vacant), and returns an `OccupiedEntry`. /// /// Computes in **O(1)** time (amortized average). pub fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V> { match self { Entry::Occupied(mut entry) => { entry.insert(value); entry } Entry::Vacant(entry) => entry.insert_entry(value), } } /// Inserts the given default value in the entry if it is vacant and returns a mutable /// reference to it. Otherwise a mutable reference to an already existent value is returned. /// /// Computes in **O(1)** time (amortized average). pub fn or_insert(self, default: V) -> &'a mut V { match self { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => entry.insert(default), } } /// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable /// reference to it. Otherwise a mutable reference to an already existent value is returned. /// /// Computes in **O(1)** time (amortized average). pub fn or_insert_with(self, call: F) -> &'a mut V where F: FnOnce() -> V, { match self { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => entry.insert(call()), } } /// Inserts the result of the `call` function with a reference to the entry's key if it is /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to /// an already existent value is returned. /// /// Computes in **O(1)** time (amortized average). pub fn or_insert_with_key(self, call: F) -> &'a mut V where F: FnOnce(&K) -> V, { match self { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => { let value = call(&entry.key); entry.insert(value) } } } /// Gets a reference to the entry's key, either within the map if occupied, /// or else the new key that was used to find the entry. pub fn key(&self) -> &K { match *self { Entry::Occupied(ref entry) => entry.key(), Entry::Vacant(ref entry) => entry.key(), } } /// Modifies the entry if it is occupied. 
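    ///
    /// A brief usage sketch (editorial addition, not from the original
    /// source; it assumes the default `std` hasher via `IndexMap::new`):
    ///
    /// ```
    /// use indexmap::IndexMap;
    ///
    /// let mut map: IndexMap<&str, u32> = IndexMap::new();
    /// map.entry("poneyland").and_modify(|e| *e += 1).or_insert(42);
    /// assert_eq!(map["poneyland"], 42);
    /// map.entry("poneyland").and_modify(|e| *e += 1).or_insert(42);
    /// assert_eq!(map["poneyland"], 43);
    /// ```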
pub fn and_modify(mut self, f: F) -> Self where F: FnOnce(&mut V), { if let Entry::Occupied(entry) = &mut self { f(entry.get_mut()); } self } /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable /// reference to it. Otherwise a mutable reference to an already existent value is returned. /// /// Computes in **O(1)** time (amortized average). pub fn or_default(self) -> &'a mut V where V: Default, { match self { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => entry.insert(V::default()), } } } impl fmt::Debug for Entry<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut tuple = f.debug_tuple("Entry"); match self { Entry::Vacant(v) => tuple.field(v), Entry::Occupied(o) => tuple.field(o), }; tuple.finish() } } /// A view into an occupied entry in an [`IndexMap`][crate::IndexMap]. /// It is part of the [`Entry`] enum. pub struct OccupiedEntry<'a, K, V> { map: &'a mut IndexMapCore, // We have a mutable reference to the map, which keeps these two // indices valid and pointing to the correct entry. index: usize, bucket: usize, } impl<'a, K, V> OccupiedEntry<'a, K, V> { /// Constructor for `RawEntryMut::from_hash` pub(super) fn from_hash( map: &'a mut IndexMapCore, hash: u64, mut is_match: F, ) -> Result> where F: FnMut(&K) -> bool, { let entries = &*map.entries; let eq = move |&i: &usize| is_match(&entries[i].key); match map.indices.find_entry(hash, eq) { Ok(entry) => Ok(OccupiedEntry { bucket: entry.bucket_index(), index: *entry.get(), map, }), Err(_) => Err(map), } } pub(crate) fn get_bucket(&self) -> &Bucket { &self.map.entries[self.index] } pub(crate) fn get_bucket_mut(&mut self) -> &mut Bucket { &mut self.map.entries[self.index] } pub(crate) fn into_bucket(self) -> &'a mut Bucket { &mut self.map.entries[self.index] } /// Return the index of the key-value pair #[inline] pub fn index(&self) -> usize { self.index } /// Gets a reference to the entry's key in the map. /// /// Note that this is not the key that was used to find the entry. There may be an observable /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like /// extra fields or the memory address of an allocation. pub fn key(&self) -> &K { &self.map.entries[self.index].key } /// Gets a reference to the entry's value in the map. pub fn get(&self) -> &V { &self.map.entries[self.index].value } /// Gets a mutable reference to the entry's value in the map. /// /// If you need a reference which may outlive the destruction of the /// [`Entry`] value, see [`into_mut`][Self::into_mut]. pub fn get_mut(&mut self) -> &mut V { &mut self.map.entries[self.index].value } /// Converts into a mutable reference to the entry's value in the map, /// with a lifetime bound to the map itself. pub fn into_mut(self) -> &'a mut V { &mut self.map.entries[self.index].value } /// Sets the value of the entry to `value`, and returns the entry's old value. pub fn insert(&mut self, value: V) -> V { mem::replace(self.get_mut(), value) } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// **NOTE:** This is equivalent to [`.swap_remove()`][Self::swap_remove], replacing this /// entry's position with the last element, and it is deprecated in favor of calling that /// explicitly. If you need to preserve the relative order of the keys in the map, use /// [`.shift_remove()`][Self::shift_remove] instead. 
#[deprecated(note = "`remove` disrupts the map order -- \ use `swap_remove` or `shift_remove` for explicit behavior.")] pub fn remove(self) -> V { self.swap_remove() } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it /// with the last element of the map and popping it off. /// **This perturbs the position of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove(self) -> V { self.swap_remove_entry().1 } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove(self) -> V { self.shift_remove_entry().1 } /// Remove and return the key, value pair stored in the map for this entry /// /// **NOTE:** This is equivalent to [`.swap_remove_entry()`][Self::swap_remove_entry], /// replacing this entry's position with the last element, and it is deprecated in favor of /// calling that explicitly. If you need to preserve the relative order of the keys in the map, /// use [`.shift_remove_entry()`][Self::shift_remove_entry] instead. #[deprecated(note = "`remove_entry` disrupts the map order -- \ use `swap_remove_entry` or `shift_remove_entry` for explicit behavior.")] pub fn remove_entry(self) -> (K, V) { self.swap_remove_entry() } /// Remove and return the key, value pair stored in the map for this entry /// /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it /// with the last element of the map and popping it off. /// **This perturbs the position of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove_entry(mut self) -> (K, V) { self.remove_index(); self.map.swap_remove_finish(self.index) } /// Remove and return the key, value pair stored in the map for this entry /// /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove_entry(mut self) -> (K, V) { self.remove_index(); self.map.shift_remove_finish(self.index) } fn remove_index(&mut self) { let entry = self.map.indices.get_bucket_entry(self.bucket).unwrap(); debug_assert_eq!(*entry.get(), self.index); entry.remove(); } /// Moves the position of the entry to a new index /// by shifting all other entries in-between. /// /// This is equivalent to [`IndexMap::move_index`][`crate::IndexMap::move_index`] /// coming `from` the current [`.index()`][Self::index]. /// /// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up. /// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down. /// /// ***Panics*** if `to` is out of bounds. /// /// Computes in **O(n)** time (average). #[track_caller] pub fn move_index(self, to: usize) { if self.index != to { let _ = self.map.entries[to]; // explicit bounds check self.map.move_index_inner(self.index, to); self.update_index(to); } } /// Swaps the position of entry with another. 
/// /// This is equivalent to [`IndexMap::swap_indices`][`crate::IndexMap::swap_indices`] /// with the current [`.index()`][Self::index] as one of the two being swapped. /// /// ***Panics*** if the `other` index is out of bounds. /// /// Computes in **O(1)** time (average). #[track_caller] pub fn swap_indices(self, other: usize) { if self.index != other { // Since we already know where our bucket is, we only need to find the other. let hash = self.map.entries[other].hash; let other_mut = self.map.indices.find_mut(hash.get(), move |&i| i == other); *other_mut.expect("index not found") = self.index; self.map.entries.swap(self.index, other); self.update_index(other); } } fn update_index(self, to: usize) { let index = self.map.indices.get_bucket_mut(self.bucket).unwrap(); debug_assert_eq!(*index, self.index); *index = to; } } impl fmt::Debug for OccupiedEntry<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) .field("value", self.get()) .finish() } } impl<'a, K, V> From> for OccupiedEntry<'a, K, V> { fn from(other: IndexedEntry<'a, K, V>) -> Self { let IndexedEntry { map, index } = other; let hash = map.entries[index].hash; let bucket = map .indices .find_bucket_index(hash.get(), move |&i| i == index) .expect("index not found"); Self { map, index, bucket } } } /// A view into a vacant entry in an [`IndexMap`][crate::IndexMap]. /// It is part of the [`Entry`] enum. pub struct VacantEntry<'a, K, V> { map: &'a mut IndexMapCore, hash: HashValue, key: K, } impl<'a, K, V> VacantEntry<'a, K, V> { /// Return the index where a key-value pair may be inserted. pub fn index(&self) -> usize { self.map.indices.len() } /// Gets a reference to the key that was used to find the entry. pub fn key(&self) -> &K { &self.key } pub(crate) fn key_mut(&mut self) -> &mut K { &mut self.key } /// Takes ownership of the key, leaving the entry vacant. pub fn into_key(self) -> K { self.key } /// Inserts the entry's key and the given value into the map, and returns a mutable reference /// to the value. /// /// Computes in **O(1)** time (amortized average). pub fn insert(self, value: V) -> &'a mut V { let Self { map, hash, key } = self; map.insert_unique(hash, key, value).value_mut() } /// Inserts the entry's key and the given value into the map, and returns an `OccupiedEntry`. /// /// Computes in **O(1)** time (amortized average). pub fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V> { let Self { map, hash, key } = self; let index = map.indices.len(); debug_assert_eq!(index, map.entries.len()); let bucket = map .indices .insert_unique(hash.get(), index, get_hash(&map.entries)) .bucket_index(); map.push_entry(hash, key, value); OccupiedEntry { map, index, bucket } } /// Inserts the entry's key and the given value into the map at its ordered /// position among sorted keys, and returns the new index and a mutable /// reference to the value. /// /// If the existing keys are **not** already sorted, then the insertion /// index is unspecified (like [`slice::binary_search`]), but the key-value /// pair is inserted at that position regardless. /// /// Computes in **O(n)** time (average). 
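    ///
    /// A brief usage sketch (editorial addition, not from the original
    /// source; `IndexMap::from` assumes the default `std` hasher):
    ///
    /// ```
    /// use indexmap::IndexMap;
    /// use indexmap::map::Entry;
    ///
    /// let mut map = IndexMap::from([(1, "a"), (3, "c")]);
    /// if let Entry::Vacant(entry) = map.entry(2) {
    ///     entry.insert_sorted("b");
    /// }
    /// assert_eq!(map.get_index(1), Some((&2, &"b")));
    /// ```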
pub fn insert_sorted(self, value: V) -> (usize, &'a mut V) where K: Ord, { let slice = crate::map::Slice::from_slice(&self.map.entries); let i = slice.binary_search_keys(&self.key).unwrap_err(); (i, self.shift_insert(i, value)) } /// Inserts the entry's key and the given value into the map at its ordered /// position among keys sorted by `cmp`, and returns the new index and a /// mutable reference to the value. /// /// If the existing keys are **not** already sorted, then the insertion /// index is unspecified (like [`slice::binary_search`]), but the key-value /// pair is inserted at that position regardless. /// /// Computes in **O(n)** time (average). pub fn insert_sorted_by(self, value: V, mut cmp: F) -> (usize, &'a mut V) where F: FnMut(&K, &V, &K, &V) -> Ordering, { let slice = crate::map::Slice::from_slice(&self.map.entries); let (Ok(i) | Err(i)) = slice.binary_search_by(|k, v| cmp(k, v, &self.key, &value)); (i, self.shift_insert(i, value)) } /// Inserts the entry's key and the given value into the map at its ordered /// position using a sort-key extraction function, and returns the new index /// and a mutable reference to the value. /// /// If the existing keys are **not** already sorted, then the insertion /// index is unspecified (like [`slice::binary_search`]), but the key-value /// pair is inserted at that position regardless. /// /// Computes in **O(n)** time (average). pub fn insert_sorted_by_key(self, value: V, mut sort_key: F) -> (usize, &'a mut V) where B: Ord, F: FnMut(&K, &V) -> B, { let search_key = sort_key(&self.key, &value); let slice = crate::map::Slice::from_slice(&self.map.entries); let (Ok(i) | Err(i)) = slice.binary_search_by_key(&search_key, sort_key); (i, self.shift_insert(i, value)) } /// Inserts the entry's key and the given value into the map at the given index, /// shifting others to the right, and returns a mutable reference to the value. /// /// ***Panics*** if `index` is out of bounds. /// /// Computes in **O(n)** time (average). #[track_caller] pub fn shift_insert(self, index: usize, value: V) -> &'a mut V { self.map .shift_insert_unique(index, self.hash, self.key, value); &mut self.map.entries[index].value } /// Replaces the key at the given index with this entry's key, returning the /// old key and an `OccupiedEntry` for that index. /// /// ***Panics*** if `index` is out of bounds. /// /// Computes in **O(1)** time (average). #[track_caller] pub fn replace_index(self, index: usize) -> (K, OccupiedEntry<'a, K, V>) { let Self { map, hash, key } = self; // NB: This removal and insertion isn't "no grow" (with unreachable hasher) // because hashbrown's tombstones might force a resize anyway. let old_hash = map.entries[index].hash; map.indices .find_entry(old_hash.get(), move |&i| i == index) .expect("index not found") .remove(); let bucket = map .indices .insert_unique(hash.get(), index, get_hash(&map.entries)) .bucket_index(); let entry = &mut map.entries[index]; entry.hash = hash; let old_key = mem::replace(&mut entry.key, key); (old_key, OccupiedEntry { map, index, bucket }) } } impl fmt::Debug for VacantEntry<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.key()).finish() } } /// A view into an occupied entry in an [`IndexMap`][crate::IndexMap] obtained by index. /// /// This `struct` is created from the [`get_index_entry`][crate::IndexMap::get_index_entry] method. 
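///
/// A brief usage sketch (editorial addition, not from the original source;
/// `IndexMap::from` assumes the default `std` hasher):
///
/// ```
/// use indexmap::IndexMap;
///
/// let mut map = IndexMap::from([("a", 1), ("b", 2)]);
/// if let Some(mut entry) = map.get_index_entry(0) {
///     *entry.get_mut() += 10;
/// }
/// assert_eq!(map["a"], 11);
/// ```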
pub struct IndexedEntry<'a, K, V> { map: &'a mut IndexMapCore, // We have a mutable reference to the map, which keeps the index // valid and pointing to the correct entry. index: usize, } impl<'a, K, V> IndexedEntry<'a, K, V> { pub(crate) fn new(map: &'a mut IndexMapCore, index: usize) -> Option { if index < map.len() { Some(Self { map, index }) } else { None } } /// Return the index of the key-value pair #[inline] pub fn index(&self) -> usize { self.index } /// Gets a reference to the entry's key in the map. pub fn key(&self) -> &K { &self.map.entries[self.index].key } pub(crate) fn key_mut(&mut self) -> &mut K { &mut self.map.entries[self.index].key } /// Gets a reference to the entry's value in the map. pub fn get(&self) -> &V { &self.map.entries[self.index].value } /// Gets a mutable reference to the entry's value in the map. /// /// If you need a reference which may outlive the destruction of the /// `IndexedEntry` value, see [`into_mut`][Self::into_mut]. pub fn get_mut(&mut self) -> &mut V { &mut self.map.entries[self.index].value } /// Sets the value of the entry to `value`, and returns the entry's old value. pub fn insert(&mut self, value: V) -> V { mem::replace(self.get_mut(), value) } /// Converts into a mutable reference to the entry's value in the map, /// with a lifetime bound to the map itself. pub fn into_mut(self) -> &'a mut V { &mut self.map.entries[self.index].value } /// Remove and return the key, value pair stored in the map for this entry /// /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it /// with the last element of the map and popping it off. /// **This perturbs the position of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove_entry(self) -> (K, V) { self.map.swap_remove_index(self.index).unwrap() } /// Remove and return the key, value pair stored in the map for this entry /// /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove_entry(self) -> (K, V) { self.map.shift_remove_index(self.index).unwrap() } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it /// with the last element of the map and popping it off. /// **This perturbs the position of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove(self) -> V { self.swap_remove_entry().1 } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove(self) -> V { self.shift_remove_entry().1 } /// Moves the position of the entry to a new index /// by shifting all other entries in-between. /// /// This is equivalent to [`IndexMap::move_index`][`crate::IndexMap::move_index`] /// coming `from` the current [`.index()`][Self::index]. /// /// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up. /// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down. 
/// /// ***Panics*** if `to` is out of bounds. /// /// Computes in **O(n)** time (average). #[track_caller] pub fn move_index(self, to: usize) { self.map.move_index(self.index, to); } /// Swaps the position of entry with another. /// /// This is equivalent to [`IndexMap::swap_indices`][`crate::IndexMap::swap_indices`] /// with the current [`.index()`][Self::index] as one of the two being swapped. /// /// ***Panics*** if the `other` index is out of bounds. /// /// Computes in **O(1)** time (average). #[track_caller] pub fn swap_indices(self, other: usize) { self.map.swap_indices(self.index, other); } } impl fmt::Debug for IndexedEntry<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("IndexedEntry") .field("index", &self.index) .field("key", self.key()) .field("value", self.get()) .finish() } } impl<'a, K, V> From> for IndexedEntry<'a, K, V> { fn from(other: OccupiedEntry<'a, K, V>) -> Self { let OccupiedEntry { map, index, .. } = other; Self { map, index } } } indexmap-2.12.1/src/map/core/extract.rs000064400000000000000000000071561046102023000160410ustar 00000000000000#![allow(unsafe_code)] use super::{Bucket, IndexMapCore}; use crate::util::simplify_range; use core::ops::RangeBounds; impl IndexMapCore { #[track_caller] pub(crate) fn extract(&mut self, range: R) -> ExtractCore<'_, K, V> where R: RangeBounds, { let range = simplify_range(range, self.entries.len()); // SAFETY: We must have consistent lengths to start, so that's a hard assertion. // Then the worst `set_len` can do is leak items if `ExtractCore` doesn't drop. assert_eq!(self.entries.len(), self.indices.len()); unsafe { self.entries.set_len(range.start); } ExtractCore { map: self, new_len: range.start, current: range.start, end: range.end, } } } pub(crate) struct ExtractCore<'a, K, V> { map: &'a mut IndexMapCore, new_len: usize, current: usize, end: usize, } impl Drop for ExtractCore<'_, K, V> { fn drop(&mut self) { let old_len = self.map.indices.len(); let mut new_len = self.new_len; debug_assert!(new_len <= self.current); debug_assert!(self.current <= self.end); debug_assert!(self.current <= old_len); debug_assert!(old_len <= self.map.entries.capacity()); // SAFETY: We assume `new_len` and `current` were correctly maintained by the iterator. // So `entries[new_len..current]` were extracted, but the rest before and after are valid. unsafe { if new_len == self.current { // Nothing was extracted, so any remaining items can be left in place. new_len = old_len; } else if self.current < old_len { // Need to shift the remaining items down. let tail_len = old_len - self.current; let base = self.map.entries.as_mut_ptr(); let src = base.add(self.current); let dest = base.add(new_len); src.copy_to(dest, tail_len); new_len += tail_len; } self.map.entries.set_len(new_len); } if new_len != old_len { // We don't keep track of *which* items were extracted, so reindex everything. self.map.rebuild_hash_table(); } } } impl ExtractCore<'_, K, V> { pub(crate) fn extract_if(&mut self, mut pred: F) -> Option> where F: FnMut(&mut Bucket) -> bool, { debug_assert!(self.end <= self.map.entries.capacity()); let base = self.map.entries.as_mut_ptr(); while self.current < self.end { // SAFETY: We're maintaining both indices within bounds of the original entries, so // 0..new_len and current..indices.len() are always valid items for our Drop to keep. unsafe { let item = base.add(self.current); if pred(&mut *item) { // Extract it! self.current += 1; return Some(item.read()); } else { // Keep it, shifting it down if needed. 
if self.new_len != self.current { debug_assert!(self.new_len < self.current); let dest = base.add(self.new_len); item.copy_to_nonoverlapping(dest, 1); } self.current += 1; self.new_len += 1; } } } None } pub(crate) fn remaining(&self) -> usize { self.end - self.current } } indexmap-2.12.1/src/map/core/raw_entry_v1.rs000064400000000000000000000560241046102023000170050ustar 00000000000000//! Opt-in access to the experimental raw entry API. //! //! This module is designed to mimic the raw entry API of [`HashMap`][std::collections::hash_map], //! matching its unstable state as of Rust 1.75. See the tracking issue //! [rust#56167](https://github.com/rust-lang/rust/issues/56167) for more details. //! //! The trait [`RawEntryApiV1`] and the `_v1` suffix on its methods are meant to insulate this for //! the future, in case later breaking changes are needed. If the standard library stabilizes its //! `hash_raw_entry` feature (or some replacement), matching *inherent* methods will be added to //! `IndexMap` without such an opt-in trait. use super::{IndexMapCore, OccupiedEntry}; use crate::{Equivalent, HashValue, IndexMap}; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::marker::PhantomData; use core::mem; /// Opt-in access to the experimental raw entry API. /// /// See the [`raw_entry_v1`][self] module documentation for more information. #[expect(private_bounds)] pub trait RawEntryApiV1: Sealed { /// Creates a raw immutable entry builder for the [`IndexMap`]. /// /// Raw entries provide the lowest level of control for searching and /// manipulating a map. They must be manually initialized with a hash and /// then manually searched. /// /// This is useful for /// * Hash memoization /// * Using a search key that doesn't work with the [`Equivalent`] trait /// * Using custom comparison logic without newtype wrappers /// /// Unless you are in such a situation, higher-level and more foolproof APIs like /// [`get`][IndexMap::get] should be preferred. /// /// Immutable raw entries have very limited use; you might instead want /// [`raw_entry_mut_v1`][Self::raw_entry_mut_v1]. /// /// # Examples /// /// ``` /// use core::hash::BuildHasher; /// use indexmap::map::{IndexMap, RawEntryApiV1}; /// /// let mut map = IndexMap::new(); /// map.extend([("a", 100), ("b", 200), ("c", 300)]); /// /// for k in ["a", "b", "c", "d", "e", "f"] { /// let hash = map.hasher().hash_one(k); /// let i = map.get_index_of(k); /// let v = map.get(k); /// let kv = map.get_key_value(k); /// let ikv = map.get_full(k); /// /// println!("Key: {} and value: {:?}", k, v); /// /// assert_eq!(map.raw_entry_v1().from_key(k), kv); /// assert_eq!(map.raw_entry_v1().from_hash(hash, |q| *q == k), kv); /// assert_eq!(map.raw_entry_v1().from_key_hashed_nocheck(hash, k), kv); /// assert_eq!(map.raw_entry_v1().from_hash_full(hash, |q| *q == k), ikv); /// assert_eq!(map.raw_entry_v1().index_from_hash(hash, |q| *q == k), i); /// } /// ``` fn raw_entry_v1(&self) -> RawEntryBuilder<'_, K, V, S>; /// Creates a raw entry builder for the [`IndexMap`]. /// /// Raw entries provide the lowest level of control for searching and /// manipulating a map. They must be manually initialized with a hash and /// then manually searched. After this, insertions into a vacant entry /// still require an owned key to be provided. 
/// /// Raw entries are useful for such exotic situations as: /// /// * Hash memoization /// * Deferring the creation of an owned key until it is known to be required /// * Using a search key that doesn't work with the [`Equivalent`] trait /// * Using custom comparison logic without newtype wrappers /// /// Because raw entries provide much more low-level control, it's much easier /// to put the `IndexMap` into an inconsistent state which, while memory-safe, /// will cause the map to produce seemingly random results. Higher-level and more /// foolproof APIs like [`entry`][IndexMap::entry] should be preferred when possible. /// /// Raw entries give mutable access to the keys. This must not be used /// to modify how the key would compare or hash, as the map will not re-evaluate /// where the key should go, meaning the keys may become "lost" if their /// location does not reflect their state. For instance, if you change a key /// so that the map now contains keys which compare equal, search may start /// acting erratically, with two keys randomly masking each other. Implementations /// are free to assume this doesn't happen (within the limits of memory-safety). /// /// # Examples /// /// ``` /// use core::hash::BuildHasher; /// use indexmap::map::{IndexMap, RawEntryApiV1}; /// use indexmap::map::raw_entry_v1::RawEntryMut; /// /// let mut map = IndexMap::new(); /// map.extend([("a", 100), ("b", 200), ("c", 300)]); /// /// // Existing key (insert and update) /// match map.raw_entry_mut_v1().from_key("a") { /// RawEntryMut::Vacant(_) => unreachable!(), /// RawEntryMut::Occupied(mut view) => { /// assert_eq!(view.index(), 0); /// assert_eq!(view.get(), &100); /// let v = view.get_mut(); /// let new_v = (*v) * 10; /// *v = new_v; /// assert_eq!(view.insert(1111), 1000); /// } /// } /// /// assert_eq!(map["a"], 1111); /// assert_eq!(map.len(), 3); /// /// // Existing key (take) /// let hash = map.hasher().hash_one("c"); /// match map.raw_entry_mut_v1().from_key_hashed_nocheck(hash, "c") { /// RawEntryMut::Vacant(_) => unreachable!(), /// RawEntryMut::Occupied(view) => { /// assert_eq!(view.index(), 2); /// assert_eq!(view.shift_remove_entry(), ("c", 300)); /// } /// } /// assert_eq!(map.raw_entry_v1().from_key("c"), None); /// assert_eq!(map.len(), 2); /// /// // Nonexistent key (insert and update) /// let key = "d"; /// let hash = map.hasher().hash_one(key); /// match map.raw_entry_mut_v1().from_hash(hash, |q| *q == key) { /// RawEntryMut::Occupied(_) => unreachable!(), /// RawEntryMut::Vacant(view) => { /// assert_eq!(view.index(), 2); /// let (k, value) = view.insert("d", 4000); /// assert_eq!((*k, *value), ("d", 4000)); /// *value = 40000; /// } /// } /// assert_eq!(map["d"], 40000); /// assert_eq!(map.len(), 3); /// /// match map.raw_entry_mut_v1().from_hash(hash, |q| *q == key) { /// RawEntryMut::Vacant(_) => unreachable!(), /// RawEntryMut::Occupied(view) => { /// assert_eq!(view.index(), 2); /// assert_eq!(view.swap_remove_entry(), ("d", 40000)); /// } /// } /// assert_eq!(map.get("d"), None); /// assert_eq!(map.len(), 2); /// ``` fn raw_entry_mut_v1(&mut self) -> RawEntryBuilderMut<'_, K, V, S>; } impl RawEntryApiV1 for IndexMap { fn raw_entry_v1(&self) -> RawEntryBuilder<'_, K, V, S> { RawEntryBuilder { map: self } } fn raw_entry_mut_v1(&mut self) -> RawEntryBuilderMut<'_, K, V, S> { RawEntryBuilderMut { map: self } } } /// A builder for computing where in an [`IndexMap`] a key-value pair would be stored. 
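// A small downstream-style sketch of the hash-memoization use case called out above:
// compute the hash once with the map's own hasher, then reuse it for several immutable
// raw-entry probes.
fn raw_entry_lookup_example() {
    use core::hash::BuildHasher;
    use indexmap::map::{IndexMap, RawEntryApiV1};

    let mut map = IndexMap::new();
    map.extend([("apple", 3), ("pear", 2)]);

    let key = "apple";
    let hash = map.hasher().hash_one(key); // memoized hash, reused below

    assert_eq!(
        map.raw_entry_v1().from_key_hashed_nocheck(hash, key),
        Some((&"apple", &3))
    );
    assert_eq!(map.raw_entry_v1().index_from_hash(hash, |k| *k == key), Some(0));
}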
/// /// This `struct` is created by the [`IndexMap::raw_entry_v1`] method, provided by the /// [`RawEntryApiV1`] trait. See its documentation for more. pub struct RawEntryBuilder<'a, K, V, S> { map: &'a IndexMap, } impl fmt::Debug for RawEntryBuilder<'_, K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilder").finish_non_exhaustive() } } impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S> { /// Access an entry by key. pub fn from_key(self, key: &Q) -> Option<(&'a K, &'a V)> where S: BuildHasher, Q: ?Sized + Hash + Equivalent, { self.map.get_key_value(key) } /// Access an entry by a key and its hash. pub fn from_key_hashed_nocheck(self, hash: u64, key: &Q) -> Option<(&'a K, &'a V)> where Q: ?Sized + Equivalent, { let hash = HashValue(hash as usize); let i = self.map.core.get_index_of(hash, key)?; self.map.get_index(i) } /// Access an entry by hash. pub fn from_hash(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)> where F: FnMut(&K) -> bool, { let map = self.map; let i = self.index_from_hash(hash, is_match)?; map.get_index(i) } /// Access an entry by hash, including its index. pub fn from_hash_full(self, hash: u64, is_match: F) -> Option<(usize, &'a K, &'a V)> where F: FnMut(&K) -> bool, { let map = self.map; let i = self.index_from_hash(hash, is_match)?; let (key, value) = map.get_index(i)?; Some((i, key, value)) } /// Access the index of an entry by hash. pub fn index_from_hash(self, hash: u64, mut is_match: F) -> Option where F: FnMut(&K) -> bool, { let hash = HashValue(hash as usize); let entries = &*self.map.core.entries; let eq = move |&i: &usize| is_match(&entries[i].key); self.map.core.indices.find(hash.get(), eq).copied() } } /// A builder for computing where in an [`IndexMap`] a key-value pair would be stored. /// /// This `struct` is created by the [`IndexMap::raw_entry_mut_v1`] method, provided by the /// [`RawEntryApiV1`] trait. See its documentation for more. pub struct RawEntryBuilderMut<'a, K, V, S> { map: &'a mut IndexMap, } impl fmt::Debug for RawEntryBuilderMut<'_, K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilderMut").finish_non_exhaustive() } } impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S> { /// Access an entry by key. pub fn from_key(self, key: &Q) -> RawEntryMut<'a, K, V, S> where S: BuildHasher, Q: ?Sized + Hash + Equivalent, { let hash = self.map.hash(key); self.from_key_hashed_nocheck(hash.get(), key) } /// Access an entry by a key and its hash. pub fn from_key_hashed_nocheck(self, hash: u64, key: &Q) -> RawEntryMut<'a, K, V, S> where Q: ?Sized + Equivalent, { self.from_hash(hash, |k| Q::equivalent(key, k)) } /// Access an entry by hash. pub fn from_hash(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S> where F: FnMut(&K) -> bool, { match OccupiedEntry::from_hash(&mut self.map.core, hash, is_match) { Ok(inner) => RawEntryMut::Occupied(RawOccupiedEntryMut { inner, hash_builder: PhantomData, }), Err(map) => RawEntryMut::Vacant(RawVacantEntryMut { map, hash_builder: &self.map.hash_builder, }), } } } /// Raw entry for an existing key-value pair or a vacant location to /// insert one. pub enum RawEntryMut<'a, K, V, S> { /// Existing slot with equivalent key. Occupied(RawOccupiedEntryMut<'a, K, V, S>), /// Vacant slot (no equivalent key in the map). 
Vacant(RawVacantEntryMut<'a, K, V, S>), } impl fmt::Debug for RawEntryMut<'_, K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut tuple = f.debug_tuple("RawEntryMut"); match self { Self::Vacant(v) => tuple.field(v), Self::Occupied(o) => tuple.field(o), }; tuple.finish() } } impl<'a, K, V, S> RawEntryMut<'a, K, V, S> { /// Return the index where the key-value pair exists or may be inserted. #[inline] pub fn index(&self) -> usize { match self { Self::Occupied(entry) => entry.index(), Self::Vacant(entry) => entry.index(), } } /// Inserts the given default key and value in the entry if it is vacant and returns mutable /// references to them. Otherwise mutable references to an already existent pair are returned. pub fn or_insert(self, default_key: K, default_value: V) -> (&'a mut K, &'a mut V) where K: Hash, S: BuildHasher, { match self { Self::Occupied(entry) => entry.into_key_value_mut(), Self::Vacant(entry) => entry.insert(default_key, default_value), } } /// Inserts the result of the `call` function in the entry if it is vacant and returns mutable /// references to them. Otherwise mutable references to an already existent pair are returned. pub fn or_insert_with(self, call: F) -> (&'a mut K, &'a mut V) where F: FnOnce() -> (K, V), K: Hash, S: BuildHasher, { match self { Self::Occupied(entry) => entry.into_key_value_mut(), Self::Vacant(entry) => { let (key, value) = call(); entry.insert(key, value) } } } /// Modifies the entry if it is occupied. pub fn and_modify(mut self, f: F) -> Self where F: FnOnce(&mut K, &mut V), { if let Self::Occupied(entry) = &mut self { let (k, v) = entry.get_key_value_mut(); f(k, v); } self } } /// A raw view into an occupied entry in an [`IndexMap`]. /// It is part of the [`RawEntryMut`] enum. pub struct RawOccupiedEntryMut<'a, K, V, S> { inner: OccupiedEntry<'a, K, V>, hash_builder: PhantomData<&'a S>, } impl fmt::Debug for RawOccupiedEntryMut<'_, K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawOccupiedEntryMut") .field("key", self.key()) .field("value", self.get()) .finish_non_exhaustive() } } impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// Return the index of the key-value pair #[inline] pub fn index(&self) -> usize { self.inner.index() } /// Gets a reference to the entry's key in the map. /// /// Note that this is not the key that was used to find the entry. There may be an observable /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like /// extra fields or the memory address of an allocation. pub fn key(&self) -> &K { self.inner.key() } /// Gets a mutable reference to the entry's key in the map. /// /// Note that this is not the key that was used to find the entry. There may be an observable /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like /// extra fields or the memory address of an allocation. pub fn key_mut(&mut self) -> &mut K { &mut self.inner.get_bucket_mut().key } /// Converts into a mutable reference to the entry's key in the map, /// with a lifetime bound to the map itself. /// /// Note that this is not the key that was used to find the entry. There may be an observable /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like /// extra fields or the memory address of an allocation. pub fn into_key(self) -> &'a mut K { &mut self.inner.into_bucket().key } /// Gets a reference to the entry's value in the map. 
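// A sketch of the entry-like flow on `RawEntryMut` using the `and_modify` and
// `or_insert` combinators defined above: update in place if occupied, insert otherwise.
fn raw_entry_mut_example() {
    use indexmap::map::{IndexMap, RawEntryApiV1};

    let mut counts: IndexMap<&str, u32> = IndexMap::new();

    for word in ["red", "blue", "red"] {
        counts
            .raw_entry_mut_v1()
            .from_key(word)
            .and_modify(|_key, count| *count += 1)
            .or_insert(word, 1);
    }

    assert_eq!(counts.get("red"), Some(&2));
    assert_eq!(counts.get("blue"), Some(&1));
}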
pub fn get(&self) -> &V { self.inner.get() } /// Gets a mutable reference to the entry's value in the map. /// /// If you need a reference which may outlive the destruction of the /// [`RawEntryMut`] value, see [`into_mut`][Self::into_mut]. pub fn get_mut(&mut self) -> &mut V { self.inner.get_mut() } /// Converts into a mutable reference to the entry's value in the map, /// with a lifetime bound to the map itself. pub fn into_mut(self) -> &'a mut V { self.inner.into_mut() } /// Gets a reference to the entry's key and value in the map. pub fn get_key_value(&self) -> (&K, &V) { self.inner.get_bucket().refs() } /// Gets a reference to the entry's key and value in the map. pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) { self.inner.get_bucket_mut().muts() } /// Converts into a mutable reference to the entry's key and value in the map, /// with a lifetime bound to the map itself. pub fn into_key_value_mut(self) -> (&'a mut K, &'a mut V) { self.inner.into_bucket().muts() } /// Sets the value of the entry, and returns the entry's old value. pub fn insert(&mut self, value: V) -> V { self.inner.insert(value) } /// Sets the key of the entry, and returns the entry's old key. pub fn insert_key(&mut self, key: K) -> K { mem::replace(self.key_mut(), key) } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// **NOTE:** This is equivalent to [`.swap_remove()`][Self::swap_remove], replacing this /// entry's position with the last element, and it is deprecated in favor of calling that /// explicitly. If you need to preserve the relative order of the keys in the map, use /// [`.shift_remove()`][Self::shift_remove] instead. #[deprecated(note = "`remove` disrupts the map order -- \ use `swap_remove` or `shift_remove` for explicit behavior.")] pub fn remove(self) -> V { self.swap_remove() } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it /// with the last element of the map and popping it off. /// **This perturbs the position of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove(self) -> V { self.inner.swap_remove() } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove(self) -> V { self.inner.shift_remove() } /// Remove and return the key, value pair stored in the map for this entry /// /// **NOTE:** This is equivalent to [`.swap_remove_entry()`][Self::swap_remove_entry], /// replacing this entry's position with the last element, and it is deprecated in favor of /// calling that explicitly. If you need to preserve the relative order of the keys in the map, /// use [`.shift_remove_entry()`][Self::shift_remove_entry] instead. #[deprecated(note = "`remove_entry` disrupts the map order -- \ use `swap_remove_entry` or `shift_remove_entry` for explicit behavior.")] pub fn remove_entry(self) -> (K, V) { self.swap_remove_entry() } /// Remove and return the key, value pair stored in the map for this entry /// /// Like [`Vec::swap_remove`][alloc::vec::Vec::swap_remove], the pair is removed by swapping it /// with the last element of the map and popping it off. 
/// **This perturbs the position of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove_entry(self) -> (K, V) { self.inner.swap_remove_entry() } /// Remove and return the key, value pair stored in the map for this entry /// /// Like [`Vec::remove`][alloc::vec::Vec::remove], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove_entry(self) -> (K, V) { self.inner.shift_remove_entry() } /// Moves the position of the entry to a new index /// by shifting all other entries in-between. /// /// This is equivalent to [`IndexMap::move_index`] /// coming `from` the current [`.index()`][Self::index]. /// /// * If `self.index() < to`, the other pairs will shift down while the targeted pair moves up. /// * If `self.index() > to`, the other pairs will shift up while the targeted pair moves down. /// /// ***Panics*** if `to` is out of bounds. /// /// Computes in **O(n)** time (average). #[track_caller] pub fn move_index(self, to: usize) { self.inner.move_index(to); } /// Swaps the position of entry with another. /// /// This is equivalent to [`IndexMap::swap_indices`] /// with the current [`.index()`][Self::index] as one of the two being swapped. /// /// ***Panics*** if the `other` index is out of bounds. /// /// Computes in **O(1)** time (average). #[track_caller] pub fn swap_indices(self, other: usize) { self.inner.swap_indices(other); } } /// A view into a vacant raw entry in an [`IndexMap`]. /// It is part of the [`RawEntryMut`] enum. pub struct RawVacantEntryMut<'a, K, V, S> { map: &'a mut IndexMapCore, hash_builder: &'a S, } impl fmt::Debug for RawVacantEntryMut<'_, K, V, S> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawVacantEntryMut").finish_non_exhaustive() } } impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> { /// Return the index where a key-value pair may be inserted. pub fn index(&self) -> usize { self.map.len() } /// Inserts the given key and value into the map, /// and returns mutable references to them. pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V) where K: Hash, S: BuildHasher, { let h = self.hash_builder.hash_one(&key); self.insert_hashed_nocheck(h, key, value) } /// Inserts the given key and value into the map with the provided hash, /// and returns mutable references to them. pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) { let hash = HashValue(hash as usize); self.map.insert_unique(hash, key, value).muts() } /// Inserts the given key and value into the map at the given index, /// shifting others to the right, and returns mutable references to them. /// /// ***Panics*** if `index` is out of bounds. /// /// Computes in **O(n)** time (average). #[track_caller] pub fn shift_insert(self, index: usize, key: K, value: V) -> (&'a mut K, &'a mut V) where K: Hash, S: BuildHasher, { let h = self.hash_builder.hash_one(&key); self.shift_insert_hashed_nocheck(index, h, key, value) } /// Inserts the given key and value into the map with the provided hash /// at the given index, and returns mutable references to them. /// /// ***Panics*** if `index` is out of bounds. /// /// Computes in **O(n)** time (average). 
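// A sketch of inserting through a vacant raw entry at a chosen position, using the
// `RawVacantEntryMut::shift_insert` method described above, rather than appending.
fn raw_vacant_shift_insert_example() {
    use indexmap::map::raw_entry_v1::RawEntryMut;
    use indexmap::map::{IndexMap, RawEntryApiV1};

    let mut map = IndexMap::new();
    map.extend([("a", 1), ("c", 3)]);

    match map.raw_entry_mut_v1().from_key("b") {
        RawEntryMut::Occupied(_) => unreachable!(),
        RawEntryMut::Vacant(entry) => {
            // Insert between "a" and "c"; "c" shifts up by one index.
            entry.shift_insert(1, "b", 2);
        }
    }

    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["a", "b", "c"]);
}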
#[track_caller] pub fn shift_insert_hashed_nocheck( self, index: usize, hash: u64, key: K, value: V, ) -> (&'a mut K, &'a mut V) { let hash = HashValue(hash as usize); self.map.shift_insert_unique(index, hash, key, value); self.map.entries[index].muts() } } trait Sealed {} impl Sealed for IndexMap {} indexmap-2.12.1/src/map/core.rs000064400000000000000000000602611046102023000143630ustar 00000000000000//! This is the core implementation that doesn't depend on the hasher at all. //! //! The methods of `IndexMapCore` don't use any Hash properties of K. //! //! It's cleaner to separate them out, then the compiler checks that we are not //! using Hash at all in these methods. //! //! However, we should probably not let this show in the public API or docs. mod entry; mod extract; pub mod raw_entry_v1; use alloc::vec::{self, Vec}; use core::mem; use core::ops::RangeBounds; use hashbrown::hash_table; use crate::util::simplify_range; use crate::{Bucket, Equivalent, HashValue, TryReserveError}; type Indices = hash_table::HashTable; type Entries = Vec>; pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry}; pub(crate) use extract::ExtractCore; /// Core of the map that does not depend on S #[derive(Debug)] pub(crate) struct IndexMapCore { /// indices mapping from the entry hash to its index. indices: Indices, /// entries is a dense vec maintaining entry order. entries: Entries, } #[inline(always)] fn get_hash(entries: &[Bucket]) -> impl Fn(&usize) -> u64 + use<'_, K, V> { move |&i| entries[i].hash.get() } #[inline] fn equivalent<'a, K, V, Q: ?Sized + Equivalent>( key: &'a Q, entries: &'a [Bucket], ) -> impl Fn(&usize) -> bool + use<'a, K, V, Q> { move |&i| Q::equivalent(key, &entries[i].key) } #[inline] fn erase_index(table: &mut Indices, hash: HashValue, index: usize) { if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) { entry.remove(); } else if cfg!(debug_assertions) { panic!("index not found"); } } #[inline] fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) { let index = table .find_mut(hash.get(), move |&i| i == old) .expect("index not found"); *index = new; } /// Inserts many entries into the indices table without reallocating, /// and without regard for duplication. /// /// ***Panics*** if there is not sufficient capacity already. fn insert_bulk_no_grow(indices: &mut Indices, entries: &[Bucket]) { assert!(indices.capacity() - indices.len() >= entries.len()); for entry in entries { indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!()); } } impl Clone for IndexMapCore where K: Clone, V: Clone, { fn clone(&self) -> Self { let mut new = Self::new(); new.clone_from(self); new } fn clone_from(&mut self, other: &Self) { self.indices.clone_from(&other.indices); if self.entries.capacity() < other.entries.len() { // If we must resize, match the indices capacity. let additional = other.entries.len() - self.entries.len(); self.reserve_entries(additional); } self.entries.clone_from(&other.entries); } } impl IndexMapCore { /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`. 
const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / size_of::>(); #[inline] pub(crate) const fn new() -> Self { IndexMapCore { indices: Indices::new(), entries: Vec::new(), } } #[inline] pub(crate) fn with_capacity(n: usize) -> Self { IndexMapCore { indices: Indices::with_capacity(n), entries: Vec::with_capacity(n), } } #[inline] pub(crate) fn into_entries(self) -> Entries { self.entries } #[inline] pub(crate) fn as_entries(&self) -> &[Bucket] { &self.entries } #[inline] pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket] { &mut self.entries } pub(crate) fn with_entries(&mut self, f: F) where F: FnOnce(&mut [Bucket]), { f(&mut self.entries); self.rebuild_hash_table(); } #[inline] pub(crate) fn len(&self) -> usize { debug_assert_eq!(self.entries.len(), self.indices.len()); self.indices.len() } #[inline] pub(crate) fn capacity(&self) -> usize { Ord::min(self.indices.capacity(), self.entries.capacity()) } pub(crate) fn clear(&mut self) { self.indices.clear(); self.entries.clear(); } pub(crate) fn truncate(&mut self, len: usize) { if len < self.len() { self.erase_indices(len, self.entries.len()); self.entries.truncate(len); } } #[track_caller] pub(crate) fn drain(&mut self, range: R) -> vec::Drain<'_, Bucket> where R: RangeBounds, { let range = simplify_range(range, self.entries.len()); self.erase_indices(range.start, range.end); self.entries.drain(range) } #[cfg(feature = "rayon")] pub(crate) fn par_drain(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket> where K: Send, V: Send, R: RangeBounds, { use rayon::iter::ParallelDrainRange; let range = simplify_range(range, self.entries.len()); self.erase_indices(range.start, range.end); self.entries.par_drain(range) } #[track_caller] pub(crate) fn split_off(&mut self, at: usize) -> Self { let len = self.entries.len(); assert!( at <= len, "index out of bounds: the len is {len} but the index is {at}. Expected index <= len" ); self.erase_indices(at, self.entries.len()); let entries = self.entries.split_off(at); let mut indices = Indices::with_capacity(entries.len()); insert_bulk_no_grow(&mut indices, &entries); Self { indices, entries } } #[track_caller] pub(crate) fn split_splice(&mut self, range: R) -> (Self, vec::IntoIter>) where R: RangeBounds, { let range = simplify_range(range, self.len()); self.erase_indices(range.start, self.entries.len()); let entries = self.entries.split_off(range.end); let drained = self.entries.split_off(range.start); let mut indices = Indices::with_capacity(entries.len()); insert_bulk_no_grow(&mut indices, &entries); (Self { indices, entries }, drained.into_iter()) } /// Append from another map without checking whether items already exist. pub(crate) fn append_unchecked(&mut self, other: &mut Self) { self.reserve(other.len()); insert_bulk_no_grow(&mut self.indices, &other.entries); self.entries.append(&mut other.entries); other.indices.clear(); } /// Reserve capacity for `additional` more key-value pairs. pub(crate) fn reserve(&mut self, additional: usize) { self.indices.reserve(additional, get_hash(&self.entries)); // Only grow entries if necessary, since we also round up capacity. if additional > self.entries.capacity() - self.entries.len() { self.reserve_entries(additional); } } /// Reserve capacity for `additional` more key-value pairs, without over-allocating. pub(crate) fn reserve_exact(&mut self, additional: usize) { self.indices.reserve(additional, get_hash(&self.entries)); self.entries.reserve_exact(additional); } /// Try to reserve capacity for `additional` more key-value pairs. 
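// A sketch of the capacity API these internals back, assuming the public `IndexMap`
// methods of the same names: `reserve` grows both the index table and the entry vector,
// while `try_reserve` reports allocation failure as a `TryReserveError` instead of panicking.
fn capacity_example() {
    use indexmap::IndexMap;

    let mut map: IndexMap<u32, u32> = IndexMap::with_capacity(4);
    assert!(map.capacity() >= 4);

    map.reserve(100);
    assert!(map.capacity() >= 100);

    // Fallible reservation, for callers that must handle allocation failure gracefully.
    map.try_reserve(10).expect("allocation failed");

    map.extend((0..3).map(|i| (i, i)));
    map.shrink_to_fit();
    assert!(map.capacity() >= map.len());
}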
pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { self.indices .try_reserve(additional, get_hash(&self.entries)) .map_err(TryReserveError::from_hashbrown)?; // Only grow entries if necessary, since we also round up capacity. if additional > self.entries.capacity() - self.entries.len() { self.try_reserve_entries(additional) } else { Ok(()) } } /// Try to reserve entries capacity, rounded up to match the indices fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> { // Use a soft-limit on the maximum capacity, but if the caller explicitly // requested more, do it and let them have the resulting error. let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY); let try_add = new_capacity - self.entries.len(); if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { return Ok(()); } self.entries .try_reserve_exact(additional) .map_err(TryReserveError::from_alloc) } /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating. pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { self.indices .try_reserve(additional, get_hash(&self.entries)) .map_err(TryReserveError::from_hashbrown)?; self.entries .try_reserve_exact(additional) .map_err(TryReserveError::from_alloc) } /// Shrink the capacity of the map with a lower bound pub(crate) fn shrink_to(&mut self, min_capacity: usize) { self.indices .shrink_to(min_capacity, get_hash(&self.entries)); self.entries.shrink_to(min_capacity); } /// Remove the last key-value pair pub(crate) fn pop(&mut self) -> Option<(K, V)> { if let Some(entry) = self.entries.pop() { let last = self.entries.len(); erase_index(&mut self.indices, entry.hash, last); Some((entry.key, entry.value)) } else { None } } /// Return the index in `entries` where an equivalent key can be found pub(crate) fn get_index_of(&self, hash: HashValue, key: &Q) -> Option where Q: ?Sized + Equivalent, { let eq = equivalent(key, &self.entries); self.indices.find(hash.get(), eq).copied() } /// Append a key-value pair to `entries`, /// *without* checking whether it already exists. fn push_entry(&mut self, hash: HashValue, key: K, value: V) { if self.entries.len() == self.entries.capacity() { // Reserve our own capacity synced to the indices, // rather than letting `Vec::push` just double it. 
self.reserve_entries(1); } self.entries.push(Bucket { hash, key, value }); } pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option) where K: Eq, { let eq = equivalent(&key, &self.entries); let hasher = get_hash(&self.entries); match self.indices.entry(hash.get(), eq, hasher) { hash_table::Entry::Occupied(entry) => { let i = *entry.get(); (i, Some(mem::replace(&mut self.entries[i].value, value))) } hash_table::Entry::Vacant(entry) => { let i = self.entries.len(); entry.insert(i); self.push_entry(hash, key, value); debug_assert_eq!(self.indices.len(), self.entries.len()); (i, None) } } } /// Same as `insert_full`, except it also replaces the key pub(crate) fn replace_full( &mut self, hash: HashValue, key: K, value: V, ) -> (usize, Option<(K, V)>) where K: Eq, { let eq = equivalent(&key, &self.entries); let hasher = get_hash(&self.entries); match self.indices.entry(hash.get(), eq, hasher) { hash_table::Entry::Occupied(entry) => { let i = *entry.get(); let entry = &mut self.entries[i]; let kv = ( mem::replace(&mut entry.key, key), mem::replace(&mut entry.value, value), ); (i, Some(kv)) } hash_table::Entry::Vacant(entry) => { let i = self.entries.len(); entry.insert(i); self.push_entry(hash, key, value); debug_assert_eq!(self.indices.len(), self.entries.len()); (i, None) } } } /// Remove an entry by shifting all entries that follow it pub(crate) fn shift_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> where Q: ?Sized + Equivalent, { let eq = equivalent(key, &self.entries); let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove(); let (key, value) = self.shift_remove_finish(index); Some((index, key, value)) } /// Remove an entry by swapping it with the last pub(crate) fn swap_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> where Q: ?Sized + Equivalent, { let eq = equivalent(key, &self.entries); let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove(); let (key, value) = self.swap_remove_finish(index); Some((index, key, value)) } /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..` /// /// All of these items should still be at their original location in `entries`. /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`. 
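// A sketch of the public counterparts of `insert_full` and the two removal strategies
// above (assuming the `IndexMap` methods of the same names): `swap_remove_full` is O(1)
// but reorders, `shift_remove_full` is O(n) but preserves the order of what remains.
fn full_ops_example() {
    use indexmap::IndexMap;

    let mut map = IndexMap::new();
    map.extend([("a", 1), ("b", 2), ("c", 3)]);

    // Existing key: the index is returned along with the replaced value.
    assert_eq!(map.insert_full("b", 20), (1, Some(2)));
    // New key: appended at the end, with no previous value.
    assert_eq!(map.insert_full("d", 4), (3, None));

    // Swap-removal moves the last entry ("d") into the hole left by "a".
    assert_eq!(map.swap_remove_full("a"), Some((0, "a", 1)));
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["d", "b", "c"]);

    // Shift-removal keeps the remaining entries in their relative order.
    assert_eq!(map.shift_remove_full("b"), Some((1, "b", 20)));
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["d", "c"]);
}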
fn erase_indices(&mut self, start: usize, end: usize) { let (init, shifted_entries) = self.entries.split_at(end); let (start_entries, erased_entries) = init.split_at(start); let erased = erased_entries.len(); let shifted = shifted_entries.len(); let half_capacity = self.indices.capacity() / 2; // Use a heuristic between different strategies if erased == 0 { // Degenerate case, nothing to do } else if start + shifted < half_capacity && start < erased { // Reinsert everything, as there are few kept indices self.indices.clear(); // Reinsert stable indices, then shifted indices insert_bulk_no_grow(&mut self.indices, start_entries); insert_bulk_no_grow(&mut self.indices, shifted_entries); } else if erased + shifted < half_capacity { // Find each affected index, as there are few to adjust // Find erased indices for (i, entry) in (start..).zip(erased_entries) { erase_index(&mut self.indices, entry.hash, i); } // Find shifted indices for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) { update_index(&mut self.indices, entry.hash, old, new); } } else { // Sweep the whole table for adjustments let offset = end - start; self.indices.retain(move |i| { if *i >= end { *i -= offset; true } else { *i < start } }); } debug_assert_eq!(self.indices.len(), start + shifted); } pub(crate) fn retain_in_order(&mut self, mut keep: F) where F: FnMut(&mut K, &mut V) -> bool, { self.entries .retain_mut(|entry| keep(&mut entry.key, &mut entry.value)); if self.entries.len() < self.indices.len() { self.rebuild_hash_table(); } } fn rebuild_hash_table(&mut self) { self.indices.clear(); insert_bulk_no_grow(&mut self.indices, &self.entries); } pub(crate) fn reverse(&mut self) { self.entries.reverse(); // No need to save hash indices, can easily calculate what they should // be, given that this is an in-place reversal. let len = self.entries.len(); for i in &mut self.indices { *i = len - *i - 1; } } /// Reserve entries capacity, rounded up to match the indices #[inline] fn reserve_entries(&mut self, additional: usize) { // Use a soft-limit on the maximum capacity, but if the caller explicitly // requested more, do it and let them have the resulting panic. let try_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY); let try_add = try_capacity - self.entries.len(); if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { return; } self.entries.reserve_exact(additional); } /// Insert a key-value pair in `entries`, /// *without* checking whether it already exists. pub(super) fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> &mut Bucket { let i = self.indices.len(); debug_assert_eq!(i, self.entries.len()); self.indices .insert_unique(hash.get(), i, get_hash(&self.entries)); self.push_entry(hash, key, value); &mut self.entries[i] } /// Replaces the key at the given index, /// *without* checking whether it already exists. #[track_caller] pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K { // NB: This removal and insertion isn't "no grow" (with unreachable hasher) // because hashbrown's tombstones might force a resize anyway. erase_index(&mut self.indices, self.entries[index].hash, index); self.indices .insert_unique(hash.get(), index, get_hash(&self.entries)); let entry = &mut self.entries[index]; entry.hash = hash; mem::replace(&mut entry.key, key) } /// Insert a key-value pair in `entries` at a particular index, /// *without* checking whether it already exists. 
fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) { let end = self.indices.len(); assert!(index <= end); // Increment others first so we don't have duplicate indices. self.increment_indices(index, end); let entries = &*self.entries; self.indices.insert_unique(hash.get(), index, move |&i| { // Adjust for the incremented indices to find hashes. debug_assert_ne!(i, index); let i = if i < index { i } else { i - 1 }; entries[i].hash.get() }); if self.entries.len() == self.entries.capacity() { // Reserve our own capacity synced to the indices, // rather than letting `Vec::insert` just double it. self.reserve_entries(1); } self.entries.insert(index, Bucket { hash, key, value }); } /// Remove an entry by shifting all entries that follow it pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { match self.entries.get(index) { Some(entry) => { erase_index(&mut self.indices, entry.hash, index); Some(self.shift_remove_finish(index)) } None => None, } } /// Remove an entry by shifting all entries that follow it /// /// The index should already be removed from `self.indices`. fn shift_remove_finish(&mut self, index: usize) -> (K, V) { // Correct indices that point to the entries that followed the removed entry. self.decrement_indices(index + 1, self.entries.len()); // Use Vec::remove to actually remove the entry. let entry = self.entries.remove(index); (entry.key, entry.value) } /// Remove an entry by swapping it with the last pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { match self.entries.get(index) { Some(entry) => { erase_index(&mut self.indices, entry.hash, index); Some(self.swap_remove_finish(index)) } None => None, } } /// Finish removing an entry by swapping it with the last /// /// The index should already be removed from `self.indices`. fn swap_remove_finish(&mut self, index: usize) -> (K, V) { // use swap_remove, but then we need to update the index that points // to the other entry that has to move let entry = self.entries.swap_remove(index); // correct index that points to the entry that had to swap places if let Some(entry) = self.entries.get(index) { // was not last element // examine new element in `index` and find it in indices let last = self.entries.len(); update_index(&mut self.indices, entry.hash, last, index); } (entry.key, entry.value) } /// Decrement all indices in the range `start..end`. /// /// The index `start - 1` should not exist in `self.indices`. /// All entries should still be in their original positions. fn decrement_indices(&mut self, start: usize, end: usize) { // Use a heuristic between a full sweep vs. a `find()` for every shifted item. let shifted_entries = &self.entries[start..end]; if shifted_entries.len() > self.indices.capacity() / 2 { // Shift all indices in range. for i in &mut self.indices { if start <= *i && *i < end { *i -= 1; } } } else { // Find each entry in range to shift its index. for (i, entry) in (start..end).zip(shifted_entries) { update_index(&mut self.indices, entry.hash, i, i - 1); } } } /// Increment all indices in the range `start..end`. /// /// The index `end` should not exist in `self.indices`. /// All entries should still be in their original positions. fn increment_indices(&mut self, start: usize, end: usize) { // Use a heuristic between a full sweep vs. a `find()` for every shifted item. let shifted_entries = &self.entries[start..end]; if shifted_entries.len() > self.indices.capacity() / 2 { // Shift all indices in range. 
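// A sketch of the index-based removals whose internals appear above: `swap_remove_index`
// fills the hole with the last entry, `shift_remove_index` shifts later entries down.
fn remove_index_example() {
    use indexmap::IndexMap;

    let mut map: IndexMap<char, u8> = ('a'..='e').zip(0u8..).collect();

    // Swap-remove index 1 ('b'): the last entry ('e') takes its place.
    assert_eq!(map.swap_remove_index(1), Some(('b', 1)));
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ['a', 'e', 'c', 'd']);

    // Shift-remove index 1 (now 'e'): later entries shift down, keeping their order.
    assert_eq!(map.shift_remove_index(1), Some(('e', 4)));
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ['a', 'c', 'd']);
}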
for i in &mut self.indices { if start <= *i && *i < end { *i += 1; } } } else { // Find each entry in range to shift its index, updated in reverse so // we never have duplicated indices that might have a hash collision. for (i, entry) in (start..end).zip(shifted_entries).rev() { update_index(&mut self.indices, entry.hash, i, i + 1); } } } #[track_caller] pub(super) fn move_index(&mut self, from: usize, to: usize) { let from_hash = self.entries[from].hash; if from != to { let _ = self.entries[to]; // explicit bounds check // Find the bucket index first so we won't lose it among other updated indices. let bucket = self .indices .find_bucket_index(from_hash.get(), move |&i| i == from) .expect("index not found"); self.move_index_inner(from, to); *self.indices.get_bucket_mut(bucket).unwrap() = to; } } fn move_index_inner(&mut self, from: usize, to: usize) { // Update all other indices and rotate the entry positions. if from < to { self.decrement_indices(from + 1, to + 1); self.entries[from..=to].rotate_left(1); } else if to < from { self.increment_indices(to, from); self.entries[to..=from].rotate_right(1); } } #[track_caller] pub(crate) fn swap_indices(&mut self, a: usize, b: usize) { // If they're equal and in-bounds, there's nothing to do. if a == b && a < self.entries.len() { return; } // We'll get a "nice" bounds-check from indexing `entries`, // and then we expect to find it in the table as well. match self.indices.get_disjoint_mut( [self.entries[a].hash.get(), self.entries[b].hash.get()], move |i, &x| if i == 0 { x == a } else { x == b }, ) { [Some(ref_a), Some(ref_b)] => { mem::swap(ref_a, ref_b); self.entries.swap(a, b); } _ => panic!("indices not found"), } } } #[test] fn assert_send_sync() { fn assert_send_sync() {} assert_send_sync::>(); assert_send_sync::>(); assert_send_sync::>(); assert_send_sync::>(); } indexmap-2.12.1/src/map/iter.rs000064400000000000000000000514441046102023000144010ustar 00000000000000use super::{Bucket, ExtractCore, IndexMap, IndexMapCore, Slice}; use alloc::vec::{self, Vec}; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::iter::FusedIterator; use core::ops::{Index, RangeBounds}; use core::slice; impl<'a, K, V, S> IntoIterator for &'a IndexMap { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, K, V, S> IntoIterator for &'a mut IndexMap { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl IntoIterator for IndexMap { type Item = (K, V); type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter::new(self.into_entries()) } } /// An iterator over the entries of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::iter`] method. /// See its documentation for more. pub struct Iter<'a, K, V> { iter: slice::Iter<'a, Bucket>, } impl<'a, K, V> Iter<'a, K, V> { pub(super) fn new(entries: &'a [Bucket]) -> Self { Self { iter: entries.iter(), } } /// Returns a slice of the remaining entries in the iterator. 
pub fn as_slice(&self) -> &'a Slice { Slice::from_slice(self.iter.as_slice()) } } impl<'a, K, V> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); iterator_methods!(Bucket::refs); } impl DoubleEndedIterator for Iter<'_, K, V> { double_ended_iterator_methods!(Bucket::refs); } impl ExactSizeIterator for Iter<'_, K, V> { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for Iter<'_, K, V> {} // FIXME(#26925) Remove in favor of `#[derive(Clone)]` impl Clone for Iter<'_, K, V> { fn clone(&self) -> Self { Iter { iter: self.iter.clone(), } } } impl fmt::Debug for Iter<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } impl Default for Iter<'_, K, V> { fn default() -> Self { Self { iter: [].iter() } } } /// A mutable iterator over the entries of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::iter_mut`] method. /// See its documentation for more. pub struct IterMut<'a, K, V> { iter: slice::IterMut<'a, Bucket>, } impl<'a, K, V> IterMut<'a, K, V> { pub(super) fn new(entries: &'a mut [Bucket]) -> Self { Self { iter: entries.iter_mut(), } } /// Returns a slice of the remaining entries in the iterator. pub fn as_slice(&self) -> &Slice { Slice::from_slice(self.iter.as_slice()) } /// Returns a mutable slice of the remaining entries in the iterator. /// /// To avoid creating `&mut` references that alias, this is forced to consume the iterator. pub fn into_slice(self) -> &'a mut Slice { Slice::from_mut_slice(self.iter.into_slice()) } } impl<'a, K, V> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); iterator_methods!(Bucket::ref_mut); } impl DoubleEndedIterator for IterMut<'_, K, V> { double_ended_iterator_methods!(Bucket::ref_mut); } impl ExactSizeIterator for IterMut<'_, K, V> { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for IterMut<'_, K, V> {} impl fmt::Debug for IterMut<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::refs); f.debug_list().entries(iter).finish() } } impl Default for IterMut<'_, K, V> { fn default() -> Self { Self { iter: [].iter_mut(), } } } /// A mutable iterator over the entries of an [`IndexMap`]. /// /// This `struct` is created by the [`MutableKeys::iter_mut2`][super::MutableKeys::iter_mut2] method. /// See its documentation for more. pub struct IterMut2<'a, K, V> { iter: slice::IterMut<'a, Bucket>, } impl<'a, K, V> IterMut2<'a, K, V> { pub(super) fn new(entries: &'a mut [Bucket]) -> Self { Self { iter: entries.iter_mut(), } } /// Returns a slice of the remaining entries in the iterator. pub fn as_slice(&self) -> &Slice { Slice::from_slice(self.iter.as_slice()) } /// Returns a mutable slice of the remaining entries in the iterator. /// /// To avoid creating `&mut` references that alias, this is forced to consume the iterator. 
pub fn into_slice(self) -> &'a mut Slice { Slice::from_mut_slice(self.iter.into_slice()) } } impl<'a, K, V> Iterator for IterMut2<'a, K, V> { type Item = (&'a mut K, &'a mut V); iterator_methods!(Bucket::muts); } impl DoubleEndedIterator for IterMut2<'_, K, V> { double_ended_iterator_methods!(Bucket::muts); } impl ExactSizeIterator for IterMut2<'_, K, V> { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for IterMut2<'_, K, V> {} impl fmt::Debug for IterMut2<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::refs); f.debug_list().entries(iter).finish() } } impl Default for IterMut2<'_, K, V> { fn default() -> Self { Self { iter: [].iter_mut(), } } } /// An owning iterator over the entries of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::into_iter`] method /// (provided by the [`IntoIterator`] trait). See its documentation for more. #[derive(Clone)] pub struct IntoIter { iter: vec::IntoIter>, } impl IntoIter { pub(super) fn new(entries: Vec>) -> Self { Self { iter: entries.into_iter(), } } /// Returns a slice of the remaining entries in the iterator. pub fn as_slice(&self) -> &Slice { Slice::from_slice(self.iter.as_slice()) } /// Returns a mutable slice of the remaining entries in the iterator. pub fn as_mut_slice(&mut self) -> &mut Slice { Slice::from_mut_slice(self.iter.as_mut_slice()) } } impl Iterator for IntoIter { type Item = (K, V); iterator_methods!(Bucket::key_value); } impl DoubleEndedIterator for IntoIter { double_ended_iterator_methods!(Bucket::key_value); } impl ExactSizeIterator for IntoIter { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for IntoIter {} impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::refs); f.debug_list().entries(iter).finish() } } impl Default for IntoIter { fn default() -> Self { Self { iter: Vec::new().into_iter(), } } } /// A draining iterator over the entries of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::drain`] method. /// See its documentation for more. pub struct Drain<'a, K, V> { iter: vec::Drain<'a, Bucket>, } impl<'a, K, V> Drain<'a, K, V> { pub(super) fn new(iter: vec::Drain<'a, Bucket>) -> Self { Self { iter } } /// Returns a slice of the remaining entries in the iterator. pub fn as_slice(&self) -> &Slice { Slice::from_slice(self.iter.as_slice()) } } impl Iterator for Drain<'_, K, V> { type Item = (K, V); iterator_methods!(Bucket::key_value); } impl DoubleEndedIterator for Drain<'_, K, V> { double_ended_iterator_methods!(Bucket::key_value); } impl ExactSizeIterator for Drain<'_, K, V> { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for Drain<'_, K, V> {} impl fmt::Debug for Drain<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::refs); f.debug_list().entries(iter).finish() } } /// An iterator over the keys of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::keys`] method. /// See its documentation for more. 
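// A sketch of `IndexMap::drain`, which this `Drain` iterator backs: the chosen range is
// removed from the map (even if the iterator is dropped early), and the entries after
// the range shift down while keeping their relative order.
fn drain_example() {
    use indexmap::IndexMap;

    let mut map: IndexMap<u32, u32> = (0..5).map(|i| (i, i * i)).collect();

    let drained: Vec<_> = map.drain(1..3).collect();
    assert_eq!(drained, [(1, 1), (2, 4)]);

    assert_eq!(map.keys().copied().collect::<Vec<_>>(), [0, 3, 4]);
}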
pub struct Keys<'a, K, V> { iter: slice::Iter<'a, Bucket>, } impl<'a, K, V> Keys<'a, K, V> { pub(super) fn new(entries: &'a [Bucket]) -> Self { Self { iter: entries.iter(), } } } impl<'a, K, V> Iterator for Keys<'a, K, V> { type Item = &'a K; iterator_methods!(Bucket::key_ref); } impl DoubleEndedIterator for Keys<'_, K, V> { double_ended_iterator_methods!(Bucket::key_ref); } impl ExactSizeIterator for Keys<'_, K, V> { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for Keys<'_, K, V> {} // FIXME(#26925) Remove in favor of `#[derive(Clone)]` impl Clone for Keys<'_, K, V> { fn clone(&self) -> Self { Keys { iter: self.iter.clone(), } } } impl fmt::Debug for Keys<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } impl Default for Keys<'_, K, V> { fn default() -> Self { Self { iter: [].iter() } } } /// Access [`IndexMap`] keys at indexed positions. /// /// While [`Index for IndexMap`][values] accesses a map's values, /// indexing through [`IndexMap::keys`] offers an alternative to access a map's /// keys instead. /// /// [values]: IndexMap#impl-Index-for-IndexMap /// /// Since `Keys` is also an iterator, consuming items from the iterator will /// offset the effective indices. Similarly, if `Keys` is obtained from /// [`Slice::keys`], indices will be interpreted relative to the position of /// that slice. /// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// for word in "Lorem ipsum dolor sit amet".split_whitespace() { /// map.insert(word.to_lowercase(), word.to_uppercase()); /// } /// /// assert_eq!(map[0], "LOREM"); /// assert_eq!(map.keys()[0], "lorem"); /// assert_eq!(map[1], "IPSUM"); /// assert_eq!(map.keys()[1], "ipsum"); /// /// map.reverse(); /// assert_eq!(map.keys()[0], "amet"); /// assert_eq!(map.keys()[1], "sit"); /// /// map.sort_keys(); /// assert_eq!(map.keys()[0], "amet"); /// assert_eq!(map.keys()[1], "dolor"); /// /// // Advancing the iterator will offset the indexing /// let mut keys = map.keys(); /// assert_eq!(keys[0], "amet"); /// assert_eq!(keys.next().map(|s| &**s), Some("amet")); /// assert_eq!(keys[0], "dolor"); /// assert_eq!(keys[1], "ipsum"); /// /// // Slices may have an offset as well /// let slice = &map[2..]; /// assert_eq!(slice[0], "IPSUM"); /// assert_eq!(slice.keys()[0], "ipsum"); /// ``` /// /// ```should_panic /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// map.insert("foo", 1); /// println!("{:?}", map.keys()[10]); // panics! /// ``` impl Index for Keys<'_, K, V> { type Output = K; /// Returns a reference to the key at the supplied `index`. /// /// ***Panics*** if `index` is out of bounds. fn index(&self, index: usize) -> &K { &self.iter.as_slice()[index].key } } /// An owning iterator over the keys of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::into_keys`] method. /// See its documentation for more. 
pub struct IntoKeys { iter: vec::IntoIter>, } impl IntoKeys { pub(super) fn new(entries: Vec>) -> Self { Self { iter: entries.into_iter(), } } } impl Iterator for IntoKeys { type Item = K; iterator_methods!(Bucket::key); } impl DoubleEndedIterator for IntoKeys { double_ended_iterator_methods!(Bucket::key); } impl ExactSizeIterator for IntoKeys { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for IntoKeys {} impl fmt::Debug for IntoKeys { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::key_ref); f.debug_list().entries(iter).finish() } } impl Default for IntoKeys { fn default() -> Self { Self { iter: Vec::new().into_iter(), } } } /// An iterator over the values of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::values`] method. /// See its documentation for more. pub struct Values<'a, K, V> { iter: slice::Iter<'a, Bucket>, } impl<'a, K, V> Values<'a, K, V> { pub(super) fn new(entries: &'a [Bucket]) -> Self { Self { iter: entries.iter(), } } } impl<'a, K, V> Iterator for Values<'a, K, V> { type Item = &'a V; iterator_methods!(Bucket::value_ref); } impl DoubleEndedIterator for Values<'_, K, V> { double_ended_iterator_methods!(Bucket::value_ref); } impl ExactSizeIterator for Values<'_, K, V> { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for Values<'_, K, V> {} // FIXME(#26925) Remove in favor of `#[derive(Clone)]` impl Clone for Values<'_, K, V> { fn clone(&self) -> Self { Values { iter: self.iter.clone(), } } } impl fmt::Debug for Values<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } impl Default for Values<'_, K, V> { fn default() -> Self { Self { iter: [].iter() } } } /// A mutable iterator over the values of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::values_mut`] method. /// See its documentation for more. pub struct ValuesMut<'a, K, V> { iter: slice::IterMut<'a, Bucket>, } impl<'a, K, V> ValuesMut<'a, K, V> { pub(super) fn new(entries: &'a mut [Bucket]) -> Self { Self { iter: entries.iter_mut(), } } } impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { type Item = &'a mut V; iterator_methods!(Bucket::value_mut); } impl DoubleEndedIterator for ValuesMut<'_, K, V> { double_ended_iterator_methods!(Bucket::value_mut); } impl ExactSizeIterator for ValuesMut<'_, K, V> { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for ValuesMut<'_, K, V> {} impl fmt::Debug for ValuesMut<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::value_ref); f.debug_list().entries(iter).finish() } } impl Default for ValuesMut<'_, K, V> { fn default() -> Self { Self { iter: [].iter_mut(), } } } /// An owning iterator over the values of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::into_values`] method. /// See its documentation for more. 
pub struct IntoValues { iter: vec::IntoIter>, } impl IntoValues { pub(super) fn new(entries: Vec>) -> Self { Self { iter: entries.into_iter(), } } } impl Iterator for IntoValues { type Item = V; iterator_methods!(Bucket::value); } impl DoubleEndedIterator for IntoValues { double_ended_iterator_methods!(Bucket::value); } impl ExactSizeIterator for IntoValues { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for IntoValues {} impl fmt::Debug for IntoValues { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::value_ref); f.debug_list().entries(iter).finish() } } impl Default for IntoValues { fn default() -> Self { Self { iter: Vec::new().into_iter(), } } } /// A splicing iterator for `IndexMap`. /// /// This `struct` is created by [`IndexMap::splice()`]. /// See its documentation for more. pub struct Splice<'a, I, K, V, S> where I: Iterator, K: Hash + Eq, S: BuildHasher, { map: &'a mut IndexMap, tail: IndexMapCore, drain: vec::IntoIter>, replace_with: I, } impl<'a, I, K, V, S> Splice<'a, I, K, V, S> where I: Iterator, K: Hash + Eq, S: BuildHasher, { #[track_caller] pub(super) fn new(map: &'a mut IndexMap, range: R, replace_with: I) -> Self where R: RangeBounds, { let (tail, drain) = map.core.split_splice(range); Self { map, tail, drain, replace_with, } } } impl Drop for Splice<'_, I, K, V, S> where I: Iterator, K: Hash + Eq, S: BuildHasher, { fn drop(&mut self) { // Finish draining unconsumed items. We don't strictly *have* to do this // manually, since we already split it into separate memory, but it will // match the drop order of `vec::Splice` items this way. let _ = self.drain.nth(usize::MAX); // Now insert all the new items. If a key matches an existing entry, it // keeps the original position and only replaces the value, like `insert`. while let Some((key, value)) = self.replace_with.next() { // Since the tail is disjoint, we can try to update it first, // or else insert (update or append) the primary map. let hash = self.map.hash(&key); if let Some(i) = self.tail.get_index_of(hash, &key) { self.tail.as_entries_mut()[i].value = value; } else { self.map.core.insert_full(hash, key, value); } } // Finally, re-append the tail self.map.core.append_unchecked(&mut self.tail); } } impl Iterator for Splice<'_, I, K, V, S> where I: Iterator, K: Hash + Eq, S: BuildHasher, { type Item = (K, V); fn next(&mut self) -> Option { self.drain.next().map(Bucket::key_value) } fn size_hint(&self) -> (usize, Option) { self.drain.size_hint() } } impl DoubleEndedIterator for Splice<'_, I, K, V, S> where I: Iterator, K: Hash + Eq, S: BuildHasher, { fn next_back(&mut self) -> Option { self.drain.next_back().map(Bucket::key_value) } } impl ExactSizeIterator for Splice<'_, I, K, V, S> where I: Iterator, K: Hash + Eq, S: BuildHasher, { fn len(&self) -> usize { self.drain.len() } } impl FusedIterator for Splice<'_, I, K, V, S> where I: Iterator, K: Hash + Eq, S: BuildHasher, { } impl fmt::Debug for Splice<'_, I, K, V, S> where I: fmt::Debug + Iterator, K: fmt::Debug + Hash + Eq, V: fmt::Debug, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Follow `vec::Splice` in only printing the drain and replacement f.debug_struct("Splice") .field("drain", &self.drain) .field("replace_with", &self.replace_with) .finish() } } /// An extracting iterator for `IndexMap`. /// /// This `struct` is created by [`IndexMap::extract_if()`]. /// See its documentation for more. 
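// A sketch of `IndexMap::splice`, whose `Splice` iterator and `Drop` logic appear above:
// the drained range is yielded, replacements are inserted in its place, and a replacement
// key that already exists elsewhere keeps its old position with only its value updated.
fn splice_example() {
    use indexmap::IndexMap;

    let mut map = IndexMap::new();
    map.extend([("a", 1), ("b", 2), ("c", 3), ("d", 4)]);

    let removed: Vec<_> = map.splice(1..3, [("x", 10), ("d", 40)]).collect();
    assert_eq!(removed, [("b", 2), ("c", 3)]);

    // "x" landed in the spliced range; "d" already existed after it, so only its value changed.
    assert_eq!(
        map.iter().map(|(k, v)| (*k, *v)).collect::<Vec<_>>(),
        [("a", 1), ("x", 10), ("d", 40)]
    );
}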
pub struct ExtractIf<'a, K, V, F> { inner: ExtractCore<'a, K, V>, pred: F, } impl ExtractIf<'_, K, V, F> { #[track_caller] pub(super) fn new(core: &mut IndexMapCore, range: R, pred: F) -> ExtractIf<'_, K, V, F> where R: RangeBounds, F: FnMut(&K, &mut V) -> bool, { ExtractIf { inner: core.extract(range), pred, } } } impl Iterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool, { type Item = (K, V); fn next(&mut self) -> Option { self.inner .extract_if(|bucket| { let (key, value) = bucket.ref_mut(); (self.pred)(key, value) }) .map(Bucket::key_value) } fn size_hint(&self) -> (usize, Option) { (0, Some(self.inner.remaining())) } } impl FusedIterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} impl fmt::Debug for ExtractIf<'_, K, V, F> where K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ExtractIf").finish_non_exhaustive() } } indexmap-2.12.1/src/map/mutable.rs000064400000000000000000000121121046102023000150540ustar 00000000000000use core::hash::{BuildHasher, Hash}; use super::{ Bucket, Entry, Equivalent, IndexMap, IndexedEntry, IterMut2, OccupiedEntry, VacantEntry, }; /// Opt-in mutable access to [`IndexMap`] keys. /// /// These methods expose `&mut K`, mutable references to the key as it is stored /// in the map. /// You are allowed to modify the keys in the map **if the modification /// does not change the key's hash and equality**. /// /// If keys are modified erroneously, you can no longer look them up. /// This is sound (memory safe) but a logical error hazard (just like /// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). /// /// `use` this trait to enable its methods for `IndexMap`. /// /// This trait is sealed and cannot be implemented for types outside this crate. #[expect(private_bounds)] pub trait MutableKeys: Sealed { type Key; type Value; /// Return item index, mutable reference to key and value /// /// Computes in **O(1)** time (average). fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut Self::Key, &mut Self::Value)> where Q: ?Sized + Hash + Equivalent; /// Return mutable reference to key and value at an index. /// /// Valid indices are `0 <= index < self.len()`. /// /// Computes in **O(1)** time. fn get_index_mut2(&mut self, index: usize) -> Option<(&mut Self::Key, &mut Self::Value)>; /// Return an iterator over the key-value pairs of the map, in their order fn iter_mut2(&mut self) -> IterMut2<'_, Self::Key, Self::Value>; /// Scan through each key-value pair in the map and keep those where the /// closure `keep` returns `true`. /// /// The elements are visited in order, and remaining elements keep their /// order. /// /// Computes in **O(n)** time (average). fn retain2(&mut self, keep: F) where F: FnMut(&mut Self::Key, &mut Self::Value) -> bool; } /// Opt-in mutable access to [`IndexMap`] keys. /// /// See [`MutableKeys`] for more information. 
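// A sketch of the opt-in `MutableKeys` trait defined above. The key's `Hash`/`Eq`
// deliberately ignore the `hits` field, so bumping it through `get_full_mut2` cannot
// make the key unfindable.
fn mutable_keys_example() {
    use core::hash::{Hash, Hasher};
    use indexmap::map::MutableKeys;
    use indexmap::IndexMap;

    struct Key {
        name: &'static str,
        hits: u32, // intentionally not part of Hash/Eq
    }
    impl PartialEq for Key {
        fn eq(&self, other: &Self) -> bool {
            self.name == other.name
        }
    }
    impl Eq for Key {}
    impl Hash for Key {
        fn hash<H: Hasher>(&self, state: &mut H) {
            self.name.hash(state);
        }
    }

    let mut map = IndexMap::new();
    map.insert(Key { name: "a", hits: 0 }, 1);

    // Look up with an equivalent key and mutate the stored key's ignored field in place.
    if let Some((_index, key, _value)) = map.get_full_mut2(&Key { name: "a", hits: 0 }) {
        key.hits += 1;
    }

    assert_eq!(map.get_index(0).map(|(k, _)| k.hits), Some(1));
}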
impl MutableKeys for IndexMap where S: BuildHasher, { type Key = K; type Value = V; fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)> where Q: ?Sized + Hash + Equivalent, { if let Some(i) = self.get_index_of(key) { let entry = &mut self.as_entries_mut()[i]; Some((i, &mut entry.key, &mut entry.value)) } else { None } } fn get_index_mut2(&mut self, index: usize) -> Option<(&mut K, &mut V)> { self.as_entries_mut().get_mut(index).map(Bucket::muts) } fn iter_mut2(&mut self) -> IterMut2<'_, Self::Key, Self::Value> { IterMut2::new(self.as_entries_mut()) } fn retain2(&mut self, keep: F) where F: FnMut(&mut K, &mut V) -> bool, { self.core.retain_in_order(keep); } } /// Opt-in mutable access to [`Entry`] keys. /// /// These methods expose `&mut K`, mutable references to the key as it is stored /// in the map. /// You are allowed to modify the keys in the map **if the modification /// does not change the key's hash and equality**. /// /// If keys are modified erroneously, you can no longer look them up. /// This is sound (memory safe) but a logical error hazard (just like /// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). /// /// `use` this trait to enable its methods for `Entry`. /// /// This trait is sealed and cannot be implemented for types outside this crate. #[expect(private_bounds)] pub trait MutableEntryKey: Sealed { type Key; /// Gets a mutable reference to the entry's key, either within the map if occupied, /// or else the new key that was used to find the entry. fn key_mut(&mut self) -> &mut Self::Key; } /// Opt-in mutable access to [`Entry`] keys. /// /// See [`MutableEntryKey`] for more information. impl MutableEntryKey for Entry<'_, K, V> { type Key = K; fn key_mut(&mut self) -> &mut Self::Key { match self { Entry::Occupied(e) => e.key_mut(), Entry::Vacant(e) => e.key_mut(), } } } /// Opt-in mutable access to [`OccupiedEntry`] keys. /// /// See [`MutableEntryKey`] for more information. impl MutableEntryKey for OccupiedEntry<'_, K, V> { type Key = K; fn key_mut(&mut self) -> &mut Self::Key { &mut self.get_bucket_mut().key } } /// Opt-in mutable access to [`VacantEntry`] keys. /// /// See [`MutableEntryKey`] for more information. impl MutableEntryKey for VacantEntry<'_, K, V> { type Key = K; fn key_mut(&mut self) -> &mut Self::Key { self.key_mut() } } /// Opt-in mutable access to [`IndexedEntry`] keys. /// /// See [`MutableEntryKey`] for more information. impl MutableEntryKey for IndexedEntry<'_, K, V> { type Key = K; fn key_mut(&mut self) -> &mut Self::Key { self.key_mut() } } trait Sealed {} impl Sealed for IndexMap {} impl Sealed for Entry<'_, K, V> {} impl Sealed for OccupiedEntry<'_, K, V> {} impl Sealed for VacantEntry<'_, K, V> {} impl Sealed for IndexedEntry<'_, K, V> {} indexmap-2.12.1/src/map/serde_seq.rs000064400000000000000000000074701046102023000154100ustar 00000000000000//! Functions to serialize and deserialize an [`IndexMap`] as an ordered sequence. //! //! The default `serde` implementation serializes `IndexMap` as a normal map, //! but there is no guarantee that serialization formats will preserve the order //! of the key-value pairs. This module serializes `IndexMap` as a sequence of //! `(key, value)` elements instead, in order. //! //! This module may be used in a field attribute for derived implementations: //! //! ``` //! # use indexmap::IndexMap; //! # use serde::{Deserialize, Serialize}; //! #[derive(Deserialize, Serialize)] //! struct Data { //! #[serde(with = "indexmap::map::serde_seq")] //! map: IndexMap, //! // ... //! 
} //! ``` use serde_core::de::{Deserialize, Deserializer, SeqAccess, Visitor}; use serde_core::ser::{Serialize, Serializer}; use core::fmt::{self, Formatter}; use core::hash::{BuildHasher, Hash}; use core::marker::PhantomData; use crate::map::Slice as MapSlice; use crate::serde::cautious_capacity; use crate::set::Slice as SetSlice; use crate::IndexMap; /// Serializes a [`map::Slice`][MapSlice] as an ordered sequence. /// /// This behaves like [`crate::map::serde_seq`] for `IndexMap`, serializing a sequence /// of `(key, value)` pairs, rather than as a map that might not preserve order. impl Serialize for MapSlice where K: Serialize, V: Serialize, { fn serialize(&self, serializer: T) -> Result where T: Serializer, { serializer.collect_seq(self) } } /// Serializes a [`set::Slice`][SetSlice] as an ordered sequence. impl Serialize for SetSlice where T: Serialize, { fn serialize(&self, serializer: Se) -> Result where Se: Serializer, { serializer.collect_seq(self) } } /// Serializes an [`IndexMap`] as an ordered sequence. /// /// This function may be used in a field attribute for deriving [`Serialize`]: /// /// ``` /// # use indexmap::IndexMap; /// # use serde::Serialize; /// #[derive(Serialize)] /// struct Data { /// #[serde(serialize_with = "indexmap::map::serde_seq::serialize")] /// map: IndexMap, /// // ... /// } /// ``` pub fn serialize(map: &IndexMap, serializer: T) -> Result where K: Serialize, V: Serialize, T: Serializer, { serializer.collect_seq(map) } /// Visitor to deserialize a *sequenced* `IndexMap` struct SeqVisitor(PhantomData<(K, V, S)>); impl<'de, K, V, S> Visitor<'de> for SeqVisitor where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: Default + BuildHasher, { type Value = IndexMap; fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { write!(formatter, "a sequenced map") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { let capacity = cautious_capacity::(seq.size_hint()); let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default()); while let Some((key, value)) = seq.next_element()? { map.insert(key, value); } Ok(map) } } /// Deserializes an [`IndexMap`] from an ordered sequence. /// /// This function may be used in a field attribute for deriving [`Deserialize`]: /// /// ``` /// # use indexmap::IndexMap; /// # use serde::Deserialize; /// #[derive(Deserialize)] /// struct Data { /// #[serde(deserialize_with = "indexmap::map::serde_seq::deserialize")] /// map: IndexMap, /// // ... /// } /// ``` pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result, D::Error> where D: Deserializer<'de>, K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: Default + BuildHasher, { deserializer.deserialize_seq(SeqVisitor(PhantomData)) } indexmap-2.12.1/src/map/slice.rs000064400000000000000000000571771046102023000145460ustar 00000000000000use super::{ Bucket, IndexMap, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values, ValuesMut, }; use crate::util::{slice_eq, try_simplify_range}; use crate::GetDisjointMutError; use alloc::boxed::Box; use alloc::vec::Vec; use core::cmp::Ordering; use core::fmt; use core::hash::{Hash, Hasher}; use core::ops::{self, Bound, Index, IndexMut, RangeBounds}; /// A dynamically-sized slice of key-value pairs in an [`IndexMap`]. /// /// This supports indexed operations much like a `[(K, V)]` slice, /// but not any hashed operations on the map keys. 
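///
/// An illustrative sketch of indexed access through a slice (assumes
/// [`IndexMap::as_slice`], defined elsewhere in this crate):
///
/// ```
/// use indexmap::IndexMap;
///
/// let map: IndexMap<i32, i32> = (0..5).map(|i| (i, i * i)).collect();
/// let slice = map.as_slice();
/// assert_eq!(slice.get_index(2), Some((&2, &4)));
/// assert_eq!(slice[4], 16); // `Index<usize>` returns the value at that position
/// ```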
/// /// Unlike `IndexMap`, `Slice` does consider the order for [`PartialEq`] /// and [`Eq`], and it also implements [`PartialOrd`], [`Ord`], and [`Hash`]. #[repr(transparent)] pub struct Slice { pub(crate) entries: [Bucket], } // SAFETY: `Slice` is a transparent wrapper around `[Bucket]`, // and reference lifetimes are bound together in function signatures. #[allow(unsafe_code)] impl Slice { pub(super) const fn from_slice(entries: &[Bucket]) -> &Self { unsafe { &*(entries as *const [Bucket] as *const Self) } } pub(super) fn from_mut_slice(entries: &mut [Bucket]) -> &mut Self { unsafe { &mut *(entries as *mut [Bucket] as *mut Self) } } pub(super) fn from_boxed(entries: Box<[Bucket]>) -> Box { unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) } } fn into_boxed(self: Box) -> Box<[Bucket]> { unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket]) } } } impl Slice { pub(crate) fn into_entries(self: Box) -> Vec> { self.into_boxed().into_vec() } /// Returns an empty slice. pub const fn new<'a>() -> &'a Self { Self::from_slice(&[]) } /// Returns an empty mutable slice. pub fn new_mut<'a>() -> &'a mut Self { Self::from_mut_slice(&mut []) } /// Return the number of key-value pairs in the map slice. #[inline] pub const fn len(&self) -> usize { self.entries.len() } /// Returns true if the map slice contains no elements. #[inline] pub const fn is_empty(&self) -> bool { self.entries.is_empty() } /// Get a key-value pair by index. /// /// Valid indices are `0 <= index < self.len()`. pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { self.entries.get(index).map(Bucket::refs) } /// Get a key-value pair by index, with mutable access to the value. /// /// Valid indices are `0 <= index < self.len()`. pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> { self.entries.get_mut(index).map(Bucket::ref_mut) } /// Returns a slice of key-value pairs in the given range of indices. /// /// Valid indices are `0 <= index < self.len()`. pub fn get_range>(&self, range: R) -> Option<&Self> { let range = try_simplify_range(range, self.entries.len())?; self.entries.get(range).map(Slice::from_slice) } /// Returns a mutable slice of key-value pairs in the given range of indices. /// /// Valid indices are `0 <= index < self.len()`. pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Self> { let range = try_simplify_range(range, self.entries.len())?; self.entries.get_mut(range).map(Slice::from_mut_slice) } /// Get the first key-value pair. pub fn first(&self) -> Option<(&K, &V)> { self.entries.first().map(Bucket::refs) } /// Get the first key-value pair, with mutable access to the value. pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { self.entries.first_mut().map(Bucket::ref_mut) } /// Get the last key-value pair. pub fn last(&self) -> Option<(&K, &V)> { self.entries.last().map(Bucket::refs) } /// Get the last key-value pair, with mutable access to the value. pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { self.entries.last_mut().map(Bucket::ref_mut) } /// Divides one slice into two at an index. /// /// ***Panics*** if `index > len`. #[track_caller] pub fn split_at(&self, index: usize) -> (&Self, &Self) { let (first, second) = self.entries.split_at(index); (Self::from_slice(first), Self::from_slice(second)) } /// Divides one mutable slice into two at an index. /// /// ***Panics*** if `index > len`. 
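///
/// A brief illustrative sketch (assumes [`IndexMap::as_mut_slice`], defined
/// elsewhere in this crate):
///
/// ```
/// use indexmap::IndexMap;
///
/// let mut map: IndexMap<i32, i32> = (0..4).map(|i| (i, i * 10)).collect();
/// let (front, back) = map.as_mut_slice().split_at_mut(2);
/// assert_eq!(front.last(), Some((&1, &10)));
/// assert_eq!(back.first(), Some((&2, &20)));
/// ```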
#[track_caller] pub fn split_at_mut(&mut self, index: usize) -> (&mut Self, &mut Self) { let (first, second) = self.entries.split_at_mut(index); (Self::from_mut_slice(first), Self::from_mut_slice(second)) } /// Returns the first key-value pair and the rest of the slice, /// or `None` if it is empty. pub fn split_first(&self) -> Option<((&K, &V), &Self)> { if let [first, rest @ ..] = &self.entries { Some((first.refs(), Self::from_slice(rest))) } else { None } } /// Returns the first key-value pair and the rest of the slice, /// with mutable access to the value, or `None` if it is empty. pub fn split_first_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> { if let [first, rest @ ..] = &mut self.entries { Some((first.ref_mut(), Self::from_mut_slice(rest))) } else { None } } /// Returns the last key-value pair and the rest of the slice, /// or `None` if it is empty. pub fn split_last(&self) -> Option<((&K, &V), &Self)> { if let [rest @ .., last] = &self.entries { Some((last.refs(), Self::from_slice(rest))) } else { None } } /// Returns the last key-value pair and the rest of the slice, /// with mutable access to the value, or `None` if it is empty. pub fn split_last_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> { if let [rest @ .., last] = &mut self.entries { Some((last.ref_mut(), Self::from_mut_slice(rest))) } else { None } } /// Return an iterator over the key-value pairs of the map slice. pub fn iter(&self) -> Iter<'_, K, V> { Iter::new(&self.entries) } /// Return an iterator over the key-value pairs of the map slice. pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { IterMut::new(&mut self.entries) } /// Return an iterator over the keys of the map slice. pub fn keys(&self) -> Keys<'_, K, V> { Keys::new(&self.entries) } /// Return an owning iterator over the keys of the map slice. pub fn into_keys(self: Box) -> IntoKeys { IntoKeys::new(self.into_entries()) } /// Return an iterator over the values of the map slice. pub fn values(&self) -> Values<'_, K, V> { Values::new(&self.entries) } /// Return an iterator over mutable references to the the values of the map slice. pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { ValuesMut::new(&mut self.entries) } /// Return an owning iterator over the values of the map slice. pub fn into_values(self: Box) -> IntoValues { IntoValues::new(self.into_entries()) } /// Search over a sorted map for a key. /// /// Returns the position where that key is present, or the position where it can be inserted to /// maintain the sort. See [`slice::binary_search`] for more details. /// /// Computes in **O(log(n))** time, which is notably less scalable than looking the key up in /// the map this is a slice from using [`IndexMap::get_index_of`], but this can also position /// missing keys. pub fn binary_search_keys(&self, x: &K) -> Result where K: Ord, { self.binary_search_by(|p, _| p.cmp(x)) } /// Search over a sorted map with a comparator function. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. See [`slice::binary_search_by`] for more details. /// /// Computes in **O(log(n))** time. #[inline] pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result where F: FnMut(&'a K, &'a V) -> Ordering, { self.entries.binary_search_by(move |a| f(&a.key, &a.value)) } /// Search over a sorted map with an extraction function. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. 
See [`slice::binary_search_by_key`] for more details. /// /// Computes in **O(log(n))** time. #[inline] pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result where F: FnMut(&'a K, &'a V) -> B, B: Ord, { self.binary_search_by(|k, v| f(k, v).cmp(b)) } /// Checks if the keys of this slice are sorted. #[inline] pub fn is_sorted(&self) -> bool where K: PartialOrd, { self.entries.is_sorted_by(|a, b| a.key <= b.key) } /// Checks if this slice is sorted using the given comparator function. #[inline] pub fn is_sorted_by<'a, F>(&'a self, mut cmp: F) -> bool where F: FnMut(&'a K, &'a V, &'a K, &'a V) -> bool, { self.entries .is_sorted_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)) } /// Checks if this slice is sorted using the given sort-key function. #[inline] pub fn is_sorted_by_key<'a, F, T>(&'a self, mut sort_key: F) -> bool where F: FnMut(&'a K, &'a V) -> T, T: PartialOrd, { self.entries .is_sorted_by_key(move |a| sort_key(&a.key, &a.value)) } /// Returns the index of the partition point of a sorted map according to the given predicate /// (the index of the first element of the second partition). /// /// See [`slice::partition_point`] for more details. /// /// Computes in **O(log(n))** time. #[must_use] pub fn partition_point
<P>
(&self, mut pred: P) -> usize where P: FnMut(&K, &V) -> bool, { self.entries .partition_point(move |a| pred(&a.key, &a.value)) } /// Get an array of `N` key-value pairs by `N` indices /// /// Valid indices are *0 <= index < self.len()* and each index needs to be unique. pub fn get_disjoint_mut( &mut self, indices: [usize; N], ) -> Result<[(&K, &mut V); N], GetDisjointMutError> { let indices = indices.map(Some); let key_values = self.get_disjoint_opt_mut(indices)?; Ok(key_values.map(Option::unwrap)) } #[allow(unsafe_code)] pub(crate) fn get_disjoint_opt_mut( &mut self, indices: [Option; N], ) -> Result<[Option<(&K, &mut V)>; N], GetDisjointMutError> { // SAFETY: Can't allow duplicate indices as we would return several mutable refs to the same data. let len = self.len(); for i in 0..N { if let Some(idx) = indices[i] { if idx >= len { return Err(GetDisjointMutError::IndexOutOfBounds); } else if indices[..i].contains(&Some(idx)) { return Err(GetDisjointMutError::OverlappingIndices); } } } let entries_ptr = self.entries.as_mut_ptr(); let out = indices.map(|idx_opt| { match idx_opt { Some(idx) => { // SAFETY: The base pointer is valid as it comes from a slice and the reference is always // in-bounds & unique as we've already checked the indices above. let kv = unsafe { (*(entries_ptr.add(idx))).ref_mut() }; Some(kv) } None => None, } }); Ok(out) } } impl<'a, K, V> IntoIterator for &'a Slice { type IntoIter = Iter<'a, K, V>; type Item = (&'a K, &'a V); fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, K, V> IntoIterator for &'a mut Slice { type IntoIter = IterMut<'a, K, V>; type Item = (&'a K, &'a mut V); fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl IntoIterator for Box> { type IntoIter = IntoIter; type Item = (K, V); fn into_iter(self) -> Self::IntoIter { IntoIter::new(self.into_entries()) } } impl Default for &'_ Slice { fn default() -> Self { Slice::from_slice(&[]) } } impl Default for &'_ mut Slice { fn default() -> Self { Slice::from_mut_slice(&mut []) } } impl Default for Box> { fn default() -> Self { Slice::from_boxed(Box::default()) } } impl Clone for Box> { fn clone(&self) -> Self { Slice::from_boxed(self.entries.to_vec().into_boxed_slice()) } } impl From<&Slice> for Box> { fn from(slice: &Slice) -> Self { Slice::from_boxed(Box::from(&slice.entries)) } } impl fmt::Debug for Slice { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self).finish() } } impl PartialEq> for Slice where K: PartialEq, V: PartialEq, { fn eq(&self, other: &Slice) -> bool { slice_eq(&self.entries, &other.entries, |b1, b2| { b1.key == b2.key && b1.value == b2.value }) } } impl PartialEq<[(K2, V2)]> for Slice where K: PartialEq, V: PartialEq, { fn eq(&self, other: &[(K2, V2)]) -> bool { slice_eq(&self.entries, other, |b, t| b.key == t.0 && b.value == t.1) } } impl PartialEq> for [(K, V)] where K: PartialEq, V: PartialEq, { fn eq(&self, other: &Slice) -> bool { slice_eq(self, &other.entries, |t, b| t.0 == b.key && t.1 == b.value) } } impl PartialEq<[(K2, V2); N]> for Slice where K: PartialEq, V: PartialEq, { fn eq(&self, other: &[(K2, V2); N]) -> bool { >::eq(self, other) } } impl PartialEq> for [(K, V); N] where K: PartialEq, V: PartialEq, { fn eq(&self, other: &Slice) -> bool { <[_] as PartialEq<_>>::eq(self, other) } } impl Eq for Slice {} impl PartialOrd for Slice { fn partial_cmp(&self, other: &Self) -> Option { self.iter().partial_cmp(other) } } impl Ord for Slice { fn cmp(&self, other: &Self) -> Ordering { self.iter().cmp(other) } } impl Hash for 
Slice { fn hash(&self, state: &mut H) { self.len().hash(state); for (key, value) in self { key.hash(state); value.hash(state); } } } impl Index for Slice { type Output = V; fn index(&self, index: usize) -> &V { &self.entries[index].value } } impl IndexMut for Slice { fn index_mut(&mut self, index: usize) -> &mut V { &mut self.entries[index].value } } // We can't have `impl> Index` because that conflicts // both upstream with `Index` and downstream with `Index<&Q>`. // Instead, we repeat the implementations for all the core range types. macro_rules! impl_index { ($($range:ty),*) => {$( impl Index<$range> for IndexMap { type Output = Slice; fn index(&self, range: $range) -> &Self::Output { Slice::from_slice(&self.as_entries()[range]) } } impl IndexMut<$range> for IndexMap { fn index_mut(&mut self, range: $range) -> &mut Self::Output { Slice::from_mut_slice(&mut self.as_entries_mut()[range]) } } impl Index<$range> for Slice { type Output = Slice; fn index(&self, range: $range) -> &Self { Self::from_slice(&self.entries[range]) } } impl IndexMut<$range> for Slice { fn index_mut(&mut self, range: $range) -> &mut Self { Self::from_mut_slice(&mut self.entries[range]) } } )*} } impl_index!( ops::Range, ops::RangeFrom, ops::RangeFull, ops::RangeInclusive, ops::RangeTo, ops::RangeToInclusive, (Bound, Bound) ); #[cfg(test)] mod tests { use super::*; #[test] fn slice_index() { fn check( vec_slice: &[(i32, i32)], map_slice: &Slice, sub_slice: &Slice, ) { assert_eq!(map_slice as *const _, sub_slice as *const _); itertools::assert_equal( vec_slice.iter().copied(), map_slice.iter().map(|(&k, &v)| (k, v)), ); itertools::assert_equal(vec_slice.iter().map(|(k, _)| k), map_slice.keys()); itertools::assert_equal(vec_slice.iter().map(|(_, v)| v), map_slice.values()); } let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect(); let map: IndexMap = vec.iter().cloned().collect(); let slice = map.as_slice(); // RangeFull check(&vec[..], &map[..], &slice[..]); for i in 0usize..10 { // Index assert_eq!(vec[i].1, map[i]); assert_eq!(vec[i].1, slice[i]); assert_eq!(map[&(i as i32)], map[i]); assert_eq!(map[&(i as i32)], slice[i]); // RangeFrom check(&vec[i..], &map[i..], &slice[i..]); // RangeTo check(&vec[..i], &map[..i], &slice[..i]); // RangeToInclusive check(&vec[..=i], &map[..=i], &slice[..=i]); // (Bound, Bound) let bounds = (Bound::Excluded(i), Bound::Unbounded); check(&vec[i + 1..], &map[bounds], &slice[bounds]); for j in i..=10 { // Range check(&vec[i..j], &map[i..j], &slice[i..j]); } for j in i..10 { // RangeInclusive check(&vec[i..=j], &map[i..=j], &slice[i..=j]); } } } #[test] fn slice_index_mut() { fn check_mut( vec_slice: &[(i32, i32)], map_slice: &mut Slice, sub_slice: &mut Slice, ) { assert_eq!(map_slice, sub_slice); itertools::assert_equal( vec_slice.iter().copied(), map_slice.iter_mut().map(|(&k, &mut v)| (k, v)), ); itertools::assert_equal( vec_slice.iter().map(|&(_, v)| v), map_slice.values_mut().map(|&mut v| v), ); } let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect(); let mut map: IndexMap = vec.iter().cloned().collect(); let mut map2 = map.clone(); let slice = map2.as_mut_slice(); // RangeFull check_mut(&vec[..], &mut map[..], &mut slice[..]); for i in 0usize..10 { // IndexMut assert_eq!(&mut map[i], &mut slice[i]); // RangeFrom check_mut(&vec[i..], &mut map[i..], &mut slice[i..]); // RangeTo check_mut(&vec[..i], &mut map[..i], &mut slice[..i]); // RangeToInclusive check_mut(&vec[..=i], &mut map[..=i], &mut slice[..=i]); // (Bound, Bound) let bounds = (Bound::Excluded(i), 
Bound::Unbounded); check_mut(&vec[i + 1..], &mut map[bounds], &mut slice[bounds]); for j in i..=10 { // Range check_mut(&vec[i..j], &mut map[i..j], &mut slice[i..j]); } for j in i..10 { // RangeInclusive check_mut(&vec[i..=j], &mut map[i..=j], &mut slice[i..=j]); } } } #[test] fn slice_new() { let slice: &Slice = Slice::new(); assert!(slice.is_empty()); assert_eq!(slice.len(), 0); } #[test] fn slice_new_mut() { let slice: &mut Slice = Slice::new_mut(); assert!(slice.is_empty()); assert_eq!(slice.len(), 0); } #[test] fn slice_get_index_mut() { let mut map: IndexMap = (0..10).map(|i| (i, i * i)).collect(); let slice: &mut Slice = map.as_mut_slice(); { let (key, value) = slice.get_index_mut(0).unwrap(); assert_eq!(*key, 0); assert_eq!(*value, 0); *value = 11; } assert_eq!(slice[0], 11); { let result = slice.get_index_mut(11); assert!(result.is_none()); } } #[test] fn slice_split_first() { let slice: &mut Slice = Slice::new_mut(); let result = slice.split_first(); assert!(result.is_none()); let mut map: IndexMap = (0..10).map(|i| (i, i * i)).collect(); let slice: &mut Slice = map.as_mut_slice(); { let (first, rest) = slice.split_first().unwrap(); assert_eq!(first, (&0, &0)); assert_eq!(rest.len(), 9); } assert_eq!(slice.len(), 10); } #[test] fn slice_split_first_mut() { let slice: &mut Slice = Slice::new_mut(); let result = slice.split_first_mut(); assert!(result.is_none()); let mut map: IndexMap = (0..10).map(|i| (i, i * i)).collect(); let slice: &mut Slice = map.as_mut_slice(); { let (first, rest) = slice.split_first_mut().unwrap(); assert_eq!(first, (&0, &mut 0)); assert_eq!(rest.len(), 9); *first.1 = 11; } assert_eq!(slice.len(), 10); assert_eq!(slice[0], 11); } #[test] fn slice_split_last() { let slice: &mut Slice = Slice::new_mut(); let result = slice.split_last(); assert!(result.is_none()); let mut map: IndexMap = (0..10).map(|i| (i, i * i)).collect(); let slice: &mut Slice = map.as_mut_slice(); { let (last, rest) = slice.split_last().unwrap(); assert_eq!(last, (&9, &81)); assert_eq!(rest.len(), 9); } assert_eq!(slice.len(), 10); } #[test] fn slice_split_last_mut() { let slice: &mut Slice = Slice::new_mut(); let result = slice.split_last_mut(); assert!(result.is_none()); let mut map: IndexMap = (0..10).map(|i| (i, i * i)).collect(); let slice: &mut Slice = map.as_mut_slice(); { let (last, rest) = slice.split_last_mut().unwrap(); assert_eq!(last, (&9, &mut 81)); assert_eq!(rest.len(), 9); *last.1 = 100; } assert_eq!(slice.len(), 10); assert_eq!(slice[slice.len() - 1], 100); } #[test] fn slice_get_range() { let mut map: IndexMap = (0..10).map(|i| (i, i * i)).collect(); let slice: &mut Slice = map.as_mut_slice(); let subslice = slice.get_range(3..6).unwrap(); assert_eq!(subslice.len(), 3); assert_eq!(subslice, &[(3, 9), (4, 16), (5, 25)]); } } indexmap-2.12.1/src/map/tests.rs000064400000000000000000001111511046102023000145700ustar 00000000000000use super::*; use std::string::String; #[test] fn it_works() { let mut map = IndexMap::new(); assert_eq!(map.is_empty(), true); map.insert(1, ()); map.insert(1, ()); assert_eq!(map.len(), 1); assert!(map.get(&1).is_some()); assert_eq!(map.is_empty(), false); } #[test] fn new() { let map = IndexMap::::new(); println!("{:?}", map); assert_eq!(map.capacity(), 0); assert_eq!(map.len(), 0); assert_eq!(map.is_empty(), true); } #[test] fn insert() { let insert = [0, 4, 2, 12, 8, 7, 11, 5]; let not_present = [1, 3, 6, 9, 10]; let mut map = IndexMap::with_capacity(insert.len()); for (i, &elt) in insert.iter().enumerate() { assert_eq!(map.len(), i); 
map.insert(elt, elt); assert_eq!(map.len(), i + 1); assert_eq!(map.get(&elt), Some(&elt)); assert_eq!(map[&elt], elt); } println!("{:?}", map); for &elt in ¬_present { assert!(map.get(&elt).is_none()); } } #[test] fn insert_full() { let insert = vec![9, 2, 7, 1, 4, 6, 13]; let present = vec![1, 6, 2]; let mut map = IndexMap::with_capacity(insert.len()); for (i, &elt) in insert.iter().enumerate() { assert_eq!(map.len(), i); let (index, existing) = map.insert_full(elt, elt); assert_eq!(existing, None); assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); assert_eq!(map.len(), i + 1); } let len = map.len(); for &elt in &present { let (index, existing) = map.insert_full(elt, elt); assert_eq!(existing, Some(elt)); assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); assert_eq!(map.len(), len); } } #[test] fn insert_2() { let mut map = IndexMap::with_capacity(16); let mut keys = vec![]; keys.extend(0..16); keys.extend(if cfg!(miri) { 32..64 } else { 128..267 }); for &i in &keys { let old_map = map.clone(); map.insert(i, ()); for key in old_map.keys() { if map.get(key).is_none() { println!("old_map: {:?}", old_map); println!("map: {:?}", map); panic!("did not find {} in map", key); } } } for &i in &keys { assert!(map.get(&i).is_some(), "did not find {}", i); } } #[test] fn insert_order() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut map = IndexMap::new(); for &elt in &insert { map.insert(elt, ()); } assert_eq!(map.keys().count(), map.len()); assert_eq!(map.keys().count(), insert.len()); for (a, b) in insert.iter().zip(map.keys()) { assert_eq!(a, b); } for (i, k) in (0..insert.len()).zip(map.keys()) { assert_eq!(map.get_index(i).unwrap().0, k); } } #[test] fn shift_insert() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut map = IndexMap::new(); for &elt in &insert { map.shift_insert(0, elt, ()); } assert_eq!(map.keys().count(), map.len()); assert_eq!(map.keys().count(), insert.len()); for (a, b) in insert.iter().rev().zip(map.keys()) { assert_eq!(a, b); } for (i, k) in (0..insert.len()).zip(map.keys()) { assert_eq!(map.get_index(i).unwrap().0, k); } // "insert" that moves an existing entry map.shift_insert(0, insert[0], ()); assert_eq!(map.keys().count(), insert.len()); assert_eq!(insert[0], map.keys()[0]); for (a, b) in insert[1..].iter().rev().zip(map.keys().skip(1)) { assert_eq!(a, b); } } #[test] fn insert_sorted_bad() { let mut map = IndexMap::new(); map.insert(10, ()); for i in 0..10 { map.insert(i, ()); } // The binary search will want to insert this at the end (index == len()), // but that's only possible for *new* inserts. It should still be handled // without panicking though, and in this case it's simple enough that we // know the exact result. (But don't read this as an API guarantee!) assert_eq!(map.first(), Some((&10, &()))); map.insert_sorted(10, ()); assert_eq!(map.last(), Some((&10, &()))); assert!(map.keys().copied().eq(0..=10)); // Other out-of-order entries can also "insert" to a binary-searched // position, moving in either direction. 
map.move_index(5, 0); map.move_index(6, 10); assert_eq!(map.first(), Some((&5, &()))); assert_eq!(map.last(), Some((&6, &()))); map.insert_sorted(5, ()); // moves back up map.insert_sorted(6, ()); // moves back down assert!(map.keys().copied().eq(0..=10)); } #[test] fn grow() { let insert = [0, 4, 2, 12, 8, 7, 11]; let not_present = [1, 3, 6, 9, 10]; let mut map = IndexMap::with_capacity(insert.len()); for (i, &elt) in insert.iter().enumerate() { assert_eq!(map.len(), i); map.insert(elt, elt); assert_eq!(map.len(), i + 1); assert_eq!(map.get(&elt), Some(&elt)); assert_eq!(map[&elt], elt); } println!("{:?}", map); for &elt in &insert { map.insert(elt * 10, elt); } for &elt in &insert { map.insert(elt * 100, elt); } for (i, &elt) in insert.iter().cycle().enumerate().take(100) { map.insert(elt * 100 + i as i32, elt); } println!("{:?}", map); for &elt in ¬_present { assert!(map.get(&elt).is_none()); } } #[test] fn reserve() { let mut map = IndexMap::::new(); assert_eq!(map.capacity(), 0); map.reserve(100); let capacity = map.capacity(); assert!(capacity >= 100); for i in 0..capacity { assert_eq!(map.len(), i); map.insert(i, i * i); assert_eq!(map.len(), i + 1); assert_eq!(map.capacity(), capacity); assert_eq!(map.get(&i), Some(&(i * i))); } map.insert(capacity, std::usize::MAX); assert_eq!(map.len(), capacity + 1); assert!(map.capacity() > capacity); assert_eq!(map.get(&capacity), Some(&std::usize::MAX)); } #[test] fn try_reserve() { let mut map = IndexMap::::new(); assert_eq!(map.capacity(), 0); assert_eq!(map.try_reserve(100), Ok(())); assert!(map.capacity() >= 100); assert!(map.try_reserve(usize::MAX).is_err()); } #[test] fn shrink_to_fit() { let mut map = IndexMap::::new(); assert_eq!(map.capacity(), 0); for i in 0..100 { assert_eq!(map.len(), i); map.insert(i, i * i); assert_eq!(map.len(), i + 1); assert!(map.capacity() >= i + 1); assert_eq!(map.get(&i), Some(&(i * i))); map.shrink_to_fit(); assert_eq!(map.len(), i + 1); assert_eq!(map.capacity(), i + 1); assert_eq!(map.get(&i), Some(&(i * i))); } } #[test] fn remove() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut map = IndexMap::new(); for &elt in &insert { map.insert(elt, elt); } assert_eq!(map.keys().count(), map.len()); assert_eq!(map.keys().count(), insert.len()); for (a, b) in insert.iter().zip(map.keys()) { assert_eq!(a, b); } let remove_fail = [99, 77]; let remove = [4, 12, 8, 7]; for &key in &remove_fail { assert!(map.swap_remove_full(&key).is_none()); } println!("{:?}", map); for &key in &remove { //println!("{:?}", map); let index = map.get_full(&key).unwrap().0; assert_eq!(map.swap_remove_full(&key), Some((index, key, key))); } println!("{:?}", map); for key in &insert { assert_eq!(map.get(key).is_some(), !remove.contains(key)); } assert_eq!(map.len(), insert.len() - remove.len()); assert_eq!(map.keys().count(), insert.len() - remove.len()); } #[test] fn remove_to_empty() { let mut map = indexmap! { 0 => 0, 4 => 4, 5 => 5 }; map.swap_remove(&5).unwrap(); map.swap_remove(&4).unwrap(); map.swap_remove(&0).unwrap(); assert!(map.is_empty()); } #[test] fn swap_remove_index() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut map = IndexMap::new(); for &elt in &insert { map.insert(elt, elt * 2); } let mut vector = insert.to_vec(); let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; // check that the same swap remove sequence on vec and map // have the same result. 
for &rm in remove_sequence { let out_vec = vector.swap_remove(rm); let (out_map, _) = map.swap_remove_index(rm).unwrap(); assert_eq!(out_vec, out_map); } assert_eq!(vector.len(), map.len()); for (a, b) in vector.iter().zip(map.keys()) { assert_eq!(a, b); } } #[test] fn partial_eq_and_eq() { let mut map_a = IndexMap::new(); map_a.insert(1, "1"); map_a.insert(2, "2"); let mut map_b = map_a.clone(); assert_eq!(map_a, map_b); map_b.swap_remove(&1); assert_ne!(map_a, map_b); let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.into())).collect(); assert_ne!(map_a, map_c); assert_ne!(map_c, map_a); } #[test] fn extend() { let mut map = IndexMap::new(); map.extend(vec![(&1, &2), (&3, &4)]); map.extend(vec![(5, 6)]); assert_eq!( map.into_iter().collect::>(), vec![(1, 2), (3, 4), (5, 6)] ); } #[test] fn entry() { let mut map = IndexMap::new(); map.insert(1, "1"); map.insert(2, "2"); { let e = map.entry(3); assert_eq!(e.index(), 2); let e = e.or_insert("3"); assert_eq!(e, &"3"); } let e = map.entry(2); assert_eq!(e.index(), 1); assert_eq!(e.key(), &2); match e { Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"), Entry::Vacant(_) => panic!(), } assert_eq!(e.or_insert("4"), &"2"); } #[test] fn entry_and_modify() { let mut map = IndexMap::new(); map.insert(1, "1"); map.entry(1).and_modify(|x| *x = "2"); assert_eq!(Some(&"2"), map.get(&1)); map.entry(2).and_modify(|x| *x = "doesn't exist"); assert_eq!(None, map.get(&2)); } #[test] fn entry_or_default() { let mut map = IndexMap::new(); #[derive(Debug, PartialEq)] enum TestEnum { DefaultValue, NonDefaultValue, } impl Default for TestEnum { fn default() -> Self { TestEnum::DefaultValue } } map.insert(1, TestEnum::NonDefaultValue); assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default()); assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default()); } #[test] fn occupied_entry_key() { // These keys match hash and equality, but their addresses are distinct. let (k1, k2) = (&mut 1, &mut 1); let k1_ptr = k1 as *const i32; let k2_ptr = k2 as *const i32; assert_ne!(k1_ptr, k2_ptr); let mut map = IndexMap::new(); map.insert(k1, "value"); match map.entry(k2) { Entry::Occupied(ref e) => { // `OccupiedEntry::key` should reference the key in the map, // not the key that was used to find the entry. 
let ptr = *e.key() as *const i32; assert_eq!(ptr, k1_ptr); assert_ne!(ptr, k2_ptr); } Entry::Vacant(_) => panic!(), } } #[test] fn get_index_entry() { let mut map = IndexMap::new(); assert!(map.get_index_entry(0).is_none()); assert!(map.first_entry().is_none()); assert!(map.last_entry().is_none()); map.insert(0, "0"); map.insert(1, "1"); map.insert(2, "2"); map.insert(3, "3"); assert!(map.get_index_entry(4).is_none()); { let e = map.get_index_entry(1).unwrap(); assert_eq!(*e.key(), 1); assert_eq!(*e.get(), "1"); assert_eq!(e.swap_remove(), "1"); } { let mut e = map.get_index_entry(1).unwrap(); assert_eq!(*e.key(), 3); assert_eq!(*e.get(), "3"); assert_eq!(e.insert("4"), "3"); } assert_eq!(*map.get(&3).unwrap(), "4"); { let e = map.first_entry().unwrap(); assert_eq!(*e.key(), 0); assert_eq!(*e.get(), "0"); } { let e = map.last_entry().unwrap(); assert_eq!(*e.key(), 2); assert_eq!(*e.get(), "2"); } } #[test] fn from_entries() { let mut map = IndexMap::from([(1, "1"), (2, "2"), (3, "3")]); { let e = match map.entry(1) { Entry::Occupied(e) => IndexedEntry::from(e), Entry::Vacant(_) => panic!(), }; assert_eq!(e.index(), 0); assert_eq!(*e.key(), 1); assert_eq!(*e.get(), "1"); } { let e = match map.get_index_entry(1) { Some(e) => OccupiedEntry::from(e), None => panic!(), }; assert_eq!(e.index(), 1); assert_eq!(*e.key(), 2); assert_eq!(*e.get(), "2"); } } #[test] fn keys() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; let map: IndexMap<_, _> = vec.into_iter().collect(); let keys: Vec<_> = map.keys().copied().collect(); assert_eq!(keys.len(), 3); assert!(keys.contains(&1)); assert!(keys.contains(&2)); assert!(keys.contains(&3)); } #[test] fn into_keys() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; let map: IndexMap<_, _> = vec.into_iter().collect(); let keys: Vec = map.into_keys().collect(); assert_eq!(keys.len(), 3); assert!(keys.contains(&1)); assert!(keys.contains(&2)); assert!(keys.contains(&3)); } #[test] fn values() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; let map: IndexMap<_, _> = vec.into_iter().collect(); let values: Vec<_> = map.values().copied().collect(); assert_eq!(values.len(), 3); assert!(values.contains(&'a')); assert!(values.contains(&'b')); assert!(values.contains(&'c')); } #[test] fn values_mut() { let vec = vec![(1, 1), (2, 2), (3, 3)]; let mut map: IndexMap<_, _> = vec.into_iter().collect(); for value in map.values_mut() { *value *= 2 } let values: Vec<_> = map.values().copied().collect(); assert_eq!(values.len(), 3); assert!(values.contains(&2)); assert!(values.contains(&4)); assert!(values.contains(&6)); } #[test] fn into_values() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; let map: IndexMap<_, _> = vec.into_iter().collect(); let values: Vec = map.into_values().collect(); assert_eq!(values.len(), 3); assert!(values.contains(&'a')); assert!(values.contains(&'b')); assert!(values.contains(&'c')); } #[test] fn drain_range() { // Test the various heuristics of `erase_indices` for range in [ 0..0, // nothing erased 10..90, // reinsert the few kept (..10 and 90..) 80..90, // update the few to adjust (80..) 
20..30, // sweep everything ] { let mut vec = Vec::from_iter(0..100); let mut map: IndexMap = (0..100).map(|i| (i, ())).collect(); drop(vec.drain(range.clone())); drop(map.drain(range)); assert!(vec.iter().eq(map.keys())); for (i, x) in vec.iter().enumerate() { assert_eq!(map.get_index_of(x), Some(i)); } } } #[test] #[cfg(feature = "std")] fn from_array() { let map = IndexMap::from([(1, 2), (3, 4)]); let mut expected = IndexMap::new(); expected.insert(1, 2); expected.insert(3, 4); assert_eq!(map, expected) } #[test] fn iter_default() { struct K; struct V; fn assert_default() where T: Default + Iterator, { assert!(T::default().next().is_none()); } assert_default::>(); assert_default::>(); assert_default::>(); assert_default::>(); assert_default::>(); assert_default::>(); assert_default::>(); assert_default::>(); assert_default::>(); } #[test] fn get_index_mut2() { let mut map: IndexMap = IndexMap::new(); map.insert(1, 2); map.insert(3, 4); map.insert(5, 6); { let (key, value) = map.get_index_mut2(0).unwrap(); assert_eq!(*key, 1); assert_eq!(*value, 2); *value = 7; } assert_eq!(map[0], 7); { let (key, _) = map.get_index_mut2(0).unwrap(); *key = 8; } assert_eq!(map.get_index(0).unwrap().0, &8); } #[test] fn shift_shift_remove_index() { let mut map: IndexMap = IndexMap::new(); map.insert(1, 2); map.insert(3, 4); map.insert(5, 6); map.insert(7, 8); map.insert(9, 10); let result = map.shift_remove_index(1); assert_eq!(result, Some((3, 4))); assert_eq!(map.len(), 4); assert_eq!(map.as_slice(), &[(1, 2), (5, 6), (7, 8), (9, 10)]); let result = map.shift_remove_index(1); assert_eq!(result, Some((5, 6))); assert_eq!(map.len(), 3); assert_eq!(map.as_slice(), &[(1, 2), (7, 8), (9, 10)]); let result = map.shift_remove_index(2); assert_eq!(result, Some((9, 10))); assert_eq!(map.len(), 2); assert_eq!(map.as_slice(), &[(1, 2), (7, 8)]); let result = map.shift_remove_index(2); assert_eq!(result, None); assert_eq!(map.len(), 2); assert_eq!(map.as_slice(), &[(1, 2), (7, 8)]); } #[test] fn shift_remove_entry() { let mut map: IndexMap = IndexMap::new(); map.insert(1, 2); map.insert(3, 4); map.insert(5, 6); map.insert(7, 8); map.insert(9, 10); let result = map.shift_remove_entry(&3); assert_eq!(result, Some((3, 4))); assert_eq!(map.len(), 4); assert_eq!(map.as_slice(), &[(1, 2), (5, 6), (7, 8), (9, 10)]); let result = map.shift_remove_entry(&9); assert_eq!(result, Some((9, 10))); assert_eq!(map.len(), 3); assert_eq!(map.as_slice(), &[(1, 2), (5, 6), (7, 8)]); let result = map.shift_remove_entry(&9); assert_eq!(result, None); assert_eq!(map.len(), 3); assert_eq!(map.as_slice(), &[(1, 2), (5, 6), (7, 8)]); } #[test] fn shift_remove_full() { let mut map: IndexMap = IndexMap::new(); map.insert(1, 2); map.insert(3, 4); map.insert(5, 6); map.insert(7, 8); map.insert(9, 10); let result = map.shift_remove_full(&3); assert_eq!(result, Some((1, 3, 4))); assert_eq!(map.len(), 4); assert_eq!(map.as_slice(), &[(1, 2), (5, 6), (7, 8), (9, 10)]); let result = map.shift_remove_full(&9); assert_eq!(result, Some((3, 9, 10))); assert_eq!(map.len(), 3); assert_eq!(map.as_slice(), &[(1, 2), (5, 6), (7, 8)]); let result = map.shift_remove_full(&9); assert_eq!(result, None); assert_eq!(map.len(), 3); assert_eq!(map.as_slice(), &[(1, 2), (5, 6), (7, 8)]); } #[test] fn sorted_unstable_by() { let mut map: IndexMap = IndexMap::new(); map.extend(vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50)]); let sorted = map.sorted_unstable_by(|_a, b, _c, d| d.cmp(&b)); assert_eq!( sorted.as_slice(), &[(5, 50), (4, 40), (3, 30), (2, 20), (1, 10)] ); 
} #[test] fn into_boxed_slice() { let mut map: IndexMap = IndexMap::new(); for i in 0..5 { map.insert(i, i * 10); } let boxed_slice: Box> = map.into_boxed_slice(); assert_eq!(boxed_slice.len(), 5); assert_eq!( boxed_slice.as_ref(), &[(0, 0), (1, 10), (2, 20), (3, 30), (4, 40)] ); } #[test] fn last_mut() { let mut map: IndexMap<&str, i32> = IndexMap::new(); let last_entry = map.last_mut(); assert_eq!(last_entry, None); map.insert("key1", 1); map.insert("key2", 2); map.insert("key3", 3); let last_entry = map.last_mut(); assert_eq!(last_entry, Some((&"key3", &mut 3))); *last_entry.unwrap().1 = 4; assert_eq!(map.get("key3"), Some(&4)); } #[test] #[should_panic = "index out of bounds"] fn insert_before_oob() { let mut map: IndexMap = IndexMap::new(); let _ = map.insert_before(0, 'a', ()); let _ = map.insert_before(1, 'b', ()); map.insert_before(3, 'd', ()); } #[test] fn clear() { let mut map: IndexMap = IndexMap::new(); map.extend(vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50)]); map.clear(); assert_eq!(map.len(), 0); } #[test] fn get_range() { let mut index_map: IndexMap = IndexMap::new(); index_map.insert(1, 10); index_map.insert(2, 20); index_map.insert(3, 30); index_map.insert(4, 40); index_map.insert(5, 50); let result = index_map.get_range(2..2); assert!(result.unwrap().is_empty()); let result = index_map.get_range(4..2); assert!(result.is_none()); let result = index_map.get_range(2..4); let slice: &Slice = result.unwrap(); assert_eq!(slice.len(), 2); assert_eq!(slice, &[(3, 30), (4, 40)]); } #[test] fn get_range_mut() { let mut index_map: IndexMap = IndexMap::new(); index_map.insert(1, 10); index_map.insert(2, 20); index_map.insert(3, 30); index_map.insert(4, 40); index_map.insert(5, 50); let result = index_map.get_range_mut(2..2); assert!(result.unwrap().is_empty()); let result = index_map.get_range_mut(4..2); assert!(result.is_none()); let result = index_map.get_range_mut(2..4); let slice: &mut Slice = result.unwrap(); assert_eq!(slice.len(), 2); assert_eq!(slice, &mut [(3, 30), (4, 40)]); for i in 0..slice.len() { slice[i] += 1; } assert_eq!(slice, &mut [(3, 31), (4, 41)]); } #[test] #[should_panic = "index out of bounds"] fn shift_insert_oob() { let mut map: IndexMap = IndexMap::new(); map.shift_insert(0, 1, 10); map.shift_insert(1, 2, 20); map.shift_insert(2, 3, 30); map.shift_insert(5, 4, 40); } #[test] fn test_binary_search_by() { // adapted from std's test for binary_search let b: IndexMap<_, i32> = [] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(0)); let b: IndexMap<_, i32> = [4] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by(|_, x| x.cmp(&3)), Err(0)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&4)), Ok(0)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(1)); let b: IndexMap<_, i32> = [1, 2, 4, 6, 8, 9] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(3)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Ok(3)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Err(4)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Ok(4)); let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by(|_, x| x.cmp(&9)), Err(6)); let b: IndexMap<_, i32> = [1, 2, 4, 6, 7, 8, 9] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Ok(3)); 
assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(3)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Ok(5)); let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8, 9] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Err(5)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&0)), Err(0)); let b: IndexMap<_, i32> = [1, 3, 3, 3, 7] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by(|_, x| x.cmp(&0)), Err(0)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&1)), Ok(0)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&2)), Err(1)); assert!(match b.binary_search_by(|_, x| x.cmp(&3)) { Ok(1..=3) => true, _ => false, }); assert!(match b.binary_search_by(|_, x| x.cmp(&3)) { Ok(1..=3) => true, _ => false, }); assert_eq!(b.binary_search_by(|_, x| x.cmp(&4)), Err(4)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&5)), Err(4)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&6)), Err(4)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&7)), Ok(4)); assert_eq!(b.binary_search_by(|_, x| x.cmp(&8)), Err(5)); } #[test] fn test_binary_search_by_key() { // adapted from std's test for binary_search let b: IndexMap<_, i32> = [] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(0)); let b: IndexMap<_, i32> = [4] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by_key(&3, |_, &x| x), Err(0)); assert_eq!(b.binary_search_by_key(&4, |_, &x| x), Ok(0)); assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(1)); let b: IndexMap<_, i32> = [1, 2, 4, 6, 8, 9] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(3)); assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Ok(3)); assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Err(4)); assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Ok(4)); let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by_key(&9, |_, &x| x), Err(6)); let b: IndexMap<_, i32> = [1, 2, 4, 6, 7, 8, 9] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Ok(3)); assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(3)); assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Ok(5)); let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8, 9] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Err(5)); assert_eq!(b.binary_search_by_key(&0, |_, &x| x), Err(0)); let b: IndexMap<_, i32> = [1, 3, 3, 3, 7] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.binary_search_by_key(&0, |_, &x| x), Err(0)); assert_eq!(b.binary_search_by_key(&1, |_, &x| x), Ok(0)); assert_eq!(b.binary_search_by_key(&2, |_, &x| x), Err(1)); assert!(match b.binary_search_by_key(&3, |_, &x| x) { Ok(1..=3) => true, _ => false, }); assert!(match b.binary_search_by_key(&3, |_, &x| x) { Ok(1..=3) => true, _ => false, }); assert_eq!(b.binary_search_by_key(&4, |_, &x| x), Err(4)); assert_eq!(b.binary_search_by_key(&5, |_, &x| x), Err(4)); assert_eq!(b.binary_search_by_key(&6, |_, &x| x), Err(4)); assert_eq!(b.binary_search_by_key(&7, |_, &x| x), Ok(4)); assert_eq!(b.binary_search_by_key(&8, |_, &x| x), Err(5)); } #[test] fn test_partition_point() { // adapted from std's test for partition_point let b: IndexMap<_, i32> = [] .into_iter() .enumerate() .map(|(i, x)| (i 
+ 100, x)) .collect(); assert_eq!(b.partition_point(|_, &x| x < 5), 0); let b: IndexMap<_, i32> = [4] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.partition_point(|_, &x| x < 3), 0); assert_eq!(b.partition_point(|_, &x| x < 4), 0); assert_eq!(b.partition_point(|_, &x| x < 5), 1); let b: IndexMap<_, i32> = [1, 2, 4, 6, 8, 9] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.partition_point(|_, &x| x < 5), 3); assert_eq!(b.partition_point(|_, &x| x < 6), 3); assert_eq!(b.partition_point(|_, &x| x < 7), 4); assert_eq!(b.partition_point(|_, &x| x < 8), 4); let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.partition_point(|_, &x| x < 9), 6); let b: IndexMap<_, i32> = [1, 2, 4, 6, 7, 8, 9] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.partition_point(|_, &x| x < 6), 3); assert_eq!(b.partition_point(|_, &x| x < 5), 3); assert_eq!(b.partition_point(|_, &x| x < 8), 5); let b: IndexMap<_, i32> = [1, 2, 4, 5, 6, 8, 9] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.partition_point(|_, &x| x < 7), 5); assert_eq!(b.partition_point(|_, &x| x < 0), 0); let b: IndexMap<_, i32> = [1, 3, 3, 3, 7] .into_iter() .enumerate() .map(|(i, x)| (i + 100, x)) .collect(); assert_eq!(b.partition_point(|_, &x| x < 0), 0); assert_eq!(b.partition_point(|_, &x| x < 1), 0); assert_eq!(b.partition_point(|_, &x| x < 2), 1); assert_eq!(b.partition_point(|_, &x| x < 3), 1); assert_eq!(b.partition_point(|_, &x| x < 4), 4); assert_eq!(b.partition_point(|_, &x| x < 5), 4); assert_eq!(b.partition_point(|_, &x| x < 6), 4); assert_eq!(b.partition_point(|_, &x| x < 7), 4); assert_eq!(b.partition_point(|_, &x| x < 8), 5); } macro_rules! 
move_index_oob { ($test:ident, $from:expr, $to:expr) => { #[test] #[should_panic(expected = "index out of bounds")] fn $test() { let mut map: IndexMap = (0..10).map(|k| (k, ())).collect(); map.move_index($from, $to); } }; } move_index_oob!(test_move_index_out_of_bounds_0_10, 0, 10); move_index_oob!(test_move_index_out_of_bounds_0_max, 0, usize::MAX); move_index_oob!(test_move_index_out_of_bounds_10_0, 10, 0); move_index_oob!(test_move_index_out_of_bounds_max_0, usize::MAX, 0); #[test] fn disjoint_mut_empty_map() { let mut map: IndexMap = IndexMap::default(); assert_eq!( map.get_disjoint_mut([&0, &1, &2, &3]), [None, None, None, None] ); } #[test] fn disjoint_mut_empty_param() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 10); assert_eq!(map.get_disjoint_mut([] as [&u32; 0]), []); } #[test] fn disjoint_mut_single_fail() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 10); assert_eq!(map.get_disjoint_mut([&0]), [None]); } #[test] fn disjoint_mut_single_success() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 10); assert_eq!(map.get_disjoint_mut([&1]), [Some(&mut 10)]); } #[test] fn disjoint_mut_multi_success() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 100); map.insert(2, 200); map.insert(3, 300); map.insert(4, 400); assert_eq!( map.get_disjoint_mut([&1, &2]), [Some(&mut 100), Some(&mut 200)] ); assert_eq!( map.get_disjoint_mut([&1, &3]), [Some(&mut 100), Some(&mut 300)] ); assert_eq!( map.get_disjoint_mut([&3, &1, &4, &2]), [ Some(&mut 300), Some(&mut 100), Some(&mut 400), Some(&mut 200) ] ); } #[test] fn disjoint_mut_multi_success_unsized_key() { let mut map: IndexMap<&'static str, u32> = IndexMap::default(); map.insert("1", 100); map.insert("2", 200); map.insert("3", 300); map.insert("4", 400); assert_eq!( map.get_disjoint_mut(["1", "2"]), [Some(&mut 100), Some(&mut 200)] ); assert_eq!( map.get_disjoint_mut(["1", "3"]), [Some(&mut 100), Some(&mut 300)] ); assert_eq!( map.get_disjoint_mut(["3", "1", "4", "2"]), [ Some(&mut 300), Some(&mut 100), Some(&mut 400), Some(&mut 200) ] ); } #[test] fn disjoint_mut_multi_success_borrow_key() { let mut map: IndexMap = IndexMap::default(); map.insert("1".into(), 100); map.insert("2".into(), 200); map.insert("3".into(), 300); map.insert("4".into(), 400); assert_eq!( map.get_disjoint_mut(["1", "2"]), [Some(&mut 100), Some(&mut 200)] ); assert_eq!( map.get_disjoint_mut(["1", "3"]), [Some(&mut 100), Some(&mut 300)] ); assert_eq!( map.get_disjoint_mut(["3", "1", "4", "2"]), [ Some(&mut 300), Some(&mut 100), Some(&mut 400), Some(&mut 200) ] ); } #[test] fn disjoint_mut_multi_fail_missing() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 100); map.insert(2, 200); map.insert(3, 300); map.insert(4, 400); assert_eq!(map.get_disjoint_mut([&1, &5]), [Some(&mut 100), None]); assert_eq!(map.get_disjoint_mut([&5, &6]), [None, None]); assert_eq!( map.get_disjoint_mut([&1, &5, &4]), [Some(&mut 100), None, Some(&mut 400)] ); } #[test] #[should_panic] fn disjoint_mut_multi_fail_duplicate_panic() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 100); map.get_disjoint_mut([&1, &2, &1]); } #[test] fn disjoint_indices_mut_fail_oob() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 10); map.insert(321, 20); assert_eq!( map.get_disjoint_indices_mut([1, 3]), Err(crate::GetDisjointMutError::IndexOutOfBounds) ); } #[test] fn disjoint_indices_mut_empty() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 10); map.insert(321, 20); assert_eq!(map.get_disjoint_indices_mut([]), 
Ok([])); } #[test] fn disjoint_indices_mut_success() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 10); map.insert(321, 20); assert_eq!(map.get_disjoint_indices_mut([0]), Ok([(&1, &mut 10)])); assert_eq!(map.get_disjoint_indices_mut([1]), Ok([(&321, &mut 20)])); assert_eq!( map.get_disjoint_indices_mut([0, 1]), Ok([(&1, &mut 10), (&321, &mut 20)]) ); } #[test] fn disjoint_indices_mut_fail_duplicate() { let mut map: IndexMap = IndexMap::default(); map.insert(1, 10); map.insert(321, 20); assert_eq!( map.get_disjoint_indices_mut([1, 0, 1]), Err(crate::GetDisjointMutError::OverlappingIndices) ); } #[test] fn insert_sorted_by_key() { let mut values = [(-1, 8), (3, 18), (-27, 2), (-2, 5)]; let mut map: IndexMap = IndexMap::new(); for (key, value) in values { let (_, old) = map.insert_sorted_by_key(key, value, |k, _| k.abs()); assert_eq!(old, None); } values.sort_by_key(|(key, _)| key.abs()); assert_eq!(values, *map.as_slice()); for (key, value) in &mut values { let (_, old) = map.insert_sorted_by_key(*key, -*value, |k, _| k.abs()); assert_eq!(old, Some(*value)); *value = -*value; } assert_eq!(values, *map.as_slice()); } #[test] fn insert_sorted_by() { let mut values = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)]; let mut map: IndexMap = IndexMap::new(); for (key, value) in values { let (_, old) = map.insert_sorted_by(key, value, |key1, _, key2, _| key2.cmp(key1)); assert_eq!(old, None); } values.reverse(); assert_eq!(values, *map.as_slice()); for (key, value) in &mut values { let (_, old) = map.insert_sorted_by(*key, -*value, |key1, _, key2, _| key2.cmp(key1)); assert_eq!(old, Some(*value)); *value = -*value; } assert_eq!(values, *map.as_slice()); } #[test] fn is_sorted() { fn expect(map: &IndexMap, e: [bool; 7]) { assert_eq!(e[0], map.is_sorted()); assert_eq!(e[1], map.is_sorted_by(|k1, _, k2, _| k1 < k2)); assert_eq!(e[2], map.is_sorted_by(|k1, _, k2, _| k1 > k2)); assert_eq!(e[3], map.is_sorted_by(|_, v1, _, v2| v1 < v2)); assert_eq!(e[4], map.is_sorted_by(|_, v1, _, v2| v1 > v2)); assert_eq!(e[5], map.is_sorted_by_key(|k, _| k)); assert_eq!(e[6], map.is_sorted_by_key(|_, v| v)); } let mut map = IndexMap::from_iter((0..10).map(|i| (i, i * i))); expect(&map, [true, true, false, true, false, true, true]); map[5] = -1; expect(&map, [true, true, false, false, false, true, false]); map[5] = 25; map.replace_index(5, -1).unwrap(); expect(&map, [false, false, false, true, false, false, true]); } #[test] fn is_sorted_trivial() { fn expect(map: &IndexMap, e: [bool; 5]) { assert_eq!(e[0], map.is_sorted()); assert_eq!(e[1], map.is_sorted_by(|_, _, _, _| true)); assert_eq!(e[2], map.is_sorted_by(|_, _, _, _| false)); assert_eq!(e[3], map.is_sorted_by_key(|_, _| 0f64)); assert_eq!(e[4], map.is_sorted_by_key(|_, _| f64::NAN)); } let mut map = IndexMap::new(); expect(&map, [true, true, true, true, true]); map.insert(0, 0); expect(&map, [true, true, true, true, true]); map.insert(1, 1); expect(&map, [true, true, false, true, false]); map.reverse(); expect(&map, [false, true, false, true, false]); } indexmap-2.12.1/src/map.rs000064400000000000000000001772401046102023000134410ustar 00000000000000//! [`IndexMap`] is a hash table where the iteration order of the key-value //! pairs is independent of the hash values of the keys. 
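//!
//! A brief illustrative sketch of that ordering guarantee:
//!
//! ```
//! use indexmap::IndexMap;
//!
//! let mut map = IndexMap::new();
//! map.insert("b", 2);
//! map.insert("a", 1);
//! map.insert("c", 3);
//! // Iteration follows insertion order, not hash or key order.
//! assert!(map.keys().copied().eq(["b", "a", "c"]));
//! assert_eq!(map.get_index(1), Some((&"a", &1)));
//! ```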
mod core; mod iter; mod mutable; mod slice; #[cfg(feature = "serde")] #[cfg_attr(docsrs, doc(cfg(feature = "serde")))] pub mod serde_seq; #[cfg(test)] mod tests; pub use self::core::raw_entry_v1::{self, RawEntryApiV1}; pub use self::core::{Entry, IndexedEntry, OccupiedEntry, VacantEntry}; pub use self::iter::{ Drain, ExtractIf, IntoIter, IntoKeys, IntoValues, Iter, IterMut, IterMut2, Keys, Splice, Values, ValuesMut, }; pub use self::mutable::MutableEntryKey; pub use self::mutable::MutableKeys; pub use self::slice::Slice; #[cfg(feature = "rayon")] pub use crate::rayon::map as rayon; use ::core::cmp::Ordering; use ::core::fmt; use ::core::hash::{BuildHasher, Hash}; use ::core::mem; use ::core::ops::{Index, IndexMut, RangeBounds}; use alloc::boxed::Box; use alloc::vec::Vec; #[cfg(feature = "std")] use std::hash::RandomState; pub(crate) use self::core::{ExtractCore, IndexMapCore}; use crate::util::{third, try_simplify_range}; use crate::{Bucket, Equivalent, GetDisjointMutError, HashValue, TryReserveError}; /// A hash table where the iteration order of the key-value pairs is independent /// of the hash values of the keys. /// /// The interface is closely compatible with the standard /// [`HashMap`][std::collections::HashMap], /// but also has additional features. /// /// # Order /// /// The key-value pairs have a consistent order that is determined by /// the sequence of insertion and removal calls on the map. The order does /// not depend on the keys or the hash function at all. /// /// All iterators traverse the map in *the order*. /// /// The insertion order is preserved, with **notable exceptions** like the /// [`.remove()`][Self::remove] or [`.swap_remove()`][Self::swap_remove] methods. /// Methods such as [`.sort_by()`][Self::sort_by] of /// course result in a new order, depending on the sorting order. /// /// # Indices /// /// The key-value pairs are indexed in a compact range without holes in the /// range `0..self.len()`. For example, the method `.get_full` looks up the /// index for a key, and the method `.get_index` looks up the key-value pair by /// index. /// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// // count the frequency of each letter in a sentence. /// let mut letters = IndexMap::new(); /// for ch in "a short treatise on fungi".chars() { /// *letters.entry(ch).or_insert(0) += 1; /// } /// /// assert_eq!(letters[&'s'], 2); /// assert_eq!(letters[&'t'], 3); /// assert_eq!(letters[&'u'], 1); /// assert_eq!(letters.get(&'y'), None); /// ``` #[cfg(feature = "std")] pub struct IndexMap { pub(crate) core: IndexMapCore, hash_builder: S, } #[cfg(not(feature = "std"))] pub struct IndexMap { pub(crate) core: IndexMapCore, hash_builder: S, } impl Clone for IndexMap where K: Clone, V: Clone, S: Clone, { fn clone(&self) -> Self { IndexMap { core: self.core.clone(), hash_builder: self.hash_builder.clone(), } } fn clone_from(&mut self, other: &Self) { self.core.clone_from(&other.core); self.hash_builder.clone_from(&other.hash_builder); } } impl fmt::Debug for IndexMap where K: fmt::Debug, V: fmt::Debug, { #[cfg(not(feature = "test_debug"))] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } #[cfg(feature = "test_debug")] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Let the inner `IndexMapCore` print all of its details f.debug_struct("IndexMap") .field("core", &self.core) .finish() } } #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] impl IndexMap { /// Create a new map. 
(Does not allocate.) #[inline] pub fn new() -> Self { Self::with_capacity(0) } /// Create a new map with capacity for `n` key-value pairs. (Does not /// allocate if `n` is zero.) /// /// Computes in **O(n)** time. #[inline] pub fn with_capacity(n: usize) -> Self { Self::with_capacity_and_hasher(n, <_>::default()) } } impl IndexMap { /// Create a new map with capacity for `n` key-value pairs. (Does not /// allocate if `n` is zero.) /// /// Computes in **O(n)** time. #[inline] pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self { if n == 0 { Self::with_hasher(hash_builder) } else { IndexMap { core: IndexMapCore::with_capacity(n), hash_builder, } } } /// Create a new map with `hash_builder`. /// /// This function is `const`, so it /// can be called in `static` contexts. pub const fn with_hasher(hash_builder: S) -> Self { IndexMap { core: IndexMapCore::new(), hash_builder, } } #[inline] pub(crate) fn into_entries(self) -> Vec> { self.core.into_entries() } #[inline] pub(crate) fn as_entries(&self) -> &[Bucket] { self.core.as_entries() } #[inline] pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket] { self.core.as_entries_mut() } pub(crate) fn with_entries(&mut self, f: F) where F: FnOnce(&mut [Bucket]), { self.core.with_entries(f); } /// Return the number of elements the map can hold without reallocating. /// /// This number is a lower bound; the map might be able to hold more, /// but is guaranteed to be able to hold at least this many. /// /// Computes in **O(1)** time. pub fn capacity(&self) -> usize { self.core.capacity() } /// Return a reference to the map's `BuildHasher`. pub fn hasher(&self) -> &S { &self.hash_builder } /// Return the number of key-value pairs in the map. /// /// Computes in **O(1)** time. #[inline] pub fn len(&self) -> usize { self.core.len() } /// Returns true if the map contains no elements. /// /// Computes in **O(1)** time. #[inline] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Return an iterator over the key-value pairs of the map, in their order pub fn iter(&self) -> Iter<'_, K, V> { Iter::new(self.as_entries()) } /// Return an iterator over the key-value pairs of the map, in their order pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { IterMut::new(self.as_entries_mut()) } /// Return an iterator over the keys of the map, in their order pub fn keys(&self) -> Keys<'_, K, V> { Keys::new(self.as_entries()) } /// Return an owning iterator over the keys of the map, in their order pub fn into_keys(self) -> IntoKeys { IntoKeys::new(self.into_entries()) } /// Return an iterator over the values of the map, in their order pub fn values(&self) -> Values<'_, K, V> { Values::new(self.as_entries()) } /// Return an iterator over mutable references to the values of the map, /// in their order pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { ValuesMut::new(self.as_entries_mut()) } /// Return an owning iterator over the values of the map, in their order pub fn into_values(self) -> IntoValues { IntoValues::new(self.into_entries()) } /// Remove all key-value pairs in the map, while preserving its capacity. /// /// Computes in **O(n)** time. pub fn clear(&mut self) { self.core.clear(); } /// Shortens the map, keeping the first `len` elements and dropping the rest. /// /// If `len` is greater than the map's current length, this has no effect. pub fn truncate(&mut self, len: usize) { self.core.truncate(len); } /// Clears the `IndexMap` in the given index range, returning those /// key-value pairs as a drain iterator. 
/// /// The range may be any type that implements [`RangeBounds`], /// including all of the `std::ops::Range*` types, or even a tuple pair of /// `Bound` start and end values. To drain the map entirely, use `RangeFull` /// like `map.drain(..)`. /// /// This shifts down all entries following the drained range to fill the /// gap, and keeps the allocated memory for reuse. /// /// ***Panics*** if the starting point is greater than the end point or if /// the end point is greater than the length of the map. #[track_caller] pub fn drain(&mut self, range: R) -> Drain<'_, K, V> where R: RangeBounds, { Drain::new(self.core.drain(range)) } /// Creates an iterator which uses a closure to determine if an element should be removed, /// for all elements in the given range. /// /// If the closure returns true, the element is removed from the map and yielded. /// If the closure returns false, or panics, the element remains in the map and will not be /// yielded. /// /// Note that `extract_if` lets you mutate every value in the filter closure, regardless of /// whether you choose to keep or remove it. /// /// The range may be any type that implements [`RangeBounds`], /// including all of the `std::ops::Range*` types, or even a tuple pair of /// `Bound` start and end values. To check the entire map, use `RangeFull` /// like `map.extract_if(.., predicate)`. /// /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating /// or the iteration short-circuits, then the remaining elements will be retained. /// Use [`retain`] with a negated predicate if you do not need the returned iterator. /// /// [`retain`]: IndexMap::retain /// /// ***Panics*** if the starting point is greater than the end point or if /// the end point is greater than the length of the map. /// /// # Examples /// /// Splitting a map into even and odd keys, reusing the original map: /// /// ``` /// use indexmap::IndexMap; /// /// let mut map: IndexMap = (0..8).map(|x| (x, x)).collect(); /// let extracted: IndexMap = map.extract_if(.., |k, _v| k % 2 == 0).collect(); /// /// let evens = extracted.keys().copied().collect::>(); /// let odds = map.keys().copied().collect::>(); /// /// assert_eq!(evens, vec![0, 2, 4, 6]); /// assert_eq!(odds, vec![1, 3, 5, 7]); /// ``` #[track_caller] pub fn extract_if(&mut self, range: R, pred: F) -> ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool, R: RangeBounds, { ExtractIf::new(&mut self.core, range, pred) } /// Splits the collection into two at the given index. /// /// Returns a newly allocated map containing the elements in the range /// `[at, len)`. After the call, the original map will be left containing /// the elements `[0, at)` with its previous capacity unchanged. /// /// ***Panics*** if `at > len`. #[track_caller] pub fn split_off(&mut self, at: usize) -> Self where S: Clone, { Self { core: self.core.split_off(at), hash_builder: self.hash_builder.clone(), } } /// Reserve capacity for `additional` more key-value pairs. /// /// Computes in **O(n)** time. pub fn reserve(&mut self, additional: usize) { self.core.reserve(additional); } /// Reserve capacity for `additional` more key-value pairs, without over-allocating. /// /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid /// frequent re-allocations. However, the underlying data structures may still have internal /// capacity requirements, and the allocator itself may give more space than requested, so this /// cannot be relied upon to be precisely minimal. 
/// /// Computes in **O(n)** time. pub fn reserve_exact(&mut self, additional: usize) { self.core.reserve_exact(additional); } /// Try to reserve capacity for `additional` more key-value pairs. /// /// Computes in **O(n)** time. pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { self.core.try_reserve(additional) } /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating. /// /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid /// frequent re-allocations. However, the underlying data structures may still have internal /// capacity requirements, and the allocator itself may give more space than requested, so this /// cannot be relied upon to be precisely minimal. /// /// Computes in **O(n)** time. pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { self.core.try_reserve_exact(additional) } /// Shrink the capacity of the map as much as possible. /// /// Computes in **O(n)** time. pub fn shrink_to_fit(&mut self) { self.core.shrink_to(0); } /// Shrink the capacity of the map with a lower limit. /// /// Computes in **O(n)** time. pub fn shrink_to(&mut self, min_capacity: usize) { self.core.shrink_to(min_capacity); } } impl IndexMap where K: Hash + Eq, S: BuildHasher, { /// Insert a key-value pair in the map. /// /// If an equivalent key already exists in the map: the key remains and /// retains in its place in the order, its corresponding value is updated /// with `value`, and the older value is returned inside `Some(_)`. /// /// If no equivalent key existed in the map: the new key-value pair is /// inserted, last in order, and `None` is returned. /// /// Computes in **O(1)** time (amortized average). /// /// See also [`entry`][Self::entry] if you want to insert *or* modify, /// or [`insert_full`][Self::insert_full] if you need to get the index of /// the corresponding key-value pair. pub fn insert(&mut self, key: K, value: V) -> Option { self.insert_full(key, value).1 } /// Insert a key-value pair in the map, and get their index. /// /// If an equivalent key already exists in the map: the key remains and /// retains in its place in the order, its corresponding value is updated /// with `value`, and the older value is returned inside `(index, Some(_))`. /// /// If no equivalent key existed in the map: the new key-value pair is /// inserted, last in order, and `(index, None)` is returned. /// /// Computes in **O(1)** time (amortized average). /// /// See also [`entry`][Self::entry] if you want to insert *or* modify. pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option) { let hash = self.hash(&key); self.core.insert_full(hash, key, value) } /// Insert a key-value pair in the map at its ordered position among sorted keys. /// /// This is equivalent to finding the position with /// [`binary_search_keys`][Self::binary_search_keys], then either updating /// it or calling [`insert_before`][Self::insert_before] for a new key. /// /// If the sorted key is found in the map, its corresponding value is /// updated with `value`, and the older value is returned inside /// `(index, Some(_))`. Otherwise, the new key-value pair is inserted at /// the sorted position, and `(index, None)` is returned. /// /// If the existing keys are **not** already sorted, then the insertion /// index is unspecified (like [`slice::binary_search`]), but the key-value /// pair is moved to or inserted at that position regardless. /// /// Computes in **O(n)** time (average). 
Instead of repeating calls to /// `insert_sorted`, it may be faster to call batched [`insert`][Self::insert] /// or [`extend`][Self::extend] and only call [`sort_keys`][Self::sort_keys] /// or [`sort_unstable_keys`][Self::sort_unstable_keys] once. pub fn insert_sorted(&mut self, key: K, value: V) -> (usize, Option) where K: Ord, { match self.binary_search_keys(&key) { Ok(i) => (i, Some(mem::replace(&mut self[i], value))), Err(i) => self.insert_before(i, key, value), } } /// Insert a key-value pair in the map at its ordered position among keys /// sorted by `cmp`. /// /// This is equivalent to finding the position with /// [`binary_search_by`][Self::binary_search_by], then calling /// [`insert_before`][Self::insert_before] with the given key and value. /// /// If the existing keys are **not** already sorted, then the insertion /// index is unspecified (like [`slice::binary_search`]), but the key-value /// pair is moved to or inserted at that position regardless. /// /// Computes in **O(n)** time (average). pub fn insert_sorted_by(&mut self, key: K, value: V, mut cmp: F) -> (usize, Option) where F: FnMut(&K, &V, &K, &V) -> Ordering, { let (Ok(i) | Err(i)) = self.binary_search_by(|k, v| cmp(k, v, &key, &value)); self.insert_before(i, key, value) } /// Insert a key-value pair in the map at its ordered position /// using a sort-key extraction function. /// /// This is equivalent to finding the position with /// [`binary_search_by_key`][Self::binary_search_by_key] with `sort_key(key)`, then /// calling [`insert_before`][Self::insert_before] with the given key and value. /// /// If the existing keys are **not** already sorted, then the insertion /// index is unspecified (like [`slice::binary_search`]), but the key-value /// pair is moved to or inserted at that position regardless. /// /// Computes in **O(n)** time (average). pub fn insert_sorted_by_key( &mut self, key: K, value: V, mut sort_key: F, ) -> (usize, Option) where B: Ord, F: FnMut(&K, &V) -> B, { let search_key = sort_key(&key, &value); let (Ok(i) | Err(i)) = self.binary_search_by_key(&search_key, sort_key); self.insert_before(i, key, value) } /// Insert a key-value pair in the map before the entry at the given index, or at the end. /// /// If an equivalent key already exists in the map: the key remains and /// is moved to the new position in the map, its corresponding value is updated /// with `value`, and the older value is returned inside `Some(_)`. The returned index /// will either be the given index or one less, depending on how the entry moved. /// (See [`shift_insert`](Self::shift_insert) for different behavior here.) /// /// If no equivalent key existed in the map: the new key-value pair is /// inserted exactly at the given index, and `None` is returned. /// /// ***Panics*** if `index` is out of bounds. /// Valid indices are `0..=map.len()` (inclusive). /// /// Computes in **O(n)** time (average). /// /// See also [`entry`][Self::entry] if you want to insert *or* modify, /// perhaps only using the index for new entries with [`VacantEntry::shift_insert`]. /// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// let mut map: IndexMap = ('a'..='z').map(|c| (c, ())).collect(); /// /// // The new key '*' goes exactly at the given index. /// assert_eq!(map.get_index_of(&'*'), None); /// assert_eq!(map.insert_before(10, '*', ()), (10, None)); /// assert_eq!(map.get_index_of(&'*'), Some(10)); /// /// // Moving the key 'a' up will shift others down, so this moves *before* 10 to index 9. 
/// assert_eq!(map.insert_before(10, 'a', ()), (9, Some(()))); /// assert_eq!(map.get_index_of(&'a'), Some(9)); /// assert_eq!(map.get_index_of(&'*'), Some(10)); /// /// // Moving the key 'z' down will shift others up, so this moves to exactly 10. /// assert_eq!(map.insert_before(10, 'z', ()), (10, Some(()))); /// assert_eq!(map.get_index_of(&'z'), Some(10)); /// assert_eq!(map.get_index_of(&'*'), Some(11)); /// /// // Moving or inserting before the endpoint is also valid. /// assert_eq!(map.len(), 27); /// assert_eq!(map.insert_before(map.len(), '*', ()), (26, Some(()))); /// assert_eq!(map.get_index_of(&'*'), Some(26)); /// assert_eq!(map.insert_before(map.len(), '+', ()), (27, None)); /// assert_eq!(map.get_index_of(&'+'), Some(27)); /// assert_eq!(map.len(), 28); /// ``` #[track_caller] pub fn insert_before(&mut self, mut index: usize, key: K, value: V) -> (usize, Option) { let len = self.len(); assert!( index <= len, "index out of bounds: the len is {len} but the index is {index}. Expected index <= len" ); match self.entry(key) { Entry::Occupied(mut entry) => { if index > entry.index() { // Some entries will shift down when this one moves up, // so "insert before index" becomes "move to index - 1", // keeping the entry at the original index unmoved. index -= 1; } let old = mem::replace(entry.get_mut(), value); entry.move_index(index); (index, Some(old)) } Entry::Vacant(entry) => { entry.shift_insert(index, value); (index, None) } } } /// Insert a key-value pair in the map at the given index. /// /// If an equivalent key already exists in the map: the key remains and /// is moved to the given index in the map, its corresponding value is updated /// with `value`, and the older value is returned inside `Some(_)`. /// Note that existing entries **cannot** be moved to `index == map.len()`! /// (See [`insert_before`](Self::insert_before) for different behavior here.) /// /// If no equivalent key existed in the map: the new key-value pair is /// inserted at the given index, and `None` is returned. /// /// ***Panics*** if `index` is out of bounds. /// Valid indices are `0..map.len()` (exclusive) when moving an existing entry, or /// `0..=map.len()` (inclusive) when inserting a new key. /// /// Computes in **O(n)** time (average). /// /// See also [`entry`][Self::entry] if you want to insert *or* modify, /// perhaps only using the index for new entries with [`VacantEntry::shift_insert`]. /// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// let mut map: IndexMap = ('a'..='z').map(|c| (c, ())).collect(); /// /// // The new key '*' goes exactly at the given index. /// assert_eq!(map.get_index_of(&'*'), None); /// assert_eq!(map.shift_insert(10, '*', ()), None); /// assert_eq!(map.get_index_of(&'*'), Some(10)); /// /// // Moving the key 'a' up to 10 will shift others down, including the '*' that was at 10. /// assert_eq!(map.shift_insert(10, 'a', ()), Some(())); /// assert_eq!(map.get_index_of(&'a'), Some(10)); /// assert_eq!(map.get_index_of(&'*'), Some(9)); /// /// // Moving the key 'z' down to 9 will shift others up, including the '*' that was at 9. /// assert_eq!(map.shift_insert(9, 'z', ()), Some(())); /// assert_eq!(map.get_index_of(&'z'), Some(9)); /// assert_eq!(map.get_index_of(&'*'), Some(10)); /// /// // Existing keys can move to len-1 at most, but new keys can insert at the endpoint. 
/// assert_eq!(map.len(), 27); /// assert_eq!(map.shift_insert(map.len() - 1, '*', ()), Some(())); /// assert_eq!(map.get_index_of(&'*'), Some(26)); /// assert_eq!(map.shift_insert(map.len(), '+', ()), None); /// assert_eq!(map.get_index_of(&'+'), Some(27)); /// assert_eq!(map.len(), 28); /// ``` /// /// ```should_panic /// use indexmap::IndexMap; /// let mut map: IndexMap = ('a'..='z').map(|c| (c, ())).collect(); /// /// // This is an invalid index for moving an existing key! /// map.shift_insert(map.len(), 'a', ()); /// ``` #[track_caller] pub fn shift_insert(&mut self, index: usize, key: K, value: V) -> Option { let len = self.len(); match self.entry(key) { Entry::Occupied(mut entry) => { assert!( index < len, "index out of bounds: the len is {len} but the index is {index}" ); let old = mem::replace(entry.get_mut(), value); entry.move_index(index); Some(old) } Entry::Vacant(entry) => { assert!( index <= len, "index out of bounds: the len is {len} but the index is {index}. Expected index <= len" ); entry.shift_insert(index, value); None } } } /// Replaces the key at the given index. The new key does not need to be /// equivalent to the one it is replacing, but it must be unique to the rest /// of the map. /// /// Returns `Ok(old_key)` if successful, or `Err((other_index, key))` if an /// equivalent key already exists at a different index. The map will be /// unchanged in the error case. /// /// Direct indexing can be used to change the corresponding value: simply /// `map[index] = value`, or `mem::replace(&mut map[index], value)` to /// retrieve the old value as well. /// /// ***Panics*** if `index` is out of bounds. /// /// Computes in **O(1)** time (average). #[track_caller] pub fn replace_index(&mut self, index: usize, key: K) -> Result { // If there's a direct match, we don't even need to hash it. let entry = &mut self.as_entries_mut()[index]; if key == entry.key { return Ok(mem::replace(&mut entry.key, key)); } let hash = self.hash(&key); if let Some(i) = self.core.get_index_of(hash, &key) { debug_assert_ne!(i, index); return Err((i, key)); } Ok(self.core.replace_index_unique(index, hash, key)) } /// Get the given key's corresponding entry in the map for insertion and/or /// in-place manipulation. /// /// Computes in **O(1)** time (amortized average). pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { let hash = self.hash(&key); Entry::new(&mut self.core, hash, key) } /// Creates a splicing iterator that replaces the specified range in the map /// with the given `replace_with` key-value iterator and yields the removed /// items. `replace_with` does not need to be the same length as `range`. /// /// The `range` is removed even if the iterator is not consumed until the /// end. It is unspecified how many elements are removed from the map if the /// `Splice` value is leaked. /// /// The input iterator `replace_with` is only consumed when the `Splice` /// value is dropped. If a key from the iterator matches an existing entry /// in the map (outside of `range`), then the value will be updated in that /// position. Otherwise, the new key-value pair will be inserted in the /// replaced `range`. /// /// ***Panics*** if the starting point is greater than the end point or if /// the end point is greater than the length of the map. 
/// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// let mut map = IndexMap::from([(0, '_'), (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]); /// let new = [(5, 'E'), (4, 'D'), (3, 'C'), (2, 'B'), (1, 'A')]; /// let removed: Vec<_> = map.splice(2..4, new).collect(); /// /// // 1 and 4 got new values, while 5, 3, and 2 were newly inserted. /// assert!(map.into_iter().eq([(0, '_'), (1, 'A'), (5, 'E'), (3, 'C'), (2, 'B'), (4, 'D')])); /// assert_eq!(removed, &[(2, 'b'), (3, 'c')]); /// ``` #[track_caller] pub fn splice(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, K, V, S> where R: RangeBounds, I: IntoIterator, { Splice::new(self, range, replace_with.into_iter()) } /// Moves all key-value pairs from `other` into `self`, leaving `other` empty. /// /// This is equivalent to calling [`insert`][Self::insert] for each /// key-value pair from `other` in order, which means that for keys that /// already exist in `self`, their value is updated in the current position. /// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// // Note: Key (3) is present in both maps. /// let mut a = IndexMap::from([(3, "c"), (2, "b"), (1, "a")]); /// let mut b = IndexMap::from([(3, "d"), (4, "e"), (5, "f")]); /// let old_capacity = b.capacity(); /// /// a.append(&mut b); /// /// assert_eq!(a.len(), 5); /// assert_eq!(b.len(), 0); /// assert_eq!(b.capacity(), old_capacity); /// /// assert!(a.keys().eq(&[3, 2, 1, 4, 5])); /// assert_eq!(a[&3], "d"); // "c" was overwritten. /// ``` pub fn append(&mut self, other: &mut IndexMap) { self.extend(other.drain(..)); } } impl IndexMap where S: BuildHasher, { pub(crate) fn hash(&self, key: &Q) -> HashValue { let h = self.hash_builder.hash_one(key); HashValue(h as usize) } /// Return `true` if an equivalent to `key` exists in the map. /// /// Computes in **O(1)** time (average). pub fn contains_key(&self, key: &Q) -> bool where Q: ?Sized + Hash + Equivalent, { self.get_index_of(key).is_some() } /// Return a reference to the stored value for `key`, if it is present, /// else `None`. /// /// Computes in **O(1)** time (average). pub fn get(&self, key: &Q) -> Option<&V> where Q: ?Sized + Hash + Equivalent, { if let Some(i) = self.get_index_of(key) { let entry = &self.as_entries()[i]; Some(&entry.value) } else { None } } /// Return references to the stored key-value pair for the lookup `key`, /// if it is present, else `None`. /// /// Computes in **O(1)** time (average). pub fn get_key_value(&self, key: &Q) -> Option<(&K, &V)> where Q: ?Sized + Hash + Equivalent, { if let Some(i) = self.get_index_of(key) { let entry = &self.as_entries()[i]; Some((&entry.key, &entry.value)) } else { None } } /// Return the index with references to the stored key-value pair for the /// lookup `key`, if it is present, else `None`. /// /// Computes in **O(1)** time (average). pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> where Q: ?Sized + Hash + Equivalent, { if let Some(i) = self.get_index_of(key) { let entry = &self.as_entries()[i]; Some((i, &entry.key, &entry.value)) } else { None } } /// Return the item index for `key`, if it is present, else `None`. /// /// Computes in **O(1)** time (average). pub fn get_index_of(&self, key: &Q) -> Option where Q: ?Sized + Hash + Equivalent, { match self.as_entries() { [] => None, [x] => key.equivalent(&x.key).then_some(0), _ => { let hash = self.hash(key); self.core.get_index_of(hash, key) } } } /// Return a mutable reference to the stored value for `key`, /// if it is present, else `None`. 
/// /// Computes in **O(1)** time (average). pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> where Q: ?Sized + Hash + Equivalent, { if let Some(i) = self.get_index_of(key) { let entry = &mut self.as_entries_mut()[i]; Some(&mut entry.value) } else { None } } /// Return a reference and mutable references to the stored key-value pair /// for the lookup `key`, if it is present, else `None`. /// /// Computes in **O(1)** time (average). pub fn get_key_value_mut(&mut self, key: &Q) -> Option<(&K, &mut V)> where Q: ?Sized + Hash + Equivalent, { if let Some(i) = self.get_index_of(key) { let entry = &mut self.as_entries_mut()[i]; Some((&entry.key, &mut entry.value)) } else { None } } /// Return the index with a reference and mutable reference to the stored /// key-value pair for the lookup `key`, if it is present, else `None`. /// /// Computes in **O(1)** time (average). pub fn get_full_mut(&mut self, key: &Q) -> Option<(usize, &K, &mut V)> where Q: ?Sized + Hash + Equivalent, { if let Some(i) = self.get_index_of(key) { let entry = &mut self.as_entries_mut()[i]; Some((i, &entry.key, &mut entry.value)) } else { None } } /// Return the values for `N` keys. If any key is duplicated, this function will panic. /// /// # Examples /// /// ``` /// let mut map = indexmap::IndexMap::from([(1, 'a'), (3, 'b'), (2, 'c')]); /// assert_eq!(map.get_disjoint_mut([&2, &1]), [Some(&mut 'c'), Some(&mut 'a')]); /// ``` pub fn get_disjoint_mut(&mut self, keys: [&Q; N]) -> [Option<&mut V>; N] where Q: ?Sized + Hash + Equivalent, { let indices = keys.map(|key| self.get_index_of(key)); match self.as_mut_slice().get_disjoint_opt_mut(indices) { Err(GetDisjointMutError::IndexOutOfBounds) => { unreachable!( "Internal error: indices should never be OOB as we got them from get_index_of" ); } Err(GetDisjointMutError::OverlappingIndices) => { panic!("duplicate keys found"); } Ok(key_values) => key_values.map(|kv_opt| kv_opt.map(|kv| kv.1)), } } /// Remove the key-value pair equivalent to `key` and return /// its value. /// /// **NOTE:** This is equivalent to [`.swap_remove(key)`][Self::swap_remove], replacing this /// entry's position with the last element, and it is deprecated in favor of calling that /// explicitly. If you need to preserve the relative order of the keys in the map, use /// [`.shift_remove(key)`][Self::shift_remove] instead. #[deprecated(note = "`remove` disrupts the map order -- \ use `swap_remove` or `shift_remove` for explicit behavior.")] pub fn remove(&mut self, key: &Q) -> Option where Q: ?Sized + Hash + Equivalent, { self.swap_remove(key) } /// Remove and return the key-value pair equivalent to `key`. /// /// **NOTE:** This is equivalent to [`.swap_remove_entry(key)`][Self::swap_remove_entry], /// replacing this entry's position with the last element, and it is deprecated in favor of /// calling that explicitly. If you need to preserve the relative order of the keys in the map, /// use [`.shift_remove_entry(key)`][Self::shift_remove_entry] instead. #[deprecated(note = "`remove_entry` disrupts the map order -- \ use `swap_remove_entry` or `shift_remove_entry` for explicit behavior.")] pub fn remove_entry(&mut self, key: &Q) -> Option<(K, V)> where Q: ?Sized + Hash + Equivalent, { self.swap_remove_entry(key) } /// Remove the key-value pair equivalent to `key` and return /// its value. /// /// Like [`Vec::swap_remove`], the pair is removed by swapping it with the /// last element of the map and popping it off. 
**This perturbs /// the position of what used to be the last element!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(1)** time (average). pub fn swap_remove(&mut self, key: &Q) -> Option where Q: ?Sized + Hash + Equivalent, { self.swap_remove_full(key).map(third) } /// Remove and return the key-value pair equivalent to `key`. /// /// Like [`Vec::swap_remove`], the pair is removed by swapping it with the /// last element of the map and popping it off. **This perturbs /// the position of what used to be the last element!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(1)** time (average). pub fn swap_remove_entry(&mut self, key: &Q) -> Option<(K, V)> where Q: ?Sized + Hash + Equivalent, { match self.swap_remove_full(key) { Some((_, key, value)) => Some((key, value)), None => None, } } /// Remove the key-value pair equivalent to `key` and return it and /// the index it had. /// /// Like [`Vec::swap_remove`], the pair is removed by swapping it with the /// last element of the map and popping it off. **This perturbs /// the position of what used to be the last element!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(1)** time (average). pub fn swap_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> where Q: ?Sized + Hash + Equivalent, { match self.as_entries() { [x] if key.equivalent(&x.key) => { let (k, v) = self.core.pop()?; Some((0, k, v)) } [_] | [] => None, _ => { let hash = self.hash(key); self.core.swap_remove_full(hash, key) } } } /// Remove the key-value pair equivalent to `key` and return /// its value. /// /// Like [`Vec::remove`], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(n)** time (average). pub fn shift_remove(&mut self, key: &Q) -> Option where Q: ?Sized + Hash + Equivalent, { self.shift_remove_full(key).map(third) } /// Remove and return the key-value pair equivalent to `key`. /// /// Like [`Vec::remove`], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(n)** time (average). pub fn shift_remove_entry(&mut self, key: &Q) -> Option<(K, V)> where Q: ?Sized + Hash + Equivalent, { match self.shift_remove_full(key) { Some((_, key, value)) => Some((key, value)), None => None, } } /// Remove the key-value pair equivalent to `key` and return it and /// the index it had. /// /// Like [`Vec::remove`], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(n)** time (average). pub fn shift_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> where Q: ?Sized + Hash + Equivalent, { match self.as_entries() { [x] if key.equivalent(&x.key) => { let (k, v) = self.core.pop()?; Some((0, k, v)) } [_] | [] => None, _ => { let hash = self.hash(key); self.core.shift_remove_full(hash, key) } } } } impl IndexMap { /// Remove the last key-value pair /// /// This preserves the order of the remaining elements. /// /// Computes in **O(1)** time (average). 
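///
/// # Examples
///
/// A small illustrative sketch:
///
/// ```
/// use indexmap::IndexMap;
///
/// let mut map = IndexMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
/// assert_eq!(map.pop(), Some((3, 'c')));
/// // The remaining entries keep their original order.
/// assert!(map.keys().eq(&[1, 2]));
/// ```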
#[doc(alias = "pop_last")] // like `BTreeMap` pub fn pop(&mut self) -> Option<(K, V)> { self.core.pop() } /// Removes and returns the last key-value pair from a map if the predicate /// returns `true`, or [`None`] if the predicate returns false or the map /// is empty (the predicate will not be called in that case). /// /// This preserves the order of the remaining elements. /// /// Computes in **O(1)** time (average). /// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// let init = [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]; /// let mut map = IndexMap::from(init); /// let pred = |key: &i32, _value: &mut char| *key % 2 == 0; /// /// assert_eq!(map.pop_if(pred), Some((4, 'd'))); /// assert_eq!(map.as_slice(), &init[..3]); /// assert_eq!(map.pop_if(pred), None); /// ``` pub fn pop_if(&mut self, predicate: impl FnOnce(&K, &mut V) -> bool) -> Option<(K, V)> { let (last_key, last_value) = self.last_mut()?; if predicate(last_key, last_value) { self.core.pop() } else { None } } /// Scan through each key-value pair in the map and keep those where the /// closure `keep` returns `true`. /// /// The elements are visited in order, and remaining elements keep their /// order. /// /// Computes in **O(n)** time (average). pub fn retain(&mut self, mut keep: F) where F: FnMut(&K, &mut V) -> bool, { self.core.retain_in_order(move |k, v| keep(k, v)); } /// Sort the map's key-value pairs by the default ordering of the keys. /// /// This is a stable sort -- but equivalent keys should not normally coexist in /// a map at all, so [`sort_unstable_keys`][Self::sort_unstable_keys] is preferred /// because it is generally faster and doesn't allocate auxiliary memory. /// /// See [`sort_by`](Self::sort_by) for details. pub fn sort_keys(&mut self) where K: Ord, { self.with_entries(move |entries| { entries.sort_by(move |a, b| K::cmp(&a.key, &b.key)); }); } /// Sort the map's key-value pairs in place using the comparison /// function `cmp`. /// /// The comparison function receives two key and value pairs to compare (you /// can sort by keys or values or their combination as needed). /// /// Computes in **O(n log n + c)** time and **O(n)** space where *n* is /// the length of the map and *c* the capacity. The sort is stable. pub fn sort_by(&mut self, mut cmp: F) where F: FnMut(&K, &V, &K, &V) -> Ordering, { self.with_entries(move |entries| { entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); }); } /// Sort the key-value pairs of the map and return a by-value iterator of /// the key-value pairs with the result. /// /// The sort is stable. pub fn sorted_by(self, mut cmp: F) -> IntoIter where F: FnMut(&K, &V, &K, &V) -> Ordering, { let mut entries = self.into_entries(); entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); IntoIter::new(entries) } /// Sort the map's key-value pairs in place using a sort-key extraction function. /// /// Computes in **O(n log n + c)** time and **O(n)** space where *n* is /// the length of the map and *c* the capacity. The sort is stable. pub fn sort_by_key(&mut self, mut sort_key: F) where T: Ord, F: FnMut(&K, &V) -> T, { self.with_entries(move |entries| { entries.sort_by_key(move |a| sort_key(&a.key, &a.value)); }); } /// Sort the map's key-value pairs by the default ordering of the keys, but /// may not preserve the order of equal elements. /// /// See [`sort_unstable_by`](Self::sort_unstable_by) for details. 
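///
/// # Examples
///
/// A brief illustrative sketch:
///
/// ```
/// use indexmap::IndexMap;
///
/// let mut map = IndexMap::from([(3, "c"), (1, "a"), (2, "b")]);
/// map.sort_unstable_keys();
/// assert!(map.keys().eq(&[1, 2, 3]));
/// ```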
pub fn sort_unstable_keys(&mut self) where K: Ord, { self.with_entries(move |entries| { entries.sort_unstable_by(move |a, b| K::cmp(&a.key, &b.key)); }); } /// Sort the map's key-value pairs in place using the comparison function `cmp`, but /// may not preserve the order of equal elements. /// /// The comparison function receives two key and value pairs to compare (you /// can sort by keys or values or their combination as needed). /// /// Computes in **O(n log n + c)** time where *n* is /// the length of the map and *c* is the capacity. The sort is unstable. pub fn sort_unstable_by(&mut self, mut cmp: F) where F: FnMut(&K, &V, &K, &V) -> Ordering, { self.with_entries(move |entries| { entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); }); } /// Sort the key-value pairs of the map and return a by-value iterator of /// the key-value pairs with the result. /// /// The sort is unstable. #[inline] pub fn sorted_unstable_by(self, mut cmp: F) -> IntoIter where F: FnMut(&K, &V, &K, &V) -> Ordering, { let mut entries = self.into_entries(); entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); IntoIter::new(entries) } /// Sort the map's key-value pairs in place using a sort-key extraction function. /// /// Computes in **O(n log n + c)** time where *n* is /// the length of the map and *c* is the capacity. The sort is unstable. pub fn sort_unstable_by_key(&mut self, mut sort_key: F) where T: Ord, F: FnMut(&K, &V) -> T, { self.with_entries(move |entries| { entries.sort_unstable_by_key(move |a| sort_key(&a.key, &a.value)); }); } /// Sort the map's key-value pairs in place using a sort-key extraction function. /// /// During sorting, the function is called at most once per entry, by using temporary storage /// to remember the results of its evaluation. The order of calls to the function is /// unspecified and may change between versions of `indexmap` or the standard library. /// /// Computes in **O(m n + n log n + c)** time () and **O(n)** space, where the function is /// **O(m)**, *n* is the length of the map, and *c* the capacity. The sort is stable. pub fn sort_by_cached_key(&mut self, mut sort_key: F) where T: Ord, F: FnMut(&K, &V) -> T, { self.with_entries(move |entries| { entries.sort_by_cached_key(move |a| sort_key(&a.key, &a.value)); }); } /// Search over a sorted map for a key. /// /// Returns the position where that key is present, or the position where it can be inserted to /// maintain the sort. See [`slice::binary_search`] for more details. /// /// Computes in **O(log(n))** time, which is notably less scalable than looking the key up /// using [`get_index_of`][IndexMap::get_index_of], but this can also position missing keys. pub fn binary_search_keys(&self, x: &K) -> Result where K: Ord, { self.as_slice().binary_search_keys(x) } /// Search over a sorted map with a comparator function. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. See [`slice::binary_search_by`] for more details. /// /// Computes in **O(log(n))** time. #[inline] pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result where F: FnMut(&'a K, &'a V) -> Ordering, { self.as_slice().binary_search_by(f) } /// Search over a sorted map with an extraction function. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. /// /// Computes in **O(log(n))** time. 
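///
/// # Examples
///
/// An illustrative sketch over a map whose entries are already sorted by value:
///
/// ```
/// use indexmap::IndexMap;
///
/// let map = IndexMap::from([("a", 10), ("b", 20), ("c", 30)]);
/// // Use the value as the sort key for the search.
/// assert_eq!(map.binary_search_by_key(&20, |_k, &v| v), Ok(1));
/// assert_eq!(map.binary_search_by_key(&25, |_k, &v| v), Err(2));
/// ```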
#[inline] pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result where F: FnMut(&'a K, &'a V) -> B, B: Ord, { self.as_slice().binary_search_by_key(b, f) } /// Checks if the keys of this map are sorted. #[inline] pub fn is_sorted(&self) -> bool where K: PartialOrd, { self.as_slice().is_sorted() } /// Checks if this map is sorted using the given comparator function. #[inline] pub fn is_sorted_by<'a, F>(&'a self, cmp: F) -> bool where F: FnMut(&'a K, &'a V, &'a K, &'a V) -> bool, { self.as_slice().is_sorted_by(cmp) } /// Checks if this map is sorted using the given sort-key function. #[inline] pub fn is_sorted_by_key<'a, F, T>(&'a self, sort_key: F) -> bool where F: FnMut(&'a K, &'a V) -> T, T: PartialOrd, { self.as_slice().is_sorted_by_key(sort_key) } /// Returns the index of the partition point of a sorted map according to the given predicate /// (the index of the first element of the second partition). /// /// See [`slice::partition_point`] for more details. /// /// Computes in **O(log(n))** time. #[must_use] pub fn partition_point
<P>
(&self, pred: P) -> usize where P: FnMut(&K, &V) -> bool, { self.as_slice().partition_point(pred) } /// Reverses the order of the map's key-value pairs in place. /// /// Computes in **O(n)** time and **O(1)** space. pub fn reverse(&mut self) { self.core.reverse() } /// Returns a slice of all the key-value pairs in the map. /// /// Computes in **O(1)** time. pub fn as_slice(&self) -> &Slice { Slice::from_slice(self.as_entries()) } /// Returns a mutable slice of all the key-value pairs in the map. /// /// Computes in **O(1)** time. pub fn as_mut_slice(&mut self) -> &mut Slice { Slice::from_mut_slice(self.as_entries_mut()) } /// Converts into a boxed slice of all the key-value pairs in the map. /// /// Note that this will drop the inner hash table and any excess capacity. pub fn into_boxed_slice(self) -> Box> { Slice::from_boxed(self.into_entries().into_boxed_slice()) } /// Get a key-value pair by index /// /// Valid indices are `0 <= index < self.len()`. /// /// Computes in **O(1)** time. pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { self.as_entries().get(index).map(Bucket::refs) } /// Get a key-value pair by index /// /// Valid indices are `0 <= index < self.len()`. /// /// Computes in **O(1)** time. pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> { self.as_entries_mut().get_mut(index).map(Bucket::ref_mut) } /// Get an entry in the map by index for in-place manipulation. /// /// Valid indices are `0 <= index < self.len()`. /// /// Computes in **O(1)** time. pub fn get_index_entry(&mut self, index: usize) -> Option> { IndexedEntry::new(&mut self.core, index) } /// Get an array of `N` key-value pairs by `N` indices /// /// Valid indices are *0 <= index < self.len()* and each index needs to be unique. /// /// # Examples /// /// ``` /// let mut map = indexmap::IndexMap::from([(1, 'a'), (3, 'b'), (2, 'c')]); /// assert_eq!(map.get_disjoint_indices_mut([2, 0]), Ok([(&2, &mut 'c'), (&1, &mut 'a')])); /// ``` pub fn get_disjoint_indices_mut( &mut self, indices: [usize; N], ) -> Result<[(&K, &mut V); N], GetDisjointMutError> { self.as_mut_slice().get_disjoint_mut(indices) } /// Returns a slice of key-value pairs in the given range of indices. /// /// Valid indices are `0 <= index < self.len()`. /// /// Computes in **O(1)** time. pub fn get_range>(&self, range: R) -> Option<&Slice> { let entries = self.as_entries(); let range = try_simplify_range(range, entries.len())?; entries.get(range).map(Slice::from_slice) } /// Returns a mutable slice of key-value pairs in the given range of indices. /// /// Valid indices are `0 <= index < self.len()`. /// /// Computes in **O(1)** time. pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Slice> { let entries = self.as_entries_mut(); let range = try_simplify_range(range, entries.len())?; entries.get_mut(range).map(Slice::from_mut_slice) } /// Get the first key-value pair /// /// Computes in **O(1)** time. #[doc(alias = "first_key_value")] // like `BTreeMap` pub fn first(&self) -> Option<(&K, &V)> { self.as_entries().first().map(Bucket::refs) } /// Get the first key-value pair, with mutable access to the value /// /// Computes in **O(1)** time. pub fn first_mut(&mut self) -> Option<(&K, &mut V)> { self.as_entries_mut().first_mut().map(Bucket::ref_mut) } /// Get the first entry in the map for in-place manipulation. /// /// Computes in **O(1)** time. pub fn first_entry(&mut self) -> Option> { self.get_index_entry(0) } /// Get the last key-value pair /// /// Computes in **O(1)** time. 
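///
/// # Examples
///
/// A small illustrative sketch:
///
/// ```
/// use indexmap::IndexMap;
///
/// let map = IndexMap::from([(1, 'a'), (2, 'b')]);
/// assert_eq!(map.last(), Some((&2, &'b')));
/// ```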
#[doc(alias = "last_key_value")] // like `BTreeMap` pub fn last(&self) -> Option<(&K, &V)> { self.as_entries().last().map(Bucket::refs) } /// Get the last key-value pair, with mutable access to the value /// /// Computes in **O(1)** time. pub fn last_mut(&mut self) -> Option<(&K, &mut V)> { self.as_entries_mut().last_mut().map(Bucket::ref_mut) } /// Get the last entry in the map for in-place manipulation. /// /// Computes in **O(1)** time. pub fn last_entry(&mut self) -> Option> { self.get_index_entry(self.len().checked_sub(1)?) } /// Remove the key-value pair by index /// /// Valid indices are `0 <= index < self.len()`. /// /// Like [`Vec::swap_remove`], the pair is removed by swapping it with the /// last element of the map and popping it off. **This perturbs /// the position of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { self.core.swap_remove_index(index) } /// Remove the key-value pair by index /// /// Valid indices are `0 <= index < self.len()`. /// /// Like [`Vec::remove`], the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { self.core.shift_remove_index(index) } /// Moves the position of a key-value pair from one index to another /// by shifting all other pairs in-between. /// /// * If `from < to`, the other pairs will shift down while the targeted pair moves up. /// * If `from > to`, the other pairs will shift up while the targeted pair moves down. /// /// ***Panics*** if `from` or `to` are out of bounds. /// /// Computes in **O(n)** time (average). #[track_caller] pub fn move_index(&mut self, from: usize, to: usize) { self.core.move_index(from, to) } /// Swaps the position of two key-value pairs in the map. /// /// ***Panics*** if `a` or `b` are out of bounds. /// /// Computes in **O(1)** time (average). #[track_caller] pub fn swap_indices(&mut self, a: usize, b: usize) { self.core.swap_indices(a, b) } } /// Access [`IndexMap`] values corresponding to a key. /// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// for word in "Lorem ipsum dolor sit amet".split_whitespace() { /// map.insert(word.to_lowercase(), word.to_uppercase()); /// } /// assert_eq!(map["lorem"], "LOREM"); /// assert_eq!(map["ipsum"], "IPSUM"); /// ``` /// /// ```should_panic /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// map.insert("foo", 1); /// println!("{:?}", map["bar"]); // panics! /// ``` impl Index<&Q> for IndexMap where Q: Hash + Equivalent, S: BuildHasher, { type Output = V; /// Returns a reference to the value corresponding to the supplied `key`. /// /// ***Panics*** if `key` is not present in the map. fn index(&self, key: &Q) -> &V { self.get(key).expect("no entry found for key") } } /// Access [`IndexMap`] values corresponding to a key. /// /// Mutable indexing allows changing / updating values of key-value /// pairs that are already present. /// /// You can **not** insert new pairs with index syntax, use `.insert()`. 
/// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// for word in "Lorem ipsum dolor sit amet".split_whitespace() { /// map.insert(word.to_lowercase(), word.to_string()); /// } /// let lorem = &mut map["lorem"]; /// assert_eq!(lorem, "Lorem"); /// lorem.retain(char::is_lowercase); /// assert_eq!(map["lorem"], "orem"); /// ``` /// /// ```should_panic /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// map.insert("foo", 1); /// map["bar"] = 1; // panics! /// ``` impl IndexMut<&Q> for IndexMap where Q: Hash + Equivalent, S: BuildHasher, { /// Returns a mutable reference to the value corresponding to the supplied `key`. /// /// ***Panics*** if `key` is not present in the map. fn index_mut(&mut self, key: &Q) -> &mut V { self.get_mut(key).expect("no entry found for key") } } /// Access [`IndexMap`] values at indexed positions. /// /// See [`Index for Keys`][keys] to access a map's keys instead. /// /// [keys]: Keys#impl-Index-for-Keys<'a,+K,+V> /// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// for word in "Lorem ipsum dolor sit amet".split_whitespace() { /// map.insert(word.to_lowercase(), word.to_uppercase()); /// } /// assert_eq!(map[0], "LOREM"); /// assert_eq!(map[1], "IPSUM"); /// map.reverse(); /// assert_eq!(map[0], "AMET"); /// assert_eq!(map[1], "SIT"); /// map.sort_keys(); /// assert_eq!(map[0], "AMET"); /// assert_eq!(map[1], "DOLOR"); /// ``` /// /// ```should_panic /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// map.insert("foo", 1); /// println!("{:?}", map[10]); // panics! /// ``` impl Index for IndexMap { type Output = V; /// Returns a reference to the value at the supplied `index`. /// /// ***Panics*** if `index` is out of bounds. fn index(&self, index: usize) -> &V { if let Some((_, value)) = self.get_index(index) { value } else { panic!( "index out of bounds: the len is {len} but the index is {index}", len = self.len() ); } } } /// Access [`IndexMap`] values at indexed positions. /// /// Mutable indexing allows changing / updating indexed values /// that are already present. /// /// You can **not** insert new values with index syntax -- use [`.insert()`][IndexMap::insert]. /// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// for word in "Lorem ipsum dolor sit amet".split_whitespace() { /// map.insert(word.to_lowercase(), word.to_string()); /// } /// let lorem = &mut map[0]; /// assert_eq!(lorem, "Lorem"); /// lorem.retain(char::is_lowercase); /// assert_eq!(map["lorem"], "orem"); /// ``` /// /// ```should_panic /// use indexmap::IndexMap; /// /// let mut map = IndexMap::new(); /// map.insert("foo", 1); /// map[10] = 1; // panics! /// ``` impl IndexMut for IndexMap { /// Returns a mutable reference to the value at the supplied `index`. /// /// ***Panics*** if `index` is out of bounds. fn index_mut(&mut self, index: usize) -> &mut V { let len: usize = self.len(); if let Some((_, value)) = self.get_index_mut(index) { value } else { panic!("index out of bounds: the len is {len} but the index is {index}"); } } } impl FromIterator<(K, V)> for IndexMap where K: Hash + Eq, S: BuildHasher + Default, { /// Create an `IndexMap` from the sequence of key-value pairs in the /// iterable. /// /// `from_iter` uses the same logic as `extend`. See /// [`extend`][IndexMap::extend] for more details. 
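///
/// # Examples
///
/// A short illustrative sketch using `collect`:
///
/// ```
/// use indexmap::IndexMap;
///
/// let map: IndexMap<_, _> = [(1, "one"), (2, "two")].into_iter().collect();
/// assert_eq!(map.len(), 2);
/// assert_eq!(map[&1], "one");
/// ```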
fn from_iter>(iterable: I) -> Self { let iter = iterable.into_iter(); let (low, _) = iter.size_hint(); let mut map = Self::with_capacity_and_hasher(low, <_>::default()); map.extend(iter); map } } #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] impl From<[(K, V); N]> for IndexMap where K: Hash + Eq, { /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// let map1 = IndexMap::from([(1, 2), (3, 4)]); /// let map2: IndexMap<_, _> = [(1, 2), (3, 4)].into(); /// assert_eq!(map1, map2); /// ``` fn from(arr: [(K, V); N]) -> Self { Self::from_iter(arr) } } impl Extend<(K, V)> for IndexMap where K: Hash + Eq, S: BuildHasher, { /// Extend the map with all key-value pairs in the iterable. /// /// This is equivalent to calling [`insert`][IndexMap::insert] for each of /// them in order, which means that for keys that already existed /// in the map, their value is updated but it keeps the existing order. /// /// New keys are inserted in the order they appear in the sequence. If /// equivalents of a key occur more than once, the last corresponding value /// prevails. fn extend>(&mut self, iterable: I) { // (Note: this is a copy of `std`/`hashbrown`'s reservation logic.) // Keys may be already present or show multiple times in the iterator. // Reserve the entire hint lower bound if the map is empty. // Otherwise reserve half the hint (rounded up), so the map // will only resize twice in the worst case. let iter = iterable.into_iter(); let (lower_len, _) = iter.size_hint(); let reserve = if self.is_empty() { lower_len } else { lower_len.div_ceil(2) }; self.reserve(reserve); iter.for_each(move |(k, v)| { self.insert(k, v); }); } } impl<'a, K, V, S> Extend<(&'a K, &'a V)> for IndexMap where K: Hash + Eq + Copy, V: Copy, S: BuildHasher, { /// Extend the map with all key-value pairs in the iterable. /// /// See the first extend method for more details. fn extend>(&mut self, iterable: I) { self.extend(iterable.into_iter().map(|(&key, &value)| (key, value))); } } impl Default for IndexMap where S: Default, { /// Return an empty [`IndexMap`] fn default() -> Self { Self::with_capacity_and_hasher(0, S::default()) } } impl PartialEq> for IndexMap where K: Hash + Eq, V1: PartialEq, S1: BuildHasher, S2: BuildHasher, { fn eq(&self, other: &IndexMap) -> bool { if self.len() != other.len() { return false; } self.iter() .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) } } impl Eq for IndexMap where K: Eq + Hash, V: Eq, S: BuildHasher, { } indexmap-2.12.1/src/rayon/map.rs000064400000000000000000000464531046102023000145720ustar 00000000000000//! Parallel iterator types for [`IndexMap`] with [`rayon`][::rayon]. //! //! You will rarely need to interact with this module directly unless you need to name one of the //! iterator types. 
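//!
//! A minimal sketch (this assumes the crate's `rayon` feature is enabled, which is
//! what makes this module available):
//!
//! ```
//! use indexmap::IndexMap;
//! use rayon::prelude::*;
//!
//! let map: IndexMap<i32, i32> = (0..100).map(|i| (i, i * i)).collect();
//! // Parallel iteration still yields items in indexed order for `collect`.
//! let squares: Vec<i32> = map.par_values().copied().collect();
//! assert_eq!(squares[10], 100);
//! ```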
use super::collect; use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; use rayon::prelude::*; use alloc::boxed::Box; use alloc::vec::Vec; use core::cmp::Ordering; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::ops::RangeBounds; use crate::map::Slice; use crate::Bucket; use crate::IndexMap; impl IntoParallelIterator for IndexMap where K: Send, V: Send, { type Item = (K, V); type Iter = IntoParIter; fn into_par_iter(self) -> Self::Iter { IntoParIter { entries: self.into_entries(), } } } impl IntoParallelIterator for Box> where K: Send, V: Send, { type Item = (K, V); type Iter = IntoParIter; fn into_par_iter(self) -> Self::Iter { IntoParIter { entries: self.into_entries(), } } } /// A parallel owning iterator over the entries of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::into_par_iter`] method /// (provided by rayon's [`IntoParallelIterator`] trait). See its documentation for more. pub struct IntoParIter { entries: Vec>, } impl fmt::Debug for IntoParIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.entries.iter().map(Bucket::refs); f.debug_list().entries(iter).finish() } } impl ParallelIterator for IntoParIter { type Item = (K, V); parallel_iterator_methods!(Bucket::key_value); } impl IndexedParallelIterator for IntoParIter { indexed_parallel_iterator_methods!(Bucket::key_value); } impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap where K: Sync, V: Sync, { type Item = (&'a K, &'a V); type Iter = ParIter<'a, K, V>; fn into_par_iter(self) -> Self::Iter { ParIter { entries: self.as_entries(), } } } impl<'a, K, V> IntoParallelIterator for &'a Slice where K: Sync, V: Sync, { type Item = (&'a K, &'a V); type Iter = ParIter<'a, K, V>; fn into_par_iter(self) -> Self::Iter { ParIter { entries: &self.entries, } } } /// A parallel iterator over the entries of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::par_iter`] method /// (provided by rayon's [`IntoParallelRefIterator`] trait). See its documentation for more. /// /// [`IndexMap::par_iter`]: ../struct.IndexMap.html#method.par_iter pub struct ParIter<'a, K, V> { entries: &'a [Bucket], } impl Clone for ParIter<'_, K, V> { fn clone(&self) -> Self { ParIter { ..*self } } } impl fmt::Debug for ParIter<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.entries.iter().map(Bucket::refs); f.debug_list().entries(iter).finish() } } impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { type Item = (&'a K, &'a V); parallel_iterator_methods!(Bucket::refs); } impl IndexedParallelIterator for ParIter<'_, K, V> { indexed_parallel_iterator_methods!(Bucket::refs); } impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap where K: Sync + Send, V: Send, { type Item = (&'a K, &'a mut V); type Iter = ParIterMut<'a, K, V>; fn into_par_iter(self) -> Self::Iter { ParIterMut { entries: self.as_entries_mut(), } } } impl<'a, K, V> IntoParallelIterator for &'a mut Slice where K: Sync + Send, V: Send, { type Item = (&'a K, &'a mut V); type Iter = ParIterMut<'a, K, V>; fn into_par_iter(self) -> Self::Iter { ParIterMut { entries: &mut self.entries, } } } /// A parallel mutable iterator over the entries of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::par_iter_mut`] method /// (provided by rayon's [`IntoParallelRefMutIterator`] trait). See its documentation for more. 
/// /// [`IndexMap::par_iter_mut`]: ../struct.IndexMap.html#method.par_iter_mut pub struct ParIterMut<'a, K, V> { entries: &'a mut [Bucket], } impl fmt::Debug for ParIterMut<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.entries.iter().map(Bucket::refs); f.debug_list().entries(iter).finish() } } impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> { type Item = (&'a K, &'a mut V); parallel_iterator_methods!(Bucket::ref_mut); } impl IndexedParallelIterator for ParIterMut<'_, K, V> { indexed_parallel_iterator_methods!(Bucket::ref_mut); } impl<'a, K, V, S> ParallelDrainRange for &'a mut IndexMap where K: Send, V: Send, { type Item = (K, V); type Iter = ParDrain<'a, K, V>; fn par_drain>(self, range: R) -> Self::Iter { ParDrain { entries: self.core.par_drain(range), } } } /// A parallel draining iterator over the entries of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::par_drain`] method /// (provided by rayon's [`ParallelDrainRange`] trait). See its documentation for more. /// /// [`IndexMap::par_drain`]: ../struct.IndexMap.html#method.par_drain pub struct ParDrain<'a, K: Send, V: Send> { entries: rayon::vec::Drain<'a, Bucket>, } impl ParallelIterator for ParDrain<'_, K, V> { type Item = (K, V); parallel_iterator_methods!(Bucket::key_value); } impl IndexedParallelIterator for ParDrain<'_, K, V> { indexed_parallel_iterator_methods!(Bucket::key_value); } /// Parallel iterator methods and other parallel methods. /// /// The following methods **require crate feature `"rayon"`**. /// /// See also the `IntoParallelIterator` implementations. impl IndexMap where K: Sync, V: Sync, { /// Return a parallel iterator over the keys of the map. /// /// While parallel iterators can process items in any order, their relative order /// in the map is still preserved for operations like `reduce` and `collect`. pub fn par_keys(&self) -> ParKeys<'_, K, V> { ParKeys { entries: self.as_entries(), } } /// Return a parallel iterator over the values of the map. /// /// While parallel iterators can process items in any order, their relative order /// in the map is still preserved for operations like `reduce` and `collect`. pub fn par_values(&self) -> ParValues<'_, K, V> { ParValues { entries: self.as_entries(), } } } /// Parallel iterator methods and other parallel methods. /// /// The following methods **require crate feature `"rayon"`**. /// /// See also the `IntoParallelIterator` implementations. impl Slice where K: Sync, V: Sync, { /// Return a parallel iterator over the keys of the map slice. /// /// While parallel iterators can process items in any order, their relative order /// in the slice is still preserved for operations like `reduce` and `collect`. pub fn par_keys(&self) -> ParKeys<'_, K, V> { ParKeys { entries: &self.entries, } } /// Return a parallel iterator over the values of the map slice. /// /// While parallel iterators can process items in any order, their relative order /// in the slice is still preserved for operations like `reduce` and `collect`. pub fn par_values(&self) -> ParValues<'_, K, V> { ParValues { entries: &self.entries, } } } impl IndexMap where K: Hash + Eq + Sync, V: Sync, S: BuildHasher, { /// Returns `true` if `self` contains all of the same key-value pairs as `other`, /// regardless of each map's indexed order, determined in parallel. 
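///
/// A minimal sketch (illustrative, not from the original source; assumes crate
/// feature `"rayon"`):
///
/// ```
/// use indexmap::IndexMap;
///
/// let a = IndexMap::from([(1, "one"), (2, "two")]);
/// let b = IndexMap::from([(2, "two"), (1, "one")]);
/// let c = IndexMap::from([(1, "one"), (3, "three")]);
/// // Same pairs in a different insertion order still compare equal.
/// assert!(a.par_eq(&b));
/// assert!(!a.par_eq(&c));
/// ```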
pub fn par_eq(&self, other: &IndexMap) -> bool where V: PartialEq, V2: Sync, S2: BuildHasher + Sync, { self.len() == other.len() && self .par_iter() .all(move |(key, value)| other.get(key).map_or(false, |v| *value == *v)) } } /// A parallel iterator over the keys of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::par_keys`] method. /// See its documentation for more. pub struct ParKeys<'a, K, V> { entries: &'a [Bucket], } impl Clone for ParKeys<'_, K, V> { fn clone(&self) -> Self { ParKeys { ..*self } } } impl fmt::Debug for ParKeys<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.entries.iter().map(Bucket::key_ref); f.debug_list().entries(iter).finish() } } impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> { type Item = &'a K; parallel_iterator_methods!(Bucket::key_ref); } impl IndexedParallelIterator for ParKeys<'_, K, V> { indexed_parallel_iterator_methods!(Bucket::key_ref); } /// A parallel iterator over the values of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::par_values`] method. /// See its documentation for more. pub struct ParValues<'a, K, V> { entries: &'a [Bucket], } impl Clone for ParValues<'_, K, V> { fn clone(&self) -> Self { ParValues { ..*self } } } impl fmt::Debug for ParValues<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.entries.iter().map(Bucket::value_ref); f.debug_list().entries(iter).finish() } } impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> { type Item = &'a V; parallel_iterator_methods!(Bucket::value_ref); } impl IndexedParallelIterator for ParValues<'_, K, V> { indexed_parallel_iterator_methods!(Bucket::value_ref); } impl IndexMap where K: Send, V: Send, { /// Return a parallel iterator over mutable references to the values of the map /// /// While parallel iterators can process items in any order, their relative order /// in the map is still preserved for operations like `reduce` and `collect`. pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { ParValuesMut { entries: self.as_entries_mut(), } } } impl Slice where K: Send, V: Send, { /// Return a parallel iterator over mutable references to the the values of the map slice. /// /// While parallel iterators can process items in any order, their relative order /// in the slice is still preserved for operations like `reduce` and `collect`. pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { ParValuesMut { entries: &mut self.entries, } } } impl IndexMap where K: Send, V: Send, { /// Sort the map's key-value pairs in parallel, by the default ordering of the keys. pub fn par_sort_keys(&mut self) where K: Ord, { self.with_entries(|entries| { entries.par_sort_by(|a, b| K::cmp(&a.key, &b.key)); }); } /// Sort the map's key-value pairs in place and in parallel, using the comparison /// function `cmp`. /// /// The comparison function receives two key and value pairs to compare (you /// can sort by keys or values or their combination as needed). pub fn par_sort_by(&mut self, cmp: F) where F: Fn(&K, &V, &K, &V) -> Ordering + Sync, { self.with_entries(|entries| { entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); }); } /// Sort the key-value pairs of the map in parallel and return a by-value parallel /// iterator of the key-value pairs with the result. 
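///
/// A minimal sketch (illustrative, not from the original source; assumes crate
/// feature `"rayon"`):
///
/// ```
/// use indexmap::IndexMap;
/// use rayon::prelude::*;
///
/// let map = IndexMap::from([(2, "b"), (3, "c"), (1, "a")]);
/// // Sort by key in parallel and collect the owned pairs in sorted order.
/// let sorted: Vec<_> = map.par_sorted_by(|k1, _, k2, _| k1.cmp(k2)).collect();
/// assert_eq!(sorted, vec![(1, "a"), (2, "b"), (3, "c")]);
/// ```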
pub fn par_sorted_by(self, cmp: F) -> IntoParIter where F: Fn(&K, &V, &K, &V) -> Ordering + Sync, { let mut entries = self.into_entries(); entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); IntoParIter { entries } } /// Sort the map's key-value pairs in place and in parallel, using a sort-key extraction /// function. pub fn par_sort_by_key(&mut self, sort_key: F) where T: Ord, F: Fn(&K, &V) -> T + Sync, { self.with_entries(move |entries| { entries.par_sort_by_key(move |a| sort_key(&a.key, &a.value)); }); } /// Sort the map's key-value pairs in parallel, by the default ordering of the keys. pub fn par_sort_unstable_keys(&mut self) where K: Ord, { self.with_entries(|entries| { entries.par_sort_unstable_by(|a, b| K::cmp(&a.key, &b.key)); }); } /// Sort the map's key-value pairs in place and in parallel, using the comparison /// function `cmp`. /// /// The comparison function receives two key and value pairs to compare (you /// can sort by keys or values or their combination as needed). pub fn par_sort_unstable_by(&mut self, cmp: F) where F: Fn(&K, &V, &K, &V) -> Ordering + Sync, { self.with_entries(|entries| { entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); }); } /// Sort the key-value pairs of the map in parallel and return a by-value parallel /// iterator of the key-value pairs with the result. pub fn par_sorted_unstable_by(self, cmp: F) -> IntoParIter where F: Fn(&K, &V, &K, &V) -> Ordering + Sync, { let mut entries = self.into_entries(); entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); IntoParIter { entries } } /// Sort the map's key-value pairs in place and in parallel, using a sort-key extraction /// function. pub fn par_sort_unstable_by_key(&mut self, sort_key: F) where T: Ord, F: Fn(&K, &V) -> T + Sync, { self.with_entries(move |entries| { entries.par_sort_unstable_by_key(move |a| sort_key(&a.key, &a.value)); }); } /// Sort the map's key-value pairs in place and in parallel, using a sort-key extraction /// function. pub fn par_sort_by_cached_key(&mut self, sort_key: F) where T: Ord + Send, F: Fn(&K, &V) -> T + Sync, { self.with_entries(move |entries| { entries.par_sort_by_cached_key(move |a| sort_key(&a.key, &a.value)); }); } } /// A parallel mutable iterator over the values of an [`IndexMap`]. /// /// This `struct` is created by the [`IndexMap::par_values_mut`] method. /// See its documentation for more. 
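///
/// A minimal usage sketch (illustrative, not from the original source; assumes crate
/// feature `"rayon"`):
///
/// ```
/// use indexmap::IndexMap;
/// use rayon::prelude::*;
///
/// let mut map = IndexMap::from([("a", 1), ("b", 2), ("c", 3)]);
/// map.par_values_mut().for_each(|value| *value += 100);
/// assert_eq!(map.values().copied().collect::<Vec<_>>(), vec![101, 102, 103]);
/// ```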
pub struct ParValuesMut<'a, K, V> { entries: &'a mut [Bucket], } impl fmt::Debug for ParValuesMut<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.entries.iter().map(Bucket::value_ref); f.debug_list().entries(iter).finish() } } impl<'a, K: Send, V: Send> ParallelIterator for ParValuesMut<'a, K, V> { type Item = &'a mut V; parallel_iterator_methods!(Bucket::value_mut); } impl IndexedParallelIterator for ParValuesMut<'_, K, V> { indexed_parallel_iterator_methods!(Bucket::value_mut); } impl FromParallelIterator<(K, V)> for IndexMap where K: Eq + Hash + Send, V: Send, S: BuildHasher + Default + Send, { fn from_par_iter(iter: I) -> Self where I: IntoParallelIterator, { let list = collect(iter); let len = list.iter().map(Vec::len).sum(); let mut map = Self::with_capacity_and_hasher(len, S::default()); for vec in list { map.extend(vec); } map } } impl ParallelExtend<(K, V)> for IndexMap where K: Eq + Hash + Send, V: Send, S: BuildHasher + Send, { fn par_extend(&mut self, iter: I) where I: IntoParallelIterator, { for vec in collect(iter) { self.extend(vec); } } } impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for IndexMap where K: Copy + Eq + Hash + Send + Sync, V: Copy + Send + Sync, S: BuildHasher + Send, { fn par_extend(&mut self, iter: I) where I: IntoParallelIterator, { for vec in collect(iter) { self.extend(vec); } } } #[cfg(test)] mod tests { use super::*; use std::string::String; #[test] fn insert_order() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut map = IndexMap::new(); for &elt in &insert { map.insert(elt, ()); } assert_eq!(map.par_keys().count(), map.len()); assert_eq!(map.par_keys().count(), insert.len()); insert.par_iter().zip(map.par_keys()).for_each(|(a, b)| { assert_eq!(a, b); }); (0..insert.len()) .into_par_iter() .zip(map.par_keys()) .for_each(|(i, k)| { assert_eq!(map.get_index(i).unwrap().0, k); }); } #[test] fn partial_eq_and_eq() { let mut map_a = IndexMap::new(); map_a.insert(1, "1"); map_a.insert(2, "2"); let mut map_b = map_a.clone(); assert!(map_a.par_eq(&map_b)); map_b.swap_remove(&1); assert!(!map_a.par_eq(&map_b)); map_b.insert(3, "3"); assert!(!map_a.par_eq(&map_b)); let map_c: IndexMap<_, String> = map_b.into_par_iter().map(|(k, v)| (k, v.into())).collect(); assert!(!map_a.par_eq(&map_c)); assert!(!map_c.par_eq(&map_a)); } #[test] fn extend() { let mut map = IndexMap::new(); map.par_extend(vec![(&1, &2), (&3, &4)]); map.par_extend(vec![(5, 6)]); assert_eq!( map.into_par_iter().collect::>(), vec![(1, 2), (3, 4), (5, 6)] ); } #[test] fn keys() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; let map: IndexMap<_, _> = vec.into_par_iter().collect(); let keys: Vec<_> = map.par_keys().copied().collect(); assert_eq!(keys.len(), 3); assert!(keys.contains(&1)); assert!(keys.contains(&2)); assert!(keys.contains(&3)); } #[test] fn values() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; let map: IndexMap<_, _> = vec.into_par_iter().collect(); let values: Vec<_> = map.par_values().copied().collect(); assert_eq!(values.len(), 3); assert!(values.contains(&'a')); assert!(values.contains(&'b')); assert!(values.contains(&'c')); } #[test] fn values_mut() { let vec = vec![(1, 1), (2, 2), (3, 3)]; let mut map: IndexMap<_, _> = vec.into_par_iter().collect(); map.par_values_mut().for_each(|value| *value *= 2); let values: Vec<_> = map.par_values().copied().collect(); assert_eq!(values.len(), 3); assert!(values.contains(&2)); assert!(values.contains(&4)); assert!(values.contains(&6)); } } 
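// A minimal sketch (illustrative, not part of the original source) of building an
// `IndexMap` from a parallel iterator; the per-thread results are gathered in order,
// so the first-seen order of the input is preserved, as the tests above exercise.
// It assumes crate features `"rayon"` and `"std"`:
//
//     use indexmap::IndexMap;
//     use rayon::prelude::*;
//
//     let squares: IndexMap<u32, u32> =
//         (0..8u32).into_par_iter().map(|i| (i, i * i)).collect();
//     assert!(squares.keys().copied().eq(0..8));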
indexmap-2.12.1/src/rayon/mod.rs000064400000000000000000000006261046102023000145640ustar 00000000000000#![cfg_attr(docsrs, doc(cfg(feature = "rayon")))] use rayon::prelude::*; use alloc::collections::LinkedList; use alloc::vec::Vec; pub mod map; pub mod set; // This form of intermediate collection is also how Rayon collects `HashMap`. // Note that the order will also be preserved! fn collect(iter: I) -> LinkedList> { iter.into_par_iter().collect_vec_list() } indexmap-2.12.1/src/rayon/set.rs000064400000000000000000000534251046102023000146050ustar 00000000000000//! Parallel iterator types for [`IndexSet`] with [rayon][::rayon]. //! //! You will rarely need to interact with this module directly unless you need to name one of the //! iterator types. use super::collect; use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer}; use rayon::prelude::*; use alloc::boxed::Box; use alloc::vec::Vec; use core::cmp::Ordering; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::ops::RangeBounds; use crate::set::Slice; use crate::IndexSet; type Bucket = crate::Bucket; impl IntoParallelIterator for IndexSet where T: Send, { type Item = T; type Iter = IntoParIter; fn into_par_iter(self) -> Self::Iter { IntoParIter { entries: self.into_entries(), } } } impl IntoParallelIterator for Box> where T: Send, { type Item = T; type Iter = IntoParIter; fn into_par_iter(self) -> Self::Iter { IntoParIter { entries: self.into_entries(), } } } /// A parallel owning iterator over the items of an [`IndexSet`]. /// /// This `struct` is created by the [`IndexSet::into_par_iter`] method /// (provided by rayon's [`IntoParallelIterator`] trait). See its documentation for more. pub struct IntoParIter { entries: Vec>, } impl fmt::Debug for IntoParIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.entries.iter().map(Bucket::key_ref); f.debug_list().entries(iter).finish() } } impl ParallelIterator for IntoParIter { type Item = T; parallel_iterator_methods!(Bucket::key); } impl IndexedParallelIterator for IntoParIter { indexed_parallel_iterator_methods!(Bucket::key); } impl<'a, T, S> IntoParallelIterator for &'a IndexSet where T: Sync, { type Item = &'a T; type Iter = ParIter<'a, T>; fn into_par_iter(self) -> Self::Iter { ParIter { entries: self.as_entries(), } } } impl<'a, T> IntoParallelIterator for &'a Slice where T: Sync, { type Item = &'a T; type Iter = ParIter<'a, T>; fn into_par_iter(self) -> Self::Iter { ParIter { entries: &self.entries, } } } /// A parallel iterator over the items of an [`IndexSet`]. /// /// This `struct` is created by the [`IndexSet::par_iter`] method /// (provided by rayon's [`IntoParallelRefIterator`] trait). See its documentation for more. 
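///
/// A minimal usage sketch (illustrative, not from the original source; assumes crate
/// feature `"rayon"`):
///
/// ```
/// use indexmap::IndexSet;
/// use rayon::prelude::*;
///
/// let set: IndexSet<i32> = (0..5).collect();
/// // Collecting from the parallel iterator preserves the set's insertion order.
/// let doubled: Vec<i32> = set.par_iter().map(|&x| x * 2).collect();
/// assert_eq!(doubled, vec![0, 2, 4, 6, 8]);
/// ```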
/// /// [`IndexSet::par_iter`]: ../struct.IndexSet.html#method.par_iter pub struct ParIter<'a, T> { entries: &'a [Bucket], } impl Clone for ParIter<'_, T> { fn clone(&self) -> Self { ParIter { ..*self } } } impl fmt::Debug for ParIter<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.entries.iter().map(Bucket::key_ref); f.debug_list().entries(iter).finish() } } impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { type Item = &'a T; parallel_iterator_methods!(Bucket::key_ref); } impl IndexedParallelIterator for ParIter<'_, T> { indexed_parallel_iterator_methods!(Bucket::key_ref); } impl<'a, T, S> ParallelDrainRange for &'a mut IndexSet where T: Send, { type Item = T; type Iter = ParDrain<'a, T>; fn par_drain>(self, range: R) -> Self::Iter { ParDrain { entries: self.map.core.par_drain(range), } } } /// A parallel draining iterator over the items of an [`IndexSet`]. /// /// This `struct` is created by the [`IndexSet::par_drain`] method /// (provided by rayon's [`ParallelDrainRange`] trait). See its documentation for more. /// /// [`IndexSet::par_drain`]: ../struct.IndexSet.html#method.par_drain pub struct ParDrain<'a, T: Send> { entries: rayon::vec::Drain<'a, Bucket>, } impl ParallelIterator for ParDrain<'_, T> { type Item = T; parallel_iterator_methods!(Bucket::key); } impl IndexedParallelIterator for ParDrain<'_, T> { indexed_parallel_iterator_methods!(Bucket::key); } /// Parallel iterator methods and other parallel methods. /// /// The following methods **require crate feature `"rayon"`**. /// /// See also the `IntoParallelIterator` implementations. impl IndexSet where T: Hash + Eq + Sync, S: BuildHasher + Sync, { /// Return a parallel iterator over the values that are in `self` but not `other`. /// /// While parallel iterators can process items in any order, their relative order /// in the `self` set is still preserved for operations like `reduce` and `collect`. pub fn par_difference<'a, S2>( &'a self, other: &'a IndexSet, ) -> ParDifference<'a, T, S, S2> where S2: BuildHasher + Sync, { ParDifference { set1: self, set2: other, } } /// Return a parallel iterator over the values that are in `self` or `other`, /// but not in both. /// /// While parallel iterators can process items in any order, their relative order /// in the sets is still preserved for operations like `reduce` and `collect`. /// Values from `self` are produced in their original order, followed by /// values from `other` in their original order. pub fn par_symmetric_difference<'a, S2>( &'a self, other: &'a IndexSet, ) -> ParSymmetricDifference<'a, T, S, S2> where S2: BuildHasher + Sync, { ParSymmetricDifference { set1: self, set2: other, } } /// Return a parallel iterator over the values that are in both `self` and `other`. /// /// While parallel iterators can process items in any order, their relative order /// in the `self` set is still preserved for operations like `reduce` and `collect`. pub fn par_intersection<'a, S2>( &'a self, other: &'a IndexSet, ) -> ParIntersection<'a, T, S, S2> where S2: BuildHasher + Sync, { ParIntersection { set1: self, set2: other, } } /// Return a parallel iterator over all values that are in `self` or `other`. /// /// While parallel iterators can process items in any order, their relative order /// in the sets is still preserved for operations like `reduce` and `collect`. /// Values from `self` are produced in their original order, followed by /// values that are unique to `other` in their original order. 
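///
/// A minimal sketch (illustrative, not from the original source; assumes crate
/// feature `"rayon"`):
///
/// ```
/// use indexmap::IndexSet;
/// use rayon::prelude::*;
///
/// let a: IndexSet<i32> = [1, 2, 3].into_iter().collect();
/// let b: IndexSet<i32> = [3, 4, 5].into_iter().collect();
/// // `a` in its order, then the values unique to `b` in their order.
/// let union: Vec<i32> = a.par_union(&b).copied().collect();
/// assert_eq!(union, vec![1, 2, 3, 4, 5]);
/// ```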
pub fn par_union<'a, S2>(&'a self, other: &'a IndexSet) -> ParUnion<'a, T, S, S2> where S2: BuildHasher + Sync, { ParUnion { set1: self, set2: other, } } /// Returns `true` if `self` contains all of the same values as `other`, /// regardless of each set's indexed order, determined in parallel. pub fn par_eq(&self, other: &IndexSet) -> bool where S2: BuildHasher + Sync, { self.len() == other.len() && self.par_is_subset(other) } /// Returns `true` if `self` has no elements in common with `other`, /// determined in parallel. pub fn par_is_disjoint(&self, other: &IndexSet) -> bool where S2: BuildHasher + Sync, { if self.len() <= other.len() { self.par_iter().all(move |value| !other.contains(value)) } else { other.par_iter().all(move |value| !self.contains(value)) } } /// Returns `true` if all elements of `other` are contained in `self`, /// determined in parallel. pub fn par_is_superset(&self, other: &IndexSet) -> bool where S2: BuildHasher + Sync, { other.par_is_subset(self) } /// Returns `true` if all elements of `self` are contained in `other`, /// determined in parallel. pub fn par_is_subset(&self, other: &IndexSet) -> bool where S2: BuildHasher + Sync, { self.len() <= other.len() && self.par_iter().all(move |value| other.contains(value)) } } /// A parallel iterator producing elements in the difference of [`IndexSet`]s. /// /// This `struct` is created by the [`IndexSet::par_difference`] method. /// See its documentation for more. pub struct ParDifference<'a, T, S1, S2> { set1: &'a IndexSet, set2: &'a IndexSet, } impl Clone for ParDifference<'_, T, S1, S2> { fn clone(&self) -> Self { ParDifference { ..*self } } } impl fmt::Debug for ParDifference<'_, T, S1, S2> where T: fmt::Debug + Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.set1.difference(self.set2)) .finish() } } impl<'a, T, S1, S2> ParallelIterator for ParDifference<'a, T, S1, S2> where T: Hash + Eq + Sync, S1: BuildHasher + Sync, S2: BuildHasher + Sync, { type Item = &'a T; fn drive_unindexed(self, consumer: C) -> C::Result where C: UnindexedConsumer, { let Self { set1, set2 } = self; set1.par_iter() .filter(move |&item| !set2.contains(item)) .drive_unindexed(consumer) } } /// A parallel iterator producing elements in the intersection of [`IndexSet`]s. /// /// This `struct` is created by the [`IndexSet::par_intersection`] method. /// See its documentation for more. pub struct ParIntersection<'a, T, S1, S2> { set1: &'a IndexSet, set2: &'a IndexSet, } impl Clone for ParIntersection<'_, T, S1, S2> { fn clone(&self) -> Self { ParIntersection { ..*self } } } impl fmt::Debug for ParIntersection<'_, T, S1, S2> where T: fmt::Debug + Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.set1.intersection(self.set2)) .finish() } } impl<'a, T, S1, S2> ParallelIterator for ParIntersection<'a, T, S1, S2> where T: Hash + Eq + Sync, S1: BuildHasher + Sync, S2: BuildHasher + Sync, { type Item = &'a T; fn drive_unindexed(self, consumer: C) -> C::Result where C: UnindexedConsumer, { let Self { set1, set2 } = self; set1.par_iter() .filter(move |&item| set2.contains(item)) .drive_unindexed(consumer) } } /// A parallel iterator producing elements in the symmetric difference of [`IndexSet`]s. /// /// This `struct` is created by the [`IndexSet::par_symmetric_difference`] method. /// See its documentation for more. 
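///
/// A minimal usage sketch (illustrative, not from the original source; assumes crate
/// feature `"rayon"`):
///
/// ```
/// use indexmap::IndexSet;
/// use rayon::prelude::*;
///
/// let a: IndexSet<i32> = [1, 2, 3].into_iter().collect();
/// let b: IndexSet<i32> = [3, 4, 5].into_iter().collect();
/// // Values only in `a` (in order), then values only in `b` (in order).
/// let sym: Vec<i32> = a.par_symmetric_difference(&b).copied().collect();
/// assert_eq!(sym, vec![1, 2, 4, 5]);
/// ```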
pub struct ParSymmetricDifference<'a, T, S1, S2> { set1: &'a IndexSet, set2: &'a IndexSet, } impl Clone for ParSymmetricDifference<'_, T, S1, S2> { fn clone(&self) -> Self { ParSymmetricDifference { ..*self } } } impl fmt::Debug for ParSymmetricDifference<'_, T, S1, S2> where T: fmt::Debug + Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.set1.symmetric_difference(self.set2)) .finish() } } impl<'a, T, S1, S2> ParallelIterator for ParSymmetricDifference<'a, T, S1, S2> where T: Hash + Eq + Sync, S1: BuildHasher + Sync, S2: BuildHasher + Sync, { type Item = &'a T; fn drive_unindexed(self, consumer: C) -> C::Result where C: UnindexedConsumer, { let Self { set1, set2 } = self; set1.par_difference(set2) .chain(set2.par_difference(set1)) .drive_unindexed(consumer) } } /// A parallel iterator producing elements in the union of [`IndexSet`]s. /// /// This `struct` is created by the [`IndexSet::par_union`] method. /// See its documentation for more. pub struct ParUnion<'a, T, S1, S2> { set1: &'a IndexSet, set2: &'a IndexSet, } impl Clone for ParUnion<'_, T, S1, S2> { fn clone(&self) -> Self { ParUnion { ..*self } } } impl fmt::Debug for ParUnion<'_, T, S1, S2> where T: fmt::Debug + Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.set1.union(self.set2)).finish() } } impl<'a, T, S1, S2> ParallelIterator for ParUnion<'a, T, S1, S2> where T: Hash + Eq + Sync, S1: BuildHasher + Sync, S2: BuildHasher + Sync, { type Item = &'a T; fn drive_unindexed(self, consumer: C) -> C::Result where C: UnindexedConsumer, { let Self { set1, set2 } = self; set1.par_iter() .chain(set2.par_difference(set1)) .drive_unindexed(consumer) } } /// Parallel sorting methods. /// /// The following methods **require crate feature `"rayon"`**. impl IndexSet where T: Send, { /// Sort the set's values in parallel by their default ordering. pub fn par_sort(&mut self) where T: Ord, { self.with_entries(|entries| { entries.par_sort_by(|a, b| T::cmp(&a.key, &b.key)); }); } /// Sort the set's values in place and in parallel, using the comparison function `cmp`. pub fn par_sort_by(&mut self, cmp: F) where F: Fn(&T, &T) -> Ordering + Sync, { self.with_entries(|entries| { entries.par_sort_by(move |a, b| cmp(&a.key, &b.key)); }); } /// Sort the values of the set in parallel and return a by-value parallel iterator of /// the values with the result. pub fn par_sorted_by(self, cmp: F) -> IntoParIter where F: Fn(&T, &T) -> Ordering + Sync, { let mut entries = self.into_entries(); entries.par_sort_by(move |a, b| cmp(&a.key, &b.key)); IntoParIter { entries } } /// Sort the set's values in place and in parallel, using a key extraction function. pub fn par_sort_by_key(&mut self, sort_key: F) where K: Ord, F: Fn(&T) -> K + Sync, { self.with_entries(move |entries| { entries.par_sort_by_key(move |a| sort_key(&a.key)); }); } /// Sort the set's values in parallel by their default ordering. pub fn par_sort_unstable(&mut self) where T: Ord, { self.with_entries(|entries| { entries.par_sort_unstable_by(|a, b| T::cmp(&a.key, &b.key)); }); } /// Sort the set's values in place and in parallel, using the comparison function `cmp`. 
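///
/// A minimal sketch (illustrative, not from the original source; assumes crate
/// feature `"rayon"`):
///
/// ```
/// use indexmap::IndexSet;
///
/// let mut set: IndexSet<i32> = [3, 1, 2].into_iter().collect();
/// // Sort in place in descending order.
/// set.par_sort_unstable_by(|a, b| b.cmp(a));
/// assert_eq!(set.iter().copied().collect::<Vec<_>>(), vec![3, 2, 1]);
/// ```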
pub fn par_sort_unstable_by(&mut self, cmp: F) where F: Fn(&T, &T) -> Ordering + Sync, { self.with_entries(|entries| { entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); }); } /// Sort the values of the set in parallel and return a by-value parallel iterator of /// the values with the result. pub fn par_sorted_unstable_by(self, cmp: F) -> IntoParIter where F: Fn(&T, &T) -> Ordering + Sync, { let mut entries = self.into_entries(); entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); IntoParIter { entries } } /// Sort the set's values in place and in parallel, using a key extraction function. pub fn par_sort_unstable_by_key(&mut self, sort_key: F) where K: Ord, F: Fn(&T) -> K + Sync, { self.with_entries(move |entries| { entries.par_sort_unstable_by_key(move |a| sort_key(&a.key)); }); } /// Sort the set's values in place and in parallel, using a key extraction function. pub fn par_sort_by_cached_key(&mut self, sort_key: F) where K: Ord + Send, F: Fn(&T) -> K + Sync, { self.with_entries(move |entries| { entries.par_sort_by_cached_key(move |a| sort_key(&a.key)); }); } } impl FromParallelIterator for IndexSet where T: Eq + Hash + Send, S: BuildHasher + Default + Send, { fn from_par_iter(iter: I) -> Self where I: IntoParallelIterator, { let list = collect(iter); let len = list.iter().map(Vec::len).sum(); let mut set = Self::with_capacity_and_hasher(len, S::default()); for vec in list { set.extend(vec); } set } } impl ParallelExtend for IndexSet where T: Eq + Hash + Send, S: BuildHasher + Send, { fn par_extend(&mut self, iter: I) where I: IntoParallelIterator, { for vec in collect(iter) { self.extend(vec); } } } impl<'a, T: 'a, S> ParallelExtend<&'a T> for IndexSet where T: Copy + Eq + Hash + Send + Sync, S: BuildHasher + Send, { fn par_extend(&mut self, iter: I) where I: IntoParallelIterator, { for vec in collect(iter) { self.extend(vec); } } } #[cfg(test)] mod tests { use super::*; #[test] fn insert_order() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &insert { set.insert(elt); } assert_eq!(set.par_iter().count(), set.len()); assert_eq!(set.par_iter().count(), insert.len()); insert.par_iter().zip(&set).for_each(|(a, b)| { assert_eq!(a, b); }); (0..insert.len()) .into_par_iter() .zip(&set) .for_each(|(i, v)| { assert_eq!(set.get_index(i).unwrap(), v); }); } #[test] fn partial_eq_and_eq() { let mut set_a = IndexSet::new(); set_a.insert(1); set_a.insert(2); let mut set_b = set_a.clone(); assert!(set_a.par_eq(&set_b)); set_b.swap_remove(&1); assert!(!set_a.par_eq(&set_b)); set_b.insert(3); assert!(!set_a.par_eq(&set_b)); let set_c: IndexSet<_> = set_b.into_par_iter().collect(); assert!(!set_a.par_eq(&set_c)); assert!(!set_c.par_eq(&set_a)); } #[test] fn extend() { let mut set = IndexSet::new(); set.par_extend(vec![&1, &2, &3, &4]); set.par_extend(vec![5, 6]); assert_eq!( set.into_par_iter().collect::>(), vec![1, 2, 3, 4, 5, 6] ); } #[test] fn comparisons() { let set_a: IndexSet<_> = (0..3).collect(); let set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = (0..6).collect(); let set_d: IndexSet<_> = (3..9).collect(); assert!(!set_a.par_is_disjoint(&set_a)); assert!(set_a.par_is_subset(&set_a)); assert!(set_a.par_is_superset(&set_a)); assert!(set_a.par_is_disjoint(&set_b)); assert!(set_b.par_is_disjoint(&set_a)); assert!(!set_a.par_is_subset(&set_b)); assert!(!set_b.par_is_subset(&set_a)); assert!(!set_a.par_is_superset(&set_b)); assert!(!set_b.par_is_superset(&set_a)); assert!(!set_a.par_is_disjoint(&set_c)); 
assert!(!set_c.par_is_disjoint(&set_a)); assert!(set_a.par_is_subset(&set_c)); assert!(!set_c.par_is_subset(&set_a)); assert!(!set_a.par_is_superset(&set_c)); assert!(set_c.par_is_superset(&set_a)); assert!(!set_c.par_is_disjoint(&set_d)); assert!(!set_d.par_is_disjoint(&set_c)); assert!(!set_c.par_is_subset(&set_d)); assert!(!set_d.par_is_subset(&set_c)); assert!(!set_c.par_is_superset(&set_d)); assert!(!set_d.par_is_superset(&set_c)); } #[test] fn iter_comparisons() { use std::iter::empty; fn check<'a, I1, I2>(iter1: I1, iter2: I2) where I1: ParallelIterator, I2: Iterator, { let v1: Vec<_> = iter1.copied().collect(); let v2: Vec<_> = iter2.collect(); assert_eq!(v1, v2); } let set_a: IndexSet<_> = (0..3).collect(); let set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = (0..6).collect(); let set_d: IndexSet<_> = (3..9).rev().collect(); check(set_a.par_difference(&set_a), empty()); check(set_a.par_symmetric_difference(&set_a), empty()); check(set_a.par_intersection(&set_a), 0..3); check(set_a.par_union(&set_a), 0..3); check(set_a.par_difference(&set_b), 0..3); check(set_b.par_difference(&set_a), 3..6); check(set_a.par_symmetric_difference(&set_b), 0..6); check(set_b.par_symmetric_difference(&set_a), (3..6).chain(0..3)); check(set_a.par_intersection(&set_b), empty()); check(set_b.par_intersection(&set_a), empty()); check(set_a.par_union(&set_b), 0..6); check(set_b.par_union(&set_a), (3..6).chain(0..3)); check(set_a.par_difference(&set_c), empty()); check(set_c.par_difference(&set_a), 3..6); check(set_a.par_symmetric_difference(&set_c), 3..6); check(set_c.par_symmetric_difference(&set_a), 3..6); check(set_a.par_intersection(&set_c), 0..3); check(set_c.par_intersection(&set_a), 0..3); check(set_a.par_union(&set_c), 0..6); check(set_c.par_union(&set_a), 0..6); check(set_c.par_difference(&set_d), 0..3); check(set_d.par_difference(&set_c), (6..9).rev()); check( set_c.par_symmetric_difference(&set_d), (0..3).chain((6..9).rev()), ); check( set_d.par_symmetric_difference(&set_c), (6..9).rev().chain(0..3), ); check(set_c.par_intersection(&set_d), 3..6); check(set_d.par_intersection(&set_c), (3..6).rev()); check(set_c.par_union(&set_d), (0..6).chain((6..9).rev())); check(set_d.par_union(&set_c), (3..9).rev().chain(0..3)); } } indexmap-2.12.1/src/serde.rs000064400000000000000000000105311046102023000137530ustar 00000000000000#![cfg_attr(docsrs, doc(cfg(feature = "serde")))] use serde_core::de::value::{MapDeserializer, SeqDeserializer}; use serde_core::de::{ Deserialize, Deserializer, Error, IntoDeserializer, MapAccess, SeqAccess, Visitor, }; use serde_core::ser::{Serialize, Serializer}; use core::fmt::{self, Formatter}; use core::hash::{BuildHasher, Hash}; use core::marker::PhantomData; use crate::{Bucket, IndexMap, IndexSet}; /// Limit our preallocated capacity from a deserializer `size_hint()`. /// /// We do account for the `Bucket` overhead from its saved `hash` field, but we don't count the /// `RawTable` allocation or the fact that its raw capacity will be rounded up to a power of two. /// The "max" is an arbitrary choice anyway, not something that needs precise adherence. /// /// This is based on the internal `serde::de::size_hint::cautious(hint)` function. 
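///
/// For example (illustrative, not from the original source): on a typical 64-bit
/// target with `K = V = u64`, a `Bucket` is about 24 bytes (cached hash plus key plus
/// value), so even an absurdly large `size_hint()` is clamped to roughly
/// `1024 * 1024 / 24`, i.e. about 43,690 pre-allocated entries.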
pub(crate) fn cautious_capacity(hint: Option) -> usize { const MAX_PREALLOC_BYTES: usize = 1024 * 1024; Ord::min( hint.unwrap_or(0), MAX_PREALLOC_BYTES / size_of::>(), ) } impl Serialize for IndexMap where K: Serialize, V: Serialize, { fn serialize(&self, serializer: T) -> Result where T: Serializer, { serializer.collect_map(self) } } struct IndexMapVisitor(PhantomData<(K, V, S)>); impl<'de, K, V, S> Visitor<'de> for IndexMapVisitor where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: Default + BuildHasher, { type Value = IndexMap; fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { write!(formatter, "a map") } fn visit_map(self, mut map: A) -> Result where A: MapAccess<'de>, { let capacity = cautious_capacity::(map.size_hint()); let mut values = IndexMap::with_capacity_and_hasher(capacity, S::default()); while let Some((key, value)) = map.next_entry()? { values.insert(key, value); } Ok(values) } } impl<'de, K, V, S> Deserialize<'de> for IndexMap where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: Default + BuildHasher, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { deserializer.deserialize_map(IndexMapVisitor(PhantomData)) } } impl<'de, K, V, S, E> IntoDeserializer<'de, E> for IndexMap where K: IntoDeserializer<'de, E> + Eq + Hash, V: IntoDeserializer<'de, E>, S: BuildHasher, E: Error, { type Deserializer = MapDeserializer<'de, ::IntoIter, E>; fn into_deserializer(self) -> Self::Deserializer { MapDeserializer::new(self.into_iter()) } } impl Serialize for IndexSet where T: Serialize, { fn serialize(&self, serializer: Se) -> Result where Se: Serializer, { serializer.collect_seq(self) } } struct IndexSetVisitor(PhantomData<(T, S)>); impl<'de, T, S> Visitor<'de> for IndexSetVisitor where T: Deserialize<'de> + Eq + Hash, S: Default + BuildHasher, { type Value = IndexSet; fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { write!(formatter, "a set") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de>, { let capacity = cautious_capacity::(seq.size_hint()); let mut values = IndexSet::with_capacity_and_hasher(capacity, S::default()); while let Some(value) = seq.next_element()? { values.insert(value); } Ok(values) } } impl<'de, T, S> Deserialize<'de> for IndexSet where T: Deserialize<'de> + Eq + Hash, S: Default + BuildHasher, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { deserializer.deserialize_seq(IndexSetVisitor(PhantomData)) } } impl<'de, T, S, E> IntoDeserializer<'de, E> for IndexSet where T: IntoDeserializer<'de, E> + Eq + Hash, S: BuildHasher, E: Error, { type Deserializer = SeqDeserializer<::IntoIter, E>; fn into_deserializer(self) -> Self::Deserializer { SeqDeserializer::new(self.into_iter()) } } indexmap-2.12.1/src/set/iter.rs000064400000000000000000000354401046102023000144150ustar 00000000000000use crate::map::{ExtractCore, IndexMapCore}; use super::{Bucket, IndexSet, Slice}; use alloc::vec::{self, Vec}; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::iter::{Chain, FusedIterator}; use core::ops::RangeBounds; use core::slice::Iter as SliceIter; impl<'a, T, S> IntoIterator for &'a IndexSet { type Item = &'a T; type IntoIter = Iter<'a, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl IntoIterator for IndexSet { type Item = T; type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter::new(self.into_entries()) } } /// An iterator over the items of an [`IndexSet`]. 
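///
/// A minimal usage sketch (illustrative, not from the original source):
///
/// ```
/// use indexmap::IndexSet;
///
/// let set: IndexSet<&str> = ["b", "a", "c"].into_iter().collect();
/// // Iteration follows insertion order.
/// let items: Vec<&str> = set.iter().copied().collect();
/// assert_eq!(items, vec!["b", "a", "c"]);
/// ```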
/// /// This `struct` is created by the [`IndexSet::iter`] method. /// See its documentation for more. pub struct Iter<'a, T> { iter: SliceIter<'a, Bucket>, } impl<'a, T> Iter<'a, T> { pub(super) fn new(entries: &'a [Bucket]) -> Self { Self { iter: entries.iter(), } } /// Returns a slice of the remaining entries in the iterator. pub fn as_slice(&self) -> &'a Slice { Slice::from_slice(self.iter.as_slice()) } } impl<'a, T> Iterator for Iter<'a, T> { type Item = &'a T; iterator_methods!(Bucket::key_ref); } impl DoubleEndedIterator for Iter<'_, T> { double_ended_iterator_methods!(Bucket::key_ref); } impl ExactSizeIterator for Iter<'_, T> { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for Iter<'_, T> {} impl Clone for Iter<'_, T> { fn clone(&self) -> Self { Iter { iter: self.iter.clone(), } } } impl fmt::Debug for Iter<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } impl Default for Iter<'_, T> { fn default() -> Self { Self { iter: [].iter() } } } /// An owning iterator over the items of an [`IndexSet`]. /// /// This `struct` is created by the [`IndexSet::into_iter`] method /// (provided by the [`IntoIterator`] trait). See its documentation for more. #[derive(Clone)] pub struct IntoIter { iter: vec::IntoIter>, } impl IntoIter { pub(super) fn new(entries: Vec>) -> Self { Self { iter: entries.into_iter(), } } /// Returns a slice of the remaining entries in the iterator. pub fn as_slice(&self) -> &Slice { Slice::from_slice(self.iter.as_slice()) } } impl Iterator for IntoIter { type Item = T; iterator_methods!(Bucket::key); } impl DoubleEndedIterator for IntoIter { double_ended_iterator_methods!(Bucket::key); } impl ExactSizeIterator for IntoIter { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for IntoIter {} impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::key_ref); f.debug_list().entries(iter).finish() } } impl Default for IntoIter { fn default() -> Self { Self { iter: Vec::new().into_iter(), } } } /// A draining iterator over the items of an [`IndexSet`]. /// /// This `struct` is created by the [`IndexSet::drain`] method. /// See its documentation for more. pub struct Drain<'a, T> { iter: vec::Drain<'a, Bucket>, } impl<'a, T> Drain<'a, T> { pub(super) fn new(iter: vec::Drain<'a, Bucket>) -> Self { Self { iter } } /// Returns a slice of the remaining entries in the iterator. pub fn as_slice(&self) -> &Slice { Slice::from_slice(self.iter.as_slice()) } } impl Iterator for Drain<'_, T> { type Item = T; iterator_methods!(Bucket::key); } impl DoubleEndedIterator for Drain<'_, T> { double_ended_iterator_methods!(Bucket::key); } impl ExactSizeIterator for Drain<'_, T> { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for Drain<'_, T> {} impl fmt::Debug for Drain<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::key_ref); f.debug_list().entries(iter).finish() } } /// A lazy iterator producing elements in the difference of [`IndexSet`]s. /// /// This `struct` is created by the [`IndexSet::difference`] method. /// See its documentation for more. 
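///
/// A minimal usage sketch (illustrative, not from the original source):
///
/// ```
/// use indexmap::IndexSet;
///
/// let a: IndexSet<i32> = [1, 2, 3].into_iter().collect();
/// let b: IndexSet<i32> = [2, 4].into_iter().collect();
/// // Values of `a` that are not in `b`, in `a`'s order.
/// let diff: Vec<i32> = a.difference(&b).copied().collect();
/// assert_eq!(diff, vec![1, 3]);
/// ```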
pub struct Difference<'a, T, S> { iter: Iter<'a, T>, other: &'a IndexSet, } impl<'a, T, S> Difference<'a, T, S> { pub(super) fn new(set: &'a IndexSet, other: &'a IndexSet) -> Self { Self { iter: set.iter(), other, } } } impl<'a, T, S> Iterator for Difference<'a, T, S> where T: Eq + Hash, S: BuildHasher, { type Item = &'a T; fn next(&mut self) -> Option { while let Some(item) = self.iter.next() { if !self.other.contains(item) { return Some(item); } } None } fn size_hint(&self) -> (usize, Option) { (0, self.iter.size_hint().1) } } impl DoubleEndedIterator for Difference<'_, T, S> where T: Eq + Hash, S: BuildHasher, { fn next_back(&mut self) -> Option { while let Some(item) = self.iter.next_back() { if !self.other.contains(item) { return Some(item); } } None } } impl FusedIterator for Difference<'_, T, S> where T: Eq + Hash, S: BuildHasher, { } impl Clone for Difference<'_, T, S> { fn clone(&self) -> Self { Difference { iter: self.iter.clone(), ..*self } } } impl fmt::Debug for Difference<'_, T, S> where T: fmt::Debug + Eq + Hash, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A lazy iterator producing elements in the intersection of [`IndexSet`]s. /// /// This `struct` is created by the [`IndexSet::intersection`] method. /// See its documentation for more. pub struct Intersection<'a, T, S> { iter: Iter<'a, T>, other: &'a IndexSet, } impl<'a, T, S> Intersection<'a, T, S> { pub(super) fn new(set: &'a IndexSet, other: &'a IndexSet) -> Self { Self { iter: set.iter(), other, } } } impl<'a, T, S> Iterator for Intersection<'a, T, S> where T: Eq + Hash, S: BuildHasher, { type Item = &'a T; fn next(&mut self) -> Option { while let Some(item) = self.iter.next() { if self.other.contains(item) { return Some(item); } } None } fn size_hint(&self) -> (usize, Option) { (0, self.iter.size_hint().1) } } impl DoubleEndedIterator for Intersection<'_, T, S> where T: Eq + Hash, S: BuildHasher, { fn next_back(&mut self) -> Option { while let Some(item) = self.iter.next_back() { if self.other.contains(item) { return Some(item); } } None } } impl FusedIterator for Intersection<'_, T, S> where T: Eq + Hash, S: BuildHasher, { } impl Clone for Intersection<'_, T, S> { fn clone(&self) -> Self { Intersection { iter: self.iter.clone(), ..*self } } } impl fmt::Debug for Intersection<'_, T, S> where T: fmt::Debug + Eq + Hash, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A lazy iterator producing elements in the symmetric difference of [`IndexSet`]s. /// /// This `struct` is created by the [`IndexSet::symmetric_difference`] method. /// See its documentation for more. 
pub struct SymmetricDifference<'a, T, S1, S2> { iter: Chain, Difference<'a, T, S1>>, } impl<'a, T, S1, S2> SymmetricDifference<'a, T, S1, S2> where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { pub(super) fn new(set1: &'a IndexSet, set2: &'a IndexSet) -> Self { let diff1 = set1.difference(set2); let diff2 = set2.difference(set1); Self { iter: diff1.chain(diff2), } } } impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { type Item = &'a T; fn next(&mut self) -> Option { self.iter.next() } fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } fn fold(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { self.iter.fold(init, f) } } impl DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2> where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn next_back(&mut self) -> Option { self.iter.next_back() } fn rfold(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { self.iter.rfold(init, f) } } impl FusedIterator for SymmetricDifference<'_, T, S1, S2> where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { } impl Clone for SymmetricDifference<'_, T, S1, S2> { fn clone(&self) -> Self { SymmetricDifference { iter: self.iter.clone(), } } } impl fmt::Debug for SymmetricDifference<'_, T, S1, S2> where T: fmt::Debug + Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A lazy iterator producing elements in the union of [`IndexSet`]s. /// /// This `struct` is created by the [`IndexSet::union`] method. /// See its documentation for more. pub struct Union<'a, T, S> { iter: Chain, Difference<'a, T, S>>, } impl<'a, T, S> Union<'a, T, S> where T: Eq + Hash, S: BuildHasher, { pub(super) fn new(set1: &'a IndexSet, set2: &'a IndexSet) -> Self where S2: BuildHasher, { Self { iter: set1.iter().chain(set2.difference(set1)), } } } impl<'a, T, S> Iterator for Union<'a, T, S> where T: Eq + Hash, S: BuildHasher, { type Item = &'a T; fn next(&mut self) -> Option { self.iter.next() } fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } fn fold(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { self.iter.fold(init, f) } } impl DoubleEndedIterator for Union<'_, T, S> where T: Eq + Hash, S: BuildHasher, { fn next_back(&mut self) -> Option { self.iter.next_back() } fn rfold(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B, { self.iter.rfold(init, f) } } impl FusedIterator for Union<'_, T, S> where T: Eq + Hash, S: BuildHasher, { } impl Clone for Union<'_, T, S> { fn clone(&self) -> Self { Union { iter: self.iter.clone(), } } } impl fmt::Debug for Union<'_, T, S> where T: fmt::Debug + Eq + Hash, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A splicing iterator for `IndexSet`. /// /// This `struct` is created by [`IndexSet::splice()`]. /// See its documentation for more. 
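///
/// A minimal usage sketch (illustrative, not from the original source):
///
/// ```
/// use indexmap::IndexSet;
///
/// let mut set: IndexSet<i32> = (0..5).collect();
/// // Replace the entries at indices 1..3, collecting the removed values.
/// let removed: Vec<i32> = set.splice(1..3, [10, 11]).collect();
/// assert_eq!(removed, vec![1, 2]);
/// assert_eq!(set.iter().copied().collect::<Vec<_>>(), vec![0, 10, 11, 3, 4]);
/// ```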
pub struct Splice<'a, I, T, S> where I: Iterator, T: Hash + Eq, S: BuildHasher, { iter: crate::map::Splice<'a, UnitValue, T, (), S>, } impl<'a, I, T, S> Splice<'a, I, T, S> where I: Iterator, T: Hash + Eq, S: BuildHasher, { #[track_caller] pub(super) fn new(set: &'a mut IndexSet, range: R, replace_with: I) -> Self where R: RangeBounds, { Self { iter: set.map.splice(range, UnitValue(replace_with)), } } } impl Iterator for Splice<'_, I, T, S> where I: Iterator, T: Hash + Eq, S: BuildHasher, { type Item = T; fn next(&mut self) -> Option { Some(self.iter.next()?.0) } fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } } impl DoubleEndedIterator for Splice<'_, I, T, S> where I: Iterator, T: Hash + Eq, S: BuildHasher, { fn next_back(&mut self) -> Option { Some(self.iter.next_back()?.0) } } impl ExactSizeIterator for Splice<'_, I, T, S> where I: Iterator, T: Hash + Eq, S: BuildHasher, { fn len(&self) -> usize { self.iter.len() } } impl FusedIterator for Splice<'_, I, T, S> where I: Iterator, T: Hash + Eq, S: BuildHasher, { } struct UnitValue(I); impl Iterator for UnitValue { type Item = (I::Item, ()); fn next(&mut self) -> Option { self.0.next().map(|x| (x, ())) } } impl fmt::Debug for Splice<'_, I, T, S> where I: fmt::Debug + Iterator, T: fmt::Debug + Hash + Eq, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.iter, f) } } impl fmt::Debug for UnitValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.0, f) } } /// An extracting iterator for `IndexSet`. /// /// This `struct` is created by [`IndexSet::extract_if()`]. /// See its documentation for more. pub struct ExtractIf<'a, T, F> { inner: ExtractCore<'a, T, ()>, pred: F, } impl ExtractIf<'_, T, F> { #[track_caller] pub(super) fn new(core: &mut IndexMapCore, range: R, pred: F) -> ExtractIf<'_, T, F> where R: RangeBounds, F: FnMut(&T) -> bool, { ExtractIf { inner: core.extract(range), pred, } } } impl Iterator for ExtractIf<'_, T, F> where F: FnMut(&T) -> bool, { type Item = T; fn next(&mut self) -> Option { self.inner .extract_if(|bucket| (self.pred)(bucket.key_ref())) .map(Bucket::key) } fn size_hint(&self) -> (usize, Option) { (0, Some(self.inner.remaining())) } } impl FusedIterator for ExtractIf<'_, T, F> where F: FnMut(&T) -> bool {} impl fmt::Debug for ExtractIf<'_, T, F> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ExtractIf").finish_non_exhaustive() } } indexmap-2.12.1/src/set/mutable.rs000064400000000000000000000050001046102023000150700ustar 00000000000000use core::hash::{BuildHasher, Hash}; use super::{Equivalent, IndexSet}; use crate::map::MutableKeys; /// Opt-in mutable access to [`IndexSet`] values. /// /// These methods expose `&mut T`, mutable references to the value as it is stored /// in the set. /// You are allowed to modify the values in the set **if the modification /// does not change the value's hash and equality**. /// /// If values are modified erroneously, you can no longer look them up. /// This is sound (memory safe) but a logical error hazard (just like /// implementing `PartialEq`, `Eq`, or `Hash` incorrectly would be). /// /// `use` this trait to enable its methods for `IndexSet`. /// /// This trait is sealed and cannot be implemented for types outside this crate. #[expect(private_bounds)] pub trait MutableValues: Sealed { type Value; /// Return item index and mutable reference to the value /// /// Computes in **O(1)** time (average). 
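///
/// A minimal usage sketch (illustrative, not from the original source; it assumes the
/// trait is importable as `indexmap::set::MutableValues`). Only state that does not
/// take part in `Hash`/`Eq` may be modified:
///
/// ```
/// use core::hash::{Hash, Hasher};
/// use indexmap::set::MutableValues;
/// use indexmap::IndexSet;
///
/// struct Entry { name: &'static str, count: u32 }
/// // Hash and equality depend only on `name`, so `count` is safe to mutate.
/// impl Hash for Entry {
///     fn hash<H: Hasher>(&self, state: &mut H) { self.name.hash(state); }
/// }
/// impl PartialEq for Entry {
///     fn eq(&self, other: &Self) -> bool { self.name == other.name }
/// }
/// impl Eq for Entry {}
///
/// let mut set = IndexSet::new();
/// set.insert(Entry { name: "a", count: 0 });
/// if let Some((index, entry)) = set.get_full_mut2(&Entry { name: "a", count: 0 }) {
///     assert_eq!(index, 0);
///     entry.count += 1;
/// }
/// assert_eq!(set.get_index(0).unwrap().count, 1);
/// ```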
fn get_full_mut2(&mut self, value: &Q) -> Option<(usize, &mut Self::Value)> where Q: ?Sized + Hash + Equivalent; /// Return mutable reference to the value at an index. /// /// Valid indices are `0 <= index < self.len()`. /// /// Computes in **O(1)** time. fn get_index_mut2(&mut self, index: usize) -> Option<&mut Self::Value>; /// Scan through each value in the set and keep those where the /// closure `keep` returns `true`. /// /// The values are visited in order, and remaining values keep their order. /// /// Computes in **O(n)** time (average). fn retain2(&mut self, keep: F) where F: FnMut(&mut Self::Value) -> bool; } /// Opt-in mutable access to [`IndexSet`] values. /// /// See [`MutableValues`] for more information. impl MutableValues for IndexSet where S: BuildHasher, { type Value = T; fn get_full_mut2(&mut self, value: &Q) -> Option<(usize, &mut T)> where Q: ?Sized + Hash + Equivalent, { match self.map.get_full_mut2(value) { Some((index, value, ())) => Some((index, value)), None => None, } } fn get_index_mut2(&mut self, index: usize) -> Option<&mut T> { match self.map.get_index_mut2(index) { Some((value, ())) => Some(value), None => None, } } fn retain2(&mut self, mut keep: F) where F: FnMut(&mut T) -> bool, { self.map.retain2(move |value, ()| keep(value)); } } trait Sealed {} impl Sealed for IndexSet {} indexmap-2.12.1/src/set/slice.rs000064400000000000000000000263111046102023000145460ustar 00000000000000use super::{Bucket, IndexSet, IntoIter, Iter}; use crate::util::{slice_eq, try_simplify_range}; use alloc::boxed::Box; use alloc::vec::Vec; use core::cmp::Ordering; use core::fmt; use core::hash::{Hash, Hasher}; use core::ops::{self, Bound, Index, RangeBounds}; /// A dynamically-sized slice of values in an [`IndexSet`]. /// /// This supports indexed operations much like a `[T]` slice, /// but not any hashed operations on the values. /// /// Unlike `IndexSet`, `Slice` does consider the order for [`PartialEq`] /// and [`Eq`], and it also implements [`PartialOrd`], [`Ord`], and [`Hash`]. #[repr(transparent)] pub struct Slice { pub(crate) entries: [Bucket], } // SAFETY: `Slice` is a transparent wrapper around `[Bucket]`, // and reference lifetimes are bound together in function signatures. #[allow(unsafe_code)] impl Slice { pub(super) const fn from_slice(entries: &[Bucket]) -> &Self { unsafe { &*(entries as *const [Bucket] as *const Self) } } pub(super) fn from_boxed(entries: Box<[Bucket]>) -> Box { unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) } } fn into_boxed(self: Box) -> Box<[Bucket]> { unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket]) } } } impl Slice { pub(crate) fn into_entries(self: Box) -> Vec> { self.into_boxed().into_vec() } /// Returns an empty slice. pub const fn new<'a>() -> &'a Self { Self::from_slice(&[]) } /// Return the number of elements in the set slice. pub const fn len(&self) -> usize { self.entries.len() } /// Returns true if the set slice contains no elements. pub const fn is_empty(&self) -> bool { self.entries.is_empty() } /// Get a value by index. /// /// Valid indices are `0 <= index < self.len()`. pub fn get_index(&self, index: usize) -> Option<&T> { self.entries.get(index).map(Bucket::key_ref) } /// Returns a slice of values in the given range of indices. /// /// Valid indices are `0 <= index < self.len()`. pub fn get_range>(&self, range: R) -> Option<&Self> { let range = try_simplify_range(range, self.entries.len())?; self.entries.get(range).map(Self::from_slice) } /// Get the first value. 
pub fn first(&self) -> Option<&T> { self.entries.first().map(Bucket::key_ref) } /// Get the last value. pub fn last(&self) -> Option<&T> { self.entries.last().map(Bucket::key_ref) } /// Divides one slice into two at an index. /// /// ***Panics*** if `index > len`. #[track_caller] pub fn split_at(&self, index: usize) -> (&Self, &Self) { let (first, second) = self.entries.split_at(index); (Self::from_slice(first), Self::from_slice(second)) } /// Returns the first value and the rest of the slice, /// or `None` if it is empty. pub fn split_first(&self) -> Option<(&T, &Self)> { if let [first, rest @ ..] = &self.entries { Some((&first.key, Self::from_slice(rest))) } else { None } } /// Returns the last value and the rest of the slice, /// or `None` if it is empty. pub fn split_last(&self) -> Option<(&T, &Self)> { if let [rest @ .., last] = &self.entries { Some((&last.key, Self::from_slice(rest))) } else { None } } /// Return an iterator over the values of the set slice. pub fn iter(&self) -> Iter<'_, T> { Iter::new(&self.entries) } /// Search over a sorted set for a value. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. See [`slice::binary_search`] for more details. /// /// Computes in **O(log(n))** time, which is notably less scalable than looking the value up in /// the set this is a slice from using [`IndexSet::get_index_of`], but this can also position /// missing values. pub fn binary_search(&self, x: &T) -> Result where T: Ord, { self.binary_search_by(|p| p.cmp(x)) } /// Search over a sorted set with a comparator function. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. See [`slice::binary_search_by`] for more details. /// /// Computes in **O(log(n))** time. #[inline] pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result where F: FnMut(&'a T) -> Ordering, { self.entries.binary_search_by(move |a| f(&a.key)) } /// Search over a sorted set with an extraction function. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. /// /// Computes in **O(log(n))** time. #[inline] pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result where F: FnMut(&'a T) -> B, B: Ord, { self.binary_search_by(|k| f(k).cmp(b)) } /// Checks if the values of this slice are sorted. #[inline] pub fn is_sorted(&self) -> bool where T: PartialOrd, { self.entries.is_sorted_by(|a, b| a.key <= b.key) } /// Checks if this slice is sorted using the given comparator function. #[inline] pub fn is_sorted_by<'a, F>(&'a self, mut cmp: F) -> bool where F: FnMut(&'a T, &'a T) -> bool, { self.entries.is_sorted_by(move |a, b| cmp(&a.key, &b.key)) } /// Checks if this slice is sorted using the given sort-key function. #[inline] pub fn is_sorted_by_key<'a, F, K>(&'a self, mut sort_key: F) -> bool where F: FnMut(&'a T) -> K, K: PartialOrd, { self.entries.is_sorted_by_key(move |a| sort_key(&a.key)) } /// Returns the index of the partition point of a sorted set according to the given predicate /// (the index of the first element of the second partition). /// /// See [`slice::partition_point`] for more details. /// /// Computes in **O(log(n))** time. #[must_use] pub fn partition_point
<P>
(&self, mut pred: P) -> usize where P: FnMut(&T) -> bool, { self.entries.partition_point(move |a| pred(&a.key)) } } impl<'a, T> IntoIterator for &'a Slice { type IntoIter = Iter<'a, T>; type Item = &'a T; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl IntoIterator for Box> { type IntoIter = IntoIter; type Item = T; fn into_iter(self) -> Self::IntoIter { IntoIter::new(self.into_entries()) } } impl Default for &'_ Slice { fn default() -> Self { Slice::from_slice(&[]) } } impl Default for Box> { fn default() -> Self { Slice::from_boxed(Box::default()) } } impl Clone for Box> { fn clone(&self) -> Self { Slice::from_boxed(self.entries.to_vec().into_boxed_slice()) } } impl From<&Slice> for Box> { fn from(slice: &Slice) -> Self { Slice::from_boxed(Box::from(&slice.entries)) } } impl fmt::Debug for Slice { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self).finish() } } impl PartialEq> for Slice where T: PartialEq, { fn eq(&self, other: &Slice) -> bool { slice_eq(&self.entries, &other.entries, |b1, b2| b1.key == b2.key) } } impl PartialEq<[U]> for Slice where T: PartialEq, { fn eq(&self, other: &[U]) -> bool { slice_eq(&self.entries, other, |b, o| b.key == *o) } } impl PartialEq> for [T] where T: PartialEq, { fn eq(&self, other: &Slice) -> bool { slice_eq(self, &other.entries, |o, b| *o == b.key) } } impl PartialEq<[U; N]> for Slice where T: PartialEq, { fn eq(&self, other: &[U; N]) -> bool { >::eq(self, other) } } impl PartialEq> for [T; N] where T: PartialEq, { fn eq(&self, other: &Slice) -> bool { <[T] as PartialEq>>::eq(self, other) } } impl Eq for Slice {} impl PartialOrd for Slice { fn partial_cmp(&self, other: &Self) -> Option { self.iter().partial_cmp(other) } } impl Ord for Slice { fn cmp(&self, other: &Self) -> Ordering { self.iter().cmp(other) } } impl Hash for Slice { fn hash(&self, state: &mut H) { self.len().hash(state); for value in self { value.hash(state); } } } impl Index for Slice { type Output = T; fn index(&self, index: usize) -> &Self::Output { &self.entries[index].key } } // We can't have `impl> Index` because that conflicts with `Index`. // Instead, we repeat the implementations for all the core range types. macro_rules! 
impl_index { ($($range:ty),*) => {$( impl Index<$range> for IndexSet { type Output = Slice; fn index(&self, range: $range) -> &Self::Output { Slice::from_slice(&self.as_entries()[range]) } } impl Index<$range> for Slice { type Output = Self; fn index(&self, range: $range) -> &Self::Output { Slice::from_slice(&self.entries[range]) } } )*} } impl_index!( ops::Range, ops::RangeFrom, ops::RangeFull, ops::RangeInclusive, ops::RangeTo, ops::RangeToInclusive, (Bound, Bound) ); #[cfg(test)] mod tests { use super::*; #[test] fn slice_index() { fn check(vec_slice: &[i32], set_slice: &Slice, sub_slice: &Slice) { assert_eq!(set_slice as *const _, sub_slice as *const _); itertools::assert_equal(vec_slice, set_slice); } let vec: Vec = (0..10).map(|i| i * i).collect(); let set: IndexSet = vec.iter().cloned().collect(); let slice = set.as_slice(); // RangeFull check(&vec[..], &set[..], &slice[..]); for i in 0usize..10 { // Index assert_eq!(vec[i], set[i]); assert_eq!(vec[i], slice[i]); // RangeFrom check(&vec[i..], &set[i..], &slice[i..]); // RangeTo check(&vec[..i], &set[..i], &slice[..i]); // RangeToInclusive check(&vec[..=i], &set[..=i], &slice[..=i]); // (Bound, Bound) let bounds = (Bound::Excluded(i), Bound::Unbounded); check(&vec[i + 1..], &set[bounds], &slice[bounds]); for j in i..=10 { // Range check(&vec[i..j], &set[i..j], &slice[i..j]); } for j in i..10 { // RangeInclusive check(&vec[i..=j], &set[i..=j], &slice[i..=j]); } } } } indexmap-2.12.1/src/set/tests.rs000064400000000000000000000732721046102023000146210ustar 00000000000000use super::*; use std::string::String; #[test] fn it_works() { let mut set = IndexSet::new(); assert_eq!(set.is_empty(), true); set.insert(1); set.insert(1); assert_eq!(set.len(), 1); assert!(set.get(&1).is_some()); assert_eq!(set.is_empty(), false); } #[test] fn new() { let set = IndexSet::::new(); println!("{:?}", set); assert_eq!(set.capacity(), 0); assert_eq!(set.len(), 0); assert_eq!(set.is_empty(), true); } #[test] fn insert() { let insert = [0, 4, 2, 12, 8, 7, 11, 5]; let not_present = [1, 3, 6, 9, 10]; let mut set = IndexSet::with_capacity(insert.len()); for (i, &elt) in insert.iter().enumerate() { assert_eq!(set.len(), i); set.insert(elt); assert_eq!(set.len(), i + 1); assert_eq!(set.get(&elt), Some(&elt)); } println!("{:?}", set); for &elt in ¬_present { assert!(set.get(&elt).is_none()); } } #[test] fn insert_full() { let insert = vec![9, 2, 7, 1, 4, 6, 13]; let present = vec![1, 6, 2]; let mut set = IndexSet::with_capacity(insert.len()); for (i, &elt) in insert.iter().enumerate() { assert_eq!(set.len(), i); let (index, success) = set.insert_full(elt); assert!(success); assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); assert_eq!(set.len(), i + 1); } let len = set.len(); for &elt in &present { let (index, success) = set.insert_full(elt); assert!(!success); assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); assert_eq!(set.len(), len); } } #[test] fn insert_2() { let mut set = IndexSet::with_capacity(16); let mut values = vec![]; values.extend(0..16); values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); for &i in &values { let old_set = set.clone(); set.insert(i); for value in old_set.iter() { if set.get(value).is_none() { println!("old_set: {:?}", old_set); println!("set: {:?}", set); panic!("did not find {} in set", value); } } } for &i in &values { assert!(set.get(&i).is_some(), "did not find {}", i); } } #[test] fn insert_dup() { let mut elements = vec![0, 2, 4, 6, 8]; let mut set: IndexSet = elements.drain(..).collect(); { let (i, v) = 
set.get_full(&0).unwrap(); assert_eq!(set.len(), 5); assert_eq!(i, 0); assert_eq!(*v, 0); } { let inserted = set.insert(0); let (i, v) = set.get_full(&0).unwrap(); assert_eq!(set.len(), 5); assert_eq!(inserted, false); assert_eq!(i, 0); assert_eq!(*v, 0); } } #[test] fn insert_order() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &insert { set.insert(elt); } assert_eq!(set.iter().count(), set.len()); assert_eq!(set.iter().count(), insert.len()); for (a, b) in insert.iter().zip(set.iter()) { assert_eq!(a, b); } for (i, v) in (0..insert.len()).zip(set.iter()) { assert_eq!(set.get_index(i).unwrap(), v); } } #[test] fn shift_insert() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &insert { set.shift_insert(0, elt); } assert_eq!(set.iter().count(), set.len()); assert_eq!(set.iter().count(), insert.len()); for (a, b) in insert.iter().rev().zip(set.iter()) { assert_eq!(a, b); } for (i, v) in (0..insert.len()).zip(set.iter()) { assert_eq!(set.get_index(i).unwrap(), v); } // "insert" that moves an existing entry set.shift_insert(0, insert[0]); assert_eq!(set.iter().count(), insert.len()); assert_eq!(insert[0], set[0]); for (a, b) in insert[1..].iter().rev().zip(set.iter().skip(1)) { assert_eq!(a, b); } } #[test] fn replace() { let replace = [0, 4, 2, 12, 8, 7, 11, 5]; let not_present = [1, 3, 6, 9, 10]; let mut set = IndexSet::with_capacity(replace.len()); for (i, &elt) in replace.iter().enumerate() { assert_eq!(set.len(), i); set.replace(elt); assert_eq!(set.len(), i + 1); assert_eq!(set.get(&elt), Some(&elt)); } println!("{:?}", set); for &elt in ¬_present { assert!(set.get(&elt).is_none()); } } #[test] fn replace_full() { let replace = vec![9, 2, 7, 1, 4, 6, 13]; let present = vec![1, 6, 2]; let mut set = IndexSet::with_capacity(replace.len()); for (i, &elt) in replace.iter().enumerate() { assert_eq!(set.len(), i); let (index, replaced) = set.replace_full(elt); assert!(replaced.is_none()); assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); assert_eq!(set.len(), i + 1); } let len = set.len(); for &elt in &present { let (index, replaced) = set.replace_full(elt); assert_eq!(Some(elt), replaced); assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); assert_eq!(set.len(), len); } } #[test] fn replace_2() { let mut set = IndexSet::with_capacity(16); let mut values = vec![]; values.extend(0..16); values.extend(if cfg!(miri) { 32..64 } else { 128..267 }); for &i in &values { let old_set = set.clone(); set.replace(i); for value in old_set.iter() { if set.get(value).is_none() { println!("old_set: {:?}", old_set); println!("set: {:?}", set); panic!("did not find {} in set", value); } } } for &i in &values { assert!(set.get(&i).is_some(), "did not find {}", i); } } #[test] fn replace_dup() { let mut elements = vec![0, 2, 4, 6, 8]; let mut set: IndexSet = elements.drain(..).collect(); { let (i, v) = set.get_full(&0).unwrap(); assert_eq!(set.len(), 5); assert_eq!(i, 0); assert_eq!(*v, 0); } { let replaced = set.replace(0); let (i, v) = set.get_full(&0).unwrap(); assert_eq!(set.len(), 5); assert_eq!(replaced, Some(0)); assert_eq!(i, 0); assert_eq!(*v, 0); } } #[test] fn replace_order() { let replace = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &replace { set.replace(elt); } assert_eq!(set.iter().count(), set.len()); assert_eq!(set.iter().count(), replace.len()); for (a, b) in replace.iter().zip(set.iter()) { assert_eq!(a, b); } for (i, v) 
in (0..replace.len()).zip(set.iter()) { assert_eq!(set.get_index(i).unwrap(), v); } } #[test] fn replace_change() { // Check pointers to make sure it really changes let mut set = indexset!(vec![42]); let old_ptr = set[0].as_ptr(); let new = set[0].clone(); let new_ptr = new.as_ptr(); assert_ne!(old_ptr, new_ptr); let replaced = set.replace(new).unwrap(); assert_eq!(replaced.as_ptr(), old_ptr); } #[test] fn grow() { let insert = [0, 4, 2, 12, 8, 7, 11]; let not_present = [1, 3, 6, 9, 10]; let mut set = IndexSet::with_capacity(insert.len()); for (i, &elt) in insert.iter().enumerate() { assert_eq!(set.len(), i); set.insert(elt); assert_eq!(set.len(), i + 1); assert_eq!(set.get(&elt), Some(&elt)); } println!("{:?}", set); for &elt in &insert { set.insert(elt * 10); } for &elt in &insert { set.insert(elt * 100); } for (i, &elt) in insert.iter().cycle().enumerate().take(100) { set.insert(elt * 100 + i as i32); } println!("{:?}", set); for &elt in ¬_present { assert!(set.get(&elt).is_none()); } } #[test] fn reserve() { let mut set = IndexSet::::new(); assert_eq!(set.capacity(), 0); set.reserve(100); let capacity = set.capacity(); assert!(capacity >= 100); for i in 0..capacity { assert_eq!(set.len(), i); set.insert(i); assert_eq!(set.len(), i + 1); assert_eq!(set.capacity(), capacity); assert_eq!(set.get(&i), Some(&i)); } set.insert(capacity); assert_eq!(set.len(), capacity + 1); assert!(set.capacity() > capacity); assert_eq!(set.get(&capacity), Some(&capacity)); } #[test] fn try_reserve() { let mut set = IndexSet::::new(); assert_eq!(set.capacity(), 0); assert_eq!(set.try_reserve(100), Ok(())); assert!(set.capacity() >= 100); assert!(set.try_reserve(usize::MAX).is_err()); } #[test] fn shrink_to_fit() { let mut set = IndexSet::::new(); assert_eq!(set.capacity(), 0); for i in 0..100 { assert_eq!(set.len(), i); set.insert(i); assert_eq!(set.len(), i + 1); assert!(set.capacity() >= i + 1); assert_eq!(set.get(&i), Some(&i)); set.shrink_to_fit(); assert_eq!(set.len(), i + 1); assert_eq!(set.capacity(), i + 1); assert_eq!(set.get(&i), Some(&i)); } } #[test] fn remove() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &insert { set.insert(elt); } assert_eq!(set.iter().count(), set.len()); assert_eq!(set.iter().count(), insert.len()); for (a, b) in insert.iter().zip(set.iter()) { assert_eq!(a, b); } let remove_fail = [99, 77]; let remove = [4, 12, 8, 7]; for &value in &remove_fail { assert!(set.swap_remove_full(&value).is_none()); } println!("{:?}", set); for &value in &remove { //println!("{:?}", set); let index = set.get_full(&value).unwrap().0; assert_eq!(set.swap_remove_full(&value), Some((index, value))); } println!("{:?}", set); for value in &insert { assert_eq!(set.get(value).is_some(), !remove.contains(value)); } assert_eq!(set.len(), insert.len() - remove.len()); assert_eq!(set.iter().count(), insert.len() - remove.len()); } #[test] fn swap_remove_index() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &insert { set.insert(elt); } let mut vector = insert.to_vec(); let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; // check that the same swap remove sequence on vec and set // have the same result. 
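// Hedged aside (not part of the original test): a minimal standalone sketch of a
// single swap-remove, before the shared removal sequence below runs on `set` and
// `vector`. Removing index 0 moves the current last element into that slot, the
// same way `Vec::swap_remove` does.
{
    let mut demo: IndexSet<i32> = [1, 2, 3, 4].iter().copied().collect();
    assert_eq!(demo.swap_remove_index(0), Some(1));
    assert_eq!(demo.as_slice(), &[4, 2, 3]);
}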
for &rm in remove_sequence { let out_vec = vector.swap_remove(rm); let out_set = set.swap_remove_index(rm).unwrap(); assert_eq!(out_vec, out_set); } assert_eq!(vector.len(), set.len()); for (a, b) in vector.iter().zip(set.iter()) { assert_eq!(a, b); } } #[test] fn partial_eq_and_eq() { let mut set_a = IndexSet::new(); set_a.insert(1); set_a.insert(2); let mut set_b = set_a.clone(); assert_eq!(set_a, set_b); set_b.swap_remove(&1); assert_ne!(set_a, set_b); let set_c: IndexSet<_> = set_b.into_iter().collect(); assert_ne!(set_a, set_c); assert_ne!(set_c, set_a); } #[test] fn extend() { let mut set = IndexSet::new(); set.extend(vec![&1, &2, &3, &4]); set.extend(vec![5, 6]); assert_eq!(set.into_iter().collect::>(), vec![1, 2, 3, 4, 5, 6]); } #[test] fn comparisons() { let set_a: IndexSet<_> = (0..3).collect(); let set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = (0..6).collect(); let set_d: IndexSet<_> = (3..9).collect(); assert!(!set_a.is_disjoint(&set_a)); assert!(set_a.is_subset(&set_a)); assert!(set_a.is_superset(&set_a)); assert!(set_a.is_disjoint(&set_b)); assert!(set_b.is_disjoint(&set_a)); assert!(!set_a.is_subset(&set_b)); assert!(!set_b.is_subset(&set_a)); assert!(!set_a.is_superset(&set_b)); assert!(!set_b.is_superset(&set_a)); assert!(!set_a.is_disjoint(&set_c)); assert!(!set_c.is_disjoint(&set_a)); assert!(set_a.is_subset(&set_c)); assert!(!set_c.is_subset(&set_a)); assert!(!set_a.is_superset(&set_c)); assert!(set_c.is_superset(&set_a)); assert!(!set_c.is_disjoint(&set_d)); assert!(!set_d.is_disjoint(&set_c)); assert!(!set_c.is_subset(&set_d)); assert!(!set_d.is_subset(&set_c)); assert!(!set_c.is_superset(&set_d)); assert!(!set_d.is_superset(&set_c)); } #[test] fn iter_comparisons() { use std::iter::empty; fn check<'a, I1, I2>(iter1: I1, iter2: I2) where I1: Iterator, I2: Iterator, { assert!(iter1.copied().eq(iter2)); } let set_a: IndexSet<_> = (0..3).collect(); let set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = (0..6).collect(); let set_d: IndexSet<_> = (3..9).rev().collect(); check(set_a.difference(&set_a), empty()); check(set_a.symmetric_difference(&set_a), empty()); check(set_a.intersection(&set_a), 0..3); check(set_a.union(&set_a), 0..3); check(set_a.difference(&set_b), 0..3); check(set_b.difference(&set_a), 3..6); check(set_a.symmetric_difference(&set_b), 0..6); check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3)); check(set_a.intersection(&set_b), empty()); check(set_b.intersection(&set_a), empty()); check(set_a.union(&set_b), 0..6); check(set_b.union(&set_a), (3..6).chain(0..3)); check(set_a.difference(&set_c), empty()); check(set_c.difference(&set_a), 3..6); check(set_a.symmetric_difference(&set_c), 3..6); check(set_c.symmetric_difference(&set_a), 3..6); check(set_a.intersection(&set_c), 0..3); check(set_c.intersection(&set_a), 0..3); check(set_a.union(&set_c), 0..6); check(set_c.union(&set_a), 0..6); check(set_c.difference(&set_d), 0..3); check(set_d.difference(&set_c), (6..9).rev()); check( set_c.symmetric_difference(&set_d), (0..3).chain((6..9).rev()), ); check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3)); check(set_c.intersection(&set_d), 3..6); check(set_d.intersection(&set_c), (3..6).rev()); check(set_c.union(&set_d), (0..6).chain((6..9).rev())); check(set_d.union(&set_c), (3..9).rev().chain(0..3)); } #[test] fn ops() { let empty = IndexSet::::new(); let set_a: IndexSet<_> = (0..3).collect(); let set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = (0..6).collect(); let set_d: IndexSet<_> = 
(3..9).rev().collect(); #[allow(clippy::eq_op)] { assert_eq!(&set_a & &set_a, set_a); assert_eq!(&set_a | &set_a, set_a); assert_eq!(&set_a ^ &set_a, empty); assert_eq!(&set_a - &set_a, empty); } assert_eq!(&set_a & &set_b, empty); assert_eq!(&set_b & &set_a, empty); assert_eq!(&set_a | &set_b, set_c); assert_eq!(&set_b | &set_a, set_c); assert_eq!(&set_a ^ &set_b, set_c); assert_eq!(&set_b ^ &set_a, set_c); assert_eq!(&set_a - &set_b, set_a); assert_eq!(&set_b - &set_a, set_b); assert_eq!(&set_a & &set_c, set_a); assert_eq!(&set_c & &set_a, set_a); assert_eq!(&set_a | &set_c, set_c); assert_eq!(&set_c | &set_a, set_c); assert_eq!(&set_a ^ &set_c, set_b); assert_eq!(&set_c ^ &set_a, set_b); assert_eq!(&set_a - &set_c, empty); assert_eq!(&set_c - &set_a, set_b); assert_eq!(&set_c & &set_d, set_b); assert_eq!(&set_d & &set_c, set_b); assert_eq!(&set_c | &set_d, &set_a | &set_d); assert_eq!(&set_d | &set_c, &set_a | &set_d); assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b)); assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b)); assert_eq!(&set_c - &set_d, set_a); assert_eq!(&set_d - &set_c, &set_d - &set_b); } #[test] #[cfg(feature = "std")] fn from_array() { let set1 = IndexSet::from([1, 2, 3, 4]); let set2: IndexSet<_> = [1, 2, 3, 4].into(); assert_eq!(set1, set2); } #[test] fn iter_default() { struct Item; fn assert_default() where T: Default + Iterator, { assert!(T::default().next().is_none()); } assert_default::>(); assert_default::>(); } #[test] #[allow(deprecated)] fn take() { let mut index_set: IndexSet = IndexSet::new(); index_set.insert(10); assert_eq!(index_set.len(), 1); let result = index_set.take(&10); assert_eq!(result, Some(10)); assert_eq!(index_set.len(), 0); let result = index_set.take(&20); assert_eq!(result, None); } #[test] fn swap_take() { let mut index_set: IndexSet = IndexSet::new(); index_set.insert(10); index_set.insert(20); index_set.insert(30); index_set.insert(40); assert_eq!(index_set.len(), 4); let result = index_set.swap_take(&20); assert_eq!(result, Some(20)); assert_eq!(index_set.len(), 3); assert_eq!(index_set.as_slice(), &[10, 40, 30]); let result = index_set.swap_take(&50); assert_eq!(result, None); } #[test] fn sort_unstable() { let mut index_set: IndexSet = IndexSet::new(); index_set.insert(30); index_set.insert(20); index_set.insert(10); index_set.sort_unstable(); assert_eq!(index_set.as_slice(), &[10, 20, 30]); } #[test] fn try_reserve_exact() { let mut index_set: IndexSet = IndexSet::new(); index_set.insert(10); index_set.insert(20); index_set.insert(30); index_set.shrink_to_fit(); assert_eq!(index_set.capacity(), 3); index_set.try_reserve_exact(2).unwrap(); assert_eq!(index_set.capacity(), 5); } #[test] fn shift_remove_full() { let mut set: IndexSet = IndexSet::new(); set.insert(10); set.insert(20); set.insert(30); set.insert(40); set.insert(50); let result = set.shift_remove_full(&20); assert_eq!(result, Some((1, 20))); assert_eq!(set.len(), 4); assert_eq!(set.as_slice(), &[10, 30, 40, 50]); let result = set.shift_remove_full(&50); assert_eq!(result, Some((3, 50))); assert_eq!(set.len(), 3); assert_eq!(set.as_slice(), &[10, 30, 40]); let result = set.shift_remove_full(&60); assert_eq!(result, None); assert_eq!(set.len(), 3); assert_eq!(set.as_slice(), &[10, 30, 40]); } #[test] fn shift_remove_index() { let mut set: IndexSet = IndexSet::new(); set.insert(10); set.insert(20); set.insert(30); set.insert(40); set.insert(50); let result = set.shift_remove_index(1); assert_eq!(result, Some(20)); assert_eq!(set.len(), 4); 
assert_eq!(set.as_slice(), &[10, 30, 40, 50]); let result = set.shift_remove_index(1); assert_eq!(result, Some(30)); assert_eq!(set.len(), 3); assert_eq!(set.as_slice(), &[10, 40, 50]); let result = set.shift_remove_index(3); assert_eq!(result, None); assert_eq!(set.len(), 3); assert_eq!(set.as_slice(), &[10, 40, 50]); } #[test] fn sort_unstable_by() { let mut set: IndexSet = IndexSet::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); set.sort_unstable_by(|a, b| b.cmp(a)); assert_eq!(set.as_slice(), &[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]); } #[test] fn sort_by() { let mut set: IndexSet = IndexSet::new(); set.insert(3); set.insert(1); set.insert(2); set.sort_by(|a, b| a.cmp(b)); assert_eq!(set.as_slice(), &[1, 2, 3]); } #[test] fn drain() { let mut set: IndexSet = IndexSet::new(); set.insert(1); set.insert(2); set.insert(3); { let drain = set.drain(0..2); assert_eq!(drain.as_slice(), &[1, 2]); } assert_eq!(set.len(), 1); assert_eq!(set.as_slice(), &[3]); } #[test] fn split_off() { let mut set: IndexSet = IndexSet::from([1, 2, 3, 4, 5]); let split_set: IndexSet = set.split_off(3); assert_eq!(split_set.len(), 2); assert_eq!(split_set.as_slice(), &[4, 5]); assert_eq!(set.len(), 3); assert_eq!(set.as_slice(), &[1, 2, 3]); } #[test] fn retain() { let mut set: IndexSet = IndexSet::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); set.retain(|&x| x > 4); assert_eq!(set.len(), 6); assert_eq!(set.as_slice(), &[5, 6, 7, 8, 9, 10]); set.retain(|_| false); assert_eq!(set.len(), 0); } #[test] fn first() { let mut index_set: IndexSet = IndexSet::new(); index_set.insert(10); index_set.insert(20); index_set.insert(30); let result = index_set.first(); assert_eq!(*result.unwrap(), 10); index_set.clear(); let result = index_set.first(); assert!(result.is_none()); } #[test] fn sort_by_key() { let mut index_set: IndexSet = IndexSet::new(); index_set.insert(3); index_set.insert(1); index_set.insert(2); index_set.insert(0); index_set.sort_by_key(|&x| -x); assert_eq!(index_set.as_slice(), &[3, 2, 1, 0]); } #[test] fn sort_unstable_by_key() { let mut index_set: IndexSet = IndexSet::new(); index_set.insert(3); index_set.insert(1); index_set.insert(2); index_set.insert(0); index_set.sort_unstable_by_key(|&x| -x); assert_eq!(index_set.as_slice(), &[3, 2, 1, 0]); } #[test] fn sort_by_cached_key() { let mut index_set: IndexSet = IndexSet::new(); index_set.insert(3); index_set.insert(1); index_set.insert(2); index_set.insert(0); index_set.sort_by_cached_key(|&x| -x); assert_eq!(index_set.as_slice(), &[3, 2, 1, 0]); } #[test] fn insert_sorted() { let mut set: IndexSet = IndexSet::::new(); set.insert_sorted(1); set.insert_sorted(3); assert_eq!(set.insert_sorted(2), (1, true)); } #[test] fn binary_search() { let mut set: IndexSet = IndexSet::new(); set.insert(100); set.insert(300); set.insert(200); set.insert(400); let result = set.binary_search(&200); assert_eq!(result, Ok(2)); let result = set.binary_search(&500); assert_eq!(result, Err(4)); } #[test] fn sorted_unstable_by() { let mut set: IndexSet = IndexSet::from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); set.sort_unstable_by(|a, b| b.cmp(a)); assert_eq!(set.as_slice(), &[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]); } #[test] fn last() { let mut set: IndexSet = IndexSet::new(); set.insert(1); set.insert(2); set.insert(3); set.insert(4); set.insert(5); set.insert(6); assert_eq!(set.last(), Some(&6)); set.pop(); assert_eq!(set.last(), Some(&5)); set.clear(); assert_eq!(set.last(), None); } #[test] fn get_range() { let set: IndexSet = IndexSet::from([1, 2, 3, 4, 5]); let result = set.get_range(0..3); let slice: &Slice = 
result.unwrap(); assert_eq!(slice, &[1, 2, 3]); let result = set.get_range(0..0); assert_eq!(result.unwrap().len(), 0); let result = set.get_range(2..1); assert!(result.is_none()); } #[test] fn shift_take() { let mut set: IndexSet = IndexSet::new(); set.insert(1); set.insert(2); set.insert(3); set.insert(4); set.insert(5); let result = set.shift_take(&2); assert_eq!(result, Some(2)); assert_eq!(set.len(), 4); assert_eq!(set.as_slice(), &[1, 3, 4, 5]); let result = set.shift_take(&5); assert_eq!(result, Some(5)); assert_eq!(set.len(), 3); assert_eq!(set.as_slice(), &[1, 3, 4]); let result = set.shift_take(&5); assert_eq!(result, None); assert_eq!(set.len(), 3); assert_eq!(set.as_slice(), &[1, 3, 4]); } #[test] fn test_binary_search_by() { // adapted from std's test for binary_search let b: IndexSet = [].into(); assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(0)); let b: IndexSet = [4].into(); assert_eq!(b.binary_search_by(|x| x.cmp(&3)), Err(0)); assert_eq!(b.binary_search_by(|x| x.cmp(&4)), Ok(0)); assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(1)); let b: IndexSet = [1, 2, 4, 6, 8, 9].into(); assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(3)); assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Ok(3)); assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Err(4)); assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Ok(4)); let b: IndexSet = [1, 2, 4, 5, 6, 8].into(); assert_eq!(b.binary_search_by(|x| x.cmp(&9)), Err(6)); let b: IndexSet = [1, 2, 4, 6, 7, 8, 9].into(); assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Ok(3)); assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(3)); assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Ok(5)); let b: IndexSet = [1, 2, 4, 5, 6, 8, 9].into(); assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Err(5)); assert_eq!(b.binary_search_by(|x| x.cmp(&0)), Err(0)); let b: IndexSet = [1, 3, 3, 3, 7].into(); assert_eq!(b.binary_search_by(|x| x.cmp(&0)), Err(0)); assert_eq!(b.binary_search_by(|x| x.cmp(&1)), Ok(0)); assert_eq!(b.binary_search_by(|x| x.cmp(&2)), Err(1)); // diff from std as set merges the duplicate keys assert!(match b.binary_search_by(|x| x.cmp(&3)) { Ok(1..=2) => true, _ => false, }); assert!(match b.binary_search_by(|x| x.cmp(&3)) { Ok(1..=2) => true, _ => false, }); assert_eq!(b.binary_search_by(|x| x.cmp(&4)), Err(2)); assert_eq!(b.binary_search_by(|x| x.cmp(&5)), Err(2)); assert_eq!(b.binary_search_by(|x| x.cmp(&6)), Err(2)); assert_eq!(b.binary_search_by(|x| x.cmp(&7)), Ok(2)); assert_eq!(b.binary_search_by(|x| x.cmp(&8)), Err(3)); } #[test] fn test_binary_search_by_key() { // adapted from std's test for binary_search let b: IndexSet = [].into(); assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(0)); let b: IndexSet = [4].into(); assert_eq!(b.binary_search_by_key(&3, |&x| x), Err(0)); assert_eq!(b.binary_search_by_key(&4, |&x| x), Ok(0)); assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(1)); let b: IndexSet = [1, 2, 4, 6, 8, 9].into(); assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(3)); assert_eq!(b.binary_search_by_key(&6, |&x| x), Ok(3)); assert_eq!(b.binary_search_by_key(&7, |&x| x), Err(4)); assert_eq!(b.binary_search_by_key(&8, |&x| x), Ok(4)); let b: IndexSet = [1, 2, 4, 5, 6, 8].into(); assert_eq!(b.binary_search_by_key(&9, |&x| x), Err(6)); let b: IndexSet = [1, 2, 4, 6, 7, 8, 9].into(); assert_eq!(b.binary_search_by_key(&6, |&x| x), Ok(3)); assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(3)); assert_eq!(b.binary_search_by_key(&8, |&x| x), Ok(5)); let b: IndexSet = [1, 2, 4, 5, 6, 8, 9].into(); assert_eq!(b.binary_search_by_key(&7, |&x| x), 
Err(5)); assert_eq!(b.binary_search_by_key(&0, |&x| x), Err(0)); let b: IndexSet = [1, 3, 3, 3, 7].into(); assert_eq!(b.binary_search_by_key(&0, |&x| x), Err(0)); assert_eq!(b.binary_search_by_key(&1, |&x| x), Ok(0)); assert_eq!(b.binary_search_by_key(&2, |&x| x), Err(1)); // diff from std as set merges the duplicate keys assert!(match b.binary_search_by_key(&3, |&x| x) { Ok(1..=2) => true, _ => false, }); assert!(match b.binary_search_by_key(&3, |&x| x) { Ok(1..=2) => true, _ => false, }); assert_eq!(b.binary_search_by_key(&4, |&x| x), Err(2)); assert_eq!(b.binary_search_by_key(&5, |&x| x), Err(2)); assert_eq!(b.binary_search_by_key(&6, |&x| x), Err(2)); assert_eq!(b.binary_search_by_key(&7, |&x| x), Ok(2)); assert_eq!(b.binary_search_by_key(&8, |&x| x), Err(3)); } #[test] fn test_partition_point() { // adapted from std's test for partition_point let b: IndexSet = [].into(); assert_eq!(b.partition_point(|&x| x < 5), 0); let b: IndexSet<_> = [4].into(); assert_eq!(b.partition_point(|&x| x < 3), 0); assert_eq!(b.partition_point(|&x| x < 4), 0); assert_eq!(b.partition_point(|&x| x < 5), 1); let b: IndexSet<_> = [1, 2, 4, 6, 8, 9].into(); assert_eq!(b.partition_point(|&x| x < 5), 3); assert_eq!(b.partition_point(|&x| x < 6), 3); assert_eq!(b.partition_point(|&x| x < 7), 4); assert_eq!(b.partition_point(|&x| x < 8), 4); let b: IndexSet<_> = [1, 2, 4, 5, 6, 8].into(); assert_eq!(b.partition_point(|&x| x < 9), 6); let b: IndexSet<_> = [1, 2, 4, 6, 7, 8, 9].into(); assert_eq!(b.partition_point(|&x| x < 6), 3); assert_eq!(b.partition_point(|&x| x < 5), 3); assert_eq!(b.partition_point(|&x| x < 8), 5); let b: IndexSet<_> = [1, 2, 4, 5, 6, 8, 9].into(); assert_eq!(b.partition_point(|&x| x < 7), 5); assert_eq!(b.partition_point(|&x| x < 0), 0); let b: IndexSet<_> = [1, 3, 3, 3, 7].into(); assert_eq!(b.partition_point(|&x| x < 0), 0); assert_eq!(b.partition_point(|&x| x < 1), 0); assert_eq!(b.partition_point(|&x| x < 2), 1); assert_eq!(b.partition_point(|&x| x < 3), 1); assert_eq!(b.partition_point(|&x| x < 4), 2); // diff from std as set merges the duplicate keys assert_eq!(b.partition_point(|&x| x < 5), 2); assert_eq!(b.partition_point(|&x| x < 6), 2); assert_eq!(b.partition_point(|&x| x < 7), 2); assert_eq!(b.partition_point(|&x| x < 8), 3); } #[test] fn is_sorted() { fn expect(set: &IndexSet, e: [bool; 4]) { assert_eq!(e[0], set.is_sorted()); assert_eq!(e[1], set.is_sorted_by(|v1, v2| v1 < v2)); assert_eq!(e[2], set.is_sorted_by(|v1, v2| v1 > v2)); assert_eq!(e[3], set.is_sorted_by_key(|v| v)); } let mut set = IndexSet::::from_iter(0..10); expect(&set, [true, true, false, true]); set.replace_index(5, -1).unwrap(); expect(&set, [false, false, false, false]); } #[test] fn is_sorted_trivial() { fn expect(set: &IndexSet, e: [bool; 5]) { assert_eq!(e[0], set.is_sorted()); assert_eq!(e[1], set.is_sorted_by(|_, _| true)); assert_eq!(e[2], set.is_sorted_by(|_, _| false)); assert_eq!(e[3], set.is_sorted_by_key(|_| 0f64)); assert_eq!(e[4], set.is_sorted_by_key(|_| f64::NAN)); } let mut set = IndexSet::::default(); expect(&set, [true, true, true, true, true]); set.insert(0); expect(&set, [true, true, true, true, true]); set.insert(1); expect(&set, [true, true, false, true, false]); set.reverse(); expect(&set, [false, true, false, true, false]); } indexmap-2.12.1/src/set.rs000064400000000000000000001410011046102023000134410ustar 00000000000000//! 
A hash set implemented using [`IndexMap`] mod iter; mod mutable; mod slice; #[cfg(test)] mod tests; pub use self::iter::{ Difference, Drain, ExtractIf, Intersection, IntoIter, Iter, Splice, SymmetricDifference, Union, }; pub use self::mutable::MutableValues; pub use self::slice::Slice; #[cfg(feature = "rayon")] pub use crate::rayon::set as rayon; use crate::TryReserveError; #[cfg(feature = "std")] use std::hash::RandomState; use crate::util::try_simplify_range; use alloc::boxed::Box; use alloc::vec::Vec; use core::cmp::Ordering; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::ops::{BitAnd, BitOr, BitXor, Index, RangeBounds, Sub}; use super::{Equivalent, IndexMap}; type Bucket = super::Bucket; /// A hash set where the iteration order of the values is independent of their /// hash values. /// /// The interface is closely compatible with the standard /// [`HashSet`][std::collections::HashSet], /// but also has additional features. /// /// # Order /// /// The values have a consistent order that is determined by the sequence of /// insertion and removal calls on the set. The order does not depend on the /// values or the hash function at all. Note that insertion order and value /// are not affected if a re-insertion is attempted once an element is /// already present. /// /// All iterators traverse the set *in order*. Set operation iterators like /// [`IndexSet::union`] produce a concatenated order, as do their matching "bitwise" /// operators. See their documentation for specifics. /// /// The insertion order is preserved, with **notable exceptions** like the /// [`.remove()`][Self::remove] or [`.swap_remove()`][Self::swap_remove] methods. /// Methods such as [`.sort_by()`][Self::sort_by] of /// course result in a new order, depending on the sorting order. /// /// # Indices /// /// The values are indexed in a compact range without holes in the range /// `0..self.len()`. For example, the method `.get_full` looks up the index for /// a value, and the method `.get_index` looks up the value by index. /// /// # Complexity /// /// Internally, `IndexSet` just holds an [`IndexMap`](IndexMap). Thus the complexity /// of the two are the same for most methods. /// /// # Examples /// /// ``` /// use indexmap::IndexSet; /// /// // Collects which letters appear in a sentence. /// let letters: IndexSet<_> = "a short treatise on fungi".chars().collect(); /// /// assert!(letters.contains(&'s')); /// assert!(letters.contains(&'t')); /// assert!(letters.contains(&'u')); /// assert!(!letters.contains(&'y')); /// ``` #[cfg(feature = "std")] pub struct IndexSet { pub(crate) map: IndexMap, } #[cfg(not(feature = "std"))] pub struct IndexSet { pub(crate) map: IndexMap, } impl Clone for IndexSet where T: Clone, S: Clone, { fn clone(&self) -> Self { IndexSet { map: self.map.clone(), } } fn clone_from(&mut self, other: &Self) { self.map.clone_from(&other.map); } } impl fmt::Debug for IndexSet where T: fmt::Debug, { #[cfg(not(feature = "test_debug"))] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() } #[cfg(feature = "test_debug")] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Let the inner `IndexMap` print all of its details f.debug_struct("IndexSet").field("map", &self.map).finish() } } #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] impl IndexSet { /// Create a new set. (Does not allocate.) pub fn new() -> Self { IndexSet { map: IndexMap::new(), } } /// Create a new set with capacity for `n` elements. 
/// (Does not allocate if `n` is zero.) /// /// Computes in **O(n)** time. pub fn with_capacity(n: usize) -> Self { IndexSet { map: IndexMap::with_capacity(n), } } } impl IndexSet { /// Create a new set with capacity for `n` elements. /// (Does not allocate if `n` is zero.) /// /// Computes in **O(n)** time. pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self { IndexSet { map: IndexMap::with_capacity_and_hasher(n, hash_builder), } } /// Create a new set with `hash_builder`. /// /// This function is `const`, so it /// can be called in `static` contexts. pub const fn with_hasher(hash_builder: S) -> Self { IndexSet { map: IndexMap::with_hasher(hash_builder), } } #[inline] pub(crate) fn into_entries(self) -> Vec> { self.map.into_entries() } #[inline] pub(crate) fn as_entries(&self) -> &[Bucket] { self.map.as_entries() } pub(crate) fn with_entries(&mut self, f: F) where F: FnOnce(&mut [Bucket]), { self.map.with_entries(f); } /// Return the number of elements the set can hold without reallocating. /// /// This number is a lower bound; the set might be able to hold more, /// but is guaranteed to be able to hold at least this many. /// /// Computes in **O(1)** time. pub fn capacity(&self) -> usize { self.map.capacity() } /// Return a reference to the set's `BuildHasher`. pub fn hasher(&self) -> &S { self.map.hasher() } /// Return the number of elements in the set. /// /// Computes in **O(1)** time. pub fn len(&self) -> usize { self.map.len() } /// Returns true if the set contains no elements. /// /// Computes in **O(1)** time. pub fn is_empty(&self) -> bool { self.map.is_empty() } /// Return an iterator over the values of the set, in their order pub fn iter(&self) -> Iter<'_, T> { Iter::new(self.as_entries()) } /// Remove all elements in the set, while preserving its capacity. /// /// Computes in **O(n)** time. pub fn clear(&mut self) { self.map.clear(); } /// Shortens the set, keeping the first `len` elements and dropping the rest. /// /// If `len` is greater than the set's current length, this has no effect. pub fn truncate(&mut self, len: usize) { self.map.truncate(len); } /// Clears the `IndexSet` in the given index range, returning those values /// as a drain iterator. /// /// The range may be any type that implements [`RangeBounds`], /// including all of the `std::ops::Range*` types, or even a tuple pair of /// `Bound` start and end values. To drain the set entirely, use `RangeFull` /// like `set.drain(..)`. /// /// This shifts down all entries following the drained range to fill the /// gap, and keeps the allocated memory for reuse. /// /// ***Panics*** if the starting point is greater than the end point or if /// the end point is greater than the length of the set. #[track_caller] pub fn drain(&mut self, range: R) -> Drain<'_, T> where R: RangeBounds, { Drain::new(self.map.core.drain(range)) } /// Creates an iterator which uses a closure to determine if a value should be removed, /// for all values in the given range. /// /// If the closure returns true, then the value is removed and yielded. /// If the closure returns false, the value will remain in the list and will not be yielded /// by the iterator. /// /// The range may be any type that implements [`RangeBounds`], /// including all of the `std::ops::Range*` types, or even a tuple pair of /// `Bound` start and end values. To check the entire set, use `RangeFull` /// like `set.extract_if(.., predicate)`. /// /// If the returned `ExtractIf` is not exhausted, e.g. 
because it is dropped without iterating /// or the iteration short-circuits, then the remaining elements will be retained. /// Use [`retain`] with a negated predicate if you do not need the returned iterator. /// /// [`retain`]: IndexSet::retain /// /// ***Panics*** if the starting point is greater than the end point or if /// the end point is greater than the length of the set. /// /// # Examples /// /// Splitting a set into even and odd values, reusing the original set: /// /// ``` /// use indexmap::IndexSet; /// /// let mut set: IndexSet = (0..8).collect(); /// let extracted: IndexSet = set.extract_if(.., |v| v % 2 == 0).collect(); /// /// let evens = extracted.into_iter().collect::>(); /// let odds = set.into_iter().collect::>(); /// /// assert_eq!(evens, vec![0, 2, 4, 6]); /// assert_eq!(odds, vec![1, 3, 5, 7]); /// ``` #[track_caller] pub fn extract_if(&mut self, range: R, pred: F) -> ExtractIf<'_, T, F> where F: FnMut(&T) -> bool, R: RangeBounds, { ExtractIf::new(&mut self.map.core, range, pred) } /// Splits the collection into two at the given index. /// /// Returns a newly allocated set containing the elements in the range /// `[at, len)`. After the call, the original set will be left containing /// the elements `[0, at)` with its previous capacity unchanged. /// /// ***Panics*** if `at > len`. #[track_caller] pub fn split_off(&mut self, at: usize) -> Self where S: Clone, { Self { map: self.map.split_off(at), } } /// Reserve capacity for `additional` more values. /// /// Computes in **O(n)** time. pub fn reserve(&mut self, additional: usize) { self.map.reserve(additional); } /// Reserve capacity for `additional` more values, without over-allocating. /// /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid /// frequent re-allocations. However, the underlying data structures may still have internal /// capacity requirements, and the allocator itself may give more space than requested, so this /// cannot be relied upon to be precisely minimal. /// /// Computes in **O(n)** time. pub fn reserve_exact(&mut self, additional: usize) { self.map.reserve_exact(additional); } /// Try to reserve capacity for `additional` more values. /// /// Computes in **O(n)** time. pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { self.map.try_reserve(additional) } /// Try to reserve capacity for `additional` more values, without over-allocating. /// /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid /// frequent re-allocations. However, the underlying data structures may still have internal /// capacity requirements, and the allocator itself may give more space than requested, so this /// cannot be relied upon to be precisely minimal. /// /// Computes in **O(n)** time. pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { self.map.try_reserve_exact(additional) } /// Shrink the capacity of the set as much as possible. /// /// Computes in **O(n)** time. pub fn shrink_to_fit(&mut self) { self.map.shrink_to_fit(); } /// Shrink the capacity of the set with a lower limit. /// /// Computes in **O(n)** time. pub fn shrink_to(&mut self, min_capacity: usize) { self.map.shrink_to(min_capacity); } } impl IndexSet where T: Hash + Eq, S: BuildHasher, { /// Insert the value into the set. /// /// If an equivalent item already exists in the set, it returns /// `false` leaving the original value in the set and without /// altering its insertion order. 
Otherwise, it inserts the new /// item and returns `true`. /// /// Computes in **O(1)** time (amortized average). pub fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()).is_none() } /// Insert the value into the set, and get its index. /// /// If an equivalent item already exists in the set, it returns /// the index of the existing item and `false`, leaving the /// original value in the set and without altering its insertion /// order. Otherwise, it inserts the new item and returns the index /// of the inserted item and `true`. /// /// Computes in **O(1)** time (amortized average). pub fn insert_full(&mut self, value: T) -> (usize, bool) { let (index, existing) = self.map.insert_full(value, ()); (index, existing.is_none()) } /// Insert the value into the set at its ordered position among sorted values. /// /// This is equivalent to finding the position with /// [`binary_search`][Self::binary_search], and if needed calling /// [`insert_before`][Self::insert_before] for a new value. /// /// If the sorted item is found in the set, it returns the index of that /// existing item and `false`, without any change. Otherwise, it inserts the /// new item and returns its sorted index and `true`. /// /// If the existing items are **not** already sorted, then the insertion /// index is unspecified (like [`slice::binary_search`]), but the value /// is moved to or inserted at that position regardless. /// /// Computes in **O(n)** time (average). Instead of repeating calls to /// `insert_sorted`, it may be faster to call batched [`insert`][Self::insert] /// or [`extend`][Self::extend] and only call [`sort`][Self::sort] or /// [`sort_unstable`][Self::sort_unstable] once. pub fn insert_sorted(&mut self, value: T) -> (usize, bool) where T: Ord, { let (index, existing) = self.map.insert_sorted(value, ()); (index, existing.is_none()) } /// Insert the value into the set at its ordered position among values /// sorted by `cmp`. /// /// This is equivalent to finding the position with /// [`binary_search_by`][Self::binary_search_by], then calling /// [`insert_before`][Self::insert_before]. /// /// If the existing items are **not** already sorted, then the insertion /// index is unspecified (like [`slice::binary_search`]), but the value /// is moved to or inserted at that position regardless. /// /// Computes in **O(n)** time (average). pub fn insert_sorted_by(&mut self, value: T, mut cmp: F) -> (usize, bool) where F: FnMut(&T, &T) -> Ordering, { let (index, existing) = self .map .insert_sorted_by(value, (), |a, (), b, ()| cmp(a, b)); (index, existing.is_none()) } /// Insert the value into the set at its ordered position among values /// using a sort-key extraction function. /// /// This is equivalent to finding the position with /// [`binary_search_by_key`][Self::binary_search_by_key] with `sort_key(key)`, /// then calling [`insert_before`][Self::insert_before]. /// /// If the existing items are **not** already sorted, then the insertion /// index is unspecified (like [`slice::binary_search`]), but the value /// is moved to or inserted at that position regardless. /// /// Computes in **O(n)** time (average). pub fn insert_sorted_by_key(&mut self, value: T, mut sort_key: F) -> (usize, bool) where B: Ord, F: FnMut(&T) -> B, { let (index, existing) = self.map.insert_sorted_by_key(value, (), |k, _| sort_key(k)); (index, existing.is_none()) } /// Insert the value into the set before the value at the given index, or at the end. 
/// /// If an equivalent item already exists in the set, it returns `false` leaving the /// original value in the set, but moved to the new position. The returned index /// will either be the given index or one less, depending on how the value moved. /// (See [`shift_insert`](Self::shift_insert) for different behavior here.) /// /// Otherwise, it inserts the new value exactly at the given index and returns `true`. /// /// ***Panics*** if `index` is out of bounds. /// Valid indices are `0..=set.len()` (inclusive). /// /// Computes in **O(n)** time (average). /// /// # Examples /// /// ``` /// use indexmap::IndexSet; /// let mut set: IndexSet = ('a'..='z').collect(); /// /// // The new value '*' goes exactly at the given index. /// assert_eq!(set.get_index_of(&'*'), None); /// assert_eq!(set.insert_before(10, '*'), (10, true)); /// assert_eq!(set.get_index_of(&'*'), Some(10)); /// /// // Moving the value 'a' up will shift others down, so this moves *before* 10 to index 9. /// assert_eq!(set.insert_before(10, 'a'), (9, false)); /// assert_eq!(set.get_index_of(&'a'), Some(9)); /// assert_eq!(set.get_index_of(&'*'), Some(10)); /// /// // Moving the value 'z' down will shift others up, so this moves to exactly 10. /// assert_eq!(set.insert_before(10, 'z'), (10, false)); /// assert_eq!(set.get_index_of(&'z'), Some(10)); /// assert_eq!(set.get_index_of(&'*'), Some(11)); /// /// // Moving or inserting before the endpoint is also valid. /// assert_eq!(set.len(), 27); /// assert_eq!(set.insert_before(set.len(), '*'), (26, false)); /// assert_eq!(set.get_index_of(&'*'), Some(26)); /// assert_eq!(set.insert_before(set.len(), '+'), (27, true)); /// assert_eq!(set.get_index_of(&'+'), Some(27)); /// assert_eq!(set.len(), 28); /// ``` #[track_caller] pub fn insert_before(&mut self, index: usize, value: T) -> (usize, bool) { let (index, existing) = self.map.insert_before(index, value, ()); (index, existing.is_none()) } /// Insert the value into the set at the given index. /// /// If an equivalent item already exists in the set, it returns `false` leaving /// the original value in the set, but moved to the given index. /// Note that existing values **cannot** be moved to `index == set.len()`! /// (See [`insert_before`](Self::insert_before) for different behavior here.) /// /// Otherwise, it inserts the new value at the given index and returns `true`. /// /// ***Panics*** if `index` is out of bounds. /// Valid indices are `0..set.len()` (exclusive) when moving an existing value, or /// `0..=set.len()` (inclusive) when inserting a new value. /// /// Computes in **O(n)** time (average). /// /// # Examples /// /// ``` /// use indexmap::IndexSet; /// let mut set: IndexSet = ('a'..='z').collect(); /// /// // The new value '*' goes exactly at the given index. /// assert_eq!(set.get_index_of(&'*'), None); /// assert_eq!(set.shift_insert(10, '*'), true); /// assert_eq!(set.get_index_of(&'*'), Some(10)); /// /// // Moving the value 'a' up to 10 will shift others down, including the '*' that was at 10. /// assert_eq!(set.shift_insert(10, 'a'), false); /// assert_eq!(set.get_index_of(&'a'), Some(10)); /// assert_eq!(set.get_index_of(&'*'), Some(9)); /// /// // Moving the value 'z' down to 9 will shift others up, including the '*' that was at 9. /// assert_eq!(set.shift_insert(9, 'z'), false); /// assert_eq!(set.get_index_of(&'z'), Some(9)); /// assert_eq!(set.get_index_of(&'*'), Some(10)); /// /// // Existing values can move to len-1 at most, but new values can insert at the endpoint. 
/// assert_eq!(set.len(), 27); /// assert_eq!(set.shift_insert(set.len() - 1, '*'), false); /// assert_eq!(set.get_index_of(&'*'), Some(26)); /// assert_eq!(set.shift_insert(set.len(), '+'), true); /// assert_eq!(set.get_index_of(&'+'), Some(27)); /// assert_eq!(set.len(), 28); /// ``` /// /// ```should_panic /// use indexmap::IndexSet; /// let mut set: IndexSet = ('a'..='z').collect(); /// /// // This is an invalid index for moving an existing value! /// set.shift_insert(set.len(), 'a'); /// ``` #[track_caller] pub fn shift_insert(&mut self, index: usize, value: T) -> bool { self.map.shift_insert(index, value, ()).is_none() } /// Adds a value to the set, replacing the existing value, if any, that is /// equal to the given one, without altering its insertion order. Returns /// the replaced value. /// /// Computes in **O(1)** time (average). pub fn replace(&mut self, value: T) -> Option { self.replace_full(value).1 } /// Adds a value to the set, replacing the existing value, if any, that is /// equal to the given one, without altering its insertion order. Returns /// the index of the item and its replaced value. /// /// Computes in **O(1)** time (average). pub fn replace_full(&mut self, value: T) -> (usize, Option) { let hash = self.map.hash(&value); match self.map.core.replace_full(hash, value, ()) { (i, Some((replaced, ()))) => (i, Some(replaced)), (i, None) => (i, None), } } /// Replaces the value at the given index. The new value does not need to be /// equivalent to the one it is replacing, but it must be unique to the rest /// of the set. /// /// Returns `Ok(old_value)` if successful, or `Err((other_index, value))` if /// an equivalent value already exists at a different index. The set will be /// unchanged in the error case. /// /// ***Panics*** if `index` is out of bounds. /// /// Computes in **O(1)** time (average). #[track_caller] pub fn replace_index(&mut self, index: usize, value: T) -> Result { self.map.replace_index(index, value) } /// Return an iterator over the values that are in `self` but not `other`. /// /// Values are produced in the same order that they appear in `self`. pub fn difference<'a, S2>(&'a self, other: &'a IndexSet) -> Difference<'a, T, S2> where S2: BuildHasher, { Difference::new(self, other) } /// Return an iterator over the values that are in `self` or `other`, /// but not in both. /// /// Values from `self` are produced in their original order, followed by /// values from `other` in their original order. pub fn symmetric_difference<'a, S2>( &'a self, other: &'a IndexSet, ) -> SymmetricDifference<'a, T, S, S2> where S2: BuildHasher, { SymmetricDifference::new(self, other) } /// Return an iterator over the values that are in both `self` and `other`. /// /// Values are produced in the same order that they appear in `self`. pub fn intersection<'a, S2>(&'a self, other: &'a IndexSet) -> Intersection<'a, T, S2> where S2: BuildHasher, { Intersection::new(self, other) } /// Return an iterator over all values that are in `self` or `other`. /// /// Values from `self` are produced in their original order, followed by /// values that are unique to `other` in their original order. pub fn union<'a, S2>(&'a self, other: &'a IndexSet) -> Union<'a, T, S> where S2: BuildHasher, { Union::new(self, other) } /// Creates a splicing iterator that replaces the specified range in the set /// with the given `replace_with` iterator and yields the removed items. /// `replace_with` does not need to be the same length as `range`. 
/// /// The `range` is removed even if the iterator is not consumed until the /// end. It is unspecified how many elements are removed from the set if the /// `Splice` value is leaked. /// /// The input iterator `replace_with` is only consumed when the `Splice` /// value is dropped. If a value from the iterator matches an existing entry /// in the set (outside of `range`), then the original will be unchanged. /// Otherwise, the new value will be inserted in the replaced `range`. /// /// ***Panics*** if the starting point is greater than the end point or if /// the end point is greater than the length of the set. /// /// # Examples /// /// ``` /// use indexmap::IndexSet; /// /// let mut set = IndexSet::from([0, 1, 2, 3, 4]); /// let new = [5, 4, 3, 2, 1]; /// let removed: Vec<_> = set.splice(2..4, new).collect(); /// /// // 1 and 4 kept their positions, while 5, 3, and 2 were newly inserted. /// assert!(set.into_iter().eq([0, 1, 5, 3, 2, 4])); /// assert_eq!(removed, &[2, 3]); /// ``` #[track_caller] pub fn splice(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, T, S> where R: RangeBounds, I: IntoIterator, { Splice::new(self, range, replace_with.into_iter()) } /// Moves all values from `other` into `self`, leaving `other` empty. /// /// This is equivalent to calling [`insert`][Self::insert] for each value /// from `other` in order, which means that values that already exist /// in `self` are unchanged in their current position. /// /// See also [`union`][Self::union] to iterate the combined values by /// reference, without modifying `self` or `other`. /// /// # Examples /// /// ``` /// use indexmap::IndexSet; /// /// let mut a = IndexSet::from([3, 2, 1]); /// let mut b = IndexSet::from([3, 4, 5]); /// let old_capacity = b.capacity(); /// /// a.append(&mut b); /// /// assert_eq!(a.len(), 5); /// assert_eq!(b.len(), 0); /// assert_eq!(b.capacity(), old_capacity); /// /// assert!(a.iter().eq(&[3, 2, 1, 4, 5])); /// ``` pub fn append(&mut self, other: &mut IndexSet) { self.map.append(&mut other.map); } } impl IndexSet where S: BuildHasher, { /// Return `true` if an equivalent to `value` exists in the set. /// /// Computes in **O(1)** time (average). pub fn contains(&self, value: &Q) -> bool where Q: ?Sized + Hash + Equivalent, { self.map.contains_key(value) } /// Return a reference to the value stored in the set, if it is present, /// else `None`. /// /// Computes in **O(1)** time (average). pub fn get(&self, value: &Q) -> Option<&T> where Q: ?Sized + Hash + Equivalent, { self.map.get_key_value(value).map(|(x, &())| x) } /// Return item index and value pub fn get_full(&self, value: &Q) -> Option<(usize, &T)> where Q: ?Sized + Hash + Equivalent, { self.map.get_full(value).map(|(i, x, &())| (i, x)) } /// Return item index, if it exists in the set /// /// Computes in **O(1)** time (average). pub fn get_index_of(&self, value: &Q) -> Option where Q: ?Sized + Hash + Equivalent, { self.map.get_index_of(value) } /// Remove the value from the set, and return `true` if it was present. /// /// **NOTE:** This is equivalent to [`.swap_remove(value)`][Self::swap_remove], replacing this /// value's position with the last element, and it is deprecated in favor of calling that /// explicitly. If you need to preserve the relative order of the values in the set, use /// [`.shift_remove(value)`][Self::shift_remove] instead. 
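    ///
    /// For illustration only (a minimal sketch, not part of the original docs), the
    /// two explicit alternatives differ in the order they leave behind:
    ///
    /// ```
    /// use indexmap::IndexSet;
    ///
    /// let mut swapped = IndexSet::from([1, 2, 3, 4]);
    /// swapped.swap_remove(&1); // the last element (4) fills the vacated slot
    /// assert_eq!(swapped.as_slice(), &[4, 2, 3]);
    ///
    /// let mut shifted = IndexSet::from([1, 2, 3, 4]);
    /// shifted.shift_remove(&1); // remaining elements keep their relative order
    /// assert_eq!(shifted.as_slice(), &[2, 3, 4]);
    /// ```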
#[deprecated(note = "`remove` disrupts the set order -- \ use `swap_remove` or `shift_remove` for explicit behavior.")] pub fn remove(&mut self, value: &Q) -> bool where Q: ?Sized + Hash + Equivalent, { self.swap_remove(value) } /// Remove the value from the set, and return `true` if it was present. /// /// Like [`Vec::swap_remove`], the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the position of what used to be the last element!** /// /// Return `false` if `value` was not in the set. /// /// Computes in **O(1)** time (average). pub fn swap_remove(&mut self, value: &Q) -> bool where Q: ?Sized + Hash + Equivalent, { self.map.swap_remove(value).is_some() } /// Remove the value from the set, and return `true` if it was present. /// /// Like [`Vec::remove`], the value is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `false` if `value` was not in the set. /// /// Computes in **O(n)** time (average). pub fn shift_remove(&mut self, value: &Q) -> bool where Q: ?Sized + Hash + Equivalent, { self.map.shift_remove(value).is_some() } /// Removes and returns the value in the set, if any, that is equal to the /// given one. /// /// **NOTE:** This is equivalent to [`.swap_take(value)`][Self::swap_take], replacing this /// value's position with the last element, and it is deprecated in favor of calling that /// explicitly. If you need to preserve the relative order of the values in the set, use /// [`.shift_take(value)`][Self::shift_take] instead. #[deprecated(note = "`take` disrupts the set order -- \ use `swap_take` or `shift_take` for explicit behavior.")] pub fn take(&mut self, value: &Q) -> Option where Q: ?Sized + Hash + Equivalent, { self.swap_take(value) } /// Removes and returns the value in the set, if any, that is equal to the /// given one. /// /// Like [`Vec::swap_remove`], the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the position of what used to be the last element!** /// /// Return `None` if `value` was not in the set. /// /// Computes in **O(1)** time (average). pub fn swap_take(&mut self, value: &Q) -> Option where Q: ?Sized + Hash + Equivalent, { self.map.swap_remove_entry(value).map(|(x, ())| x) } /// Removes and returns the value in the set, if any, that is equal to the /// given one. /// /// Like [`Vec::remove`], the value is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `None` if `value` was not in the set. /// /// Computes in **O(n)** time (average). pub fn shift_take(&mut self, value: &Q) -> Option where Q: ?Sized + Hash + Equivalent, { self.map.shift_remove_entry(value).map(|(x, ())| x) } /// Remove the value from the set return it and the index it had. /// /// Like [`Vec::swap_remove`], the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the position of what used to be the last element!** /// /// Return `None` if `value` was not in the set. pub fn swap_remove_full(&mut self, value: &Q) -> Option<(usize, T)> where Q: ?Sized + Hash + Equivalent, { self.map.swap_remove_full(value).map(|(i, x, ())| (i, x)) } /// Remove the value from the set return it and the index it had. 
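    ///
    /// A small usage sketch (added for illustration, not original to these docs):
    ///
    /// ```
    /// use indexmap::IndexSet;
    ///
    /// let mut set = IndexSet::from([10, 20, 30, 40]);
    /// // `20` sat at index 1; later elements shift down to close the gap.
    /// assert_eq!(set.shift_remove_full(&20), Some((1, 20)));
    /// assert_eq!(set.as_slice(), &[10, 30, 40]);
    /// ```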
/// /// Like [`Vec::remove`], the value is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `None` if `value` was not in the set. pub fn shift_remove_full(&mut self, value: &Q) -> Option<(usize, T)> where Q: ?Sized + Hash + Equivalent, { self.map.shift_remove_full(value).map(|(i, x, ())| (i, x)) } } impl IndexSet { /// Remove the last value /// /// This preserves the order of the remaining elements. /// /// Computes in **O(1)** time (average). #[doc(alias = "pop_last")] // like `BTreeSet` pub fn pop(&mut self) -> Option { self.map.pop().map(|(x, ())| x) } /// Removes and returns the last value from a set if the predicate /// returns `true`, or [`None`] if the predicate returns false or the set /// is empty (the predicate will not be called in that case). /// /// This preserves the order of the remaining elements. /// /// Computes in **O(1)** time (average). /// /// # Examples /// /// ``` /// use indexmap::IndexSet; /// /// let mut set = IndexSet::from([1, 2, 3, 4]); /// let pred = |x: &i32| *x % 2 == 0; /// /// assert_eq!(set.pop_if(pred), Some(4)); /// assert_eq!(set.as_slice(), &[1, 2, 3]); /// assert_eq!(set.pop_if(pred), None); /// ``` pub fn pop_if(&mut self, predicate: impl FnOnce(&T) -> bool) -> Option { let last = self.last()?; if predicate(last) { self.pop() } else { None } } /// Scan through each value in the set and keep those where the /// closure `keep` returns `true`. /// /// The elements are visited in order, and remaining elements keep their /// order. /// /// Computes in **O(n)** time (average). pub fn retain(&mut self, mut keep: F) where F: FnMut(&T) -> bool, { self.map.retain(move |x, &mut ()| keep(x)) } /// Sort the set's values by their default ordering. /// /// This is a stable sort -- but equivalent values should not normally coexist in /// a set at all, so [`sort_unstable`][Self::sort_unstable] is preferred /// because it is generally faster and doesn't allocate auxiliary memory. /// /// See [`sort_by`](Self::sort_by) for details. pub fn sort(&mut self) where T: Ord, { self.map.sort_keys() } /// Sort the set's values in place using the comparison function `cmp`. /// /// Computes in **O(n log n)** time and **O(n)** space. The sort is stable. pub fn sort_by(&mut self, mut cmp: F) where F: FnMut(&T, &T) -> Ordering, { self.map.sort_by(move |a, (), b, ()| cmp(a, b)); } /// Sort the values of the set and return a by-value iterator of /// the values with the result. /// /// The sort is stable. pub fn sorted_by(self, mut cmp: F) -> IntoIter where F: FnMut(&T, &T) -> Ordering, { let mut entries = self.into_entries(); entries.sort_by(move |a, b| cmp(&a.key, &b.key)); IntoIter::new(entries) } /// Sort the set's values in place using a key extraction function. /// /// Computes in **O(n log n)** time and **O(n)** space. The sort is stable. pub fn sort_by_key(&mut self, mut sort_key: F) where K: Ord, F: FnMut(&T) -> K, { self.with_entries(move |entries| { entries.sort_by_key(move |a| sort_key(&a.key)); }); } /// Sort the set's values by their default ordering. /// /// See [`sort_unstable_by`](Self::sort_unstable_by) for details. pub fn sort_unstable(&mut self) where T: Ord, { self.map.sort_unstable_keys() } /// Sort the set's values in place using the comparison function `cmp`. /// /// Computes in **O(n log n)** time. The sort is unstable. 
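    ///
    /// A brief illustrative sketch (not part of the original docs), sorting in
    /// descending order by reversing the comparator:
    ///
    /// ```
    /// use indexmap::IndexSet;
    ///
    /// let mut set = IndexSet::from([3, 1, 2]);
    /// set.sort_unstable_by(|a, b| b.cmp(a));
    /// assert_eq!(set.as_slice(), &[3, 2, 1]);
    /// ```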
pub fn sort_unstable_by(&mut self, mut cmp: F) where F: FnMut(&T, &T) -> Ordering, { self.map.sort_unstable_by(move |a, _, b, _| cmp(a, b)) } /// Sort the values of the set and return a by-value iterator of /// the values with the result. pub fn sorted_unstable_by(self, mut cmp: F) -> IntoIter where F: FnMut(&T, &T) -> Ordering, { let mut entries = self.into_entries(); entries.sort_unstable_by(move |a, b| cmp(&a.key, &b.key)); IntoIter::new(entries) } /// Sort the set's values in place using a key extraction function. /// /// Computes in **O(n log n)** time. The sort is unstable. pub fn sort_unstable_by_key(&mut self, mut sort_key: F) where K: Ord, F: FnMut(&T) -> K, { self.with_entries(move |entries| { entries.sort_unstable_by_key(move |a| sort_key(&a.key)); }); } /// Sort the set's values in place using a key extraction function. /// /// During sorting, the function is called at most once per entry, by using temporary storage /// to remember the results of its evaluation. The order of calls to the function is /// unspecified and may change between versions of `indexmap` or the standard library. /// /// Computes in **O(m n + n log n + c)** time () and **O(n)** space, where the function is /// **O(m)**, *n* is the length of the map, and *c* the capacity. The sort is stable. pub fn sort_by_cached_key(&mut self, mut sort_key: F) where K: Ord, F: FnMut(&T) -> K, { self.with_entries(move |entries| { entries.sort_by_cached_key(move |a| sort_key(&a.key)); }); } /// Search over a sorted set for a value. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. See [`slice::binary_search`] for more details. /// /// Computes in **O(log(n))** time, which is notably less scalable than looking the value up /// using [`get_index_of`][IndexSet::get_index_of], but this can also position missing values. pub fn binary_search(&self, x: &T) -> Result where T: Ord, { self.as_slice().binary_search(x) } /// Search over a sorted set with a comparator function. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. See [`slice::binary_search_by`] for more details. /// /// Computes in **O(log(n))** time. #[inline] pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result where F: FnMut(&'a T) -> Ordering, { self.as_slice().binary_search_by(f) } /// Search over a sorted set with an extraction function. /// /// Returns the position where that value is present, or the position where it can be inserted /// to maintain the sort. See [`slice::binary_search_by_key`] for more details. /// /// Computes in **O(log(n))** time. #[inline] pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result where F: FnMut(&'a T) -> B, B: Ord, { self.as_slice().binary_search_by_key(b, f) } /// Checks if the values of this set are sorted. #[inline] pub fn is_sorted(&self) -> bool where T: PartialOrd, { self.as_slice().is_sorted() } /// Checks if this set is sorted using the given comparator function. #[inline] pub fn is_sorted_by<'a, F>(&'a self, cmp: F) -> bool where F: FnMut(&'a T, &'a T) -> bool, { self.as_slice().is_sorted_by(cmp) } /// Checks if this set is sorted using the given sort-key function. 
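/// An illustrative sketch (example values only):
///
/// ```
/// use indexmap::IndexSet;
///
/// let set = IndexSet::from(["a", "bb", "ccc"]);
/// // Sorted when keyed by length...
/// assert!(set.is_sorted_by_key(|s| s.len()));
/// // ...but not when that key ordering is reversed.
/// assert!(!set.is_sorted_by_key(|s| std::cmp::Reverse(s.len())));
/// ```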
#[inline] pub fn is_sorted_by_key<'a, F, K>(&'a self, sort_key: F) -> bool where F: FnMut(&'a T) -> K, K: PartialOrd, { self.as_slice().is_sorted_by_key(sort_key) } /// Returns the index of the partition point of a sorted set according to the given predicate /// (the index of the first element of the second partition). /// /// See [`slice::partition_point`] for more details. /// /// Computes in **O(log(n))** time. #[must_use] pub fn partition_point
<P>
(&self, pred: P) -> usize where P: FnMut(&T) -> bool, { self.as_slice().partition_point(pred) } /// Reverses the order of the set's values in place. /// /// Computes in **O(n)** time and **O(1)** space. pub fn reverse(&mut self) { self.map.reverse() } /// Returns a slice of all the values in the set. /// /// Computes in **O(1)** time. pub fn as_slice(&self) -> &Slice { Slice::from_slice(self.as_entries()) } /// Converts into a boxed slice of all the values in the set. /// /// Note that this will drop the inner hash table and any excess capacity. pub fn into_boxed_slice(self) -> Box> { Slice::from_boxed(self.into_entries().into_boxed_slice()) } /// Get a value by index /// /// Valid indices are `0 <= index < self.len()`. /// /// Computes in **O(1)** time. pub fn get_index(&self, index: usize) -> Option<&T> { self.as_entries().get(index).map(Bucket::key_ref) } /// Returns a slice of values in the given range of indices. /// /// Valid indices are `0 <= index < self.len()`. /// /// Computes in **O(1)** time. pub fn get_range>(&self, range: R) -> Option<&Slice> { let entries = self.as_entries(); let range = try_simplify_range(range, entries.len())?; entries.get(range).map(Slice::from_slice) } /// Get the first value /// /// Computes in **O(1)** time. pub fn first(&self) -> Option<&T> { self.as_entries().first().map(Bucket::key_ref) } /// Get the last value /// /// Computes in **O(1)** time. pub fn last(&self) -> Option<&T> { self.as_entries().last().map(Bucket::key_ref) } /// Remove the value by index /// /// Valid indices are `0 <= index < self.len()`. /// /// Like [`Vec::swap_remove`], the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the position of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove_index(&mut self, index: usize) -> Option { self.map.swap_remove_index(index).map(|(x, ())| x) } /// Remove the value by index /// /// Valid indices are `0 <= index < self.len()`. /// /// Like [`Vec::remove`], the value is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove_index(&mut self, index: usize) -> Option { self.map.shift_remove_index(index).map(|(x, ())| x) } /// Moves the position of a value from one index to another /// by shifting all other values in-between. /// /// * If `from < to`, the other values will shift down while the targeted value moves up. /// * If `from > to`, the other values will shift up while the targeted value moves down. /// /// ***Panics*** if `from` or `to` are out of bounds. /// /// Computes in **O(n)** time (average). #[track_caller] pub fn move_index(&mut self, from: usize, to: usize) { self.map.move_index(from, to) } /// Swaps the position of two values in the set. /// /// ***Panics*** if `a` or `b` are out of bounds. /// /// Computes in **O(1)** time (average). #[track_caller] pub fn swap_indices(&mut self, a: usize, b: usize) { self.map.swap_indices(a, b) } } /// Access [`IndexSet`] values at indexed positions. 
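/// Indexing with `set[i]` ***panics*** if `i` is out of bounds, while the
/// [`get_index`][IndexSet::get_index] method returns `None` instead; a brief
/// illustrative sketch (example values only):
///
/// ```
/// use indexmap::IndexSet;
///
/// let set = IndexSet::from([10, 20, 30]);
/// assert_eq!(set.get_index(1), Some(&20));
/// assert_eq!(set.get_index(9), None);
/// ```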
/// /// # Examples /// /// ``` /// use indexmap::IndexSet; /// /// let mut set = IndexSet::new(); /// for word in "Lorem ipsum dolor sit amet".split_whitespace() { /// set.insert(word.to_string()); /// } /// assert_eq!(set[0], "Lorem"); /// assert_eq!(set[1], "ipsum"); /// set.reverse(); /// assert_eq!(set[0], "amet"); /// assert_eq!(set[1], "sit"); /// set.sort(); /// assert_eq!(set[0], "Lorem"); /// assert_eq!(set[1], "amet"); /// ``` /// /// ```should_panic /// use indexmap::IndexSet; /// /// let mut set = IndexSet::new(); /// set.insert("foo"); /// println!("{:?}", set[10]); // panics! /// ``` impl Index for IndexSet { type Output = T; /// Returns a reference to the value at the supplied `index`. /// /// ***Panics*** if `index` is out of bounds. fn index(&self, index: usize) -> &T { if let Some(value) = self.get_index(index) { value } else { panic!( "index out of bounds: the len is {len} but the index is {index}", len = self.len() ); } } } impl FromIterator for IndexSet where T: Hash + Eq, S: BuildHasher + Default, { fn from_iter>(iterable: I) -> Self { let iter = iterable.into_iter().map(|x| (x, ())); IndexSet { map: IndexMap::from_iter(iter), } } } #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] impl From<[T; N]> for IndexSet where T: Eq + Hash, { /// # Examples /// /// ``` /// use indexmap::IndexSet; /// /// let set1 = IndexSet::from([1, 2, 3, 4]); /// let set2: IndexSet<_> = [1, 2, 3, 4].into(); /// assert_eq!(set1, set2); /// ``` fn from(arr: [T; N]) -> Self { Self::from_iter(arr) } } impl Extend for IndexSet where T: Hash + Eq, S: BuildHasher, { fn extend>(&mut self, iterable: I) { let iter = iterable.into_iter().map(|x| (x, ())); self.map.extend(iter); } } impl<'a, T, S> Extend<&'a T> for IndexSet where T: Hash + Eq + Copy + 'a, S: BuildHasher, { fn extend>(&mut self, iterable: I) { let iter = iterable.into_iter().copied(); self.extend(iter); } } impl Default for IndexSet where S: Default, { /// Return an empty [`IndexSet`] fn default() -> Self { IndexSet { map: IndexMap::default(), } } } impl PartialEq> for IndexSet where T: Hash + Eq, S1: BuildHasher, S2: BuildHasher, { fn eq(&self, other: &IndexSet) -> bool { self.len() == other.len() && self.is_subset(other) } } impl Eq for IndexSet where T: Eq + Hash, S: BuildHasher, { } impl IndexSet where T: Eq + Hash, S: BuildHasher, { /// Returns `true` if `self` has no elements in common with `other`. pub fn is_disjoint(&self, other: &IndexSet) -> bool where S2: BuildHasher, { if self.len() <= other.len() { self.iter().all(move |value| !other.contains(value)) } else { other.iter().all(move |value| !self.contains(value)) } } /// Returns `true` if all elements of `self` are contained in `other`. pub fn is_subset(&self, other: &IndexSet) -> bool where S2: BuildHasher, { self.len() <= other.len() && self.iter().all(move |value| other.contains(value)) } /// Returns `true` if all elements of `other` are contained in `self`. pub fn is_superset(&self, other: &IndexSet) -> bool where S2: BuildHasher, { other.is_subset(self) } } impl BitAnd<&IndexSet> for &IndexSet where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = IndexSet; /// Returns the set intersection, cloned into a new set. /// /// Values are collected in the same order that they appear in `self`. 
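/// An illustrative sketch (example values only) of that ordering:
///
/// ```
/// use indexmap::IndexSet;
///
/// let a = IndexSet::from([3, 2, 1]);
/// let b = IndexSet::from([2, 3, 4]);
/// let intersection = &a & &b;
/// // `a`'s iteration order is kept, not `b`'s.
/// assert_eq!(intersection.as_slice(), &[3, 2]);
/// ```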
fn bitand(self, other: &IndexSet) -> Self::Output { self.intersection(other).cloned().collect() } } impl BitOr<&IndexSet> for &IndexSet where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = IndexSet; /// Returns the set union, cloned into a new set. /// /// Values from `self` are collected in their original order, followed by /// values that are unique to `other` in their original order. fn bitor(self, other: &IndexSet) -> Self::Output { self.union(other).cloned().collect() } } impl BitXor<&IndexSet> for &IndexSet where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = IndexSet; /// Returns the set symmetric-difference, cloned into a new set. /// /// Values from `self` are collected in their original order, followed by /// values from `other` in their original order. fn bitxor(self, other: &IndexSet) -> Self::Output { self.symmetric_difference(other).cloned().collect() } } impl Sub<&IndexSet> for &IndexSet where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = IndexSet; /// Returns the set difference, cloned into a new set. /// /// Values are collected in the same order that they appear in `self`. fn sub(self, other: &IndexSet) -> Self::Output { self.difference(other).cloned().collect() } } indexmap-2.12.1/src/sval.rs000064400000000000000000000017051046102023000136210ustar 00000000000000#![cfg_attr(docsrs, doc(cfg(feature = "sval")))] use crate::{IndexMap, IndexSet}; use sval::{Stream, Value}; impl Value for IndexMap { fn stream<'sval, ST: Stream<'sval> + ?Sized>(&'sval self, stream: &mut ST) -> sval::Result { stream.map_begin(Some(self.len()))?; for (k, v) in self { stream.map_key_begin()?; stream.value(k)?; stream.map_key_end()?; stream.map_value_begin()?; stream.value(v)?; stream.map_value_end()?; } stream.map_end() } } impl Value for IndexSet { fn stream<'sval, ST: Stream<'sval> + ?Sized>(&'sval self, stream: &mut ST) -> sval::Result { stream.seq_begin(Some(self.len()))?; for value in self { stream.seq_value_begin()?; stream.value(value)?; stream.seq_value_end()?; } stream.seq_end() } } indexmap-2.12.1/src/util.rs000064400000000000000000000044031046102023000136270ustar 00000000000000use core::ops::{Bound, Range, RangeBounds}; pub(crate) fn third(t: (A, B, C)) -> C { t.2 } #[track_caller] pub(crate) fn simplify_range(range: R, len: usize) -> Range where R: RangeBounds, { let start = match range.start_bound() { Bound::Unbounded => 0, Bound::Included(&i) if i <= len => i, Bound::Excluded(&i) if i < len => i + 1, Bound::Included(i) | Bound::Excluded(i) => { panic!("range start index {i} out of range for slice of length {len}") } }; let end = match range.end_bound() { Bound::Unbounded => len, Bound::Excluded(&i) if i <= len => i, Bound::Included(&i) if i < len => i + 1, Bound::Included(i) | Bound::Excluded(i) => { panic!("range end index {i} out of range for slice of length {len}") } }; if start > end { panic!( "range start index {:?} should be <= range end index {:?}", range.start_bound(), range.end_bound() ); } start..end } pub(crate) fn try_simplify_range(range: R, len: usize) -> Option> where R: RangeBounds, { let start = match range.start_bound() { Bound::Unbounded => 0, Bound::Included(&i) if i <= len => i, Bound::Excluded(&i) if i < len => i + 1, _ => return None, }; let end = match range.end_bound() { Bound::Unbounded => len, Bound::Excluded(&i) if i <= len => i, Bound::Included(&i) if i < len => i + 1, _ => return None, }; if start > end { return None; } Some(start..end) } // Generic 
slice equality -- copied from the standard library but adding a custom comparator, // allowing for our `Bucket` wrapper on either or both sides. pub(crate) fn slice_eq(left: &[T], right: &[U], eq: impl Fn(&T, &U) -> bool) -> bool { if left.len() != right.len() { return false; } // Implemented as explicit indexing rather // than zipped iterators for performance reasons. // See PR https://github.com/rust-lang/rust/pull/116846 for i in 0..left.len() { // bound checks are optimized away if !eq(&left[i], &right[i]) { return false; } } true } indexmap-2.12.1/tests/equivalent_trait.rs000064400000000000000000000020411046102023000166010ustar 00000000000000use indexmap::indexmap; use indexmap::Equivalent; use std::hash::Hash; #[derive(Debug, Hash)] pub struct Pair(pub A, pub B); impl PartialEq<(A, B)> for Pair where C: PartialEq, D: PartialEq, { fn eq(&self, rhs: &(A, B)) -> bool { self.0 == rhs.0 && self.1 == rhs.1 } } impl Equivalent for Pair where Pair: PartialEq, A: Hash + Eq, B: Hash + Eq, { fn equivalent(&self, other: &X) -> bool { *self == *other } } #[test] fn test_lookup() { let s = String::from; let map = indexmap! { (s("a"), s("b")) => 1, (s("a"), s("x")) => 2, }; assert!(map.contains_key(&Pair("a", "b"))); assert!(!map.contains_key(&Pair("b", "a"))); } #[test] fn test_string_str() { let s = String::from; let mut map = indexmap! { s("a") => 1, s("b") => 2, s("x") => 3, s("y") => 4, }; assert!(map.contains_key("a")); assert!(!map.contains_key("z")); assert_eq!(map.swap_remove("b"), Some(2)); } indexmap-2.12.1/tests/macros_full_path.rs000064400000000000000000000004031046102023000165430ustar 00000000000000#[test] fn test_create_map() { let _m = indexmap::indexmap! { 1 => 2, 7 => 1, 2 => 2, 3 => 3, }; } #[test] fn test_create_set() { let _s = indexmap::indexset! { 1, 7, 2, 3, }; } indexmap-2.12.1/tests/quick.rs000064400000000000000000000674231046102023000143540ustar 00000000000000use indexmap::{IndexMap, IndexSet}; use itertools::Itertools; use quickcheck::Arbitrary; use quickcheck::Gen; use quickcheck::QuickCheck; use quickcheck::TestResult; use fnv::FnvHasher; use std::hash::{BuildHasher, BuildHasherDefault}; type FnvBuilder = BuildHasherDefault; type IndexMapFnv = IndexMap; use std::cmp::min; use std::collections::HashMap; use std::collections::HashSet; use std::fmt::Debug; use std::hash::Hash; use std::ops::Bound; use std::ops::Deref; use indexmap::map::Entry; use std::collections::hash_map::Entry as StdEntry; fn set<'a, T: 'a, I>(iter: I) -> HashSet where I: IntoIterator, T: Copy + Hash + Eq, { iter.into_iter().copied().collect() } fn indexmap<'a, T: 'a, I>(iter: I) -> IndexMap where I: IntoIterator, T: Copy + Hash + Eq, { IndexMap::from_iter(iter.into_iter().copied().map(|k| (k, ()))) } // Helper macro to allow us to use smaller quickcheck limits under miri. macro_rules! quickcheck_limit { (@as_items $($i:item)*) => ($($i)*); { $( $(#[$m:meta])* fn $fn_name:ident($($arg_name:ident : $arg_ty:ty),*) -> $ret:ty { $($code:tt)* } )* } => ( quickcheck::quickcheck! { @as_items $( #[test] $(#[$m])* fn $fn_name() { fn prop($($arg_name: $arg_ty),*) -> $ret { $($code)* } let mut quickcheck = QuickCheck::new(); if cfg!(miri) { quickcheck = quickcheck .gen(Gen::new(10)) .tests(10) .max_tests(100); } quickcheck.quickcheck(prop as fn($($arg_ty),*) -> $ret); } )* } ) } quickcheck_limit! 
{ fn contains(insert: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } insert.iter().all(|&key| map.get(&key).is_some()) } fn contains_not(insert: Vec, not: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } let nots = &set(¬) - &set(&insert); nots.iter().all(|&key| map.get(&key).is_none()) } fn insert_remove(insert: Vec, remove: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } for &key in &remove { map.swap_remove(&key); } let elements = &set(&insert) - &set(&remove); map.len() == elements.len() && map.iter().count() == elements.len() && elements.iter().all(|k| map.get(k).is_some()) } fn insertion_order(insert: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } itertools::assert_equal(insert.iter().unique(), map.keys()); true } fn insert_sorted(insert: Vec<(u32, u32)>) -> bool { let mut hmap = HashMap::new(); let mut map = IndexMap::new(); let mut map2 = IndexMap::new(); for &(key, value) in &insert { hmap.insert(key, value); map.insert_sorted(key, value); match map2.entry(key) { Entry::Occupied(e) => *e.into_mut() = value, Entry::Vacant(e) => { e.insert_sorted(value); } } } itertools::assert_equal(hmap.iter().sorted(), &map); itertools::assert_equal(&map, &map2); true } fn insert_sorted_by(insert: Vec<(u32, u32)>) -> bool { let mut hmap = HashMap::new(); let mut map = IndexMap::new(); let mut map2 = IndexMap::new(); for &(key, value) in &insert { hmap.insert(key, value); map.insert_sorted_by(key, value, |key1, _, key2, _| key2.cmp(key1)); match map2.entry(key) { Entry::Occupied(e) => *e.into_mut() = value, Entry::Vacant(e) => { e.insert_sorted_by(value, |key1, _, key2, _| key2.cmp(key1)); } } } let hsorted = hmap.iter().sorted_by(|(key1, _), (key2, _)| key2.cmp(key1)); itertools::assert_equal(hsorted, &map); itertools::assert_equal(&map, &map2); true } fn insert_sorted_by_key(insert: Vec<(i32, u32)>) -> bool { let mut hmap = HashMap::new(); let mut map = IndexMap::new(); let mut map2 = IndexMap::new(); for &(key, value) in &insert { hmap.insert(key, value); map.insert_sorted_by_key(key, value, |&k, _| (k.unsigned_abs(), k)); match map2.entry(key) { Entry::Occupied(e) => *e.into_mut() = value, Entry::Vacant(e) => { e.insert_sorted_by_key(value, |&k, _| (k.unsigned_abs(), k)); } } } let hsorted = hmap.iter().sorted_by_key(|(&k, _)| (k.unsigned_abs(), k)); itertools::assert_equal(hsorted, &map); itertools::assert_equal(&map, &map2); true } fn replace_index(insert: Vec, index: u8, new_key: u8) -> TestResult { if insert.is_empty() { return TestResult::discard(); } let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } let mut index = usize::from(index); if index < map.len() { match map.replace_index(index, new_key) { Ok(old_key) => { assert!(old_key == new_key || !map.contains_key(&old_key)); } Err((i, key)) => { assert_eq!(key, new_key); index = i; } } assert_eq!(map.get_index_of(&new_key), Some(index)); assert_eq!(map.get_index(index), Some((&new_key, &()))); TestResult::passed() } else { TestResult::must_fail(move || map.replace_index(index, new_key)) } } fn vacant_replace_index(insert: Vec, index: u8, new_key: u8) -> TestResult { if insert.is_empty() { return TestResult::discard(); } let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } let index = usize::from(index); if let Some((&old_key, &())) = map.get_index(index) { match map.entry(new_key) { Entry::Occupied(_) => return 
TestResult::discard(), Entry::Vacant(entry) => { let (replaced_key, entry) = entry.replace_index(index); assert_eq!(old_key, replaced_key); assert_eq!(*entry.key(), new_key); } }; assert!(!map.contains_key(&old_key)); assert_eq!(map.get_index_of(&new_key), Some(index)); assert_eq!(map.get_index(index), Some((&new_key, &()))); TestResult::passed() } else { TestResult::must_fail(move || map.replace_index(index, new_key)) } } fn pop(insert: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } let mut pops = Vec::new(); while let Some((key, _v)) = map.pop() { pops.push(key); } pops.reverse(); itertools::assert_equal(insert.iter().unique(), &pops); true } fn with_cap(template: Vec<()>) -> bool { let cap = template.len(); let map: IndexMap = IndexMap::with_capacity(cap); println!("wish: {}, got: {} (diff: {})", cap, map.capacity(), map.capacity() as isize - cap as isize); map.capacity() >= cap } fn drain_full(insert: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } let mut clone = map.clone(); let drained = clone.drain(..); for (key, _) in drained { map.swap_remove(&key); } map.is_empty() } fn drain_bounds(insert: Vec, range: (Bound, Bound)) -> TestResult { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } // First see if `Vec::drain` is happy with this range. let result = std::panic::catch_unwind(|| { let mut keys: Vec = map.keys().copied().collect(); keys.drain(range); keys }); if let Ok(keys) = result { map.drain(range); // Check that our `drain` matches the same key order. assert!(map.keys().eq(&keys)); // Check that hash lookups all work too. assert!(keys.iter().all(|key| map.contains_key(key))); TestResult::passed() } else { // If `Vec::drain` panicked, so should we. 
TestResult::must_fail(move || { map.drain(range); }) } } fn extract_if_odd(insert: Vec) -> bool { let mut map = IndexMap::new(); for &x in &insert { map.insert(x, x.to_string()); } let (odd, even): (Vec<_>, Vec<_>) = map.keys().copied().partition(|k| k % 2 == 1); let extracted: Vec<_> = map .extract_if(.., |k, _| k % 2 == 1) .map(|(k, _)| k) .collect(); even.iter().all(|k| map.contains_key(k)) && map.keys().eq(&even) && extracted == odd } fn extract_if_odd_limit(insert: Vec, limit: usize) -> bool { let mut map = IndexMap::new(); for &x in &insert { map.insert(x, x.to_string()); } let limit = limit % (map.len() + 1); let mut i = 0; let (odd, other): (Vec<_>, Vec<_>) = map.keys().copied().partition(|k| { k % 2 == 1 && i < limit && { i += 1; true } }); let extracted: Vec<_> = map .extract_if(.., |k, _| k % 2 == 1) .map(|(k, _)| k) .take(limit) .collect(); other.iter().all(|k| map.contains_key(k)) && map.keys().eq(&other) && extracted == odd } fn shift_remove(insert: Vec, remove: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } for &key in &remove { map.shift_remove(&key); } let elements = &set(&insert) - &set(&remove); // Check that order is preserved after removals let mut iter = map.keys(); for &key in insert.iter().unique() { if elements.contains(&key) { assert_eq!(Some(&key), iter.next()); } } map.len() == elements.len() && map.iter().count() == elements.len() && elements.iter().all(|k| map.get(k).is_some()) } fn indexing(insert: Vec) -> bool { let mut map: IndexMap<_, _> = insert.into_iter().map(|x| (x, x)).collect(); let set: IndexSet<_> = map.keys().copied().collect(); assert_eq!(map.len(), set.len()); for (i, &key) in set.iter().enumerate() { assert_eq!(map.get_index(i), Some((&key, &key))); assert_eq!(set.get_index(i), Some(&key)); assert_eq!(map[i], key); assert_eq!(set[i], key); *map.get_index_mut(i).unwrap().1 >>= 1; map[i] <<= 1; } set.iter().enumerate().all(|(i, &key)| { let value = key & !1; map[&key] == value && map[i] == value }) } // Use `u8` test indices so quickcheck is less likely to go out of bounds. 
fn set_swap_indices(vec: Vec, a: u8, b: u8) -> TestResult { let mut set = IndexSet::::from_iter(vec); let a = usize::from(a); let b = usize::from(b); if a >= set.len() || b >= set.len() { return TestResult::discard(); } let mut vec = Vec::from_iter(set.iter().cloned()); vec.swap(a, b); set.swap_indices(a, b); // Check both iteration order and hash lookups assert!(set.iter().eq(vec.iter())); assert!(vec.iter().enumerate().all(|(i, x)| { set.get_index_of(x) == Some(i) })); TestResult::passed() } fn map_swap_indices(vec: Vec, from: u8, to: u8) -> TestResult { test_map_swap_indices(vec, from, to, IndexMap::swap_indices) } fn occupied_entry_swap_indices(vec: Vec, from: u8, to: u8) -> TestResult { test_map_swap_indices(vec, from, to, |map, from, to| { let key = map.keys()[from]; match map.entry(key) { Entry::Occupied(entry) => entry.swap_indices(to), _ => unreachable!(), } }) } fn indexed_entry_swap_indices(vec: Vec, from: u8, to: u8) -> TestResult { test_map_swap_indices(vec, from, to, |map, from, to| { map.get_index_entry(from).unwrap().swap_indices(to); }) } fn raw_occupied_entry_swap_indices(vec: Vec, from: u8, to: u8) -> TestResult { use indexmap::map::raw_entry_v1::{RawEntryApiV1, RawEntryMut}; test_map_swap_indices(vec, from, to, |map, from, to| { let key = map.keys()[from]; match map.raw_entry_mut_v1().from_key(&key) { RawEntryMut::Occupied(entry) => entry.swap_indices(to), _ => unreachable!(), } }) } // Use `u8` test indices so quickcheck is less likely to go out of bounds. fn set_move_index(vec: Vec, from: u8, to: u8) -> TestResult { let mut set = IndexSet::::from_iter(vec); let from = usize::from(from); let to = usize::from(to); if from >= set.len() || to >= set.len() { return TestResult::discard(); } let mut vec = Vec::from_iter(set.iter().cloned()); let x = vec.remove(from); vec.insert(to, x); set.move_index(from, to); // Check both iteration order and hash lookups assert!(set.iter().eq(vec.iter())); assert!(vec.iter().enumerate().all(|(i, x)| { set.get_index_of(x) == Some(i) })); TestResult::passed() } fn map_move_index(vec: Vec, from: u8, to: u8) -> TestResult { test_map_move_index(vec, from, to, IndexMap::move_index) } fn occupied_entry_move_index(vec: Vec, from: u8, to: u8) -> TestResult { test_map_move_index(vec, from, to, |map, from, to| { let key = map.keys()[from]; match map.entry(key) { Entry::Occupied(entry) => entry.move_index(to), _ => unreachable!(), } }) } fn indexed_entry_move_index(vec: Vec, from: u8, to: u8) -> TestResult { test_map_move_index(vec, from, to, |map, from, to| { map.get_index_entry(from).unwrap().move_index(to); }) } fn raw_occupied_entry_move_index(vec: Vec, from: u8, to: u8) -> TestResult { use indexmap::map::raw_entry_v1::{RawEntryApiV1, RawEntryMut}; test_map_move_index(vec, from, to, |map, from, to| { let key = map.keys()[from]; match map.raw_entry_mut_v1().from_key(&key) { RawEntryMut::Occupied(entry) => entry.move_index(to), _ => unreachable!(), } }) } fn occupied_entry_shift_insert(vec: Vec, i: u8) -> TestResult { test_map_shift_insert(vec, i, |map, i, key| { match map.entry(key) { Entry::Vacant(entry) => entry.shift_insert(i, ()), _ => unreachable!(), }; }) } fn raw_occupied_entry_shift_insert(vec: Vec, i: u8) -> TestResult { use indexmap::map::raw_entry_v1::{RawEntryApiV1, RawEntryMut}; test_map_shift_insert(vec, i, |map, i, key| { match map.raw_entry_mut_v1().from_key(&key) { RawEntryMut::Vacant(entry) => entry.shift_insert(i, key, ()), _ => unreachable!(), }; }) } } fn test_map_swap_indices(vec: Vec, a: u8, b: u8, swap_indices: F) -> 
TestResult where F: FnOnce(&mut IndexMap, usize, usize), { let mut map = IndexMap::::from_iter(vec.into_iter().map(|k| (k, ()))); let a = usize::from(a); let b = usize::from(b); if a >= map.len() || b >= map.len() { return TestResult::discard(); } let mut vec = Vec::from_iter(map.keys().copied()); vec.swap(a, b); swap_indices(&mut map, a, b); // Check both iteration order and hash lookups assert!(map.keys().eq(vec.iter())); assert!(vec .iter() .enumerate() .all(|(i, x)| { map.get_index_of(x) == Some(i) })); TestResult::passed() } fn test_map_move_index(vec: Vec, from: u8, to: u8, move_index: F) -> TestResult where F: FnOnce(&mut IndexMap, usize, usize), { let mut map = IndexMap::::from_iter(vec.into_iter().map(|k| (k, ()))); let from = usize::from(from); let to = usize::from(to); if from >= map.len() || to >= map.len() { return TestResult::discard(); } let mut vec = Vec::from_iter(map.keys().copied()); let x = vec.remove(from); vec.insert(to, x); move_index(&mut map, from, to); // Check both iteration order and hash lookups assert!(map.keys().eq(vec.iter())); assert!(vec .iter() .enumerate() .all(|(i, x)| { map.get_index_of(x) == Some(i) })); TestResult::passed() } fn test_map_shift_insert(vec: Vec, i: u8, shift_insert: F) -> TestResult where F: FnOnce(&mut IndexMap, usize, u8), { let mut map = IndexMap::::from_iter(vec.into_iter().map(|k| (k, ()))); let i = usize::from(i); if i >= map.len() { return TestResult::discard(); } let mut vec = Vec::from_iter(map.keys().copied()); let x = vec.pop().unwrap(); vec.insert(i, x); let (last, ()) = map.pop().unwrap(); assert_eq!(x, last); map.shrink_to_fit(); // so we might have to grow and rehash the table shift_insert(&mut map, i, last); // Check both iteration order and hash lookups assert!(map.keys().eq(vec.iter())); assert!(vec .iter() .enumerate() .all(|(i, x)| { map.get_index_of(x) == Some(i) })); TestResult::passed() } use crate::Op::*; #[derive(Copy, Clone, Debug)] enum Op { Add(K, V), Remove(K), AddEntry(K, V), RemoveEntry(K), } impl Arbitrary for Op where K: Arbitrary, V: Arbitrary, { fn arbitrary(g: &mut Gen) -> Self { match u32::arbitrary(g) % 4 { 0 => Add(K::arbitrary(g), V::arbitrary(g)), 1 => AddEntry(K::arbitrary(g), V::arbitrary(g)), 2 => Remove(K::arbitrary(g)), _ => RemoveEntry(K::arbitrary(g)), } } } fn do_ops(ops: &[Op], a: &mut IndexMap, b: &mut HashMap) where K: Hash + Eq + Clone, V: Clone, S: BuildHasher, { for op in ops { match *op { Add(ref k, ref v) => { a.insert(k.clone(), v.clone()); b.insert(k.clone(), v.clone()); } AddEntry(ref k, ref v) => { a.entry(k.clone()).or_insert_with(|| v.clone()); b.entry(k.clone()).or_insert_with(|| v.clone()); } Remove(ref k) => { a.swap_remove(k); b.remove(k); } RemoveEntry(ref k) => { if let Entry::Occupied(ent) = a.entry(k.clone()) { ent.swap_remove_entry(); } if let StdEntry::Occupied(ent) = b.entry(k.clone()) { ent.remove_entry(); } } } //println!("{:?}", a); } } fn assert_maps_equivalent(a: &IndexMap, b: &HashMap) -> bool where K: Hash + Eq + Debug, V: Eq + Debug, { assert_eq!(a.len(), b.len()); assert_eq!(a.iter().next().is_some(), b.iter().next().is_some()); for key in a.keys() { assert!(b.contains_key(key), "b does not contain {:?}", key); } for key in b.keys() { assert!(a.get(key).is_some(), "a does not contain {:?}", key); } for key in a.keys() { assert_eq!(a[key], b[key]); } true } quickcheck_limit! 
{ fn operations_i8(ops: Large>>) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops, &mut map, &mut reference); assert_maps_equivalent(&map, &reference) } fn operations_string(ops: Vec>) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops, &mut map, &mut reference); assert_maps_equivalent(&map, &reference) } fn keys_values(ops: Large>>) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops, &mut map, &mut reference); let mut visit = IndexMap::new(); for (k, v) in map.keys().zip(map.values()) { assert_eq!(&map[k], v); assert!(!visit.contains_key(k)); visit.insert(*k, *v); } assert_eq!(visit.len(), reference.len()); true } fn keys_values_mut(ops: Large>>) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops, &mut map, &mut reference); let mut visit = IndexMap::new(); let keys = Vec::from_iter(map.keys().copied()); for (k, v) in keys.iter().zip(map.values_mut()) { assert_eq!(&reference[k], v); assert!(!visit.contains_key(k)); visit.insert(*k, *v); } assert_eq!(visit.len(), reference.len()); true } fn equality(ops1: Vec>, removes: Vec) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops1, &mut map, &mut reference); let mut ops2 = ops1.clone(); for &r in &removes { if !ops2.is_empty() { let i = r % ops2.len(); ops2.remove(i); } } let mut map2 = IndexMapFnv::default(); let mut reference2 = HashMap::new(); do_ops(&ops2, &mut map2, &mut reference2); assert_eq!(map == map2, reference == reference2); true } fn retain_ordered(keys: Large>, remove: Large>) -> () { let mut map = indexmap(keys.iter()); let initial_map = map.clone(); // deduplicated in-order input let remove_map = indexmap(remove.iter()); let keys_s = set(keys.iter()); let remove_s = set(remove.iter()); let answer = &keys_s - &remove_s; map.retain(|k, _| !remove_map.contains_key(k)); // check the values assert_eq!(map.len(), answer.len()); for key in &answer { assert!(map.contains_key(key)); } // check the order itertools::assert_equal(map.keys(), initial_map.keys().filter(|&k| !remove_map.contains_key(k))); } fn sort_1(keyvals: Large>) -> () { let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); let mut answer = keyvals.0; answer.sort_by_key(|t| t.0); // reverse dedup: Because IndexMap::from_iter keeps the last value for // identical keys answer.reverse(); answer.dedup_by_key(|t| t.0); answer.reverse(); map.sort_by(|k1, _, k2, _| Ord::cmp(k1, k2)); // check it contains all the values it should for &(key, val) in &answer { assert_eq!(map[&key], val); } // check the order let mapv = Vec::from_iter(map); assert_eq!(answer, mapv); } fn sort_2(keyvals: Large>) -> () { let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); map.sort_by(|_, v1, _, v2| Ord::cmp(v1, v2)); assert_sorted_by_key(map, |t| t.1); } fn sort_3(keyvals: Large>) -> () { let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); map.sort_by_cached_key(|&k, _| std::cmp::Reverse(k)); assert_sorted_by_key(map, |t| std::cmp::Reverse(t.0)); } fn reverse(keyvals: Large>) -> () { let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); fn generate_answer(input: &Vec<(i8, i8)>) -> Vec<(i8, i8)> { // to mimic what `IndexMap::from_iter` does: // need to get (A) the unique keys in forward order, and (B) the // last value of each of those keys. 
// create (A): an iterable that yields the unique keys in ltr order let mut seen_keys = HashSet::new(); let unique_keys_forward = input.iter().filter_map(move |(k, _)| { if seen_keys.contains(k) { None } else { seen_keys.insert(*k); Some(*k) } }); // create (B): a mapping of keys to the last value seen for that key // this is the same as reversing the input and taking the first // value seen for that key! let mut last_val_per_key = HashMap::new(); for &(k, v) in input.iter().rev() { if !last_val_per_key.contains_key(&k) { last_val_per_key.insert(k, v); } } // iterate over the keys in (A) in order, and match each one with // the corresponding last value from (B) let mut ans: Vec<_> = unique_keys_forward .map(|k| (k, *last_val_per_key.get(&k).unwrap())) .collect(); // finally, since this test is testing `.reverse()`, reverse the // answer in-place ans.reverse(); ans } let answer = generate_answer(&keyvals.0); // perform the work map.reverse(); // check it contains all the values it should for &(key, val) in &answer { assert_eq!(map[&key], val); } // check the order let mapv = Vec::from_iter(map); assert_eq!(answer, mapv); } } fn assert_sorted_by_key(iterable: I, key: Key) where I: IntoIterator, Key: Fn(&I::Item) -> X, X: Ord, { let input = Vec::from_iter(iterable); let mut sorted = input.clone(); sorted.sort_by_key(key); assert_eq!(input, sorted); } #[derive(Clone, Debug, Hash, PartialEq, Eq)] struct Alpha(String); impl Deref for Alpha { type Target = String; fn deref(&self) -> &String { &self.0 } } const ALPHABET: &[u8] = b"abcdefghijklmnopqrstuvwxyz"; impl Arbitrary for Alpha { fn arbitrary(g: &mut Gen) -> Self { let len = usize::arbitrary(g) % g.size(); let len = min(len, 16); Alpha( (0..len) .map(|_| ALPHABET[usize::arbitrary(g) % ALPHABET.len()] as char) .collect(), ) } fn shrink(&self) -> Box> { Box::new((**self).shrink().map(Alpha)) } } /// quickcheck Arbitrary adaptor -- make a larger vec #[derive(Clone, Debug)] struct Large(T); impl Deref for Large { type Target = T; fn deref(&self) -> &T { &self.0 } } impl Arbitrary for Large> where T: Arbitrary, { fn arbitrary(g: &mut Gen) -> Self { let len = usize::arbitrary(g) % (g.size() * 10); Large((0..len).map(|_| T::arbitrary(g)).collect()) } fn shrink(&self) -> Box> { Box::new((**self).shrink().map(Large)) } } indexmap-2.12.1/tests/tests.rs000064400000000000000000000007431046102023000143720ustar 00000000000000use indexmap::{indexmap, indexset}; #[test] fn test_sort() { let m = indexmap! { 1 => 2, 7 => 1, 2 => 2, 3 => 3, }; itertools::assert_equal( m.sorted_by(|_k1, v1, _k2, v2| v1.cmp(v2)), vec![(7, 1), (1, 2), (2, 2), (3, 3)], ); } #[test] fn test_sort_set() { let s = indexset! { 1, 7, 2, 3, }; itertools::assert_equal(s.sorted_by(|v1, v2| v1.cmp(v2)), vec![1, 2, 3, 7]); }
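// Added illustrative sketch (not part of the crate's original test files): exercises the
// ordering documented for the `|`, `^`, and `-` operators on `IndexSet` in `src/set.rs`.
#[test]
fn test_set_operator_order_sketch() {
    use indexmap::IndexSet;

    let a = IndexSet::from([1, 2, 3]);
    let b = IndexSet::from([3, 4, 1]);

    // Union: `a`'s values in their order, then values unique to `b`.
    let or = &a | &b;
    assert!(or.iter().eq(&[1, 2, 3, 4]));

    // Symmetric difference: values unique to `a`, then values unique to `b`.
    let xor = &a ^ &b;
    assert!(xor.iter().eq(&[2, 4]));

    // Difference: values of `a` not in `b`, in `a`'s order.
    let diff = &a - &b;
    assert!(diff.iter().eq(&[2]));
}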