seize-0.5.0/.cargo_vcs_info.json0000644000000001360000000000100121550ustar { "git": { "sha1": "35b59d972995665322f0cb8e52da81db331f6f8c" }, "path_in_vcs": "" }seize-0.5.0/.github/DOCS.md000064400000000000000000000000771046102023000133630ustar 00000000000000Workflows adapted from https://github.com/jonhoo/rust-ci-conf. seize-0.5.0/.github/workflows/check.yml000064400000000000000000000105071046102023000161450ustar 00000000000000# This workflow runs whenever a PR is opened or updated, or a commit is pushed to main. It runs # several checks: # - fmt: checks that the code is formatted according to rustfmt # - clippy: checks that the code does not contain any clippy warnings # - doc: checks that the code can be documented without errors # - hack: check combinations of feature flags # - msrv: check that the msrv specified in the crate is correct permissions: contents: read # This configuration allows maintainers of this repo to create a branch and pull request based on # the new branch. Restricting the push trigger to the main branch ensures that the PR only gets # built once. on: push: branches: [master] pull_request: # If new code is pushed to a PR branch, then cancel in progress workflows for that PR. Ensures that # we don't waste CI time, and returns results quicker https://github.com/jonhoo/rust-ci-conf/pull/5 concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true name: check jobs: fmt: runs-on: ubuntu-latest name: stable / fmt steps: - uses: actions/checkout@v4 with: submodules: true - name: Install stable uses: dtolnay/rust-toolchain@stable with: components: rustfmt - name: cargo fmt --check run: cargo fmt --check clippy: runs-on: ubuntu-latest name: ${{ matrix.toolchain }} / clippy permissions: contents: read checks: write strategy: fail-fast: false matrix: # Get early warning of new lints which are regularly introduced in beta channels. toolchain: [stable, beta] steps: - uses: actions/checkout@v4 with: submodules: true - name: Install ${{ matrix.toolchain }} uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.toolchain }} components: clippy - name: cargo clippy uses: giraffate/clippy-action@v1 with: reporter: 'github-pr-check' github_token: ${{ secrets.GITHUB_TOKEN }} semver: runs-on: ubuntu-latest name: semver steps: - uses: actions/checkout@v4 with: submodules: true - name: Install stable uses: dtolnay/rust-toolchain@stable with: components: rustfmt - name: cargo-semver-checks uses: obi1kenobi/cargo-semver-checks-action@v2 doc: # run docs generation on nightly rather than stable. This enables features like # https://doc.rust-lang.org/beta/unstable-book/language-features/doc-cfg.html which allows an # API be documented as only available in some specific platforms. 
runs-on: ubuntu-latest name: nightly / doc steps: - uses: actions/checkout@v4 with: submodules: true - name: Install nightly uses: dtolnay/rust-toolchain@nightly - name: cargo doc run: cargo doc --no-deps --all-features env: RUSTDOCFLAGS: --cfg docsrs hack: # cargo-hack checks combinations of feature flags to ensure that features are all additive # which is required for feature unification runs-on: ubuntu-latest name: ubuntu / stable / features steps: - uses: actions/checkout@v4 with: submodules: true - name: Install stable uses: dtolnay/rust-toolchain@stable - name: cargo install cargo-hack uses: taiki-e/install-action@cargo-hack # intentionally no target specifier; see https://github.com/jonhoo/rust-ci-conf/pull/4 # --feature-powerset runs for every combination of features - name: cargo hack run: cargo hack --feature-powerset check msrv: # check that we can build using the minimal rust version that is specified by this crate runs-on: ubuntu-latest # we use a matrix here just because env can't be used in job names # https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability strategy: matrix: msrv: ["1.72.0"] name: ubuntu / ${{ matrix.msrv }} steps: - uses: actions/checkout@v4 with: submodules: true - name: Install ${{ matrix.msrv }} uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.msrv }} - name: cargo +${{ matrix.msrv }} check run: cargo check seize-0.5.0/.github/workflows/safety.yml000064400000000000000000000052021046102023000163570ustar 00000000000000# This workflow runs checks for unsafe code. In crates that don't have any unsafe code, this can be # removed. Runs: # - miri - detects undefined behavior and memory leaks # - address sanitizer - detects memory errors # - leak sanitizer - detects memory leaks # See check.yml for information about how the concurrency cancellation and workflow triggering works permissions: contents: read on: push: branches: [master] pull_request: concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true name: safety jobs: sanitizers: runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v4 with: submodules: true - name: Install nightly uses: dtolnay/rust-toolchain@nightly - run: | # to get the symbolizer for debug symbol resolution sudo apt install llvm # to fix buggy leak analyzer: # https://github.com/japaric/rust-san#unrealiable-leaksanitizer # ensure there's a profile.dev section if ! 
grep -qE '^[ \t]*[profile.dev]' Cargo.toml; then echo >> Cargo.toml echo '[profile.dev]' >> Cargo.toml fi # remove pre-existing opt-levels in profile.dev sed -i '/^\s*\[profile.dev\]/,/^\s*\[/ {/^\s*opt-level/d}' Cargo.toml # now set opt-level to 1 sed -i '/^\s*\[profile.dev\]/a opt-level = 1' Cargo.toml cat Cargo.toml name: Enable debug symbols - name: cargo test -Zsanitizer=address # only --lib --tests b/c of https://github.com/rust-lang/rust/issues/53945 run: cargo test --lib --tests --all-features --target x86_64-unknown-linux-gnu env: ASAN_OPTIONS: "detect_odr_violation=0:detect_leaks=0" RUSTFLAGS: "-Z sanitizer=address --cfg seize_asan" - name: cargo test -Zsanitizer=leak if: always() run: cargo test --all-features --target x86_64-unknown-linux-gnu env: LSAN_OPTIONS: "suppressions=lsan-suppressions.txt" RUSTFLAGS: "-Z sanitizer=leak" miri: runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v4 with: submodules: true - run: | echo "NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)" >> $GITHUB_ENV - name: Install ${{ env.NIGHTLY }} uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env.NIGHTLY }} components: miri - name: cargo miri test run: cargo miri test seize-0.5.0/.github/workflows/test.yml000064400000000000000000000043251046102023000160500ustar 00000000000000# This is the main CI workflow that runs the test suite on all pushes to main and all pull requests. # It runs the following jobs: # - required: runs the test suite on ubuntu with stable and beta rust toolchains # requirements of this crate, and its dependencies # - os-check: runs the test suite on mac and windows # See check.yml for information about how the concurrency cancellation and workflow triggering works permissions: contents: read on: push: branches: [master] pull_request: concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true name: test jobs: required: runs-on: ubuntu-latest timeout-minutes: 15 name: ubuntu / ${{ matrix.toolchain }} strategy: matrix: # run on stable and beta to ensure that tests won't break on the next version of the rust # toolchain toolchain: [stable, beta] steps: - uses: actions/checkout@v4 with: submodules: true - name: Install ${{ matrix.toolchain }} uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.toolchain }} - name: cargo generate-lockfile # enable this ci template to run regardless of whether the lockfile is checked in or not if: hashFiles('Cargo.lock') == '' run: cargo generate-lockfile # https://twitter.com/jonhoo/status/1571290371124260865 - name: cargo test --locked run: cargo test --locked --all-features --all-targets # https://github.com/rust-lang/cargo/issues/6669 - name: cargo test --doc run: cargo test --locked --all-features --doc os-check: # run cargo test on mac and windows runs-on: ${{ matrix.os }} timeout-minutes: 15 name: ${{ matrix.os }} / stable strategy: fail-fast: false matrix: os: [macos-latest, windows-latest] steps: - uses: actions/checkout@v4 with: submodules: true - name: Install stable uses: dtolnay/rust-toolchain@stable - name: cargo generate-lockfile if: hashFiles('Cargo.lock') == '' run: cargo generate-lockfile - name: cargo test run: cargo test --locked --all-features --all-targets seize-0.5.0/.gitignore000064400000000000000000000000231046102023000127300ustar 00000000000000/target Cargo.lock seize-0.5.0/Cargo.lock0000644000000402710000000000100101340ustar # This file is automatically @generated by Cargo. 
# It is not intended for manual editing. version = 3 [[package]] name = "aho-corasick" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", "winapi", ] [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bumpalo" version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "bitflags", "textwrap", "unicode-width", ] [[package]] name = "criterion" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", "clap", "criterion-plot", "csv", "itertools", "lazy_static", "num-traits", "oorandom", "plotters", "rayon", "regex", "serde", "serde_cbor", "serde_derive", "serde_json", "tinytemplate", "walkdir", ] [[package]] name = "criterion-plot" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876" dependencies = [ "cast", "itertools", ] [[package]] name = "crossbeam-deque" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "csv" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", "itoa", "ryu", "serde", ] [[package]] name = "csv-core" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70" dependencies = [ "memchr", ] [[package]] name = "either" 
version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "half" version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" [[package]] name = "hermit-abi" version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" dependencies = [ "libc", ] [[package]] name = "itertools" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "log" version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "once_cell" version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "plotters" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", "plotters-svg", "wasm-bindgen", "web-sys", ] [[package]] name = "plotters-backend" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] [[package]] name = "proc-macro2" version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] [[package]] name = "quote" 
version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] [[package]] name = "rayon" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", "crossbeam-utils", ] [[package]] name = "regex" version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "ryu" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "seize" version = "0.5.0" dependencies = [ "criterion", "crossbeam-epoch", "libc", "windows-sys 0.52.0", ] [[package]] name = "serde" version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] [[package]] name = "serde_cbor" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" dependencies = [ "half", "serde", ] [[package]] name = "serde_derive" version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", "memchr", "ryu", "serde", ] [[package]] name = "syn" version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "textwrap" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" dependencies = [ "unicode-width", ] [[package]] name = "tinytemplate" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ "serde", "serde_json", ] [[package]] name = "unicode-ident" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-width" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "walkdir" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "wasm-bindgen" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "web-sys" version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets", ] [[package]] name = "windows-sys" version = "0.59.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" seize-0.5.0/Cargo.toml0000644000000033200000000000100101510ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.72.0" name = "seize" version = "0.5.0" authors = ["Ibraheem Ahmed "] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "Fast, efficient, and predictable memory reclamation for concurrent data structures." 
readme = "README.md" keywords = [ "lock-free", "rcu", "atomic", "garbage", "concurrency", ] categories = [ "concurrency", "memory-management", ] license = "MIT" repository = "https://github.com/ibraheemdev/seize" [lib] name = "seize" path = "src/lib.rs" [[test]] name = "lib" path = "tests/lib.rs" [[bench]] name = "single_thread" path = "benches/single_thread.rs" harness = false [[bench]] name = "stack" path = "benches/stack.rs" harness = false [dependencies.libc] version = "0.2" optional = true [dev-dependencies.criterion] version = "0.3.5" [dev-dependencies.crossbeam-epoch] version = "0.9.8" [features] default = ["fast-barrier"] fast-barrier = [ "windows-sys", "libc", ] [target."cfg(windows)".dependencies.windows-sys] version = "0.52" features = ["Win32_System_Threading"] optional = true [lints.rust.unexpected_cfgs] level = "warn" priority = 0 check-cfg = ["cfg(seize_asan)"] seize-0.5.0/Cargo.toml.orig000064400000000000000000000021341046102023000136340ustar 00000000000000[package] name = "seize" version = "0.5.0" edition = "2021" license = "MIT" authors = ["Ibraheem Ahmed "] description = "Fast, efficient, and predictable memory reclamation for concurrent data structures." repository = "https://github.com/ibraheemdev/seize" keywords = ["lock-free", "rcu", "atomic", "garbage", "concurrency"] categories = ["concurrency", "memory-management"] rust-version = "1.72.0" [dependencies] libc = { version = "0.2", optional = true } [target.'cfg(windows)'.dependencies] windows-sys = { version = "0.52", features = ["Win32_System_Threading"], optional = true } [features] default = ["fast-barrier"] # Enables runtime detection of fast memory barriers on Linux and Windows. fast-barrier = ["windows-sys", "libc"] [dev-dependencies] criterion = "0.3.5" crossbeam-epoch = "0.9.8" haphazard = { git = "https://github.com/jonhoo/haphazard", rev = "e0e18f60f78652a63aba235be854f87d106c1a1b" } [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(seize_asan)'] } [[bench]] name = "stack" harness = false [[bench]] name = "single_thread" harness = false seize-0.5.0/LICENSE000064400000000000000000000020571046102023000117560ustar 00000000000000MIT License Copyright (c) 2022 Ibraheem Ahmed Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. seize-0.5.0/README.md000064400000000000000000000060221046102023000122240ustar 00000000000000# `seize` [crates.io](https://crates.io/crates/seize) [github](https://github.com/ibraheemdev/seize) [docs.rs](https://docs.rs/seize) Fast, efficient, and predictable memory reclamation for concurrent data structures. 
Refer to the [quick-start guide] to get started. ## Background Concurrent data structures are faced with the problem of deciding when it is safe to free memory. Despite an object being logically removed, it may still be accessible by other threads that are holding references to it, and thus it is not safe to free immediately. Over the years, many algorithms have been devised to solve this problem. However, most traditional memory reclamation schemes make a tradeoff between performance and efficiency. For example, [hazard pointers] track individual pointers, making them very memory efficient but also relatively slow. On the other hand, [epoch based reclamation] is fast and lightweight, but lacks predictability, requiring periodic checks to determine when it is safe to free memory. This can cause reclamation to trigger unpredictably, leading to poor latency distributions. Alternative epoch-based schemes forgo workload balancing, relying on the thread that retires an object always being the one that frees it. While this can avoid synchronization costs, it also leads to unbalanced reclamation in read-dominated workloads; parallelism is reduced when only a fraction of threads are writing, degrading memory efficiency as well as performance. ## Implementation `seize` is based on the [hyaline reclamation scheme], which uses reference counting to determine when it is safe to free memory. However, unlike traditional reference counting schemes where every memory access requires modifying shared memory, reference counters are only used for retired objects. When a batch of objects is retired, a reference counter is initialized and propagated to all active threads. Threads cooperate to decrement the reference counter as they exit, eventually freeing the batch. Reclamation is naturally balanced as the thread with the last reference to an object is the one that frees it. This also removes the need to check whether other threads have made progress, leading to predictable latency without sacrificing performance. `seize` provides performance competitive with that of epoch based schemes, while memory efficiency is similar to that of hazard pointers. `seize` is compatible with all modern hardware that supports single-word atomic operations such as FAA and CAS. 
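## Example

The snippet below is a minimal sketch of the core API: a protected load through a guard, followed by retirement of a replaced value. It mirrors the examples in the crate documentation rather than a full data structure; see the [quick-start guide] for a complete walkthrough.

```rust
use std::sync::atomic::{AtomicPtr, Ordering};
use seize::{reclaim, Collector, Guard};

let collector = Collector::new();

// A shared atomic pointer, e.g. the head of a lock-free data structure.
let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize)));

{
    // Mark the current thread as active and perform a protected load.
    let guard = collector.enter();
    let value = guard.protect(&ptr, Ordering::Acquire);
    unsafe { assert_eq!(*value, 1) };
    // `value` remains valid until `guard` is dropped, even if another
    // thread retires the object in the meantime.
}

// Swap in a new value and retire the old one; it is freed once no
// active thread can still hold a reference to it.
let old = ptr.swap(Box::into_raw(Box::new(2_usize)), Ordering::Release);
unsafe { collector.retire(old, reclaim::boxed) };

// Finally, retire the current value as well.
unsafe { collector.retire(ptr.load(Ordering::Relaxed), reclaim::boxed) };
```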
[quick-start guide]: https://docs.rs/seize/latest/seize/guide/index.html [hazard pointers]: https://www.cs.otago.ac.nz/cosc440/readings/hazard-pointers.pdf [hyaline reclamation scheme]: https://arxiv.org/pdf/1905.07903.pdf [epoch based reclamation]: https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf seize-0.5.0/benches/single_thread.rs000064400000000000000000000010471046102023000155340ustar 00000000000000use std::hint::black_box; use criterion::{criterion_group, criterion_main, Criterion}; fn enter_leave(c: &mut Criterion) { let mut group = c.benchmark_group("enter_leave"); group.bench_function("seize", |b| { let collector = seize::Collector::new(); b.iter(|| { black_box(collector.enter()); }); }); group.bench_function("crossbeam", |b| { b.iter(|| { black_box(crossbeam_epoch::pin()); }); }); } criterion_group!(benches, enter_leave); criterion_main!(benches); seize-0.5.0/benches/stack.rs000064400000000000000000000213201046102023000140250ustar 00000000000000use std::sync::{Arc, Barrier}; use std::thread; use criterion::{criterion_group, criterion_main, Criterion}; const THREADS: usize = 16; const ITEMS: usize = 1000; fn treiber_stack(c: &mut Criterion) { c.bench_function("trieber_stack-haphazard", |b| { b.iter(run::>) }); c.bench_function("trieber_stack-crossbeam", |b| { b.iter(run::>) }); c.bench_function("trieber_stack-seize", |b| { b.iter(run::>) }); } trait Stack { fn new() -> Self; fn push(&self, value: T); fn pop(&self) -> Option; fn is_empty(&self) -> bool; } fn run() where T: Stack + Send + Sync + 'static, { let stack = Arc::new(T::new()); let barrier = Arc::new(Barrier::new(THREADS)); let handles = (0..THREADS - 1) .map(|_| { let stack = stack.clone(); let barrier = barrier.clone(); thread::spawn(move || { barrier.wait(); for i in 0..ITEMS { stack.push(i); assert!(stack.pop().is_some()); } }) }) .collect::>(); barrier.wait(); for i in 0..ITEMS { stack.push(i); assert!(stack.pop().is_some()); } for handle in handles { handle.join().unwrap(); } assert!(stack.pop().is_none()); assert!(stack.is_empty()); } criterion_group!(benches, treiber_stack); criterion_main!(benches); mod seize_stack { use super::Stack; use seize::{reclaim, Collector, Guard}; use std::mem::ManuallyDrop; use std::ptr::{self, NonNull}; use std::sync::atomic::{AtomicPtr, Ordering}; #[derive(Debug)] pub struct TreiberStack { head: AtomicPtr>, collector: Collector, } #[derive(Debug)] struct Node { data: ManuallyDrop, next: *mut Node, } impl Stack for TreiberStack { fn new() -> TreiberStack { TreiberStack { head: AtomicPtr::new(ptr::null_mut()), collector: Collector::new().batch_size(32), } } fn push(&self, value: T) { let node = Box::into_raw(Box::new(Node { data: ManuallyDrop::new(value), next: ptr::null_mut(), })); let guard = self.collector.enter(); loop { let head = guard.protect(&self.head, Ordering::Relaxed); unsafe { (*node).next = head } if self .head .compare_exchange(head, node, Ordering::Release, Ordering::Relaxed) .is_ok() { break; } } } fn pop(&self) -> Option { let guard = self.collector.enter(); loop { let head = NonNull::new(guard.protect(&self.head, Ordering::Acquire))?.as_ptr(); let next = unsafe { (*head).next }; if self .head .compare_exchange(head, next, Ordering::Relaxed, Ordering::Relaxed) .is_ok() { unsafe { let data = ptr::read(&(*head).data); guard.defer_retire(head, reclaim::boxed); return Some(ManuallyDrop::into_inner(data)); } } } } fn is_empty(&self) -> bool { self.head.load(Ordering::Relaxed).is_null() } } impl Drop for TreiberStack { fn drop(&mut self) { while self.pop().is_some() {} 
} } } mod haphazard_stack { use super::Stack; use haphazard::{Domain, HazardPointer}; use std::mem::ManuallyDrop; use std::ptr; use std::sync::atomic::{AtomicPtr, Ordering}; #[derive(Debug)] pub struct TreiberStack { head: AtomicPtr>, } #[derive(Debug)] struct Node { data: ManuallyDrop, next: *mut Node, } unsafe impl Send for Node {} unsafe impl Sync for Node {} impl Stack for TreiberStack { fn new() -> TreiberStack { TreiberStack { head: AtomicPtr::default(), } } fn push(&self, value: T) { let node = Box::into_raw(Box::new(Node { data: ManuallyDrop::new(value), next: ptr::null_mut(), })); let mut h = HazardPointer::new(); loop { let head = match h.protect_ptr(&self.head) { Some((ptr, _)) => ptr.as_ptr(), None => ptr::null_mut(), }; unsafe { (*node).next = head } if self .head .compare_exchange(head, node, Ordering::Release, Ordering::Relaxed) .is_ok() { break; } } } fn pop(&self) -> Option { let mut h = HazardPointer::new(); loop { let (head, _) = h.protect_ptr(&self.head)?; let next = unsafe { head.as_ref().next }; if self .head .compare_exchange(head.as_ptr(), next, Ordering::Relaxed, Ordering::Relaxed) .is_ok() { unsafe { let data = ptr::read(&head.as_ref().data); Domain::global().retire_ptr::<_, Box>>(head.as_ptr()); return Some(ManuallyDrop::into_inner(data)); } } } } fn is_empty(&self) -> bool { let mut h = HazardPointer::new(); unsafe { h.protect(&self.head) }.is_none() } } impl Drop for TreiberStack { fn drop(&mut self) { while self.pop().is_some() {} } } } mod crossbeam_stack { use super::Stack; use crossbeam_epoch::{Atomic, Owned, Shared}; use std::mem::ManuallyDrop; use std::ptr; use std::sync::atomic::Ordering; #[derive(Debug)] pub struct TreiberStack { head: Atomic>, } unsafe impl Send for TreiberStack {} unsafe impl Sync for TreiberStack {} #[derive(Debug)] struct Node { data: ManuallyDrop, next: *const Node, } impl Stack for TreiberStack { fn new() -> TreiberStack { TreiberStack { head: Atomic::null(), } } fn push(&self, value: T) { let guard = crossbeam_epoch::pin(); let mut node = Owned::new(Node { data: ManuallyDrop::new(value), next: ptr::null_mut(), }); loop { let head = self.head.load(Ordering::Relaxed, &guard); node.next = head.as_raw(); match self.head.compare_exchange( head, node, Ordering::Release, Ordering::Relaxed, &guard, ) { Ok(_) => break, Err(err) => node = err.new, } } } fn pop(&self) -> Option { let guard = crossbeam_epoch::pin(); loop { let head = self.head.load(Ordering::Acquire, &guard); if head.is_null() { return None; } let next = unsafe { head.deref().next }; if self .head .compare_exchange( head, Shared::from(next), Ordering::Relaxed, Ordering::Relaxed, &guard, ) .is_ok() { unsafe { let data = ptr::read(&head.deref().data); guard.defer_destroy(head); return Some(ManuallyDrop::into_inner(data)); } } } } fn is_empty(&self) -> bool { let guard = crossbeam_epoch::pin(); self.head.load(Ordering::Relaxed, &guard).is_null() } } impl Drop for TreiberStack { fn drop(&mut self) { while self.pop().is_some() {} } } } seize-0.5.0/docs/GUIDE.md000064400000000000000000000161761046102023000130670ustar 00000000000000A quick-start guide for working with `seize`. # Introduction `seize` tries to stay out of your way as much as possible. It works with raw pointers directly instead of creating safe wrapper types that end up being a hassle to work with in practice. Below is a step-by-step guide on how to get started. We'll be writing a stack that implements concurrent `push` and `pop` operations. 
The details of how the stack works are not directly relevant; the guide will instead focus on how `seize` works generally.

# Collectors

`seize` avoids the use of global state and encourages creating a designated _collector_ per data structure. Collectors allow you to safely read and reclaim objects. For our concurrent stack, the collector will sit alongside the head node.

```rust,ignore
use seize::{reclaim, Collector};
use std::mem::ManuallyDrop;
use std::sync::atomic::{AtomicPtr, Ordering};

pub struct Stack<T> {
    // The collector for memory reclamation.
    collector: Collector,
    // The head of the stack.
    head: AtomicPtr<Node<T>>,
}

struct Node<T> {
    // The node's value.
    value: ManuallyDrop<T>,
    // The next node in the stack.
    next: *mut Node<T>,
}
```

# Performing Operations

Before starting an operation that involves loading objects that may be reclaimed, you must mark the thread as _active_ by calling the `enter` method.

```rust,ignore
impl<T> Stack<T> {
    pub fn push(&self, value: T) {
        let node = Box::into_raw(Box::new(Node {
            next: std::ptr::null_mut(),
            value: ManuallyDrop::new(value),
        }));

        let guard = self.collector.enter(); // <===

        // ...
    }
}
```

# Protecting Loads

`enter` returns a guard that allows you to safely load atomic pointers. Guards are the core of safe memory reclamation, letting other threads know that the current thread may be accessing shared memory.

Using a guard, you can perform a _protected_ load of an atomic pointer using the [`Guard::protect`] method. Any valid pointer that is protected is guaranteed to stay valid until the guard is dropped, or the pointer is retired by the current thread. Importantly, if another thread retires an object that you protected, the collector knows not to reclaim the object until your guard is dropped.

```rust,ignore
impl<T> Stack<T> {
    pub fn push(&self, value: T) {
        // ...

        let guard = self.collector.enter();

        loop {
            let head = guard.protect(&self.head, Ordering::Relaxed); // <===
            unsafe { (*node).next = head; }

            if self
                .head
                .compare_exchange(head, node, Ordering::Release, Ordering::Relaxed)
                .is_ok()
            {
                break;
            }
        }

        drop(guard);
    }
}
```

Notice that the lifetime of a guarded pointer is logically tied to that of the guard — when the guard is dropped the pointer is invalidated — but we work with raw pointers for convenience. Data structures that return shared references to values should ensure that the lifetime of the reference is tied to the lifetime of a guard.

# Retiring Objects

Objects that have been removed from a data structure can be safely _retired_ through the collector. It will be _reclaimed_, or freed, when no thread holds a reference to it any longer.

```rust,ignore
impl<T> Stack<T> {
    pub fn pop(&self) -> Option<T> {
        // Mark the thread as active.
        let guard = self.collector.enter();

        loop {
            // Perform a protected load of the head.
            let head = guard.protect(&self.head, Ordering::Acquire);

            if head.is_null() {
                return None;
            }

            let next = unsafe { (*head).next };

            // Pop the head from the stack.
            if self
                .head
                .compare_exchange(head, next, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
            {
                unsafe {
                    // Read the value of the previous head.
                    let data = ptr::read(&(*head).value);

                    // Retire the previous head now that it has been popped.
                    self.collector.retire(head, reclaim::boxed); // <===

                    // Return the value.
                    return Some(ManuallyDrop::into_inner(data));
                }
            }
        }
    }
}
```

There are a couple of important things to note about retiring an object.

### 1. Retired objects must be logically removed

An object can only be retired if it is _no longer accessible_ to any thread that comes after.
In the above code example, this was ensured by swapping out the node before retiring it. Threads that loaded a value _before_ it was retired are safe, but threads that come after are not.

Note that concurrent stacks typically suffer from the [ABA problem]. Using `retire` after popping a node ensures that the node is only freed _after_ all active threads that could have loaded it exit, avoiding any potential ABA.

### 2. Retired objects cannot be accessed by the current thread

A guard does not protect objects retired by the current thread. If no other thread holds a reference to an object, it may be reclaimed _immediately_. This makes the following code unsound.

```rust,ignore
let ptr = guard.protect(&node, Ordering::Acquire);
collector.retire(ptr, reclaim::boxed);

// **Unsound**, the pointer has been retired.
println!("{}", (*ptr).value);
```

Retirement can be delayed until the guard is dropped by calling [`defer_retire`] on the guard, instead of on the collector directly.

```rust,ignore
let ptr = guard.protect(&node, Ordering::Acquire);
guard.defer_retire(ptr, reclaim::boxed);

// This read is fine.
println!("{}", (*ptr).value);

// However, once the guard is dropped, the pointer is invalidated.
drop(guard);
```

### 3. Custom Reclaimers

You probably noticed that `retire` takes a function as a second parameter. This function is known as a _reclaimer_, and is run when the collector decides it is safe to free the retired object.

Typically you will pass in a function from the [`seize::reclaim`] module. For example, values allocated with `Box` can use [`reclaim::boxed`], as we used in our stack.

```rust,ignore
use seize::reclaim;

impl<T> Stack<T> {
    pub fn pop(&self) -> Option<T> {
        // ...
        self.collector.retire(head, reclaim::boxed);
        // ...
    }
}
```

If you need to run custom reclamation code, you can write a custom reclaimer.

```rust,ignore
collector.retire(value, |ptr: *mut Node<T>, _collector: &Collector| unsafe {
    // Safety: The value was allocated with `Box::new`.
    let value = Box::from_raw(ptr);
    println!("Dropping {value}");
    drop(value);
});
```

Note that the reclaimer receives a reference to the collector as its second argument, allowing for recursive reclamation.

[`defer_retire`]: https://docs.rs/seize/latest/seize/trait.Guard.html#tymethod.defer_retire
[`Guard::protect`]: https://docs.rs/seize/latest/seize/trait.Guard.html#tymethod.protect
[`seize::reclaim`]: https://docs.rs/seize/latest/seize/reclaim/index.html
[`reclaim::boxed`]: https://docs.rs/seize/latest/seize/reclaim/fn.boxed.html
[ABA problem]: https://en.wikipedia.org/wiki/ABA_problem

seize-0.5.0/rustfmt.toml
wrap_comments = true
comment_width = 80

seize-0.5.0/src/collector.rs
use crate::raw::{self, membarrier, Thread}; use crate::{LocalGuard, OwnedGuard}; use std::fmt; /// A concurrent garbage collector. /// /// A `Collector` manages the access and retirement of concurrent objects. /// Objects can be safely loaded through *guards*, which can be created using /// the [`enter`](Collector::enter) or [`enter_owned`](Collector::enter_owned) /// methods. /// /// Every instance of a concurrent data structure should typically own its /// `Collector`. This allows the garbage collection of non-`'static` values, as /// memory reclamation is guaranteed to run when the `Collector` is dropped. #[repr(transparent)] pub struct Collector { /// The underlying raw collector instance.
pub(crate) raw: raw::Collector, } impl Default for Collector { fn default() -> Self { Self::new() } } impl Collector { /// The default batch size for a new collector. const DEFAULT_BATCH_SIZE: usize = 32; /// Creates a new collector. pub fn new() -> Self { // Initialize the `membarrier` module, detecting the presence of // operating-system strong barrier APIs. membarrier::detect(); let cpus = std::thread::available_parallelism() .map(Into::into) .unwrap_or(1); // Ensure every batch accumulates at least as many entries // as there are threads on the system. let batch_size = cpus.max(Self::DEFAULT_BATCH_SIZE); Self { raw: raw::Collector::new(cpus, batch_size), } } /// Sets the number of objects that must be in a batch before reclamation is /// attempted. /// /// Retired objects are added to thread-local *batches* before starting the /// reclamation process. After `batch_size` is hit, the objects are moved to /// separate *retirement lists*, where reference counting kicks in and /// batches are eventually reclaimed. /// /// A larger batch size amortizes the cost of retirement. However, /// reclamation latency can also grow due to the large number of objects /// needed to be freed. Note that reclamation can not be attempted /// unless the batch contains at least as many objects as the number of /// active threads. /// /// The default batch size is `32`. pub fn batch_size(mut self, batch_size: usize) -> Self { self.raw.batch_size = batch_size; self } /// Marks the current thread as active, returning a guard that protects /// loads of concurrent objects for its lifetime. The thread will be /// marked as inactive when the guard is dropped. /// /// Note that loads of objects that may be retired must be protected with /// the [`Guard::protect`]. See [the /// guide](crate::guide#starting-operations) for an introduction to /// using guards, or the documentation of [`LocalGuard`] for /// more details. /// /// Note that `enter` is reentrant, and it is legal to create multiple /// guards on the same thread. The thread will stay marked as active /// until the last guard is dropped. /// /// [`Guard::protect`]: crate::Guard::protect /// /// # Performance /// /// Performance-wise, creating and destroying a `LocalGuard` is about the /// same as locking and unlocking an uncontended `Mutex`. Because of /// this, guards should be reused across multiple operations if /// possible. However, holding a guard prevents the reclamation of any /// concurrent objects retired during its lifetime, so there is /// a tradeoff between performance and memory usage. /// /// # Examples /// /// ```rust /// # use std::sync::atomic::{AtomicPtr, Ordering}; /// use seize::Guard; /// # let collector = seize::Collector::new(); /// /// // An atomic object. /// let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize))); /// /// { /// // Create a guard that is active for this scope. /// let guard = collector.enter(); /// /// // Read the object using a protected load. /// let value = guard.protect(&ptr, Ordering::Acquire); /// unsafe { assert_eq!(*value, 1) } /// /// // If there are other thread that may retire the object, /// // the pointer is no longer valid after the guard is dropped. /// drop(guard); /// } /// # unsafe { drop(Box::from_raw(ptr.load(Ordering::Relaxed))) }; /// ``` #[inline] pub fn enter(&self) -> LocalGuard<'_> { LocalGuard::enter(self) } /// Create an owned guard that protects objects for its lifetime. 
/// /// Unlike local guards created with [`enter`](Collector::enter), owned /// guards are independent of the current thread, allowing them to /// implement `Send` and `Sync`. See the documentation of [`OwnedGuard`] /// for more details. #[inline] pub fn enter_owned(&self) -> OwnedGuard<'_> { OwnedGuard::enter(self) } /// Retires a value, running `reclaim` when no threads hold a reference to /// it. /// /// Note that this method is disconnected from any guards on the current /// thread, so the pointer may be reclaimed immediately. Use /// [`Guard::defer_retire`](crate::Guard::defer_retire) if the pointer may /// still be accessed by the current thread while the guard is active. /// /// # Safety /// /// The retired pointer must no longer be accessible to any thread that /// enters after it is removed. It also cannot be accessed by the /// current thread after `retire` is called. /// /// Additionally, the pointer must be valid to pass to the provided /// reclaimer, once it is safe to reclaim. /// /// # Examples /// /// Common reclaimers are provided by the [`reclaim`](crate::reclaim) /// module. /// /// ``` /// # use std::sync::atomic::{AtomicPtr, Ordering}; /// # let collector = seize::Collector::new(); /// use seize::reclaim; /// /// // An atomic object. /// let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize))); /// /// // Create a guard. /// let guard = collector.enter(); /// /// // Store a new value. /// let old = ptr.swap(Box::into_raw(Box::new(2_usize)), Ordering::Release); /// /// // Reclaim the old value. /// // /// // Safety: The `swap` above made the old value unreachable for any new threads. /// // Additionally, the old value was allocated with a `Box`, so `reclaim::boxed` /// // is valid. /// unsafe { collector.retire(old, reclaim::boxed) }; /// # unsafe { collector.retire(ptr.load(Ordering::Relaxed), reclaim::boxed) }; /// ``` /// /// Alternatively, a custom reclaimer function can be used. /// /// ``` /// use seize::Collector; /// /// let collector = Collector::new(); /// /// // Allocate a value and immediately retire it. /// let value: *mut usize = Box::into_raw(Box::new(1_usize)); /// /// // Safety: The value was never shared. /// unsafe { /// collector.retire(value, |ptr: *mut usize, _collector: &Collector| unsafe { /// // Safety: The value was allocated with `Box::new`. /// let value = Box::from_raw(ptr); /// println!("Dropping {value}"); /// drop(value); /// }); /// } /// ``` #[inline] pub unsafe fn retire<T>(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)) { debug_assert!(!ptr.is_null(), "attempted to retire a null pointer"); // Note that `add` doesn't ever actually reclaim the pointer immediately if // the current thread is active. Instead, it adds it to the current thread's // reclamation list, but we don't guarantee that publicly. unsafe { self.raw.add(ptr, reclaim, Thread::current()) } } /// Reclaim any values that have been retired. /// /// This method reclaims any objects that have been retired across *all* /// threads. After calling this method, any values that were previously /// retired, or retired recursively on the current thread during this /// call, will have been reclaimed. /// /// # Safety /// /// This function is **extremely unsafe** to call. It is only sound when no /// threads are currently active, whether accessing values that have /// been retired or accessing the collector through any type of guard. /// This is akin to having a unique reference to the collector.
However, /// this method takes a shared reference, as reclaimers to /// be run by this thread are allowed to access the collector recursively. /// /// # Notes /// /// Note that if reclaimers initialize guards across threads, or initialize /// owned guards, objects retired through those guards may not be /// reclaimed. pub unsafe fn reclaim_all(&self) { unsafe { self.raw.reclaim_all() }; } // Create a reference to `Collector` from an underlying `raw::Collector`. pub(crate) fn from_raw(raw: &raw::Collector) -> &Collector { unsafe { &*(raw as *const raw::Collector as *const Collector) } } } impl Eq for Collector {} impl PartialEq for Collector { /// Checks if both references point to the same collector. #[inline] fn eq(&self, other: &Self) -> bool { self.raw.id == other.raw.id } } impl fmt::Debug for Collector { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Collector") .field("batch_size", &self.raw.batch_size) .finish() } } seize-0.5.0/src/guard.rs000064400000000000000000000323761046102023000132170ustar 00000000000000use std::fmt; use std::marker::PhantomData; use std::sync::atomic::{AtomicPtr, Ordering}; use crate::raw::{self, Reservation, Thread}; use crate::Collector; /// A guard that enables protected loads of concurrent objects. /// /// This trait provides common functionality implemented by [`LocalGuard`] and /// [`OwnedGuard`]. See [the guide](crate::guide#starting-operations) for an /// introduction to using guards. pub trait Guard { /// Refreshes the guard. /// /// Calling this method is similar to dropping and immediately creating a /// new guard. The current thread remains active, but any pointers that /// were previously protected may be reclaimed. /// /// # Safety /// /// This method is not marked as `unsafe`, but will affect the validity of /// pointers loaded using [`Guard::protect`], similar to dropping a guard. /// It is intended to be used safely by users of concurrent data structures, /// as references will be tied to the guard and this method takes `&mut /// self`. fn refresh(&mut self); /// Flush any retired values in the local batch. /// /// This method flushes any values from the current thread's local batch, /// starting the reclamation process. Note that no memory can be /// reclaimed while this guard is active, but calling `flush` may allow /// memory to be reclaimed more quickly after the guard is dropped. /// /// Note that the batch must contain at least as many objects as the number /// of currently active threads for a flush to be performed. See /// [`Collector::batch_size`] for details about batch sizes. fn flush(&self); /// Returns the collector this guard was created from. fn collector(&self) -> &Collector; /// Returns a numeric identifier for the current thread. /// /// Guards rely on thread-local state, including thread IDs. This method is /// a cheap way to get an identifier for the current thread without TLS /// overhead. Note that thread IDs may be reused, so the value returned /// is only unique for the lifetime of this thread. fn thread_id(&self) -> usize; /// Protects the load of an atomic pointer. /// /// Any valid pointer loaded through a guard using the `protect` method is /// guaranteed to stay valid until the guard is dropped, or the object /// is retired by the current thread. Importantly, if another thread /// retires this object, it will not be reclaimed for the lifetime of /// this guard. 
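    ///
    /// For example, a minimal sketch mirroring the [`Collector::enter`]
    /// example, where the protected load stays valid for the lifetime of the
    /// guard (the hidden lines create the collector and clean up the
    /// allocation):
    ///
    /// ```
    /// # use std::sync::atomic::{AtomicPtr, Ordering};
    /// # use seize::Guard;
    /// # let collector = seize::Collector::new();
    /// let ptr = AtomicPtr::new(Box::into_raw(Box::new(1_usize)));
    ///
    /// let guard = collector.enter();
    /// let value = guard.protect(&ptr, Ordering::Acquire);
    /// // The pointer stays valid here, even if another thread retires it,
    /// // until `guard` is dropped.
    /// unsafe { assert_eq!(*value, 1) };
    /// drop(guard);
    /// # unsafe { drop(Box::from_raw(ptr.load(Ordering::Relaxed))) };
    /// ```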
/// /// Note that the lifetime of a guarded pointer is logically tied to that of /// the guard — when the guard is dropped the pointer is invalidated. Data /// structures that return shared references to values should ensure that /// the lifetime of the reference is tied to the lifetime of a guard. fn protect(&self, ptr: &AtomicPtr, order: Ordering) -> *mut T { ptr.load(raw::Collector::protect(order)) } /// Stores a value into the pointer, returning the protected previous value. /// /// This method is equivalent to [`AtomicPtr::swap`], except the returned /// value is guaranteed to be protected with the same guarantees as /// [`Guard::protect`]. fn swap(&self, ptr: &AtomicPtr, value: *mut T, order: Ordering) -> *mut T { ptr.swap(value, raw::Collector::protect(order)) } /// Stores a value into the pointer if the current value is the same as the /// `current` value, returning the protected previous value. /// /// This method is equivalent to [`AtomicPtr::compare_exchange`], except the /// returned value is guaranteed to be protected with the same /// guarantees as [`Guard::protect`]. fn compare_exchange( &self, ptr: &AtomicPtr, current: *mut T, new: *mut T, success: Ordering, failure: Ordering, ) -> Result<*mut T, *mut T> { ptr.compare_exchange( current, new, raw::Collector::protect(success), raw::Collector::protect(failure), ) } /// Stores a value into the pointer if the current value is the same as the /// `current` value, returning the protected previous value. /// /// This method is equivalent to [`AtomicPtr::compare_exchange_weak`], /// except the returned value is guaranteed to be protected with the /// same guarantees as [`Guard::protect`]. fn compare_exchange_weak( &self, ptr: &AtomicPtr, current: *mut T, new: *mut T, success: Ordering, failure: Ordering, ) -> Result<*mut T, *mut T> { ptr.compare_exchange_weak( current, new, raw::Collector::protect(success), raw::Collector::protect(failure), ) } /// Retires a value, running `reclaim` when no threads hold a reference to /// it. /// /// This method delays reclamation until the guard is dropped, as opposed to /// [`Collector::retire`], which may reclaim objects immediately. /// /// /// # Safety /// /// The retired pointer must no longer be accessible to any thread that /// enters after it is removed. Additionally, the pointer must be valid /// to pass to the provided reclaimer, once it is safe to reclaim. unsafe fn defer_retire(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)); } /// A guard that keeps the current thread marked as active. /// /// Local guards are created by calling [`Collector::enter`]. Unlike /// [`OwnedGuard`], a local guard is tied to the current thread and does not /// implement `Send`. This makes local guards relatively cheap to create and /// destroy. /// /// Most of the functionality provided by this type is through the [`Guard`] /// trait. pub struct LocalGuard<'a> { /// The collector that this guard is associated with. collector: &'a Collector, // The current thread. thread: Thread, // The reservation for the current thread. reservation: *const Reservation, // `LocalGuard` not be `Send or Sync` as we are tied to the state of the // current thread in the collector. _unsend: PhantomData<*mut ()>, } impl LocalGuard<'_> { #[inline] pub(crate) fn enter(collector: &Collector) -> LocalGuard<'_> { let thread = Thread::current(); // Safety: `thread` is the current thread. 
let reservation = unsafe { collector.raw.reservation(thread) }; // Calls to `enter` may be reentrant, so we need to keep track of the number of // active guards for the current thread. let guards = reservation.guards.get(); reservation.guards.set(guards + 1); if guards == 0 { // Safety: Only called on the current thread, which is currently inactive. unsafe { collector.raw.enter(reservation) }; } LocalGuard { thread, reservation, collector, _unsend: PhantomData, } } } impl Guard for LocalGuard<'_> { /// Refreshes the guard. #[inline] fn refresh(&mut self) { // Safety: `self.reservation` is owned by the current thread. let reservation = unsafe { &*self.reservation }; let guards = reservation.guards.get(); if guards == 1 { // Safety: We have a unique reference to the last active guard. unsafe { self.collector.raw.refresh(reservation) } } } /// Flush any retired values in the local batch. #[inline] fn flush(&self) { // Note that this does not actually retire any values, it just attempts to add // the batch to any active reservations lists, including ours. // // Safety: `self.thread` is the current thread. unsafe { self.collector.raw.try_retire_batch(self.thread) } } /// Returns the collector this guard was created from. #[inline] fn collector(&self) -> &Collector { self.collector } /// Returns a numeric identifier for the current thread. #[inline] fn thread_id(&self) -> usize { self.thread.id } /// Retires a value, running `reclaim` when no threads hold a reference to /// it. #[inline] unsafe fn defer_retire(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)) { // Safety: // - `self.thread` is the current thread. // - The validity of the pointer is guaranteed by the caller. unsafe { self.collector.raw.add(ptr, reclaim, self.thread) } } } impl Drop for LocalGuard<'_> { #[inline] fn drop(&mut self) { // Safety: `self.reservation` is owned by the current thread. let reservation = unsafe { &*self.reservation }; // Decrement the active guard count. let guards = reservation.guards.get(); reservation.guards.set(guards - 1); if guards == 1 { // Safety: We have a unique reference to the last active guard. unsafe { self.collector.raw.leave(reservation) }; } } } impl fmt::Debug for LocalGuard<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("LocalGuard").finish() } } /// A guard that protects objects for it's lifetime, independent of the current /// thread. /// /// Unlike [`LocalGuard`], an owned guard is independent of the current thread, /// allowing it to implement `Send` and `Sync`. This is useful for holding /// guards across `.await` points in work-stealing schedulers, where execution /// may be resumed on a different thread than started on. However, owned guards /// are more expensive to create and destroy, so should be avoided if /// cross-thread usage is not required. /// /// Most of the functionality provided by this type is through the [`Guard`] /// trait. pub struct OwnedGuard<'a> { /// The collector that this guard is associated with. collector: &'a Collector, // An owned thread, unique to this guard. thread: Thread, // The reservation for this guard. reservation: *const Reservation, } // Safety: All shared methods on `OwnedGuard` that access shared memory are // synchronized with locks. unsafe impl Sync for OwnedGuard<'_> {} // Safety: `OwnedGuard` owns its thread slot and is not tied to any // thread-locals. 
unsafe impl Send for OwnedGuard<'_> {} impl OwnedGuard<'_> { #[inline] pub(crate) fn enter(collector: &Collector) -> OwnedGuard<'_> { // Create a thread slot that will last for the lifetime of this guard. let thread = Thread::create(); // Safety: We have ownership of `thread` and have not shared it. let reservation = unsafe { collector.raw.reservation(thread) }; // Safety: We have ownership of `reservation`. unsafe { collector.raw.enter(reservation) }; OwnedGuard { collector, thread, reservation, } } } impl Guard for OwnedGuard<'_> { /// Refreshes the guard. #[inline] fn refresh(&mut self) { // Safety: `self.reservation` is owned by the current thread. let reservation = unsafe { &*self.reservation }; unsafe { self.collector.raw.refresh(reservation) } } /// Flush any retired values in the local batch. #[inline] fn flush(&self) { // Safety: `self.reservation` is owned by the current thread. let reservation = unsafe { &*self.reservation }; let _lock = reservation.lock.lock().unwrap(); // Note that this does not actually retire any values, it just attempts to add // the batch to any active reservations lists, including ours. // // Safety: We hold the lock and so have unique access to the batch. unsafe { self.collector.raw.try_retire_batch(self.thread) } } /// Returns the collector this guard was created from. #[inline] fn collector(&self) -> &Collector { self.collector } /// Returns a numeric identifier for the current thread. #[inline] fn thread_id(&self) -> usize { // We can't return the ID of our thread slot because `OwnedGuard` is `Send` so // the ID is not uniquely tied to the current thread. We also can't // return the OS thread ID because it might conflict with our thread // IDs, so we have to get/create the current thread. Thread::current().id } /// Retires a value, running `reclaim` when no threads hold a reference to /// it. #[inline] unsafe fn defer_retire(&self, ptr: *mut T, reclaim: unsafe fn(*mut T, &Collector)) { // Safety: `self.reservation` is owned by the current thread. let reservation = unsafe { &*self.reservation }; let _lock = reservation.lock.lock().unwrap(); // Safety: // - We hold the lock and so have unique access to the batch. // - The validity of the pointer is guaranteed by the caller. unsafe { self.collector.raw.add(ptr, reclaim, self.thread) } } } impl Drop for OwnedGuard<'_> { #[inline] fn drop(&mut self) { // Safety: `self.reservation` is owned by the current thread. let reservation = unsafe { &*self.reservation }; // Safety: `self.thread` is an owned thread. unsafe { self.collector.raw.leave(reservation) }; // Safety: We are in `drop` and never share `self.thread`. 
unsafe { Thread::free(self.thread.id) }; } } seize-0.5.0/src/guide.rs000064400000000000000000000000531046102023000131750ustar 00000000000000#![doc = include_str!("../docs/GUIDE.md")] seize-0.5.0/src/lib.rs000064400000000000000000000004151046102023000126500ustar 00000000000000#![allow(clippy::missing_transmute_annotations)] #![deny(unsafe_op_in_unsafe_fn)] #![doc = include_str!("../README.md")] mod collector; mod guard; mod raw; pub mod guide; pub mod reclaim; pub use collector::Collector; pub use guard::{Guard, LocalGuard, OwnedGuard}; seize-0.5.0/src/raw/collector.rs000064400000000000000000000542431046102023000146710ustar 00000000000000use super::membarrier; use super::tls::{Thread, ThreadLocal}; use super::utils::CachePadded; use std::cell::{Cell, UnsafeCell}; use std::ptr; use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; use std::sync::Mutex; /// Fast and efficient concurrent memory reclamation. /// /// The core memory reclamation algorithm used by seize is described /// [in this paper](https://arxiv.org/pdf/2108.02763.pdf). Specifically, /// this module implements the Hyaline-1 variant of the algorithm. pub struct Collector { /// Per-thread batches of retired nodes. /// /// Retired values are added to thread-local batches before starting the /// reclamation process, amortizing the cost of retirement. batches: ThreadLocal>>, /// Per-thread reservations lists. /// /// A reservation list is a list of batches that were retired while the /// current thread was active. The thread must decrement the reference /// count and potentially free the batch of any reservations before /// exiting. reservations: ThreadLocal>, /// A unique identifier for a collector. pub(crate) id: usize, /// The minimum number of nodes required in a batch before attempting /// retirement. pub(crate) batch_size: usize, } impl Collector { /// Create a collector with the provided batch size and initial thread /// count. pub fn new(threads: usize, batch_size: usize) -> Self { // A counter for collector IDs. static ID: AtomicUsize = AtomicUsize::new(0); Self { id: ID.fetch_add(1, Ordering::Relaxed), reservations: ThreadLocal::with_capacity(threads), batches: ThreadLocal::with_capacity(threads), batch_size: batch_size.next_power_of_two(), } } /// Return the reservation for the given thread. /// /// # Safety /// /// The current thread must have unique access to the reservation for the /// provided `thread`. #[inline] pub unsafe fn reservation(&self, thread: Thread) -> &Reservation { // Safety: Guaranteed by caller. unsafe { self.reservations.load(thread) } } /// Mark the current thread as active. /// /// `enter` and `leave` calls maintain a local reference count to allow /// reentrancy. If the current thread is already marked as active, this /// method simply increases the reference count. /// /// # Safety /// /// This method is not safe to call concurrently on the same thread, and /// must only be called if the current thread is inactive. #[inline] pub unsafe fn enter(&self, reservation: &Reservation) { // Mark the current thread as active. reservation .head .store(ptr::null_mut(), membarrier::light_store()); // This barrier, combined with the light store above, synchronizes with the // heavy barrier in `retire`: // - If our store comes first, the thread retiring will see that we are active. // - If the fence comes first, we will see the new values of any objects being // retired by that thread // // Note that all pointer loads perform a light barrier to participate in the // total order. 
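//
// An informal summary, not part of the original comment: on platforms where
// the fast membarrier strategy is available, the fast path (`enter`) only
// issues a cheap store plus a compiler fence, while the slow path
// (`try_retire`) issues the expensive process-wide heavy barrier, so the
// synchronization cost is paid when retiring batches rather than on every
// guard creation.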
membarrier::light_barrier(); } /// Strengthens an ordering to that necessary to protect the load of a /// pointer. #[inline] pub fn protect(_order: Ordering) -> Ordering { // We have to respect both the user provided ordering and the ordering required // by the membarrier strategy. `SeqCst` is equivalent to `Acquire` on // most platforms, so we just use it unconditionally. // // Loads performed with this ordering, paired with the light barrier in `enter`, // will participate in the total order established by `enter`, and thus see the // new values of any pointers that were retired when the thread was inactive. Ordering::SeqCst } /// Mark the current thread as inactive. /// /// # Safety /// /// Any previously protected pointers may be invalidated after calling /// `leave`. Additionally, this method is not safe to call concurrently /// with the same reservation. #[inline] pub unsafe fn leave(&self, reservation: &Reservation) { // Release: Exit the critical section, ensuring that any pointer accesses // happen-before we are marked as inactive. let head = reservation.head.swap(Entry::INACTIVE, Ordering::Release); if head != Entry::INACTIVE { // Acquire any new entries in the reservation list, as well as the new values of // any objects that were retired while we were active. atomic::fence(Ordering::Acquire); // Decrement the reference counts of any batches that were retired. unsafe { self.traverse(head) } } } /// Clear the reservation list, keeping the thread marked as active. /// /// # Safety /// /// Any previously protected pointers may be invalidated after calling /// `leave`. Additionally, this method is not safe to call concurrently /// with the same reservation. #[inline] pub unsafe fn refresh(&self, reservation: &Reservation) { // SeqCst: Establish the ordering of a combined call to `leave` and `enter`. let head = reservation.head.swap(ptr::null_mut(), Ordering::SeqCst); if head != Entry::INACTIVE { // Decrement the reference counts of any batches that were retired. unsafe { self.traverse(head) } } } /// Add a node to the retirement batch, retiring the batch if `batch_size` /// nodes are reached. /// /// # Safety /// /// The given pointer must no longer be accessible to any thread that enters /// after it is removed. It also cannot be accessed by the current /// thread after `add` is called. /// /// The pointer also be valid to pass to the provided reclaimer once it is /// safe to reclaim. /// /// Additionally, current thread must have unique access to the batch for /// the provided `thread`. #[inline] pub unsafe fn add( &self, ptr: *mut T, reclaim: unsafe fn(*mut T, &crate::Collector), thread: Thread, ) { // Safety: The caller guarantees we have unique access to the batch. let local_batch = unsafe { self.batches.load(thread).get() }; // Safety: The caller guarantees we have unique access to the batch. let batch = unsafe { (*local_batch).get_or_init(self.batch_size) }; // If we are in a recursive call during `drop` or `reclaim_all`, reclaim the // object immediately. if batch == LocalBatch::DROP { // Safety: `LocalBatch::DROP` means we have unique access to the collector. // Additionally, the caller guarantees that the pointer is valid for the // provided reclaimer. unsafe { reclaim(ptr, crate::Collector::from_raw(self)) } return; } // Safety: `fn(*mut T) and fn(*mut U)` are ABI compatible if `T, U: Sized`. let reclaim: unsafe fn(*mut (), &crate::Collector) = unsafe { std::mem::transmute(reclaim) }; // Safety: The caller guarantees we have unique access to the batch. 
let len = unsafe { // Create an entry for this node. (*batch).entries.push(Entry { batch, reclaim, ptr: ptr.cast::<()>(), state: EntryState { head: ptr::null_mut(), }, }); (*batch).entries.len() }; // Attempt to retire the batch if we have enough entries. if len >= self.batch_size { // Safety: The caller guarantees that we have unique access to the batch, and we // are not holding on to any mutable references. unsafe { self.try_retire(local_batch) } } } /// Attempt to retire objects in the current thread's batch. /// /// # Safety /// /// The current thread must have unique access to the batch for the given /// `thread`. #[inline] pub unsafe fn try_retire_batch(&self, thread: Thread) { // Safety: Guaranteed by caller. unsafe { self.try_retire(self.batches.load(thread).get()) } } /// Attempt to retire objects in this batch. /// /// Note that if a guard on the current thread is active, the batch will /// also be added to the current reservation list for deferred /// reclamation. /// /// # Safety /// /// The current thread must have unique access to the provided batch. /// /// Additionally, the caller should not be holding on to any mutable /// references to the local batch, as they may be invalidated by /// recursive calls to `try_retire`. #[inline] pub unsafe fn try_retire(&self, local_batch: *mut LocalBatch) { // Establish a total order between the retirement of nodes in this batch and // light stores marking a thread as active: // - If the store comes first, we will see that the thread is active. // - If this barrier comes first, the thread will see the new values of any // objects in this batch. // // This barrier also synchronizes with the light store executed when // a thread is created: // - If our barrier comes first, they will see the new values of any objects in // this batch. // - If their store comes first, we will see the new thread. membarrier::heavy(); // Safety: The caller guarantees we have unique access to the batch. let batch = unsafe { (*local_batch).batch }; // There is nothing to retire. if batch.is_null() || batch == LocalBatch::DROP { return; } // Safety: The caller guarantees we have unique access to the batch. let batch_entries = unsafe { (*batch).entries.as_mut_ptr() }; let mut marked = 0; // Record all active threads, including the current thread. // // We need to do this in a separate step before actually retiring the batch to // ensure we have enough entries for reservation lists, as the number of // threads can grow dynamically. // // Safety: We only access `reservation.head`, which is an atomic pointer that is // sound to access from multiple threads. for reservation in unsafe { self.reservations.iter() } { // If this thread is inactive, we can skip it. The heavy barrier above ensures // that the next time it becomes active, it will see the new values // of any objects in this batch. // // Relaxed: See the Acquire fence below. if reservation.head.load(Ordering::Relaxed) == Entry::INACTIVE { continue; } // If we don't have enough entries to insert into the reservation lists of all // active threads, try again later. // // Safety: The caller guarantees we have unique access to the batch. let Some(entry) = unsafe { &mut (*batch).entries }.get_mut(marked) else { return; }; // Temporarily store this reservation list in the batch. // // Safety: All nodes in a batch are valid and this batch has not yet been shared // to other threads. entry.state.head = &reservation.head; marked += 1; } // We have enough entries to perform reclamation.
At this point, we can reset // the local batch. unsafe { *local_batch = LocalBatch::default() }; // For any inactive threads we skipped above, synchronize with `leave` to ensure // any accesses happen-before we retire. We ensured with the heavy // barrier above that the thread will see the new values of any objects // in this batch the next time it becomes active. atomic::fence(Ordering::Acquire); let mut active = 0; // Add the batch to the reservation lists of any active threads. 'retire: for i in 0..marked { // Safety: The caller guarantees we have unique access to the batch, and we // ensure we have at least `marked` entries in the batch. let curr = unsafe { batch_entries.add(i) }; // Safety: `curr` is a valid node in the batch, and we just initialized `head` // for all `marked` nodes in the previous loop. let head = unsafe { &*(*curr).state.head }; // Relaxed: All writes to the `head` use RMW instructions, so the previous node // in the list is synchronized through the release sequence on // `head`. let mut prev = head.load(Ordering::Relaxed); loop { // The thread became inactive, skip it. // // As long as the thread became inactive at some point after the heavy barrier, // it can no longer access any objects in this batch. The next // time it becomes active it will load the new object values. if prev == Entry::INACTIVE { // Acquire: Synchronize with `leave` to ensure any accesses happen-before we // retire. atomic::fence(Ordering::Acquire); continue 'retire; } // Link this node to the reservation list. unsafe { (*curr).state.next = prev } // Release: Ensure our access of the node, as well as the stores of new values // for any objects in the batch, are synchronized when this // thread calls `leave` and attempts to reclaim this batch. match head.compare_exchange_weak(prev, curr, Ordering::Release, Ordering::Relaxed) { Ok(_) => break, // Lost the race to another thread, retry. Err(found) => prev = found, } } active += 1; } // Release: If we don't free the list, ensure our access of the batch is // synchronized with the thread that eventually will. // // Safety: The caller guarantees we have unique access to the batch. if unsafe { &*batch } .active .fetch_add(active, Ordering::Release) .wrapping_add(active) == 0 { // Acquire: Ensure any access of objects in the batch, by threads that were // active and decremented the reference count, happen-before we free // it. atomic::fence(Ordering::Acquire); // Safety: The reference count is zero, meaning that either no threads were // active, or they have all already decremented the reference count. // // Additionally, the local batch has been reset and we are not holding on to any // mutable references, so any recursive calls to `retire` during // reclamation are valid. unsafe { self.free_batch(batch) } } } /// Traverse the reservation list, decrementing the reference count of each /// batch. /// /// # Safety /// /// `list` must be a valid reservation list. #[cold] #[inline(never)] unsafe fn traverse(&self, mut list: *mut Entry) { while !list.is_null() { let curr = list; // Advance the cursor. // Safety: `curr` is a valid, non-null node in the list. list = unsafe { (*curr).state.next }; let batch = unsafe { (*curr).batch }; // Safety: Batch pointers are valid for reads until they are reclaimed. unsafe { // Release: If we don't free the list, ensure our access of the batch is // synchronized with the thread that eventually will. 
if (*batch).active.fetch_sub(1, Ordering::Release) == 1 { // Ensure any access of objects in the batch by other active threads // happen-before we free it. atomic::fence(Ordering::Acquire); // Safety: We have the last reference to the batch and it has been removed from // our reservation list. self.free_batch(batch) } } } } /// Reclaim all values in the collector, including recursive calls to /// retire. /// /// # Safety /// /// No threads may be accessing the collector or any values that have been /// retired. This is equivalent to having a unique reference to the data /// structure containing the collector. #[inline] pub unsafe fn reclaim_all(&self) { // Safety: Guaranteed by caller. for local_batch in unsafe { self.batches.iter() } { let local_batch = local_batch.value.get(); // Safety: The caller guarantees we have unique access to the batch. let batch = unsafe { (*local_batch).batch }; // There is nothing to reclaim. if batch.is_null() { continue; } // Tell any recursive calls to `retire` to reclaim immediately. // // Safety: The caller guarantees we have unique access to the batch. unsafe { (*local_batch).batch = LocalBatch::DROP }; // Safety: The caller guarantees we have unique access to the batch, and we // ensured it is non-null. Additionally, the local batch was reset // above, so the batch is inaccessible through recursive calls to // `retire`. unsafe { self.free_batch(batch) }; // Reset the batch. // // Safety: The caller guarantees we have unique access to the batch. unsafe { (*local_batch).batch = ptr::null_mut() }; } } /// Free a batch of objects. /// /// # Safety /// /// The batch reference count must be zero. /// /// Additionally, the current thread must not be holding on to any mutable /// references to thread-locals as recursive calls to `retire` may still /// access the local batch; the batch being retired must be unreachable /// through any recursive calls. #[inline] unsafe fn free_batch(&self, batch: *mut Batch) { // Safety: We have a unique reference to the batch. for entry in unsafe { (*batch).entries.iter_mut() } { unsafe { (entry.reclaim)(entry.ptr.cast(), crate::Collector::from_raw(self)) }; } unsafe { LocalBatch::free(batch) }; } } impl Drop for Collector { fn drop(&mut self) { // Safety: Values are only retired after being made inaccessible to any // inactive threads. Additionally, we have `&mut self`, meaning that any // active threads are no longer accessing retired values. unsafe { self.reclaim_all() }; } } /// A per-thread reservation list. /// /// Reservation lists are lists of retired entries, where each entry represents /// a batch. #[repr(C)] pub struct Reservation { /// The head of the list head: AtomicPtr, /// The number of active guards for this thread. pub guards: Cell, /// A lock used for owned guards to prevent concurrent operations. pub lock: Mutex<()>, } // Safety: Reservations are only accessed by the current thread, or synchronized // through a lock. unsafe impl Sync for Reservation {} impl Default for Reservation { fn default() -> Self { Reservation { head: AtomicPtr::new(Entry::INACTIVE), guards: Cell::new(0), lock: Mutex::new(()), } } } /// A batch of nodes waiting to be retired. struct Batch { /// Nodes in this batch. /// /// TODO: This allocation could be flattened. entries: Vec, /// The reference count for any active threads. active: AtomicUsize, } impl Batch { /// Create a new batch with the specified capacity. 
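///
/// Note (added, not in the original docs): the collector passes its
/// `batch_size` as the capacity here, so a batch normally has room for a
/// full set of entries up front; it can still grow beyond this if
/// retirement is deferred because there are more active threads than
/// entries in the batch.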
#[inline] fn new(capacity: usize) -> Batch { Batch { entries: Vec::with_capacity(capacity), active: AtomicUsize::new(0), } } } /// A retired object. struct Entry { /// The pointer to the retired object. ptr: *mut (), /// The function used to reclaim the object. reclaim: unsafe fn(*mut (), &crate::Collector), /// The state of the retired object. state: EntryState, /// The batch that this node is a part of. batch: *mut Batch, } /// The state of a retired object. #[repr(C)] pub union EntryState { // While retiring: A temporary location for an active reservation list. head: *const AtomicPtr, // After retiring: The next node in the thread's reservation list. next: *mut Entry, } impl Entry { /// Represents an inactive thread. /// /// While null indicates an empty list, `INACTIVE` indicates the thread has /// no active guards and is not currently accessing any objects. pub const INACTIVE: *mut Entry = usize::MAX as _; } /// A pointer to a batch, unique to the current thread. pub struct LocalBatch { batch: *mut Batch, } impl Default for LocalBatch { fn default() -> Self { LocalBatch { batch: ptr::null_mut(), } } } impl LocalBatch { /// This is set during a call to `reclaim_all`, signalling recursive calls /// to retire to reclaim immediately. const DROP: *mut Batch = usize::MAX as _; /// Returns a pointer to the batch, initializing the batch if it was null. #[inline] fn get_or_init(&mut self, capacity: usize) -> *mut Batch { if self.batch.is_null() { self.batch = Box::into_raw(Box::new(Batch::new(capacity))); } self.batch } /// Free the batch. /// /// # Safety /// /// The safety requirements of `Box::from_raw` apply. #[inline] unsafe fn free(batch: *mut Batch) { // Safety: Guaranteed by caller. unsafe { drop(Box::from_raw(batch)) } } } // Safety: Any access to the batch owned by `LocalBatch` is unsafe. unsafe impl Send for LocalBatch {} seize-0.5.0/src/raw/membarrier.rs000064400000000000000000000311401046102023000150170ustar 00000000000000//! Memory barriers optimized for RCU, inspired by . //! //! # Semantics //! //! There is a total order over all memory barriers provided by this module: //! - Light store barriers, created by a pair of [`light_store`] and //! [`light_barrier`]. //! - Light load barriers, created by a pair of [`light_barrier`] and //! [`light_load`]. //! - Sequentially consistent barriers, or cumulative light barriers. //! - Heavy barriers, created by [`heavy`]. //! //! If thread A issues barrier X and thread B issues barrier Y and X occurs //! before Y in the total order, X is ordered before Y with respect to coherence //! only if either X or Y is a heavy barrier. In other words, there is no way to //! establish an ordering between light barriers without the presence of a heavy //! barrier. #![allow(dead_code)] #[cfg(all(target_os = "linux", feature = "fast-barrier", not(miri)))] pub use linux::*; #[cfg(all(target_os = "windows", feature = "fast-barrier", not(miri)))] pub use windows::*; #[cfg(any( not(feature = "fast-barrier"), not(any(target_os = "windows", target_os = "linux")), miri ))] pub use default::*; #[cfg(any( not(feature = "fast-barrier"), not(any(target_os = "windows", target_os = "linux")), miri ))] mod default { use core::sync::atomic::{fence, Ordering}; pub fn detect() {} /// The ordering for a store operation that synchronizes with heavy /// barriers. /// /// Must be followed by a light barrier. #[inline] pub fn light_store() -> Ordering { // Synchronize with `SeqCst` heavy barriers. 
Ordering::SeqCst } /// Issues a light memory barrier for a preceding store or subsequent load /// operation. #[inline] pub fn light_barrier() { // This is a no-op due to strong loads and stores. } /// The ordering for a load operation that synchronizes with heavy barriers. #[inline] pub fn light_load() -> Ordering { // Participate in the total order established by light and heavy `SeqCst` // barriers. Ordering::SeqCst } /// Issues a heavy memory barrier for slow path that synchronizes with light /// stores. #[inline] pub fn heavy() { // Synchronize with `SeqCst` light stores. fence(Ordering::SeqCst); } } #[cfg(all(target_os = "linux", feature = "fast-barrier", not(miri)))] mod linux { use std::sync::atomic::{self, AtomicU8, Ordering}; /// The ordering for a store operation that synchronizes with heavy /// barriers. /// /// Must be followed by a light barrier. #[inline] pub fn light_store() -> Ordering { match STRATEGY.load(Ordering::Relaxed) { FALLBACK => Ordering::SeqCst, _ => Ordering::Relaxed, } } /// Issues a light memory barrier for a preceding store or subsequent load /// operation. #[inline] pub fn light_barrier() { atomic::compiler_fence(atomic::Ordering::SeqCst) } /// The ordering for a load operation that synchronizes with heavy barriers. #[inline] pub fn light_load() -> Ordering { // There is no difference between `Acquire` and `SeqCst` loads on most // platforms, so checking the strategy is not worth it. Ordering::SeqCst } /// Issues a heavy memory barrier for slow path. #[inline] pub fn heavy() { // Issue a private expedited membarrier using the `sys_membarrier()` system // call, if supported; otherwise, fall back to `mprotect()`-based // process-wide memory barrier. match STRATEGY.load(Ordering::Relaxed) { MEMBARRIER => membarrier::barrier(), MPROTECT => mprotect::barrier(), _ => atomic::fence(atomic::Ordering::SeqCst), } } /// Use the `membarrier` system call. const MEMBARRIER: u8 = 0; /// Use the `mprotect`-based trick. const MPROTECT: u8 = 1; /// Use `SeqCst` fences. const FALLBACK: u8 = 2; /// The right strategy to use on the current machine. static STRATEGY: AtomicU8 = AtomicU8::new(FALLBACK); /// Perform runtime detection for a membarrier strategy. pub fn detect() { if membarrier::is_supported() { STRATEGY.store(MEMBARRIER, Ordering::Relaxed); } else if mprotect::is_supported() { STRATEGY.store(MPROTECT, Ordering::Relaxed); } } macro_rules! fatal_assert { ($cond:expr) => { if !$cond { #[allow(unused_unsafe)] unsafe { libc::abort(); } } }; } mod membarrier { /// Commands for the membarrier system call. /// /// # Caveat /// /// We're defining it here because, unfortunately, the `libc` crate /// currently doesn't expose `membarrier_cmd` for us. You can /// find the numbers in the [Linux source code](https://github.com/torvalds/linux/blob/master/include/uapi/linux/membarrier.h). /// /// This enum should really be `#[repr(libc::c_int)]`, but Rust /// currently doesn't allow it. #[repr(i32)] #[allow(dead_code, non_camel_case_types)] enum membarrier_cmd { MEMBARRIER_CMD_QUERY = 0, MEMBARRIER_CMD_GLOBAL = (1 << 0), MEMBARRIER_CMD_GLOBAL_EXPEDITED = (1 << 1), MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2), MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4), MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5), MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6), } /// Call the `sys_membarrier` system call. 
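///
/// Added note: a negative return value indicates failure. `is_supported`
/// below uses the `QUERY` command to check support and registers the
/// process with `REGISTER_PRIVATE_EXPEDITED`, so that the expedited
/// barrier later issued by `heavy` is valid.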
#[inline] fn sys_membarrier(cmd: membarrier_cmd) -> libc::c_long { unsafe { libc::syscall(libc::SYS_membarrier, cmd as libc::c_int, 0 as libc::c_int) } } /// Returns `true` if the `sys_membarrier` call is available. pub fn is_supported() -> bool { // Queries which membarrier commands are supported. Checks if private expedited // membarrier is supported. let ret = sys_membarrier(membarrier_cmd::MEMBARRIER_CMD_QUERY); if ret < 0 || ret & membarrier_cmd::MEMBARRIER_CMD_PRIVATE_EXPEDITED as libc::c_long == 0 || ret & membarrier_cmd::MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED as libc::c_long == 0 { return false; } // Registers the current process as a user of private expedited membarrier. if sys_membarrier(membarrier_cmd::MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED) < 0 { return false; } true } /// Executes a heavy `sys_membarrier`-based barrier. #[inline] pub fn barrier() { fatal_assert!(sys_membarrier(membarrier_cmd::MEMBARRIER_CMD_PRIVATE_EXPEDITED) >= 0); } } mod mprotect { use std::cell::UnsafeCell; use std::mem::MaybeUninit; use std::ptr; use std::sync::{atomic, OnceLock}; struct Barrier { lock: UnsafeCell, page: u64, page_size: libc::size_t, } unsafe impl Sync for Barrier {} impl Barrier { /// Issues a process-wide barrier by changing access protections of /// a single mmap-ed page. This method is not as fast as /// the `sys_membarrier()` call, but works very /// similarly. #[inline] fn barrier(&self) { let page = self.page as *mut libc::c_void; unsafe { // Lock the mutex. fatal_assert!(libc::pthread_mutex_lock(self.lock.get()) == 0); // Set the page access protections to read + write. fatal_assert!( libc::mprotect(page, self.page_size, libc::PROT_READ | libc::PROT_WRITE,) == 0 ); // Ensure that the page is dirty before we change the protection so that we // prevent the OS from skipping the global TLB flush. let atomic_usize = &*(page as *const atomic::AtomicUsize); atomic_usize.fetch_add(1, atomic::Ordering::SeqCst); // Set the page access protections to none. // // Changing a page protection from read + write to none causes the OS to issue // an interrupt to flush TLBs on all processors. This also results in flushing // the processor buffers. fatal_assert!(libc::mprotect(page, self.page_size, libc::PROT_NONE) == 0); // Unlock the mutex. fatal_assert!(libc::pthread_mutex_unlock(self.lock.get()) == 0); } } } /// An alternative solution to `sys_membarrier` that works on older /// Linux kernels and x86/x86-64 systems. static BARRIER: OnceLock = OnceLock::new(); /// Returns `true` if the `mprotect`-based trick is supported. pub fn is_supported() -> bool { cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64") } /// Executes a heavy `mprotect`-based barrier. #[inline] pub fn barrier() { let barrier = BARRIER.get_or_init(|| { unsafe { // Find out the page size on the current system. let page_size = libc::sysconf(libc::_SC_PAGESIZE); fatal_assert!(page_size > 0); let page_size = page_size as libc::size_t; // Create a dummy page. let page = libc::mmap( ptr::null_mut(), page_size, libc::PROT_NONE, libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, -1 as libc::c_int, 0 as libc::off_t, ); fatal_assert!(page != libc::MAP_FAILED); fatal_assert!(page as libc::size_t % page_size == 0); // Locking the page ensures that it stays in memory during the two mprotect // calls in `Barrier::barrier()`. If the page was unmapped between those calls, // they would not have the expected effect of generating IPI. libc::mlock(page, page_size as libc::size_t); // Initialize the mutex. 
let lock = UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER); let mut attr = MaybeUninit::::uninit(); fatal_assert!(libc::pthread_mutexattr_init(attr.as_mut_ptr()) == 0); let mut attr = attr.assume_init(); fatal_assert!( libc::pthread_mutexattr_settype(&mut attr, libc::PTHREAD_MUTEX_NORMAL) == 0 ); fatal_assert!(libc::pthread_mutex_init(lock.get(), &attr) == 0); fatal_assert!(libc::pthread_mutexattr_destroy(&mut attr) == 0); let page = page as u64; Barrier { lock, page, page_size, } } }); barrier.barrier(); } } } #[cfg(all(target_os = "windows", feature = "fast-barrier", not(miri)))] mod windows { use core::sync::atomic::{self, Ordering}; use windows_sys; pub fn detect() {} /// The ordering for a store operation that synchronizes with heavy /// barriers. /// /// Must be followed by a light barrier. #[inline] pub fn light_store() -> Ordering { Ordering::Relaxed } /// Issues a light memory barrier for a preceding store or subsequent load /// operation. #[inline] pub fn light_barrier() { atomic::compiler_fence(atomic::Ordering::SeqCst) } /// The ordering for a load operation that synchronizes with heavy barriers. #[inline] pub fn light_load() -> Ordering { Ordering::Relaxed } /// Issues a heavy memory barrier for slow path that synchronizes with light /// stores. #[inline] pub fn heavy() { // Invoke the `FlushProcessWriteBuffers()` system call. unsafe { windows_sys::Win32::System::Threading::FlushProcessWriteBuffers() } } } seize-0.5.0/src/raw/mod.rs000064400000000000000000000001731046102023000134530ustar 00000000000000mod collector; mod tls; mod utils; pub mod membarrier; pub use collector::{Collector, Reservation}; pub use tls::Thread; seize-0.5.0/src/raw/tls/mod.rs000064400000000000000000000354771046102023000142740ustar 00000000000000// Copyright 2017 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. mod thread_id; use std::cell::UnsafeCell; use std::mem::MaybeUninit; use std::sync::atomic::{self, AtomicBool, AtomicPtr, Ordering}; use std::{mem, ptr}; pub use thread_id::Thread; /// Per-object thread local storage. pub struct ThreadLocal { /// Buckets with increasing power-of-two sizes. buckets: [AtomicPtr>; thread_id::BUCKETS], } /// An entry in a `ThreadLocal`. struct Entry { /// A flag for initialization. present: AtomicBool, /// The value for this entry. value: UnsafeCell>, } /// Safety: /// /// - We expose mutable references to values when the `ThreadLocal` is dropped, /// hence `T: Send`. /// - However, it is impossible to obtain shared references to `T`s except by /// sharing the `ThreadLocal`, so `T: Sync` is not required. unsafe impl Send for ThreadLocal {} /// Safety: /// /// - Values can be inserted through a shared reference and thus dropped on /// another thread than they were created on, hence `T: Send`. /// - However, there is no way to access a `T` inserted by another thread except /// through iteration, which is unsafe, so `T: Sync` is not required. unsafe impl Sync for ThreadLocal {} impl ThreadLocal { /// Create a `ThreadLocal` container with the given initial capacity. pub fn with_capacity(capacity: usize) -> ThreadLocal { let init = match capacity { 0 => 0, // Initialize enough buckets for `capacity` elements. n => Thread::new(n).bucket, }; let mut buckets = [ptr::null_mut(); thread_id::BUCKETS]; // Initialize the initial buckets. 
for (i, bucket) in buckets[..=init].iter_mut().enumerate() { let bucket_size = Thread::bucket_capacity(i); *bucket = allocate_bucket::(bucket_size); } ThreadLocal { // Safety: `AtomicPtr` has the same representation as `*mut T`. buckets: unsafe { mem::transmute(buckets) }, } } /// Load the slot for the given `thread`, initializing it with a default /// value if necessary. /// /// # Safety /// /// The current thread must have unique access to the slot for the provided /// `thread`. #[inline] pub unsafe fn load(&self, thread: Thread) -> &T where T: Default, { // Safety: Guaranteed by caller. unsafe { self.load_or(T::default, thread) } } /// Load the entry for the given `thread`, initializing it using the /// provided function if necessary. /// /// # Safety /// /// The current thread must have unique access to the slot for the given /// `thread`. #[inline] pub unsafe fn load_or(&self, create: impl Fn() -> T, thread: Thread) -> &T { // Safety: `thread.bucket` is always in bounds. let bucket = unsafe { self.buckets.get_unchecked(thread.bucket) }; let mut bucket_ptr = bucket.load(Ordering::Acquire); if bucket_ptr.is_null() { bucket_ptr = self.initialize(bucket, thread); } // Safety: `thread.entry` is always in bounds, and we ensured the bucket was // initialized above. let entry = unsafe { &*bucket_ptr.add(thread.entry) }; // Relaxed: Only the current thread can set the value. if !entry.present.load(Ordering::Relaxed) { // Safety: Guaranteed by caller. unsafe { self.write(entry, create) } } // Safety: The entry was initialized above. unsafe { (*entry.value.get()).assume_init_ref() } } /// Load the entry for the current thread, returning `None` if it has not /// been initialized. #[cfg(test)] fn try_load(&self) -> Option<&T> { let thread = Thread::current(); // Safety: `thread.bucket` is always in bounds. let bucket_ptr = unsafe { self.buckets.get_unchecked(thread.bucket) }.load(Ordering::Acquire); if bucket_ptr.is_null() { return None; } // Safety: `thread.entry` is always in bounds, and we ensured the bucket was // initialized above. let entry = unsafe { &*bucket_ptr.add(thread.entry) }; // Relaxed: Only the current thread can set the value. if !entry.present.load(Ordering::Relaxed) { return None; } // Safety: The entry was initialized above. unsafe { Some((*entry.value.get()).assume_init_ref()) } } /// Initialize the entry for the given thread. /// /// # Safety /// /// The current thread must have unique access to the uninitialized `entry`. #[cold] #[inline(never)] unsafe fn write(&self, entry: &Entry, create: impl Fn() -> T) { // Insert the new element into the bucket. // // Safety: Guaranteed by caller. unsafe { entry.value.get().write(MaybeUninit::new(create())) }; // Release: Necessary for synchronization with iterators. entry.present.store(true, Ordering::Release); // Synchronize with the heavy barrier in `retire`: // - If this fence comes first, the thread retiring will see our entry. // - If their barrier comes first, we will see the new values of any pointers // being retired by that thread. // // Note that we do not use a light barrier here because the initialization of // the bucket is not performed with the light-store ordering. We // probably could avoid a full fence here, but there are no serious // performance implications. atomic::fence(Ordering::SeqCst); } // Initialize the bucket for the given thread's entry. 
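//
// Added sketch of the race: two threads may both observe a null bucket and
// allocate one; the compare-exchange below picks a winner, and the loser
// frees its own (never shared) allocation and uses the winner's bucket.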
#[cold] #[inline(never)] fn initialize(&self, bucket: &AtomicPtr>, thread: Thread) -> *mut Entry { let new_bucket = allocate_bucket(Thread::bucket_capacity(thread.bucket)); match bucket.compare_exchange( ptr::null_mut(), new_bucket, // Release: If we win the race, synchronize with Acquire loads of the bucket from other // threads. Ordering::Release, // Acquire: If we lose the race, synchronize with the initialization of the bucket that // won. Ordering::Acquire, ) { // We won the race and initialized the bucket. Ok(_) => new_bucket, // We lost the race and can use the bucket that was stored instead. Err(other) => unsafe { // Safety: The pointer has not been shared. let _ = Box::from_raw(ptr::slice_from_raw_parts_mut( new_bucket, Thread::bucket_capacity(thread.bucket), )); other }, } } /// Returns an iterator over all active thread slots. /// /// # Safety /// /// The values stored in the `ThreadLocal` by threads other than the current /// one must be sound to access. #[inline] pub unsafe fn iter(&self) -> Iter<'_, T> { Iter { index: 0, bucket: 0, thread_local: self, bucket_size: Thread::bucket_capacity(0), } } } impl Drop for ThreadLocal { fn drop(&mut self) { // Drop any buckets that were allocatec. for (i, bucket) in self.buckets.iter_mut().enumerate() { let bucket_ptr = *bucket.get_mut(); if bucket_ptr.is_null() { continue; } let bucket_size = Thread::bucket_capacity(i); // Safety: We have `&mut self` and ensured the bucket was initialized. let _ = unsafe { Box::from_raw(std::slice::from_raw_parts_mut(bucket_ptr, bucket_size)) }; } } } impl Drop for Entry { fn drop(&mut self) { if *self.present.get_mut() { // Safety: We have `&mut self` and ensured the entry was initialized. unsafe { ptr::drop_in_place((*self.value.get()).as_mut_ptr()); } } } } /// An iterator over a `ThreadLocal`. pub struct Iter<'a, T> { bucket: usize, index: usize, bucket_size: usize, thread_local: &'a ThreadLocal, } impl<'a, T> Iterator for Iter<'a, T> { type Item = &'a T; #[inline] fn next(&mut self) -> Option { // Because we reuse thread IDs, a new thread could join and be inserted into the // middle of the vector, meaning we have to check all the buckets here. // Yielding extra values is fine, but not yielding all originally active // threads is not. while self.bucket < thread_id::BUCKETS { // Safety: We ensured `self.bucket` was in-bounds above. let bucket = unsafe { self.thread_local .buckets .get_unchecked(self.bucket) .load(Ordering::Acquire) }; if !bucket.is_null() { while self.index < self.bucket_size { // Safety: We ensured `self.index` was in-bounds above. let entry = unsafe { &*bucket.add(self.index) }; // Advance to the next entry. self.index += 1; if entry.present.load(Ordering::Acquire) { // Safety: We ensured the entry was initialized above, and the Acquire load // ensures we synchronized with its initialization. return Some(unsafe { (*entry.value.get()).assume_init_ref() }); } } } // Advance to the next bucket. self.index = 0; self.bucket += 1; self.bucket_size <<= 1; } None } } /// Allocate a bucket with the given capacity. 
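///
/// Added note: the bucket is allocated as a boxed slice of `capacity`
/// uninitialized entries and handed out as a raw pointer; it is later
/// reclaimed with `Box::from_raw`, either by the loser of the
/// initialization race or when the `ThreadLocal` is dropped.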
fn allocate_bucket(capacity: usize) -> *mut Entry { let entries = (0..capacity) .map(|_| Entry:: { present: AtomicBool::new(false), value: UnsafeCell::new(MaybeUninit::uninit()), }) .collect::]>>(); Box::into_raw(entries) as *mut _ } #[cfg(test)] #[allow(clippy::redundant_closure)] mod tests { use super::*; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::Relaxed; use std::sync::{Arc, Barrier}; use std::thread; fn make_create() -> Arc usize + Send + Sync> { let count = AtomicUsize::new(0); Arc::new(move || count.fetch_add(1, Relaxed)) } #[test] fn same_thread() { // Safety: Loading with `Thread::current` is always sound. unsafe { let create = make_create(); let tls = ThreadLocal::with_capacity(1); assert_eq!(None, tls.try_load()); assert_eq!(0, *tls.load_or(|| create(), Thread::current())); assert_eq!(Some(&0), tls.try_load()); assert_eq!(0, *tls.load_or(|| create(), Thread::current())); assert_eq!(Some(&0), tls.try_load()); assert_eq!(0, *tls.load_or(|| create(), Thread::current())); assert_eq!(Some(&0), tls.try_load()); } } #[test] fn different_thread() { // Safety: Loading with `Thread::current` is always sound. unsafe { let create = make_create(); let tls = Arc::new(ThreadLocal::with_capacity(1)); assert_eq!(None, tls.try_load()); assert_eq!(0, *tls.load_or(|| create(), Thread::current())); assert_eq!(Some(&0), tls.try_load()); let tls2 = tls.clone(); let create2 = create.clone(); thread::spawn(move || { assert_eq!(None, tls2.try_load()); assert_eq!(1, *tls2.load_or(|| create2(), Thread::current())); assert_eq!(Some(&1), tls2.try_load()); }) .join() .unwrap(); assert_eq!(Some(&0), tls.try_load()); assert_eq!(0, *tls.load_or(|| create(), Thread::current())); } } #[test] fn iter() { // Safety: Loading with `Thread::current` is always sound. unsafe { let tls = Arc::new(ThreadLocal::with_capacity(1)); tls.load_or(|| Box::new(1), Thread::current()); let tls2 = tls.clone(); thread::spawn(move || { tls2.load_or(|| Box::new(2), Thread::current()); let tls3 = tls2.clone(); thread::spawn(move || { tls3.load_or(|| Box::new(3), Thread::current()); }) .join() .unwrap(); drop(tls2); }) .join() .unwrap(); let tls = Arc::try_unwrap(tls).unwrap_or_else(|_| panic!(".")); let mut v = tls.iter().map(|x| **x).collect::>(); v.sort_unstable(); assert_eq!(vec![1, 2, 3], v); } } #[test] fn iter_snapshot() { // Safety: Loading with `Thread::current` is always sound. unsafe { let tls = Arc::new(ThreadLocal::with_capacity(1)); tls.load_or(|| Box::new(1), Thread::current()); let iterator = tls.iter(); tls.load_or(|| Box::new(2), Thread::current()); let v = iterator.map(|x| **x).collect::>(); assert_eq!(vec![1], v); } } #[test] fn test_drop() { let local = ThreadLocal::with_capacity(1); struct Dropped(Arc); impl Drop for Dropped { fn drop(&mut self) { self.0.fetch_add(1, Relaxed); } } let dropped = Arc::new(AtomicUsize::new(0)); // Safety: Loading with `Thread::current` is always sound. unsafe { local.load_or(|| Dropped(dropped.clone()), Thread::current()); } assert_eq!(dropped.load(Relaxed), 0); drop(local); assert_eq!(dropped.load(Relaxed), 1); } #[test] fn iter_many() { let tls = Arc::new(ThreadLocal::with_capacity(0)); let barrier = Arc::new(Barrier::new(65)); for i in 0..64 { let tls = tls.clone(); let barrier = barrier.clone(); thread::spawn(move || { dbg!(i); // Safety: Loading with `Thread::current` is always sound. 
unsafe { tls.load_or(|| 1, Thread::current()); } barrier.wait(); }); } barrier.wait(); unsafe { assert_eq!(tls.iter().count(), 64) } } } seize-0.5.0/src/raw/tls/thread_id.rs000064400000000000000000000156651046102023000154350ustar 00000000000000// Copyright 2017 Amanieu d'Antras // // Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::cell::Cell; use std::cmp::Reverse; use std::collections::BinaryHeap; use std::sync::{Mutex, OnceLock}; /// An allocator for thread IDs. /// /// The allocator attempts to aggressively reuse thread IDs where possible to /// avoid cases where a `ThreadLocal` grows indefinitely when it is used by many /// short-lived threads. #[derive(Default)] struct ThreadIdManager { free_from: usize, free_list: BinaryHeap>, } impl ThreadIdManager { /// Allocate a new thread ID. fn alloc(&mut self) -> usize { if let Some(id) = self.free_list.pop() { id.0 } else { let id = self.free_from; self.free_from = self .free_from .checked_add(1) .expect("Ran out of thread IDs"); id } } /// Free a thread ID for reuse. fn free(&mut self, id: usize) { self.free_list.push(Reverse(id)); } } /// Returns a reference to the global thread ID manager. fn thread_id_manager() -> &'static Mutex { static THREAD_ID_MANAGER: OnceLock> = OnceLock::new(); THREAD_ID_MANAGER.get_or_init(Default::default) } /// A unique identifier for a slot in a triangular vector, such as /// `ThreadLocal`. /// /// A thread ID may be reused after the corresponding thread exits. #[derive(Clone, Copy)] pub struct Thread { /// A unique identifier for the thread. pub id: usize, /// The index of the entry in the bucket. pub entry: usize, /// The index of the bucket. pub bucket: usize, } /// The number of entries that are skipped from the start of a vector. /// /// Index calculations assume that buckets are of sizes `[2^0, 2^1, ..., 2^63]`. /// To skip shorter buckets and avoid unnecessary allocations, the zeroeth entry /// index is remapped to a larger index (`2^0 + ... + 2^4 = 31`). const ZERO_ENTRY: usize = 31; /// The number of buckets that are skipped from the start of a vector. /// /// This is the index that the zeroeth bucket index is remapped to (currently /// `5`). const ZERO_BUCKET: usize = (usize::BITS - ZERO_ENTRY.leading_zeros()) as usize; /// The number of buckets in a vector. pub const BUCKETS: usize = (usize::BITS as usize) - ZERO_BUCKET; /// The maximum index of an element in the vector. /// /// Note that capacity of the vector is: /// `2^ZERO_BUCKET + ... + 2^63 = usize::MAX - ZERO_INDEX`. const MAX_INDEX: usize = usize::MAX - ZERO_ENTRY - 1; impl Thread { /// Returns a `ThreadId` identifier from a generic unique thread ID. /// /// The ID provided must not exceed `MAX_INDEX`. #[inline] pub fn new(id: usize) -> Thread { if id > MAX_INDEX { panic!("exceeded maximum thread count") } // Offset the ID based on the number of entries we skip at the start of the // buckets array. let index = id + ZERO_ENTRY; // Calculate the bucket index based on ⌊log2(index)⌋. let bucket = BUCKETS - ((index + 1).leading_zeros() as usize) - 1; // Offset the absolute index by the capacity of the preceding buckets. let entry = index - (Thread::bucket_capacity(bucket) - 1); Thread { id, bucket, entry } } /// Returns the capacity of the bucket at the given index. #[inline] pub fn bucket_capacity(bucket: usize) -> usize { 1 << (bucket + ZERO_BUCKET) } /// Get the current thread. 
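///
/// Added note: the thread's ID is allocated lazily and cached in a
/// thread-local `Cell`, so repeated calls are a plain thread-local read;
/// a separate `ThreadGuard` thread-local returns the ID to the allocator
/// when the thread exits, which is why IDs may be reused.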
#[inline] pub fn current() -> Thread { THREAD.with(|thread| { if let Some(thread) = thread.get() { thread } else { Thread::init_slow(thread) } }) } /// Slow path for allocating a thread ID. #[cold] #[inline(never)] fn init_slow(thread: &Cell>) -> Thread { let new = Thread::create(); thread.set(Some(new)); THREAD_GUARD.with(|guard| guard.id.set(new.id)); new } /// Create a new thread. pub fn create() -> Thread { Thread::new(thread_id_manager().lock().unwrap().alloc()) } /// Free the given thread. /// /// # Safety /// /// This function must only be called once on a given thread. pub unsafe fn free(id: usize) { thread_id_manager().lock().unwrap().free(id); } } // This is split into 2 thread-local variables so that we can check whether the // thread is initialized without having to register a thread-local destructor. // // This makes the fast path smaller. thread_local! { static THREAD: Cell> = const { Cell::new(None) }; } thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; } // Guard to ensure the thread ID is released on thread exit. struct ThreadGuard { // We keep a copy of the thread ID in the `ThreadGuard`: we can't reliably access // `THREAD` in our `Drop` impl due to the unpredictable order of TLS destructors. id: Cell, } impl Drop for ThreadGuard { fn drop(&mut self) { // Release the thread ID. Any further accesses to the thread ID will go through // get_slow which will either panic or initialize a new ThreadGuard. let _ = THREAD.try_with(|thread| thread.set(None)); // Safety: We are in `drop` and the current thread uniquely owns this ID. unsafe { Thread::free(self.id.get()) }; } } #[cfg(test)] mod tests { use super::*; #[test] fn thread() { assert_eq!(Thread::bucket_capacity(0), 32); for i in 0..32 { let thread = Thread::new(i); assert_eq!(thread.id, i); assert_eq!(thread.bucket, 0); assert_eq!(thread.entry, i); } assert_eq!(Thread::bucket_capacity(1), 64); for i in 33..96 { let thread = Thread::new(i); assert_eq!(thread.id, i); assert_eq!(thread.bucket, 1); assert_eq!(thread.entry, i - 32); } assert_eq!(Thread::bucket_capacity(2), 128); for i in 96..224 { let thread = Thread::new(i); assert_eq!(thread.id, i); assert_eq!(thread.bucket, 2); assert_eq!(thread.entry, i - 96); } } #[test] fn max_entries() { let mut entries = 0; for i in 0..BUCKETS { entries += Thread::bucket_capacity(i); } assert_eq!(entries, MAX_INDEX + 1); let max = Thread::new(MAX_INDEX); assert_eq!(max.id, MAX_INDEX); assert_eq!(max.bucket, BUCKETS - 1); assert_eq!(Thread::bucket_capacity(BUCKETS - 1), 1 << (usize::BITS - 1)); assert_eq!(max.entry, (1 << (usize::BITS - 1)) - 1); } } seize-0.5.0/src/raw/utils.rs000064400000000000000000000020461046102023000140350ustar 00000000000000/// Pads and aligns a value to the length of a cache line. 
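///
/// A minimal sketch (not part of the original docs) showing the intent:
/// padding keeps values written by different threads on separate cache
/// lines, avoiding false sharing between adjacent per-thread slots.
///
/// ```ignore
/// use std::sync::atomic::{AtomicUsize, Ordering};
///
/// // Hypothetical usage of this internal utility type.
/// let counter = CachePadded { value: AtomicUsize::new(0) };
/// counter.value.fetch_add(1, Ordering::Relaxed);
/// assert_eq!(counter.value.load(Ordering::Relaxed), 1);
/// ```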
#[cfg_attr( any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64", ), repr(align(128)) )] #[cfg_attr( any( target_arch = "arm", target_arch = "mips", target_arch = "mips64", target_arch = "riscv64", ), repr(align(32)) )] #[cfg_attr(target_arch = "s390x", repr(align(256)))] #[cfg_attr( not(any( target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64", target_arch = "arm", target_arch = "mips", target_arch = "mips64", target_arch = "riscv64", target_arch = "s390x", )), repr(align(64)) )] #[derive(Default)] pub struct CachePadded { pub value: T, } impl std::ops::Deref for CachePadded { type Target = T; fn deref(&self) -> &T { &self.value } } impl std::ops::DerefMut for CachePadded { fn deref_mut(&mut self) -> &mut T { &mut self.value } } seize-0.5.0/src/reclaim.rs000064400000000000000000000014341046102023000135200ustar 00000000000000//! Common memory reclaimers. //! //! The functions in this module can be passed to //! [`retire`](crate::Collector::retire) to free allocated memory or run drop //! glue. See [the guide](crate#custom-reclaimers) for details about memory //! reclamation, and writing custom reclaimers. use std::ptr; use crate::Collector; /// Reclaims memory allocated with [`Box`]. /// /// # Safety /// /// The safety requirements of [`Box::from_raw`] apply. pub unsafe fn boxed(ptr: *mut T, _collector: &Collector) { unsafe { drop(Box::from_raw(ptr)) } } /// Reclaims memory by dropping the value in place. /// /// # Safety /// /// The safety requirements of [`ptr::drop_in_place`] apply. pub unsafe fn in_place(ptr: *mut T, _collector: &Collector) { unsafe { ptr::drop_in_place::(ptr) } } seize-0.5.0/tests/lib.rs000064400000000000000000000433571046102023000132370ustar 00000000000000use seize::{reclaim, Collector, Guard}; use std::mem::ManuallyDrop; use std::ptr; use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use std::sync::{mpsc, Arc, Barrier}; use std::thread; #[test] fn is_sync() { fn assert_send_sync() {} assert_send_sync::(); assert_send_sync::(); } struct DropTrack(Arc); impl Drop for DropTrack { fn drop(&mut self) { self.0.fetch_add(1, Ordering::Relaxed); } } fn boxed(value: T) -> *mut T { Box::into_raw(Box::new(value)) } struct UnsafeSend(T); unsafe impl Send for UnsafeSend {} #[test] fn single_thread() { let collector = Arc::new(Collector::new().batch_size(2)); let dropped = Arc::new(AtomicUsize::new(0)); // multiple of 2 let items = cfg::ITEMS & !1; for _ in 0..items { let zero = AtomicPtr::new(boxed(DropTrack(dropped.clone()))); { let guard = collector.enter(); let _ = guard.protect(&zero, Ordering::Relaxed); } { let guard = collector.enter(); let value = guard.protect(&zero, Ordering::Acquire); unsafe { collector.retire(value, reclaim::boxed) } } } assert_eq!(dropped.load(Ordering::Relaxed), items); } #[test] fn two_threads() { let collector = Arc::new(Collector::new().batch_size(3)); let a_dropped = Arc::new(AtomicUsize::new(0)); let b_dropped = Arc::new(AtomicUsize::new(0)); let (tx, rx) = mpsc::channel(); let one = Arc::new(AtomicPtr::new(boxed(DropTrack(a_dropped.clone())))); let h = thread::spawn({ let one = one.clone(); let collector = collector.clone(); move || { let guard = collector.enter(); let _value = guard.protect(&one, Ordering::Acquire); tx.send(()).unwrap(); drop(guard); tx.send(()).unwrap(); } }); for _ in 0..2 { let zero = AtomicPtr::new(boxed(DropTrack(b_dropped.clone()))); let guard = collector.enter(); let value = guard.protect(&zero, Ordering::Acquire); unsafe { collector.retire(value, reclaim::boxed) 
        }
    }

    rx.recv().unwrap(); // wait for thread to access value

    let guard = collector.enter();
    let value = guard.protect(&one, Ordering::Acquire);
    unsafe { collector.retire(value, reclaim::boxed) }

    rx.recv().unwrap(); // wait for thread to drop guard
    h.join().unwrap();
    drop(guard);

    assert_eq!(
        (
            b_dropped.load(Ordering::Acquire),
            a_dropped.load(Ordering::Acquire)
        ),
        (2, 1)
    );
}

#[test]
fn refresh() {
    let collector = Arc::new(Collector::new().batch_size(3));

    let items = (0..cfg::ITEMS)
        .map(|i| AtomicPtr::new(boxed(i)))
        .collect::<Arc<[_]>>();

    let handles = (0..cfg::THREADS)
        .map(|_| {
            thread::spawn({
                let items = items.clone();
                let collector = collector.clone();

                move || {
                    let mut guard = collector.enter();

                    for _ in 0..cfg::ITER {
                        for item in items.iter() {
                            let item = guard.protect(item, Ordering::Acquire);
                            unsafe { assert!(*item < cfg::ITEMS) }
                        }

                        guard.refresh();
                    }
                }
            })
        })
        .collect::<Vec<_>>();

    for i in 0..cfg::ITER {
        for item in items.iter() {
            let old = item.swap(Box::into_raw(Box::new(i)), Ordering::AcqRel);
            unsafe { collector.retire(old, reclaim::boxed) }
        }
    }

    for handle in handles {
        handle.join().unwrap()
    }

    // cleanup
    for item in items.iter() {
        let old = item.swap(ptr::null_mut(), Ordering::Acquire);
        unsafe { collector.retire(old, reclaim::boxed) }
    }
}

#[test]
fn recursive_retire() {
    struct Recursive {
        _value: usize,
        pointers: Vec<*mut usize>,
    }

    let collector = Collector::new().batch_size(1);

    let ptr = boxed(Recursive {
        _value: 0,
        pointers: (0..cfg::ITEMS).map(boxed).collect(),
    });

    unsafe {
        collector.retire(ptr, |ptr: *mut Recursive, collector| {
            let value = Box::from_raw(ptr);

            for pointer in value.pointers {
                collector.retire(pointer, reclaim::boxed);

                let mut guard = collector.enter();
                guard.flush();
                guard.refresh();
                drop(guard);
            }
        });

        collector.enter().flush();
    }
}

#[test]
fn reclaim_all() {
    let collector = Collector::new().batch_size(2);

    for _ in 0..cfg::ITER {
        let dropped = Arc::new(AtomicUsize::new(0));

        let items = (0..cfg::ITEMS)
            .map(|_| AtomicPtr::new(boxed(DropTrack(dropped.clone()))))
            .collect::<Vec<_>>();

        for item in items {
            unsafe { collector.retire(item.load(Ordering::Relaxed), reclaim::boxed) };
        }

        unsafe { collector.reclaim_all() };
        assert_eq!(dropped.load(Ordering::Relaxed), cfg::ITEMS);
    }
}

#[test]
fn recursive_retire_reclaim_all() {
    struct Recursive {
        _value: usize,
        pointers: Vec<*mut DropTrack>,
    }

    unsafe {
        let collector = Collector::new().batch_size(cfg::ITEMS * 2);
        let dropped = Arc::new(AtomicUsize::new(0));

        let ptr = boxed(Recursive {
            _value: 0,
            pointers: (0..cfg::ITEMS)
                .map(|_| boxed(DropTrack(dropped.clone())))
                .collect(),
        });

        collector.retire(ptr, |ptr: *mut Recursive, collector| {
            let value = Box::from_raw(ptr);

            for pointer in value.pointers {
                (*collector).retire(pointer, reclaim::boxed);
            }
        });

        collector.reclaim_all();
        assert_eq!(dropped.load(Ordering::Relaxed), cfg::ITEMS);
    }
}

#[test]
fn defer_retire() {
    let collector = Collector::new().batch_size(5);
    let dropped = Arc::new(AtomicUsize::new(0));

    let objects: Vec<_> = (0..30).map(|_| boxed(DropTrack(dropped.clone()))).collect();

    let guard = collector.enter();

    for object in objects {
        unsafe { guard.defer_retire(object, reclaim::boxed) }
        guard.flush();
    }

    // guard is still active
    assert_eq!(dropped.load(Ordering::Relaxed), 0);

    drop(guard);

    // now the objects should have been dropped
    assert_eq!(dropped.load(Ordering::Relaxed), 30);
}

#[test]
fn reentrant() {
    let collector = Arc::new(Collector::new().batch_size(5));
    let dropped = Arc::new(AtomicUsize::new(0));

    let objects: UnsafeSend<Vec<*mut DropTrack>> = UnsafeSend((0..5).map(|_|
        boxed(DropTrack(dropped.clone()))).collect());

    assert_eq!(dropped.load(Ordering::Relaxed), 0);

    let guard1 = collector.enter();
    let guard2 = collector.enter();
    let guard3 = collector.enter();

    thread::spawn({
        let collector = collector.clone();

        move || {
            let guard = collector.enter();

            for object in { objects }.0 {
                unsafe { guard.defer_retire(object, reclaim::boxed) }
            }
        }
    })
    .join()
    .unwrap();

    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard1);
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard2);
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard3);
    assert_eq!(dropped.load(Ordering::Relaxed), 5);

    let dropped = Arc::new(AtomicUsize::new(0));

    let objects: UnsafeSend<Vec<*mut DropTrack>> =
        UnsafeSend((0..5).map(|_| boxed(DropTrack(dropped.clone()))).collect());

    assert_eq!(dropped.load(Ordering::Relaxed), 0);

    let mut guard1 = collector.enter();
    let mut guard2 = collector.enter();
    let mut guard3 = collector.enter();

    thread::spawn({
        let collector = collector.clone();

        move || {
            let guard = collector.enter();

            for object in { objects }.0 {
                unsafe { guard.defer_retire(object, reclaim::boxed) }
            }
        }
    })
    .join()
    .unwrap();

    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    guard1.refresh();
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard1);
    guard2.refresh();
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    drop(guard2);
    assert_eq!(dropped.load(Ordering::Relaxed), 0);
    guard3.refresh();
    assert_eq!(dropped.load(Ordering::Relaxed), 5);
}

#[test]
fn swap_stress() {
    for _ in 0..cfg::ITER {
        let collector = Collector::new();
        let entries = [const { AtomicPtr::new(ptr::null_mut()) }; cfg::ITEMS];

        thread::scope(|s| {
            for _ in 0..cfg::THREADS {
                s.spawn(|| {
                    for i in 0..cfg::ITEMS {
                        let guard = collector.enter();
                        let new = Box::into_raw(Box::new(i));
                        let old = guard.swap(&entries[i], new, Ordering::AcqRel);

                        if !old.is_null() {
                            unsafe { assert_eq!(*old, i) }
                            unsafe { guard.defer_retire(old, reclaim::boxed) }
                        }
                    }
                });
            }
        });

        for i in 0..cfg::ITEMS {
            let val = entries[i].load(Ordering::Relaxed);
            let _ = unsafe { Box::from_raw(val) };
        }
    }
}

#[test]
fn cas_stress() {
    for _ in 0..cfg::ITER {
        let collector = Collector::new();
        let entries = [const { AtomicPtr::new(ptr::null_mut()) }; cfg::ITEMS];

        thread::scope(|s| {
            for _ in 0..cfg::THREADS {
                s.spawn(|| {
                    for i in 0..cfg::ITEMS {
                        let guard = collector.enter();
                        let new = Box::into_raw(Box::new(i));

                        loop {
                            let old = entries[i].load(Ordering::Relaxed);

                            let result = guard.compare_exchange(
                                &entries[i],
                                old,
                                new,
                                Ordering::AcqRel,
                                Ordering::Relaxed,
                            );

                            let Ok(old) = result else {
                                continue;
                            };

                            if !old.is_null() {
                                unsafe { assert_eq!(*old, i) }
                                unsafe { guard.defer_retire(old, reclaim::boxed) }
                            }

                            break;
                        }
                    }
                });
            }
        });

        for i in 0..cfg::ITEMS {
            let val = entries[i].load(Ordering::Relaxed);
            let _ = unsafe { Box::from_raw(val) };
        }
    }
}

#[test]
fn owned_guard() {
    let collector = Collector::new().batch_size(5);
    let dropped = Arc::new(AtomicUsize::new(0));

    let objects = UnsafeSend(
        (0..5)
            .map(|_| AtomicPtr::new(boxed(DropTrack(dropped.clone()))))
            .collect::<Vec<_>>(),
    );

    assert_eq!(dropped.load(Ordering::Relaxed), 0);

    thread::scope(|s| {
        let guard1 = collector.enter_owned();
        let guard2 = collector.enter();

        for object in objects.0.iter() {
            unsafe { guard2.defer_retire(object.load(Ordering::Acquire), reclaim::boxed) }
        }

        drop(guard2);

        // guard1 is still active
        assert_eq!(dropped.load(Ordering::Relaxed), 0);

        s.spawn(move || {
            for object in objects.0.iter() {
                let _ = unsafe { &*guard1.protect(object, Ordering::Relaxed) };
            }

            // guard1 is still active
            assert_eq!(dropped.load(Ordering::Relaxed), 0);

            drop(guard1);
            assert_eq!(dropped.load(Ordering::Relaxed), 5);
        });
    });
}

#[test]
fn owned_guard_concurrent() {
    let collector = Collector::new().batch_size(1);
    let dropped = Arc::new(AtomicUsize::new(0));

    let objects = UnsafeSend(
        (0..cfg::THREADS)
            .map(|_| AtomicPtr::new(boxed(DropTrack(dropped.clone()))))
            .collect::<Vec<_>>(),
    );

    let guard = collector.enter_owned();
    let barrier = Barrier::new(cfg::THREADS);

    thread::scope(|s| {
        for i in 0..cfg::THREADS {
            let guard = &guard;
            let objects = &objects;
            let dropped = &dropped;
            let barrier = &barrier;

            s.spawn(move || {
                barrier.wait();

                unsafe {
                    guard.defer_retire(objects.0[i].load(Ordering::Acquire), reclaim::boxed)
                };
                guard.flush();

                for object in objects.0.iter() {
                    let _ = unsafe { &*guard.protect(object, Ordering::Relaxed) };
                }

                assert_eq!(dropped.load(Ordering::Relaxed), 0);
            });
        }
    });

    drop(guard);
    assert_eq!(dropped.load(Ordering::Relaxed), cfg::THREADS);
}

#[test]
fn collector_equality() {
    let a = Collector::new();
    let b = Collector::new();

    assert_eq!(a, a);
    assert_eq!(b, b);
    assert_ne!(a, b);

    assert_eq!(*a.enter().collector(), a);
    assert_ne!(*a.enter().collector(), b);

    assert_eq!(*b.enter().collector(), b);
    assert_ne!(*b.enter().collector(), a);
}

#[test]
fn stress() {
    // stress test with operations on a shared stack
    for _ in 0..cfg::ITER {
        let stack = Arc::new(Stack::new(1));

        thread::scope(|s| {
            for i in 0..cfg::ITEMS {
                stack.push(i, &stack.collector.enter());
                stack.pop(&stack.collector.enter());
            }

            for _ in 0..cfg::THREADS {
                s.spawn(|| {
                    for i in 0..cfg::ITEMS {
                        stack.push(i, &stack.collector.enter());
                        stack.pop(&stack.collector.enter());
                    }
                });
            }
        });

        assert!(stack.pop(&stack.collector.enter()).is_none());
        assert!(stack.is_empty());
    }
}

#[test]
fn shared_owned_stress() {
    // all threads sharing an owned guard
    for _ in 0..cfg::ITER {
        let stack = Arc::new(Stack::new(1));
        let guard = &stack.collector.enter_owned();

        thread::scope(|s| {
            for i in 0..cfg::ITEMS {
                stack.push(i, guard);
                stack.pop(guard);
            }

            for _ in 0..cfg::THREADS {
                s.spawn(|| {
                    for i in 0..cfg::ITEMS {
                        stack.push(i, guard);
                        stack.pop(guard);
                    }
                });
            }
        });

        assert!(stack.pop(guard).is_none());
        assert!(stack.is_empty());
    }
}

#[test]
fn owned_stress() {
    // all threads creating an owned guard (this is very unrealistic and stresses
    // tls synchronization)
    for _ in 0..cfg::ITER {
        let stack = Arc::new(Stack::new(1));

        thread::scope(|s| {
            for i in 0..cfg::ITEMS {
                let guard = &stack.collector.enter_owned();
                stack.push(i, guard);
                stack.pop(guard);
            }

            for _ in 0..cfg::THREADS {
                s.spawn(|| {
                    for i in 0..cfg::ITEMS {
                        let guard = &stack.collector.enter_owned();
                        stack.push(i, guard);
                        stack.pop(guard);
                    }
                });
            }
        });

        assert!(stack.pop(&stack.collector.enter_owned()).is_none());
        assert!(stack.is_empty());
    }
}

#[derive(Debug)]
pub struct Stack<T> {
    head: AtomicPtr<Node<T>>,
    collector: Collector,
}

#[derive(Debug)]
struct Node<T> {
    data: ManuallyDrop<T>,
    next: *mut Node<T>,
}

impl<T> Stack<T> {
    pub fn new(batch_size: usize) -> Stack<T> {
        Stack {
            head: AtomicPtr::new(ptr::null_mut()),
            collector: Collector::new().batch_size(batch_size),
        }
    }

    pub fn push(&self, value: T, guard: &impl Guard) {
        let new = boxed(Node {
            data: ManuallyDrop::new(value),
            next: ptr::null_mut(),
        });

        loop {
            let head = guard.protect(&self.head, Ordering::Relaxed);
            unsafe { (*new).next = head }

            if self
                .head
                .compare_exchange(head, new, Ordering::Release, Ordering::Relaxed)
                .is_ok()
            {
                break;
            }
        }
    }

    pub fn pop(&self, guard: &impl Guard) -> Option<T> {
        loop {
            let head = guard.protect(&self.head, Ordering::Acquire);

            if head.is_null() {
                return
                None;
            }

            let next = unsafe { (*head).next };

            if self
                .head
                .compare_exchange(head, next, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
            {
                unsafe {
                    let data = ptr::read(&(*head).data);
                    self.collector.retire(head, reclaim::boxed);
                    return Some(ManuallyDrop::into_inner(data));
                }
            }
        }
    }

    pub fn is_empty(&self) -> bool {
        self.head.load(Ordering::Relaxed).is_null()
    }
}

impl<T> Drop for Stack<T> {
    fn drop(&mut self) {
        let guard = self.collector.enter();
        while self.pop(&guard).is_some() {}
    }
}

#[cfg(any(miri, seize_asan))]
mod cfg {
    pub const THREADS: usize = 4;
    pub const ITEMS: usize = 100;
    pub const ITER: usize = 4;
}

#[cfg(not(any(miri, seize_asan)))]
mod cfg {
    pub const THREADS: usize = 32;
    pub const ITEMS: usize = 10_000;
    pub const ITER: usize = 50;
}
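
// --- Editor's illustrative sketch (not part of the original test suite) ---
//
// The reclaimers documented in `src/reclaim.rs` above are simply passed to
// `Collector::retire`. This minimal sketch shows the two common forms side by
// side: the built-in `reclaim::boxed`, and a custom closure reclaimer
// mirroring the closure form used in `recursive_retire`. It relies only on the
// `boxed` and `DropTrack` helpers defined earlier in this file; the test name
// `reclaimer_sketch` is hypothetical.
#[test]
fn reclaimer_sketch() {
    let collector = Collector::new().batch_size(1);
    let dropped = Arc::new(AtomicUsize::new(0));

    // Retire a Box allocation with the built-in reclaimer.
    let a = boxed(DropTrack(dropped.clone()));
    unsafe { collector.retire(a, reclaim::boxed) };

    // Retire with a custom reclaimer: here it just reconstructs and drops the
    // Box, but it could run arbitrary cleanup logic.
    let b = boxed(DropTrack(dropped.clone()));
    unsafe {
        collector.retire(b, |ptr: *mut DropTrack, _collector| {
            drop(Box::from_raw(ptr));
        })
    };

    // With no active guards, forcing reclamation should free both values.
    unsafe { collector.reclaim_all() };
    assert_eq!(dropped.load(Ordering::Relaxed), 2);
}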