sdd-4.5.3/.cargo_vcs_info.json0000644000000001360000000000100116170ustar { "git": { "sha1": "f5828d462dc12bd60a555183f2e98b403c295981" }, "path_in_vcs": "" }sdd-4.5.3/.gitignore000064400000000000000000000006631046102023000124040ustar 00000000000000# Generated by Cargo # will have compiled files and executables /target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock # These are backup files generated by rustfmt **/*.rs.bk # IntelliJ .idea # macOS **/.DS_Store # VSCode .vscode # Emacs **/#*# **/*~ # Proptest /proptest-regressions/ sdd-4.5.3/.woodpecker/sdd.yml000064400000000000000000000035621046102023000141320ustar 00000000000000matrix: RUST: [stable, nightly] steps: test: when: event: [push, pull_request] image: rust environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - cargo test - cargo test -p examples - cargo test --release - cargo test --release -p examples 32-bit: when: event: [push, pull_request] matrix: RUST: nightly image: rust environment: CARGO_TERM_COLOR: always commands: - apt-get update - apt-get -y install gcc-multilib - rustup default $RUST - rustup target add i686-unknown-linux-gnu - cargo test --target i686-unknown-linux-gnu - cargo test --target i686-unknown-linux-gnu -p examples linter: when: event: [push, pull_request] matrix: RUST: stable image: rust environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - rustup component add rustfmt - rustup component add clippy - cargo fmt -- --check - cargo fmt -p examples --check - cargo clippy --all - cargo clippy -p examples --all - cargo doc --document-private-items miri: when: event: [push, pull_request] matrix: RUST: nightly image: rust environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - rustup component add miri - cargo miri test loom: when: event: [push, pull_request] matrix: RUST: stable image: rust environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - cargo test --features loom --release --lib benchmark: when: event: [push, pull_request] matrix: RUST: stable image: rust environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - cargo bench sdd-4.5.3/CHANGELOG.md000064400000000000000000000055251046102023000122270ustar 00000000000000# Changelog 4.5.3 * Minor optimization of scanning remote thread-local variables. 4.5.2 * Adjust epoch countdown parameters. 4.5.1 * Migrate to [`codeberg`](https://codeberg.org/wvwwvwwv/scalable-delayed-dealloc). * Remove `LinkedEntry::take_inner`: the method is highly dangerous to use. 4.5.0 * Add `Guard::set_has_garbage`. 4.4.0 * Add `Ptr::as_{ptr|ref}_unchecked`. * Add `{Owned|Shared}::as_non_null_ptr`. 4.3.5 * Prepare for an upcoming Rust breaking change: [`Rust#136702`](https://github.com/rust-lang/rust/issues/136702). 4.3.4 * Add `Ptr::as_ref_unchecked`. 4.3.3 * Minor code cleanup. 4.3.2 * Add `Bag::try_push`. 4.3.1 * Add lock-free concurrent data structures: `Bag`, `LinkedList`, `Queue`, and `Stack`. 4.2.5 * Add `Guard::has_garbage`. 4.2.4 * Minor optimization. 4.2.2 - 4.2.3 * `Guard::accelerate` now only accelerates garbage collection of the current thread without affecting other threads. 4.2.1 * `u8` can be converted into `Epoch`. 4.2.0 * `Epoch` uses a range of `[0, 63]` `u8` values instead of rotating four values. 4.1.2 * More const functions. 4.1.1 * Let `miri` not execute Intel-specific code paths. 4.1.0 * The size of `Option` is now that of `Guard`. 
4.0.1 * Minor improvements to documentation. 4.0.0 * Bump MSRV to 1.85.0 / Edition 2024. 3.0.10 * Minor epoch update policy optimization. * Minor `NonNull` optimization on `Owned` and `Shared`. 3.0.9 * Fix unsound `Sync` implementations of `AtomicShared` and `Shared`; previously, the `Sync` implementation allowed an arbitrary thread to own/drop the contained instance. 3.0.8 * Minor `const` optimization. 3.0.7 * Fix a use-after-free issue when thread-local storage is dropped. 3.0.5 * Fix minor linting errors. 3.0.4 * Adjust tests to be more Miri friendly. 3.0.3 * Fix a rare memory ordering issue when dropping thread-local storage. 3.0.2 * Make `SDD` much more friendly to Miri. 3.0.1 * Compatible with the [`Miri`](https://github.com/rust-lang/miri) memory leak checker. * Make `Collectible` private since it is unsafe. * Remove `Guard::defer` which depends on `Collectible`. * Remove `prepare`. 2.1.0 * Minor performance optimization. * Remove `Owned::release`. 2.0.0 * `{Owned, Shared}::release` no longer receives a `Guard`. * `Link` is now public. 1.7.0 * Add `loom` support. 1.6.0 * Add `Guard::accelerate`. 1.5.0 * Fix `Guard::epoch` to return the correct epoch value. 1.4.0 * `Epoch` is now a 4-state type (3 -> 4). 1.3.0 * Add `Epoch` * Add `Guard::epoch`. 1.2.0 * Remove `Collectible::drop_and_dealloc`. 1.1.0 * Add `prepare`. 1.0.1 * Relax trait bounds of `Guard::defer_execute`. 1.0.0 * Minor code cleanup. 0.2.0 * Make `Guard` `UnwindSafe`. 0.1.0 * Minor optimization. 0.0.1 * Initial commit: code copied from [`scalable-concurrent-containers`](https://github.com/wvwwvwwv/scalable-concurrent-containers). sdd-4.5.3/Cargo.lock0000644000000470170000000000100076030ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 4 [[package]] name = "aho-corasick" version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] [[package]] name = "alloca" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" dependencies = [ "cc", ] [[package]] name = "anes" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstyle" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "autocfg" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "bumpalo" version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" version = "1.2.49" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" dependencies = [ "find-msvc-tools", "shlex", ] [[package]] name = "cfg-if" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "ciborium" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", "serde", ] [[package]] name = "ciborium-io" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", ] [[package]] name = "clap" version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstyle", "clap_lex", ] [[package]] name = "clap_lex" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "criterion" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d883447757bb0ee46f233e9dc22eb84d93a9508c9b868687b274fc431d886bf" dependencies = [ "alloca", "anes", "cast", "ciborium", "clap", "criterion-plot", "itertools", "num-traits", "oorandom", "page_size", "plotters", "rayon", "regex", "serde", "serde_json", "tinytemplate", "walkdir", 
] [[package]] name = "criterion-plot" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed943f81ea2faa8dcecbbfa50164acf95d555afec96a27871663b300e387b2e4" dependencies = [ "cast", "itertools", ] [[package]] name = "crossbeam-deque" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "either" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "find-msvc-tools" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" [[package]] name = "generator" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52f04ae4152da20c76fe800fa48659201d5cf627c5149ca0b707b69d7eef6cf9" dependencies = [ "cc", "cfg-if", "libc", "log", "rustversion", "windows-link", "windows-result", ] [[package]] name = "half" version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", "zerocopy", ] [[package]] name = "itertools" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "js-sys" version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "log" version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "loom" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" dependencies = [ "cfg-if", "generator", "scoped-tls", "tracing", "tracing-subscriber", ] [[package]] name = 
"matchers" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ "regex-automata", ] [[package]] name = "memchr" version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "nu-ansi-term" version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ "windows-sys", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "oorandom" version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "page_size" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" dependencies = [ "libc", "winapi", ] [[package]] name = "pin-project-lite" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "plotters" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", "plotters-svg", "wasm-bindgen", "web-sys", ] [[package]] name = "plotters-backend" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] [[package]] name = "proc-macro2" version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] [[package]] name = "rayon" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", ] [[package]] name = "regex" version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", 
"memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-syntax" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "rustversion" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "scoped-tls" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "sdd" version = "4.5.3" dependencies = [ "criterion", "loom", "static_assertions", ] [[package]] name = "serde" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", "serde_derive", ] [[package]] name = "serde_core" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", "serde_core", ] [[package]] name = "sharded-slab" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "smallvec" version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "static_assertions" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "syn" version = "2.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "thread_local" version = "1.1.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", ] [[package]] name = "tinytemplate" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ "serde", "serde_json", ] [[package]] name = "tracing" version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", ] [[package]] name = "tracing-log" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ "log", "once_cell", "tracing-core", ] [[package]] name = "tracing-subscriber" version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex-automata", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", ] [[package]] name = "unicode-ident" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "valuable" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "walkdir" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "wasm-bindgen" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "winapi" version = "0.3.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ "windows-sys", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-sys" version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ "windows-link", ] [[package]] name = "zerocopy" version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", "syn", ] sdd-4.5.3/Cargo.toml0000644000000025750000000000100076260ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2024" rust-version = "1.85.0" name = "sdd" version = "4.5.3" authors = ["wvwwvwwv "] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "Scalable lock-free delayed memory reclaimer" documentation = "https://docs.rs/sdd" readme = "README.md" keywords = [ "concurrent", "epoch", "garbage", "lock-free", "memory", ] categories = [ "concurrency", "data-structures", "memory-management", ] license = "Apache-2.0" repository = "https://codeberg.org/wvwwvwwv/scalable-delayed-dealloc" [lib] name = "sdd" path = "src/lib.rs" [[bench]] name = "bag" path = "benches/bag.rs" harness = false [[bench]] name = "ebr" path = "benches/ebr.rs" harness = false [dependencies.loom] version = "0.7" optional = true [dev-dependencies.criterion] version = "0.8" [dev-dependencies.static_assertions] version = "1.1" sdd-4.5.3/Cargo.toml.orig000064400000000000000000000013161046102023000132770ustar 00000000000000[package] name = "sdd" description = "Scalable lock-free delayed memory reclaimer" documentation = "https://docs.rs/sdd" version = "4.5.3" authors = ["wvwwvwwv "] edition = "2024" rust-version = "1.85.0" readme = "README.md" repository = "https://codeberg.org/wvwwvwwv/scalable-delayed-dealloc" license = "Apache-2.0" categories = ["concurrency", "data-structures", "memory-management"] keywords = ["concurrent", "epoch", "garbage", "lock-free", "memory"] [workspace] members = [".", "examples"] [dependencies] loom = { version = "0.7", optional = true } [dev-dependencies] criterion = "0.8" static_assertions = "1.1" [[bench]] name = "ebr" harness = false [[bench]] name = "bag" harness = false sdd-4.5.3/LICENSE000064400000000000000000000250151046102023000114170ustar 00000000000000 Apache License Version 2.0, April 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2024-present Changgyoo Park Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. sdd-4.5.3/README.md000064400000000000000000000135661046102023000117010ustar 00000000000000# Scalable Delayed Dealloc [![Cargo](https://img.shields.io/crates/v/sdd)](https://crates.io/crates/sdd) ![Crates.io](https://img.shields.io/crates/l/sdd) A scalable lock-free delayed memory reclaimer that emulates garbage collection by keeping track of memory reachability. The delayed deallocation algorithm is based on a variant of epoch-based reclamation where retired memory chunks are temporarily kept in thread-local storage until they are no longer reachable. It is similar to [`crossbeam_epoch`](https://docs.rs/crossbeam-epoch/), however, users will find `sdd` more straightforward to use as `sdd` provides smart pointer types. For instance, `sdd::AtomicOwned`, `sdd::Owned`, `sdd::AtomicShared`, and `sdd::Shared` retire the contained value when the last reference is dropped. ## Features * Lock-free epoch-based reclamation. * [`Loom`](https://crates.io/crates/loom) support: `features = ["loom"]`. ## Examples This crate can be used _without an `unsafe` block_. ```rust use sdd::{suspend, AtomicOwned, AtomicShared, Guard, Owned, Ptr, Shared, Tag}; use std::sync::atomic::Ordering::Relaxed; // `atomic_shared` holds a strong reference to `17`. let atomic_shared: AtomicShared = AtomicShared::new(17); // `atomic_owned` owns `19`. let atomic_owned: AtomicOwned = AtomicOwned::new(19); // `guard` prevents the garbage collector from dropping reachable instances. let guard = Guard::new(); // `ptr` cannot outlive `guard`. 
let mut ptr: Ptr<usize> = atomic_shared.load(Relaxed, &guard); assert_eq!(*ptr.as_ref().unwrap(), 17); // `atomic_shared` can be tagged. atomic_shared.update_tag_if(Tag::First, |p| p.tag() == Tag::None, Relaxed, Relaxed); // `ptr` is not tagged, so CAS fails. assert!(atomic_shared.compare_exchange( ptr, (Some(Shared::new(18)), Tag::First), Relaxed, Relaxed, &guard).is_err()); // `ptr` can be tagged. ptr.set_tag(Tag::First); // The ownership of the contained instance is transferred to the return value of CAS. let prev: Shared<usize> = atomic_shared.compare_exchange( ptr, (Some(Shared::new(19)), Tag::Second), Relaxed, Relaxed, &guard).unwrap().0.unwrap(); assert_eq!(*prev, 17); // `17` will be garbage-collected later. drop(prev); // `sdd::AtomicShared` can be converted into `sdd::Shared`. let shared: Shared<usize> = atomic_shared.into_shared(Relaxed).unwrap(); assert_eq!(*shared, 19); // `18` and `19` will be garbage-collected later. drop(shared); drop(atomic_owned); // `17` is still valid as `guard` keeps the garbage collector from dropping it. assert_eq!(*ptr.as_ref().unwrap(), 17); // Execution of a closure can be deferred until all the current readers are gone. guard.defer_execute(|| println!("deferred")); drop(guard); // `sdd::Owned` and `sdd::Shared` can be nested. let shared_nested: Shared<Owned<Shared<usize>>> = Shared::new(Owned::new(Shared::new(20))); assert_eq!(***shared_nested, 20); // If the thread is expected to lie dormant for a while, call `suspend()` to allow // others to reclaim the memory. suspend(); ``` ## Memory Overhead Retired instances are stored in intrusive queues in thread-local storage, and therefore, additional space for `Option>` is allocated per instance. ## Performance The average time taken to enter and exit a protected region: less than a nanosecond on Apple M4 Pro. ## Applications [`sdd`](https://crates.io/crates/sdd) provides widely used lock-free concurrent data structures, including [`LinkedList`](#linkedlist), [`Bag`](#bag), [`Queue`](#queue), and [`Stack`](#stack). ### `LinkedList` [`LinkedList`](#linkedlist) is a trait that implements lock-free concurrent singly linked list operations. It additionally provides a method for marking a linked list entry to denote a user-defined state. ```rust use std::sync::atomic::Ordering::Relaxed; use sdd::{AtomicShared, Guard, LinkedList, Shared}; #[derive(Default)] struct L(AtomicShared<L>, usize); impl LinkedList for L { fn link_ref(&self) -> &AtomicShared<L> { &self.0 } } let guard = Guard::new(); let head: L = L::default(); let tail: Shared<L> = Shared::new(L(AtomicShared::null(), 1)); // A new entry is pushed. assert!(head.push_back(tail.clone(), false, Relaxed, &guard).is_ok()); assert!(!head.is_marked(Relaxed)); // Users can mark a flag on an entry. head.mark(Relaxed); assert!(head.is_marked(Relaxed)); // `next_ptr` traverses the linked list. let next_ptr = head.next_ptr(Relaxed, &guard); assert_eq!(next_ptr.as_ref().unwrap().1, 1); // Once `tail` is deleted, it becomes unreachable. tail.delete_self(Relaxed); assert!(head.next_ptr(Relaxed, &guard).is_null()); ``` ### `Bag` [`Bag`](#bag) is a concurrent lock-free unordered container. [`Bag`](#bag) is completely opaque, disallowing access to contained instances until they are popped.
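For example, because popping is the only way to observe the contents, draining a [`Bag`](#bag) is simply a `pop` loop. The sketch below relies only on the `push`, `pop`, and `is_empty` methods shown in this section; the order in which instances are popped is unspecified.

```rust
use sdd::Bag;

let bag: Bag<usize> = Bag::default();
for i in 0..4 {
    bag.push(i);
}

// Drain the bag; `pop` returns `None` once it is empty.
let mut drained: Vec<usize> = Vec::new();
while let Some(v) = bag.pop() {
    drained.push(v);
}

// The pop order is unspecified, so sort before comparing.
drained.sort_unstable();
assert_eq!(drained, vec![0, 1, 2, 3]);
assert!(bag.is_empty());
```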
[`Bag`](#bag) is especially efficient if the number of contained instances can be maintained under `ARRAY_LEN (default: usize::BITS / 2)` ```rust use sdd::Bag; let bag: Bag = Bag::default(); bag.push(1); assert!(!bag.is_empty()); assert_eq!(bag.pop(), Some(1)); assert!(bag.is_empty()); ``` ### `Queue` [`Queue`](#queue) is a concurrent lock-free first-in-first-out container. ```rust use sdd::Queue; let queue: Queue = Queue::default(); queue.push(1); assert!(queue.push_if(2, |e| e.map_or(false, |x| **x == 1)).is_ok()); assert!(queue.push_if(3, |e| e.map_or(false, |x| **x == 1)).is_err()); assert_eq!(queue.pop().map(|e| **e), Some(1)); assert_eq!(queue.pop().map(|e| **e), Some(2)); assert!(queue.pop().is_none()); ``` ### `Stack` [`Stack`](#stack) is a concurrent lock-free last-in-first-out container. ```rust use sdd::Stack; let stack: Stack = Stack::default(); stack.push(1); stack.push(2); assert_eq!(stack.pop().map(|e| **e), Some(2)); assert_eq!(stack.pop().map(|e| **e), Some(1)); assert!(stack.pop().is_none()); ``` ## [Changelog](https://codeberg.org/wvwwvwwv/scalable-delayed-dealloc/src/branch/main/CHANGELOG.md) sdd-4.5.3/benches/bag.rs000064400000000000000000000006661046102023000131250ustar 00000000000000use criterion::{Criterion, criterion_group, criterion_main}; use sdd::Bag; fn bag_push_pop(c: &mut Criterion) { let bag: Bag = Bag::default(); let mut i: usize = 0; c.bench_function("Bag: push-pop", |b| { b.iter(|| { bag.push(i); let p = bag.pop(); assert_eq!(p, Some(i)); i += 1; }) }); } criterion_group!(bag, bag_push_pop); criterion_main!(bag); sdd-4.5.3/benches/ebr.rs000064400000000000000000000013771046102023000131440ustar 00000000000000use criterion::{Criterion, criterion_group, criterion_main}; use sdd::Guard; fn guard_accelerate(c: &mut Criterion) { let _guard = Guard::new(); c.bench_function("EBR: accelerate", |b| { b.iter(|| { let guard = Guard::new(); guard.accelerate(); }) }); } fn guard_single(c: &mut Criterion) { c.bench_function("EBR: guard", |b| { b.iter(|| { let _guard = Guard::new(); }) }); } fn guard_superposed(c: &mut Criterion) { let _guard = Guard::new(); c.bench_function("EBR: superposed guard", |b| { b.iter(|| { let _guard = Guard::new(); }) }); } criterion_group!(ebr, guard_accelerate, guard_single, guard_superposed); criterion_main!(ebr); sdd-4.5.3/src/atomic_owned.rs000064400000000000000000000307331046102023000142220ustar 00000000000000use std::mem::forget; use std::panic::UnwindSafe; use std::ptr::{NonNull, null, null_mut}; #[cfg(not(feature = "loom"))] use std::sync::atomic::AtomicPtr; use std::sync::atomic::Ordering::{self, Relaxed}; #[cfg(feature = "loom")] use loom::sync::atomic::AtomicPtr; use crate::ref_counted::RefCounted; use crate::{Guard, Owned, Ptr, Tag}; /// [`AtomicOwned`] owns the underlying instance, and allows users to perform atomic operations /// on the pointer to it. #[derive(Debug)] pub struct AtomicOwned { instance_ptr: AtomicPtr>, } /// A pair of [`Owned`] and [`Ptr`] of the same type. pub type OwnedPtrPair<'g, T> = (Option>, Ptr<'g, T>); impl AtomicOwned { /// Creates a new [`AtomicOwned`] from an instance of `T`. /// /// The type of the instance must be determined at compile-time, must not contain non-static /// references, and must not be a non-static reference since the instance can theoretically /// live as long as the process. 
For instance, `struct Disallowed<'l, T>(&'l T)` is not /// allowed, because an instance of the type cannot outlive `'l` whereas the garbage collector /// does not guarantee that the instance is dropped within `'l`. /// /// # Examples /// /// ``` /// use sdd::AtomicOwned; /// /// let atomic_owned: AtomicOwned = AtomicOwned::new(10); /// ``` #[inline] pub fn new(t: T) -> Self { Self { instance_ptr: AtomicPtr::new(RefCounted::new_unique(t).as_ptr()), } } } impl AtomicOwned { /// Creates a new [`AtomicOwned`] from an [`Owned`] of `T`. /// /// # Examples /// /// ``` /// use sdd::{AtomicOwned, Owned}; /// /// let owned: Owned = Owned::new(10); /// let atomic_owned: AtomicOwned = AtomicOwned::from(owned); /// ``` #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn from(owned: Owned) -> Self { let ptr = owned.underlying_ptr(); forget(owned); let instance_ptr: std::sync::atomic::AtomicPtr> = AtomicPtr::new(ptr.cast_mut()); Self { instance_ptr } } /// Creates a new [`AtomicOwned`] from an [`Owned`] of `T`. #[cfg(feature = "loom")] #[inline] #[must_use] pub fn from(owned: Owned) -> Self { let ptr = owned.underlying_ptr(); forget(owned); let instance_ptr: loom::sync::atomic::AtomicPtr> = AtomicPtr::new(ptr.cast_mut()); Self { instance_ptr } } /// Creates a null [`AtomicOwned`]. /// /// # Examples /// /// ``` /// use sdd::AtomicOwned; /// /// let atomic_owned: AtomicOwned = AtomicOwned::null(); /// ``` #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn null() -> Self { let instance_ptr: std::sync::atomic::AtomicPtr> = AtomicPtr::new(null_mut()); Self { instance_ptr } } /// Creates a null [`AtomicOwned`]. #[cfg(feature = "loom")] #[inline] #[must_use] pub fn null() -> Self { let instance_ptr: loom::sync::atomic::AtomicPtr> = AtomicPtr::new(null_mut()); Self { instance_ptr } } /// Returns `true` if the [`AtomicOwned`] is null. /// /// # Examples /// /// ``` /// use sdd::{AtomicOwned, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_owned: AtomicOwned = AtomicOwned::null(); /// atomic_owned.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed); /// assert!(atomic_owned.is_null(Relaxed)); /// ``` #[inline] #[must_use] pub fn is_null(&self, order: Ordering) -> bool { Tag::unset_tag(self.instance_ptr.load(order)).is_null() } /// Loads a pointer value from the [`AtomicOwned`]. /// /// # Examples /// /// ``` /// use sdd::{AtomicOwned, Guard}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_owned: AtomicOwned = AtomicOwned::new(11); /// let guard = Guard::new(); /// let ptr = atomic_owned.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 11); /// ``` #[inline] #[must_use] pub fn load<'g>(&self, order: Ordering, _guard: &'g Guard) -> Ptr<'g, T> { Ptr::from(self.instance_ptr.load(order)) } /// Stores the given value into the [`AtomicOwned`] and returns the original value. 
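/// /// The replaced instance, if any, is handed back as an [`Owned`] together with the previously stored [`Tag`], so the caller regains unique ownership of it.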
/// /// # Examples /// /// ``` /// use sdd::{AtomicOwned, Guard, Owned, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_owned: AtomicOwned = AtomicOwned::new(14); /// let guard = Guard::new(); /// let (old, tag) = atomic_owned.swap((Some(Owned::new(15)), Tag::Second), Relaxed); /// assert_eq!(tag, Tag::None); /// assert_eq!(*old.unwrap(), 14); /// let (old, tag) = atomic_owned.swap((None, Tag::First), Relaxed); /// assert_eq!(tag, Tag::Second); /// assert_eq!(*old.unwrap(), 15); /// let (old, tag) = atomic_owned.swap((None, Tag::None), Relaxed); /// assert_eq!(tag, Tag::First); /// assert!(old.is_none()); /// ``` #[inline] pub fn swap(&self, new: (Option>, Tag), order: Ordering) -> (Option>, Tag) { let desired = Tag::update_tag( new.0.as_ref().map_or_else(null, Owned::underlying_ptr), new.1, ) .cast_mut(); let prev = self.instance_ptr.swap(desired, order); let tag = Tag::into_tag(prev); let prev_ptr = Tag::unset_tag(prev).cast_mut(); forget(new); (NonNull::new(prev_ptr).map(Owned::from), tag) } /// Returns its [`Tag`]. /// /// # Examples /// /// ``` /// use sdd::{AtomicOwned, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_owned: AtomicOwned = AtomicOwned::null(); /// assert_eq!(atomic_owned.tag(Relaxed), Tag::None); /// ``` #[inline] #[must_use] pub fn tag(&self, order: Ordering) -> Tag { Tag::into_tag(self.instance_ptr.load(order)) } /// Sets a new [`Tag`] if the given condition is met. /// /// Returns `true` if the new [`Tag`] has been successfully set. /// /// # Examples /// /// ``` /// use sdd::{AtomicOwned, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_owned: AtomicOwned = AtomicOwned::null(); /// assert!(atomic_owned.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed)); /// assert_eq!(atomic_owned.tag(Relaxed), Tag::Both); /// ``` #[inline] pub fn update_tag_if) -> bool>( &self, tag: Tag, mut condition: F, set_order: Ordering, fetch_order: Ordering, ) -> bool { self.instance_ptr .fetch_update(set_order, fetch_order, |ptr| { if condition(Ptr::from(ptr)) { Some(Tag::update_tag(ptr, tag).cast_mut()) } else { None } }) .is_ok() } /// Stores `new` into the [`AtomicOwned`] if the current value is the same as `current`. /// /// Returns the previously held value and the updated [`Ptr`]. /// /// # Errors /// /// Returns `Err` with the supplied [`Owned`] and the current [`Ptr`]. 
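/// /// The supplied [`Owned`] is returned intact on failure, so a retry loop can reuse it instead of allocating a new instance for every attempt.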
/// /// # Examples /// /// ``` /// use sdd::{AtomicOwned, Guard, Owned, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_owned: AtomicOwned = AtomicOwned::new(17); /// let guard = Guard::new(); /// /// let mut ptr = atomic_owned.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 17); /// /// atomic_owned.update_tag_if(Tag::Both, |_| true, Relaxed, Relaxed); /// assert!(atomic_owned.compare_exchange( /// ptr, (Some(Owned::new(18)), Tag::First), Relaxed, Relaxed, &guard).is_err()); /// /// ptr.set_tag(Tag::Both); /// let old: Owned = atomic_owned.compare_exchange( /// ptr, (Some(Owned::new(18)), Tag::First), Relaxed, Relaxed, &guard).unwrap().0.unwrap(); /// assert_eq!(*old, 17); /// drop(old); /// /// assert!(atomic_owned.compare_exchange( /// ptr, (Some(Owned::new(19)), Tag::None), Relaxed, Relaxed, &guard).is_err()); /// assert_eq!(*ptr.as_ref().unwrap(), 17); /// ``` #[inline] pub fn compare_exchange<'g>( &self, current: Ptr<'g, T>, new: (Option>, Tag), success: Ordering, failure: Ordering, _guard: &'g Guard, ) -> Result, OwnedPtrPair<'g, T>> { let desired = Tag::update_tag( new.0.as_ref().map_or_else(null, Owned::underlying_ptr), new.1, ) .cast_mut(); match self.instance_ptr.compare_exchange( current.as_underlying_ptr().cast_mut(), desired, success, failure, ) { Ok(prev) => { let prev_owned = NonNull::new(Tag::unset_tag(prev).cast_mut()).map(Owned::from); forget(new); Ok((prev_owned, Ptr::from(desired))) } Err(actual) => Err((new.0, Ptr::from(actual))), } } /// Stores `new` into the [`AtomicOwned`] if the current value is the same as `current`. /// /// This method is allowed to spuriously fail even when the comparison succeeds. /// /// Returns the previously held value and the updated [`Ptr`]. /// /// # Errors /// /// Returns `Err` with the supplied [`Owned`] and the current [`Ptr`]. /// /// # Examples /// /// ``` /// use sdd::{AtomicOwned, Owned, Guard, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_owned: AtomicOwned = AtomicOwned::new(17); /// let guard = Guard::new(); /// /// let mut ptr = atomic_owned.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 17); /// /// while let Err((_, actual)) = atomic_owned.compare_exchange_weak( /// ptr, /// (Some(Owned::new(18)), Tag::First), /// Relaxed, /// Relaxed, /// &guard) { /// ptr = actual; /// } /// /// let mut ptr = atomic_owned.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 18); /// ``` #[inline] pub fn compare_exchange_weak<'g>( &self, current: Ptr<'g, T>, new: (Option>, Tag), success: Ordering, failure: Ordering, _guard: &'g Guard, ) -> Result, OwnedPtrPair<'g, T>> { let desired = Tag::update_tag( new.0.as_ref().map_or_else(null, Owned::underlying_ptr), new.1, ) .cast_mut(); match self.instance_ptr.compare_exchange_weak( current.as_underlying_ptr().cast_mut(), desired, success, failure, ) { Ok(prev) => { let prev_owned = NonNull::new(Tag::unset_tag(prev).cast_mut()).map(Owned::from); forget(new); Ok((prev_owned, Ptr::from(desired))) } Err(actual) => Err((new.0, Ptr::from(actual))), } } /// Converts `self` into an [`Owned`]. /// /// Returns `None` if `self` did not own an instance. 
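/// /// The stored pointer is swapped with null before `self` is dropped, so the returned [`Owned`] becomes the sole owner of the instance.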
/// /// # Examples /// /// ``` /// use sdd::{AtomicOwned, Owned}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_owned: AtomicOwned = AtomicOwned::new(55); /// let owned: Owned = atomic_owned.into_owned(Relaxed).unwrap(); /// assert_eq!(*owned, 55); /// ``` #[inline] #[must_use] pub fn into_owned(self, order: Ordering) -> Option> { let ptr = self.instance_ptr.swap(null_mut(), order); if let Some(underlying_ptr) = NonNull::new(Tag::unset_tag(ptr).cast_mut()) { return Some(Owned::from(underlying_ptr)); } None } } impl Default for AtomicOwned { #[inline] fn default() -> Self { Self::null() } } impl Drop for AtomicOwned { #[inline] fn drop(&mut self) { if let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr.load(Relaxed)).cast_mut()) { drop(Owned::from(ptr)); } } } unsafe impl Send for AtomicOwned {} unsafe impl Sync for AtomicOwned {} impl UnwindSafe for AtomicOwned {} sdd-4.5.3/src/atomic_shared.rs000064400000000000000000000351151046102023000143530ustar 00000000000000use std::mem::forget; use std::panic::UnwindSafe; use std::ptr::{NonNull, null, null_mut}; #[cfg(not(feature = "loom"))] use std::sync::atomic::AtomicPtr; use std::sync::atomic::Ordering::{self, Acquire, Relaxed}; #[cfg(feature = "loom")] use loom::sync::atomic::AtomicPtr; use crate::ref_counted::RefCounted; use crate::{Guard, Ptr, Shared, Tag}; /// [`AtomicShared`] owns the underlying instance, and allows users to perform atomic operations /// on the pointer to it. #[derive(Debug)] pub struct AtomicShared { instance_ptr: AtomicPtr>, } /// A pair of [`Shared`] and [`Ptr`] of the same type. pub type SharedPtrPair<'g, T> = (Option>, Ptr<'g, T>); impl AtomicShared { /// Creates a new [`AtomicShared`] from an instance of `T`. /// /// The type of the instance must be determined at compile-time, must not contain non-static /// references, and must not be a non-static reference since the instance can theoretically /// live as long as the process. For instance, `struct Disallowed<'l, T>(&'l T)` is not /// allowed, because an instance of the type cannot outlive `'l` whereas the garbage collector /// does not guarantee that the instance is dropped within `'l`. /// /// # Examples /// /// ``` /// use sdd::AtomicShared; /// /// let atomic_shared: AtomicShared = AtomicShared::new(10); /// ``` #[inline] pub fn new(t: T) -> Self { Self { instance_ptr: AtomicPtr::new(RefCounted::new_shared(t).as_ptr()), } } } impl AtomicShared { /// Creates a new [`AtomicShared`] from a [`Shared`] of `T`. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Shared}; /// /// let shared: Shared = Shared::new(10); /// let atomic_shared: AtomicShared = AtomicShared::from(shared); /// ``` #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn from(shared: Shared) -> Self { let ptr = shared.underlying_ptr(); forget(shared); let instance_ptr: std::sync::atomic::AtomicPtr> = AtomicPtr::new(ptr.cast_mut()); Self { instance_ptr } } /// Creates a new [`AtomicShared`] from a [`Shared`] of `T`. #[cfg(feature = "loom")] #[inline] #[must_use] pub fn from(shared: Shared) -> Self { let ptr = shared.underlying_ptr(); forget(shared); let instance_ptr: loom::sync::atomic::AtomicPtr> = AtomicPtr::new(ptr.cast_mut()); Self { instance_ptr } } /// Creates a null [`AtomicShared`]. 
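/// /// A null [`AtomicShared`] holds no instance; a value can be installed later through [`AtomicShared::swap`] or [`AtomicShared::compare_exchange`].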
/// /// # Examples /// /// ``` /// use sdd::AtomicShared; /// /// let atomic_shared: AtomicShared = AtomicShared::null(); /// ``` #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn null() -> Self { let instance_ptr: std::sync::atomic::AtomicPtr> = AtomicPtr::new(null_mut()); Self { instance_ptr } } /// Creates a null [`AtomicShared`]. #[cfg(feature = "loom")] #[inline] #[must_use] pub fn null() -> Self { let instance_ptr: loom::sync::atomic::AtomicPtr> = AtomicPtr::new(null_mut()); Self { instance_ptr } } /// Returns `true` if the [`AtomicShared`] is null. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::null(); /// atomic_shared.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed); /// assert!(atomic_shared.is_null(Relaxed)); /// ``` #[inline] #[must_use] pub fn is_null(&self, order: Ordering) -> bool { Tag::unset_tag(self.instance_ptr.load(order)).is_null() } /// Loads a pointer value from the [`AtomicShared`]. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Guard}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(11); /// let guard = Guard::new(); /// let ptr = atomic_shared.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 11); /// ``` #[inline] #[must_use] pub fn load<'g>(&self, order: Ordering, _guard: &'g Guard) -> Ptr<'g, T> { Ptr::from(self.instance_ptr.load(order)) } /// Stores the given value into the [`AtomicShared`] and returns the original value. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Guard, Shared, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(14); /// let guard = Guard::new(); /// let (old, tag) = atomic_shared.swap((Some(Shared::new(15)), Tag::Second), Relaxed); /// assert_eq!(tag, Tag::None); /// assert_eq!(*old.unwrap(), 14); /// let (old, tag) = atomic_shared.swap((None, Tag::First), Relaxed); /// assert_eq!(tag, Tag::Second); /// assert_eq!(*old.unwrap(), 15); /// let (old, tag) = atomic_shared.swap((None, Tag::None), Relaxed); /// assert_eq!(tag, Tag::First); /// assert!(old.is_none()); /// ``` #[inline] pub fn swap(&self, new: (Option>, Tag), order: Ordering) -> (Option>, Tag) { let desired = Tag::update_tag( new.0.as_ref().map_or_else(null, Shared::underlying_ptr), new.1, ) .cast_mut(); let prev = self.instance_ptr.swap(desired, order); let tag = Tag::into_tag(prev); let prev_ptr = Tag::unset_tag(prev).cast_mut(); forget(new); (NonNull::new(prev_ptr).map(Shared::from), tag) } /// Returns its [`Tag`]. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::null(); /// assert_eq!(atomic_shared.tag(Relaxed), Tag::None); /// ``` #[inline] #[must_use] pub fn tag(&self, order: Ordering) -> Tag { Tag::into_tag(self.instance_ptr.load(order)) } /// Sets a new [`Tag`] if the given condition is met. /// /// Returns `true` if the new [`Tag`] has been successfully set. 
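/// /// The `condition` closure may be invoked more than once: the update is retried whenever the pointer is concurrently modified, and it gives up only once the closure returns `false`.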
/// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::null(); /// assert!(atomic_shared.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed)); /// assert_eq!(atomic_shared.tag(Relaxed), Tag::Both); /// ``` #[inline] pub fn update_tag_if) -> bool>( &self, tag: Tag, mut condition: F, set_order: Ordering, fetch_order: Ordering, ) -> bool { self.instance_ptr .fetch_update(set_order, fetch_order, |ptr| { if condition(Ptr::from(ptr)) { Some(Tag::update_tag(ptr, tag).cast_mut()) } else { None } }) .is_ok() } /// Stores `new` into the [`AtomicShared`] if the current value is the same as `current`. /// /// Returns the previously held value and the updated [`Ptr`]. /// /// # Errors /// /// Returns `Err` with the supplied [`Shared`] and the current [`Ptr`]. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Guard, Shared, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(17); /// let guard = Guard::new(); /// /// let mut ptr = atomic_shared.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 17); /// /// atomic_shared.update_tag_if(Tag::Both, |_| true, Relaxed, Relaxed); /// assert!(atomic_shared.compare_exchange( /// ptr, (Some(Shared::new(18)), Tag::First), Relaxed, Relaxed, &guard).is_err()); /// /// ptr.set_tag(Tag::Both); /// let old: Shared = atomic_shared.compare_exchange( /// ptr, /// (Some(Shared::new(18)), Tag::First), /// Relaxed, /// Relaxed, /// &guard).unwrap().0.unwrap(); /// assert_eq!(*old, 17); /// drop(old); /// /// assert!(atomic_shared.compare_exchange( /// ptr, (Some(Shared::new(19)), Tag::None), Relaxed, Relaxed, &guard).is_err()); /// assert_eq!(*ptr.as_ref().unwrap(), 17); /// ``` #[inline] pub fn compare_exchange<'g>( &self, current: Ptr<'g, T>, new: (Option>, Tag), success: Ordering, failure: Ordering, _guard: &'g Guard, ) -> Result, SharedPtrPair<'g, T>> { let desired = Tag::update_tag( new.0.as_ref().map_or_else(null, Shared::underlying_ptr), new.1, ) .cast_mut(); match self.instance_ptr.compare_exchange( current.as_underlying_ptr().cast_mut(), desired, success, failure, ) { Ok(prev) => { let prev_shared = NonNull::new(Tag::unset_tag(prev).cast_mut()).map(Shared::from); forget(new); Ok((prev_shared, Ptr::from(desired))) } Err(actual) => Err((new.0, Ptr::from(actual))), } } /// Stores `new` into the [`AtomicShared`] if the current value is the same as `current`. /// /// This method is allowed to spuriously fail even when the comparison succeeds. /// /// Returns the previously held value and the updated [`Ptr`]. /// /// # Errors /// /// Returns `Err` with the supplied [`Shared`] and the current [`Ptr`]. 
/// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Guard, Shared, Tag}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(17); /// let guard = Guard::new(); /// /// let mut ptr = atomic_shared.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 17); /// /// while let Err((_, actual)) = atomic_shared.compare_exchange_weak( /// ptr, /// (Some(Shared::new(18)), Tag::First), /// Relaxed, /// Relaxed, /// &guard) { /// ptr = actual; /// } /// /// let mut ptr = atomic_shared.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 18); /// ``` #[inline] pub fn compare_exchange_weak<'g>( &self, current: Ptr<'g, T>, new: (Option>, Tag), success: Ordering, failure: Ordering, _guard: &'g Guard, ) -> Result, SharedPtrPair<'g, T>> { let desired = Tag::update_tag( new.0.as_ref().map_or_else(null, Shared::underlying_ptr), new.1, ) .cast_mut(); match self.instance_ptr.compare_exchange_weak( current.as_underlying_ptr().cast_mut(), desired, success, failure, ) { Ok(prev) => { let prev_shared = NonNull::new(Tag::unset_tag(prev).cast_mut()).map(Shared::from); forget(new); Ok((prev_shared, Ptr::from(desired))) } Err(actual) => Err((new.0, Ptr::from(actual))), } } /// Clones `self` including tags. /// /// If `self` is not supposed to be an `AtomicShared::null`, this will never return an /// `AtomicShared::null`. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Guard}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(59); /// let guard = Guard::new(); /// let atomic_shared_clone = atomic_shared.clone(Relaxed, &guard); /// let ptr = atomic_shared_clone.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 59); /// ``` #[inline] #[must_use] pub fn clone(&self, order: Ordering, guard: &Guard) -> AtomicShared { self.get_shared(order, guard) .map_or_else(Self::null, |s| Self::from(s)) } /// Tries to create a [`Shared`] out of `self`. /// /// If `self` is not supposed to be an `AtomicShared::null`, this will never return `None`. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Guard, Shared}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(47); /// let guard = Guard::new(); /// let shared: Shared = atomic_shared.get_shared(Relaxed, &guard).unwrap(); /// assert_eq!(*shared, 47); /// ``` #[inline] #[must_use] pub fn get_shared(&self, order: Ordering, _guard: &Guard) -> Option> { let mut ptr = Tag::unset_tag(self.instance_ptr.load(order)); while !ptr.is_null() { if unsafe { (*ptr).try_add_ref(Acquire) } { return NonNull::new(ptr.cast_mut()).map(Shared::from); } ptr = Tag::unset_tag(self.instance_ptr.load(order)); } None } /// Converts `self` into a [`Shared`]. /// /// Returns `None` if `self` did not hold a strong reference. 
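// An illustrative sketch (not part of this crate) of how `Guard`, `Ptr`,
// `Shared`, and `compare_exchange_weak` compose into a Treiber-style push over
// a hypothetical `Node` type; the crate's own `Stack` should be preferred in
// real code.
//
//     use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
//     use sdd::{AtomicShared, Guard, Shared, Tag};
//
//     struct Node<T> {
//         value: T,
//         next: AtomicShared<Node<T>>,
//     }
//
//     fn push<T: 'static>(head: &AtomicShared<Node<T>>, value: T) {
//         let guard = Guard::new();
//         let mut node = Shared::new(Node { value, next: AtomicShared::null() });
//         let mut head_ptr = head.load(Acquire, &guard);
//         loop {
//             // Point the new node at the current head before publishing it.
//             node.next.swap((head_ptr.get_shared(), Tag::None), Relaxed);
//             match head.compare_exchange_weak(
//                 head_ptr,
//                 (Some(node), Tag::None),
//                 Release,
//                 Relaxed,
//                 &guard,
//             ) {
//                 Ok(_) => return,
//                 Err((returned, actual)) => {
//                     // The supplied `Shared` is handed back on failure; retry with it.
//                     node = returned.unwrap();
//                     head_ptr = actual;
//                 }
//             }
//         }
//     }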
/// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Shared}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(55); /// let shared: Shared = atomic_shared.into_shared(Relaxed).unwrap(); /// assert_eq!(*shared, 55); /// ``` #[inline] #[must_use] pub fn into_shared(self, order: Ordering) -> Option> { let ptr = self.instance_ptr.swap(null_mut(), order); if let Some(underlying_ptr) = NonNull::new(Tag::unset_tag(ptr).cast_mut()) { return Some(Shared::from(underlying_ptr)); } None } } impl Clone for AtomicShared { #[inline] fn clone(&self) -> AtomicShared { self.clone(Acquire, &Guard::new()) } } impl Default for AtomicShared { #[inline] fn default() -> Self { Self::null() } } impl Drop for AtomicShared { #[inline] fn drop(&mut self) { if let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr.load(Relaxed)).cast_mut()) { drop(Shared::from(ptr)); } } } unsafe impl Send for AtomicShared {} unsafe impl Sync for AtomicShared {} impl UnwindSafe for AtomicShared {} sdd-4.5.3/src/bag.rs000064400000000000000000000533361046102023000123070ustar 00000000000000//! [`Bag`] is a lock-free concurrent unordered instance container. use std::cell::UnsafeCell; use std::iter::FusedIterator; use std::mem::{MaybeUninit, needs_drop}; use std::panic::UnwindSafe; use std::ptr::{self, drop_in_place}; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; use super::{Guard, LinkedEntry, LinkedList, Stack}; /// [`Bag`] is a lock-free concurrent unordered instance container. /// /// [`Bag`] is a linearizable concurrent instance container where `ARRAY_LEN` instances are stored /// in a fixed-size array, and the rest are managed by its backup container; this makes a [`Bag`] /// especially efficient if the expected number of instances does not exceed `ARRAY_LEN`. /// /// The maximum value of `ARRAY_LEN` is limited to `usize::BITS / 2` which is the default value. #[derive(Debug)] pub struct Bag { /// Primary storage. primary_storage: Storage, /// Fallback storage. stack: Stack>, } /// A mutable iterator over the entries of a [`Bag`]. #[derive(Debug)] pub struct IterMut<'b, T, const ARRAY_LEN: usize = DEFAULT_ARRAY_LEN> { bag: &'b mut Bag, current_index: u32, current_stack_entry: Option<&'b mut LinkedEntry>>, } /// An iterator that moves out of a [`Bag`]. #[derive(Debug)] pub struct IntoIter { bag: Bag, } /// The default length of the fixed-size array in a [`Bag`]. const DEFAULT_ARRAY_LEN: usize = usize::BITS as usize / 2; #[derive(Debug)] struct Storage { /// Storage. storage: UnsafeCell<[MaybeUninit; ARRAY_LEN]>, /// Storage metadata. /// /// The layout of the metadata is, /// - Upper `usize::BITS / 2` bits: initialization bitmap. /// - Lower `usize::BITS / 2` bits: owned state bitmap. /// /// The metadata represents four possible states of a storage slot. /// - `!instantiated && !owned`: initial state. /// - `!instantiated && owned`: owned for instantiating. /// - `instantiated && !owned`: valid and reachable. /// - `instantiated && owned`: owned for moving out the instance. metadata: AtomicUsize, } impl Bag { /// `ARRAY_LEN` cannot be larger than `usize::BITS / 2`. const CHECK_ARRAY_LEN: () = assert!(ARRAY_LEN <= (usize::BITS as usize) / 2); /// Creates an empty [`Bag`]. 
/// /// # Examples /// /// ``` /// use sdd::Bag; /// /// let bag: Bag = Bag::new(); /// ``` #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn new() -> Self { let () = Self::CHECK_ARRAY_LEN; Self { primary_storage: Storage::new(), stack: Stack::new(), } } /// Creates an empty [`Bag`]. #[cfg(feature = "loom")] #[inline] #[must_use] pub fn new() -> Self { let () = Self::CHECK_ARRAY_LEN; Self { primary_storage: Storage::new(), stack: Stack::new(), } } /// Pushes an instance of `T`. /// /// # Examples /// /// ``` /// use sdd::Bag; /// /// let bag: Bag = Bag::default(); /// /// bag.push(11); /// ``` #[inline] pub fn push(&self, val: T) { if let Some(val) = self.primary_storage.push(val, true) { self.stack.peek_with(|e| { if let Some(storage) = e { if let Some(val) = storage.push(val, false) { unsafe { self.stack.push_unchecked(Storage::with_val(val)); } } } else { unsafe { self.stack.push_unchecked(Storage::with_val(val)); } } }); } } /// Tries to push an instance of `T` only if the primary storage is not full. /// /// # Errors /// /// Returns an error containing the supplied value if the primary storage is full. /// /// # Examples /// /// ``` /// use sdd::Bag; /// /// let bag: Bag = Bag::new(); /// /// assert!(bag.try_push(11).is_ok()); /// assert_eq!(bag.try_push(17), Err(17)); /// ``` #[inline] pub fn try_push(&self, val: T) -> Result<(), T> { if let Some(returned) = self.primary_storage.push(val, true) { Err(returned) } else { Ok(()) } } /// Pops an instance in the [`Bag`] if not empty. /// /// # Examples /// /// ``` /// use sdd::Bag; /// /// let bag: Bag = Bag::default(); /// /// bag.push(37); /// /// assert_eq!(bag.pop(), Some(37)); /// assert!(bag.pop().is_none()); /// ``` #[inline] pub fn pop(&self) -> Option { let result = self.stack.peek_with(|e| { e.and_then(|storage| { let (val, empty) = storage.pop(); if empty { // Once marked deleted, new entries will be inserted in a new `Storage` // that may not be reachable from this one. storage.delete_self(Relaxed); } val }) }); if let Some(val) = result { return Some(val); } self.primary_storage.pop().0 } /// Pops all the entries at once and folds them into an accumulator. /// /// # Examples /// /// ``` /// use sdd::Bag; /// /// let bag: Bag = Bag::default(); /// /// bag.push(7); /// bag.push(17); /// bag.push(37); /// /// assert_eq!(bag.pop_all(0, |a, v| a + v), 61); /// /// bag.push(47); /// assert_eq!(bag.pop(), Some(47)); /// assert!(bag.pop().is_none()); /// assert!(bag.is_empty()); /// ``` #[inline] pub fn pop_all B>(&self, init: B, mut fold: F) -> B { let mut acc = init; let popped = self.stack.pop_all(); while let Some(storage) = popped.pop() { acc = storage.pop_all(acc, &mut fold); } self.primary_storage.pop_all(acc, &mut fold) } /// Returns the number of entries in the [`Bag`]. /// /// This method iterates over all the entry arrays in the [`Bag`] to count the number of /// entries; therefore, its time complexity is `O(N)`. /// /// # Examples /// /// ``` /// use sdd::Bag; /// /// let bag: Bag = Bag::default(); /// assert_eq!(bag.len(), 0); /// /// bag.push(7); /// assert_eq!(bag.len(), 1); /// /// for v in 0..64 { /// bag.push(v); /// } /// bag.pop(); /// assert_eq!(bag.len(), 64); /// ``` #[inline] pub fn len(&self) -> usize { self.stack .iter(&Guard::new()) .fold(self.primary_storage.len(), |acc, storage| { acc + storage.len() }) } /// Returns `true` if the [`Bag`] is empty. 
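// An illustrative sketch of the primary-array/backup-stack split described in
// the `Bag` documentation; the array length `4` is an arbitrary choice.
//
//     use sdd::Bag;
//
//     let bag: Bag<usize, 4> = Bag::new();
//     for i in 0..4 {
//         // The fixed-size primary array still has room.
//         assert!(bag.try_push(i).is_ok());
//     }
//     // `try_push` refuses to spill beyond the primary array...
//     assert_eq!(bag.try_push(4), Err(4));
//     // ...while `push` falls back to the internal stack.
//     bag.push(4);
//     assert_eq!(bag.len(), 5);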
/// /// # Examples /// /// ``` /// use sdd::Bag; /// /// let bag: Bag = Bag::default(); /// assert!(bag.is_empty()); /// /// bag.push(7); /// assert!(!bag.is_empty()); /// /// assert_eq!(bag.pop(), Some(7)); /// assert!(bag.is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { if self.primary_storage.len() == 0 { self.stack.is_empty() } else { false } } /// Returns an iterator over the contained instances for modification. /// /// # Examples /// /// ``` /// use sdd::Bag; /// /// let mut bag: Bag = Bag::default(); /// /// bag.push(3); /// bag.push(3); /// /// assert_eq!(bag.iter_mut().count(), 2); /// bag.iter_mut().for_each(|e| { *e += 1; }); /// /// assert_eq!(bag.pop(), Some(4)); /// assert_eq!(bag.pop(), Some(4)); /// assert!(bag.pop().is_none()); /// ``` #[inline] pub const fn iter_mut(&mut self) -> IterMut<'_, T, ARRAY_LEN> { IterMut { bag: self, current_index: 0, current_stack_entry: None, } } } impl Default for Bag { #[inline] fn default() -> Self { Self::new() } } impl Drop for Bag { #[inline] fn drop(&mut self) { if needs_drop::() { // It needs to drop all the stored instances in-place. while let Some(v) = self.pop() { drop(v); } } } } impl FromIterator for Bag { #[inline] fn from_iter>(iter: I) -> Self { let into_iter = iter.into_iter(); let bag = Self::new(); into_iter.for_each(|v| { bag.push(v); }); bag } } impl IntoIterator for Bag { type Item = T; type IntoIter = IntoIter; #[inline] fn into_iter(self) -> Self::IntoIter { IntoIter { bag: self } } } impl<'b, T, const ARRAY_LEN: usize> IntoIterator for &'b mut Bag { type IntoIter = IterMut<'b, T, ARRAY_LEN>; type Item = &'b mut T; #[inline] fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl FusedIterator for IterMut<'_, T, ARRAY_LEN> {} impl<'b, T, const ARRAY_LEN: usize> Iterator for IterMut<'b, T, ARRAY_LEN> { type Item = &'b mut T; #[inline] fn next(&mut self) -> Option { while self.current_index != u32::MAX { let current_storage = if let Some(linked) = self.current_stack_entry.as_mut() { &mut **linked } else { &mut self.bag.primary_storage }; let instance_bitmap = Storage::::instance_bitmap(current_storage.metadata.load(Acquire)); let first_occupied = (instance_bitmap.wrapping_shr(self.current_index)).trailing_zeros(); let next_occupied = self.current_index + first_occupied; self.current_index = next_occupied + 1; if (next_occupied as usize) < ARRAY_LEN { return Some(unsafe { &mut *(*current_storage.storage.get())[next_occupied as usize].as_mut_ptr() }); } self.current_index = u32::MAX; if let Some(linked) = self.current_stack_entry.as_mut() { let guard = Guard::new(); if let Some(next) = linked.next_ptr(Acquire, &guard).as_ref() { let entry_mut = ptr::from_ref(next).cast_mut(); self.current_stack_entry = unsafe { entry_mut.as_mut() }; self.current_index = 0; } } else { self.bag.stack.peek_with(|e| { if let Some(e) = e { let entry_mut = ptr::from_ref(e).cast_mut(); self.current_stack_entry = unsafe { entry_mut.as_mut() }; self.current_index = 0; } }); } } None } } impl UnwindSafe for IterMut<'_, T, ARRAY_LEN> where T: UnwindSafe {} impl FusedIterator for IntoIter {} impl Iterator for IntoIter { type Item = T; #[inline] fn next(&mut self) -> Option { self.bag.pop() } } impl UnwindSafe for IntoIter where T: UnwindSafe {} impl Storage { /// Creates a new [`Storage`]. const fn new() -> Self { #[allow(clippy::uninit_assumed_init)] Storage { storage: unsafe { MaybeUninit::uninit().assume_init() }, metadata: AtomicUsize::new(0), } } /// Creates a new [`Storage`] with one inserted. 
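    // Worked example of the metadata layout documented on `Storage`, using a
    // hypothetical `ARRAY_LEN` of 4 for brevity: with `metadata == 0b0110_0010`,
    // the upper half `0b0110` is the initialization bitmap (slots 1 and 2 hold
    // instances) and the lower half `0b0010` is the owned-state bitmap (slot 1 is
    // currently owned by a thread moving its instance out), so only slot 2 is both
    // instantiated and reachable, and `len()` reports 1 at that instant.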
fn with_val(val: T) -> Self { #[allow(clippy::uninit_assumed_init)] let storage = Self { storage: UnsafeCell::new(unsafe { MaybeUninit::uninit().assume_init() }), metadata: AtomicUsize::new(1_usize << ARRAY_LEN), }; unsafe { (*storage.storage.get())[0].as_mut_ptr().write(val); } storage } /// Returns the number of entries. fn len(&self) -> usize { let metadata = self.metadata.load(Relaxed); let instance_bitmap = Self::instance_bitmap(metadata); let owned_bitmap = Self::owned_bitmap(metadata); let valid_entries_bitmap = instance_bitmap & (!owned_bitmap); valid_entries_bitmap.count_ones() as usize } /// Pushes a new value. fn push(&self, val: T, allow_empty: bool) -> Option { let mut metadata = self.metadata.load(Relaxed); 'after_read_metadata: loop { // Look for a free slot. let mut instance_bitmap = Self::instance_bitmap(metadata); let owned_bitmap = Self::owned_bitmap(metadata); // Regard entries being removed as removed ones. if !allow_empty && (instance_bitmap & !owned_bitmap) == 0 { return Some(val); } let mut index = instance_bitmap.trailing_ones() as usize; while index < ARRAY_LEN { if (owned_bitmap & (1_u32 << index)) == 0 { // Mark the slot `owned`. let new = metadata | (1_usize << index); match self .metadata .compare_exchange_weak(metadata, new, Acquire, Relaxed) { Ok(_) => { // Now the free slot is owned by the thread. unsafe { (*self.storage.get())[index].as_mut_ptr().write(val); } let result = self.metadata.fetch_update(Release, Relaxed, |m| { debug_assert_ne!(m & (1_usize << index), 0); debug_assert_eq!(m & (1_usize << (index + ARRAY_LEN)), 0); if !allow_empty && (Self::instance_bitmap(m) & !Self::owned_bitmap(m)) == 0 { // Disallow pushing a value into an empty, or a soon-to-be-emptied array. None } else { let new = (m & (!(1_usize << index))) | (1_usize << (index + ARRAY_LEN)); Some(new) } }); if result.is_ok() { return None; } // The array was empty, thus rolling back the change. let val = unsafe { (*self.storage.get())[index].as_ptr().read() }; self.metadata.fetch_and(!(1_usize << index), Release); return Some(val); } Err(prev) => { // Metadata has changed. metadata = prev; continue 'after_read_metadata; } } } // Look for another free slot. instance_bitmap |= 1_u32 << index; index = instance_bitmap.trailing_ones() as usize; } // No free slots or all the entries are owned. return Some(val); } } /// Pops a value. fn pop(&self) -> (Option, bool) { let mut metadata = self.metadata.load(Relaxed); 'after_read_metadata: loop { // Look for an instantiated, yet to be owned entry. let mut instance_bitmap_inverted = !Self::instance_bitmap(metadata); let owned_bitmap = Self::owned_bitmap(metadata); let mut index = instance_bitmap_inverted.trailing_ones() as usize; while index < ARRAY_LEN { if (owned_bitmap & (1_u32 << index)) == 0 { // Mark the slot `owned`. let new = metadata | (1_usize << index); match self .metadata .compare_exchange_weak(metadata, new, Acquire, Relaxed) { Ok(_) => { // Now the desired slot is owned by the thread. let inst = unsafe { (*self.storage.get())[index].as_ptr().read() }; let mut empty = false; let result = self.metadata.fetch_update(Release, Relaxed, |m| { debug_assert_ne!(m & (1_usize << index), 0); debug_assert_ne!(m & (1_usize << (index + ARRAY_LEN)), 0); let new = m & (!((1_usize << index) | (1_usize << (index + ARRAY_LEN)))); empty = Self::instance_bitmap(new) == 0; Some(new) }); debug_assert!(result.is_ok()); return (Some(inst), empty); } Err(prev) => { // Metadata has changed. 
metadata = prev; continue 'after_read_metadata; } } } // Look for another valid slot. instance_bitmap_inverted |= 1_u32 << index; index = instance_bitmap_inverted.trailing_ones() as usize; } return (None, false); } } /// Pops all the values, and folds them. #[allow(clippy::cast_possible_truncation)] fn pop_all B>(&self, init: B, fold: &mut F) -> B { struct ExitGuard(Option); impl Drop for ExitGuard { #[inline] fn drop(&mut self) { let Some(f) = self.0.take() else { return; }; f(); } } let mut acc = init; let mut metadata = self.metadata.load(Relaxed); loop { // Look for instantiated, and reachable entries. let instance_bitmap = Self::instance_bitmap(metadata) as usize; let owned_bitmap = Self::owned_bitmap(metadata) as usize; let instances_to_pop = instance_bitmap & (!owned_bitmap); debug_assert_eq!(instances_to_pop & owned_bitmap, 0); if instances_to_pop == 0 { return acc; } let marked_for_removal = metadata | instances_to_pop; match self.metadata.compare_exchange_weak( metadata, marked_for_removal, Acquire, Relaxed, ) { Ok(_) => { metadata = marked_for_removal; let _guard = ExitGuard(Some(|| { loop { let new_metadata = metadata & (!((instances_to_pop << ARRAY_LEN) | instances_to_pop)); if let Err(actual) = self.metadata.compare_exchange_weak( metadata, new_metadata, Release, Relaxed, ) { metadata = actual; continue; } break; } })); // Now all the valid slots are locked for removal. let mut index = instances_to_pop.trailing_zeros() as usize; while index < ARRAY_LEN { acc = fold(acc, unsafe { (*self.storage.get())[index].as_ptr().read() }); index = (instances_to_pop & (!((1_usize << (index + 1) as u32) - 1))) .trailing_zeros() as usize; } return acc; } Err(actual) => metadata = actual, } } } #[allow(clippy::cast_possible_truncation)] const fn instance_bitmap(metadata: usize) -> u32 { metadata.wrapping_shr(ARRAY_LEN as u32) as u32 } #[allow(clippy::cast_possible_truncation)] const fn owned_bitmap(metadata: usize) -> u32 { (metadata % (1_usize << ARRAY_LEN)) as u32 } } impl Drop for Storage { #[inline] fn drop(&mut self) { if needs_drop::() { let mut instance_bitmap = Self::instance_bitmap(self.metadata.load(Acquire)); loop { let index = instance_bitmap.trailing_zeros(); if index == 32 { break; } instance_bitmap &= !(1_u32 << index); unsafe { drop_in_place((*self.storage.get())[index as usize].as_mut_ptr()) }; } } } } unsafe impl Send for Storage {} unsafe impl Sync for Storage {} sdd-4.5.3/src/collectible.rs000064400000000000000000000052421046102023000140300ustar 00000000000000use std::ptr::{self, NonNull}; use std::sync::atomic::Ordering::Relaxed; use std::sync::atomic::{AtomicPtr, AtomicUsize}; /// [`Collectible`] defines the memory layout for the type in order to be passed to the garbage /// collector. pub(super) trait Collectible { /// Returns the next [`Collectible`] pointer. fn next_ptr(&self) -> Option>; /// Sets the next [`Collectible`] pointer. fn set_next_ptr(&self, next_ptr: Option>); } /// [`Link`] implements [`Collectible`]. #[derive(Debug, Default)] pub struct Link { data: (AtomicUsize, AtomicPtr), } /// [`DeferredClosure`] implements [`Collectible`] for a closure to execute it after all the /// current readers in the process are gone. 
pub(super) struct DeferredClosure { f: Option, link: Link, } impl Link { #[inline] pub(super) const fn new_shared() -> Self { Link { data: (AtomicUsize::new(1), AtomicPtr::new(ptr::null_mut())), } } #[inline] pub(super) const fn new_unique() -> Self { Link { data: (AtomicUsize::new(0), AtomicPtr::new(ptr::null_mut())), } } #[inline] pub(super) const fn ref_cnt(&self) -> &AtomicUsize { &self.data.0 } } impl Collectible for Link { #[inline] fn next_ptr(&self) -> Option> { let fat_ptr: (*mut usize, *mut usize) = ( self.data.0.load(Relaxed) as *mut usize, self.data.1.load(Relaxed), ); unsafe { std::mem::transmute(fat_ptr) } } #[inline] fn set_next_ptr(&self, next_ptr: Option>) { let data: (*mut usize, *mut usize) = next_ptr.map_or_else( || (ptr::null_mut(), ptr::null_mut()), |p| unsafe { std::mem::transmute(p) }, ); self.data.0.store(data.0 as usize, Relaxed); self.data.1.store(data.1, Relaxed); } } impl DeferredClosure { /// Creates a new [`DeferredClosure`]. #[inline] pub fn new(f: F) -> Self { DeferredClosure { f: Some(f), link: Link::default(), } } } impl Collectible for DeferredClosure { #[inline] fn next_ptr(&self) -> Option> { self.link.next_ptr() } #[inline] fn set_next_ptr(&self, next_ptr: Option>) { self.link.set_next_ptr(next_ptr); } } impl Drop for DeferredClosure { #[inline] fn drop(&mut self) { if let Some(f) = self.f.take() { f(); } } } sdd-4.5.3/src/collector.rs000064400000000000000000000575511046102023000135470ustar 00000000000000use std::cell::UnsafeCell; use std::ptr::{self, NonNull, addr_of_mut}; use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; #[cfg(not(feature = "loom"))] use std::sync::atomic::fence; use std::sync::atomic::{AtomicPtr, AtomicU8}; #[cfg(feature = "loom")] use loom::sync::atomic::fence; use super::collectible::{Collectible, Link}; use super::exit_guard::ExitGuard; use super::{Epoch, Tag}; /// [`Collector`] is a garbage collector that reclaims thread-locally unreachable instances /// when they are globally unreachable. #[derive(Debug, Default)] #[repr(align(128))] pub(super) struct Collector { state: AtomicU8, announcement: Epoch, next_epoch_update: u8, has_garbage: bool, num_readers: u32, previous_instance_link: Option>, current_instance_link: Option>, next_instance_link: Option>, next_link: AtomicPtr, link: Link, } /// Data stored in a [`CollectorRoot`] is shared among [`Collector`] instances. #[derive(Debug, Default)] pub(super) struct CollectorRoot { epoch: AtomicU8, chain_head: AtomicPtr, } /// [`CollectorAnchor`] helps allocate and cleanup the thread-local [`Collector`]. struct CollectorAnchor; impl Collector { /// The number of quiescent states before an epoch update is triggered. const CADENCE: u8 = 1_u8 << 7; /// Represents a quiescent state. const INACTIVE: u8 = Epoch::NUM_EPOCHS; /// Represents a terminated thread state. const INVALID: u8 = Epoch::NUM_EPOCHS << 1; #[inline] /// Accelerates garbage collection. pub(super) const fn accelerate(collector_ptr: NonNull) { unsafe { (*collector_ptr.as_ptr()).next_epoch_update = 0; } } /// Returns the [`Collector`] attached to the current thread. #[inline] pub(super) fn current() -> NonNull { LOCAL_COLLECTOR.with(|local_collector| { let local_collector_ptr = local_collector.get(); unsafe { NonNull::new(*local_collector_ptr).unwrap_or_else(|| { let collector_ptr = COLLECTOR_ANCHOR.with(CollectorAnchor::alloc); (*local_collector_ptr) = collector_ptr.as_ptr(); collector_ptr }) } }) } /// Acknowledges a new [`Guard`](super::Guard) being instantiated. 
/// /// # Panics /// /// The method may panic if the number of readers has reached `u32::MAX`. #[inline] pub(super) fn new_guard(collector_ptr: NonNull) { unsafe { if (*collector_ptr.as_ptr()).num_readers == 0 { debug_assert_eq!( (*collector_ptr.as_ptr()).state.load(Relaxed) & Self::INACTIVE, Self::INACTIVE ); (*collector_ptr.as_ptr()).num_readers = 1; // The epoch value can be any number between the last time a guard was created in // the thread and the most recent value of `GLOBAL_TOOR.epoch`. let new_epoch = Epoch::from_u8(GLOBAL_ROOT.epoch.load(Relaxed)); // Every epoch update, pointer loading, memory retirement, and memory // reclamation event is always placed between a pair `SeqCst` memory barrier events // (one in this method, and the other one in `scan`) where those `SeqCst` memory // barriers are globally ordered by definition. This property ensures that a retired // memory region cannot be reclaimed until any threads holding a pointer to the // region turn inactive, because, the reclaimer needs to wait for at least two // `SeqCst` barrier events in `scan` to reclaim the memory region, and the fact that // the other threads were able to load a valid pointer means that the thread was in // between the same `SeqCst` barrier event pair or an older one; if the former, one // of the two `scan` events must have observed that the thread was active (this // cannot be achieved by `Release-Acquire` relationships), preventing the global // epoch from advancing more than once; if the latter, trivial. if cfg!(feature = "loom") || cfg!(miri) || cfg!(not(any(target_arch = "x86", target_arch = "x86_64"))) { // What will happen after the fence strictly happens after the fence. (*collector_ptr.as_ptr()) .state .store(new_epoch.into(), Relaxed); fence(SeqCst); } else { // This special optimization is excerpted from // [`crossbeam_epoch`](https://docs.rs/crossbeam-epoch/). // // The rationale behind the code is, it compiles to `lock xchg` that // practically acts as a full memory barrier on `X86`, and is much faster than // `mfence`. (*collector_ptr.as_ptr()) .state .swap(new_epoch.into(), SeqCst); } if (*collector_ptr.as_ptr()).announcement != new_epoch { (*collector_ptr.as_ptr()).announcement = new_epoch; let exit_guard = ExitGuard::new((), |()| { Self::end_guard(collector_ptr); }); Collector::epoch_updated(collector_ptr); exit_guard.forget(); } } else { debug_assert_eq!( (*collector_ptr.as_ptr()).state.load(Relaxed) & Self::INACTIVE, 0 ); assert_ne!( (*collector_ptr.as_ptr()).num_readers, u32::MAX, "Too many EBR guards" ); (*collector_ptr.as_ptr()).num_readers += 1; } } } /// Acknowledges an existing [`Guard`](super::Guard) being dropped. #[inline] pub(super) fn end_guard(collector_ptr: NonNull) { unsafe { debug_assert_eq!( (*collector_ptr.as_ptr()).state.load(Relaxed) & Self::INACTIVE, 0 ); debug_assert_eq!( (*collector_ptr.as_ptr()).state.load(Relaxed), u8::from((*collector_ptr.as_ptr()).announcement) ); if (*collector_ptr.as_ptr()).num_readers == 1 { (*collector_ptr.as_ptr()).num_readers = 0; if (*collector_ptr.as_ptr()).next_epoch_update == 0 { Collector::scan(collector_ptr); (*collector_ptr.as_ptr()).next_epoch_update = Self::CADENCE; } else if (*collector_ptr.as_ptr()).has_garbage || Tag::into_tag(GLOBAL_ROOT.chain_head.load(Relaxed)) == Tag::Second { (*collector_ptr.as_ptr()).next_epoch_update -= 1; } // `Release` is needed to prevent any previous load operations in this thread from // passing through. 
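                // Concretely: if a pointer load performed under this guard were
                // reordered after the store below, another thread's `scan` could
                // already treat this collector as quiescent, advance the global
                // epoch, and reclaim the memory that the load still observes.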
(*collector_ptr.as_ptr()).state.store( u8::from((*collector_ptr.as_ptr()).announcement) | Self::INACTIVE, Release, ); } else { (*collector_ptr.as_ptr()).num_readers -= 1; } } } /// Returns the current epoch. #[inline] pub(super) fn current_epoch() -> Epoch { // It is called by an active `Guard` therefore it is after a `SeqCst` memory barrier. Each // epoch update is preceded by another `SeqCst` memory barrier, therefore those two events // are globally ordered. If the `SeqCst` event during the `Guard` creation happened before // the other `SeqCst` event, this will either load the last previous epoch value, or the // current value. If not, it is guaranteed that it reads the latest global epoch value. // // It is not possible to return the announced epoch here since the global epoch value is // rotated and the announced epoch may be outdated; this may lead to a situation where the // caller thinks that a new generation has been witnessed. Epoch::from_u8(GLOBAL_ROOT.epoch.load(Relaxed)) } /// Returns `true` if the [`Collector`] has garbage. #[inline] pub(super) const fn has_garbage(collector_ptr: NonNull) -> bool { unsafe { (*collector_ptr.as_ptr()).has_garbage } } /// Sets the garbage flag to allow this thread to advance the global epoch. #[inline] pub(super) const fn set_has_garbage(collector_ptr: NonNull) { unsafe { (*collector_ptr.as_ptr()).has_garbage = true; } } /// Collects garbage instances. #[inline] pub(super) fn collect(collector_ptr: NonNull, instance_ptr: *mut dyn Collectible) { unsafe { (*instance_ptr).set_next_ptr((*collector_ptr.as_ptr()).current_instance_link.take()); (*collector_ptr.as_ptr()).current_instance_link = NonNull::new(instance_ptr); (*collector_ptr.as_ptr()).has_garbage = true; } } /// Passes its garbage instances to other threads. #[inline] pub(super) fn pass_garbage() -> bool { LOCAL_COLLECTOR.with(|local_collector| { let local_collector_ptr = local_collector.get(); let collector_ptr = unsafe { *local_collector_ptr }; if collector_ptr.is_null() { return true; } let collector = unsafe { &*collector_ptr }; if collector.num_readers != 0 { return false; } if collector.has_garbage { // In case the thread state is marked `Invalid`, a `Release` guard is required since // any remaining garbage may be reclaimed by other threads. collector.state.fetch_or(Collector::INVALID, Release); unsafe { *local_collector_ptr = ptr::null_mut(); } mark_scan_enforced(); } true }) } /// Allocates a new [`Collector`]. fn alloc() -> NonNull { let mut boxed = Box::new(Collector::default()); boxed.state.store(Self::INACTIVE, Relaxed); boxed.next_epoch_update = Self::CADENCE; let ptr = Box::into_raw(boxed); let mut current = GLOBAL_ROOT.chain_head.load(Relaxed); loop { unsafe { (*ptr) .next_link .store(Tag::unset_tag(current).cast_mut(), Relaxed); } // It keeps the tag intact. let tag = Tag::into_tag(current); let new = Tag::update_tag(ptr, tag).cast_mut(); if let Err(actual) = GLOBAL_ROOT .chain_head .compare_exchange_weak(current, new, Release, Relaxed) { current = actual; } else { break; } } unsafe { NonNull::new_unchecked(ptr) } } /// Acknowledges a new global epoch. 
fn epoch_updated(collector_ptr: NonNull) { unsafe { debug_assert_eq!( (*collector_ptr.as_ptr()).state.load(Relaxed) & Self::INACTIVE, 0 ); debug_assert_eq!( (*collector_ptr.as_ptr()).state.load(Relaxed), u8::from((*collector_ptr.as_ptr()).announcement) ); if (*collector_ptr.as_ptr()).has_garbage { let mut garbage_link = (*collector_ptr.as_ptr()).next_instance_link.take(); (*collector_ptr.as_ptr()).next_instance_link = (*collector_ptr.as_ptr()).previous_instance_link.take(); (*collector_ptr.as_ptr()).previous_instance_link = (*collector_ptr.as_ptr()).current_instance_link.take(); (*collector_ptr.as_ptr()).has_garbage = (*collector_ptr.as_ptr()).next_instance_link.is_some() || (*collector_ptr.as_ptr()).previous_instance_link.is_some(); while let Some(instance_ptr) = garbage_link.take() { garbage_link = (*instance_ptr.as_ptr()).next_ptr(); let mut guard = ExitGuard::new(garbage_link, |mut garbage_link| { while let Some(instance_ptr) = garbage_link.take() { // Something went wrong during dropping and deallocating an instance. garbage_link = (*instance_ptr.as_ptr()).next_ptr(); // Previous `drop_and_dealloc` may have accessed `self.current_instance_link`. std::sync::atomic::compiler_fence(Acquire); Collector::collect(collector_ptr, instance_ptr.as_ptr()); } }); // The `drop` below may access `self.current_instance_link`. std::sync::atomic::compiler_fence(Acquire); drop(Box::from_raw(instance_ptr.as_ptr())); garbage_link = guard.take(); } } (*collector_ptr.as_ptr()).next_epoch_update = Self::CADENCE; } } /// Clears all the garbage instances for dropping the [`Collector`]. fn clear_for_drop(collector_ptr: *mut Collector) { unsafe { loop { let garbage_containers = [ (*collector_ptr).previous_instance_link.take(), (*collector_ptr).current_instance_link.take(), (*collector_ptr).next_instance_link.take(), ]; if !garbage_containers.iter().any(Option::is_some) { break; } for mut link in garbage_containers { while let Some(instance_ptr) = link { link = (*instance_ptr.as_ptr()).next_ptr(); drop(Box::from_raw(instance_ptr.as_ptr())); } } } } } /// Scans the [`Collector`] instances to update the global epoch. fn scan(collector_ptr: NonNull) { unsafe { debug_assert_eq!( (*collector_ptr.as_ptr()).state.load(Relaxed) & Self::INVALID, 0 ); if u8::from((*collector_ptr.as_ptr()).announcement) != GLOBAL_ROOT.epoch.load(Relaxed) { // No need for further processing if the announcement is not up-to-date. return; } // Only one thread that acquires the chain lock is allowed to scan the thread-local // collectors. let lock_result = Self::lock_chain(); if let Ok(mut current_collector_ptr) = lock_result { let _guard = ExitGuard::new((), |()| Self::unlock_chain()); let known_epoch = (*collector_ptr.as_ptr()).state.load(Relaxed); let mut update_global_epoch = true; let mut prev_collector_ptr: *mut Collector = ptr::null_mut(); while !current_collector_ptr.is_null() { if ptr::eq(collector_ptr.as_ptr(), current_collector_ptr) { prev_collector_ptr = current_collector_ptr; current_collector_ptr = (*collector_ptr.as_ptr()).next_link.load(Acquire); continue; } // `Acquire` is needed in case the other thread is inactive so that this thread // needs to reclaim memory for the thread. let collector_state = (*current_collector_ptr).state.load(Acquire); let next_collector_ptr = (*current_collector_ptr).next_link.load(Acquire); if (collector_state & Self::INVALID) != 0 { // The collector is obsolete. 
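                        // Unlink the obsolete collector from the chain; the unlinked
                        // `Collector` itself is then retired through `collect` below, so
                        // a thread still traversing the chain keeps it alive until enough
                        // epochs have passed.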
let result = if prev_collector_ptr.is_null() { GLOBAL_ROOT .chain_head .fetch_update(Release, Relaxed, |p| { let tag = Tag::into_tag(p); debug_assert!(tag == Tag::First || tag == Tag::Both); if ptr::eq(Tag::unset_tag(p), current_collector_ptr) { Some(Tag::update_tag(next_collector_ptr, tag).cast_mut()) } else { None } }) .is_ok() } else { (*prev_collector_ptr) .next_link .store(next_collector_ptr, Release); true }; if result { Self::collect(collector_ptr, current_collector_ptr); current_collector_ptr = next_collector_ptr; continue; } } else if (collector_state & Self::INACTIVE) == 0 && collector_state != known_epoch { // Not ready for an epoch update. update_global_epoch = false; break; } prev_collector_ptr = current_collector_ptr; current_collector_ptr = next_collector_ptr; } if update_global_epoch { // A memory region can be retired after a `SeqCst` barrier in a `Guard`, and the // memory region can only be deallocated after the thread has observed three // times of epoch updates. This `SeqCst` fence ensures that the epoch update is // strictly sequenced after/before a `Guard`, enabling the event of the // retirement of the memory region is also globally ordered with epoch updates. fence(SeqCst); GLOBAL_ROOT .epoch .store(Epoch::from_u8(known_epoch).next().into(), Relaxed); } } } } /// Clears the [`Collector`] chain to if all are invalid. fn clear_chain() -> bool { let lock_result = Self::lock_chain(); if let Ok(collector_head) = lock_result { let _guard = ExitGuard::new((), |()| Self::unlock_chain()); unsafe { let mut current_collector_ptr = collector_head; while !current_collector_ptr.is_null() { if ((*current_collector_ptr).state.load(Acquire) & Self::INVALID) == 0 { return false; } current_collector_ptr = (*current_collector_ptr).next_link.load(Acquire); } // Reaching here means that there is no `Ptr` that possibly sees any garbage instances // in those `Collector` instances in the chain. let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { if Tag::unset_tag(p) == collector_head { let tag = Tag::into_tag(p); debug_assert!(tag == Tag::First || tag == Tag::Both); Some(Tag::update_tag(ptr::null::(), tag).cast_mut()) } else { None } }); if result.is_ok() { let mut current_collector_ptr = collector_head; while !current_collector_ptr.is_null() { let next_collector_ptr = (*current_collector_ptr).next_link.load(Acquire); drop(Box::from_raw(current_collector_ptr)); current_collector_ptr = next_collector_ptr; } return true; } } } false } /// Locks the chain. fn lock_chain() -> Result<*mut Collector, *mut Collector> { GLOBAL_ROOT .chain_head .fetch_update(Acquire, Acquire, |p| { let tag = Tag::into_tag(p); if tag == Tag::First || tag == Tag::Both { None } else { Some(Tag::update_tag(p, Tag::First).cast_mut()) } }) .map(|p| Tag::unset_tag(p).cast_mut()) } /// Unlocks the chain. fn unlock_chain() { loop { let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { let tag = Tag::into_tag(p); debug_assert!(tag == Tag::First || tag == Tag::Both); let new_tag = if tag == Tag::First { Tag::None } else { // Retain the mark. 
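                        // `Tag::Second` records that an invalidated `Collector` may still
                        // be in the chain awaiting cleanup, so unlocking must not clear it.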
Tag::Second }; Some(Tag::update_tag(p, new_tag).cast_mut()) }); if result.is_ok() { break; } } } } impl Drop for Collector { #[inline] fn drop(&mut self) { let collector_ptr = addr_of_mut!(*self); Self::clear_for_drop(collector_ptr); } } impl Collectible for Collector { #[inline] fn next_ptr(&self) -> Option> { self.link.next_ptr() } #[inline] fn set_next_ptr(&self, next_ptr: Option>) { self.link.set_next_ptr(next_ptr); } } impl CollectorAnchor { fn alloc(&self) -> NonNull { let _: &CollectorAnchor = self; Collector::alloc() } } impl Drop for CollectorAnchor { #[inline] fn drop(&mut self) { unsafe { // `LOCAL_COLLECTOR` is the last thread-local variable to be dropped. LOCAL_COLLECTOR.with(|local_collector| { let local_collector_ptr = local_collector.get(); let collector_ptr = *local_collector_ptr; if !collector_ptr.is_null() { (*collector_ptr).state.fetch_or(Collector::INVALID, Release); } let mut temp_collector = Collector::default(); temp_collector.state.store(Collector::INACTIVE, Relaxed); *local_collector_ptr = addr_of_mut!(temp_collector); if !Collector::clear_chain() { mark_scan_enforced(); } Collector::clear_for_drop(addr_of_mut!(temp_collector)); *local_collector_ptr = ptr::null_mut(); }); } } } /// Marks the head of a chain to indicate that there is a potentially unreachable `Collector` in the /// chain. fn mark_scan_enforced() { // `Tag::Second` indicates that there is a garbage `Collector`. let _result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { let new_tag = match Tag::into_tag(p) { Tag::None => Tag::Second, Tag::First => Tag::Both, Tag::Second | Tag::Both => return None, }; Some(Tag::update_tag(p, new_tag).cast_mut()) }); } thread_local! { static LOCAL_COLLECTOR: UnsafeCell<*mut Collector> = const { UnsafeCell::new(ptr::null_mut()) }; static COLLECTOR_ANCHOR: CollectorAnchor = const { CollectorAnchor }; } /// The global and default [`CollectorRoot`]. static GLOBAL_ROOT: CollectorRoot = CollectorRoot { epoch: AtomicU8::new(0), chain_head: AtomicPtr::new(ptr::null_mut()), }; sdd-4.5.3/src/epoch.rs000064400000000000000000000073131046102023000126460ustar 00000000000000/// [`Epoch`] is a unit of time that dictates the lifetime of retired memory regions. /// /// The global epoch rotates `64` [`Epoch`] values in a range of `[0..63]`, instead of monotonically /// increasing to reduce the memory footprint of the [`Epoch`] values. #[derive(Clone, Copy, Debug, Default, Eq, Ord, PartialEq, PartialOrd)] pub struct Epoch { value: u8, } impl Epoch { /// Rotates `64` epoch values. pub(super) const NUM_EPOCHS: u8 = 64; /// Returns a future [`Epoch`] when the current readers will not be present. /// /// The current [`Epoch`] may lag behind the global epoch value by `1`, therefore this method /// returns an [`Epoch`] three epochs next to `self`. /// /// # Examples /// /// ``` /// use sdd::Epoch; /// /// let initial = Epoch::default(); /// /// let next_generation = initial.next_generation(); /// assert_eq!(next_generation, initial.next().next().next()); /// ``` #[inline] #[must_use] pub const fn next_generation(self) -> Epoch { self.next().next().next() } /// Checks if the current [`Epoch`] is in the same generation as the given [`Epoch`]. /// /// This operation is not commutative, e.g., `a.in_same_generation(b)` is not the same as /// `b.in_same_generation(a)`. This returns `true` if the other [`Epoch`] is either the same /// with the current one, the next one, or the next one after that. 
The meaning of `false` /// returned by this method is that a memory region retired in the current [`Epoch`] will no /// longer be reachable in the other [`Epoch`]. /// /// # Examples /// /// ``` /// use sdd::Epoch; /// /// let initial = Epoch::default(); /// /// let next_generation = initial.next_generation(); /// assert!(initial.in_same_generation(initial.next().next())); /// assert!(!initial.in_same_generation(initial.next().next().next())); /// ``` #[inline] #[must_use] pub const fn in_same_generation(self, other: Epoch) -> bool { other.value == self.value || other.value == self.next().value || other.value == self.next().next().value } /// Returns the next [`Epoch`] value. /// /// # Examples /// /// ``` /// use sdd::Epoch; /// /// let initial = Epoch::default(); /// /// let next = initial.next(); /// assert!(initial < next); /// /// let next_prev = next.prev(); /// assert_eq!(initial, next_prev); /// ``` #[inline] #[must_use] pub const fn next(self) -> Epoch { Epoch { value: (self.value + 1) % Self::NUM_EPOCHS, } } /// Returns the previous [`Epoch`] value. /// /// # Examples /// /// ``` /// use sdd::Epoch; /// /// let initial = Epoch::default(); /// /// let prev = initial.prev(); /// assert!(initial < prev); /// /// let prev_next = prev.next(); /// assert_eq!(initial, prev_next); /// ``` #[inline] #[must_use] pub const fn prev(self) -> Epoch { Epoch { value: (self.value + Self::NUM_EPOCHS - 1) % Self::NUM_EPOCHS, } } /// Construct an [`Epoch`] from a [`u8`] value. #[inline] pub(super) const fn from_u8(value: u8) -> Epoch { Epoch { value } } } impl TryFrom for Epoch { type Error = Epoch; #[inline] fn try_from(value: u8) -> Result { if value < Self::NUM_EPOCHS { Ok(Epoch { value }) } else { Err(Epoch { value: value % Self::NUM_EPOCHS, }) } } } impl From for u8 { #[inline] fn from(epoch: Epoch) -> Self { epoch.value } } sdd-4.5.3/src/exit_guard.rs000064400000000000000000000026261046102023000137050ustar 00000000000000//! This module implements a simplified but safe version of //! [`scopeguard`](https://crates.io/crates/scopeguard). use std::mem::{ManuallyDrop, forget}; use std::ops::{Deref, DerefMut}; /// [`ExitGuard`] captures the environment and invokes a defined closure at the end of the scope. pub(crate) struct ExitGuard { drop_callback: ManuallyDrop<(T, F)>, } impl ExitGuard { /// Creates a new [`ExitGuard`] with the specified variables captured. #[inline] pub(crate) const fn new(captured: T, drop_callback: F) -> Self { Self { drop_callback: ManuallyDrop::new((captured, drop_callback)), } } /// Forgets the [`ExitGuard`] without invoking the drop callback. #[inline] pub(crate) fn forget(mut self) { unsafe { ManuallyDrop::drop(&mut self.drop_callback); } forget(self); } } impl Drop for ExitGuard { #[inline] fn drop(&mut self) { let (c, f) = unsafe { ManuallyDrop::take(&mut self.drop_callback) }; f(c); } } impl Deref for ExitGuard { type Target = T; #[inline] fn deref(&self) -> &Self::Target { &self.drop_callback.0 } } impl DerefMut for ExitGuard { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.drop_callback.0 } } sdd-4.5.3/src/guard.rs000064400000000000000000000132661046102023000126560ustar 00000000000000use std::panic::{RefUnwindSafe, UnwindSafe}; use std::ptr::NonNull; use super::Epoch; use super::collectible::DeferredClosure; use super::collector::Collector; /// [`Guard`] allows the user to read [`AtomicShared`](super::AtomicShared) and keeps the /// underlying instance pinned to the thread. 
/// /// [`Guard`] internally prevents the global epoch value from passing through the value /// announced by the current thread, thus keeping reachable instances in the thread from being /// garbage collected. #[derive(Debug)] pub struct Guard { collector_ptr: NonNull, } impl Guard { /// Creates a new [`Guard`]. /// /// # Panics /// /// The maximum number of [`Guard`] instances in a thread is limited to `u32::MAX`; a /// thread panics when the number of [`Guard`] instances in the thread exceeds the limit. /// /// # Examples /// /// ``` /// use sdd::Guard; /// /// let guard = Guard::new(); /// ``` #[inline] #[must_use] pub fn new() -> Self { let collector_ptr = Collector::current(); Collector::new_guard(collector_ptr); Self { collector_ptr } } /// Returns the epoch in which the current thread lives. /// /// This method can be used to check whether a retired memory region is potentially reachable or /// not. A chunk of memory retired in a witnessed [`Epoch`] can be deallocated after the thread /// has observed three new epochs. For instance, if the witnessed epoch value is `1` in the /// current thread where the global epoch value is `2`, and an instance is retired in the same /// thread, the instance can be dropped when the thread witnesses `0` which is three epochs away /// from `1`. /// /// In other words, there can be potential readers of the memory chunk until the current thread /// witnesses the previous epoch. In the above example, the global epoch can be in `2` /// while the current thread has only witnessed `1`, and therefore there can a reader of the /// memory chunk in another thread in epoch `2`. The reader can survive until the global epoch /// reaches `0`, because the thread being in `2` prevents the global epoch from reaching `0`. /// /// # Examples /// /// ``` /// use sdd::{Guard, Owned}; /// use std::sync::atomic::AtomicBool; /// use std::sync::atomic::Ordering::Relaxed; /// /// static DROPPED: AtomicBool = AtomicBool::new(false); /// /// struct D(&'static AtomicBool); /// /// impl Drop for D { /// fn drop(&mut self) { /// self.0.store(true, Relaxed); /// } /// } /// /// let owned = Owned::new(D(&DROPPED)); /// /// let epoch_before = Guard::new().epoch(); /// /// drop(owned); /// assert!(!DROPPED.load(Relaxed)); /// /// while Guard::new().epoch() == epoch_before { /// assert!(!DROPPED.load(Relaxed)); /// } /// /// while Guard::new().epoch() == epoch_before.next() { /// assert!(!DROPPED.load(Relaxed)); /// } /// /// while Guard::new().epoch() == epoch_before.next().next() { /// assert!(!DROPPED.load(Relaxed)); /// } /// /// assert!(DROPPED.load(Relaxed)); /// assert_eq!(Guard::new().epoch(), epoch_before.next().next().next()); /// ``` #[inline] #[must_use] pub fn epoch(&self) -> Epoch { Collector::current_epoch() } /// Returns `true` if the thread local container has garbage. /// /// # Examples /// /// ``` /// use sdd::{Guard, Shared}; /// /// let guard = Guard::new(); /// /// assert!(!guard.has_garbage()); /// /// drop(Shared::new(1_usize)); /// assert!(guard.has_garbage()); /// ``` #[inline] #[must_use] pub const fn has_garbage(&self) -> bool { Collector::has_garbage(self.collector_ptr) } /// Sets the garbage flag to allow this thread to advance the global epoch. 
/// /// # Examples /// /// ``` /// use sdd::Guard; /// /// let guard = Guard::new(); /// /// assert!(!guard.has_garbage()); /// guard.set_has_garbage(); /// assert!(guard.has_garbage()); /// ``` #[inline] pub const fn set_has_garbage(&self) { Collector::set_has_garbage(self.collector_ptr); } /// Forces the [`Guard`] to try to start a new epoch when it is dropped. /// /// # Examples /// /// ``` /// use sdd::Guard; /// /// let guard = Guard::new(); /// /// let epoch = guard.epoch(); /// guard.accelerate(); /// /// drop(guard); /// /// assert_ne!(epoch, Guard::new().epoch()); /// ``` #[inline] pub const fn accelerate(&self) { Collector::accelerate(self.collector_ptr); } /// Executes the supplied closure at a later point of time. /// /// It is guaranteed that the closure will be executed after every [`Guard`] at the moment when /// the method was invoked is dropped, however it is totally non-deterministic when exactly the /// closure will be executed. /// /// # Examples /// /// ``` /// use sdd::Guard; /// /// let guard = Guard::new(); /// guard.defer_execute(|| println!("deferred")); /// ``` #[inline] pub fn defer_execute(&self, f: F) { Collector::collect( self.collector_ptr, Box::into_raw(Box::new(DeferredClosure::new(f))), ); } } impl Default for Guard { #[inline] fn default() -> Self { Self::new() } } impl Drop for Guard { #[inline] fn drop(&mut self) { Collector::end_guard(self.collector_ptr); } } impl RefUnwindSafe for Guard {} impl UnwindSafe for Guard {} sdd-4.5.3/src/lib.rs000064400000000000000000000023201046102023000123070ustar 00000000000000#![deny(missing_docs, warnings, clippy::all, clippy::pedantic)] #![doc = include_str!("../README.md")] mod atomic_owned; pub use atomic_owned::AtomicOwned; mod atomic_shared; pub use atomic_shared::AtomicShared; pub mod bag; pub use bag::Bag; mod epoch; pub use epoch::Epoch; mod guard; pub use guard::Guard; mod linked_list; pub use linked_list::{LinkedEntry, LinkedList}; mod owned; pub use owned::Owned; mod ptr; pub use ptr::Ptr; pub mod queue; pub use queue::Queue; mod shared; pub use shared::Shared; pub mod stack; pub use stack::Stack; mod tag; pub use tag::Tag; mod collectible; mod collector; mod exit_guard; mod ref_counted; /// Suspends the garbage collector of the current thread. /// /// It returns `false` if there is an active [`Guard`] in the thread. Otherwise, it passes all its /// retired instances to a free flowing garbage container that can be cleaned up by other threads. /// /// # Examples /// /// ``` /// use sdd::{suspend, Guard}; /// /// assert!(suspend()); /// /// { /// let guard = Guard::new(); /// assert!(!suspend()); /// } /// /// assert!(suspend()); /// ``` #[inline] #[must_use] pub fn suspend() -> bool { collector::Collector::pass_garbage() } #[cfg(test)] mod tests; sdd-4.5.3/src/linked_list.rs000064400000000000000000000373541046102023000140610ustar 00000000000000//! [`LinkedList`] is a trait that implements lock-free concurrent singly linked list operations. use std::fmt::{self, Debug, Display}; use std::ops::{Deref, DerefMut}; use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Relaxed}; use super::{AtomicShared, Guard, Ptr, Shared, Tag}; /// [`LinkedList`] is a trait that implements lock-free singly linked list operations. pub trait LinkedList: Sized { /// Returns a reference to the forward link. /// /// The pointer value may be tagged if [`Self::mark`] or [`Self::delete_self`] has been /// invoked. The [`AtomicShared`] must only be updated through [`LinkedList`] in order to keep /// the linked list consistent. 
fn link_ref(&self) -> &AtomicShared; /// Returns `true` if `self` is reachable and not marked. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use sdd::{AtomicShared, LinkedList, Tag}; /// /// #[derive(Default)] /// struct L(AtomicShared, usize); /// impl LinkedList for L { /// fn link_ref(&self) -> &AtomicShared { /// &self.0 /// } /// } /// /// let head: L = L::default(); /// assert!(head.is_clear(Relaxed)); /// assert!(head.mark(Relaxed)); /// assert!(!head.is_clear(Relaxed)); /// assert!(head.delete_self(Relaxed)); /// assert!(!head.is_clear(Relaxed)); /// ``` #[inline] fn is_clear(&self, order: Ordering) -> bool { self.link_ref().tag(order) == Tag::None } /// Marks `self` with an internal flag to denote that `self` is in a special state. /// /// Returns `false` if a flag has already been set on `self`. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use sdd::{AtomicShared, LinkedList}; /// /// #[derive(Default)] /// struct L(AtomicShared, usize); /// impl LinkedList for L { /// fn link_ref(&self) -> &AtomicShared { /// &self.0 /// } /// } /// /// let head: L = L::default(); /// assert!(head.mark(Relaxed)); /// ``` #[inline] fn mark(&self, order: Ordering) -> bool { self.link_ref() .update_tag_if(Tag::First, |ptr| ptr.tag() == Tag::None, order, Relaxed) } /// Removes any mark from `self`. /// /// Returns `false` if no flag has been set on `self`. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use sdd::{AtomicShared, LinkedList}; /// /// #[derive(Default)] /// struct L(AtomicShared, usize); /// impl LinkedList for L { /// fn link_ref(&self) -> &AtomicShared { /// &self.0 /// } /// } /// /// let head: L = L::default(); /// assert!(!head.unmark(Relaxed)); /// assert!(head.mark(Relaxed)); /// assert!(head.unmark(Relaxed)); /// assert!(!head.is_marked(Relaxed)); /// ``` #[inline] fn unmark(&self, order: Ordering) -> bool { self.link_ref() .update_tag_if(Tag::None, |ptr| ptr.tag() == Tag::First, order, Relaxed) } /// Returns `true` if `self` has a mark on it. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use sdd::{AtomicShared, LinkedList}; /// /// #[derive(Default)] /// struct L(AtomicShared, usize); /// impl LinkedList for L { /// fn link_ref(&self) -> &AtomicShared { /// &self.0 /// } /// } /// /// let head: L = L::default(); /// assert!(!head.is_marked(Relaxed)); /// assert!(head.mark(Relaxed)); /// assert!(head.is_marked(Relaxed)); /// ``` #[inline] fn is_marked(&self, order: Ordering) -> bool { self.link_ref().tag(order) == Tag::First } /// Deletes `self`. /// /// Returns `false` if `self` is already marked as deleted. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use sdd::{AtomicShared, Guard, LinkedList, Shared}; /// /// #[derive(Default)] /// struct L(AtomicShared, usize); /// impl LinkedList for L { /// fn link_ref(&self) -> &AtomicShared { /// &self.0 /// } /// } /// /// let guard = Guard::new(); /// /// let head: L = L::default(); /// let tail: Shared = Shared::new(L::default()); /// assert!(head.push_back(tail.clone(), false, Relaxed, &guard).is_ok()); /// /// tail.delete_self(Relaxed); /// assert!(head.next_ptr(Relaxed, &guard).as_ref().is_none()); /// ``` #[inline] fn delete_self(&self, order: Ordering) -> bool { self.link_ref().update_tag_if( Tag::Second, |ptr| { let tag = ptr.tag(); tag == Tag::None || tag == Tag::First }, order, Relaxed, ) } /// Returns `true` if `self` has been deleted. 
/// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use sdd::{AtomicShared, LinkedList}; /// /// #[derive(Default)] /// struct L(AtomicShared, usize); /// impl LinkedList for L { /// fn link_ref(&self) -> &AtomicShared { /// &self.0 /// } /// } /// /// let entry: L = L::default(); /// assert!(!entry.is_deleted(Relaxed)); /// entry.delete_self(Relaxed); /// assert!(entry.is_deleted(Relaxed)); /// ``` #[inline] fn is_deleted(&self, order: Ordering) -> bool { let tag = self.link_ref().tag(order); tag == Tag::Second || tag == Tag::Both } /// Appends the given entry to `self` and returns a pointer to the entry. /// /// If `mark` is `true`, it atomically sets an internal flag on `self` when updating /// the linked list, otherwise it removes the mark. /// /// # Errors /// /// Returns the supplied [`Shared`] when it finds `self` deleted. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::{Relaxed, Release}; /// /// use sdd::{AtomicShared, Guard, LinkedList, Shared}; /// /// #[derive(Default)] /// struct L(AtomicShared, usize); /// impl LinkedList for L { /// fn link_ref(&self) -> &AtomicShared { /// &self.0 /// } /// } /// /// let guard = Guard::new(); /// /// let head: L = L::default(); /// assert!(head.push_back(Shared::new(L::default()), true, Release, &guard).is_ok()); /// assert!(head.is_marked(Relaxed)); /// assert!(head.push_back(Shared::new(L::default()), false, Release, &guard).is_ok()); /// assert!(!head.is_marked(Relaxed)); /// /// head.delete_self(Relaxed); /// assert!(!head.is_marked(Relaxed)); /// assert!(head.push_back(Shared::new(L::default()), false, Release, &guard).is_err()); /// ``` #[inline] fn push_back<'g>( &self, mut entry: Shared, mark: bool, order: Ordering, guard: &'g Guard, ) -> Result, Shared> { let new_tag = if mark { Tag::First } else { Tag::None }; let mut next_ptr = self.link_ref().load(Relaxed, guard); loop { let tag = next_ptr.tag(); if tag == Tag::Second || tag == Tag::Both { break; } entry .link_ref() .swap((next_ptr.get_shared(), Tag::None), Relaxed); match self.link_ref().compare_exchange_weak( next_ptr, (Some(entry), new_tag), order, Relaxed, guard, ) { Ok((_, updated)) => { return Ok(updated); } Err((passed, actual)) => { entry = unsafe { passed.unwrap_unchecked() }; next_ptr = actual; } } } // `current` has been deleted. Err(entry) } /// Returns the closest next valid entry. /// /// It unlinks deleted entries until it reaches a valid one. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; /// /// use sdd::{AtomicShared, Guard, LinkedList, Shared}; /// /// #[derive(Default)] /// struct L(AtomicShared, usize); /// impl LinkedList for L { /// fn link_ref(&self) -> &AtomicShared { /// &self.0 /// } /// } /// /// let guard = Guard::new(); /// /// let head: L = L::default(); /// assert!( /// head.push_back(Shared::new(L(AtomicShared::null(), 1)), false, Release, &guard).is_ok()); /// head.mark(Relaxed); /// /// let next_ptr = head.next_ptr(Acquire, &guard); /// assert_eq!(next_ptr.as_ref().unwrap().1, 1); /// assert!(head.is_marked(Relaxed)); /// ``` #[inline] fn next_ptr<'g>(&self, order: Ordering, guard: &'g Guard) -> Ptr<'g, Self> { next_ptr_recursive(self, order, 64, guard) } /// Returns a [`Shared`] handle to the closest next valid entry. /// /// It unlinks deleted entries until it reaches a valid one. 
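// An illustrative sketch (reusing the `L` wrapper from the method examples
// above) of traversing a list with `next_ptr`, which also unlinks entries that
// were removed with `delete_self`:
//
//     use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
//     use sdd::{AtomicShared, Guard, LinkedList, Shared};
//
//     #[derive(Default)]
//     struct L(AtomicShared<L>, usize);
//     impl LinkedList for L {
//         fn link_ref(&self) -> &AtomicShared<L> {
//             &self.0
//         }
//     }
//
//     let guard = Guard::new();
//     let head = L::default();
//     for i in [3, 2, 1] {
//         // Each entry is pushed right behind the head, so the list reads 1, 2, 3.
//         assert!(head
//             .push_back(Shared::new(L(AtomicShared::null(), i)), false, Release, &guard)
//             .is_ok());
//     }
//
//     // Logically delete the first entry; `next_ptr` unlinks it while traversing.
//     head.next_ptr(Acquire, &guard).as_ref().unwrap().delete_self(Relaxed);
//
//     let mut sum = 0;
//     let mut current = head.next_ptr(Acquire, &guard);
//     while let Some(entry) = current.as_ref() {
//         sum += entry.1;
//         current = entry.next_ptr(Acquire, &guard);
//     }
//     assert_eq!(sum, 5); // Entries 2 and 3 remain reachable.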
/// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::{Acquire, Relaxed, Release}; /// /// use sdd::{AtomicShared, Guard, LinkedList, Shared}; /// /// #[derive(Default)] /// struct L(AtomicShared, usize); /// impl LinkedList for L { /// fn link_ref(&self) -> &AtomicShared { /// &self.0 /// } /// } /// /// let guard = Guard::new(); /// /// let head: L = L::default(); /// assert!( /// head.push_back(Shared::new(L(AtomicShared::null(), 1)), false, Release, &guard).is_ok()); /// head.mark(Relaxed); /// /// let next_shared = head.next_shared(Acquire, &guard); /// assert_eq!(next_shared.unwrap().1, 1); /// assert!(head.is_marked(Relaxed)); /// ``` #[inline] fn next_shared(&self, order: Ordering, guard: &Guard) -> Option> { let mut next_ptr = self.next_ptr(order, guard); let mut next_entry = next_ptr.get_shared(); while !next_ptr.is_null() && next_entry.is_none() { // The entry was released in the mean time. next_ptr = next_ptr .as_ref() .map_or_else(Ptr::null, |n| n.next_ptr(Acquire, guard)); next_entry = next_ptr.get_shared(); } next_entry } } /// [`LinkedEntry`] stores an instance of `T` and a link to the next entry. pub struct LinkedEntry { /// `instance` is always `Some` unless [`Self::take_inner`] is called. instance: Option, /// `next` points to the next entry in a linked list. next: AtomicShared, } impl LinkedEntry { /// Extracts the inner instance of `T`. /// /// # Safety /// /// This method has to be called at most once per [`LinkedEntry`], and the caller needs to make /// sure that the [`LinkedEntry`] is not accessed via [`LinkedList`] methods. #[inline] pub(crate) unsafe fn take_inner(&mut self) -> T { unsafe { self.instance.take().unwrap_unchecked() } } #[inline] pub(super) fn new(val: T) -> Self { Self { instance: Some(val), next: AtomicShared::default(), } } /// Returns a reference to `next`. 
#[inline] pub(super) fn next(&self) -> &AtomicShared { &self.next } } impl AsRef for LinkedEntry { #[inline] fn as_ref(&self) -> &T { unsafe { self.instance.as_ref().unwrap_unchecked() } } } impl AsMut for LinkedEntry { #[inline] fn as_mut(&mut self) -> &mut T { unsafe { self.instance.as_mut().unwrap_unchecked() } } } impl Clone for LinkedEntry { #[inline] fn clone(&self) -> Self { Self { instance: self.instance.clone(), next: AtomicShared::default(), } } } impl Debug for LinkedEntry { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Entry") .field("instance", &self.instance) .field("next", &self.next) .field("marked", &self.is_marked(Relaxed)) .field("removed", &self.is_deleted(Relaxed)) .finish() } } impl Deref for LinkedEntry { type Target = T; #[inline] fn deref(&self) -> &Self::Target { unsafe { self.instance.as_ref().unwrap_unchecked() } } } impl DerefMut for LinkedEntry { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { unsafe { self.instance.as_mut().unwrap_unchecked() } } } impl Drop for LinkedEntry { #[inline] fn drop(&mut self) { if !self.next.is_null(Relaxed) { let guard = Guard::new(); if let Some(next_entry) = self.next.load(Relaxed, &guard).as_ref() { next_ptr_recursive(next_entry, Relaxed, 64, &guard); } } } } impl Display for LinkedEntry { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Some(instance) = self.instance.as_ref() { write!(f, "Some({instance})") } else { write!(f, "None") } } } impl Eq for LinkedEntry {} impl LinkedList for LinkedEntry { #[inline] fn link_ref(&self) -> &AtomicShared { &self.next } } impl PartialEq for LinkedEntry { #[inline] fn eq(&self, other: &Self) -> bool { self.instance == other.instance } } /// Recursively cleans up the linked list starting from the supplied head. fn next_ptr_recursive<'g, T: LinkedList>( head: &T, order: Ordering, mut depth: usize, guard: &'g Guard, ) -> Ptr<'g, T> { let mut head_next_ptr = head.link_ref().load(order, guard); let mut head_tag = head_next_ptr.tag(); loop { let mut next_ptr = head_next_ptr; let next_valid_ptr = loop { if let Some(next_ref) = next_ptr.as_ref() { let next_next_ptr = next_ref.link_ref().load(order, guard); if next_next_ptr.tag() != Tag::Second { break next_ptr; } if depth != 0 { break next_ptr_recursive(next_ref, order, depth - 1, guard); } next_ptr = next_next_ptr; } else { break Ptr::null(); } }; // Do not allow a recursive call more than once. depth = 0; // Updates its link if there is an invalid entry between itself and the next valid one. if next_valid_ptr.with_tag(head_tag) != head_next_ptr { let next_valid_entry = next_valid_ptr.get_shared(); if !next_valid_ptr.is_null() && next_valid_entry.is_none() { // The entry was unlinked in the meantime. head_next_ptr = head.link_ref().load(order, guard); head_tag = head_next_ptr.tag(); continue; } match head.link_ref().compare_exchange( head_next_ptr, (next_valid_entry, head_tag), AcqRel, Acquire, guard, ) { Ok((prev, _)) => { if let Some(removed) = prev { let _: bool = removed.release(); } } Err((_, actual)) => { head_next_ptr = actual; head_tag = actual.tag(); continue; } } } return next_valid_ptr; } } sdd-4.5.3/src/owned.rs000064400000000000000000000142441046102023000126650ustar 00000000000000use std::mem::forget; use std::ops::Deref; use std::panic::UnwindSafe; use std::ptr::NonNull; use super::ref_counted::RefCounted; use super::{Guard, Ptr}; /// [`Owned`] uniquely owns an instance. 
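///
/// A minimal usage sketch (the `usize` payload is only an illustration):
///
/// ```
/// use sdd::Owned;
///
/// let owned: Owned<usize> = Owned::new(7);
/// assert_eq!(*owned, 7);
/// ```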
/// /// The instance is passed to the `EBR` garbage collector when the [`Owned`] is dropped. #[derive(Debug)] pub struct Owned { instance_ptr: NonNull>, } impl Owned { /// Creates a new instance of [`Owned`]. /// /// The type of the instance must be determined at compile-time, must not contain non-static /// references, and must not be a non-static reference since the instance can theoretically /// survive the process. For instance, `struct Disallowed<'l, T>(&'l T)` is not allowed, /// because an instance of the type cannot outlive `'l` whereas the garbage collector does not /// guarantee that the instance is dropped within `'l`. /// /// # Examples /// /// ``` /// use sdd::Owned; /// /// let owned: Owned = Owned::new(31); /// ``` #[inline] pub fn new(t: T) -> Self { Self { instance_ptr: RefCounted::new_unique(t), } } } impl Owned { /// Creates a new [`Owned`] without checking the lifetime of `T`. /// /// # Safety /// /// `T::drop` can be run after the [`Owned`] is dropped, therefore it is safe only if `T::drop` /// does not access short-lived data or [`std::mem::needs_drop`] is `false` for `T`. /// /// # Examples /// /// ``` /// use sdd::Owned; /// /// let hello = String::from("hello"); /// let owned: Owned<&str> = unsafe { Owned::new_unchecked(hello.as_str()) }; /// ``` #[inline] pub unsafe fn new_unchecked(t: T) -> Self { Self { instance_ptr: RefCounted::new_unique(t), } } /// Returns a [`Ptr`] to the instance that may live as long as the supplied [`Guard`]. /// /// # Examples /// /// ``` /// use sdd::{Guard, Owned}; /// /// let owned: Owned = Owned::new(37); /// let guard = Guard::new(); /// let ptr = owned.get_guarded_ptr(&guard); /// drop(owned); /// /// assert_eq!(*ptr.as_ref().unwrap(), 37); /// ``` #[inline] #[must_use] pub const fn get_guarded_ptr<'g>(&self, _guard: &'g Guard) -> Ptr<'g, T> { Ptr::from(self.instance_ptr.as_ptr()) } /// Returns a reference to the instance that may live as long as the supplied [`Guard`]. /// /// # Examples /// /// ``` /// use sdd::{Guard, Owned}; /// /// let owned: Owned = Owned::new(37); /// let guard = Guard::new(); /// let ref_b = owned.get_guarded_ref(&guard); /// drop(owned); /// /// assert_eq!(*ref_b, 37); /// ``` #[inline] #[must_use] pub const fn get_guarded_ref<'g>(&self, _guard: &'g Guard) -> &'g T { unsafe { RefCounted::inst_non_null_ptr(self.instance_ptr).as_ref() } } /// Returns a mutable reference to the instance. /// /// # Safety /// /// The method is `unsafe` since there can be a [`Ptr`] to the instance. /// /// # Examples /// /// ``` /// use sdd::Owned; /// /// let mut owned: Owned = Owned::new(38); /// unsafe { /// *owned.get_mut() += 1; /// } /// assert_eq!(*owned, 39); /// ``` #[inline] pub const unsafe fn get_mut(&mut self) -> &mut T { unsafe { (*self.instance_ptr.as_ptr()).get_mut_unique() } } /// Provides a raw pointer to the instance. /// /// # Examples /// /// ``` /// use sdd::Owned; /// /// let owned: Owned = Owned::new(10); /// /// assert_eq!(unsafe { *owned.as_ptr() }, 10); /// ``` #[inline] #[must_use] pub const fn as_ptr(&self) -> *const T { RefCounted::inst_non_null_ptr(self.instance_ptr).as_ptr() } /// Provides a raw non-null pointer to the instance. /// /// # Examples /// /// ``` /// use sdd::Owned; /// /// let owned: Owned = Owned::new(10); /// /// assert_eq!(unsafe { *owned.as_non_null_ptr().as_ref() }, 10); /// ``` #[inline] #[must_use] pub const fn as_non_null_ptr(&self) -> NonNull { RefCounted::inst_non_null_ptr(self.instance_ptr) } /// Drops the instance immediately. 
/// /// # Safety /// /// The caller must ensure that there is no [`Ptr`] pointing to the instance. /// /// # Examples /// /// ``` /// use sdd::Owned; /// use std::sync::atomic::AtomicBool; /// use std::sync::atomic::Ordering::Relaxed; /// /// static DROPPED: AtomicBool = AtomicBool::new(false); /// struct T(&'static AtomicBool); /// impl Drop for T { /// fn drop(&mut self) { /// self.0.store(true, Relaxed); /// } /// } /// /// let owned: Owned = Owned::new(T(&DROPPED)); /// assert!(!DROPPED.load(Relaxed)); /// /// unsafe { /// owned.drop_in_place(); /// } /// /// assert!(DROPPED.load(Relaxed)); /// ``` #[inline] pub unsafe fn drop_in_place(self) { unsafe { drop(Box::from_raw(self.instance_ptr.as_ptr())); } forget(self); } /// Creates a new [`Owned`] from the given pointer. #[inline] pub(super) const fn from(ptr: NonNull>) -> Self { Self { instance_ptr: ptr } } /// Returns a pointer to the [`RefCounted`]. #[inline] pub(super) const fn underlying_ptr(&self) -> *const RefCounted { self.instance_ptr.as_ptr() } } impl AsRef for Owned { #[inline] fn as_ref(&self) -> &T { unsafe { &*self.instance_ptr.as_ptr() } } } impl Deref for Owned { type Target = T; #[inline] fn deref(&self) -> &Self::Target { self.as_ref() } } impl Drop for Owned { #[inline] fn drop(&mut self) { RefCounted::pass_to_collector(self.instance_ptr.as_ptr()); } } unsafe impl Send for Owned {} // `T` does not need to be `Send` since sending `T` is not possible only with `&Owned`. unsafe impl Sync for Owned {} impl UnwindSafe for Owned {} sdd-4.5.3/src/ptr.rs000064400000000000000000000174251046102023000123620ustar 00000000000000use std::marker::PhantomData; use std::panic::UnwindSafe; use std::sync::atomic::Ordering::Relaxed; use std::{ptr, ptr::NonNull}; use crate::ref_counted::RefCounted; use crate::{Shared, Tag}; /// [`Ptr`] points to an instance. #[derive(Debug)] pub struct Ptr<'g, T> { instance_ptr: *const RefCounted, _phantom: PhantomData<&'g T>, } impl<'g, T> Ptr<'g, T> { /// Creates a null [`Ptr`]. /// /// # Examples /// /// ``` /// use sdd::Ptr; /// /// let ptr: Ptr = Ptr::null(); /// ``` #[inline] #[must_use] pub const fn null() -> Self { Self { instance_ptr: ptr::null(), _phantom: PhantomData, } } /// Returns `true` if the [`Ptr`] is null. /// /// # Examples /// /// ``` /// use sdd::Ptr; /// /// let ptr: Ptr = Ptr::null(); /// assert!(ptr.is_null()); /// ``` #[inline] #[must_use] pub fn is_null(&self) -> bool { Tag::unset_tag(self.instance_ptr).is_null() } /// Tries to create a reference to the underlying instance. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Guard}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(21); /// let guard = Guard::new(); /// let ptr = atomic_shared.load(Relaxed, &guard); /// assert_eq!(*ptr.as_ref().unwrap(), 21); /// ``` #[inline] #[must_use] pub fn as_ref(&self) -> Option<&'g T> { let ptr = Tag::unset_tag(self.instance_ptr); if ptr.is_null() { return None; } unsafe { Some(&*ptr) } } /// Tries to create a reference to the underlying instance without checking tag bits. /// /// # Safety /// /// This [`Ptr`] must not have any tag bits set, otherwise dereferencing the pointer may lead to /// undefined behavior. 
/// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Guard}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(21); /// let guard = Guard::new(); /// let ptr = atomic_shared.load(Relaxed, &guard); /// assert_eq!(unsafe { *ptr.as_ref_unchecked().unwrap() }, 21); /// ``` #[inline] #[must_use] pub const unsafe fn as_ref_unchecked(&self) -> Option<&'g T> { unsafe { RefCounted::inst_ptr(self.instance_ptr).as_ref() } } /// Provides a raw pointer to the instance. /// /// # Examples /// /// ``` /// use sdd::{Guard, Shared}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let shared: Shared = Shared::new(29); /// let guard = Guard::new(); /// let ptr = shared.get_guarded_ptr(&guard); /// drop(shared); /// /// assert_eq!(unsafe { *ptr.as_ptr() }, 29); /// ``` #[inline] #[must_use] pub fn as_ptr(&self) -> *const T { RefCounted::inst_ptr(Tag::unset_tag(self.instance_ptr)) } /// Tries to create a pointer to the underlying instance without checking tag bits. /// /// # Safety /// /// This [`Ptr`] must not have any tag bits set, otherwise dereferencing the pointer may lead to /// undefined behavior. /// /// # Examples /// /// ``` /// use sdd::{AtomicShared, Guard}; /// use std::sync::atomic::Ordering::Relaxed; /// /// let atomic_shared: AtomicShared = AtomicShared::new(21); /// let guard = Guard::new(); /// let ptr = atomic_shared.load(Relaxed, &guard); /// assert_eq!(unsafe { *ptr.as_ptr_unchecked() }, 21); /// ``` #[inline] #[must_use] pub const unsafe fn as_ptr_unchecked(&self) -> *const T { RefCounted::inst_ptr(self.instance_ptr) } /// Returns its [`Tag`]. /// /// # Examples /// /// ``` /// use sdd::{Ptr, Tag}; /// /// let ptr: Ptr = Ptr::null(); /// assert_eq!(ptr.tag(), Tag::None); /// ``` #[inline] #[must_use] pub fn tag(&self) -> Tag { Tag::into_tag(self.instance_ptr) } /// Sets a [`Tag`], overwriting its existing [`Tag`]. /// /// Returns the previous tag value. /// /// # Examples /// /// ``` /// use sdd::{Ptr, Tag}; /// /// let mut ptr: Ptr = Ptr::null(); /// assert_eq!(ptr.set_tag(Tag::Both), Tag::None); /// assert_eq!(ptr.tag(), Tag::Both); /// ``` #[inline] pub fn set_tag(&mut self, tag: Tag) -> Tag { let old_tag = Tag::into_tag(self.instance_ptr); self.instance_ptr = Tag::update_tag(self.instance_ptr, tag); old_tag } /// Clears its [`Tag`]. /// /// Returns the previous tag value. /// /// # Examples /// /// ``` /// use sdd::{Ptr, Tag}; /// /// let mut ptr: Ptr = Ptr::null().with_tag(Tag::Both); /// assert_eq!(ptr.unset_tag(), Tag::Both); /// ``` #[inline] pub fn unset_tag(&mut self) -> Tag { let old_tag = Tag::into_tag(self.instance_ptr); self.instance_ptr = Tag::unset_tag(self.instance_ptr); old_tag } /// Returns a copy of `self` with a [`Tag`] set. /// /// # Examples /// /// ``` /// use sdd::{Ptr, Tag}; /// /// let mut ptr: Ptr = Ptr::null(); /// assert_eq!(ptr.tag(), Tag::None); /// /// let ptr_with_tag = ptr.with_tag(Tag::First); /// assert_eq!(ptr_with_tag.tag(), Tag::First); /// ``` #[inline] #[must_use] pub fn with_tag(self, tag: Tag) -> Self { Self::from(Tag::update_tag(self.instance_ptr, tag)) } /// Returns a copy of `self` with its [`Tag`] erased. 
/// /// # Examples /// /// ``` /// use sdd::{Ptr, Tag}; /// /// let mut ptr: Ptr = Ptr::null(); /// ptr.set_tag(Tag::Second); /// assert_eq!(ptr.tag(), Tag::Second); /// /// let ptr_without_tag = ptr.without_tag(); /// assert_eq!(ptr_without_tag.tag(), Tag::None); /// ``` #[inline] #[must_use] pub fn without_tag(self) -> Self { Self::from(Tag::unset_tag(self.instance_ptr)) } /// Tries to convert itself into a [`Shared`]. /// /// # Examples /// /// ``` /// use sdd::{Guard, Shared}; /// /// let shared: Shared = Shared::new(83); /// let guard = Guard::new(); /// let ptr = shared.get_guarded_ptr(&guard); /// let shared_restored = ptr.get_shared().unwrap(); /// assert_eq!(*shared_restored, 83); /// /// drop(shared); /// drop(shared_restored); /// /// assert!(ptr.get_shared().is_none()); /// ``` #[inline] #[must_use] pub fn get_shared(self) -> Option> { unsafe { if let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr).cast_mut()) { if (*ptr.as_ptr()).try_add_ref(Relaxed) { return Some(Shared::from(ptr)); } } } None } /// Creates a new [`Ptr`] from a raw pointer. #[inline] pub(super) const fn from(ptr: *const RefCounted) -> Self { Self { instance_ptr: ptr, _phantom: std::marker::PhantomData, } } /// Provides a raw pointer to its [`RefCounted`]. #[inline] pub(super) const fn as_underlying_ptr(self) -> *const RefCounted { self.instance_ptr } } impl Clone for Ptr<'_, T> { #[inline] fn clone(&self) -> Self { *self } } impl Copy for Ptr<'_, T> {} impl Default for Ptr<'_, T> { #[inline] fn default() -> Self { Self::null() } } impl Eq for Ptr<'_, T> {} impl PartialEq for Ptr<'_, T> { #[inline] fn eq(&self, other: &Self) -> bool { self.instance_ptr == other.instance_ptr } } impl UnwindSafe for Ptr<'_, T> {} sdd-4.5.3/src/queue.rs000064400000000000000000000403421046102023000126730ustar 00000000000000//! [`Queue`] is a lock-free concurrent first-in-first-out container. use std::fmt::{self, Debug}; use std::iter::FusedIterator; use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed}; use super::linked_list::{LinkedEntry, LinkedList}; use super::{AtomicShared, Guard, Ptr, Shared, Tag}; /// [`Queue`] is a lock-free concurrent first-in-first-out container. pub struct Queue { /// `oldest` points to the oldest entry in the [`Queue`]. oldest: AtomicShared>, /// `newest` *eventually* points to the newest entry in the [`Queue`]. newest: AtomicShared>, } /// An iterator over the entries of a [`Queue`]. /// /// [`Iter`] reads the oldest entry first. pub struct Iter<'g, T> { current: Ptr<'g, LinkedEntry>, guard: &'g Guard, } impl Queue { /// Pushes an instance of `T`. /// /// Returns a [`Shared`] holding a strong reference to the newly pushed entry. /// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let queue: Queue = Queue::default(); /// /// assert_eq!(**queue.push(11), 11); /// ``` #[inline] pub fn push(&self, val: T) -> Shared> { match self.push_if_internal(val, |_| true, &Guard::new()) { Ok(entry) => entry, Err(_) => { unreachable!(); } } } /// Pushes an instance of `T` if the newest entry satisfies the given condition. /// /// # Errors /// /// Returns an error along with the supplied instance if the condition is not met. 
/// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let queue: Queue = Queue::default(); /// /// queue.push(11); /// /// assert!(queue.push_if(17, |e| e.map_or(false, |x| **x == 11)).is_ok()); /// assert!(queue.push_if(29, |e| e.map_or(false, |x| **x == 11)).is_err()); /// ``` #[inline] pub fn push_if>) -> bool>( &self, val: T, cond: F, ) -> Result>, T> { self.push_if_internal(val, cond, &Guard::new()) } /// Returns a guarded reference to the oldest entry. /// /// Returns `None` if the [`Queue`] is empty. The returned reference can survive as long as the /// associated [`Guard`] is alive. /// /// # Examples /// /// ``` /// use sdd::{Guard, Queue}; /// /// let queue: Queue = Queue::default(); /// /// assert!(queue.peek(&Guard::new()).is_none()); /// /// queue.push(37); /// queue.push(3); /// /// assert_eq!(**queue.peek(&Guard::new()).unwrap(), 37); /// ``` #[inline] pub fn peek<'g>(&self, guard: &'g Guard) -> Option<&'g LinkedEntry> { let mut current = self.oldest.load(Acquire, guard); while let Some(oldest_entry) = current.as_ref() { if oldest_entry.is_deleted(Relaxed) { current = self.cleanup_oldest(guard); continue; } return Some(oldest_entry); } None } } impl Queue { /// Creates an empty [`Queue`]. /// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let queue: Queue = Queue::new(); ///``` #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn new() -> Self { Self { oldest: AtomicShared::null(), newest: AtomicShared::null(), } } /// Creates an empty [`Queue`]. #[cfg(feature = "loom")] #[inline] #[must_use] pub fn new() -> Self { Self { oldest: AtomicShared::null(), newest: AtomicShared::null(), } } /// Pushes an instance of `T` without checking the lifetime of `T`. /// /// Returns a [`Shared`] holding a strong reference to the newly pushed entry. /// /// # Safety /// /// `T::drop` can be run after the [`Queue`] is dropped, therefore it is safe only if `T::drop` /// does not access short-lived data or [`std::mem::needs_drop`] is `false` for `T`. /// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let hello = String::from("hello"); /// let queue: Queue<&str> = Queue::default(); /// /// assert_eq!(unsafe { **queue.push_unchecked(hello.as_str()) }, "hello"); /// ``` #[inline] pub unsafe fn push_unchecked(&self, val: T) -> Shared> { match self.push_if_internal(val, |_| true, &Guard::new()) { Ok(entry) => entry, Err(_) => { unreachable!(); } } } /// Pushes an instance of `T` if the newest entry satisfies the given condition without /// checking the lifetime of `T`. /// /// # Errors /// /// Returns an error along with the supplied instance if the condition is not met. /// /// # Safety /// /// `T::drop` can be run after the [`Queue`] is dropped, therefore it is safe only if `T::drop` /// does not access short-lived data or [`std::mem::needs_drop`] is `false` for `T`. /// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let hello = String::from("hello"); /// let queue: Queue<&str> = Queue::default(); /// /// assert!(unsafe { queue.push_if_unchecked(hello.as_str(), |e| e.is_none()).is_ok() }); /// ``` #[inline] pub unsafe fn push_if_unchecked>) -> bool>( &self, val: T, cond: F, ) -> Result>, T> { self.push_if_internal(val, cond, &Guard::new()) } /// Pops the oldest entry. /// /// Returns `None` if the [`Queue`] is empty. 
/// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let queue: Queue = Queue::default(); /// /// queue.push(37); /// queue.push(3); /// queue.push(1); /// /// assert_eq!(queue.pop().map(|e| **e), Some(37)); /// assert_eq!(queue.pop().map(|e| **e), Some(3)); /// assert_eq!(queue.pop().map(|e| **e), Some(1)); /// assert!(queue.pop().is_none()); /// ``` #[inline] pub fn pop(&self) -> Option>> { match self.pop_if(|_| true) { Ok(result) => result, Err(_) => unreachable!(), } } /// Pops the oldest entry if the entry satisfies the given condition. /// /// Returns `None` if the [`Queue`] is empty. /// /// # Errors /// /// Returns an error containing the oldest entry if the given condition is not met. /// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let queue: Queue = Queue::default(); /// /// queue.push(3); /// queue.push(1); /// /// assert!(queue.pop_if(|v| **v == 1).is_err()); /// assert_eq!(queue.pop().map(|e| **e), Some(3)); /// assert_eq!(queue.pop_if(|v| **v == 1).ok().and_then(|e| e).map(|e| **e), Some(1)); /// ``` #[inline] pub fn pop_if) -> bool>( &self, mut cond: F, ) -> Result>>, Shared>> { let guard = Guard::new(); let mut current = self.oldest.load(Acquire, &guard); while !current.is_null() { if let Some(oldest_entry) = current.get_shared() { if !oldest_entry.is_deleted(Relaxed) && !cond(&*oldest_entry) { return Err(oldest_entry); } if oldest_entry.delete_self(Relaxed) { self.cleanup_oldest(&guard); return Ok(Some(oldest_entry)); } } current = self.cleanup_oldest(&guard); } Ok(None) } /// Peeks the oldest entry. /// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let queue: Queue = Queue::default(); /// /// assert!(queue.peek_with(|v| v.is_none())); /// /// queue.push(37); /// queue.push(3); /// /// assert_eq!(queue.peek_with(|v| **v.unwrap()), 37); /// ``` #[inline] pub fn peek_with>) -> R>(&self, reader: F) -> R { let guard = Guard::new(); let mut current = self.oldest.load(Acquire, &guard); while let Some(oldest_entry) = current.as_ref() { if oldest_entry.is_deleted(Relaxed) { current = self.cleanup_oldest(&guard); continue; } return reader(Some(oldest_entry)); } reader(None) } /// Returns the number of entries in the [`Queue`]. /// /// This method iterates over all the entries in the [`Queue`] to count them; therefore, its /// time complexity is `O(N)`. /// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let queue: Queue = Queue::default(); /// assert_eq!(queue.len(), 0); /// /// queue.push(7); /// queue.push(11); /// assert_eq!(queue.len(), 2); /// /// queue.pop(); /// queue.pop(); /// assert_eq!(queue.len(), 0); /// ``` #[inline] pub fn len(&self) -> usize { self.iter(&Guard::new()).count() } /// Returns `true` if the [`Queue`] is empty. /// /// # Examples /// /// ``` /// use sdd::Queue; /// /// let queue: Queue = Queue::default(); /// assert!(queue.is_empty()); /// /// queue.push(7); /// assert!(!queue.is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { self.newest.is_null(Acquire) } /// Returns an [`Iter`]. 
/// /// # Examples /// /// ``` /// use sdd::{Guard, Queue}; /// /// let queue: Queue = Queue::default(); /// assert_eq!(queue.iter(&Guard::new()).count(), 0); /// /// queue.push(7); /// queue.push(11); /// queue.push(17); /// /// let guard = Guard::new(); /// let mut iter = queue.iter(&guard); /// assert_eq!(*iter.next().unwrap(), 7); /// assert_eq!(*iter.next().unwrap(), 11); /// assert_eq!(*iter.next().unwrap(), 17); /// assert!(iter.next().is_none()); /// ``` #[inline] pub fn iter<'g>(&self, guard: &'g Guard) -> Iter<'g, T> { Iter { current: self.cleanup_oldest(guard), guard, } } /// Pushes an entry into the [`Queue`]. fn push_if_internal>) -> bool>( &self, val: T, mut cond: F, guard: &Guard, ) -> Result>, T> { let mut newest_ptr = self.newest.load(Acquire, guard); if newest_ptr.is_null() { // Traverse from the oldest. newest_ptr = self.oldest.load(Acquire, guard); } newest_ptr = Self::traverse(newest_ptr, guard); if !cond(newest_ptr.as_ref()) { // The condition is not met. return Err(val); } let mut new_entry = unsafe { Shared::new_unchecked(LinkedEntry::new(val)) }; loop { let result = if let Some(newest_entry) = newest_ptr.as_ref() { newest_entry.next().compare_exchange( Ptr::null(), (Some(new_entry.clone()), Tag::None), AcqRel, Acquire, guard, ) } else { self.oldest.compare_exchange( newest_ptr, (Some(new_entry.clone()), Tag::None), AcqRel, Acquire, guard, ) }; match result { Ok(_) => { self.newest .swap((Some(new_entry.clone()), Tag::None), AcqRel); if self.oldest.is_null(Acquire) { // The `Queue` was emptied in the meantime. self.newest.swap((None, Tag::None), Acquire); } return Ok(new_entry); } Err((_, actual_ptr)) => { newest_ptr = if actual_ptr.tag() == Tag::First { self.cleanup_oldest(guard) } else if actual_ptr.is_null() { self.oldest.load(Acquire, guard) } else { actual_ptr }; newest_ptr = Self::traverse(newest_ptr, guard); if !cond(newest_ptr.as_ref()) { // The condition is not met. break; } } } } // Extract the instance from the temporary entry. Err(unsafe { new_entry.get_mut().unwrap_unchecked().take_inner() }) } /// Cleans up logically removed entries that are attached to `oldest`. fn cleanup_oldest<'g>(&self, guard: &'g Guard) -> Ptr<'g, LinkedEntry> { let oldest_ptr = self.oldest.load(Acquire, guard); if let Some(oldest_entry) = oldest_ptr.as_ref() { if oldest_entry.is_deleted(Relaxed) { match self.oldest.compare_exchange( oldest_ptr, (oldest_entry.next_shared(Acquire, guard), Tag::None), AcqRel, Acquire, guard, ) { Ok((_, new_ptr)) => { if new_ptr.is_null() { // Reset `newest`. self.newest.swap((None, Tag::None), Acquire); } return new_ptr; } Err((_, actual_ptr)) => { return actual_ptr; } } } } oldest_ptr } /// Traverses the linked list to the end. 
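///
/// Starting from `start`, it follows `next_ptr` links, which also unlink any logically
/// deleted entries on the way, until it reaches an entry whose link is null, and returns a
/// pointer to that last entry.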
#[inline] fn traverse<'g>(start: Ptr<'g, LinkedEntry>, guard: &'g Guard) -> Ptr<'g, LinkedEntry> { let mut current = start; while let Some(entry) = current.as_ref() { let next = entry.next_ptr(Acquire, guard); if next.is_null() { break; } current = next; } current } } impl Clone for Queue { #[inline] fn clone(&self) -> Self { let self_clone = Self::default(); let guard = Guard::new(); let mut current = self.oldest.load(Acquire, &guard); while let Some(entry) = current.as_ref() { let next = entry.next_ptr(Acquire, &guard); let _result = self_clone.push_if_internal((**entry).clone(), |_| true, &guard); current = next; } self_clone } } impl Debug for Queue { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d = f.debug_set(); let guard = Guard::new(); let mut current = self.oldest.load(Acquire, &guard); while let Some(entry) = current.as_ref() { let next = entry.next_ptr(Acquire, &guard); d.entry(entry); current = next; } d.finish() } } impl Default for Queue { #[inline] fn default() -> Self { Self::new() } } impl Drop for Queue { #[inline] fn drop(&mut self) { if !self.oldest.is_null(Relaxed) { let guard = Guard::new(); let mut iter = self.iter(&guard); while let Some(entry) = iter.current.as_ref() { entry.delete_self(Relaxed); iter.next(); } } } } impl FromIterator for Queue { #[inline] fn from_iter>(iter: I) -> Self { let into_iter = iter.into_iter(); let queue = Self::default(); into_iter.for_each(|v| { queue.push(v); }); queue } } impl FusedIterator for Iter<'_, T> {} impl<'g, T> Iterator for Iter<'g, T> { type Item = &'g T; #[inline] fn next(&mut self) -> Option { if let Some(current) = self.current.as_ref() { self.current = current.next_ptr(Acquire, self.guard); Some(current) } else { None } } } sdd-4.5.3/src/ref_counted.rs000064400000000000000000000127471046102023000140540ustar 00000000000000use std::mem::offset_of; use std::ops::Deref; use std::ptr::{self, NonNull, addr_of}; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::{self, Relaxed}; use super::collectible::{Collectible, Link}; use super::collector::Collector; /// [`RefCounted`] stores an instance of type `T`, and a union of a link to the next /// [`Collectible`] or the reference counter. pub(super) struct RefCounted { instance: T, next_or_refcnt: Link, } impl RefCounted { /// Creates a new [`RefCounted`] that allows ownership sharing. #[inline] pub(super) fn new_shared(instance: T) -> NonNull> { let boxed = Box::new(Self { instance, next_or_refcnt: Link::new_shared(), }); unsafe { NonNull::new_unchecked(Box::into_raw(boxed)) } } /// Creates a new [`RefCounted`] that disallows reference counting. /// /// The reference counter field is never used until the instance is retired. #[inline] pub(super) fn new_unique(instance: T) -> NonNull> { let boxed = Box::new(Self { instance, next_or_refcnt: Link::new_unique(), }); unsafe { NonNull::new_unchecked(Box::into_raw(boxed)) } } /// Tries to add a strong reference to the underlying instance. /// /// `order` must be as strong as `Acquire` for the caller to correctly validate the newest /// state of the pointer. #[inline] pub(super) fn try_add_ref(&self, order: Ordering) -> bool { self.ref_cnt() .fetch_update( order, order, |r| { if r & 1 == 1 { Some(r + 2) } else { None } }, ) .is_ok() } /// Returns a mutable reference to the instance if the number of owners is `1`. 
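///
/// The check relies on the reference counter reading exactly `1`, which is the encoding for a
/// single strong owner: `add_ref`/`try_add_ref` increment the counter by `2`, so any other
/// strong reference makes the value greater than `1`.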
#[inline] pub(super) fn get_mut_shared(&mut self) -> Option<&mut T> { if self.ref_cnt().load(Relaxed) == 1 { Some(&mut self.instance) } else { None } } /// Returns a mutable reference to the instance if it is uniquely owned. #[inline] pub(super) const fn get_mut_unique(&mut self) -> &mut T { &mut self.instance } /// Adds a strong reference to the underlying instance. #[inline] pub(super) fn add_ref(&self) { let mut current = self.ref_cnt().load(Relaxed); loop { debug_assert_eq!(current & 1, 1); debug_assert!(current <= usize::MAX - 2, "reference count overflow"); match self .ref_cnt() .compare_exchange_weak(current, current + 2, Relaxed, Relaxed) { Ok(_) => break, Err(actual) => { current = actual; } } } } /// Drops a strong reference to the underlying instance. /// /// Returns `true` if it the last reference was dropped. #[inline] pub(super) fn drop_ref(&self) -> bool { // It does not have to be a load-acquire as everything's synchronized via the global // epoch. let mut current = self.ref_cnt().load(Relaxed); loop { debug_assert_ne!(current, 0); let new = current.saturating_sub(2); match self .ref_cnt() .compare_exchange_weak(current, new, Relaxed, Relaxed) { Ok(_) => break, Err(actual) => { current = actual; } } } current == 1 } /// Returns a pointer to the instance. #[inline] pub(super) const fn inst_ptr(self_ptr: *const Self) -> *const T { if self_ptr.is_null() { ptr::null() } else { unsafe { addr_of!((*self_ptr).instance) } } } /// Returns a non-null pointer to the instance. #[inline] pub(super) const fn inst_non_null_ptr(self_ptr: NonNull) -> NonNull { let offset = offset_of!(Self, instance); unsafe { self_ptr.cast::().add(offset).cast::() } } /// Returns a reference to its reference count. #[inline] pub(super) fn ref_cnt(&self) -> &AtomicUsize { self.next_or_refcnt.ref_cnt() } /// Passes a pointer to [`RefCounted`] to the garbage collector. #[inline] pub(super) fn pass_to_collector(ptr: *mut Self) { // The lifetime of the pointer is extended to `'static`; it is not illegal since the safe // public API does not permit to create a `RefCounted` of a non `'static` type, and those // that allow to do so are all unsafe methods (e.g., `{Owned, Shard}::new_unchecked`) where // the doc explicitly states that the pointed-to value may be dropped at an arbitrary time. #[allow(clippy::transmute_ptr_to_ptr)] let ptr = unsafe { std::mem::transmute::<*mut (dyn Collectible + '_), *mut (dyn Collectible + 'static)>( ptr, ) }; Collector::collect(Collector::current(), ptr); } } impl Deref for RefCounted { type Target = T; #[inline] fn deref(&self) -> &Self::Target { &self.instance } } impl Collectible for RefCounted { #[inline] fn next_ptr(&self) -> Option> { self.next_or_refcnt.next_ptr() } #[inline] fn set_next_ptr(&self, next_ptr: Option>) { self.next_or_refcnt.set_next_ptr(next_ptr); } } sdd-4.5.3/src/shared.rs000064400000000000000000000210361046102023000130140ustar 00000000000000use std::mem::forget; use std::ops::Deref; use std::panic::UnwindSafe; use std::ptr::NonNull; use super::ref_counted::RefCounted; use super::{Guard, Ptr}; /// [`Shared`] is a reference-counted handle to an instance. /// /// The instance is passed to the EBR garbage collector when the last strong reference is dropped. #[derive(Debug)] pub struct Shared { instance_ptr: NonNull>, } impl Shared { /// Creates a new [`Shared`]. 
/// /// The type of the instance must be determined at compile-time, must not contain non-static /// references, and must not be a non-static reference since the instance can theoretically /// survive the process. For instance, `struct Disallowed<'l, T>(&'l T)` is not allowed, /// because an instance of the type cannot outlive `'l` whereas the garbage collector does not /// guarantee that the instance is dropped within `'l`. /// /// # Examples /// /// ``` /// use sdd::Shared; /// /// let shared: Shared = Shared::new(31); /// ``` #[inline] pub fn new(t: T) -> Self { Self { instance_ptr: RefCounted::new_shared(t), } } } impl Shared { /// Creates a new [`Shared`] without checking the lifetime of `T`. /// /// # Safety /// /// `T::drop` can be run after the last strong reference is dropped, therefore it is safe only /// if `T::drop` does not access short-lived data or [`std::mem::needs_drop`] is `false` for /// `T`. Otherwise, the instance must be manually dropped by invoking [`Self::drop_in_place`] /// within the lifetime of `T`. /// /// # Examples /// /// ``` /// use sdd::Shared; /// /// let hello = String::from("hello"); /// let shared: Shared<&str> = unsafe { Shared::new_unchecked(hello.as_str()) }; /// /// assert!(unsafe { shared.drop_in_place() }); /// ``` #[inline] pub unsafe fn new_unchecked(t: T) -> Self { Self { instance_ptr: RefCounted::new_shared(t), } } /// Returns a [`Ptr`] to the instance that may live as long as the supplied [`Guard`]. /// /// # Examples /// /// ``` /// use sdd::{Guard, Shared}; /// /// let shared: Shared = Shared::new(37); /// let guard = Guard::new(); /// let ptr = shared.get_guarded_ptr(&guard); /// drop(shared); /// /// assert_eq!(*ptr.as_ref().unwrap(), 37); /// ``` #[inline] #[must_use] pub const fn get_guarded_ptr<'g>(&self, _guard: &'g Guard) -> Ptr<'g, T> { Ptr::from(self.instance_ptr.as_ptr()) } /// Returns a reference to the instance that may live as long as the supplied [`Guard`]. /// /// # Examples /// /// ``` /// use sdd::{Guard, Shared}; /// /// let shared: Shared = Shared::new(37); /// let guard = Guard::new(); /// let ref_b = shared.get_guarded_ref(&guard); /// drop(shared); /// /// assert_eq!(*ref_b, 37); /// ``` #[inline] #[must_use] pub const fn get_guarded_ref<'g>(&self, _guard: &'g Guard) -> &'g T { unsafe { RefCounted::inst_non_null_ptr(self.instance_ptr).as_ref() } } /// Returns a mutable reference to the instance if the [`Shared`] is holding the only strong /// reference. /// /// # Safety /// /// The method is `unsafe` since there can be a [`Ptr`] to the instance without holding a /// strong reference. /// /// # Examples /// /// ``` /// use sdd::Shared; /// /// let mut shared: Shared = Shared::new(38); /// unsafe { /// *shared.get_mut().unwrap() += 1; /// } /// assert_eq!(*shared, 39); /// ``` #[inline] pub unsafe fn get_mut(&mut self) -> Option<&mut T> { unsafe { self.instance_ptr .as_ptr() .as_mut() .and_then(|r| r.get_mut_shared()) } } /// Provides a raw pointer to the instance. /// /// # Examples /// /// ``` /// use sdd::Shared; /// /// let shared: Shared = Shared::new(10); /// let shared_clone: Shared = shared.clone(); /// /// assert_eq!(shared.as_ptr(), shared_clone.as_ptr()); /// assert_eq!(unsafe { *shared.as_ptr() }, unsafe { *shared_clone.as_ptr() }); /// ``` #[inline] #[must_use] pub const fn as_ptr(&self) -> *const T { RefCounted::inst_non_null_ptr(self.instance_ptr).as_ptr() } /// Provides a raw non-null pointer to the instance. 
/// /// # Examples /// /// ``` /// use sdd::Shared; /// /// let shared: Shared = Shared::new(10); /// let shared_clone: Shared = shared.clone(); /// /// assert_eq!(shared.as_ptr(), shared_clone.as_ptr()); /// assert_eq!(unsafe { *shared.as_non_null_ptr().as_ref() }, unsafe { *shared_clone.as_ptr() }); /// ``` #[inline] #[must_use] pub const fn as_non_null_ptr(&self) -> NonNull { RefCounted::inst_non_null_ptr(self.instance_ptr) } /// Releases the strong reference by passing `self` to the given [`Guard`]. /// /// Returns `true` if the last reference was released. /// /// # Examples /// /// ``` /// use sdd::Shared; /// /// let shared: Shared = Shared::new(47); /// let shared_clone = shared.clone(); /// assert!(!shared.release()); /// assert!(shared_clone.release()); /// ``` #[inline] #[must_use] pub fn release(self) -> bool { let released = if unsafe { (*self.instance_ptr.as_ptr()).drop_ref() } { RefCounted::pass_to_collector(self.instance_ptr.as_ptr()); true } else { false }; forget(self); released } /// Drops the instance immediately if it has held the last reference to the instance. /// /// Returns `true` if the instance was dropped. /// /// # Safety /// /// The caller must ensure that there is no [`Ptr`] pointing to the instance. /// /// # Examples /// /// ``` /// use sdd::Shared; /// use std::sync::atomic::AtomicBool; /// use std::sync::atomic::Ordering::Relaxed; /// /// static DROPPED: AtomicBool = AtomicBool::new(false); /// struct T(&'static AtomicBool); /// impl Drop for T { /// fn drop(&mut self) { /// self.0.store(true, Relaxed); /// } /// } /// /// let shared: Shared = Shared::new(T(&DROPPED)); /// let shared_clone = shared.clone(); /// /// unsafe { /// assert!(!shared.drop_in_place()); /// assert!(!DROPPED.load(Relaxed)); /// assert!(shared_clone.drop_in_place()); /// assert!(DROPPED.load(Relaxed)); /// } /// ``` #[inline] #[must_use] pub unsafe fn drop_in_place(self) -> bool { unsafe { let dropped = if (*self.instance_ptr.as_ptr()).drop_ref() { drop(Box::from_raw(self.instance_ptr.as_ptr())); true } else { false }; forget(self); dropped } } /// Creates a new [`Shared`] from the given pointer. #[inline] pub(super) const fn from(ptr: NonNull>) -> Self { Self { instance_ptr: ptr } } /// Returns a pointer to the [`RefCounted`]. #[inline] pub(super) const fn underlying_ptr(&self) -> *const RefCounted { self.instance_ptr.as_ptr() } } impl AsRef for Shared { #[inline] fn as_ref(&self) -> &T { unsafe { &*self.instance_ptr.as_ptr() } } } impl Clone for Shared { #[inline] fn clone(&self) -> Self { unsafe { (*self.instance_ptr.as_ptr()).add_ref() } Self { instance_ptr: self.instance_ptr, } } } impl Deref for Shared { type Target = T; #[inline] fn deref(&self) -> &Self::Target { self.as_ref() } } impl Drop for Shared { #[inline] fn drop(&mut self) { if unsafe { (*self.instance_ptr.as_ptr()).drop_ref() } { RefCounted::pass_to_collector(self.instance_ptr.as_ptr()); } } } impl<'g, T> TryFrom> for Shared { type Error = Ptr<'g, T>; #[inline] fn try_from(ptr: Ptr<'g, T>) -> Result { if let Some(shared) = ptr.get_shared() { Ok(shared) } else { Err(ptr) } } } unsafe impl Send for Shared {} unsafe impl Sync for Shared {} impl UnwindSafe for Shared {} sdd-4.5.3/src/stack.rs000064400000000000000000000365131046102023000126610ustar 00000000000000//! [`Stack`] is a lock-free concurrent last-in-first-out container. 
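//!
//! A minimal usage sketch, mirroring the method-level examples below:
//!
//! ```
//! use sdd::Stack;
//!
//! let stack: Stack<usize> = Stack::default();
//! stack.push(7);
//! stack.push(11);
//!
//! // The newest entry is popped first.
//! assert_eq!(stack.pop().map(|e| **e), Some(11));
//! assert_eq!(stack.pop().map(|e| **e), Some(7));
//! assert!(stack.pop().is_none());
//! ```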
use std::fmt::{self, Debug}; use std::iter::FusedIterator; use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed}; use super::linked_list::{LinkedEntry, LinkedList}; use super::{AtomicShared, Guard, Ptr, Shared, Tag}; /// [`Stack`] is a lock-free concurrent last-in-first-out container. pub struct Stack { /// `newest` points to the newest entry in the [`Stack`]. newest: AtomicShared>, } /// An iterator over the entries of a [`Stack`]. /// /// [`Iter`] reads the newest entry first. pub struct Iter<'g, T> { current: Ptr<'g, LinkedEntry>, guard: &'g Guard, } impl Stack { /// Pushes an instance of `T`. /// /// Returns a [`Shared`] holding a strong reference to the newly pushed entry. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let stack: Stack = Stack::default(); /// /// assert_eq!(**stack.push(11), 11); /// ``` #[inline] pub fn push(&self, val: T) -> Shared> { match self.push_if_internal(val, |_| true, &Guard::new()) { Ok(entry) => entry, Err(_) => { unreachable!(); } } } /// Pushes an instance of `T` if the newest entry satisfies the given condition. /// /// # Errors /// /// Returns an error containing the supplied instance if the condition is not met. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let stack: Stack = Stack::default(); /// /// stack.push(11); /// /// assert!(stack.push_if(17, |e| e.map_or(false, |x| **x == 11)).is_ok()); /// assert!(stack.push_if(29, |e| e.map_or(false, |x| **x == 11)).is_err()); /// ``` #[inline] pub fn push_if>) -> bool>( &self, val: T, cond: F, ) -> Result>, T> { self.push_if_internal(val, cond, &Guard::new()) } /// Returns a guarded reference to the newest entry. /// /// Returns `None` if the [`Stack`] is empty. The returned reference can survive as long as the /// associated [`Guard`] is alive. /// /// # Examples /// /// ``` /// use sdd::{Guard, Stack}; /// /// let stack: Stack = Stack::default(); /// /// assert!(stack.peek(&Guard::new()).is_none()); /// /// stack.push(37); /// stack.push(3); /// /// assert_eq!(**stack.peek(&Guard::new()).unwrap(), 3); /// ``` #[inline] pub fn peek<'g>(&self, guard: &'g Guard) -> Option<&'g LinkedEntry> { self.cleanup_newest(self.newest.load(Acquire, guard), guard) .as_ref() } } impl Stack { /// Creates an empty [`Stack`]. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let stack: Stack = Stack::new(); ///``` #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn new() -> Self { Self { newest: AtomicShared::null(), } } /// Creates an empty [`Stack`]. #[cfg(feature = "loom")] #[inline] #[must_use] pub fn new() -> Self { Self { newest: AtomicShared::null(), } } /// Pushes an instance of `T` without checking the lifetime of `T`. /// /// Returns a [`Shared`] holding a strong reference to the newly pushed entry. /// /// # Safety /// /// `T::drop` can be run after the [`Stack`] is dropped, therefore it is safe only if `T::drop` /// does not access short-lived data or [`std::mem::needs_drop`] is `false` for `T`. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let hello = String::from("hello"); /// let stack: Stack<&str> = Stack::default(); /// /// assert_eq!(unsafe { **stack.push_unchecked(hello.as_str()) }, "hello"); /// ``` #[inline] pub unsafe fn push_unchecked(&self, val: T) -> Shared> { match self.push_if_internal(val, |_| true, &Guard::new()) { Ok(entry) => entry, Err(_) => { unreachable!(); } } } /// Pushes an instance of `T` if the newest entry satisfies the given condition without /// checking the lifetime of `T`. 
/// /// # Errors /// /// Returns an error containing the supplied instance if the condition is not met. /// /// # Safety /// /// `T::drop` can be run after the [`Stack`] is dropped, therefore it is safe only if `T::drop` /// does not access short-lived data or [`std::mem::needs_drop`] is `false` for `T`. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let hello = String::from("hello"); /// let stack: Stack<&str> = Stack::default(); /// /// assert!(unsafe { stack.push_if_unchecked(hello.as_str(), |e| e.is_none()).is_ok() }); /// ``` #[inline] pub unsafe fn push_if_unchecked>) -> bool>( &self, val: T, cond: F, ) -> Result>, T> { self.push_if_internal(val, cond, &Guard::new()) } /// Pops the newest entry. /// /// Returns `None` if the [`Stack`] is empty. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let stack: Stack = Stack::default(); /// /// stack.push(37); /// stack.push(3); /// stack.push(1); /// /// assert_eq!(stack.pop().map(|e| **e), Some(1)); /// assert_eq!(stack.pop().map(|e| **e), Some(3)); /// assert_eq!(stack.pop().map(|e| **e), Some(37)); /// assert!(stack.pop().is_none()); /// ``` #[inline] pub fn pop(&self) -> Option>> { match self.pop_if(|_| true) { Ok(result) => result, Err(_) => unreachable!(), } } /// Pops all the entries at once and returns them as a new [`Stack`]. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let stack: Stack = Stack::default(); /// /// stack.push(37); /// stack.push(3); /// /// let popped = stack.pop_all(); /// /// stack.push(1); /// /// assert_eq!(stack.pop().map(|e| **e), Some(1)); /// assert!(stack.pop().is_none()); /// assert!(stack.is_empty()); /// /// assert_eq!(popped.pop().map(|e| **e), Some(3)); /// assert_eq!(popped.pop().map(|e| **e), Some(37)); /// assert!(popped.pop().is_none()); /// ``` #[inline] #[must_use] pub fn pop_all(&self) -> Self { let head = self.newest.swap((None, Tag::None), AcqRel).0; Self { newest: head.map_or_else(AtomicShared::default, AtomicShared::from), } } /// Pops the newest entry if the entry satisfies the given condition. /// /// Returns `None` if the [`Stack`] is empty. /// /// # Errors /// /// Returns an error along with the newest entry if the given condition is not met. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let stack: Stack = Stack::default(); /// /// stack.push(3); /// stack.push(1); /// /// assert!(stack.pop_if(|v| **v == 3).is_err()); /// assert_eq!(stack.pop().map(|e| **e), Some(1)); /// assert_eq!(stack.pop_if(|v| **v == 3).ok().and_then(|e| e).map(|e| **e), Some(3)); /// /// assert!(stack.is_empty()); /// ``` #[inline] pub fn pop_if) -> bool>( &self, mut cond: F, ) -> Result>>, Shared>> { let guard = Guard::new(); let mut newest_ptr = self.cleanup_newest(self.newest.load(Acquire, &guard), &guard); while !newest_ptr.is_null() { if let Some(newest_entry) = newest_ptr.get_shared() { if !newest_entry.is_deleted(Relaxed) && !cond(&*newest_entry) { return Err(newest_entry); } if newest_entry.delete_self(Relaxed) { self.cleanup_newest(newest_ptr, &guard); return Ok(Some(newest_entry)); } } newest_ptr = self.cleanup_newest(newest_ptr, &guard); } Ok(None) } /// Peeks the newest entry. 
/// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let stack: Stack = Stack::default(); /// /// assert!(stack.peek_with(|v| v.is_none())); /// /// stack.push(37); /// stack.push(3); /// /// assert_eq!(stack.peek_with(|v| **v.unwrap()), 3); /// ``` #[inline] pub fn peek_with>) -> R>(&self, reader: F) -> R { let guard = Guard::new(); reader( self.cleanup_newest(self.newest.load(Acquire, &guard), &guard) .as_ref(), ) } /// Returns the number of entries in the [`Stack`]. /// /// This method iterates over all the entries in the [`Stack`] to count them, therefore its /// time complexity is `O(N)`. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let stack: Stack = Stack::default(); /// assert_eq!(stack.len(), 0); /// /// stack.push(7); /// stack.push(11); /// assert_eq!(stack.len(), 2); /// /// stack.pop(); /// stack.pop(); /// assert_eq!(stack.len(), 0); /// ``` #[inline] pub fn len(&self) -> usize { self.iter(&Guard::new()).count() } /// Returns `true` if the [`Stack`] is empty. /// /// # Examples /// /// ``` /// use sdd::Stack; /// /// let stack: Stack = Stack::default(); /// assert!(stack.is_empty()); /// /// stack.push(7); /// assert!(!stack.is_empty()); /// ``` #[inline] pub fn is_empty(&self) -> bool { let guard = Guard::new(); self.cleanup_newest(self.newest.load(Acquire, &guard), &guard) .is_null() } /// Returns an [`Iter`]. /// /// # Examples /// /// ``` /// use sdd::{Guard, Stack}; /// /// let stack: Stack = Stack::default(); /// assert_eq!(stack.iter(&Guard::new()).count(), 0); /// /// stack.push(7); /// stack.push(11); /// stack.push(17); /// /// let guard = Guard::new(); /// let mut iter = stack.iter(&guard); /// assert_eq!(*iter.next().unwrap(), 17); /// assert_eq!(*iter.next().unwrap(), 11); /// assert_eq!(*iter.next().unwrap(), 7); /// assert!(iter.next().is_none()); /// ``` #[inline] pub fn iter<'g>(&self, guard: &'g Guard) -> Iter<'g, T> { Iter { current: self.cleanup_newest(self.newest.load(Acquire, guard), guard), guard, } } /// Pushes an entry into the [`Stack`]. fn push_if_internal>) -> bool>( &self, val: T, mut cond: F, guard: &Guard, ) -> Result>, T> { let mut newest_ptr = self.cleanup_newest(self.newest.load(Acquire, guard), guard); if !cond(newest_ptr.as_ref()) { // The condition is not met. return Err(val); } let mut new_entry = unsafe { Shared::new_unchecked(LinkedEntry::new(val)) }; loop { new_entry .next() .swap((newest_ptr.get_shared(), Tag::None), Acquire); let result = self.newest.compare_exchange( newest_ptr, (Some(new_entry.clone()), Tag::None), AcqRel, Acquire, guard, ); match result { Ok(_) => return Ok(new_entry), Err((_, actual_ptr)) => { newest_ptr = self.cleanup_newest(actual_ptr, guard); if !cond(newest_ptr.as_ref()) { // The condition is not met. break; } } } } // Extract the instance from the temporary entry. Err(unsafe { new_entry.get_mut().unwrap_unchecked().take_inner() }) } /// Cleans up logically removed entries that are attached to `newest`. 
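///
/// It repeatedly attempts to compare-and-swap `newest` past entries that have been marked as
/// deleted, returning a pointer to the first live entry, or a null pointer if the [`Stack`]
/// has become empty.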
fn cleanup_newest<'g>( &self, mut newest_ptr: Ptr<'g, LinkedEntry>, guard: &'g Guard, ) -> Ptr<'g, LinkedEntry> { while let Some(newest_entry) = newest_ptr.as_ref() { if newest_entry.is_deleted(Relaxed) { match self.newest.compare_exchange( newest_ptr, (newest_entry.next_shared(Acquire, guard), Tag::None), AcqRel, Acquire, guard, ) { Ok((_, ptr)) | Err((_, ptr)) => newest_ptr = ptr, } } else { break; } } newest_ptr } } impl Clone for Stack { #[inline] fn clone(&self) -> Self { let self_clone = Self::default(); let guard = Guard::new(); let mut current = self.newest.load(Acquire, &guard); let mut oldest: Option>> = None; while let Some(entry) = current.as_ref() { let new_entry = unsafe { Shared::new_unchecked(LinkedEntry::new((**entry).clone())) }; if let Some(oldest) = oldest.take() { oldest .next() .swap((Some(new_entry.clone()), Tag::None), Acquire); } else { self_clone .newest .swap((Some(new_entry.clone()), Tag::None), Acquire); } oldest.replace(new_entry); current = entry.next_ptr(Acquire, &guard); } self_clone } } impl Debug for Stack { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d = f.debug_set(); let guard = Guard::new(); let mut current = self.newest.load(Acquire, &guard); while let Some(entry) = current.as_ref() { let next = entry.next_ptr(Acquire, &guard); d.entry(entry); current = next; } d.finish() } } impl Default for Stack { #[inline] fn default() -> Self { Self::new() } } impl Drop for Stack { #[inline] fn drop(&mut self) { if !self.newest.is_null(Relaxed) { let guard = Guard::new(); let mut iter = self.iter(&guard); while let Some(entry) = iter.current.as_ref() { entry.delete_self(Relaxed); iter.next(); } } } } impl FromIterator for Stack { #[inline] fn from_iter>(iter: I) -> Self { let into_iter = iter.into_iter(); let stack = Self::default(); into_iter.for_each(|v| { stack.push(v); }); stack } } impl FusedIterator for Iter<'_, T> {} impl<'g, T> Iterator for Iter<'g, T> { type Item = &'g T; #[inline] fn next(&mut self) -> Option { if let Some(current) = self.current.as_ref() { self.current = current.next_ptr(Acquire, self.guard); Some(current) } else { None } } } sdd-4.5.3/src/tag.rs000064400000000000000000000035271046102023000123260ustar 00000000000000use std::cmp::PartialEq; /// [`Tag`] is a four-state `Enum` that can be embedded in a pointer as the two least /// significant bits of the pointer value. #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] pub enum Tag { /// None tagged. None, /// The first bit is tagged. First, /// The second bit is tagged. Second, /// Both bits are tagged. Both, } impl Tag { /// Interprets the [`Tag`] as an integer. #[inline] pub(super) const fn value(self) -> usize { match self { Self::None => 0, Self::First => 1, Self::Second => 2, Self::Both => 3, } } /// Returns the tag embedded in the pointer. #[inline] pub(super) fn into_tag

<P>(ptr: *const P) -> Self { match ptr.addr() & 3 { 0 => Tag::None, 1 => Tag::First, 2 => Tag::Second, _ => Tag::Both, } } /// Sets a tag, overwriting any existing tag in the pointer. #[inline] pub(super) fn update_tag<P>

(ptr: *const P, tag: Tag) -> *const P { ptr.map_addr(|addr| (addr & (!3)) | tag.value()) } /// Returns the pointer with the tag bits erased. #[inline] pub(super) fn unset_tag<P>

(ptr: *const P) -> *const P { ptr.map_addr(|addr| addr & (!3)) } } impl TryFrom for Tag { type Error = u8; #[inline] fn try_from(val: u8) -> Result { match val { 0 => Ok(Tag::None), 1 => Ok(Tag::First), 2 => Ok(Tag::Second), 3 => Ok(Tag::Both), _ => Err(val), } } } impl From for u8 { #[inline] fn from(t: Tag) -> Self { match t { Tag::None => 0, Tag::First => 1, Tag::Second => 2, Tag::Both => 3, } } } sdd-4.5.3/src/tests/models.rs000064400000000000000000000046501046102023000141760ustar 00000000000000use std::sync::atomic::Ordering::Relaxed; use std::sync::{Arc, Mutex}; use loom::sync::atomic::{AtomicBool, AtomicUsize}; use loom::thread::{spawn, yield_now}; use crate::{AtomicOwned, AtomicShared, Guard, suspend}; struct A(AtomicBool, Arc); impl Drop for A { fn drop(&mut self) { self.0.store(true, Relaxed); self.1.fetch_add(1, Relaxed); } } static SERIALIZER: Mutex<()> = Mutex::new(()); #[test] fn ebr_owned() { let _guard = SERIALIZER.lock().unwrap(); loom::model(|| { let drop_count = Arc::new(AtomicUsize::new(0)); let data_owned = AtomicOwned::new(A(AtomicBool::new(false), drop_count.clone())); let guard = Guard::new(); let ptr = data_owned.load(Relaxed, &guard); let thread = spawn(move || { let guard = Guard::new(); guard.accelerate(); let ptr = data_owned.load(Relaxed, &guard); drop(data_owned); assert!(!ptr.as_ref().unwrap().0.load(Relaxed)); drop(guard); while drop_count.load(Relaxed) != 1 { Guard::new().accelerate(); yield_now(); } assert!(suspend()); }); assert!(!ptr.as_ref().unwrap().0.load(Relaxed)); drop(guard); assert!(thread.join().is_ok()); assert!(suspend()); }); } #[test] fn ebr_shared() { let _guard = SERIALIZER.lock().unwrap(); loom::model(|| { let drop_count = Arc::new(AtomicUsize::new(0)); let data_shared = AtomicShared::new(A(AtomicBool::new(false), drop_count.clone())); let guard = Guard::new(); let ptr = data_shared.load(Relaxed, &guard); let thread = spawn(move || { let data_shared_clone = data_shared.get_shared(Relaxed, &Guard::new()).unwrap(); drop(data_shared); let guard = Guard::new(); guard.accelerate(); let ptr = data_shared_clone.get_guarded_ptr(&guard); drop(data_shared_clone); assert!(!ptr.as_ref().unwrap().0.load(Relaxed)); drop(guard); while drop_count.load(Relaxed) != 1 { Guard::new().accelerate(); yield_now(); } assert!(suspend()); }); assert!(!ptr.as_ref().unwrap().0.load(Relaxed)); drop(guard); assert!(thread.join().is_ok()); assert!(suspend()); }); } sdd-4.5.3/src/tests/unit_tests.rs000064400000000000000000001042571046102023000151200ustar 00000000000000use std::ops::Deref; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::rc::Rc; use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; use std::sync::atomic::{AtomicBool, AtomicUsize}; use std::sync::{Arc, Barrier}; use std::thread; use crate::collector::Collector; use crate::{ AtomicOwned, AtomicShared, Bag, Guard, Owned, Ptr, Queue, Shared, Stack, Tag, bag, suspend, }; static_assertions::assert_eq_size!(Guard, usize); static_assertions::assert_eq_size!(Option, usize); static_assertions::assert_impl_all!(AtomicShared: Send, Sync, RefUnwindSafe, UnwindSafe); static_assertions::assert_impl_all!(Guard: RefUnwindSafe, UnwindSafe); static_assertions::assert_impl_all!(Ptr: RefUnwindSafe, UnwindSafe); static_assertions::assert_impl_all!(Shared: Send, Sync, RefUnwindSafe, UnwindSafe); static_assertions::assert_not_impl_all!(AtomicShared<*const u8>: Send, Sync, RefUnwindSafe, UnwindSafe); static_assertions::assert_not_impl_all!(Collector: Send, Sync); static_assertions::assert_not_impl_all!(Guard: 
#[test]
fn deferred() {
    static EXECUTED: AtomicBool = AtomicBool::new(false);
    let guard = Guard::new();
    guard.defer_execute(|| EXECUTED.store(true, Relaxed));
    drop(guard);
    while !EXECUTED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn shared() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let mut shared = Shared::new(A(AtomicUsize::new(10), 10, &DESTROYED));
    if let Some(mut_ref) = unsafe { shared.get_mut() } {
        mut_ref.1 += 1;
    }
    shared.0.fetch_add(1, Relaxed);
    assert_eq!(shared.deref().0.load(Relaxed), 11);
    assert_eq!(shared.deref().1, 11);
    let mut shared_clone = shared.clone();
    assert!(unsafe { shared_clone.get_mut().is_none() });
    shared_clone.0.fetch_add(1, Relaxed);
    assert_eq!(shared_clone.deref().0.load(Relaxed), 12);
    assert_eq!(shared_clone.deref().1, 11);
    let mut shared_clone_again = shared_clone.clone();
    assert!(unsafe { shared_clone_again.get_mut().is_none() });
    assert_eq!(shared_clone_again.deref().0.load(Relaxed), 12);
    assert_eq!(shared_clone_again.deref().1, 11);
    drop(shared);
    assert!(!DESTROYED.load(Relaxed));
    assert!(unsafe { shared_clone_again.get_mut().is_none() });
    drop(shared_clone);
    assert!(!DESTROYED.load(Relaxed));
    assert!(unsafe { shared_clone_again.get_mut().is_some() });
    drop(shared_clone_again);
    while !DESTROYED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn owned() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let mut owned = Owned::new(A(AtomicUsize::new(10), 10, &DESTROYED));
    unsafe {
        *owned.get_mut().0.get_mut() += 2;
        owned.get_mut().1 += 2;
    }
    assert_eq!(owned.deref().0.load(Relaxed), 12);
    assert_eq!(owned.deref().1, 12);
    let guard = Guard::new();
    let ptr = owned.get_guarded_ptr(&guard);
    assert!(ptr.get_shared().is_none());
    drop(owned);
    assert!(!DESTROYED.load(Relaxed));
    drop(guard);
    while !DESTROYED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}
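// Guarded read sketch: a `Ptr` loaded through a `Guard` stays dereferenceable for the
// lifetime of that `Guard`, which is the invariant the tests above and below rely on.
// The `usize` payload is arbitrary.
#[test]
fn guarded_read_sketch() {
    let atomic_shared = AtomicShared::new(11_usize);
    let guard = Guard::new();
    let ptr = atomic_shared.load(Relaxed, &guard);
    assert_eq!(ptr.as_ref(), Some(&11));
}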
#[test]
fn sendable() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let shared = Shared::new(A(AtomicUsize::new(14), 14, &DESTROYED));
    let owned = Owned::new(A(AtomicUsize::new(15), 15, &DESTROYED));
    let shared_clone = shared.clone();
    let thread = std::thread::spawn(move || {
        assert_eq!(shared_clone.0.load(Relaxed), shared_clone.1);
        assert_eq!(owned.1, 15);
    });
    assert!(thread.join().is_ok());
    assert_eq!(shared.0.load(Relaxed), shared.1);
}

#[test]
fn accelerate() {
    let current_epoch = Guard::new().epoch();
    let target_epoch = current_epoch.next().next().next().next().next();
    let thread = std::thread::spawn(move || {
        loop {
            let guard = Guard::new();
            if guard.epoch() == target_epoch {
                break;
            }
            guard.accelerate();
            thread::yield_now();
        }
    });
    loop {
        let guard = Guard::new();
        if guard.epoch() == target_epoch {
            break;
        }
        guard.accelerate();
    }
    assert!(thread.join().is_ok());
}

#[test]
fn shared_send() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let shared = Shared::new(A(AtomicUsize::new(14), 14, &DESTROYED));
    let shared_clone = shared.clone();
    let thread = std::thread::spawn(move || {
        assert_eq!(shared_clone.0.load(Relaxed), 14);
        unsafe {
            assert!(!shared_clone.drop_in_place());
        }
    });
    assert!(thread.join().is_ok());
    assert_eq!(shared.0.load(Relaxed), 14);
    unsafe {
        assert!(shared.drop_in_place());
    }
    assert!(DESTROYED.load(Relaxed));
}

#[test]
fn shared_nested() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let nested_shared = Shared::new(Shared::new(A(AtomicUsize::new(10), 10, &DESTROYED)));
    assert!(!DESTROYED.load(Relaxed));
    drop(nested_shared);
    while !DESTROYED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn shared_nested_thread() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let thread = std::thread::spawn(move || {
        let nested_shared = Shared::new(Shared::new(A(AtomicUsize::new(10), 10, &DESTROYED)));
        assert!(!DESTROYED.load(Relaxed));
        drop(nested_shared);
    });
    assert!(thread.join().is_ok());
    while !DESTROYED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn owned_nested_unchecked() {
    let nested_owned = Owned::new(C(Owned::new(C(Owned::new(11)))));
    assert_eq!(*(nested_owned.0.0), 11);
}

#[test]
fn atomic_shared() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let atomic_shared = AtomicShared::new(A(AtomicUsize::new(10), 10, &DESTROYED));
    assert!(!DESTROYED.load(Relaxed));
    let guard = Guard::new();
    let atomic_shared_clone = atomic_shared.clone(Relaxed, &guard);
    assert_eq!(
        atomic_shared_clone
            .load(Relaxed, &guard)
            .as_ref()
            .unwrap()
            .1,
        10
    );
    drop(atomic_shared);
    assert!(!DESTROYED.load(Relaxed));
    atomic_shared_clone.update_tag_if(Tag::Second, |_| true, Relaxed, Relaxed);
    drop(atomic_shared_clone);
    drop(guard);
    while !DESTROYED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn atomic_owned() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let atomic_owned = AtomicOwned::new(A(AtomicUsize::new(10), 10, &DESTROYED));
    assert!(!DESTROYED.load(Relaxed));
    let guard = Guard::new();
    let ptr = atomic_owned.load(Relaxed, &guard);
    assert_eq!(ptr.as_ref().map(|a| a.1), Some(10));
    atomic_owned.update_tag_if(Tag::Second, |_| true, Relaxed, Relaxed);
    drop(atomic_owned);
    assert_eq!(ptr.as_ref().map(|a| a.1), Some(10));
    drop(guard);
    while !DESTROYED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}
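// Pointer-tagging sketch: `update_tag_if` stores a `Tag` in the unused low bits of the
// pointer held by an `AtomicShared`, and `Ptr::tag` reads it back without touching the
// pointee. The `usize` payload is arbitrary.
#[test]
fn tag_update_sketch() {
    let atomic_shared = AtomicShared::new(7_usize);
    atomic_shared.update_tag_if(Tag::First, |_| true, Relaxed, Relaxed);
    let guard = Guard::new();
    assert!(atomic_shared.load(Relaxed, &guard).tag() == Tag::First);
}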
#[test]
fn atomic_shared_send() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let atomic_shared = AtomicShared::new(A(AtomicUsize::new(17), 17, &DESTROYED));
    assert!(!DESTROYED.load(Relaxed));
    let thread = std::thread::spawn(move || {
        let guard = Guard::new();
        let ptr = atomic_shared.load(Relaxed, &guard);
        assert_eq!(ptr.as_ref().unwrap().0.load(Relaxed), 17);
    });
    assert!(thread.join().is_ok());
    while !DESTROYED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn atomic_shared_creation() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let atomic_shared = AtomicShared::new(A(AtomicUsize::new(11), 11, &DESTROYED));
    assert!(!DESTROYED.load(Relaxed));
    let guard = Guard::new();
    let shared = atomic_shared.get_shared(Relaxed, &guard);
    drop(atomic_shared);
    assert!(!DESTROYED.load(Relaxed));
    if let Some(shared) = shared {
        assert_eq!(shared.1, 11);
        assert!(!DESTROYED.load(Relaxed));
    }
    drop(guard);
    while !DESTROYED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn atomic_shared_conversion() {
    static DESTROYED: AtomicBool = AtomicBool::new(false);
    let atomic_shared = AtomicShared::new(A(AtomicUsize::new(11), 11, &DESTROYED));
    assert!(!DESTROYED.load(Relaxed));
    let guard = Guard::new();
    let shared = atomic_shared.into_shared(Relaxed);
    assert!(!DESTROYED.load(Relaxed));
    if let Some(shared) = shared {
        assert_eq!(shared.1, 11);
        assert!(!DESTROYED.load(Relaxed));
    }
    drop(guard);
    while !DESTROYED.load(Relaxed) {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn reclaim_collector() {
    static DEALLOCATED: AtomicUsize = AtomicUsize::new(0);
    let num_threads = 16;
    let num_iter = 32;
    for _ in 0..num_iter {
        assert!(suspend());
        thread::scope(|s| {
            for _ in 0..num_threads {
                assert!(
                    s.spawn(|| {
                        let owned = Owned::new(B(&DEALLOCATED));
                        assert_ne!(owned.0.load(Relaxed), usize::MAX);
                    })
                    .join()
                    .is_ok()
                );
            }
        });
        while DEALLOCATED.load(Relaxed) != num_threads {
            Guard::new().accelerate();
            thread::yield_now();
        }
        DEALLOCATED.store(0, Relaxed);
    }
}

#[test]
fn reclaim_collector_nested() {
    static DEALLOCATED: AtomicUsize = AtomicUsize::new(0);
    let num_threads = if cfg!(miri) { 4 } else { 16 };
    let num_iter = if cfg!(miri) { 4 } else { 16 };
    for _ in 0..num_iter {
        assert!(suspend());
        thread::scope(|s| {
            let threads: Vec<_> = (0..num_threads)
                .map(|_| {
                    s.spawn(|| {
                        let guard = Guard::new();
                        let owned_shared = Owned::new(Shared::new(B(&DEALLOCATED)));
                        assert_ne!(
                            owned_shared
                                .get_guarded_ptr(&guard)
                                .as_ref()
                                .unwrap()
                                .0
                                .load(Relaxed),
                            usize::MAX
                        );
                        let owned = Owned::new(B(&DEALLOCATED));
                        assert_ne!(
                            owned
                                .get_guarded_ptr(&guard)
                                .as_ref()
                                .unwrap()
                                .0
                                .load(Relaxed),
                            usize::MAX
                        );
                    })
                })
                .collect();
            for t in threads {
                assert!(t.join().is_ok());
            }
        });
        while DEALLOCATED.load(Relaxed) != num_threads * 2 {
            Guard::new().accelerate();
            thread::yield_now();
        }
        DEALLOCATED.store(0, Relaxed);
    }
}
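// `compare_exchange` sketch: on failure the passed `Shared` is handed back together with
// the currently stored pointer (see `atomic_shared_parallel` below), so the new allocation
// is not lost. The single-threaded exchange below is expected to succeed.
#[test]
fn compare_exchange_sketch() {
    let atomic_shared = AtomicShared::new(String::from("a"));
    let guard = Guard::new();
    let ptr = atomic_shared.load(Relaxed, &guard);
    assert!(
        atomic_shared
            .compare_exchange(
                ptr,
                (Some(Shared::new(String::from("b"))), Tag::None),
                AcqRel,
                Acquire,
                &guard,
            )
            .is_ok()
    );
}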
#[test]
fn atomic_shared_parallel() {
    let atomic_shared: Shared<AtomicShared<String>> =
        Shared::new(AtomicShared::new(String::from("How are you?")));
    let mut threads = Vec::new();
    let concurrency = if cfg!(miri) { 4 } else { 16 };
    for _ in 0..concurrency {
        let atomic_shared = atomic_shared.clone();
        threads.push(thread::spawn(move || {
            for _ in 0..concurrency {
                let guard = Guard::new();
                let mut ptr = (*atomic_shared).load(Acquire, &guard);
                assert!(ptr.tag() == Tag::None || ptr.tag() == Tag::Second);
                if let Some(str_ref) = ptr.as_ref() {
                    assert!(str_ref == "How are you?" || str_ref == "How can I help you?");
                }
                let converted: Result<Shared<String>, _> = Shared::try_from(ptr);
                if let Ok(shared) = converted {
                    assert!(*shared == "How are you?" || *shared == "How can I help you?");
                }
                while let Err((passed, current)) = atomic_shared.compare_exchange(
                    ptr,
                    (
                        Some(Shared::new(String::from("How can I help you?"))),
                        Tag::Second,
                    ),
                    AcqRel,
                    Acquire,
                    &guard,
                ) {
                    if let Some(shared) = passed {
                        assert!(*shared == "How can I help you?");
                    }
                    ptr = current;
                    if let Some(str_ref) = ptr.as_ref() {
                        assert!(str_ref == "How are you?" || str_ref == "How can I help you?");
                    }
                    assert!(ptr.tag() == Tag::None || ptr.tag() == Tag::Second);
                }
                assert!(!suspend());
                drop(guard);
                assert!(suspend());
                atomic_shared.update_tag_if(Tag::None, |_| true, Relaxed, Relaxed);
                let guard = Guard::new();
                ptr = (*atomic_shared).load(Acquire, &guard);
                assert!(ptr.tag() == Tag::None || ptr.tag() == Tag::Second);
                if let Some(str_ref) = ptr.as_ref() {
                    assert!(str_ref == "How are you?" || str_ref == "How can I help you?");
                }
                drop(guard);
                let (old, _) = atomic_shared.swap(
                    (Some(Shared::new(String::from("How are you?"))), Tag::Second),
                    AcqRel,
                );
                if let Some(shared) = old {
                    assert!(*shared == "How are you?" || *shared == "How can I help you?");
                }
            }
        }));
    }
    for t in threads {
        assert!(t.join().is_ok());
    }
}

#[test]
fn atomic_shared_clone() {
    let atomic_shared: Shared<AtomicShared<String>> =
        Shared::new(AtomicShared::new(String::from("How are you?")));
    let mut threads = Vec::new();
    for t in 0..4 {
        let atomic_shared = atomic_shared.clone();
        threads.push(thread::spawn(move || {
            let num_iter = if cfg!(miri) { 16 } else { 256 };
            for i in 0..num_iter {
                if t == 0 {
                    let tag = if i % 3 == 0 {
                        Tag::First
                    } else if i % 2 == 0 {
                        Tag::Second
                    } else {
                        Tag::None
                    };
                    let (old, _) = atomic_shared.swap(
                        (Some(Shared::new(String::from("How are you?"))), tag),
                        Release,
                    );
                    assert!(old.is_some());
                    if let Some(shared) = old {
                        assert!(*shared == "How are you?");
                    }
                } else {
                    let (shared_clone, _) = (*atomic_shared)
                        .clone(Acquire, &Guard::new())
                        .swap((None, Tag::First), Release);
                    assert!(shared_clone.is_some());
                    if let Some(shared) = shared_clone {
                        assert!(*shared == "How are you?");
                    }
                    let shared_clone = atomic_shared.get_shared(Acquire, &Guard::new());
                    assert!(shared_clone.is_some());
                    if let Some(shared) = shared_clone {
                        assert!(*shared == "How are you?");
                    }
                }
            }
        }));
    }
    for t in threads {
        assert!(t.join().is_ok());
    }
}

#[test]
fn bag_reclaim() {
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    for workload_size in [2, 18, 32, 40, 120] {
        let mut bag: Bag<R> = Bag::default();
        for _ in 0..workload_size {
            bag.push(R::new(&INST_CNT, 0, 0));
        }
        assert_eq!(INST_CNT.load(Relaxed), workload_size);
        assert_eq!(bag.iter_mut().count(), workload_size);
        bag.iter_mut().for_each(|e| {
            *e = R::new(&INST_CNT, 0, 0);
        });
        for _ in 0..workload_size / 2 {
            bag.pop();
        }
        assert_eq!(INST_CNT.load(Relaxed), workload_size / 2);
        drop(bag);
        assert_eq!(INST_CNT.load(Relaxed), 0);
    }
}

#[test]
fn bag_from_iter() {
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let workload_size = 16;
    let bag = (0..workload_size)
        .map(|_| R::new(&INST_CNT, 0, 0))
        .collect::<Bag<R>>();
    assert_eq!(bag.len(), workload_size);
    drop(bag);
    assert_eq!(INST_CNT.load(Relaxed), 0);
}
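// `Bag` sketch: entries are unordered, and `pop_all` folds every remaining element into an
// accumulator; the closure shape mirrors its use in `bag_mpsc` below. Values are arbitrary.
#[test]
fn bag_pop_all_sketch() {
    let bag: Bag<usize> = Bag::default();
    bag.push(1);
    bag.push(2);
    assert_eq!(bag.pop_all(0, |acc, _| acc + 1), 2);
    assert!(bag.is_empty());
}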
#[test]
fn bag_into_iter() {
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    for workload_size in [2, 18, 32, 40, 120] {
        let mut bag: Bag<R> = Bag::default();
        for _ in 0..workload_size {
            bag.push(R::new(&INST_CNT, 0, 0));
        }
        assert_eq!(INST_CNT.load(Relaxed), workload_size);
        assert_eq!(bag.len(), workload_size);
        assert_eq!(bag.iter_mut().count(), workload_size);
        for v in &mut bag {
            assert_eq!(v.0.load(Relaxed), INST_CNT.load(Relaxed));
        }
        assert_eq!(INST_CNT.load(Relaxed), workload_size);
        for v in bag {
            assert_eq!(v.0.load(Relaxed), INST_CNT.load(Relaxed));
        }
        assert_eq!(INST_CNT.load(Relaxed), 0);
    }
}

#[test]
fn bag_mpmc() {
    const NUM_THREADS: usize = if cfg!(miri) { 2 } else { 6 };
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let workload_size = if cfg!(miri) { 8 } else { 64 };
    for _ in 0..4 {
        let bag_default: Arc<Bag<R>> = Arc::new(Bag::default());
        let bag_half: Arc<Bag<R, 16>> = Arc::new(Bag::new());
        for _ in 0..workload_size {
            let mut threads = Vec::with_capacity(NUM_THREADS);
            let barrier = Arc::new(Barrier::new(NUM_THREADS));
            for _ in 0..NUM_THREADS {
                let barrier = barrier.clone();
                let bag32 = bag_default.clone();
                let bag_half = bag_half.clone();
                threads.push(thread::spawn(move || {
                    barrier.wait();
                    for _ in 0..4 {
                        for _ in 0..workload_size {
                            bag32.push(R::new(&INST_CNT, 0, 0));
                            bag_half.push(R::new(&INST_CNT, 0, 0));
                        }
                        for _ in 0..workload_size {
                            while bag32.pop().is_none() {
                                Guard::new().accelerate();
                                thread::yield_now();
                            }
                            while bag_half.pop().is_none() {
                                Guard::new().accelerate();
                                thread::yield_now();
                            }
                        }
                    }
                }));
            }
            for thread in threads {
                assert!(thread.join().is_ok());
            }
            assert!(bag_default.pop().is_none());
            assert!(bag_default.is_empty());
            assert!(bag_half.pop().is_none());
            assert!(bag_half.is_empty());
        }
        assert_eq!(INST_CNT.load(Relaxed), 0);
    }
}

#[test]
fn bag_mpsc() {
    const NUM_THREADS: usize = if cfg!(miri) { 2 } else { 6 };
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let workload_size = if cfg!(miri) { 16 } else { 256 };
    let bag32: Arc<Bag<R>> = Arc::new(Bag::default());
    let bag7: Arc<Bag<R, 7>> = Arc::new(Bag::new());
    for _ in 0..16 {
        let mut threads = Vec::with_capacity(NUM_THREADS);
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
        for thread_id in 0..NUM_THREADS {
            let barrier = barrier.clone();
            let bag32 = bag32.clone();
            let bag7 = bag7.clone();
            threads.push(thread::spawn(move || {
                barrier.wait();
                let mut cnt = 0;
                while thread_id == 0 && cnt < workload_size * (NUM_THREADS - 1) * 2 {
                    cnt += bag32.pop_all(0, |a, _| a + 1);
                    cnt += bag7.pop_all(0, |a, _| a + 1);
                    thread::yield_now();
                }
                if thread_id != 0 {
                    for _ in 0..workload_size {
                        bag32.push(R::new(&INST_CNT, 0, 0));
                        bag7.push(R::new(&INST_CNT, 0, 0));
                    }
                    for _ in 0..workload_size / 16 {
                        if bag32.pop().is_some() {
                            bag32.push(R::new(&INST_CNT, 0, 0));
                        }
                        if bag7.pop().is_some() {
                            bag7.push(R::new(&INST_CNT, 0, 0));
                        }
                    }
                }
            }));
        }
        for thread in threads {
            assert!(thread.join().is_ok());
        }
        assert!(bag32.pop().is_none());
        assert!(bag32.is_empty());
        assert!(bag7.pop().is_none());
        assert!(bag7.is_empty());
    }
    assert_eq!(INST_CNT.load(Relaxed), 0);
}

#[test]
fn queue_clone() {
    let queue = Queue::default();
    queue.push(37);
    queue.push(3);
    queue.push(1);
    let queue_clone = queue.clone();
    assert_eq!(queue.pop().map(|e| **e), Some(37));
    assert_eq!(queue.pop().map(|e| **e), Some(3));
    assert_eq!(queue.pop().map(|e| **e), Some(1));
    assert!(queue.pop().is_none());
    assert_eq!(queue_clone.pop().map(|e| **e), Some(37));
    assert_eq!(queue_clone.pop().map(|e| **e), Some(3));
    assert_eq!(queue_clone.pop().map(|e| **e), Some(1));
    assert!(queue_clone.pop().is_none());
}

#[test]
fn queue_from_iter() {
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let workload_size = 16;
    let queue = (0..workload_size)
        .map(|i| R::new(&INST_CNT, i, i))
        .collect::<Queue<R>>();
    assert_eq!(queue.len(), workload_size);
    drop(queue);
    while INST_CNT.load(Relaxed) != 0 {
        Guard::new().accelerate();
        thread::yield_now();
    }
}
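// FIFO sketch: `Queue::pop` yields the oldest entry, and the popped entry dereferences to
// the stored value, hence the double dereference used throughout `queue_clone` above.
#[test]
fn queue_fifo_sketch() {
    let queue = Queue::default();
    queue.push(1);
    queue.push(2);
    assert_eq!(queue.pop().map(|e| **e), Some(1));
    assert_eq!(queue.pop().map(|e| **e), Some(2));
    assert!(queue.pop().is_none());
}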
#[test]
fn queue_pop_all() {
    const NUM_ENTRIES: usize = 256;
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let queue = Queue::default();
    for i in 0..NUM_ENTRIES {
        queue.push(R::new(&INST_CNT, i, i));
    }
    let mut expected = 0;
    while let Some(e) = queue.pop() {
        assert_eq!(e.1, expected);
        expected += 1;
    }
    assert_eq!(expected, NUM_ENTRIES);
    assert!(queue.is_empty());
    while INST_CNT.load(Relaxed) != 0 {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn queue_iter_push_pop() {
    const NUM_THREADS: usize = if cfg!(miri) { 2 } else { 4 };
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let workload_size = if cfg!(miri) { 16 } else { 256 };
    let queue: Arc<Queue<R>> = Arc::new(Queue::default());
    for _ in 0..4 {
        let mut threads = Vec::with_capacity(NUM_THREADS);
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
        for task_id in 0..NUM_THREADS {
            let barrier = barrier.clone();
            let queue = queue.clone();
            threads.push(thread::spawn(move || {
                if task_id == 0 {
                    for seq in 0..workload_size {
                        if seq == workload_size / 2 {
                            barrier.wait();
                        }
                        assert_eq!(queue.push(R::new(&INST_CNT, task_id, seq)).2, seq);
                    }
                    let mut last = 0;
                    while let Some(popped) = queue.pop() {
                        let current = popped.1;
                        assert!(last == 0 || last + 1 == current);
                        last = current;
                    }
                } else {
                    let mut last = 0;
                    barrier.wait();
                    let guard = Guard::new();
                    let iter = queue.iter(&guard);
                    for current in iter {
                        let current = current.1;
                        assert!(current == 0 || last + 1 == current);
                        last = current;
                    }
                }
            }));
        }
        for thread in threads {
            assert!(thread.join().is_ok());
        }
    }
    assert!(queue.is_empty());
    while INST_CNT.load(Relaxed) != 0 {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn queue_mpmc() {
    const NUM_THREADS: usize = if cfg!(miri) { 3 } else { 6 };
    const NUM_PRODUCERS: usize = NUM_THREADS / 2;
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let workload_size = if cfg!(miri) { 16 } else { 256 };
    let queue: Arc<Queue<R>> = Arc::new(Queue::default());
    for _ in 0..4 {
        let num_popped: Arc<AtomicUsize> = Arc::new(AtomicUsize::default());
        let mut threads = Vec::with_capacity(NUM_THREADS);
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
        for thread_id in 0..NUM_THREADS {
            let barrier = barrier.clone();
            let queue = queue.clone();
            let num_popped = num_popped.clone();
            threads.push(thread::spawn(move || {
                barrier.wait();
                if thread_id < NUM_PRODUCERS {
                    for seq in 1..=workload_size {
                        assert_eq!(queue.push(R::new(&INST_CNT, thread_id, seq)).2, seq);
                    }
                } else {
                    let mut popped_acc: [usize; NUM_PRODUCERS] = Default::default();
                    loop {
                        let mut cnt = 0;
                        while let Some(popped) = queue.pop() {
                            cnt += 1;
                            assert!(popped_acc[popped.1] < popped.2);
                            popped_acc[popped.1] = popped.2;
                        }
                        if num_popped.fetch_add(cnt, Relaxed) + cnt
                            == workload_size * NUM_PRODUCERS
                        {
                            break;
                        }
                        thread::yield_now();
                    }
                }
            }));
        }
        for thread in threads {
            assert!(thread.join().is_ok());
        }
    }
    assert!(queue.is_empty());
    while INST_CNT.load(Relaxed) != 0 {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn stack_clone() {
    let stack = Stack::default();
    stack.push(37);
    stack.push(3);
    stack.push(1);
    let stack_clone = stack.clone();
    assert_eq!(stack.pop().map(|e| **e), Some(1));
    assert_eq!(stack.pop().map(|e| **e), Some(3));
    assert_eq!(stack.pop().map(|e| **e), Some(37));
    assert!(stack.pop().is_none());
    assert_eq!(stack_clone.pop().map(|e| **e), Some(1));
    assert_eq!(stack_clone.pop().map(|e| **e), Some(3));
    assert_eq!(stack_clone.pop().map(|e| **e), Some(37));
    assert!(stack_clone.pop().is_none());
}

#[test]
fn stack_from_iter() {
    let workload_size = 16;
    let stack = (0..workload_size).collect::<Stack<usize>>();
    assert_eq!(stack.len(), workload_size);
    for i in (0..workload_size).rev() {
        assert_eq!(stack.pop().map(|e| **e), Some(i));
    }
}
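// LIFO sketch: `Stack::pop` yields the most recently pushed entry, the mirror image of the
// FIFO behavior checked for `Queue` above.
#[test]
fn stack_lifo_sketch() {
    let stack = Stack::default();
    stack.push(1);
    stack.push(2);
    assert_eq!(stack.pop().map(|e| **e), Some(2));
    assert_eq!(stack.pop().map(|e| **e), Some(1));
    assert!(stack.pop().is_none());
}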
#[test]
fn stack_iterator() {
    const NUM_THREADS: usize = if cfg!(miri) { 2 } else { 12 };
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let workload_size = if cfg!(miri) { 16 } else { 256 };
    let stack: Arc<Stack<R>> = Arc::new(Stack::default());
    for _ in 0..4 {
        let mut threads = Vec::with_capacity(NUM_THREADS);
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
        for task_id in 0..NUM_THREADS {
            let barrier = barrier.clone();
            let stack = stack.clone();
            threads.push(thread::spawn(move || {
                if task_id == 0 {
                    for seq in 0..workload_size {
                        if seq == workload_size / 2 {
                            barrier.wait();
                        }
                        assert_eq!(stack.push(R::new(&INST_CNT, task_id, seq)).2, seq);
                    }
                    let mut last = workload_size;
                    while let Some(popped) = stack.pop() {
                        let current = popped.2;
                        assert_eq!(current + 1, last);
                        last = current;
                    }
                } else {
                    let mut last = workload_size;
                    barrier.wait();
                    let guard = Guard::new();
                    let iter = stack.iter(&guard);
                    for current in iter {
                        let current = current.2;
                        assert!(last == workload_size || last > current);
                        last = current;
                    }
                }
            }));
        }
        for t in threads {
            assert!(t.join().is_ok());
        }
    }
    assert!(stack.is_empty());
    while INST_CNT.load(Relaxed) != 0 {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn stack_mpmc() {
    const NUM_THREADS: usize = if cfg!(miri) { 2 } else { 12 };
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let workload_size = if cfg!(miri) { 16 } else { 256 };
    let stack: Arc<Stack<R>> = Arc::new(Stack::default());
    for _ in 0..4 {
        let mut threads = Vec::with_capacity(NUM_THREADS);
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
        for thread_id in 0..NUM_THREADS {
            let barrier = barrier.clone();
            let stack = stack.clone();
            threads.push(thread::spawn(move || {
                barrier.wait();
                for seq in 0..workload_size {
                    assert_eq!(stack.push(R::new(&INST_CNT, thread_id, seq)).2, seq);
                }
                let mut last_popped = usize::MAX;
                let mut cnt = 0;
                while cnt < workload_size {
                    while let Ok(Some(popped)) = stack.pop_if(|e| e.1 == thread_id) {
                        assert_eq!(popped.1, thread_id);
                        assert!(last_popped > popped.2);
                        last_popped = popped.2;
                        cnt += 1;
                    }
                    thread::yield_now();
                }
            }));
        }
        for t in threads {
            assert!(t.join().is_ok());
        }
    }
    assert!(stack.is_empty());
    while INST_CNT.load(Relaxed) != 0 {
        Guard::new().accelerate();
        thread::yield_now();
    }
}

#[test]
fn stack_mpsc() {
    const NUM_THREADS: usize = if cfg!(miri) { 2 } else { 12 };
    static INST_CNT: AtomicUsize = AtomicUsize::new(0);
    let workload_size = if cfg!(miri) { 16 } else { 256 };
    let stack: Arc<Stack<R>> = Arc::new(Stack::default());
    for _ in 0..4 {
        let mut threads = Vec::with_capacity(NUM_THREADS);
        let barrier = Arc::new(Barrier::new(NUM_THREADS));
        for thread_id in 0..NUM_THREADS {
            let barrier = barrier.clone();
            let stack = stack.clone();
            threads.push(thread::spawn(move || {
                barrier.wait();
                let mut cnt = 0;
                while thread_id == 0 && cnt < workload_size * (NUM_THREADS - 1) {
                    // Consumer.
                    let popped = stack.pop_all();
                    while let Some(e) = popped.pop() {
                        assert_ne!(e.1, 0);
                        cnt += 1;
                    }
                    thread::yield_now();
                }
                if thread_id != 0 {
                    for seq in 0..workload_size {
                        assert_eq!(stack.push(R::new(&INST_CNT, thread_id, seq)).2, seq);
                    }
                    for seq in 0..workload_size / 16 {
                        if stack.pop().is_some() {
                            assert_eq!(stack.push(R::new(&INST_CNT, thread_id, seq)).2, seq);
                        }
                    }
                }
            }));
        }
        for t in threads {
            assert!(t.join().is_ok());
        }
    }
    assert!(stack.is_empty());
    while INST_CNT.load(Relaxed) != 0 {
        Guard::new().accelerate();
        thread::yield_now();
    }
}
sdd-4.5.3/src/tests.rs000064400000000000000000000001241046102023000127030ustar 00000000000000
#[cfg(feature = "loom")]
mod models;
#[cfg(not(feature = "loom"))]
mod unit_tests;