saa-5.4.2/.cargo_vcs_info.json0000644000000001360000000000100116100ustar { "git": { "sha1": "5f52e1bdacd9df4f72ff9286aee2c64e5f2fd195" }, "path_in_vcs": "" }saa-5.4.2/.gitignore000064400000000000000000000006631046102023000123750ustar 00000000000000# Generated by Cargo # will have compiled files and executables /target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock # These are backup files generated by rustfmt **/*.rs.bk # IntelliJ .idea # macOS **/.DS_Store # VSCode .vscode # Emacs **/#*# **/*~ # Proptest /proptest-regressions/ saa-5.4.2/.woodpecker/saa.yml000064400000000000000000000036521046102023000141150ustar 00000000000000matrix: RUST: [stable, nightly] steps: test: when: event: [push, pull_request] image: rust environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - cargo test - cargo test -p examples - cargo test --release - cargo test --release -p examples 32-bit: when: event: [push, pull_request] matrix: RUST: nightly image: rust environment: CARGO_TERM_COLOR: always commands: - apt-get update - apt-get -y install gcc-multilib - rustup default $RUST - rustup target add i686-unknown-linux-gnu - cargo test --target i686-unknown-linux-gnu - cargo test --target i686-unknown-linux-gnu -p examples linter: when: event: [push, pull_request] matrix: RUST: stable image: rust environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - rustup component add rustfmt - rustup component add clippy - cargo fmt -- --check - cargo fmt -p examples --check - cargo clippy --all - cargo clippy -p examples --all - cargo test --features lock_api miri: when: event: [push, pull_request] matrix: RUST: nightly image: rust environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - rustup component add miri - cargo miri test loom: when: event: [push, pull_request] matrix: RUST: stable image: rust 
environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - cargo test --features loom --release --lib - cargo test --features lock_api,loom --release --lib benchmark: when: event: [push, pull_request] matrix: RUST: stable image: rust environment: CARGO_TERM_COLOR: always commands: - rustup default $RUST - cargo bench saa-5.4.2/CHANGELOG.md000064400000000000000000000041771046102023000122220ustar 00000000000000# Changelog 5.4.2 * Migrate to [`codeberg`](https://codeberg.org/wvwwvwwv/synchronous-and-asynchronous). 5.4.1 * Optimize `Future` sizes. 5.4.0 * Add support for [`lock_api`](https://crates.io/crates/lock_api) to `Lock`. 5.3.3 * Minor optimization. 5.3.2 * Optimize `Lock::try_*` methods. 5.3.1 * Micro optimization. 5.3.0 * API update: remove the `Config` API. * Adjust the spin-backoff strategy to outperform `std::sync::Mutex` when mildly contented. 5.2.0 * API update: add `Config`. 5.1.0 * API update: add `Barrier`. 5.0.0 * Separate `lock::Mode::Wait` into `WaitExclusive` and `WaitShared`. 4.4.0 * Add `lock::Mode::Wait`. 4.3.2 * Minor `Future` size reduction. 4.3.1 * Fix a bug in `Pager::try_poll`. 4.3.0 * Remove `Pager::is_sync`. * All `Pager` methods need a pinned reference. * `Pager::poll*` methods return the result only once, and further calls without registering it will return an error. 4.2.0 * Remove the 128B alignment requirement for `WaitQueue`. 4.1.0 * API update for the `Pager` API: `Pager` is now explicitly `!Unpin`, and `poll_async` replaces direct `await` calls. 4.0.0 * Add the `Pager` API for all synchronization primitives. * `Semaphore::acquire_many*` methods return `false` if the specified count is greater than the maximum allowed. 3.3.0 * Reduce the size of asynchronous tasks in general. 3.2.1 * Minor optimization. 3.2.0 * Remove internal use of `Mutex` in the wait queue. 3.1.0 * Add `gate::Pager::try_poll`. 3.0.4 * Inline trivial methods. 
3.0.2 - 3.0.3 * Fix the `failure` load ordering when the lock is deliberately poisoned, the gate is open/sealed, or the semaphore is closed after an event. 3.0.1 * Minor improvements to documentation and metadata. 3.0.0 * Add a poisoned state to `saa::Lock`. * Add `*_with` methods for notifying when a thread enters a wait queue. 2.0.0 * New synchronization primitive: `saa::Gate`. 1.1.0 * Fix a hang issue when an asynchronous task is dropped before completion. * Work in progress: `saa::Gate`. 1.0.1 * Minor optimization. 1.0.0 * Stabilize. 0.4.0 * Update API. 0.3.0 * Stabilize. 0.2.0 * Update API. 0.1.0 * Initial release. saa-5.4.2/Cargo.lock0000644000000663460000000000100076020ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 4 [[package]] name = "aho-corasick" version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] [[package]] name = "alloca" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" dependencies = [ "cc", ] [[package]] name = "anes" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstyle" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "autocfg" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "bitflags" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" 
[[package]] name = "bumpalo" version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "bytes" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" [[package]] name = "cast" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" version = "1.2.49" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" dependencies = [ "find-msvc-tools", "shlex", ] [[package]] name = "cfg-if" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "ciborium" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", "serde", ] [[package]] name = "ciborium-io" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", ] [[package]] name = "clap" version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" version = "4.5.53" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstyle", "clap_lex", ] [[package]] name = "clap_lex" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "criterion" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d883447757bb0ee46f233e9dc22eb84d93a9508c9b868687b274fc431d886bf" dependencies = [ "alloca", "anes", "cast", "ciborium", "clap", "criterion-plot", "futures", "itertools", "num-traits", "oorandom", "page_size", "plotters", "rayon", "regex", "serde", "serde_json", "tinytemplate", "walkdir", ] [[package]] name = "criterion-plot" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed943f81ea2faa8dcecbbfa50164acf95d555afec96a27871663b300e387b2e4" dependencies = [ "cast", "itertools", ] [[package]] name = "crossbeam-deque" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "either" version = "1.15.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "find-msvc-tools" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" [[package]] name = "futures" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", ] [[package]] name = "futures-channel" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", ] [[package]] name = "futures-core" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", "futures-util", ] [[package]] name = "futures-io" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "futures-sink" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", "futures-io", "futures-macro", "futures-sink", "futures-task", "memchr", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = "generator" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52f04ae4152da20c76fe800fa48659201d5cf627c5149ca0b707b69d7eef6cf9" dependencies = [ "cc", "cfg-if", "libc", "log", "rustversion", "windows-link", "windows-result", ] [[package]] name = "half" version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", "zerocopy", ] [[package]] name = "itertools" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "js-sys" version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" 
[[package]] name = "libc" version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "lock_api" version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ "scopeguard", ] [[package]] name = "log" version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "loom" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" dependencies = [ "cfg-if", "generator", "scoped-tls", "tracing", "tracing-subscriber", ] [[package]] name = "matchers" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ "regex-automata", ] [[package]] name = "memchr" version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "mio" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", "windows-sys 0.61.2", ] [[package]] name = "nu-ansi-term" version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ "windows-sys 0.61.2", ] [[package]] name = "num-traits" version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] 
[[package]] name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "oorandom" version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "page_size" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" dependencies = [ "libc", "winapi", ] [[package]] name = "parking_lot" version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-link", ] [[package]] name = "pin-project-lite" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "plotters" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", "plotters-svg", "wasm-bindgen", "web-sys", ] [[package]] name = "plotters-backend" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] [[package]] name = "proc-macro2" version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] [[package]] name = "rayon" version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", ] [[package]] name = "redox_syscall" version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ "bitflags", ] [[package]] name = "regex" version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", "regex-automata", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] 
[[package]] name = "regex-syntax" version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "rustversion" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "saa" version = "5.4.2" dependencies = [ "criterion", "futures", "lock_api", "loom", "tokio", ] [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "scoped-tls" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ "serde_core", "serde_derive", ] [[package]] name = "serde_core" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ 
"proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", "serde_core", ] [[package]] name = "sharded-slab" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] [[package]] name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" version = "1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" dependencies = [ "libc", "windows-sys 0.60.2", ] [[package]] name = "syn" version = "2.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "thread_local" version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", ] [[package]] name = "tinytemplate" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" dependencies = [ "serde", "serde_json", ] [[package]] name = "tokio" version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ "bytes", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "tracing" version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" dependencies = [ "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" dependencies = [ "once_cell", "valuable", ] [[package]] name = "tracing-log" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ "log", "once_cell", "tracing-core", ] [[package]] name = "tracing-subscriber" version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex-automata", "sharded-slab", "smallvec", "thread_local", "tracing", 
"tracing-core", "tracing-log", ] [[package]] name = "unicode-ident" version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "valuable" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "walkdir" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasm-bindgen" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" 
version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ "windows-sys 0.61.2", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-sys" version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ "windows-targets", ] [[package]] name = "windows-sys" version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" 
dependencies = [ "windows-link", ] [[package]] name = "windows-targets" version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ "windows-link", "windows_aarch64_gnullvm", "windows_aarch64_msvc", "windows_i686_gnu", "windows_i686_gnullvm", "windows_i686_msvc", "windows_x86_64_gnu", "windows_x86_64_gnullvm", "windows_x86_64_msvc", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" version = "0.53.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "zerocopy" version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", "syn", ] saa-5.4.2/Cargo.toml0000644000000032040000000000100076050ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2024" rust-version = "1.85.0" name = "saa" version = "5.4.2" authors = ["wvwwvwwv "] build = false autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "Word-sized low-level synchronization primitives providing both asynchronous and synchronous interfaces." 
documentation = "https://docs.rs/saa" readme = "README.md" keywords = [ "async", "concurrency", "mutex", "synchronization", ] categories = [ "asynchronous", "concurrency", "data-structures", ] license = "Apache-2.0" repository = "https://codeberg.org/wvwwvwwv/synchronous-and-asynchronous" [features] lock_api = ["dep:lock_api"] loom = ["dep:loom"] [lib] name = "saa" path = "src/lib.rs" [[bench]] name = "lock" path = "benches/lock.rs" harness = false [[bench]] name = "semaphore" path = "benches/semaphore.rs" harness = false [dependencies.lock_api] version = "0.4" optional = true [dependencies.loom] version = "0.7" optional = true [dev-dependencies.criterion] version = "0.8" features = ["async_futures"] [dev-dependencies.futures] version = "0.3" [dev-dependencies.tokio] version = "1.48" features = ["full"] saa-5.4.2/Cargo.toml.orig000064400000000000000000000017131046102023000132710ustar 00000000000000[package] name = "saa" description = "Word-sized low-level synchronization primitives providing both asynchronous and synchronous interfaces." 
documentation = "https://docs.rs/saa" version = "5.4.2" authors = ["wvwwvwwv "] edition = "2024" rust-version = "1.85.0" readme = "README.md" repository = "https://codeberg.org/wvwwvwwv/synchronous-and-asynchronous" license = "Apache-2.0" categories = ["asynchronous", "concurrency", "data-structures"] keywords = ["async", "concurrency", "mutex", "synchronization"] [workspace] members = [".", "examples"] [dependencies] lock_api = { version = "0.4", optional = true } loom = { version = "0.7", optional = true } [features] lock_api = ["dep:lock_api"] loom = ["dep:loom"] [dev-dependencies] criterion = { version = "0.8", features = ["async_futures"] } futures = "0.3" tokio = { version = "1.48", features = ["full"] } [[bench]] name = "lock" harness = false [[bench]] name = "semaphore" harness = false saa-5.4.2/LICENSE000064400000000000000000000250141046102023000114070ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2025-2025 Changgyoo Park Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
saa-5.4.2/README.md000064400000000000000000000131611046102023000116610ustar 00000000000000# Synchronous and Asynchronous Synchronization Primitives [![Cargo](https://img.shields.io/crates/v/saa)](https://crates.io/crates/saa) ![Crates.io](https://img.shields.io/crates/l/saa) Word-sized low-level synchronization primitives providing both asynchronous and synchronous interfaces. ## Features - No heap allocation. - No hidden global variables. - Provides both asynchronous and synchronous interfaces. - [`lock_api`](https://crates.io/crates/lock_api) support: `features = ["lock_api"]`. - [`Loom`](https://github.com/tokio-rs/loom) support: `features = ["loom"]`. ## Lock `saa::Lock` is a low-level shared-exclusive lock providing both asynchronous and synchronous interfaces. Synchronous locking methods such as `lock_sync` and `share_sync` can be used alongside their asynchronous counterparts `lock_async` and `share_async` simultaneously. `saa::Lock` implements an allocation-free fair wait queue shared between both synchronous and asynchronous methods. ```rust use saa::Lock; // At most `62` concurrent shared owners are allowed. assert_eq!(Lock::MAX_SHARED_OWNERS, 62); let lock = Lock::default(); assert!(lock.lock_sync()); assert!(!lock.try_lock()); assert!(!lock.try_share()); assert!(!lock.release_share()); assert!(lock.release_lock()); assert!(lock.lock_sync()); // `Lock` can be poisoned. assert!(lock.poison_lock()); assert!(!lock.lock_sync()); assert!(lock.clear_poison()); async { assert!(lock.share_async().await); assert!(lock.release_share()); assert!(lock.lock_async().await); assert!(lock.release_lock()); }; ``` ### [`lock_api`](https://crates.io/crates/lock_api) support The `lock_api` feature is automatically disabled when the `loom` feature is enabled since `loom` atomic types cannot be instantiated in const contexts. 
```rust #[cfg(all(feature = "lock_api", not(feature = "loom")))] use saa::{Mutex, RwLock, lock_async, read_async, write_async}; #[cfg(all(feature = "lock_api", not(feature = "loom")))] fn example() { let mutex: Mutex = Mutex::new(0); let rwlock: RwLock = RwLock::new(0); let mut mutex_guard = mutex.lock(); assert_eq!(*mutex_guard, 0); *mutex_guard += 1; assert_eq!(*mutex_guard, 1); drop(mutex_guard); let mut write_guard = rwlock.write(); assert_eq!(*write_guard, 0); *write_guard += 1; drop(write_guard); let read_guard = rwlock.read(); assert_eq!(*read_guard, 1); drop(read_guard); async { let mutex_guard = lock_async(&mutex).await; assert_eq!(*mutex_guard, 1); drop(mutex_guard); let mut write_guard = write_async(&rwlock).await; *write_guard += 1; drop(write_guard); let reader_guard = read_async(&rwlock).await; assert_eq!(*reader_guard, 2); drop(reader_guard); }; } ``` ## Barrier `saa::Barrier` is a synchronization primitive to enable a number of tasks to start execution at the same time. ```rust use std::sync::Arc; use std::thread; use saa::Barrier; // At most `63` concurrent tasks/threads can be synchronized. assert_eq!(Barrier::MAX_TASKS, 63); let barrier = Arc::new(Barrier::with_count(8)); let mut threads = Vec::new(); for _ in 0..8 { let barrier = barrier.clone(); threads.push(thread::spawn(move || { for _ in 0..4 { barrier.wait_sync(); } })); } for thread in threads { thread.join().unwrap(); } ``` ## Semaphore `saa::Semaphore` is a synchronization primitive that allows a fixed number of threads to access a resource concurrently. ```rust use saa::Semaphore; // At most `63` concurrent tasks/threads can be synchronized. 
assert_eq!(Semaphore::MAX_PERMITS, 63); let semaphore = Semaphore::default(); semaphore.acquire_many_sync(Semaphore::MAX_PERMITS - 1); assert!(semaphore.try_acquire()); assert!(!semaphore.try_acquire()); assert!(semaphore.release()); assert!(!semaphore.release_many(Semaphore::MAX_PERMITS)); assert!(semaphore.release_many(Semaphore::MAX_PERMITS - 1)); async { semaphore.acquire_async().await; assert!(semaphore.release()); }; ``` ## Gate `saa::Gate` is an unbounded barrier that can be opened or sealed manually as needed. ```rust use std::sync::Arc; use std::thread; use saa::Gate; use saa::gate::State; let gate = Arc::new(Gate::default()); let mut threads = Vec::new(); for _ in 0..4 { let gate = gate.clone(); threads.push(thread::spawn(move || { assert_eq!(gate.enter_sync(), Ok(State::Controlled)); })); } let mut count = 0; while count != 4 { if let Ok(n) = gate.permit() { count += n; } } for thread in threads { thread.join().unwrap(); } ``` ## Pager `saa::Pager` enables remotely waiting for a resource to become available. ```rust use std::pin::pin; use saa::{Gate, Pager}; use saa::gate::State; let gate = Gate::default(); let mut pinned_pager = pin!(Pager::default()); assert!(gate.register_pager(&mut pinned_pager, true)); assert_eq!(gate.open().1, 1); assert_eq!(pinned_pager.poll_sync(), Ok(State::Open)); ``` ## Notes Using synchronous methods in an asynchronous context may lead to deadlocks. Consider a scenario where an asynchronous runtime uses two threads to execute three tasks. * ThreadId(0): `task-0: share-waiting / pending` || `task-1: "synchronous"-lock-waiting`. * ThreadId(1): `task-2: release-lock / ready: wake-up task-0` -> `task-2: lock-waiting / pending`. In this example, `task-0` has logically acquired a shared lock transferred from `task-2`; however, it may remain in the task queue indefinitely depending on the task scheduling policy. 
## [Changelog](https://codeberg.org/wvwwvwwv/synchronous-and-asynchronous/src/branch/main/CHANGELOG.md) saa-5.4.2/benches/lock.rs000064400000000000000000000116621046102023000133130ustar 00000000000000use std::hint::black_box; use std::sync::Arc; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering::{Acquire, Release}; use std::thread; use std::time::{Duration, Instant}; use criterion::async_executor::FuturesExecutor; use criterion::{Criterion, criterion_group, criterion_main}; use saa::{Barrier, Lock}; fn exclusive_unlock_async(c: &mut Criterion) { async fn test() { let lock = Lock::default(); lock.lock_async().await; assert!(lock.release_lock()); } c.bench_function("lock-exclusive-unlock-async", |b| { b.to_async(FuturesExecutor).iter(test); }); } fn exclusive_unlock_sync(c: &mut Criterion) { c.bench_function("lock-exclusive-unlock-sync", |b| { b.iter(|| { let lock = Lock::default(); lock.lock_sync(); assert!(lock.release_lock()); }); }); } fn exclusive_unlock_async_with(c: &mut Criterion) { async fn test() { let lock = Lock::default(); let mut waited = false; lock.lock_async_with(|| { waited = true; std::thread::yield_now(); }) .await; assert!(!waited); assert!(lock.release_lock()); } c.bench_function("lock-exclusive-unlock-async-with", |b| { b.to_async(FuturesExecutor).iter(test); }); } fn exclusive_unlock_sync_with(c: &mut Criterion) { c.bench_function("lock-exclusive-unlock-sync-with", |b| { b.iter(|| { let lock = Lock::default(); let mut waited = false; lock.lock_sync_with(|| { waited = true; std::thread::yield_now(); }); assert!(!waited); assert!(lock.release_lock()); }); }); } fn shared_shared_unlock_unlock(c: &mut Criterion) { c.bench_function("share-shared-unlock-unlock", |b| { b.iter(|| { let lock = Lock::default(); lock.share_sync(); lock.share_sync(); assert!(lock.release_share()); assert!(lock.release_share()); }); }); } fn wait_awake(c: &mut Criterion) { c.bench_function("wait_awake", |b| { b.iter_custom(|iters| { let lock = 
Arc::new(Lock::default()); let entered = Arc::new(AtomicBool::new(false)); let mut acc = Duration::from_secs(0); for _ in 0..iters { assert!(lock.lock_sync()); let lock_clone = lock.clone(); let entered_clone = entered.clone(); let thread = thread::spawn(move || { let mut start = Instant::now(); lock_clone.lock_sync_with(|| { entered_clone.swap(true, Release); start = Instant::now(); }); start.elapsed() }); while !entered.load(Acquire) {} assert!(lock.release_lock()); acc += thread.join().unwrap(); assert!(lock.release_lock()); assert!(entered.swap(false, Release)); } acc }) }); } fn multi_threaded_workload(iters: u64, num_threads: usize, num_cycles: usize) -> Duration { let lock = Arc::new(Lock::default()); let mut threads = Vec::with_capacity(num_threads); let barrier = Arc::new(Barrier::with_count(num_threads)); for _ in 0..num_threads { let barrier = barrier.clone(); let lock = lock.clone(); let join = thread::spawn(move || { barrier.wait_sync(); let start = Instant::now(); for _ in 0..iters { assert!(lock.lock_sync()); let acc = black_box({ (black_box(0)..black_box(num_cycles)).fold(black_box(0), |acc, v| acc + v) }); assert_eq!(acc, (0..num_cycles).sum::()); assert!(lock.release_lock()); } start.elapsed() }); threads.push(join); } threads .into_iter() .fold(Duration::from_nanos(0), |acc, t| acc.max(t.join().unwrap())) } macro_rules! 
multi_threaded { ($name:ident, $num_threads:expr, $num_cycles:expr) => { fn $name(c: &mut Criterion) { c.bench_function(stringify!($name), |b| { b.iter_custom(|iters| multi_threaded_workload(iters, $num_threads, $num_cycles)) }); } }; } multi_threaded!(multi_threaded_2_256, 2, 256); multi_threaded!(multi_threaded_2_65536, 2, 65536); multi_threaded!(multi_threaded_8_256, 8, 256); multi_threaded!(multi_threaded_8_65536, 8, 65536); criterion_group!( lock, exclusive_unlock_async, exclusive_unlock_sync, exclusive_unlock_async_with, exclusive_unlock_sync_with, shared_shared_unlock_unlock, wait_awake, multi_threaded_2_256, multi_threaded_2_65536, multi_threaded_8_256, multi_threaded_8_65536, ); criterion_main!(lock); saa-5.4.2/benches/semaphore.rs000064400000000000000000000022441046102023000143420ustar 00000000000000use criterion::{Criterion, criterion_group, criterion_main}; use saa::Semaphore; fn acquire_release(c: &mut Criterion) { c.bench_function("semaphore-acquire-release", |b| { b.iter(|| { let semaphore = Semaphore::default(); semaphore.acquire_sync(); assert!(semaphore.release()); }); }); } fn acquire_acquire_release_release(c: &mut Criterion) { c.bench_function("semaphore-acquire-acquire-release-release", |b| { b.iter(|| { let semaphore = Semaphore::default(); semaphore.acquire_sync(); semaphore.acquire_sync(); assert!(semaphore.release()); assert!(semaphore.release()); }); }); } fn acquire_many_release_many(c: &mut Criterion) { c.bench_function("semaphore-acquire-many-release-many", |b| { b.iter(|| { let semaphore = Semaphore::default(); semaphore.acquire_many_sync(11); assert!(semaphore.release_many(11)); }); }); } criterion_group!( semaphore, acquire_release, acquire_acquire_release_release, acquire_many_release_many, ); criterion_main!(semaphore); saa-5.4.2/src/barrier.rs000064400000000000000000000241321046102023000131650ustar 00000000000000//! [`Barrier`] is a synchronization primitive that enables multiple tasks to start execution at the //! same time. 
#![deny(unsafe_code)] use std::fmt; use std::pin::{Pin, pin}; #[cfg(not(feature = "loom"))] use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Relaxed}; #[cfg(feature = "loom")] use loom::sync::atomic::AtomicUsize; use crate::Pager; use crate::opcode::Opcode; use crate::pager::{self, SyncResult}; use crate::sync_primitive::SyncPrimitive; use crate::wait_queue::{Entry, WaitQueue}; /// [`Barrier`] is a synchronization primitive that enables multiple tasks to start execution at the /// same time. pub struct Barrier { /// [`Barrier`] state. state: AtomicUsize, } impl Barrier { /// Maximum number of tasks to block. pub const MAX_TASKS: usize = WaitQueue::DATA_MASK; /// Creates a new [`Barrier`] that can block the given number of tasks. /// /// The maximum number of tasks to block is defined by [`MAX_TASKS`](Self::MAX_TASKS), and if a /// value greater than or equal to [`MAX_TASKS`](Self::MAX_TASKS) is provided, it will be set to /// [`MAX_TASKS`](Self::MAX_TASKS). /// /// # Examples /// /// ``` /// use saa::Barrier; /// /// let barrier = Barrier::with_count(1); /// ``` #[inline] #[must_use] pub fn with_count(count: usize) -> Self { let adjusted_count = Self::MAX_TASKS.min(count); Self { state: AtomicUsize::new(adjusted_count), } } /// Returns the current count of tasks to block. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use saa::Barrier; /// /// let barrier = Barrier::with_count(1); /// /// assert_eq!(barrier.count(Relaxed), 1); /// ``` #[inline] pub fn count(&self, mo: Ordering) -> usize { self.state.load(mo) & WaitQueue::DATA_MASK } /// Waits until a sufficient number of tasks have reached the barrier. /// /// Returns `true` if the task was the last one to reach the barrier. 
/// /// # Examples /// /// ``` /// use saa::Barrier; /// /// let barrier = Barrier::with_count(1); /// /// async { /// assert!(barrier.wait_async().await); /// }; /// ``` #[inline] pub async fn wait_async(&self) -> bool { self.wait_async_with(|| {}).await } /// Waits until a sufficient number of tasks have reached the barrier. /// /// Returns `true` if the task was the last one to reach the barrier. The callback is invoked /// when the task starts waiting. /// /// # Examples /// /// ``` /// use saa::Barrier; /// /// let barrier = Barrier::with_count(1); /// /// async { /// let mut wait = false; /// assert!(barrier.wait_async_with(|| { wait = true; }).await); /// assert!(!wait); /// }; /// ``` #[inline] pub async fn wait_async_with(&self, mut begin_wait: F) -> bool { let mut pinned_pager = pin!(Pager::default()); loop { pinned_pager .wait_queue() .construct(self, Opcode::Barrier(false), false); if let Some(returned) = self.count_down(&mut pinned_pager, false, begin_wait) { begin_wait = returned; let result = pinned_pager.poll_async().await.unwrap_or(false); debug_assert!(!result); } else { return pinned_pager.poll_async().await.unwrap_or(false); } } } /// Waits until a sufficient number of tasks have reached the barrier. /// /// Returns `true` if the task was the last one to reach the barrier. /// /// # Examples /// /// ``` /// use saa::Barrier; /// /// let barrier = Barrier::with_count(1); /// /// assert!(barrier.wait_sync()); /// ``` #[inline] pub fn wait_sync(&self) -> bool { self.wait_sync_with(|| ()) } /// Waits until a sufficient number of tasks have reached the barrier. /// /// Returns `true` if the task was the last one to reach the barrier. The callback is invoked /// when the task starts waiting. 
/// /// # Examples /// /// ``` /// use saa::Barrier; /// /// let barrier = Barrier::with_count(1); /// /// let mut wait = false; /// assert!(barrier.wait_sync_with(|| { wait = true; })); /// assert!(!wait); /// ``` #[inline] pub fn wait_sync_with(&self, mut begin_wait: F) -> bool { let mut pinned_pager = pin!(Pager::default()); loop { pinned_pager .wait_queue() .construct(self, Opcode::Barrier(false), true); if let Some(returned) = self.count_down(&mut pinned_pager, true, begin_wait) { begin_wait = returned; let result = pinned_pager.poll_sync().unwrap_or(false); debug_assert!(!result); } else { return pinned_pager.poll_sync().unwrap_or(false); } } } /// Counts down the barrier counter. /// /// Returns the wait callback if it needs to be retried. #[inline] fn count_down( &self, pager: &mut Pin<&mut Pager>, is_sync: bool, begin_wait: F, ) -> Option { let mut state = self.state.load(Acquire); let wait_queue = pager.wait_queue(); loop { let mut count = state & WaitQueue::DATA_MASK; if count == 0 { // The counter cannot be decremented, therefore wait for the counter to be reset. wait_queue.construct(self, Opcode::Barrier(true), is_sync); if self.try_push_wait_queue_entry(pager.wait_queue(), state) { return Some(begin_wait); } state = self.state.load(Acquire); } else if count == 1 { // This is the last task to reach the barrier, therefore we can reset the counter. match self.state.compare_exchange(state, 0, Acquire, Acquire) { Ok(value) => { let mut anchor_ptr = WaitQueue::to_anchor_ptr(value); if !anchor_ptr.is_null() { let tail_entry_ptr = WaitQueue::to_entry_ptr(anchor_ptr); Entry::iter_forward(tail_entry_ptr, false, |entry, _| { count += 1; // `0` means that all the tasks have reached the barrier, but it is // not the last one. entry.set_result(0); false }); } debug_assert!(count <= Self::MAX_TASKS); // Wake-up waiting tasks. 
anchor_ptr = WaitQueue::to_anchor_ptr(self.state.swap(count, AcqRel)); if !anchor_ptr.is_null() { let tail_entry_ptr = WaitQueue::to_entry_ptr(anchor_ptr); Entry::iter_forward(tail_entry_ptr, false, |entry, _| { // `2` means that the waiting task needs to retry. entry.set_result(2); false }); } // `1` means that the task is the last one to count down the barrier. wait_queue.entry().set_result(1); return None; } Err(new_state) => state = new_state, } } else { let anchor_ptr = wait_queue.anchor_ptr().0; let anchor_addr = anchor_ptr.expose_provenance(); debug_assert_eq!(anchor_addr & (!WaitQueue::ADDR_MASK), 0); wait_queue .entry() .update_next_entry_anchor_ptr(WaitQueue::to_anchor_ptr(state)); // Count down here. let next_state = ((state - 1) & (!WaitQueue::ADDR_MASK)) | anchor_addr; match self .state .compare_exchange(state, next_state, AcqRel, Acquire) { Ok(_) => { // The entry cannot be dropped until the result is acknowledged. wait_queue.entry().set_pollable(); begin_wait(); return None; } Err(new_state) => state = new_state, } } } } } impl fmt::Debug for Barrier { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let state = self.state.load(Relaxed); let counter = state & WaitQueue::DATA_MASK; let wait_queue_being_processed = state & WaitQueue::LOCKED_FLAG == WaitQueue::LOCKED_FLAG; let wait_queue_tail_addr = state & WaitQueue::ADDR_MASK; f.debug_struct("WaitQueue") .field("state", &state) .field("counter", &counter) .field("wait_queue_being_processed", &wait_queue_being_processed) .field("wait_queue_tail_addr", &wait_queue_tail_addr) .finish() } } impl Default for Barrier { /// The default number of tasks to block is [`MAX_TASKS`](Self::MAX_TASKS). 
#[inline] fn default() -> Self { Self { state: AtomicUsize::new(Self::MAX_TASKS), } } } impl SyncPrimitive for Barrier { #[inline] fn state(&self) -> &AtomicUsize { &self.state } #[inline] fn max_shared_owners() -> usize { Self::MAX_TASKS } #[inline] fn drop_wait_queue_entry(entry: &Entry) { Self::force_remove_wait_queue_entry(entry); } } impl SyncResult for Barrier { type Result = Result; #[inline] fn to_result(result: u8, pager_error: Option) -> Self::Result { pager_error.map_or_else(|| Ok(result == 1), Err) } } saa-5.4.2/src/gate.rs000064400000000000000000000462321046102023000124640ustar 00000000000000//! [`Gate`] is a synchronization primitive that blocks tasks from entering a critical section until //! they are allowed to do so. #![deny(unsafe_code)] use std::pin::{Pin, pin}; #[cfg(not(feature = "loom"))] use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Relaxed}; #[cfg(feature = "loom")] use loom::sync::atomic::AtomicUsize; use crate::opcode::Opcode; use crate::pager::SyncResult; use crate::sync_primitive::SyncPrimitive; use crate::wait_queue::{Entry, WaitQueue}; use crate::{Pager, pager}; /// [`Gate`] is a synchronization primitive that blocks tasks from entering a critical section until /// they are allowed to do so. #[derive(Debug, Default)] pub struct Gate { /// [`Gate`] state. state: AtomicUsize, } /// The state of a [`Gate`]. /// /// [`Gate`] can be in one of three states. /// /// * `Controlled` - The default state where tasks can enter the [`Gate`] if permitted. /// * `Sealed` - The [`Gate`] is sealed and tasks immediately get rejected when they attempt to enter. /// * `Open` - The [`Gate`] is open and tasks can immediately enter it. #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] #[repr(u8)] pub enum State { /// The default state where tasks can enter the [`Gate`] if permitted. Controlled = 0_u8, /// The [`Gate`] is sealed and tasks immediately get rejected when they attempt to enter it. 
Sealed = 1_u8, /// The [`Gate`] is open and tasks can immediately enter it. Open = 2_u8, } /// Errors that can occur when accessing a [`Gate`]. #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] #[repr(u8)] pub enum Error { /// The [`Gate`] rejected the task. Rejected = 4_u8, /// The [`Gate`] has been sealed. Sealed = 8_u8, /// Spurious failure to enter a [`Gate`] in a [`Controlled`](State::Controlled) state. /// /// This can happen if a task holding a [`Pager`] gets cancelled or drops the [`Pager`] before /// the [`Gate`] has permitted or rejected the task; the task causes all other waiting tasks /// of the [`Gate`] to get this error. SpuriousFailure = 12_u8, /// The [`Pager`] is not registered in any [`Gate`]. NotRegistered = 16_u8, /// The wrong asynchronous/synchronous mode was used for a [`Pager`]. WrongMode = 20_u8, /// The result is not ready. NotReady = 24_u8, } impl Gate { /// Mask to get the state value from `u8`. const STATE_MASK: u8 = 0b11; /// Creates a new [`Gate`]. /// /// # Examples /// /// ``` /// use saa::Gate; /// /// let gate = Gate::new(); /// ``` #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn new() -> Self { Self { state: AtomicUsize::new(0), } } /// Creates a new [`Gate`]. #[cfg(feature = "loom")] #[inline] #[must_use] pub fn new() -> Self { Self { state: AtomicUsize::new(0), } } /// Returns the current state of the [`Gate`]. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use saa::Gate; /// use saa::gate::State; /// /// let gate = Gate::default(); /// /// assert_eq!(gate.state(Relaxed), State::Controlled); /// ``` #[inline] pub fn state(&self, mo: Ordering) -> State { State::from(self.state.load(mo) & WaitQueue::DATA_MASK) } /// Resets the [`Gate`] to its initial state if it is not in a [`Controlled`](State::Controlled) /// state. /// /// Returns the previous state of the [`Gate`]. 
/// /// # Examples /// /// ``` /// use saa::Gate; /// use saa::gate::State; /// /// let gate = Gate::default(); /// /// assert_eq!(gate.reset(), None); /// /// gate.seal(); /// /// assert_eq!(gate.reset(), Some(State::Sealed)); /// ``` #[inline] pub fn reset(&self) -> Option { match self.state.fetch_update(Relaxed, Relaxed, |value| { let state = State::from(value & WaitQueue::DATA_MASK); if state == State::Controlled { None } else { debug_assert_eq!(value & WaitQueue::ADDR_MASK, 0); Some((value & WaitQueue::ADDR_MASK) | u8::from(state) as usize) } }) { Ok(state) => Some(State::from(state & WaitQueue::DATA_MASK)), Err(_) => None, } } /// Permits waiting tasks to enter the [`Gate`] if the [`Gate`] is in a /// [`Controlled`](State::Controlled) state. /// /// Returns the number of permitted tasks. /// /// # Errors /// /// Returns an [`Error`] if the [`Gate`] is not in a [`Controlled`](State::Controlled) state. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// use std::thread; /// /// use saa::Gate; /// use saa::gate::State; /// /// let gate = Arc::new(Gate::default()); /// /// let gate_clone = gate.clone(); /// /// let thread = thread::spawn(move || { /// assert_eq!(gate_clone.enter_sync(), Ok(State::Controlled)); /// }); /// /// loop { /// if gate.permit() == Ok(1) { /// break; /// } /// } /// /// thread.join().unwrap(); /// ``` #[inline] pub fn permit(&self) -> Result { let (state, count) = self.wake_all(None, None); if state == State::Controlled { Ok(count) } else { debug_assert_eq!(count, 0); Err(state) } } /// Rejects waiting tasks from entering the [`Gate`] if the [`Gate`] is in a /// [`Controlled`](State::Controlled) state. /// /// Returns the number of rejected tasks. /// /// # Errors /// /// Returns an [`Error`] if the [`Gate`] is not in a [`Controlled`](State::Controlled) state. 
/// /// # Examples /// /// ``` /// use std::sync::Arc; /// use std::thread; /// /// use saa::Gate; /// use saa::gate::Error; /// /// let gate = Arc::new(Gate::default()); /// /// let gate_clone = gate.clone(); /// /// let thread = thread::spawn(move || { /// assert_eq!(gate_clone.enter_sync(), Err(Error::Rejected)); /// }); /// /// loop { /// if gate.reject() == Ok(1) { /// break; /// } /// } /// /// thread.join().unwrap(); /// ``` #[inline] pub fn reject(&self) -> Result { let (state, count) = self.wake_all(None, Some(Error::Rejected)); if state == State::Controlled { Ok(count) } else { debug_assert_eq!(count, 0); Err(state) } } /// Opens the [`Gate`] to allow any tasks to enter it. /// /// Returns the number of tasks that were waiting to enter it. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use saa::Gate; /// use saa::gate::State; /// /// let gate = Gate::default(); /// assert_eq!(gate.state(Relaxed), State::Controlled); /// /// let (prev_state, count) = gate.open(); /// /// assert_eq!(prev_state, State::Controlled); /// assert_eq!(count, 0); /// assert_eq!(gate.state(Relaxed), State::Open); /// ``` #[inline] pub fn open(&self) -> (State, usize) { self.wake_all(Some(State::Open), None) } /// Seals the [`Gate`] to disallow tasks from entering. /// /// Returns the previous state of the [`Gate`] and the number of tasks that were waiting to /// enter the Gate. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use saa::Gate; /// use saa::gate::State; /// /// let gate = Gate::default(); /// /// let (prev_state, count) = gate.seal(); /// /// assert_eq!(prev_state, State::Controlled); /// assert_eq!(count, 0); /// assert_eq!(gate.state(Relaxed), State::Sealed); /// ``` #[inline] pub fn seal(&self) -> (State, usize) { self.wake_all(Some(State::Sealed), Some(Error::Sealed)) } /// Enters the [`Gate`] asynchronously. /// /// Returns the current state of the [`Gate`]. 
/// /// # Errors /// /// Returns an [`Error`] if it failed to enter the [`Gate`]. /// /// # Examples /// /// ``` /// use futures::future; /// use saa::Gate; /// /// let gate = Gate::default(); /// /// let a = async { /// assert!(gate.enter_async().await.is_ok()); /// }; /// let b = async { /// gate.permit(); /// }; /// future::join(a, b); /// ``` #[inline] pub async fn enter_async(&self) -> Result { let mut pinned_pager = pin!(Pager::default()); pinned_pager .wait_queue() .construct(self, Opcode::Wait(0), false); self.push_wait_queue_entry(&mut pinned_pager, || {}); pinned_pager.poll_async().await } /// Enters the [`Gate`] synchronously. /// /// Returns the current state of the [`Gate`]. /// /// # Errors /// /// Returns an [`Error`] if it failed to enter the [`Gate`]. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// use std::thread; /// /// use saa::Gate; /// use saa::gate::State; /// /// let gate = Arc::new(Gate::default()); /// /// let gate_clone = gate.clone(); /// let thread_1 = thread::spawn(move || { /// assert_eq!(gate_clone.enter_sync(), Ok(State::Controlled)); /// }); /// /// let gate_clone = gate.clone(); /// let thread_2 = thread::spawn(move || { /// assert_eq!(gate_clone.enter_sync(), Ok(State::Controlled)); /// }); /// /// let mut count = 0; /// while count != 2 { /// if let Ok(n) = gate.permit() { /// count += n; /// } /// } /// /// thread_1.join().unwrap(); /// thread_2.join().unwrap(); /// ``` #[inline] pub fn enter_sync(&self) -> Result { self.enter_sync_with(|| ()) } /// Enters the [`Gate`] asynchronously with a wait callback. /// /// Returns the current state of the [`Gate`]. The callback is invoked when the task starts /// waiting. /// /// # Errors /// /// Returns an [`Error`] if it failed to enter the [`Gate`]. 
/// /// # Examples /// /// ``` /// use futures::future; /// use saa::Gate; /// /// let gate = Gate::default(); /// /// let a = async { /// let mut wait = false; /// assert!(gate.enter_async_with(|| wait = true).await.is_ok()); /// }; /// let b = async { /// gate.permit(); /// }; /// future::join(a, b); /// ``` #[inline] pub async fn enter_async_with(&self, begin_wait: F) -> Result { let mut pinned_pager = pin!(Pager::default()); pinned_pager .wait_queue() .construct(self, Opcode::Wait(0), false); self.push_wait_queue_entry(&mut pinned_pager, begin_wait); pinned_pager.poll_async().await } /// Enters the [`Gate`] synchronously with a wait callback. /// /// Returns the current state of the [`Gate`]. The callback is invoked when the task starts /// waiting. /// # Errors /// /// Returns an [`Error`] if it failed to enter the [`Gate`]. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// use std::thread; /// /// use saa::Gate; /// use saa::gate::State; /// /// let gate = Arc::new(Gate::default()); /// /// let gate_clone = gate.clone(); /// let thread_1 = thread::spawn(move || { /// let mut wait = false; /// assert_eq!(gate_clone.enter_sync_with(|| wait = true), Ok(State::Controlled)); /// }); /// /// let gate_clone = gate.clone(); /// let thread_2 = thread::spawn(move || { /// let mut wait = false; /// assert_eq!(gate_clone.enter_sync_with(|| wait = true), Ok(State::Controlled)); /// }); /// /// let mut count = 0; /// while count != 2 { /// if let Ok(n) = gate.permit() { /// count += n; /// } /// } /// /// thread_1.join().unwrap(); /// thread_2.join().unwrap(); /// ``` #[inline] pub fn enter_sync_with(&self, begin_wait: F) -> Result { let mut pinned_pager = pin!(Pager::default()); pinned_pager .wait_queue() .construct(self, Opcode::Wait(0), true); self.push_wait_queue_entry(&mut pinned_pager, begin_wait); pinned_pager.poll_sync() } /// Registers a [`Pager`] to allow it to get a permit to enter the [`Gate`] remotely. 
/// /// `is_sync` indicates whether the [`Pager`] will be polled asynchronously (`false`) or /// synchronously (`true`). /// /// Returns `false` if the [`Pager`] was already registered. /// /// # Examples /// /// ``` /// use std::pin::pin; /// use std::sync::Arc; /// use std::thread; /// /// use saa::{Gate, Pager}; /// use saa::gate::State; /// /// let gate = Arc::new(Gate::default()); /// /// let mut pinned_pager = pin!(Pager::default()); /// /// assert!(gate.register_pager(&mut pinned_pager, true)); /// assert!(!gate.register_pager(&mut pinned_pager, true)); /// /// let gate_clone = gate.clone(); /// let thread = thread::spawn(move || { /// assert_eq!(gate_clone.permit(), Ok(1)); /// }); /// /// thread.join().unwrap(); /// /// assert_eq!(pinned_pager.poll_sync(), Ok(State::Controlled)); /// ``` #[inline] pub fn register_pager<'g>( &'g self, pager: &mut Pin<&mut Pager<'g, Self>>, is_sync: bool, ) -> bool { if pager.is_registered() { return false; } pager.wait_queue().construct(self, Opcode::Wait(0), is_sync); self.push_wait_queue_entry(pager, || ()); true } /// Wakes up all waiting tasks and updates the state. /// /// Returns `(prev_state, count)` where `prev_state` is the previous state of the Gate and /// `count` is the number of tasks that were woken up. 
fn wake_all(&self, next_state: Option, error: Option) -> (State, usize) { match self.state.fetch_update(AcqRel, Acquire, |value| { if let Some(new_value) = next_state { Some(u8::from(new_value) as usize) } else { Some(value & WaitQueue::DATA_MASK) } }) { Ok(value) | Err(value) => { let mut count = 0; let prev_state = State::from(value & WaitQueue::DATA_MASK); let next_state = next_state.unwrap_or(prev_state); let result = Self::into_u8(next_state, error); let anchor_ptr = WaitQueue::to_anchor_ptr(value); if !anchor_ptr.is_null() { let tail_entry_ptr = WaitQueue::to_entry_ptr(anchor_ptr); Entry::iter_forward(tail_entry_ptr, false, |entry, _| { entry.set_result(result); count += 1; false }); } (prev_state, count) } } } /// Pushes the wait queue entry. #[inline] fn push_wait_queue_entry(&self, pager: &mut Pin<&mut Pager>, begin_wait: F) { loop { let state = self.state.load(Acquire); match State::from(state & WaitQueue::DATA_MASK) { State::Controlled => { if !self.try_push_wait_queue_entry(pager.wait_queue(), state) { continue; } begin_wait(); } State::Sealed => { pager .wait_queue() .entry() .set_result(Self::into_u8(State::Sealed, Some(Error::Sealed))); } State::Open => { pager .wait_queue() .entry() .set_result(Self::into_u8(State::Open, None)); } } break; } } /// Converts `(State, Error)` into `u8`. 
#[inline] fn into_u8(state: State, error: Option) -> u8 { u8::from(state) | error.map_or(0_u8, u8::from) } } impl Drop for Gate { #[inline] fn drop(&mut self) { if self.state.load(Relaxed) & WaitQueue::ADDR_MASK == 0 { return; } self.seal(); } } impl SyncPrimitive for Gate { #[inline] fn state(&self) -> &AtomicUsize { &self.state } #[inline] fn max_shared_owners() -> usize { usize::MAX } #[inline] fn drop_wait_queue_entry(entry: &Entry) { if entry.try_consume_result().is_none() { let this: &Self = entry.sync_primitive_ref(); this.wake_all(None, Some(Error::SpuriousFailure)); entry.acknowledge_result_sync(); } } } impl SyncResult for Gate { type Result = Result; #[inline] fn to_result(value: u8, pager_error: Option) -> Self::Result { if let Some(pager_error) = pager_error { match pager_error { pager::Error::NotRegistered => Err(Error::NotRegistered), pager::Error::WrongMode => Err(Error::WrongMode), pager::Error::NotReady => Err(Error::NotReady), } } else { let state = State::from(value & Self::STATE_MASK); let error = value & !(Self::STATE_MASK); if error != 0 { Err(Error::from(error)) } else { Ok(state) } } } } impl From for u8 { #[inline] fn from(value: State) -> Self { match value { State::Controlled => 0_u8, State::Sealed => 1_u8, State::Open => 2_u8, } } } impl From for State { #[inline] fn from(value: u8) -> Self { State::from(value as usize) } } impl From for State { #[inline] fn from(value: usize) -> Self { match value { 0 => State::Controlled, 1 => State::Sealed, _ => State::Open, } } } impl From for u8 { #[inline] fn from(value: Error) -> Self { match value { Error::Rejected => 4_u8, Error::Sealed => 8_u8, Error::SpuriousFailure => 12_u8, Error::NotRegistered => 16_u8, Error::WrongMode => 20_u8, Error::NotReady => 24_u8, } } } impl From for Error { #[inline] fn from(value: u8) -> Self { Error::from(value as usize) } } impl From for Error { #[inline] fn from(value: usize) -> Self { match value { 4 => Error::Rejected, 8 => Error::Sealed, 12 => 
Error::SpuriousFailure, 16 => Error::NotRegistered, 20 => Error::WrongMode, _ => Error::NotReady, } } } saa-5.4.2/src/lib.rs000064400000000000000000000011701046102023000123020ustar 00000000000000#![deny(missing_docs, clippy::all, clippy::pedantic)] #![doc = include_str!("../README.md")] pub mod barrier; pub use barrier::Barrier; pub mod gate; pub use gate::Gate; pub mod lock; pub use lock::Lock; #[cfg(all(feature = "lock_api", not(feature = "loom")))] pub mod lock_api; #[cfg(all(feature = "lock_api", not(feature = "loom")))] pub use lock_api::{ Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard, lock_async, read_async, write_async, }; pub mod pager; pub use pager::Pager; pub mod semaphore; pub use semaphore::Semaphore; mod opcode; mod sync_primitive; mod wait_queue; #[cfg(test)] mod tests; saa-5.4.2/src/lock.rs000064400000000000000000000625341046102023000124770ustar 00000000000000//! [`Lock`] is a low-level locking primitive for both synchronous and asynchronous operations. use std::fmt; use std::pin::Pin; #[cfg(not(feature = "loom"))] use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Relaxed, Release}; #[cfg(not(feature = "loom"))] use std::thread::yield_now; #[cfg(feature = "loom")] use loom::sync::atomic::AtomicUsize; #[cfg(feature = "loom")] use loom::thread::yield_now; use crate::Pager; use crate::opcode::Opcode; use crate::pager::{self, SyncResult}; use crate::sync_primitive::SyncPrimitive; use crate::wait_queue::{Entry, WaitQueue}; /// [`Lock`] is a low-level locking primitive for both synchronous and asynchronous operations. /// /// The locking semantics are similar to [`RwLock`](std::sync::RwLock), however, [`Lock`] only /// provides low-level locking and releasing methods, hence forcing the user to manage the scope of /// acquired locks and the resources to protect. #[derive(Default)] pub struct Lock { /// [`Lock`] state. state: AtomicUsize, } /// Operation mode. 
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Mode {
    /// Acquires an exclusive lock.
    Exclusive,
    /// Acquires a shared lock.
    Shared,
    /// Waits for the [`Lock`] to be free or poisoned.
    ///
    /// [`Self::WaitExclusive`], [`Self::WaitShared`], [`Lock::try_lock`], and [`Lock::try_share`]
    /// provide a way to bypass the fair queuing mechanism without spinning.
    WaitExclusive,
    /// Waits for a shared lock to be available or the [`Lock`] to be poisoned.
    ///
    /// If a [`Self::WaitExclusive`] entry is in front of a [`Self::WaitShared`] entry, the
    /// [`Self::WaitShared`] entry has to wait until the [`Self::WaitExclusive`] entry is processed.
    WaitShared,
}

impl Lock {
    /// Maximum number of shared owners.
    ///
    /// One below `DATA_MASK`: a data field equal to `DATA_MASK` itself encodes an
    /// exclusive holder (see [`Self::is_locked`]), so share counts stop one short of it.
    pub const MAX_SHARED_OWNERS: usize = WaitQueue::DATA_MASK - 1;

    /// Poisoned state.
    ///
    /// Only the queue-lock flag set with an empty data field; this bit pattern cannot
    /// occur during normal lock/share operation, so it unambiguously encodes poison.
    const POISONED_STATE: usize = WaitQueue::LOCKED_FLAG;

    /// Successfully acquired the desired lock (result code stored in a wait queue entry).
    const ACQUIRED: u8 = 0_u8;

    /// Failed to acquire the desired lock (result code).
    const NOT_ACQUIRED: u8 = 1_u8;

    /// Poisoned error code (result code).
    const POISONED: u8 = 2_u8;

    /// Maximum spin count before waiting.
    ///
    /// Bounds the optimistic retry loops in `try_lock_internal_spin` /
    /// `try_share_internal_spin` before the caller falls back to the wait queue.
    const MAX_SPIN_COUNT: usize = 128;

    /// Creates a new [`Lock`].
    ///
    /// # Examples
    ///
    /// ```
    /// use saa::Lock;
    ///
    /// let lock = Lock::new();
    /// ```
    #[cfg(not(feature = "loom"))]
    #[inline]
    #[must_use]
    pub const fn new() -> Self {
        Self {
            state: AtomicUsize::new(0),
        }
    }

    /// Creates a new [`Lock`].
    // `loom`'s `AtomicUsize::new` is not `const`, hence the separate non-`const` variant.
    #[cfg(feature = "loom")]
    #[inline]
    #[must_use]
    pub fn new() -> Self {
        Self {
            state: AtomicUsize::new(0),
        }
    }

    /// Returns `true` if the lock is currently free.
    ///
    /// # Examples
    ///
    /// ```
    /// use saa::Lock;
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// let lock = Lock::default();
    /// assert!(lock.is_free(Relaxed));
    ///
    /// lock.lock_sync();
    /// assert!(!lock.is_free(Relaxed));
    /// ```
    #[inline]
    pub fn is_free(&self, mo: Ordering) -> bool {
        let state = self.state.load(mo);
        // Free means: not poisoned, and no exclusive/shared owners in the data field.
        // Waiters may still be queued in the address bits.
        state != Self::POISONED_STATE && (state & WaitQueue::DATA_MASK) == 0
    }

    /// Returns `true` if an exclusive lock is currently held.
/// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// assert!(!lock.is_locked(Relaxed)); /// /// lock.lock_sync(); /// assert!(lock.is_locked(Relaxed)); /// assert!(!lock.is_shared(Relaxed)); /// ``` #[inline] pub fn is_locked(&self, mo: Ordering) -> bool { (self.state.load(mo) & WaitQueue::DATA_MASK) == WaitQueue::DATA_MASK } /// Returns `true` if shared locks are currently held. /// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// /// assert!(!lock.is_shared(Relaxed)); /// /// lock.share_sync(); /// assert!(lock.is_shared(Relaxed)); /// assert!(!lock.is_locked(Relaxed)); /// ``` #[inline] pub fn is_shared(&self, mo: Ordering) -> bool { let share_state = self.state.load(mo) & WaitQueue::DATA_MASK; share_state != 0 && share_state != WaitQueue::DATA_MASK } /// Returns `true` if the [`Lock`] is poisoned. /// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// assert!(!lock.is_poisoned(Relaxed)); /// /// lock.lock_sync(); /// assert!(lock.poison_lock()); /// assert!(lock.is_poisoned(Relaxed)); /// ``` #[inline] pub fn is_poisoned(&self, mo: Ordering) -> bool { self.state.load(mo) == Self::POISONED_STATE } /// Acquires an exclusive lock asynchronously. /// /// Returns `false` if the lock is poisoned. /// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// /// async { /// lock.lock_async().await; /// assert!(lock.is_locked(Relaxed)); /// assert!(!lock.is_shared(Relaxed)) /// }; /// ``` #[inline] pub async fn lock_async(&self) -> bool { self.lock_async_with(|| ()).await } /// Acquires an exclusive lock synchronously. /// /// Returns `false` if the lock is poisoned. 
/// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// /// lock.lock_sync(); /// /// assert!(lock.is_locked(Relaxed)); /// assert!(!lock.try_share()); /// ``` #[inline] pub fn lock_sync(&self) -> bool { self.lock_sync_with(|| ()) } /// Acquires an exclusive lock asynchronously with a wait callback. /// /// Returns `false` if the lock is poisoned. The callback is invoked when the task starts /// waiting for a lock. /// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// /// async { /// lock.lock_async().await; /// assert!(lock.is_locked(Relaxed)); /// assert!(!lock.is_shared(Relaxed)) /// }; /// ``` #[inline] pub async fn lock_async_with(&self, begin_wait: F) -> bool { loop { let (mut result, state) = self.try_lock_internal_spin(); if result == Self::ACQUIRED { return true; } else if result == Self::POISONED { return false; } debug_assert_eq!(result, Self::NOT_ACQUIRED); let async_wait = WaitQueue::default(); let async_wait_pinned = async_wait.pin(); async_wait_pinned.construct(self, Opcode::Exclusive, false); if self.try_push_wait_queue_entry(async_wait_pinned, state) { begin_wait(); result = async_wait_pinned.await; debug_assert!(result == Self::ACQUIRED || result == Self::POISONED); return result == Self::ACQUIRED; } } } /// Acquires an exclusive lock synchronously with a wait callback. /// /// Returns `false` if the lock is poisoned. The callback is invoked when the task starts /// waiting for a lock. 
/// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// /// lock.lock_sync(); /// /// assert!(lock.is_locked(Relaxed)); /// assert!(!lock.try_share()); /// ``` #[inline] pub fn lock_sync_with(&self, mut begin_wait: F) -> bool { loop { let (result, state) = self.try_lock_internal_spin(); if result == Self::ACQUIRED { return true; } else if result == Self::POISONED { return false; } debug_assert_eq!(result, Self::NOT_ACQUIRED); match self.wait_resources_sync(state, Opcode::Exclusive, begin_wait) { Ok(result) => { debug_assert!(result == Self::ACQUIRED || result == Self::POISONED); return result == Self::ACQUIRED; } Err(returned) => begin_wait = returned, } } } /// Tries to acquire an exclusive lock. /// /// Returns `false` if the lock was not free. /// /// # Examples /// /// ``` /// use saa::Lock; /// /// let lock = Lock::default(); /// /// assert!(lock.try_lock()); /// assert!(!lock.try_share()); /// assert!(!lock.try_lock()); /// ``` #[inline] pub fn try_lock(&self) -> bool { self.try_lock_internal(self.state.load(Acquire)).0 == Self::ACQUIRED } /// Acquires a shared lock asynchronously. /// /// Returns `false` if the lock is poisoned. /// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// /// async { /// lock.share_async().await; /// assert!(!lock.is_locked(Relaxed)); /// assert!(lock.is_shared(Relaxed)) /// }; /// ``` #[inline] pub async fn share_async(&self) -> bool { self.share_async_with(|| ()).await } /// Acquires a shared lock synchronously. /// /// Returns `false` if the lock is poisoned. 
/// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// /// lock.share_sync(); /// /// assert!(lock.is_shared(Relaxed)); /// assert!(!lock.try_lock()); /// ``` #[inline] pub fn share_sync(&self) -> bool { self.share_sync_with(|| ()) } /// Acquires a shared lock asynchronously with a wait callback. /// /// Returns `false` if the lock is poisoned. The callback is invoked when the task starts /// waiting for a lock. /// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// /// async { /// lock.share_async().await; /// assert!(!lock.is_locked(Relaxed)); /// assert!(lock.is_shared(Relaxed)) /// }; /// ``` #[inline] pub async fn share_async_with(&self, begin_wait: F) -> bool { loop { let (mut result, state) = self.try_share_internal_spin(); if result == Self::ACQUIRED { return true; } else if result == Self::POISONED { return false; } debug_assert_eq!(result, Self::NOT_ACQUIRED); let async_wait = WaitQueue::default(); let async_wait_pinned = async_wait.pin(); async_wait_pinned.construct(self, Opcode::Shared, false); if self.try_push_wait_queue_entry(async_wait_pinned, state) { begin_wait(); result = async_wait_pinned.await; debug_assert!(result == Self::ACQUIRED || result == Self::POISONED); return result == Self::ACQUIRED; } } } /// Acquires a shared lock synchronously with a wait callback. /// /// Returns `false` if the lock is poisoned. The callback is invoked when the task starts /// waiting for a lock. 
/// /// # Examples /// /// ``` /// use saa::Lock; /// use std::sync::atomic::Ordering::Relaxed; /// /// let lock = Lock::default(); /// /// lock.share_sync(); /// /// assert!(lock.is_shared(Relaxed)); /// assert!(!lock.try_lock()); /// ``` #[inline] pub fn share_sync_with(&self, mut begin_wait: F) -> bool { loop { let (result, state) = self.try_share_internal_spin(); if result == Self::ACQUIRED { return true; } else if result == Self::POISONED { return false; } debug_assert_eq!(result, Self::NOT_ACQUIRED); match self.wait_resources_sync(state, Opcode::Shared, begin_wait) { Ok(result) => { debug_assert!(result == Self::ACQUIRED || result == Self::POISONED); return result == Self::ACQUIRED; } Err(returned) => begin_wait = returned, } } } /// Tries to acquire a shared lock. /// /// Returns `false` if an exclusive lock is held, the number of shared owners has reached /// [`Self::MAX_SHARED_OWNERS`], or the lock is poisoned. /// /// # Examples /// /// ``` /// use saa::Lock; /// /// let lock = Lock::default(); /// /// assert!(lock.try_share()); /// assert!(lock.try_share()); /// assert!(!lock.try_lock()); /// ``` #[inline] pub fn try_share(&self) -> bool { self.try_share_internal(self.state.load(Acquire)).0 == Self::ACQUIRED } /// Registers a [`Pager`] to allow it to get an exclusive lock or a shared lock remotely. /// /// `is_sync` indicates whether the [`Pager`] will be polled asynchronously (`false`) or /// synchronously (`true`). /// /// Returns `false` if the [`Pager`] was already registered. 
///
/// # Examples
///
/// ```
/// use std::pin::pin;
///
/// use saa::{Lock, Pager};
/// use saa::lock::Mode;
///
/// let lock = Lock::default();
///
/// let mut pinned_pager = pin!(Pager::default());
///
/// assert!(lock.register_pager(&mut pinned_pager, Mode::Exclusive, true));
/// assert!(!lock.register_pager(&mut pinned_pager, Mode::Exclusive, true));
///
/// assert_eq!(pinned_pager.poll_sync(), Ok(true));
/// ```
#[inline]
pub fn register_pager<'l>(
    &'l self,
    pager: &mut Pin<&mut Pager<'l, Self>>,
    mode: Mode,
    is_sync: bool,
) -> bool {
    if pager.is_registered() {
        return false;
    }
    // Map the requested mode onto a wait queue opcode. The `Wait` payload presumably
    // encodes the required availability (`DATA_MASK` slots for exclusive, one for
    // shared) — TODO confirm against `Opcode`'s definition.
    let opcode = match mode {
        Mode::Exclusive => Opcode::Exclusive,
        Mode::Shared => Opcode::Shared,
        Mode::WaitExclusive => Opcode::Wait(u8::try_from(WaitQueue::DATA_MASK).unwrap_or(0)),
        Mode::WaitShared => Opcode::Wait(1),
    };
    pager.wait_queue().construct(self, opcode, is_sync);
    loop {
        let (result, state) = match mode {
            Mode::Exclusive => self.try_lock_internal_spin(),
            Mode::Shared => self.try_share_internal_spin(),
            Mode::WaitExclusive | Mode::WaitShared => {
                let state = self.state.load(Acquire);
                // BUGFIX: compare against the full poisoned state word
                // (`Self::POISONED_STATE`) rather than `usize::from(Self::POISONED)`.
                // The latter equals 2, which is indistinguishable from a state with two
                // shared owners, and a genuinely poisoned lock (whose data field is 0)
                // would have been misreported as immediately available below. This now
                // matches the poison checks in `try_lock_internal`/`try_share_internal`.
                let result = if state == Self::POISONED_STATE {
                    Self::POISONED
                } else if (mode == Mode::WaitExclusive && (state & WaitQueue::DATA_MASK) == 0)
                    || (mode == Mode::WaitShared
                        && (state & WaitQueue::DATA_MASK) < Self::MAX_SHARED_OWNERS)
                {
                    // The lock is available in the desired mode; report immediately.
                    Self::ACQUIRED
                } else {
                    Self::NOT_ACQUIRED
                };
                (result, state)
            }
        };
        if result == Self::ACQUIRED || result == Self::POISONED {
            // The outcome is already known; store it without enqueueing the pager.
            pager.wait_queue().entry().set_result(result);
            break;
        }
        // Enqueue the pager; retry from the top if the state changed concurrently.
        if self.try_push_wait_queue_entry(pager.wait_queue(), state) {
            break;
        }
    }
    true
}

/// Releases an exclusive lock.
///
/// Returns `true` if an exclusive lock was previously held and successfully released.
/// /// # Examples /// /// ``` /// use saa::Lock; /// /// let lock = Lock::default(); /// /// lock.lock_sync(); /// /// assert!(lock.release_lock()); /// assert!(!lock.release_lock()); /// /// assert!(lock.try_share()); /// assert!(!lock.release_lock()); /// assert!(lock.release_share()); /// /// lock.lock_sync(); /// lock.poison_lock(); /// /// assert!(!lock.release_lock()); /// ``` #[inline] pub fn release_lock(&self) -> bool { match self .state .compare_exchange(WaitQueue::DATA_MASK, 0, Release, Relaxed) { Ok(_) => true, Err(state) => self.release_loop(state, Opcode::Exclusive), } } /// Poisons the lock with an exclusive lock held. /// /// Returns `false` if an exclusive lock is not held, or the lock was already poisoned. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use saa::Lock; /// /// let lock = Lock::default(); /// /// assert!(!lock.poison_lock()); /// /// lock.lock_sync(); /// /// assert!(lock.poison_lock()); /// assert!(lock.is_poisoned(Relaxed)); /// /// assert!(!lock.poison_lock()); /// /// assert!(!lock.lock_sync()); /// assert!(!lock.share_sync()); /// assert!(!lock.release_lock()); /// assert!(!lock.release_share()); /// ``` #[inline] pub fn poison_lock(&self) -> bool { match self.state.compare_exchange( WaitQueue::DATA_MASK, Self::POISONED_STATE, Release, Relaxed, ) { Ok(_) => true, Err(state) => self.poison_lock_internal(state), } } /// Clears poison from the lock. /// /// Returns `true` if the lock was successfully cleared. /// /// # Examples /// /// ``` /// use std::sync::atomic::Ordering::Relaxed; /// /// use saa::Lock; /// /// let lock = Lock::default(); /// /// assert!(!lock.poison_lock()); /// /// lock.lock_sync(); /// /// assert!(lock.poison_lock()); /// assert!(lock.clear_poison()); /// assert!(!lock.is_poisoned(Relaxed)); /// ``` #[inline] pub fn clear_poison(&self) -> bool { self.state .compare_exchange(Self::POISONED_STATE, 0, Release, Relaxed) .is_ok() } /// Releases a shared lock. 
///
/// Returns `true` if a shared lock was previously held and successfully released.
///
/// # Examples
///
/// ```
/// use saa::Lock;
///
/// let lock = Lock::default();
///
/// lock.share_sync();
/// lock.share_sync();
///
/// assert!(lock.release_share());
///
/// assert!(!lock.try_lock());
/// assert!(lock.release_share());
///
/// assert!(!lock.release_share());
/// assert!(lock.try_lock());
///
/// lock.poison_lock();
///
/// assert!(!lock.release_share());
/// ```
#[inline]
pub fn release_share(&self) -> bool {
    // Fast path: the state word is a bare share count (no waiters in the address bits,
    // not exclusively held, not poisoned), so a plain decrement suffices.
    match self.state.fetch_update(Release, Relaxed, |state| {
        if state != 0 && state <= Self::MAX_SHARED_OWNERS {
            Some(state - 1)
        } else {
            None
        }
    }) {
        Ok(_) => true,
        // Slow path: extra bits are set (queued waiters, exclusive owner, or poison);
        // let the generic release logic process the wait queue.
        Err(state) => self.release_loop(state, Opcode::Shared),
    }
}

/// Tries to acquire an exclusive lock with spin-backoff.
#[inline]
fn try_lock_internal_spin(&self) -> (u8, usize) {
    // Optimistic first attempt assuming a completely free lock.
    let Err(mut state) = self
        .state
        .compare_exchange(0, WaitQueue::DATA_MASK, Acquire, Acquire)
    else {
        return (Self::ACQUIRED, 0);
    };
    let mut result = Self::NOT_ACQUIRED;
    for spin_count in 0..Self::MAX_SPIN_COUNT {
        (result, state) = self.try_lock_internal(state);
        if result != Self::NOT_ACQUIRED {
            return (result, state);
        }
        // Back off by yielding the thread every fourth iteration.
        if spin_count % 4 == 0 {
            yield_now();
        }
    }
    (result, state)
}

/// Tries to acquire an exclusive lock.
#[inline]
fn try_lock_internal(&self, mut state: usize) -> (u8, usize) {
    loop {
        if state == Self::POISONED_STATE {
            return (Self::POISONED, state);
        } else if state != 0 {
            // Held (shared or exclusive) or has queued waiters: not acquirable now.
            return (Self::NOT_ACQUIRED, state);
        }
        // `DATA_MASK` in the data field encodes the exclusive holder.
        match self
            .state
            .compare_exchange(0, WaitQueue::DATA_MASK, Acquire, Acquire)
        {
            Ok(_) => return (Self::ACQUIRED, 0),
            Err(new_state) => state = new_state,
        }
    }
}

/// Tries to acquire a shared lock with spin-backoff.
#[inline] fn try_share_internal_spin(&self) -> (u8, usize) { let Err(mut state) = self.state.fetch_update(Acquire, Acquire, |state| { if state < Self::MAX_SHARED_OWNERS { Some(state + 1) } else { None } }) else { return (Self::ACQUIRED, 0); }; let mut result = Self::NOT_ACQUIRED; for spin_count in 0..Self::MAX_SPIN_COUNT { (result, state) = self.try_share_internal(state); if result != Self::NOT_ACQUIRED { return (result, state); } if spin_count % 4 == 0 { yield_now(); } } (result, state) } /// Tries to acquire a shared lock. #[inline] fn try_share_internal(&self, mut state: usize) -> (u8, usize) { loop { if state == Self::POISONED_STATE { return (Self::POISONED, state); } else if state >= Self::MAX_SHARED_OWNERS { return (Self::NOT_ACQUIRED, state); } match self .state .compare_exchange(state, state + 1, Acquire, Acquire) { Ok(_) => return (Self::ACQUIRED, 0), Err(new_state) => state = new_state, } } } /// Poisons the lock. fn poison_lock_internal(&self, mut state: usize) -> bool { loop { if state == Self::POISONED_STATE || state & WaitQueue::DATA_MASK != WaitQueue::DATA_MASK { // Already poisoned or the lock is not held exclusively by the current thread. return false; } if state & WaitQueue::LOCKED_FLAG == WaitQueue::LOCKED_FLAG { // This only happens when an asynchronous task is being cancelled. yield_now(); state = self.state.load(Relaxed); continue; } match self .state .compare_exchange(state, Self::POISONED_STATE, AcqRel, Relaxed) { Ok(prev_state) => { // A possible data race where the lock is being poisoned before the one that // woke up the current lock owner has finished processing the wait queue is // prevented by the wait queue processing method itself; `model.rs` proves it. 
debug_assert_eq!(prev_state & WaitQueue::LOCKED_FLAG, 0); let anchor_ptr = WaitQueue::to_anchor_ptr(prev_state); if !anchor_ptr.is_null() { let tail_entry_ptr = WaitQueue::to_entry_ptr(anchor_ptr); Entry::iter_forward(tail_entry_ptr, false, |entry, _| { entry.set_result(Self::POISONED); false }); } return true; } Err(new_state) => state = new_state, } } } } impl fmt::Debug for Lock { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let state = self.state.load(Relaxed); let lock_share_state = state & WaitQueue::DATA_MASK; let locked = lock_share_state == WaitQueue::DATA_MASK; let share_count = if locked { 0 } else { lock_share_state }; let poisoned = state == Self::POISONED_STATE; let wait_queue_being_processed = state & WaitQueue::LOCKED_FLAG == WaitQueue::LOCKED_FLAG; let wait_queue_tail_addr = state & WaitQueue::ADDR_MASK; f.debug_struct("WaitQueue") .field("state", &state) .field("locked", &locked) .field("share_count", &share_count) .field("poisoned", &poisoned) .field("wait_queue_being_processed", &wait_queue_being_processed) .field("wait_queue_tail_addr", &wait_queue_tail_addr) .finish() } } impl SyncPrimitive for Lock { #[inline] fn state(&self) -> &AtomicUsize { &self.state } #[inline] fn max_shared_owners() -> usize { Self::MAX_SHARED_OWNERS } #[inline] fn drop_wait_queue_entry(entry: &Entry) { Self::force_remove_wait_queue_entry(entry); } } impl SyncResult for Lock { type Result = Result; #[inline] fn to_result(value: u8, pager_error: Option) -> Self::Result { pager_error.map_or_else( || { debug_assert!(value == Self::ACQUIRED || value == Self::POISONED); Ok(value == Self::ACQUIRED) }, Err, ) } } saa-5.4.2/src/lock_api.rs000064400000000000000000000104611046102023000133200ustar 00000000000000//! Implementations of locking traits from the [`lock_api`](https://crates.io/crates/lock_api) crate. use super::Lock; /// A mutual exclusion primitive for protecting shared data of type `T`. 
///
/// # Examples
///
/// ```
/// use saa::Mutex;
///
/// let mutex: Mutex<usize> = Mutex::new(0);
/// ```
// NOTE(review): every generic-argument list in this region was stripped by the HTML-like
// extraction (the stored text reads `pub type Mutex = lock_api::Mutex;`,
// `lock_async(mutex: &Mutex) -> MutexGuard<'_, T>`, ...). The parameters below are
// reconstructed from the surviving guard aliases such as
// `lock_api::MutexGuard<'a, Lock, T>` — confirm against the upstream crate.
pub type Mutex<T> = lock_api::Mutex<Lock, T>;

/// Acquires a mutex, asynchronously.
///
/// # Examples
///
/// ```
/// use saa::{Mutex, MutexGuard, lock_async};
///
/// let mutex: Mutex<usize> = Mutex::new(0);
///
/// async {
///     let mut guard: MutexGuard<usize> = lock_async(&mutex).await;
/// };
/// ```
#[inline]
pub async fn lock_async<T>(mutex: &Mutex<T>) -> MutexGuard<'_, T> {
    // SAFETY: the raw lock is acquired exclusively on the line above, which is
    // exactly the precondition `make_guard_unchecked` requires.
    unsafe {
        mutex.raw().lock_async().await;
        mutex.make_guard_unchecked()
    }
}

/// A scoped mutex guard.
///
/// # Examples
///
/// ```
/// use saa::{Mutex, MutexGuard};
///
/// let mutex: Mutex<usize> = Mutex::new(0);
/// let mut guard: MutexGuard<usize> = mutex.lock();
/// *guard += 1;
/// drop(guard);
///
/// assert_eq!(*mutex.try_lock().unwrap(), 1);
/// ```
pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, Lock, T>;

/// A reader-writer lock for protecting shared data of type `T`.
///
/// # Examples
///
/// ```
/// use saa::RwLock;
///
/// let rwlock: RwLock<usize> = RwLock::new(0);
/// ```
pub type RwLock<T> = lock_api::RwLock<Lock, T>;

/// A scoped read lock.
///
/// # Examples
///
/// ```
/// use saa::{RwLock, RwLockReadGuard};
///
/// let rwlock: RwLock<usize> = RwLock::new(0);
/// let guard: RwLockReadGuard<usize> = rwlock.read();
/// assert_eq!(*guard, 0);
/// ```
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, Lock, T>;

/// Locks the [`RwLock`] with shared read access, asynchronously.
///
/// # Examples
///
/// ```
/// use saa::{RwLock, RwLockReadGuard, read_async};
///
/// let rwlock: RwLock<usize> = RwLock::new(0);
///
/// async {
///     let guard: RwLockReadGuard<usize> = read_async(&rwlock).await;
/// };
/// ```
#[inline]
pub async fn read_async<T>(rwlock: &RwLock<T>) -> RwLockReadGuard<'_, T> {
    // SAFETY: shared ownership of the raw lock is acquired on the line above, which
    // is the precondition `make_read_guard_unchecked` requires.
    unsafe {
        rwlock.raw().share_async().await;
        rwlock.make_read_guard_unchecked()
    }
}

/// A scoped write lock.
/// /// # Examples /// /// ``` /// use saa::{RwLock, RwLockWriteGuard}; /// /// let rwlock: RwLock = RwLock::new(0); /// let mut guard: RwLockWriteGuard = rwlock.write(); /// *guard += 1; /// drop(guard); /// /// assert_eq!(*rwlock.read(), 1); /// ``` pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, Lock, T>; /// Locks the [`RwLock`] with exclusive write access, asynchronously. /// /// # Examples /// /// ``` /// use saa::{RwLock, RwLockWriteGuard, write_async}; /// /// let rwlock: RwLock = RwLock::new(0); /// /// async { /// let guard: RwLockWriteGuard = write_async(&rwlock).await; /// }; /// ``` #[inline] pub async fn write_async(rwlock: &RwLock) -> RwLockWriteGuard<'_, T> { unsafe { rwlock.raw().lock_async().await; rwlock.make_write_guard_unchecked() } } unsafe impl lock_api::RawMutex for Lock { const INIT: Self = Lock::new(); type GuardMarker = lock_api::GuardSend; #[inline] fn lock(&self) { self.lock_sync(); } #[inline] fn try_lock(&self) -> bool { self.try_lock() } #[inline] unsafe fn unlock(&self) { self.release_lock(); } } unsafe impl lock_api::RawMutexFair for Lock { #[inline] unsafe fn unlock_fair(&self) { self.release_lock(); } } unsafe impl lock_api::RawRwLock for Lock { const INIT: Self = Lock::new(); type GuardMarker = lock_api::GuardSend; #[inline] fn lock_shared(&self) { self.share_sync(); } #[inline] fn try_lock_shared(&self) -> bool { self.try_share() } #[inline] unsafe fn unlock_shared(&self) { self.release_share(); } #[inline] fn lock_exclusive(&self) { self.lock_sync(); } #[inline] fn try_lock_exclusive(&self) -> bool { self.try_lock() } #[inline] unsafe fn unlock_exclusive(&self) { self.release_lock(); } } unsafe impl lock_api::RawRwLockFair for Lock { #[inline] unsafe fn unlock_shared_fair(&self) { self.release_share(); } #[inline] unsafe fn unlock_exclusive_fair(&self) { self.release_lock(); } } saa-5.4.2/src/opcode.rs000064400000000000000000000047441046102023000130170ustar 00000000000000//! 
Primitive synchronization operation types. use crate::wait_queue::WaitQueue; /// Operation types. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub(crate) enum Opcode { /// Acquires exclusive ownership. Exclusive, /// Acquires shared ownership. Shared, /// Barrier operation where the boolean flag indicates whether the operation is wait-only. Barrier(bool), /// Acquires semaphores. Semaphore(u8), /// Waits until the desired resources are available. Wait(u8), } impl Opcode { /// Checks if the resource expressed in `self` can be released from `state`. #[inline] pub(crate) const fn can_release(self, state: usize) -> bool { match self { Opcode::Exclusive => { let data = state & WaitQueue::DATA_MASK; data == WaitQueue::DATA_MASK } Opcode::Shared => { let data = state & WaitQueue::DATA_MASK; data >= 1 && data != WaitQueue::DATA_MASK } Opcode::Semaphore(count) => { let data = state & WaitQueue::DATA_MASK; let count = count as usize; data >= count } Opcode::Barrier(_) | Opcode::Wait(_) => true, } } /// Converts the operation mode into a `usize` value representing the desired resources. #[inline] pub(crate) const fn desired_count(self) -> usize { match self { Opcode::Exclusive => WaitQueue::DATA_MASK, Opcode::Shared => 1, Opcode::Barrier(wait_only) => { if wait_only { 0 } else { 1 } } Opcode::Semaphore(count) | Opcode::Wait(count) => { let count = count as usize; debug_assert!(count < WaitQueue::LOCKED_FLAG); count } } } /// Converts the operation mode into a `usize` value representing the resource held by the /// corresponding synchronization primitive. #[inline] pub(crate) const fn acquired_count(self) -> usize { match self { Opcode::Exclusive => WaitQueue::DATA_MASK, Opcode::Shared => 1, Opcode::Semaphore(count) => { let count = count as usize; debug_assert!(count < WaitQueue::LOCKED_FLAG); count } Opcode::Barrier(_) | Opcode::Wait(_) => 0, } } } saa-5.4.2/src/pager.rs000064400000000000000000000126231046102023000126370ustar 00000000000000//! 
[`Pager`] allows the user to remotely wait for a desired resource. #![deny(unsafe_code)] use std::cell::UnsafeCell; use std::marker::{PhantomData, PhantomPinned}; use std::pin::Pin; use crate::wait_queue::{Entry, WaitQueue}; /// Tasks holding a [`Pager`] can remotely acquire a desired resource. /// /// [`Pager`] contains a wait queue entry which forms an intrusive linked list. It is important that /// the [`Pager`] is not moved while it is registered in a synchronization primitive, otherwise it /// may lead to undefined behavior, therefore [`Pager`] does not implement [`Unpin`]. #[derive(Debug, Default)] pub struct Pager<'s, S: SyncResult> { /// The wait queue for the [`Pager`]. wait_queue: UnsafeCell, /// The [`Pager`] cannot outlive the associated synchronization primitive. _phantom: PhantomData<&'s S>, /// The [`Pager`] cannot be unpinned. _pinned: PhantomPinned, } /// Errors that can occur when using a [`Pager`]. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Error { /// The [`Pager`] is not registered in a synchronization primitive. NotRegistered, /// The [`Pager`] is registered in a synchronization primitive with a different mode. WrongMode, /// The result is not ready. NotReady, } /// Defines result value interpretation interfaces. pub trait SyncResult: Sized { /// Operation result type. type Result: Clone + Copy + Eq + PartialEq; /// Converts a `u8` value into a `Self::Result`. fn to_result(value: u8, pager_error: Option) -> Self::Result; } impl<'s, S: SyncResult> Pager<'s, S> { /// Returns `true` if the [`Pager`] is registered in a synchronization primitive. 
/// /// # Examples /// /// ``` /// use std::pin::pin; /// /// use saa::{Gate, Pager}; /// use saa::gate::State; /// /// let gate = Gate::default(); /// /// let mut pinned_pager = pin!(Pager::default()); /// assert!(!pinned_pager.is_registered()); /// /// assert!(gate.register_pager(&mut pinned_pager, false)); /// assert!(pinned_pager.is_registered()); /// /// assert!(!gate.register_pager(&mut pinned_pager, false)); /// assert!(pinned_pager.is_registered()); /// ``` #[inline] pub fn is_registered(self: &mut Pin<&mut Pager<'s, S>>) -> bool { self.wait_queue().is_pollable() } /// Waits for the desired resource to become available asynchronously. /// /// # Examples /// /// ``` /// use std::pin::pin; /// /// use saa::{Gate, Pager}; /// use saa::gate::State; /// /// let gate = Gate::default(); /// /// let mut pinned_pager = pin!(Pager::default()); /// /// assert!(gate.register_pager(&mut pinned_pager, false)); /// /// assert_eq!(gate.open().1, 1); /// /// async { /// assert_eq!(pinned_pager.poll_async().await, Ok(State::Open)); /// }; /// ``` #[inline] pub async fn poll_async(self: &mut Pin<&mut Pager<'s, S>>) -> S::Result { if !self.is_registered() { return S::to_result(0, Some(Error::NotRegistered)); } let result = self.wait_queue().await; if result == Entry::ERROR_WRONG_MODE { return S::to_result(result, Some(Error::WrongMode)); } S::to_result(result, None) } /// Waits for the desired resource to become available synchronously. 
/// /// # Examples /// /// ``` /// use std::pin::pin; /// /// use saa::{Gate, Pager}; /// use saa::gate::State; /// /// let gate = Gate::default(); /// /// let mut pinned_pager = pin!(Pager::default()); /// /// assert!(gate.register_pager(&mut pinned_pager, true)); /// /// assert_eq!(gate.open().1, 1); /// /// assert_eq!(pinned_pager.poll_sync(), Ok(State::Open)); /// ``` #[inline] pub fn poll_sync(self: &mut Pin<&mut Pager<'s, S>>) -> S::Result { if !self.is_registered() { return S::to_result(0, Some(Error::NotRegistered)); } let result = self.wait_queue().entry().poll_result_sync(); if result == Entry::ERROR_WRONG_MODE { return S::to_result(result, Some(Error::WrongMode)); } S::to_result(result, None) } /// Tries to get the result. /// /// # Examples /// /// ``` /// use std::pin::pin; /// /// use saa::{Gate, Pager}; /// use saa::gate::{Error, State}; /// /// let gate = Gate::default(); /// /// let mut pinned_pager = pin!(Pager::default()); /// /// assert_eq!(pinned_pager.try_poll(), Err(Error::NotRegistered)); /// /// assert!(gate.register_pager(&mut pinned_pager, true)); /// /// assert_eq!(pinned_pager.try_poll(), Err(Error::NotReady)); /// assert_eq!(gate.open().1, 1); /// /// assert_eq!(pinned_pager.try_poll(), Ok(State::Open)); /// ``` #[inline] pub fn try_poll(self: &mut Pin<&mut Pager<'s, S>>) -> S::Result { if !self.is_registered() { return S::to_result(0, Some(Error::NotRegistered)); } if let Some(result) = self.wait_queue().entry().try_consume_result() { S::to_result(result, None) } else { S::to_result(0, Some(Error::NotReady)) } } /// Returns a reference to the wait queue entry. #[inline] pub(crate) fn wait_queue(&self) -> Pin<&WaitQueue> { WaitQueue::pin_ptr(self.wait_queue.get()) } } saa-5.4.2/src/semaphore.rs000064400000000000000000000422041046102023000135220ustar 00000000000000//! [`Semaphore`] is a synchronization primitive that allows a fixed number of threads to access a //! resource concurrently. 
#![deny(unsafe_code)] use std::fmt; use std::pin::Pin; #[cfg(not(feature = "loom"))] use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::{self, Acquire, Relaxed, Release}; #[cfg(feature = "loom")] use loom::sync::atomic::AtomicUsize; use crate::Pager; use crate::opcode::Opcode; use crate::pager::{self, SyncResult}; use crate::sync_primitive::SyncPrimitive; use crate::wait_queue::{Entry, WaitQueue}; /// [`Semaphore`] is a synchronization primitive that allows a fixed number of threads to access a /// resource concurrently. #[derive(Default)] pub struct Semaphore { /// [`Semaphore`] state. state: AtomicUsize, } impl Semaphore { /// Maximum number of concurrent owners. pub const MAX_PERMITS: usize = WaitQueue::DATA_MASK; /// Creates a new [`Semaphore`]. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// /// let semaphore = Semaphore::new(); /// ``` #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn new() -> Self { Self { state: AtomicUsize::new(0), } } /// Creates a new [`Semaphore`]. #[cfg(feature = "loom")] #[inline] #[must_use] pub fn new() -> Self { Self { state: AtomicUsize::new(0), } } /// Creates a new [`Semaphore`] with the given number of initially available permits. /// /// The maximum number of available permits is [`MAX_PERMITS`](Self::MAX_PERMITS), and if a /// value greater than or equal to [`MAX_PERMITS`](Self::MAX_PERMITS) is provided, it will be /// set to [`MAX_PERMITS`](Self::MAX_PERMITS). 
/// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::with_permits(11); /// /// assert_eq!(semaphore.available_permits(Relaxed), 11); /// /// assert!(semaphore.try_acquire_many(11)); /// assert!(!semaphore.is_open(Relaxed)); /// ``` #[inline] #[must_use] pub fn with_permits(permits: usize) -> Self { let adjusted_permits = permits.min(Self::MAX_PERMITS); Self { state: AtomicUsize::new(Self::MAX_PERMITS - adjusted_permits), } } /// Returns `true` if the semaphore is currently open. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// assert!(semaphore.is_open(Relaxed)); /// /// assert!(semaphore.try_acquire_many(Semaphore::MAX_PERMITS)); /// assert!(!semaphore.is_open(Relaxed)); /// ``` #[inline] pub fn is_open(&self, mo: Ordering) -> bool { let state = self.state.load(mo); (state & WaitQueue::DATA_MASK) != Self::MAX_PERMITS } /// Returns `true` if the semaphore is currently closed. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// assert!(!semaphore.is_closed(Relaxed)); /// assert!(semaphore.is_open(Relaxed)); /// /// assert!(semaphore.try_acquire()); /// assert!(!semaphore.is_closed(Relaxed)); /// assert!(semaphore.is_open(Relaxed)); /// /// semaphore.try_acquire_many(Semaphore::MAX_PERMITS - 1); /// assert!(semaphore.is_closed(Relaxed)); /// ``` #[inline] pub fn is_closed(&self, mo: Ordering) -> bool { (self.state.load(mo) & WaitQueue::DATA_MASK) == WaitQueue::DATA_MASK } /// Returns the number of available permits. 
/// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS); /// /// assert!(semaphore.try_acquire()); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 1); /// ``` #[inline] pub fn available_permits(&self, mo: Ordering) -> usize { Self::MAX_PERMITS - (self.state.load(mo) & WaitQueue::DATA_MASK) } /// Gets a permit from the semaphore asynchronously. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// async { /// semaphore.acquire_async().await; /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 1); /// }; /// ``` #[inline] pub async fn acquire_async(&self) { self.acquire_many_async_with(1, || {}).await; } /// Gets a permit from the semaphore synchronously. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// semaphore.acquire_sync(); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 1); /// ``` #[inline] pub fn acquire_sync(&self) { self.acquire_many_sync_with(1, || ()); } /// Gets a permit from the semaphore asynchronously with a wait callback. /// /// The callback is invoked when the task starts waiting for a permit. 
/// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// async { /// let mut wait = false; /// semaphore.acquire_async_with(|| { wait = true; }).await; /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 1); /// assert!(!wait); /// }; /// ``` #[inline] pub async fn acquire_async_with(&self, begin_wait: F) { self.acquire_many_async_with(1, begin_wait).await; } /// Gets multiple permits from the semaphore synchronously with a wait callback. /// /// The callback is invoked when the task starts waiting for permits. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// let mut wait = false; /// semaphore.acquire_sync_with(|| { wait = true; }); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 1); /// assert!(!wait); /// ``` #[inline] pub fn acquire_sync_with(&self, begin_wait: F) { self.acquire_many_sync_with(1, begin_wait); } /// Tries to get a permit from the semaphore. /// /// Returns `false` if no permits are available. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// /// let semaphore = Semaphore::default(); /// /// assert!(semaphore.try_acquire()); /// assert!(!semaphore.try_acquire_many(Semaphore::MAX_PERMITS)); /// ``` #[inline] pub fn try_acquire(&self) -> bool { self.try_acquire_internal(1).0 } /// Gets multiple permits from the semaphore asynchronously. /// /// Returns `false` if the count exceeds [`Self::MAX_PERMITS`]. 
/// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// async { /// assert!(semaphore.acquire_many_async(11).await); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 11); /// }; /// ``` #[inline] pub async fn acquire_many_async(&self, count: usize) -> bool { self.acquire_many_async_with(count, || {}).await } /// Gets multiple permits from the semaphore synchronously. /// /// Returns `false` if the count exceeds [`Self::MAX_PERMITS`]. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// assert!(semaphore.acquire_many_sync(11)); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 11); /// ``` #[inline] pub fn acquire_many_sync(&self, count: usize) -> bool { self.acquire_many_sync_with(count, || ()) } /// Gets multiple permits from the semaphore asynchronously with a wait callback. /// /// Returns `false` if the count exceeds [`Self::MAX_PERMITS`]. The callback is invoked when the /// task starts waiting for permits. 
/// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// async { /// let mut wait = false; /// assert!(semaphore.acquire_many_async_with(2, || { wait = true; }).await); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 2); /// assert!(!wait); /// }; /// ``` #[inline] pub async fn acquire_many_async_with(&self, count: usize, begin_wait: F) -> bool { if count > Self::MAX_PERMITS { return false; } let Ok(count) = u8::try_from(count) else { return false; }; loop { let (result, state) = self.try_acquire_internal(count); if result { return true; } let async_wait = WaitQueue::default(); let async_wait_pinned = async_wait.pin(); async_wait_pinned.construct(self, Opcode::Semaphore(count), false); if self.try_push_wait_queue_entry(async_wait_pinned, state) { begin_wait(); async_wait_pinned.await; return true; } } } /// Gets multiple permits from the semaphore synchronously with a wait callback. /// /// Returns `false` if the count exceeds [`Self::MAX_PERMITS`]. The callback is invoked when the /// task starts waiting for permits. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// let mut wait = false; /// assert!(semaphore.acquire_many_sync_with(2, || { wait = true; })); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 2); /// assert!(!wait); /// ``` #[inline] pub fn acquire_many_sync_with(&self, count: usize, mut begin_wait: F) -> bool { if count > Self::MAX_PERMITS { return false; } let Ok(count) = u8::try_from(count) else { return false; }; loop { let (result, state) = self.try_acquire_internal(count); if result { return true; } // The value is checked in `try_acquire_internal`. 
if let Err(returned) = self.wait_resources_sync(state, Opcode::Semaphore(count), begin_wait) { begin_wait = returned; } else { return true; } } } /// Tries to get multiple permits from the semaphore. /// /// Returns `false` if no permits are available. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// /// let semaphore = Semaphore::default(); /// /// assert!(semaphore.try_acquire_many(Semaphore::MAX_PERMITS)); /// assert!(!semaphore.try_acquire()); /// ``` #[inline] pub fn try_acquire_many(&self, count: usize) -> bool { if count > Self::MAX_PERMITS { return false; } let Ok(count) = u8::try_from(count) else { return false; }; self.try_acquire_internal(count).0 } /// Registers a [`Pager`] to allow it to get a permit remotely. /// /// `is_sync` indicates whether the [`Pager`] will be polled asynchronously (`false`) or /// synchronously (`true`). /// /// Returns `false` if the [`Pager`] was already registered, or if the count is greater than the /// maximum number of permits. /// /// # Examples /// /// ``` /// use std::pin::pin; /// /// use saa::{Pager, Semaphore}; /// /// let semaphore = Semaphore::default(); /// /// let mut pinned_pager = pin!(Pager::default()); /// /// assert!(semaphore.register_pager(&mut pinned_pager, 1, true)); /// assert!(!semaphore.register_pager(&mut pinned_pager, 1, true)); /// /// assert!(pinned_pager.poll_sync().is_ok()); /// ``` #[inline] pub fn register_pager<'s>( &'s self, pager: &mut Pin<&mut Pager<'s, Self>>, count: usize, is_sync: bool, ) -> bool { if count > Self::MAX_PERMITS || pager.is_registered() { return false; } let Ok(count) = u8::try_from(count) else { return false; }; pager .wait_queue() .construct(self, Opcode::Semaphore(count), is_sync); loop { let (result, state) = self.try_acquire_internal(count); if result { pager.wait_queue().entry().set_result(0); break; } if self.try_push_wait_queue_entry(pager.wait_queue(), state) { break; } } true } /// Releases a permit. 
/// /// Returns `true` if a permit was successfully released. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// assert!(semaphore.try_acquire_many(11)); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 11); /// /// assert!(semaphore.release()); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 10); /// ``` #[inline] pub fn release(&self) -> bool { match self.state.compare_exchange(1, 0, Release, Relaxed) { Ok(_) => true, Err(state) => self.release_loop(state, Opcode::Semaphore(1)), } } /// Releases permits. /// /// Returns `true` if the specified number of permits were successfully released. /// /// # Examples /// /// ``` /// use saa::Semaphore; /// use std::sync::atomic::Ordering::Relaxed; /// /// let semaphore = Semaphore::default(); /// /// assert!(semaphore.try_acquire_many(11)); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 11); /// /// assert!(semaphore.release_many(10)); /// assert_eq!(semaphore.available_permits(Relaxed), Semaphore::MAX_PERMITS - 1); /// ``` #[inline] pub fn release_many(&self, count: usize) -> bool { let Ok(count) = u8::try_from(count) else { return false; }; match self .state .compare_exchange(count as usize, 0, Release, Relaxed) { Ok(_) => true, Err(state) => self.release_loop(state, Opcode::Semaphore(count)), } } /// Tries to acquire a permit. #[inline] fn try_acquire_internal(&self, count: u8) -> (bool, usize) { let mut state = self.state.load(Acquire); loop { if state & WaitQueue::ADDR_MASK != 0 || (state & WaitQueue::DATA_MASK) + usize::from(count) > Self::MAX_PERMITS { // There is a waiting thread, or the semaphore can no longer be shared. 
return (false, state); } match self .state .compare_exchange(state, state + usize::from(count), Acquire, Acquire) { Ok(_) => return (true, 0), Err(new_state) => state = new_state, } } } } impl fmt::Debug for Semaphore { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let state = self.state.load(Relaxed); let available_permits = Self::MAX_PERMITS - (state & WaitQueue::DATA_MASK); let wait_queue_being_processed = state & WaitQueue::LOCKED_FLAG == WaitQueue::LOCKED_FLAG; let wait_queue_tail_addr = state & WaitQueue::ADDR_MASK; f.debug_struct("WaitQueue") .field("state", &state) .field("available_permits", &available_permits) .field("wait_queue_being_processed", &wait_queue_being_processed) .field("wait_queue_tail_addr", &wait_queue_tail_addr) .finish() } } impl SyncPrimitive for Semaphore { #[inline] fn state(&self) -> &AtomicUsize { &self.state } #[inline] fn max_shared_owners() -> usize { Self::MAX_PERMITS } #[inline] fn drop_wait_queue_entry(entry: &Entry) { Self::force_remove_wait_queue_entry(entry); } } impl SyncResult for Semaphore { type Result = Result<(), pager::Error>; #[inline] fn to_result(_: u8, pager_error: Option) -> Self::Result { pager_error.map_or_else(|| Ok(()), Err) } } saa-5.4.2/src/sync_primitive.rs000064400000000000000000000313671046102023000146130ustar 00000000000000//! Define base operations for synchronization primitives. use std::pin::{Pin, pin}; use std::ptr::{addr_of, null, with_exposed_provenance}; #[cfg(not(feature = "loom"))] use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; #[cfg(not(feature = "loom"))] use std::thread::yield_now; #[cfg(feature = "loom")] use loom::sync::atomic::AtomicUsize; #[cfg(feature = "loom")] use loom::thread::yield_now; use crate::opcode::Opcode; use crate::wait_queue::{Entry, WaitQueue}; /// Defines base operations for synchronization primitives. pub(crate) trait SyncPrimitive: Sized { /// Returns a reference to the state. 
fn state(&self) -> &AtomicUsize; /// Returns the maximum number of shared owners. fn max_shared_owners() -> usize; /// Called when an enqueued wait queue entry is being dropped without acknowledging the result. fn drop_wait_queue_entry(entry: &Entry); /// Converts a reference to `Self` into a memory address. #[inline] fn addr(&self) -> usize { let self_ptr: *const Self = addr_of!(*self); self_ptr.expose_provenance() } /// Tries to push a wait queue entry into the wait queue. #[must_use] fn try_push_wait_queue_entry(&self, wait_queue: Pin<&WaitQueue>, state: usize) -> bool { let anchor_ptr = wait_queue.anchor_ptr().0; let anchor_addr = anchor_ptr.expose_provenance(); debug_assert_eq!(anchor_addr & (!WaitQueue::ADDR_MASK), 0); let tail_anchor_ptr = WaitQueue::to_anchor_ptr(state); wait_queue .entry() .update_next_entry_anchor_ptr(tail_anchor_ptr); // The anchor pointer, instead of an entry pointer, is stored in the state. let next_state = (state & (!WaitQueue::ADDR_MASK)) | anchor_addr; if self .state() .compare_exchange(state, next_state, AcqRel, Acquire) .is_ok() { // The entry cannot be dropped until the result is acknowledged. wait_queue.entry().set_pollable(); true } else { false } } /// Waits for the desired resource synchronously. fn wait_resources_sync( &self, state: usize, opcode: Opcode, begin_wait: F, ) -> Result { debug_assert!(state & WaitQueue::ADDR_MASK != 0 || state & WaitQueue::DATA_MASK != 0); let pinned_wait_queue = pin!(WaitQueue::default()); pinned_wait_queue.as_ref().construct(self, opcode, true); if self.try_push_wait_queue_entry(pinned_wait_queue.as_ref(), state) { begin_wait(); Ok(pinned_wait_queue.entry().poll_result_sync()) } else { Err(begin_wait) } } /// Releases the resource represented by the supplied operation mode. /// /// Returns `false` if the resource cannot be released. 
fn release_loop(&self, mut state: usize, opcode: Opcode) -> bool { while opcode.can_release(state) { if state & WaitQueue::ADDR_MASK == 0 || state & WaitQueue::LOCKED_FLAG == WaitQueue::LOCKED_FLAG { // Release the resource in-place. match self.state().compare_exchange( state, state - opcode.acquired_count(), Release, Relaxed, ) { Ok(_) => return true, Err(new_state) => state = new_state, } } else { // The wait queue is not empty and is not being processed. let next_state = (state | WaitQueue::LOCKED_FLAG) - opcode.acquired_count(); if let Err(new_state) = self .state() .compare_exchange(state, next_state, AcqRel, Relaxed) { state = new_state; continue; } self.process_wait_queue(next_state); return true; } } false } /// Processes the wait queue. /// /// The tail entry of the wait queue is either reset or stays the same. fn process_wait_queue(&self, mut state: usize) { let mut head_entry_ptr: *const Entry = null(); let mut unlocked = false; while !unlocked { debug_assert_eq!(state & WaitQueue::LOCKED_FLAG, WaitQueue::LOCKED_FLAG); let anchor_ptr = WaitQueue::to_anchor_ptr(state); let tail_entry_ptr = WaitQueue::to_entry_ptr(anchor_ptr); if head_entry_ptr.is_null() { Entry::iter_forward(tail_entry_ptr, true, |entry, next_entry| { head_entry_ptr = Entry::ref_to_ptr(entry); next_entry.is_none() }); } else { Entry::set_prev_ptr(tail_entry_ptr); } let data = state & WaitQueue::DATA_MASK; let mut transferred = 0; let mut resolved_entry_ptr: *const Entry = null(); let mut reset_failed = false; Entry::iter_backward(head_entry_ptr, |entry, prev_entry| { let desired = entry.opcode().desired_count(); if data + transferred == 0 || data + transferred + desired <= Self::max_shared_owners() { // The entry can inherit ownership. let acquired = entry.opcode().acquired_count(); debug_assert!(acquired <= desired); if prev_entry.is_some() { transferred += acquired; resolved_entry_ptr = Entry::ref_to_ptr(entry); false } else { // This is the tail of the wait queue: try to reset. 
debug_assert_eq!(tail_entry_ptr, addr_of!(*entry)); if self .state() .compare_exchange(state, data + transferred + acquired, AcqRel, Acquire) .is_err() { // This entry will be processed on the next retry. entry.update_next_entry_anchor_ptr(null()); head_entry_ptr = Entry::ref_to_ptr(entry); reset_failed = true; return true; } // The wait queue was reset. unlocked = true; resolved_entry_ptr = Entry::ref_to_ptr(entry); true } } else { // Unlink those that have succeeded in acquiring shared ownership. entry.update_next_entry_anchor_ptr(null()); head_entry_ptr = Entry::ref_to_ptr(entry); true } }); debug_assert!(!reset_failed || !unlocked); if !reset_failed && !unlocked { unlocked = self .state() .fetch_update(AcqRel, Acquire, |new_state| { let new_data = new_state & WaitQueue::DATA_MASK; debug_assert!(new_data <= data); debug_assert!(new_data + transferred <= WaitQueue::DATA_MASK); if new_data == data { Some((new_state & WaitQueue::ADDR_MASK) | (new_data + transferred)) } else { None } }) .is_ok(); } if !unlocked { state = self.state().fetch_add(transferred, AcqRel) + transferred; } Entry::iter_forward(resolved_entry_ptr, false, |entry, _next_entry| { entry.set_result(0); false }); } } /// Removes a wait queue entry from the wait queue. fn remove_wait_queue_entry( &self, mut state: usize, entry_ptr_to_remove: *const Entry, ) -> (usize, bool) { let mut result = Ok((state, false)); loop { debug_assert_eq!(state & WaitQueue::LOCKED_FLAG, WaitQueue::LOCKED_FLAG); debug_assert_ne!(state & WaitQueue::ADDR_MASK, 0); let anchor_ptr = WaitQueue::to_anchor_ptr(state); let tail_entry_ptr = WaitQueue::to_entry_ptr(anchor_ptr); Entry::iter_forward(tail_entry_ptr, true, |entry, next_entry| { if Entry::ref_to_ptr(entry) == entry_ptr_to_remove { // Found the entry to remove. 
let prev_entry_ptr = entry.prev_entry_ptr(); if let Some(next_entry) = next_entry { next_entry.update_prev_entry_ptr(prev_entry_ptr); } result = if let Some(prev_entry) = unsafe { prev_entry_ptr.as_ref() } { // Successfully unlinked the target entry without updating the state. prev_entry.update_next_entry_anchor_ptr(entry.next_entry_anchor_ptr()); Ok((state, true)) } else if let Some(next_entry) = next_entry { // The next entry becomes the new tail of the wait queue. let next_entry_addr = Entry::ref_to_ptr(next_entry).expose_provenance(); let next_entry_ptr = with_exposed_provenance(next_entry_addr); let new_tail_ptr = Entry::to_wait_queue_ptr(next_entry_ptr); let new_anchor_ptr = unsafe { (*new_tail_ptr).anchor_ptr().0 }; debug_assert_eq!(new_anchor_ptr.addr() & (!WaitQueue::ADDR_MASK), 0); let next_state = (state & (!WaitQueue::ADDR_MASK)) | new_anchor_ptr.expose_provenance(); debug_assert_eq!( next_state & WaitQueue::LOCKED_FLAG, WaitQueue::LOCKED_FLAG ); self.state() .compare_exchange(state, next_state, AcqRel, Acquire) .map(|_| (next_state, true)) } else { // Reset the wait queue and unlock. let next_state = state & WaitQueue::DATA_MASK; self.state() .compare_exchange(state, next_state, AcqRel, Acquire) .map(|_| (next_state, true)) }; true } else { false } }); match result { Ok((state, removed)) => return (state, removed), Err(new_state) => state = new_state, } } } /// Removes a [`WaitQueue`] entry that was pushed into the wait queue but has not been /// processed. fn force_remove_wait_queue_entry(entry: &Entry) { let this: &Self = entry.sync_primitive_ref(); let this_ptr: *const Entry = addr_of!(*entry); // Remove the wait queue entry from the wait queue list. let mut state = this.state().load(Acquire); let mut need_completion = false; loop { if state & WaitQueue::LOCKED_FLAG == WaitQueue::LOCKED_FLAG { // Another thread is processing the wait queue. 
yield_now(); state = this.state().load(Acquire); } else if state & WaitQueue::ADDR_MASK == 0 { // The wait queue is empty. need_completion = true; break; } else if let Err(new_state) = this.state().compare_exchange( state, state | WaitQueue::LOCKED_FLAG, AcqRel, Acquire, ) { state = new_state; } else { let (new_state, removed) = this.remove_wait_queue_entry(state | WaitQueue::LOCKED_FLAG, this_ptr); if new_state & WaitQueue::LOCKED_FLAG == WaitQueue::LOCKED_FLAG { // We need to process the wait queue if it is still locked. this.process_wait_queue(new_state); } if !removed { need_completion = true; } break; } } if need_completion { // The entry was removed by another thread, so it will be completed. while !entry.result_finalized() { yield_now(); } this.release_loop(state, entry.opcode()); } } } saa-5.4.2/src/tests/models.rs000064400000000000000000000136651046102023000141750ustar 00000000000000use std::pin::pin; use std::sync::Arc; use std::sync::atomic::Ordering::Relaxed; use loom::sync::atomic::AtomicBool; use loom::thread::{spawn, yield_now}; use crate::gate; use crate::{Barrier, Gate, Lock, Pager, Semaphore}; #[test] fn lock_shared() { loom::model(|| { let lock = Arc::new(Lock::default()); let check = Arc::new(AtomicBool::new(false)); lock.lock_sync(); let lock_clone = lock.clone(); let check_clone = check.clone(); let thread_1 = spawn(move || { assert!(lock_clone.share_sync()); assert!(check_clone.load(Relaxed)); }); let lock_clone = lock.clone(); let check_clone = check.clone(); let thread_2 = spawn(move || { assert!(lock_clone.share_sync()); assert!(check_clone.load(Relaxed)); }); check.store(true, Relaxed); assert!(lock.release_lock()); assert!(thread_1.join().is_ok()); assert!(thread_2.join().is_ok()); assert!(lock.release_share()); assert!(lock.release_share()); }); } #[test] fn lock_exclusive() { loom::model(|| { let lock = Arc::new(Lock::default()); let check = Arc::new(AtomicBool::new(false)); lock.lock_sync(); let lock_clone = lock.clone(); let 
check_clone = check.clone(); let thread_1 = spawn(move || { assert!(lock_clone.lock_sync()); assert!(check_clone.load(Relaxed)); assert!(lock_clone.release_lock()); }); let lock_clone = lock.clone(); let check_clone = check.clone(); let thread_2 = spawn(move || { assert!(lock_clone.lock_sync()); assert!(check_clone.load(Relaxed)); assert!(lock_clone.release_lock()); }); check.store(true, Relaxed); assert!(lock.release_lock()); assert!(thread_1.join().is_ok()); assert!(thread_2.join().is_ok()); }); } #[test] fn share_poison() { loom::model(|| { let lock = Arc::new(Lock::default()); lock.lock_sync(); let lock_clone = lock.clone(); let thread_1 = spawn(move || { if !lock_clone.share_sync() { assert!(lock_clone.is_poisoned(Relaxed)); } }); let lock_clone = lock.clone(); let thread_2 = spawn(move || { if !lock_clone.share_sync() { assert!(lock_clone.is_poisoned(Relaxed)); } }); assert!(lock.poison_lock()); assert!(thread_1.join().is_ok()); assert!(thread_2.join().is_ok()); }); } #[test] fn lock_poison() { loom::model(|| { let lock = Arc::new(Lock::default()); lock.lock_sync(); let lock_clone = lock.clone(); let thread_1 = spawn(move || { assert!(lock_clone.lock_sync()); assert!(lock_clone.poison_lock()); }); let lock_clone = lock.clone(); let thread_2 = spawn(move || { if lock_clone.lock_sync() { assert!(lock_clone.release_lock()); } else { assert!(lock_clone.is_poisoned(Relaxed)); } }); assert!(lock.release_lock()); assert!(thread_1.join().is_ok()); assert!(thread_2.join().is_ok()); assert!(lock.is_poisoned(Relaxed)); }); } #[test] fn barrier() { loom::model(|| { let barrier = Arc::new(Barrier::with_count(2)); let check = Arc::new(AtomicBool::new(false)); let barrier_clone = barrier.clone(); let check_clone = check.clone(); let thread = spawn(move || { if barrier_clone.wait_sync() { assert!(!check_clone.swap(true, Relaxed)); } barrier_clone.wait_sync(); }); if barrier.wait_sync() { assert!(!check.swap(true, Relaxed)); } barrier.wait_sync(); 
assert!(thread.join().is_ok()); assert!(check.load(Relaxed)); }); } #[test] fn semaphore_release_acquire() { loom::model(|| { let semaphore = Arc::new(Semaphore::default()); let check = Arc::new(AtomicBool::new(false)); semaphore.acquire_many_sync(Semaphore::MAX_PERMITS); let semaphore_clone = semaphore.clone(); let check_clone = check.clone(); let thread = spawn(move || { semaphore_clone.acquire_many_sync(9); assert!(check_clone.load(Relaxed)); }); check.store(true, Relaxed); assert!(semaphore.release_many(8)); assert!(semaphore.release()); assert!(thread.join().is_ok()); }); } #[test] fn gate_enter() { loom::model(|| { let gate = Arc::new(Gate::default()); let gate_clone = gate.clone(); let thread = spawn(move || { assert_eq!(gate_clone.enter_sync(), Ok(gate::State::Controlled)); }); loop { if gate.permit() == Ok(1) { break; } yield_now(); } assert!(thread.join().is_ok()); }); } #[test] fn gate_seal() { loom::model(|| { let gate = Arc::new(Gate::default()); let gate_clone = gate.clone(); let thread = spawn(move || { assert_eq!(gate_clone.enter_sync(), Err(gate::Error::Sealed)); }); gate.seal(); assert!(thread.join().is_ok()); }); } #[test] fn drop_future() { loom::model(|| { let semaphore = Arc::new(Semaphore::default()); semaphore.acquire_many_sync(Semaphore::MAX_PERMITS); let semaphore_clone = semaphore.clone(); let thread = spawn(move || { semaphore_clone.acquire_many_sync(9); }); { let mut pinned_pager = pin!(Pager::default()); assert!(semaphore.register_pager(&mut pinned_pager, 11, false)); } assert!(semaphore.release_many(Semaphore::MAX_PERMITS)); assert!(thread.join().is_ok()); }); } saa-5.4.2/src/tests/unit_tests.rs000064400000000000000000001147161046102023000151120ustar 00000000000000use std::pin::pin; use std::sync::Arc; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::{Relaxed, Release}; use std::thread; use std::time::Duration; use crate::{Barrier, Gate, Lock, Pager, Semaphore, gate, lock}; #[test] fn future_size() { let limit = 
168; let limit_relaxed = 208; let lock = Lock::default(); let lock_fut_size = size_of_val(&lock.lock_async()); assert!(lock_fut_size <= limit, "{lock_fut_size}"); let lock_with_fut_size = size_of_val(&lock.lock_async_with(|| {})); assert!(lock_with_fut_size <= limit, "{lock_with_fut_size}"); let share_fut_size = size_of_val(&lock.share_async()); assert!(share_fut_size <= limit, "{share_fut_size}"); let share_with_fut_size = size_of_val(&lock.share_async_with(|| {})); assert!(share_with_fut_size <= limit, "{share_with_fut_size}"); let barrier = Barrier::default(); let barrier_fut_size = size_of_val(&barrier.wait_async()); assert!(barrier_fut_size <= limit_relaxed, "{barrier_fut_size}"); let barrier_with_fut_size = size_of_val(&barrier.wait_async_with(|| {})); assert!( barrier_with_fut_size <= limit_relaxed, "{barrier_with_fut_size}" ); let semaphore = Semaphore::default(); let acquire_fut_size = size_of_val(&semaphore.acquire_async()); assert!(acquire_fut_size <= limit_relaxed, "{acquire_fut_size}"); let acquire_with_fut_size = size_of_val(&semaphore.acquire_async_with(|| {})); assert!( acquire_with_fut_size <= limit_relaxed, "{acquire_with_fut_size}" ); let acquire_many_fut_size = size_of_val(&semaphore.acquire_many_async(1)); assert!( acquire_many_fut_size <= limit_relaxed, "{acquire_many_fut_size}" ); let acquire_many_with_fut_size = size_of_val(&semaphore.acquire_many_async_with(1, || {})); assert!( acquire_many_with_fut_size <= limit_relaxed, "{acquire_many_with_fut_size}" ); let gate = Gate::default(); let enter_fut_size = size_of_val(&gate.enter_async()); assert!(enter_fut_size <= limit_relaxed, "{enter_fut_size}"); let enter_with_fut_size = size_of_val(&gate.enter_async_with(|| {})); assert!( enter_with_fut_size <= limit_relaxed, "{enter_with_fut_size}" ); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn lock_async() { let num_tasks = 64; let check = 
Arc::new(AtomicUsize::new(0)); let lock = Arc::new(Lock::default()); lock.lock_async().await; check.fetch_add(usize::MAX, Relaxed); let mut tasks = Vec::new(); for i in 0..num_tasks { let check = check.clone(); let lock = lock.clone(); tasks.push(tokio::spawn(async move { if i % 8 == 0 { assert!(lock.share_sync()); } else { assert!(lock.share_async().await); } assert_ne!(check.fetch_add(1, Relaxed), usize::MAX); check.fetch_sub(1, Relaxed); assert!(lock.release_share()); assert!(lock.share_async().await); assert!(lock.release_share()); })); } tokio::time::sleep(Duration::from_millis(50)).await; check.fetch_sub(usize::MAX, Relaxed); assert!(lock.release_lock()); for task in tasks { task.await.unwrap(); } assert_eq!(check.load(Relaxed), 0); lock.lock_async().await; assert!(lock.release_lock()); } #[test] fn lock_sync() { let num_threads = if cfg!(miri) { 4 } else { Lock::MAX_SHARED_OWNERS }; let num_iters = if cfg!(miri) { 32 } else { 256 }; let check = Arc::new(AtomicUsize::new(0)); let lock = Arc::new(Lock::default()); lock.lock_sync(); check.fetch_add(usize::MAX, Relaxed); let mut threads = Vec::new(); for _ in 0..num_threads { let check = check.clone(); let lock = lock.clone(); threads.push(thread::spawn(move || { for j in 0..num_iters { if j % 11 == 0 { assert!(lock.lock_sync()); assert_eq!(check.fetch_add(usize::MAX, Relaxed), 0); thread::sleep(Duration::from_micros(1)); check.fetch_sub(usize::MAX, Relaxed); assert!(lock.release_lock()); } else { assert!(lock.share_sync()); assert!(check.fetch_add(1, Relaxed) < Lock::MAX_SHARED_OWNERS); thread::sleep(Duration::from_micros(1)); check.fetch_sub(1, Relaxed); assert!(lock.release_share()); } } })); } thread::sleep(Duration::from_micros(1)); check.fetch_sub(usize::MAX, Relaxed); assert!(lock.release_lock()); for thread in threads { thread.join().unwrap(); } assert_eq!(check.load(Relaxed), 0); } #[test] fn lock_wait_queue() { let num_threads = 4; let mut semaphores = Vec::new(); let check = 
Arc::new(AtomicUsize::new(0)); let lock = Arc::new(Lock::default()); lock.lock_sync(); semaphores.resize_with(num_threads + 1, || Semaphore::with_permits(0)); let mut threads = Vec::new(); let semaphores = Arc::new(semaphores); for i in 0..num_threads { let semaphores = semaphores.clone(); let check = check.clone(); let lock = lock.clone(); threads.push(thread::spawn(move || { semaphores[i].acquire_sync(); lock.lock_sync_with(|| { semaphores[i + 1].release(); }); assert_eq!(check.fetch_add(1, Relaxed), i); lock.release_lock(); })); } semaphores[0].release(); semaphores[num_threads].acquire_sync(); assert!(lock.release_lock()); for thread in threads { thread.join().unwrap(); } } #[test] fn lock_sync_wait_callback() { let num_threads = if cfg!(miri) { 4 } else { Lock::MAX_SHARED_OWNERS }; let barrier = Arc::new(Barrier::with_count(num_threads + 1)); let lock = Arc::new(Lock::default()); lock.lock_sync(); let mut threads = Vec::new(); for i in 0..num_threads { let barrier = barrier.clone(); let lock = lock.clone(); threads.push(thread::spawn(move || { let result = if i % 7 == 3 { lock.lock_sync_with(|| { barrier.wait_sync(); }); lock.release_lock() } else { lock.share_sync_with(|| { barrier.wait_sync(); }); lock.release_share() }; assert!(result); })); } barrier.wait_sync(); assert!(lock.release_lock()); for thread in threads { thread.join().unwrap(); } } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn lock_async_wait_callback() { let num_tasks = 8; let barrier = Arc::new(AtomicUsize::new(num_tasks)); let lock = Arc::new(Lock::default()); lock.lock_sync(); let mut tasks = Vec::new(); for i in 0..num_tasks { let barrier = barrier.clone(); let lock = lock.clone(); tasks.push(tokio::task::spawn(async move { let result = if i % 7 == 3 { lock.lock_async_with(|| { barrier.fetch_sub(1, Relaxed); }) .await; lock.release_lock() } else { lock.share_async_with(|| { barrier.fetch_sub(1, 
Relaxed); }) .await; lock.release_share() }; assert!(result); })); } while barrier.load(Relaxed) != 0 { tokio::task::yield_now().await; } assert!(lock.release_lock()); for task in tasks { task.await.unwrap(); } } #[test] fn lock_poison_sync() { let num_threads = if cfg!(miri) { 4 } else { Lock::MAX_SHARED_OWNERS }; let num_iters = if cfg!(miri) { 4 } else { 64 }; let lock = Arc::new(Lock::default()); lock.lock_sync(); let mut threads = Vec::new(); for _ in 0..num_threads { let lock = lock.clone(); threads.push(thread::spawn(move || { for j in 0..num_iters { if j % 11 == 0 { if !lock.lock_sync() { assert!(lock.is_poisoned(Relaxed)); let mut waited = false; assert!(!lock.share_sync_with(|| waited = true)); assert!(!waited); } } else if !lock.share_sync() { assert!(lock.is_poisoned(Relaxed)); let mut waited = false; assert!(!lock.lock_sync_with(|| waited = true)); assert!(!waited); } } })); } assert!(lock.poison_lock()); for thread in threads { thread.join().unwrap(); } assert!(lock.clear_poison()); assert!(lock.lock_sync()); assert!(lock.release_lock()); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn lock_poison_async() { let num_tasks = 8; let num_iters = 64; let lock = Arc::new(Lock::default()); lock.lock_sync(); let mut tasks = Vec::new(); for _ in 0..num_tasks { let lock = lock.clone(); tasks.push(tokio::task::spawn(async move { for j in 0..num_iters { if j % 11 == 0 { if !lock.lock_async().await { assert!(lock.is_poisoned(Relaxed)); let mut waited = false; assert!(!lock.share_async_with(|| waited = true).await); assert!(!waited); } } else if !lock.share_async().await { assert!(lock.is_poisoned(Relaxed)); let mut waited = false; assert!(!lock.lock_async_with(|| waited = true).await); assert!(!waited); } } })); } assert!(lock.poison_lock()); for task in tasks { task.await.unwrap(); } assert!(lock.clear_poison()); assert!(lock.lock_sync()); assert!(lock.release_lock()); } 
#[test] fn lock_poison_wait_sync() { let num_threads = if cfg!(miri) { 4 } else { Lock::MAX_SHARED_OWNERS }; let barrier = Arc::new(Barrier::with_count(num_threads + 1)); let lock = Arc::new(Lock::default()); lock.lock_sync(); let mut threads = Vec::new(); for i in 0..num_threads { let barrier = barrier.clone(); let lock = lock.clone(); threads.push(thread::spawn(move || { if i % 2 == 0 { assert!(!lock.lock_sync_with(|| { barrier.wait_sync(); })); } else { assert!(!lock.share_sync_with(|| { barrier.wait_sync(); })); } })); } barrier.wait_sync(); assert!(lock.poison_lock()); for thread in threads { thread.join().unwrap(); } assert!(lock.clear_poison()); assert!(lock.lock_sync()); assert!(lock.release_lock()); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn lock_poison_wait_async() { let num_tasks = 8; let barrier = Arc::new(AtomicUsize::new(num_tasks)); let lock = Arc::new(Lock::default()); lock.lock_sync(); let mut tasks = Vec::new(); for i in 0..num_tasks { let barrier = barrier.clone(); let lock = lock.clone(); tasks.push(tokio::task::spawn(async move { if i % 2 == 0 { assert!( !lock .lock_async_with(|| { barrier.fetch_sub(1, Relaxed); }) .await ); } else { assert!( !lock .share_async_with(|| { barrier.fetch_sub(1, Relaxed); }) .await ); } })); } while barrier.load(Relaxed) != 0 { tokio::task::yield_now().await; } assert!(lock.poison_lock()); for task in tasks { task.await.unwrap(); } assert!(lock.clear_poison()); assert!(lock.lock_sync()); assert!(lock.release_lock()); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn lock_pager() { let num_tasks = 8; let num_iters = 64; let check = Arc::new(AtomicUsize::new(0)); let lock = Arc::new(Lock::default()); let mut threads = Vec::new(); let mut tasks = Vec::new(); for i in 0..num_tasks { let check = check.clone(); let lock = lock.clone(); if i % 2 
== 0 { tasks.push(tokio::spawn(async move { let mut pinned_pager = pin!(Pager::default()); for _ in 0..num_iters { lock.register_pager(&mut pinned_pager, lock::Mode::Shared, false); match pinned_pager.poll_async().await { Ok(true) => { check.fetch_add(1, Relaxed); lock.release_share(); } _ => unreachable!(), } } })); } else { threads.push(thread::spawn(move || { let mut pinned_pager = pin!(Pager::default()); for _ in 0..num_iters { lock.register_pager(&mut pinned_pager, lock::Mode::Exclusive, true); match pinned_pager.poll_sync() { Ok(true) => { check.fetch_add(1, Relaxed); lock.release_lock(); } _ => unreachable!(), } } })); } } for thread in threads { thread.join().unwrap(); } for task in tasks { task.await.unwrap(); } assert_eq!(check.load(Relaxed), num_iters * num_tasks); } #[test] fn lock_pager_wait() { let lock = Lock::default(); assert!(lock.try_lock()); let mut pinned_pager_1 = pin!(Pager::default()); let mut pinned_pager_2 = pin!(Pager::default()); let mut pinned_pager_3 = pin!(Pager::default()); assert!(lock.register_pager(&mut pinned_pager_1, lock::Mode::Shared, false)); assert!(lock.register_pager(&mut pinned_pager_2, lock::Mode::WaitExclusive, false)); assert!(lock.register_pager(&mut pinned_pager_3, lock::Mode::Exclusive, false)); assert!(lock.release_lock()); assert_eq!(pinned_pager_1.try_poll(), Ok(true)); assert!(pinned_pager_2.try_poll().is_err()); assert!(pinned_pager_3.try_poll().is_err()); assert!(lock.release_share()); assert_eq!(pinned_pager_2.try_poll(), Ok(true)); assert_eq!(pinned_pager_3.try_poll(), Ok(true)); assert!(lock.register_pager(&mut pinned_pager_2, lock::Mode::WaitShared, false)); assert!(pinned_pager_2.try_poll().is_err()); assert!(lock.poison_lock()); assert_eq!(pinned_pager_2.try_poll(), Ok(false)); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn lock_pager_wait_parallel() { let num_tasks = 8; let num_iters = 64; let lock = 
Arc::new(Lock::default()); let mut threads = Vec::new(); let mut tasks = Vec::new(); for i in 0..num_tasks { let lock = lock.clone(); if i % 4 == 0 { tasks.push(tokio::spawn(async move { let mut pinned_pager = pin!(Pager::default()); for _ in 0..num_iters { let mode = if num_iters % 7 == 0 { lock::Mode::Exclusive } else { lock::Mode::Shared }; lock.register_pager(&mut pinned_pager, mode, false); match pinned_pager.poll_async().await { Ok(true) => { if mode == lock::Mode::Exclusive { lock.release_lock(); } else { lock.release_share(); } } _ => unreachable!(), } } })); } else { threads.push(thread::spawn(move || { let mut pinned_pager = pin!(Pager::default()); for _ in 0..num_iters { lock.register_pager(&mut pinned_pager, lock::Mode::WaitShared, true); assert!(pinned_pager.poll_sync().is_ok()); } })); } } for thread in threads { thread.join().unwrap(); } for task in tasks { task.await.unwrap(); } } #[test] fn barrier_sync() { let num_threads = if cfg!(miri) { 4 } else { Barrier::MAX_TASKS }; let num_iters = if cfg!(miri) { 2 } else { 8 }; let barrier = Arc::new(Barrier::with_count(num_threads)); let mut threads = Vec::new(); for _ in 0..num_threads { let barrier = barrier.clone(); threads.push(thread::spawn(move || { for _ in 0..num_iters { debug_assert!(barrier.count(Relaxed) <= num_threads); barrier.wait_sync(); } debug_assert!(barrier.count(Relaxed) <= num_threads); })); } for thread in threads { thread.join().unwrap(); } assert_eq!(barrier.count(Relaxed), num_threads); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn semaphore_async() { let num_tasks = 64; let check = Arc::new(AtomicUsize::new(0)); let semaphore = Arc::new(Semaphore::default()); assert!(semaphore.acquire_many_sync(Semaphore::MAX_PERMITS)); check.fetch_add(Semaphore::MAX_PERMITS, Relaxed); let mut tasks = Vec::new(); for i in 0..num_tasks { let check = check.clone(); let semaphore = semaphore.clone(); 
tasks.push(tokio::spawn(async move { if i % 8 == 0 { semaphore.acquire_sync(); } else { semaphore.acquire_async().await; } assert!(check.fetch_add(1, Relaxed) < Semaphore::MAX_PERMITS); check.fetch_sub(1, Relaxed); assert!(semaphore.release()); })); } tokio::time::sleep(Duration::from_millis(25)).await; check.fetch_sub(Semaphore::MAX_PERMITS - 11, Relaxed); assert!(semaphore.release_many(Semaphore::MAX_PERMITS - 11)); tokio::time::sleep(Duration::from_millis(25)).await; check.fetch_sub(11, Relaxed); assert!(semaphore.release_many(11)); for task in tasks { task.await.unwrap(); } assert_eq!(check.load(Relaxed), 0); assert!(semaphore.acquire_many_async(Semaphore::MAX_PERMITS).await); assert!(semaphore.release_many(Semaphore::MAX_PERMITS)); } #[test] fn semaphore_sync() { let num_threads = if cfg!(miri) { 4 } else { Semaphore::MAX_PERMITS }; let num_iters = if cfg!(miri) { 32 } else { 256 }; let check = Arc::new(AtomicUsize::new(0)); let semaphore = Arc::new(Semaphore::default()); assert!(semaphore.acquire_many_sync(Semaphore::MAX_PERMITS)); check.fetch_add(Semaphore::MAX_PERMITS, Relaxed); let mut threads = Vec::new(); for i in 0..num_threads { let check = check.clone(); let semaphore = semaphore.clone(); threads.push(thread::spawn(move || { for _ in 0..num_iters { assert!(semaphore.acquire_many_sync(i + 1)); assert!(check.fetch_add(i + 1, Relaxed) + i < Semaphore::MAX_PERMITS); thread::sleep(Duration::from_micros(1)); check.fetch_sub(i + 1, Relaxed); assert!(semaphore.release_many(i + 1)); } })); } thread::sleep(Duration::from_micros(1)); check.fetch_sub(Semaphore::MAX_PERMITS, Relaxed); assert!(semaphore.release_many(Semaphore::MAX_PERMITS)); for thread in threads { thread.join().unwrap(); } assert_eq!(check.load(Relaxed), 0); } #[test] fn semaphore_sync_wait_callback() { let num_threads = if cfg!(miri) { 4 } else { Semaphore::MAX_PERMITS }; let barrier = Arc::new(Barrier::with_count(num_threads)); let semaphore = Arc::new(Semaphore::default()); 
assert!(semaphore.acquire_many_sync(Semaphore::MAX_PERMITS)); let mut threads = Vec::new(); for i in 0..num_threads { let barrier = barrier.clone(); let semaphore = semaphore.clone(); threads.push(thread::spawn(move || { let result = if i % 7 == 3 { semaphore.acquire_sync_with(|| { if i != 0 { barrier.wait_sync(); } }); semaphore.release() } else { semaphore.acquire_many_sync_with(3, || { if i != 0 { barrier.wait_sync(); } }); semaphore.release_many(3) }; assert!(result); })); } barrier.wait_sync(); assert!(semaphore.release_many(Semaphore::MAX_PERMITS)); for thread in threads { thread.join().unwrap(); } } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn semaphore_async_wait_callback() { let num_tasks = 8; let barrier = Arc::new(AtomicUsize::new(num_tasks)); let semaphore = Arc::new(Semaphore::default()); assert!(semaphore.acquire_many_sync(Semaphore::MAX_PERMITS)); let mut tasks = Vec::new(); for i in 0..num_tasks { let barrier = barrier.clone(); let semaphore = semaphore.clone(); tasks.push(tokio::task::spawn(async move { let result = if i % 7 == 3 { semaphore .acquire_async_with(|| { barrier.fetch_sub(1, Relaxed); }) .await; semaphore.release() } else { semaphore .acquire_many_async_with(7, || { barrier.fetch_sub(1, Relaxed); }) .await; semaphore.release_many(7) }; assert!(result); })); } while barrier.load(Relaxed) != 0 { tokio::task::yield_now().await; } assert!(semaphore.release_many(Semaphore::MAX_PERMITS)); for task in tasks { task.await.unwrap(); } } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn semaphore_pager() { let num_tasks = 8; let num_iters = 64; let check = Arc::new(AtomicUsize::new(0)); let semaphore = Arc::new(Semaphore::default()); let mut threads = Vec::new(); let mut tasks = Vec::new(); for i in 0..num_tasks { let check = check.clone(); let semaphore = semaphore.clone(); if 
i % 2 == 0 { tasks.push(tokio::spawn(async move { let mut pinned_pager = pin!(Pager::default()); for _ in 0..num_iters { assert!(!semaphore.register_pager(&mut pinned_pager, usize::MAX, false)); semaphore.register_pager(&mut pinned_pager, Semaphore::MAX_PERMITS, false); match pinned_pager.poll_async().await { Ok(()) => { check.fetch_add(1, Relaxed); semaphore.release_many(Semaphore::MAX_PERMITS); } Err(e) => unreachable!("{e:?}"), } } })); } else { threads.push(thread::spawn(move || { let mut pinned_pager = pin!(Pager::default()); for _ in 0..num_iters { assert!(!semaphore.register_pager(&mut pinned_pager, usize::MAX, true)); semaphore.register_pager(&mut pinned_pager, Semaphore::MAX_PERMITS, true); match pinned_pager.poll_sync() { Ok(()) => { check.fetch_add(1, Relaxed); semaphore.release_many(Semaphore::MAX_PERMITS); } Err(e) => unreachable!("{e:?}"), } } })); } } for thread in threads { thread.join().unwrap(); } for task in tasks { task.await.unwrap(); } assert_eq!(check.load(Relaxed), num_iters * num_tasks); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn gate_async() { let num_tasks = 64; let num_iters = 256; let gate = Arc::new(Gate::default()); let mut tasks = Vec::new(); for _ in 0..num_tasks { let gate = gate.clone(); tasks.push(tokio::spawn(async move { for _ in 0..num_iters { assert_eq!(gate.enter_async().await, Ok(gate::State::Controlled)); } })); } let mut granted = 0; while granted != num_iters * num_tasks { if let Ok(n) = gate.permit() { granted += n; } } assert_eq!(granted, num_iters * num_tasks); gate.seal(); assert_eq!(gate.enter_sync(), Err(gate::Error::Sealed)); gate.open(); assert_eq!(gate.enter_sync(), Ok(gate::State::Open)); for task in tasks { task.await.unwrap(); } } #[test] fn gate_sync() { let num_threads = if cfg!(miri) { 4 } else { 16 }; let num_iters = if cfg!(miri) { 32 } else { 256 }; let gate = Arc::new(Gate::default()); let mut threads = 
Vec::new(); for _ in 0..num_threads { let gate = gate.clone(); threads.push(thread::spawn(move || { for _ in 0..num_iters { assert_eq!(gate.enter_sync(), Ok(gate::State::Controlled)); } })); } let mut granted = 0; while granted != num_iters * num_threads { if let Ok(n) = gate.permit() { granted += n; } } assert_eq!(granted, num_iters * num_threads); gate.seal(); assert_eq!(gate.enter_sync(), Err(gate::Error::Sealed)); gate.open(); assert_eq!(gate.enter_sync(), Ok(gate::State::Open)); for thread in threads { thread.join().unwrap(); } } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn gate_pager() { let num_tasks = 8; let num_iters = 64; let check = Arc::new(AtomicUsize::new(0)); let granted = AtomicUsize::new(0); let gate = Arc::new(Gate::default()); let mut threads = Vec::new(); let mut tasks = Vec::new(); for i in 0..num_tasks { let check = check.clone(); let gate = gate.clone(); if i % 2 == 0 { tasks.push(tokio::spawn(async move { let mut pinned_pager = pin!(Pager::default()); for _ in 0..num_iters { gate.register_pager(&mut pinned_pager, false); match pinned_pager.poll_async().await { Ok(_) => { check.fetch_add(1, Relaxed); } Err(e) => unreachable!("{e:?}"), } } })); } else { threads.push(thread::spawn(move || { let mut pinned_pager = pin!(Pager::default()); for _ in 0..num_iters { gate.register_pager(&mut pinned_pager, true); match pinned_pager.poll_sync() { Ok(_) => { check.fetch_add(1, Relaxed); } Err(e) => unreachable!("{e:?}"), } } })); } } while granted.load(Relaxed) != num_iters * num_tasks { if let Ok(n) = gate.permit() { granted.fetch_add(n, Relaxed); } } for thread in threads { thread.join().unwrap(); } for task in tasks { task.await.unwrap(); } assert_eq!(check.load(Relaxed), granted.load(Relaxed)); } #[test] fn pager_drop_implicit() { let lock = Arc::new(Lock::default()); lock.lock_sync(); let mut threads = Vec::new(); for i in 0..2 { let lock = lock.clone(); 
threads.push(thread::spawn(move || { if i == 0 { lock.lock_sync(); assert!(lock.release_lock()); } else { lock.share_sync(); assert!(lock.release_share()); } })); } for is_sync in [false, true] { let mut pinned_pager = pin!(Pager::default()); lock.register_pager(&mut pinned_pager, lock::Mode::Exclusive, is_sync); } assert!(lock.release_lock()); for thread in threads { thread.join().unwrap(); } lock.lock_sync(); assert!(lock.release_lock()); } #[test] fn pager_drop_explicit() { let semaphore = Arc::new(Semaphore::default()); assert!(semaphore.acquire_many_sync(Semaphore::MAX_PERMITS)); let semaphore_clone = semaphore.clone(); let thread = thread::spawn(move || { assert!(semaphore_clone.acquire_many_sync(9)); }); { let mut pinned_pager = pin!(Pager::default()); assert!(semaphore.register_pager(&mut pinned_pager, 11, false)); } assert!(semaphore.release_many(Semaphore::MAX_PERMITS)); assert!(thread.join().is_ok()); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn lock_chaos() { let num_tasks = Lock::MAX_SHARED_OWNERS; let num_iters = 2048; let check = Arc::new(AtomicUsize::new(0)); let lock = Arc::new(Lock::default()); let mut threads = Vec::new(); let mut tasks = Vec::new(); for i in 0..num_tasks { let check = check.clone(); let lock = lock.clone(); if i % 2 == 0 { tasks.push(tokio::spawn(async move { for j in 0..num_iters { assert!(!lock.is_poisoned(Relaxed)); if j % 11 == 0 { assert!(lock.lock_async().await); assert_eq!(check.fetch_add(usize::MAX, Relaxed), 0); check.fetch_sub(usize::MAX, Relaxed); assert!(lock.release_lock()); } else { assert!(lock.share_async().await); assert!(check.fetch_add(1, Relaxed) < Lock::MAX_SHARED_OWNERS); check.fetch_sub(1, Relaxed); assert!(lock.release_share()); } assert!(!lock.is_poisoned(Relaxed)); } })); } else { threads.push(thread::spawn(move || { for j in 0..num_iters { assert!(!lock.is_poisoned(Relaxed)); if j % 7 == 3 { let mut pinned_pager 
= pin!(Pager::default()); lock.register_pager(&mut pinned_pager, lock::Mode::Exclusive, j % 2 == 0); } else if j % 11 == 0 { assert!(lock.lock_sync()); assert_eq!(check.fetch_add(usize::MAX, Relaxed), 0); check.fetch_sub(usize::MAX, Relaxed); assert!(lock.release_lock()); } else { assert!(lock.share_sync()); assert!(check.fetch_add(1, Relaxed) < Lock::MAX_SHARED_OWNERS); check.fetch_sub(1, Relaxed); assert!(lock.release_share()); } assert!(!lock.is_poisoned(Relaxed)); } })); } } for thread in threads { thread.join().unwrap(); } for task in tasks { task.await.unwrap(); } assert_eq!(check.load(Relaxed), 0); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn barrier_chaos() { let num_tasks = 16; let num_iters = 2048; let check = Arc::new(AtomicUsize::new(0)); let barrier = Arc::new(Barrier::with_count(num_tasks)); let mut threads = Vec::new(); let mut tasks = Vec::new(); for i in 0..num_tasks { let check = check.clone(); let barrier = barrier.clone(); if i % 2 == 0 { tasks.push(tokio::spawn(async move { for i in 0..num_iters { debug_assert_eq!(check.load(Relaxed), i); if barrier.wait_async().await { check.fetch_add(1, Release); } barrier.wait_async().await; } })); } else { threads.push(thread::spawn(move || { for i in 0..num_iters { debug_assert_eq!(check.load(Relaxed), i); if barrier.wait_sync() { check.swap(i + 1, Release); } barrier.wait_sync(); } })); } } for thread in threads { thread.join().unwrap(); } for task in tasks { task.await.unwrap(); } assert_eq!(check.load(Relaxed), num_iters); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn semaphore_chaos() { let num_tasks = Semaphore::MAX_PERMITS; let num_iters = 2048; let check = Arc::new(AtomicUsize::new(0)); let semaphore = Arc::new(Semaphore::default()); let mut threads = Vec::new(); let mut tasks = Vec::new(); for i in 0..num_tasks { let 
check = check.clone(); let semaphore = semaphore.clone(); if i % 2 == 0 { tasks.push(tokio::spawn(async move { for _ in 0..num_iters { assert!(semaphore.acquire_many_async(i + 1).await); assert!(check.fetch_add(i + 1, Relaxed) + i < Semaphore::MAX_PERMITS); check.fetch_sub(i + 1, Relaxed); assert!(semaphore.release_many(i + 1)); assert!(!semaphore.acquire_many_async(usize::MAX).await); } })); } else { threads.push(thread::spawn(move || { for j in 0..num_iters { if j % 7 == 1 { let mut pinned_pager = pin!(Pager::default()); assert!(semaphore.register_pager(&mut pinned_pager, 27, j % 2 == 0)); } else { assert!(semaphore.acquire_many_sync(i + 1)); assert!(check.fetch_add(i + 1, Relaxed) + i < Semaphore::MAX_PERMITS); check.fetch_sub(i + 1, Relaxed); assert!(semaphore.release_many(i + 1)); } assert!(!semaphore.acquire_many_sync(usize::MAX)); } })); } } for thread in threads { thread.join().unwrap(); } for task in tasks { task.await.unwrap(); } assert_eq!(check.load(Relaxed), 0); } #[cfg_attr(miri, ignore = "Tokio is not compatible with Miri")] #[tokio::test(flavor = "multi_thread", worker_threads = 16)] async fn gate_chaos() { let num_tasks = 16; let num_iters = 2048; let check = Arc::new(AtomicUsize::new(0)); let granted = AtomicUsize::new(0); let gate = Arc::new(Gate::default()); let mut threads = Vec::new(); let mut tasks = Vec::new(); for i in 0..num_tasks { let check = check.clone(); let gate = gate.clone(); if i % 2 == 0 { tasks.push(tokio::spawn(async move { for _ in 0..num_iters { match gate.enter_async().await { Ok(_) => { check.fetch_add(1, Relaxed); } Err(gate::Error::Sealed) => break, Err(e) => assert_eq!(e, gate::Error::SpuriousFailure), } } })); } else { threads.push(thread::spawn(move || { for j in 0..num_iters { if j % 7 == 5 { let mut pinned_pager = pin!(Pager::default()); if i % 3 == 0 { assert!(gate.register_pager(&mut pinned_pager, false)); } else { assert!(gate.register_pager(&mut pinned_pager, true)); if pinned_pager.try_poll().is_ok() { 
assert_eq!( pinned_pager.poll_sync(), Err(gate::Error::NotRegistered) ); } } } else { match gate.enter_sync() { Ok(_) => { check.fetch_add(1, Relaxed); } Err(gate::Error::Sealed) => break, Err(e) => assert_eq!(e, gate::Error::SpuriousFailure), } } } })); } } while granted.load(Relaxed) < num_iters / 2 { if let Ok(n) = gate.permit() { granted.fetch_add(n, Relaxed); } } gate.seal(); for thread in threads { thread.join().unwrap(); } for task in tasks { task.await.unwrap(); } assert!(check.load(Relaxed) <= granted.load(Relaxed)); } saa-5.4.2/src/tests.rs000064400000000000000000000001241046102023000126740ustar 00000000000000#[cfg(feature = "loom")] mod models; #[cfg(not(feature = "loom"))] mod unit_tests; saa-5.4.2/src/wait_queue.rs000064400000000000000000000543351046102023000137170ustar 00000000000000//! Wait queue implementation. use std::cell::UnsafeCell; use std::future::Future; use std::marker::PhantomPinned; use std::mem::align_of; use std::pin::Pin; use std::ptr::{from_ref, null, null_mut, with_exposed_provenance}; use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; #[cfg(not(feature = "loom"))] use std::sync::atomic::{AtomicPtr, AtomicU16}; use std::task::{Context, Poll, Waker}; #[cfg(not(feature = "loom"))] use std::thread::{Thread, current, park, yield_now}; #[cfg(feature = "loom")] use loom::sync::atomic::{AtomicPtr, AtomicU16}; #[cfg(feature = "loom")] use loom::thread::{Thread, current, park, yield_now}; use crate::opcode::Opcode; use crate::sync_primitive::SyncPrimitive; /// Fair and heap-free intrusive wait queue for locking primitives in this crate. /// /// [`WaitQueue`] itself forms an intrusive linked list of entries where entries are pushed at the /// tail and popped from the head. #[derive(Debug, Default)] #[repr(align(8))] pub(crate) struct WaitQueue { /// Wait queue entry raw data. /// /// The raw data is used to instantiate an anchor of the wait queue entry at a 128B aligned /// memory address. 
The anchor contains an offset value that enables other threads to locate /// the [`WaitQueue`] and its [`Entry`]. /// /// An [`Entry`] is instantiated either the first or second half of the raw data area, and /// which one of the two is determined by the offset value in the anchor: one that does not /// overlap with the anchor. #[cfg(not(feature = "loom"))] raw_data: UnsafeCell<[u64; 16]>, #[cfg(feature = "loom")] // Loom types are larger than those in the standard library. raw_data: UnsafeCell<[[u64; 16]; 32]>, /// The [`WaitQueue`] cannot be unpinned since it forms an intrusive linked list. _pinned: PhantomPinned, } /// Wait queue entry. #[derive(Debug)] #[repr(align(8))] pub(crate) struct Entry { /// Points to the entry anchor that was pushed right before this entry. next_entry_anchor_ptr: AtomicPtr, /// Points to the entry that was pushed right after this entry. prev_entry_ptr: AtomicPtr, /// Operation type. opcode: Opcode, /// Operation state. state: AtomicU16, /// Indicates that the wait queue entry can be polled. /// /// If the flag is set, the `drop` method will automatically release unknowingly acquired /// resources. pollable: std::sync::atomic::AtomicBool, // `Loom` is too slow when it is added to modeling. /// Monitors the result. monitor: Monitor, /// Context cleanup function when a [`WaitQueue`] is cancelled. drop_callback: fn(&Self), /// Address of the corresponding synchronization primitive. addr: usize, /// Offset of the entry within the wait queue. offset: u16, } /// Monitors the result. #[derive(Debug)] enum Monitor { /// Monitors asynchronously. Async(UnsafeCell>), /// Monitors synchronously. Sync(UnsafeCell>), } /// Static assertions. const _WAIT_QUEUE_ALIGN_ASSERT: () = assert!(align_of::() == 8); const _ENTRY_ALIGN_ASSERT: () = assert!(align_of::() == 8); const _ENTRY_SIZE_ASSERT: () = assert!(size_of::() <= WaitQueue::VIRTUAL_ALIGNMENT / 2); impl WaitQueue { /// Virtual alignment of the wait queue. 
#[cfg(not(feature = "loom"))] pub(crate) const VIRTUAL_ALIGNMENT: usize = 128; #[cfg(feature = "loom")] pub(crate) const VIRTUAL_ALIGNMENT: usize = 4096; /// Indicates that the wait queue is being processed by a thread. #[cfg(not(feature = "loom"))] pub(crate) const LOCKED_FLAG: usize = Self::VIRTUAL_ALIGNMENT >> 1; #[cfg(feature = "loom")] pub(crate) const LOCKED_FLAG: usize = 64; /// Mask to extract additional information tagged with the [`WaitQueue`] memory address. pub(crate) const DATA_MASK: usize = Self::LOCKED_FLAG - 1; /// Mask to extract the memory address part from a `usize` value. pub(crate) const ADDR_MASK: usize = !(Self::LOCKED_FLAG | Self::DATA_MASK); /// Constructs a new [`Entry`] in the [`WaitQueue`]. pub(crate) fn construct( self: Pin<&Self>, sync_primitive: &S, opcode: Opcode, is_sync: bool, ) { let (anchor_ptr, offset) = self.anchor_ptr(); let had_entry = unsafe { // `0` represents the initial state, therefore take the compliment of the offset. if *anchor_ptr == 0 { *anchor_ptr.cast_mut() = u64::MAX - u64::try_from(offset).unwrap_or(0); false } else { debug_assert_eq!(*anchor_ptr, u64::MAX - u64::try_from(offset).unwrap_or(0)); true } }; let entry_ptr = Self::to_entry_ptr(anchor_ptr).cast_mut(); let monitor = if is_sync { Monitor::Sync(UnsafeCell::new(None)) } else { Monitor::Async(UnsafeCell::new(None)) }; unsafe { if had_entry { debug_assert!(!(*entry_ptr).pollable.load(Relaxed)); (*entry_ptr).prev_entry_ptr.store(null_mut(), Relaxed); (*entry_ptr).opcode = opcode; (*entry_ptr).monitor = monitor; (*entry_ptr).addr = sync_primitive.addr(); } else { let entry = Entry { next_entry_anchor_ptr: AtomicPtr::new(null_mut()), prev_entry_ptr: AtomicPtr::new(null_mut()), opcode, state: AtomicU16::new(0), pollable: std::sync::atomic::AtomicBool::new(false), monitor, drop_callback: S::drop_wait_queue_entry, addr: sync_primitive.addr(), offset: u16::try_from( entry_ptr.expose_provenance() - self.raw_data.get().expose_provenance(), ) .unwrap_or(0), }; 
entry_ptr.write(entry); } } } /// Checks whether the wait queue entry can be polled. #[inline] pub(crate) fn is_pollable(&self) -> bool { let entry_ptr = Self::to_entry_ptr(self.anchor_ptr().0); if entry_ptr.is_null() { false } else { unsafe { (*entry_ptr).pollable.load(Acquire) } } } /// Gets a pinned reference from `self`. #[inline] pub(crate) const fn pin(&self) -> Pin<&WaitQueue> { unsafe { Pin::new_unchecked(self) } } /// Gets a pinned reference from a pointer. #[inline] pub(crate) const fn pin_ptr<'l>(wait_queue_ptr: *const WaitQueue) -> Pin<&'l WaitQueue> { unsafe { Pin::new_unchecked(&*wait_queue_ptr) } } /// Returns a reference to the entry. #[inline] pub(crate) fn entry(&self) -> &Entry { unsafe { &*Self::to_entry_ptr(self.anchor_ptr().0) } } /// Returns the entry pointer derived from the anchor pointer. #[inline] pub(crate) fn to_entry_ptr(anchor_ptr: *const u64) -> *const Entry { let anchor_val = unsafe { *anchor_ptr }; if anchor_val == 0 { // No entry exists. return null(); } anchor_ptr .map_addr(|addr| { debug_assert_eq!(addr % Self::VIRTUAL_ALIGNMENT, 0); let offset = usize::try_from(u64::MAX - anchor_val).unwrap_or(0); let start_addr = addr - offset; debug_assert_eq!(start_addr % 8, 0); if offset < Self::VIRTUAL_ALIGNMENT / 2 { // The anchor is in the first half, so the entry is in the second half. start_addr + Self::VIRTUAL_ALIGNMENT / 2 } else { // The anchor is in the second half, so the entry is in the first half. start_addr } }) .cast::() } /// Converts a synchronization primitive state into an anchor pointer. #[inline] pub(crate) fn to_anchor_ptr(state: usize) -> *const u64 { let anchor_addr = state & Self::ADDR_MASK; if anchor_addr == 0 { return null(); } with_exposed_provenance::(anchor_addr) } /// Returns the anchor pointer that is used to locate the wait queue entry. 
#[inline] pub(crate) fn anchor_ptr(&self) -> (*const u64, usize) { let start_addr = self.raw_data.get(); let mut offset = 0; let anchor_ptr = start_addr .map_addr(|addr| { let anchor_addr = if addr % Self::VIRTUAL_ALIGNMENT == 0 { // Perfectly aligned, so the anchor is at the start address, and the entry is at // 64th byte. // // `128: start/anchor | 192: entry`. addr } else { // If the address is not perfectly aligned, we need to round up to the next // multiple of `Self::VIRTUAL_ALIGNMENT`. // // `32: start/entry | 128: anchor`. // `64: start/entry | 128: anchor`. // `96: start | 128: anchor | 160: entry`. addr + Self::VIRTUAL_ALIGNMENT - (addr % Self::VIRTUAL_ALIGNMENT) }; debug_assert_eq!(addr % 8, 0); debug_assert_eq!(anchor_addr % Self::VIRTUAL_ALIGNMENT, 0); debug_assert!(anchor_addr - addr < Self::VIRTUAL_ALIGNMENT); offset = anchor_addr - addr; anchor_addr }) .cast::(); (anchor_ptr, offset) } } impl Drop for WaitQueue { #[inline] fn drop(&mut self) { let anchor_ptr = self.anchor_ptr().0; let entry_ptr = Self::to_entry_ptr(anchor_ptr).cast_mut(); if entry_ptr.is_null() { return; } Entry::prepare_drop(entry_ptr); unsafe { entry_ptr.drop_in_place(); } } } impl Future for Pin<&'_ WaitQueue> { type Output = u8; #[inline] fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { self.entry().poll_result_async(cx) } } unsafe impl Send for WaitQueue {} unsafe impl Sync for WaitQueue {} impl Entry { /// A method is used in the wrong mode. pub(crate) const ERROR_WRONG_MODE: u8 = u8::MAX; /// Indicates that a result is set. const RESULT_SET: u16 = 1_u16 << u8::BITS; /// Indicates that a waker is set. const WAKER_SET: u16 = 1_u16 << (u8::BITS + 1); /// Indicates that a result is finalized. const RESULT_FINALIZED: u16 = 1_u16 << (u8::BITS + 2); /// Returns the anchor pointer derived from the entry pointer. 
#[inline] pub(crate) fn to_wait_queue_ptr(entry_ptr: *const Self) -> *const WaitQueue { entry_ptr .map_addr(|addr| addr - unsafe { usize::from((*entry_ptr).offset) }) .cast::() } /// Gets a pointer to the next entry anchor. /// /// The next entry is the one that was pushed right before this entry. pub(crate) fn next_entry_anchor_ptr(&self) -> *const u64 { self.next_entry_anchor_ptr.load(Acquire) } /// Gets a pointer to the next entry. /// /// The next entry is the one that was pushed right before this entry. #[inline] pub(crate) fn next_entry_ptr(&self) -> *const Self { let anchor_ptr = self.next_entry_anchor_ptr.load(Acquire); if anchor_ptr.is_null() { return null(); } WaitQueue::to_entry_ptr(anchor_ptr) } /// Gets a pointer to the previous entry. /// /// The previous entry is the one that was pushed right after this entry. #[inline] pub(crate) fn prev_entry_ptr(&self) -> *const Self { self.prev_entry_ptr.load(Acquire) } /// Updates the next entry anchor pointer. #[inline] pub(crate) fn update_next_entry_anchor_ptr(&self, next_entry_anchor_ptr: *const u64) { debug_assert_eq!( next_entry_anchor_ptr as usize % WaitQueue::VIRTUAL_ALIGNMENT, 0 ); self.next_entry_anchor_ptr .store(next_entry_anchor_ptr.cast_mut(), Release); } /// Updates the previous entry pointer. #[inline] pub(crate) fn update_prev_entry_ptr(&self, prev_entry_ptr: *const Self) { self.prev_entry_ptr .store(prev_entry_ptr.cast_mut(), Release); } /// Returns the operation code. pub(crate) const fn opcode(&self) -> Opcode { self.opcode } /// Converts a reference to `Self` to a raw pointer. #[inline] pub(crate) const fn ref_to_ptr(this: &Self) -> *const Self { let wait_queue_ptr: *const Self = from_ref(this); wait_queue_ptr } /// Returns the corresponding synchronization primitive reference. #[inline] pub(crate) fn sync_primitive_ref(&self) -> &S { unsafe { &*with_exposed_provenance::(self.addr) } } /// Sets a pointer to the previous entry on each entry by forward-iterating over entries. 
pub(crate) fn set_prev_ptr(tail_entry_ptr: *const Self) { let mut entry_ptr = tail_entry_ptr; while !entry_ptr.is_null() { entry_ptr = unsafe { let next_entry_ptr = (*entry_ptr).next_entry_ptr(); if let Some(next_entry) = next_entry_ptr.as_ref() { if next_entry.prev_entry_ptr().is_null() { next_entry.update_prev_entry_ptr(entry_ptr); } else { debug_assert_eq!(next_entry.prev_entry_ptr(), entry_ptr); return; } } next_entry_ptr }; } } /// Forward-iterates over entries, calling the supplied closure for each entry. /// /// Stops iteration if the closure returns `true`. pub(crate) fn iter_forward) -> bool>( tail_entry_ptr: *const Self, set_prev: bool, mut f: F, ) { let mut entry_ptr = tail_entry_ptr; while !entry_ptr.is_null() { entry_ptr = unsafe { let next_entry_ptr = (*entry_ptr).next_entry_ptr(); if set_prev { if let Some(next_entry) = next_entry_ptr.as_ref() { next_entry.update_prev_entry_ptr(entry_ptr); } } // The result is set here, so the scope should be protected. if f(&*entry_ptr, next_entry_ptr.as_ref()) { return; } next_entry_ptr }; } } /// Backward-iterates over entries, calling the supplied closure for each entry. /// /// Stops iteration if the closure returns `true`. pub(crate) fn iter_backward) -> bool>( head_entry_ptr: *const Self, mut f: F, ) { let mut entry_ptr = head_entry_ptr; while !entry_ptr.is_null() { entry_ptr = unsafe { let prev_entry_ptr = (*entry_ptr).prev_entry_ptr(); if f(&*entry_ptr, prev_entry_ptr.as_ref()) { return; } prev_entry_ptr }; } } /// Sets the result to the entry. pub(crate) fn set_result(&self, result: u8) { self.pollable.store(true, Release); let mut state = self.state.load(Acquire); loop { debug_assert_eq!(state & Self::RESULT_SET, 0); debug_assert_eq!(state & Self::RESULT_FINALIZED, 0); // Once the result is set, a waker cannot be set. 
let next_state = (state | Self::RESULT_SET) | u16::from(result); match self .state .compare_exchange_weak(state, next_state, AcqRel, Acquire) { Ok(_) => { state = next_state; break; } Err(new_state) => state = new_state, } } if state & Self::WAKER_SET == Self::WAKER_SET { // A waker had been set before the result was set. unsafe { match &self.monitor { Monitor::Async(waker) => { if let Some(waker) = (*waker.get()).take() { self.state.fetch_or(Self::RESULT_FINALIZED, AcqRel); waker.wake(); return; } } Monitor::Sync(thread) => { if let Some(thread) = (*thread.get()).take() { self.state.fetch_or(Self::RESULT_FINALIZED, AcqRel); thread.unpark(); return; } } } } } self.state.fetch_or(Self::RESULT_FINALIZED, AcqRel); } /// Polls the result, synchronously. pub(crate) fn poll_result_sync(&self) -> u8 { let Monitor::Sync(thread) = &self.monitor else { return Self::ERROR_WRONG_MODE; }; loop { if let Some(result) = self.try_consume_result() { return result; } let mut this_thread = None; let state = self.state.load(Acquire); if state & Self::RESULT_SET == Self::RESULT_SET { // No need to install the thread. if let Some(result) = self.try_consume_result() { return result; } } else if state & Self::WAKER_SET == Self::WAKER_SET { // Replace the thread by clearing the flag first. if self .state .compare_exchange_weak(state, state & !Self::WAKER_SET, AcqRel, Acquire) .is_ok() { this_thread.replace(current()); } } else { this_thread.replace(current()); } if let Some(this_thread) = this_thread { unsafe { (*thread.get()).replace(this_thread); } if self.state.fetch_or(Self::WAKER_SET, Release) & Self::RESULT_SET == Self::RESULT_SET { // The result has been set, so the thread will not be signaled. yield_now(); } else { park(); } } else { // The thread is not set, so we need to yield the thread. yield_now(); } } } /// The wait queue entry has been enqueued and can be polled. 
#[inline] pub(crate) fn set_pollable(&self) { self.pollable.store(true, Release); } /// Returns `true` if the result has been finalized. #[inline] pub(crate) fn result_finalized(&self) -> bool { let state = self.state.load(Acquire); state & Self::RESULT_FINALIZED == Self::RESULT_FINALIZED } /// Tries to get the result and acknowledges it. #[inline] pub(crate) fn acknowledge_result_sync(&self) -> u8 { loop { if let Some(result) = self.try_consume_result() { return result; } yield_now(); } } /// Tries to get the result and acknowledges it. #[inline] pub(crate) fn try_consume_result(&self) -> Option { let state = self.state.load(Acquire); if state & Self::RESULT_FINALIZED == Self::RESULT_FINALIZED { // The result is consumed, so the wait queue entry is no longer pollable. debug_assert_ne!(state & Self::RESULT_SET, 0); self.state.store(0, Release); self.pollable.store(false, Release); return u8::try_from(state & ((1_u16 << u8::BITS) - 1)).ok(); } None } /// Polls the result, asynchronously. fn poll_result_async(&self, cx: &mut Context<'_>) -> Poll { let Monitor::Async(waker) = &self.monitor else { return Poll::Ready(Self::ERROR_WRONG_MODE); }; if let Some(result) = self.try_consume_result() { return Poll::Ready(result); } let mut this_waker = None; let state = self.state.load(Acquire); if state & Self::RESULT_SET == Self::RESULT_SET { // No need to install the waker. if let Some(result) = self.try_consume_result() { return Poll::Ready(result); } } else if state & Self::WAKER_SET == Self::WAKER_SET { // Replace the waker by clearing the flag first. 
if self .state .compare_exchange_weak(state, state & !Self::WAKER_SET, AcqRel, Acquire) .is_ok() { this_waker.replace(cx.waker().clone()); } } else { this_waker.replace(cx.waker().clone()); } if let Some(this_waker) = this_waker { unsafe { (*waker.get()).replace(this_waker); } if self.state.fetch_or(Self::WAKER_SET, Release) & Self::RESULT_SET == Self::RESULT_SET { // The result has been set, so the waker will not be notified. cx.waker().wake_by_ref(); } } else { // The waker is not set, so we need to wake the task. cx.waker().wake_by_ref(); } Poll::Pending } /// Prepares for dropping `self`. /// /// The method cannot be implemented in `drop` because `Miri` treats the drop method in a /// way that other threads are not allowed to access the memory. #[inline] fn prepare_drop(entry_ptr: *mut Self) { let this = unsafe { &mut *entry_ptr }; // The wait queue entry is pollable and the result is not consumed. // // The wait queue entry owner may acquire the resource or has already acquired it without // knowing it, therefore the resource needs to be released. if this.pollable.load(Acquire) { (this.drop_callback)(this); this.pollable.store(false, Release); } } } unsafe impl Send for Monitor {} unsafe impl Sync for Monitor {}