futures-intrusive-0.5.0/.cargo_vcs_info.json0000644000000001360000000000100145610ustar { "git": { "sha1": "0adfe0e6690d95d8c1d958416955b7a69b203655" }, "path_in_vcs": "" }futures-intrusive-0.5.0/.gitignore000064400000000000000000000001000072674642500153600ustar 00000000000000target/ **/*.rs.bk Cargo.lock _site .sass-cache /.idea .DS_Storefutures-intrusive-0.5.0/.rustfmt.toml000064400000000000000000000000170072674642500160560ustar 00000000000000max_width = 80 futures-intrusive-0.5.0/.travis.yml000064400000000000000000000006720072674642500155170ustar 00000000000000language: rust rust: - stable env: - RUST_BACKTRACE=1 cache: directories: - /home/travis/.cargo before_cache: - cargo cache -r registry before_script: - rustup component add rustfmt - (test -x $HOME/.cargo/bin/cargo-cache || cargo install cargo-cache) script: - cargo fmt --all -- --check - cargo test --no-default-features - cargo test --no-default-features --features alloc - cargo test --all-targets --all-features futures-intrusive-0.5.0/Cargo.lock0000644000001102130000000000100125320ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "async-channel" version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59740d83946db6a5af71ae25ddf9562c2b176b2ca42cf99a455f09f4a220d6b9" dependencies = [ "concurrent-queue", "event-listener", "futures-core", ] [[package]] name = "async-executor" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" dependencies = [ "async-task", "concurrent-queue", "fastrand", "futures-lite", "once_cell", "vec-arena", ] [[package]] name = "async-global-executor" version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73079b49cd26b8fd5a15f68fc7707fc78698dc2a3d61430f2a7a9430230dfa04" dependencies = [ "async-executor", "async-io", "futures-lite", "num_cpus", "once_cell", ] [[package]] name = "async-io" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40a0b2bb8ae20fede194e779150fe283f65a4a08461b496de546ec366b174ad9" dependencies = [ "concurrent-queue", "fastrand", "futures-lite", "libc", "log", "nb-connect", "once_cell", "parking", "polling", "vec-arena", "waker-fn", "winapi", ] [[package]] name = "async-mutex" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" dependencies = [ "event-listener", ] [[package]] name = "async-std" version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7e82538bc65a25dbdff70e4c5439d52f068048ab97cdea0acd73f131594caa1" dependencies = [ "async-global-executor", "async-io", "async-mutex", "blocking", "crossbeam-utils 0.8.0", "futures-channel", "futures-core", "futures-io", "futures-lite", "gloo-timers", "kv-log-macro", "log", "memchr", "num_cpus", "once_cell", "pin-project-lite 0.1.11", "pin-utils", "slab", "wasm-bindgen-futures", ] [[package]] name = 
"async-task" version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "atomic-waker" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" [[package]] name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", "winapi", ] [[package]] name = "autocfg" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "bitflags" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "blocking" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" dependencies = [ "async-channel", "async-task", "atomic-waker", "fastrand", "futures-lite", "once_cell", ] [[package]] name = "bstr" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "473fc6b38233f9af7baa94fb5852dca389e3d95b8e21c8e3719301462c5d9faf" dependencies = [ "lazy_static", "memchr", "regex-automata", "serde", ] [[package]] name = "bumpalo" version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "byteorder" version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "bytes" version = "1.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "cache-padded" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cast" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0" dependencies = [ "rustc_version", ] [[package]] name = "cc" version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad9c6140b5a2c7db40ea56eb1821245e5362b44385c05b76288b1a599934ac87" [[package]] name = "cfg-if" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ "bitflags", "textwrap", "unicode-width", ] [[package]] name = "concurrent-queue" version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" dependencies = [ "cache-padded", ] [[package]] name = "const_fn" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c478836e029dcef17fb47c89023448c64f781a046e0300e257ad8225ae59afab" [[package]] name = "criterion" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70daa7ceec6cf143990669a04c7df13391d55fb27bd4079d252fca774ba244d8" dependencies = [ 
"atty", "cast", "clap", "criterion-plot", "csv", "itertools", "lazy_static", "num-traits", "oorandom", "plotters", "rayon", "regex", "serde", "serde_cbor", "serde_derive", "serde_json", "tinytemplate", "walkdir", ] [[package]] name = "criterion-plot" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d" dependencies = [ "cast", "itertools", ] [[package]] name = "crossbeam" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" dependencies = [ "cfg-if 0.1.10", "crossbeam-channel 0.4.4", "crossbeam-deque 0.7.3", "crossbeam-epoch 0.8.2", "crossbeam-queue", "crossbeam-utils 0.7.2", ] [[package]] name = "crossbeam-channel" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" dependencies = [ "crossbeam-utils 0.7.2", "maybe-uninit", ] [[package]] name = "crossbeam-channel" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils 0.8.0", ] [[package]] name = "crossbeam-deque" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" dependencies = [ "crossbeam-epoch 0.8.2", "crossbeam-utils 0.7.2", "maybe-uninit", ] [[package]] name = "crossbeam-deque" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch 0.9.0", "crossbeam-utils 0.8.0", ] [[package]] name = "crossbeam-epoch" version = "0.8.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ "autocfg", "cfg-if 0.1.10", "crossbeam-utils 0.7.2", "lazy_static", "maybe-uninit", "memoffset", "scopeguard", ] [[package]] name = "crossbeam-epoch" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0f606a85340376eef0d6d8fec399e6d4a544d648386c6645eb6d0653b27d9f" dependencies = [ "cfg-if 1.0.0", "const_fn", "crossbeam-utils 0.8.0", "lazy_static", "memoffset", "scopeguard", ] [[package]] name = "crossbeam-queue" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ "cfg-if 0.1.10", "crossbeam-utils 0.7.2", "maybe-uninit", ] [[package]] name = "crossbeam-utils" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ "autocfg", "cfg-if 0.1.10", "lazy_static", ] [[package]] name = "crossbeam-utils" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec91540d98355f690a86367e566ecad2e9e579f230230eb7c21398372be73ea5" dependencies = [ "autocfg", "cfg-if 1.0.0", "const_fn", "lazy_static", ] [[package]] name = "csv" version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc4666154fd004af3fd6f1da2e81a96fd5a81927fe8ddb6ecc79e2aa6e138b54" dependencies = [ "bstr", "csv-core", "itoa", "ryu", "serde", ] [[package]] name = "csv-core" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" dependencies = [ "memchr", ] [[package]] name = "either" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "event-listener" version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" [[package]] name = "fastrand" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3" dependencies = [ "instant", ] [[package]] name = "futures" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" dependencies = [ "futures-channel", "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", ] [[package]] name = "futures-channel" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" dependencies = [ "futures-core", "futures-sink", ] [[package]] name = "futures-core" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" [[package]] name = "futures-executor" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" dependencies = [ "futures-core", "futures-task", "futures-util", ] [[package]] name = "futures-intrusive" version = "0.5.0" dependencies = [ "async-std", "criterion", "crossbeam", "futures", "futures-core", "futures-test", "lazy_static", "lock_api", "parking_lot", "pin-utils", "rand", "signal-hook", "tokio", ] [[package]] name = "futures-io" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" [[package]] name = "futures-lite" 
version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e6c079abfac3ab269e2927ec048dabc89d009ebfdda6b8ee86624f30c689658" dependencies = [ "fastrand", "futures-core", "futures-io", "memchr", "parking", "pin-project-lite 0.1.11", "waker-fn", ] [[package]] name = "futures-macro" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" dependencies = [ "proc-macro-hack", "proc-macro2", "quote", "syn", ] [[package]] name = "futures-sink" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" [[package]] name = "futures-task" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" dependencies = [ "once_cell", ] [[package]] name = "futures-test" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd2847e7fa5ac5f7a50685fbc9b317a94fb2609e2f41f4056c10c6ae917998e2" dependencies = [ "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", "once_cell", "pin-project", "pin-utils", ] [[package]] name = "futures-util" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" dependencies = [ "futures-channel", "futures-core", "futures-io", "futures-macro", "futures-sink", "futures-task", "memchr", "pin-project", "pin-utils", "proc-macro-hack", "proc-macro-nested", "slab", ] [[package]] name = "getrandom" version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" dependencies = [ "cfg-if 0.1.10", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] 
[[package]] name = "gloo-timers" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" dependencies = [ "futures-channel", "futures-core", "js-sys", "wasm-bindgen", "web-sys", ] [[package]] name = "half" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d36fab90f82edc3c747f9d438e06cf0a491055896f2a279638bb5beed6c40177" [[package]] name = "hermit-abi" version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" dependencies = [ "libc", ] [[package]] name = "instant" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "itertools" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" dependencies = [ "either", ] [[package]] name = "itoa" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" [[package]] name = "js-sys" version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca059e81d9486668f12d455a4ea6daa600bd408134cd17e3d3fb5a32d1f016f8" dependencies = [ "wasm-bindgen", ] [[package]] name = "kv-log-macro" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ "log", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version 
= "0.2.135" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c" [[package]] name = "lock_api" version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" dependencies = [ "scopeguard", ] [[package]] name = "log" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ "cfg-if 0.1.10", ] [[package]] name = "maybe-uninit" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memoffset" version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" dependencies = [ "autocfg", ] [[package]] name = "mio" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.36.1", ] [[package]] name = "nb-connect" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8123a81538e457d44b933a02faf885d3fe8408806b23fa700e8f01c6c3a98998" dependencies = [ "libc", "winapi", ] [[package]] name = "num-traits" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" version = "1.13.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" dependencies = [ "hermit-abi", "libc", ] [[package]] name = "once_cell" version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "oorandom" version = "11.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a170cebd8021a008ea92e4db85a72f80b35df514ec664b296fdcbb654eac0b2c" [[package]] name = "parking" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "parking_lot" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall", "smallvec", "windows-sys 0.42.0", ] [[package]] name = "pin-project" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "pin-project-lite" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" [[package]] name = 
"pin-project-lite" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "plotters" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d1685fbe7beba33de0330629da9d955ac75bd54f33d7b79f9a895590124f6bb" dependencies = [ "js-sys", "num-traits", "wasm-bindgen", "web-sys", ] [[package]] name = "polling" version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" dependencies = [ "cfg-if 0.1.10", "libc", "log", "wepoll-sys", "winapi", ] [[package]] name = "ppv-lite86" version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "proc-macro-hack" version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro-nested" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" [[package]] name = "proc-macro2" version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" dependencies = [ "unicode-xid", ] [[package]] name = "quote" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ "getrandom", "libc", "rand_chacha", "rand_core", "rand_hc", ] [[package]] name = "rand_chacha" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ "getrandom", ] [[package]] name = "rand_hc" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ "rand_core", ] [[package]] name = "rayon" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" dependencies = [ "autocfg", "crossbeam-deque 0.8.0", "either", "rayon-core", ] [[package]] name = "rayon-core" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ "crossbeam-channel 0.5.0", "crossbeam-deque 0.8.0", "crossbeam-utils 0.8.0", "lazy_static", "num_cpus", ] [[package]] name = "redox_syscall" version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "regex" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" dependencies = [ "regex-syntax", ] [[package]] name = "regex-automata" version = "0.1.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" dependencies = [ "byteorder", ] [[package]] name = "regex-syntax" version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" [[package]] name = "rustc_version" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" dependencies = [ "semver", ] [[package]] name = "ryu" version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "semver" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ "semver-parser", ] [[package]] name = "semver-parser" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" [[package]] name = "serde_cbor" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622" 
dependencies = [ "half", "serde", ] [[package]] name = "serde_derive" version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "serde_json" version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" dependencies = [ "itoa", "ryu", "serde", ] [[package]] name = "signal-hook" version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "604508c1418b99dfe1925ca9224829bb2a8a9a04dda655cc01fcad46f4ab05ed" dependencies = [ "libc", "signal-hook-registry", ] [[package]] name = "signal-hook-registry" version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab" dependencies = [ "libc", ] [[package]] name = "slab" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "smallvec" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", ] [[package]] name = "syn" version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6498a9efc342871f91cc2d0d694c674368b4ceb40f62b65a7a08c3792935e702" dependencies = [ "proc-macro2", "quote", "unicode-xid", ] [[package]] name = "textwrap" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" dependencies = [ "unicode-width", ] [[package]] name = "tinytemplate" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d3dc76004a03cec1c5932bca4cdc2e39aaa798e3f82363dd94f9adf6098c12f" dependencies = [ "serde", "serde_json", ] [[package]] name = "tokio" version = "1.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" dependencies = [ "bytes", "libc", "memchr", "mio", "num_cpus", "once_cell", "parking_lot", "pin-project-lite 0.2.9", "signal-hook-registry", "socket2", "tokio-macros", "winapi", ] [[package]] name = "tokio-macros" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "unicode-width" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" [[package]] name = "unicode-xid" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" [[package]] name = "vec-arena" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eafc1b9b2dfc6f5529177b62cf806484db55b32dc7c9658a118e11bbeb33061d" [[package]] name = "waker-fn" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" dependencies = [ "same-file", "winapi", "winapi-util", ] [[package]] name = "wasi" 
version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ac64ead5ea5f05873d7c12b545865ca2b8d28adfc50a49b84770a3a97265d42" dependencies = [ "cfg-if 0.1.10", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f22b422e2a757c35a73774860af8e112bff612ce6cb604224e8e47641a9e4f68" dependencies = [ "bumpalo", "lazy_static", "log", "proc-macro2", "quote", "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7866cab0aa01de1edf8b5d7936938a7e397ee50ce24119aef3e1eaa3b6171da" dependencies = [ "cfg-if 0.1.10", "js-sys", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b13312a745c08c469f0b292dd2fcd6411dba5f7160f593da6ef69b64e407038" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f249f06ef7ee334cc3b8ff031bfc11ec99d00f34d86da7498396dc1e3b1498fe" dependencies = [ "proc-macro2", "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1d649a3145108d7d3fbcde896a468d1bd636791823c9921135218ad89be08307" [[package]] name = "web-sys" version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4bf6ef87ad7ae8008e15a355ce696bed26012b7caa21605188cfd8214ab51e2d" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "wepoll-sys" version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" dependencies = [ "cc", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ "windows_aarch64_msvc 0.36.1", "windows_i686_gnu 0.36.1", "windows_i686_msvc 0.36.1", "windows_x86_64_gnu 0.36.1", "windows_x86_64_msvc 0.36.1", ] [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" 
dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc 0.42.0", "windows_i686_gnu 0.42.0", "windows_i686_msvc 0.42.0", "windows_x86_64_gnu 0.42.0", "windows_x86_64_gnullvm", "windows_x86_64_msvc 0.42.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" [[package]] name = "windows_aarch64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_aarch64_msvc" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" [[package]] name = "windows_i686_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_gnu" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" [[package]] name = "windows_i686_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_i686_msvc" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" [[package]] name = "windows_x86_64_gnu" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_gnu" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" [[package]] name = "windows_x86_64_msvc" version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] name = "windows_x86_64_msvc" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" futures-intrusive-0.5.0/Cargo.toml0000644000000037120000000000100125620ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "futures-intrusive" version = "0.5.0" authors = ["Matthias Einwag "] description = """ Futures based on intrusive data structures - for std and no-std environments. 
""" homepage = "https://github.com/Matthias247/futures-intrusive" license = "MIT OR Apache-2.0" repository = "https://github.com/Matthias247/futures-intrusive" [lib] name = "futures_intrusive" [[example]] name = "cancellation" required-features = ["std"] [[example]] name = "philosophers" required-features = ["std"] [[bench]] name = "mpmc_channel" harness = false [[bench]] name = "mutex" harness = false [[bench]] name = "semaphore" harness = false [dependencies.futures-core] version = "^0.3" default-features = false [dependencies.lock_api] version = "0.4.1" [dependencies.parking_lot] version = "0.12.0" optional = true [dev-dependencies.async-std] version = "1.4" [dev-dependencies.criterion] version = "0.3.0" [dev-dependencies.crossbeam] version = "0.7" [dev-dependencies.futures] version = "0.3.0" features = ["async-await"] default-features = true [dev-dependencies.futures-test] version = "0.3.0" default-features = true [dev-dependencies.lazy_static] version = "1.4.0" [dev-dependencies.pin-utils] version = "0.1.0" [dev-dependencies.rand] version = "0.7" [dev-dependencies.signal-hook] version = "0.1.11" [dev-dependencies.tokio] version = "1.14" features = ["full"] [features] alloc = ["futures-core/alloc"] default = ["std"] std = [ "alloc", "parking_lot", ] futures-intrusive-0.5.0/Cargo.toml.orig000064400000000000000000000025300072674642500162700ustar 00000000000000[package] name = "futures-intrusive" edition = "2018" version = "0.5.0" authors = ["Matthias Einwag "] license = "MIT OR Apache-2.0" repository = "https://github.com/Matthias247/futures-intrusive" homepage = "https://github.com/Matthias247/futures-intrusive" description = """ Futures based on intrusive data structures - for std and no-std environments. 
""" [lib] name = "futures_intrusive" [features] alloc = ["futures-core/alloc"] std = ["alloc", "parking_lot"] default = ["std"] [dependencies] futures-core = { version = "^0.3", default-features = false } lock_api = "0.4.1" parking_lot = { version = "0.12.0", optional = true } [dev-dependencies] futures = { version = "0.3.0", default-features = true, features=["async-await"] } futures-test = { version = "0.3.0", default-features = true } pin-utils = "0.1.0" criterion = "0.3.0" crossbeam = "0.7" # For channel benchmarks lazy_static = "1.4.0" rand = "0.7" async-std = "1.4" # For benchmarks tokio = { version = "1.14", features = ["full"] } # For channel benchmarks signal-hook = "0.1.11" # For cancellation example [[bench]] name = "mpmc_channel" harness = false [[bench]] name = "mutex" harness = false [[bench]] name = "semaphore" harness = false [[example]] name = "cancellation" required-features = ["std"] [[example]] name = "philosophers" required-features = ["std"] futures-intrusive-0.5.0/LICENSE-APACHE000064400000000000000000000251270072674642500153340ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright (c) 2019 Matthias Einwag Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. futures-intrusive-0.5.0/LICENSE-MIT000064400000000000000000000020430072674642500150340ustar 00000000000000Copyright (c) 2019 Matthias Einwag Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
futures-intrusive-0.5.0/benches/bench_mutex.sh000064400000000000000000000003030072674642500176410ustar 00000000000000# This is just a convenience script to filter the important facts out of the criterion report cargo bench --bench mutex | grep -E "cont|time" | grep -v -E "Warming|Analyzing|Benchmarking|Warning"futures-intrusive-0.5.0/benches/mpmc_channel.rs000064400000000000000000000246220072674642500200100ustar 00000000000000use criterion::{ criterion_group, criterion_main, Criterion, ParameterizedBenchmark, }; use futures::{ executor::block_on, future::join_all, join, sink::SinkExt, stream::StreamExt, FutureExt, }; use futures_intrusive::channel::{ shared::channel, shared::unbuffered_channel, LocalChannel, }; use std::time::Duration; /// Elements to transfer per producer const ELEMS_TO_SEND: usize = 1000; /// Buffer size for buffered channels const CHANNEL_BUFFER_SIZE: usize = 20; /// Benchmark for Crossbeam channels fn crossbeam_channel_variable_tx(producers: usize) { let elems_per_producer = ELEMS_TO_SEND / producers; let (tx, rx) = crossbeam::channel::bounded(CHANNEL_BUFFER_SIZE); for _i in 0..producers { let tx = tx.clone(); std::thread::spawn(move || { for _i in 0..elems_per_producer { tx.send(4).unwrap(); } }); } drop(tx); loop { let res = rx.recv(); if res.is_err() { break; } } } /// variable producers, single consumer fn futchan_bounded_variable_tx(producers: usize) { use futures::channel::mpsc::channel; let elems_per_producer = ELEMS_TO_SEND / producers; let (tx, mut rx) = channel(CHANNEL_BUFFER_SIZE); for _i in 0..producers { let mut tx = tx.clone(); std::thread::spawn(move || { block_on(async { for _i in 0..elems_per_producer { tx.send(4).await.unwrap(); } }); }); } drop(tx); block_on(async { loop { let res = rx.next().await; if res.is_none() { break; } } }); } /// variable producers, single consumer fn tokiochan_bounded_variable_tx(producers: usize) { let elems_per_producer = ELEMS_TO_SEND / producers; let (tx, mut rx) = 
tokio::sync::mpsc::channel(CHANNEL_BUFFER_SIZE); for _i in 0..producers { let tx = tx.clone(); std::thread::spawn(move || { block_on(async { for _i in 0..elems_per_producer { tx.send(4).await.unwrap(); } }); }); } drop(tx); block_on(async { loop { let res = rx.recv().await; if res.is_none() { break; } } }); } macro_rules! intrusive_channel_variable_tx { ($producers: expr, $channel_constructor: expr) => { let elems_per_producer = ELEMS_TO_SEND / $producers; let (tx, rx) = $channel_constructor; for _i in 0..$producers { let tx = tx.clone(); std::thread::spawn(move || { block_on(async { for _i in 0..elems_per_producer { let r = tx.send(4).await; assert!(r.is_ok()); } }); }); } drop(tx); block_on(async { loop { let res = rx.receive().await; if res.is_none() { break; } } }); }; } /// variable producers, single consumer fn intrusivechan_bounded_variable_tx(producers: usize) { intrusive_channel_variable_tx!( producers, channel::(CHANNEL_BUFFER_SIZE) ); } /// variable producers, single consumer fn intrusivechan_unbuffered_variable_tx(producers: usize) { intrusive_channel_variable_tx!(producers, unbuffered_channel::()); } /// variable producers, single consumer fn futchan_bounded_variable_tx_single_thread(producers: usize) { let elems_per_producer = ELEMS_TO_SEND / producers; block_on(async { let (tx, mut rx) = futures::channel::mpsc::channel(CHANNEL_BUFFER_SIZE); let produce_done = join_all((0..producers).into_iter().map(|_| { let mut tx = tx.clone(); async move { for _i in 0..elems_per_producer { tx.send(4).await.unwrap(); } } .boxed() })); drop(tx); let consume_done = async { loop { let res = rx.next().await; if res.is_none() { break; } } }; join!(produce_done, consume_done); }); } /// variable producers, single consumer fn tokiochan_bounded_variable_tx_single_thread(producers: usize) { let elems_per_producer = ELEMS_TO_SEND / producers; block_on(async { let (tx, mut rx) = tokio::sync::mpsc::channel(CHANNEL_BUFFER_SIZE); let produce_done = 
join_all((0..producers).into_iter().map(|_| { let tx = tx.clone(); async move { for _i in 0..elems_per_producer { tx.send(4).await.unwrap(); } } .boxed() })); drop(tx); let consume_done = async { loop { let res = rx.recv().await; if res.is_none() { break; } } }; join!(produce_done, consume_done); }); } macro_rules! intrusive_channel_variable_tx_single_thread { ($producers: expr, $channel_constructor: expr) => { let elems_per_producer = ELEMS_TO_SEND / $producers; block_on(async { let (tx, rx) = $channel_constructor; let produce_done = join_all((0..$producers).into_iter().map(|_| { let tx = tx.clone(); Box::pin(async move { for _i in 0..elems_per_producer { let r = tx.send(4).await; assert!(r.is_ok()); } }) })); drop(tx); let consume_done = async { loop { let res = rx.receive().await; if res.is_none() { break; } } }; join!(produce_done, consume_done); }); }; } /// variable producers, single consumer fn intrusivechan_bounded_variable_tx_single_thread(producers: usize) { intrusive_channel_variable_tx_single_thread!( producers, channel::(CHANNEL_BUFFER_SIZE) ); } /// variable producers, single consumer fn intrusivechan_unbuffered_variable_tx_single_thread(producers: usize) { intrusive_channel_variable_tx_single_thread!( producers, unbuffered_channel::() ); } /// variable producers, single consumer fn intrusive_local_chan_bounded_variable_tx_single_thread(producers: usize) { let elems_per_producer = ELEMS_TO_SEND / producers; block_on(async { let rx = LocalChannel::::new(); let produce_done = join_all((0..producers).into_iter().map(|_| { Box::pin(async { for _i in 0..elems_per_producer { let r = rx.send(4).await; assert!(r.is_ok()); } }) })); let consume_done = async { let mut count = 0; let needed = elems_per_producer * producers; loop { let _ = rx.receive().await.unwrap(); // The channel doesn't automatically get closed when producers are // gone since producer and consumer are the same object type. // Therefore we need to count receives. 
count += 1; if count == needed { break; } } }; join!(produce_done, consume_done); }); } fn criterion_benchmark(c: &mut Criterion) { // Producer and consumer are running on the same thread c.bench( "Channels (Single Threaded)", ParameterizedBenchmark::new( "intrusive local channel with producers", |b, &&producers| { b.iter(|| { intrusive_local_chan_bounded_variable_tx_single_thread( producers, ) }) }, &[5, 20, 100], ) .with_function("intrusive channel with producers", |b, &&producers| { b.iter(|| { intrusivechan_bounded_variable_tx_single_thread(producers) }) }) .with_function( "intrusive unbuffered channel with producers", |b, &&producers| { b.iter(|| { intrusivechan_unbuffered_variable_tx_single_thread( producers, ) }) }, ) .with_function( "futures::channel::mpsc with producers", |b, &&producers| { b.iter(|| futchan_bounded_variable_tx_single_thread(producers)) }, ) .with_function( "tokio::sync::mpsc with producers", |b, &&producers| { b.iter(|| { tokiochan_bounded_variable_tx_single_thread(producers) }) }, ), ); // Producer and consume run on a different thread c.bench( "Channels (Thread per producer)", ParameterizedBenchmark::new( "crossbeam channel with producers", |b, &&producers| { b.iter(|| crossbeam_channel_variable_tx(producers)) }, &[5, 20, 100], ) .with_function("intrusive channel with producers", |b, &&producers| { b.iter(|| intrusivechan_bounded_variable_tx(producers)) }) .with_function( "intrusive unbuffered channel with producers", |b, &&producers| { b.iter(|| intrusivechan_unbuffered_variable_tx(producers)) }, ) .with_function( "futures::channel::mpsc with producers", |b, &&producers| b.iter(|| futchan_bounded_variable_tx(producers)), ) .with_function( "tokio::sync::mpsc with producers", |b, &&producers| { b.iter(|| tokiochan_bounded_variable_tx(producers)) }, ), ); } criterion_group! 
{ name = benches; config = Criterion::default().measurement_time(Duration::from_secs(10)).nresamples(50); targets = criterion_benchmark } criterion_main!(benches); futures-intrusive-0.5.0/benches/mutex.rs000064400000000000000000000150640072674642500165260ustar 00000000000000//! Benchmarks for asynchronous Mutex implementations use async_std::{sync::Mutex as AsyncStdMutex, task}; use criterion::{criterion_group, criterion_main, Benchmark, Criterion}; use futures_intrusive::sync::{Mutex as IntrusiveMutex, Semaphore}; use tokio::sync::Mutex as TokioMutex; use std::future::Future; use std::sync::Arc; use std::time::Duration; mod utils; use utils::Yield; const ITERATIONS: usize = 300; const CONTENTION_THREADS: usize = 10; /// With a chance of 25% chance the operation inside the async Mutex blocks, /// which is emulated by yielding `NR_YIELD` times back to the executor. const YIELD_CHANCE: usize = 25; const NR_YIELDS: usize = 10; /// Extension trait to add support for `block_on` for runtimes which not /// natively support it as member function trait Block { fn block_on>(&self, f: F); } struct FakeAsyncStdRuntime; impl Block for FakeAsyncStdRuntime { fn block_on>(&self, f: F) { task::block_on(f); } } macro_rules! run_with_mutex { ( $mutex_constructor: expr, $nr_tasks: expr, $nr_iterations: expr, $spawn_fn: expr ) => { let m = Arc::new($mutex_constructor); let mut tasks = Vec::new(); let sem = Arc::new(Semaphore::new(false, 0)); for _ in 0..$nr_tasks { let m = m.clone(); let s = sem.clone(); tasks.push($spawn_fn(async move { for count in 0..$nr_iterations { let _ = m.lock().await; // Asynchronous mutexes are intended to guard over // operations which are potentially task-blocking and take // a certain amount of time to complete. In order to simulate // the behavior we yield a certain amount of times to back // to the executor. This is more consistent than e.g. using // a timer, and the overhead of yielding is the same for the // various Mutex implementations. 
if YIELD_CHANCE != 0 && (count % (100 / YIELD_CHANCE) == 0) { Yield::new(NR_YIELDS).await; } } s.release(1); })); } sem.acquire($nr_tasks).await; }; } macro_rules! contention { ( $b: ident, $rt_setup: expr, $spawn_fn: expr, $mutex_constructor: expr, $nr_iterations: expr ) => { #[allow(unused_mut)] // mut is only required for some runtimes let mut rt = $rt_setup; $b.iter(|| { rt.block_on(async { run_with_mutex!( $mutex_constructor, CONTENTION_THREADS, $nr_iterations, $spawn_fn ); }) }); }; } macro_rules! no_contention { ( $b: ident, $rt_setup: expr, $spawn_fn: expr, $mutex_constructor: expr, $nr_iterations: expr ) => { #[allow(unused_mut)] // mut is only required for some runtimes let mut rt = $rt_setup; $b.iter(|| { rt.block_on(async { run_with_mutex!( $mutex_constructor, 1, $nr_iterations, $spawn_fn ); }) }); }; } macro_rules! benchmarks { ( $c: ident, $rt_name: literal, $rt_setup: expr, $spawn_fn: expr, $mutex_name: literal, $mutex_constructor: expr ) => { $c.bench( concat!($rt_name, "/", $mutex_name), Benchmark::new("contention", |b| { contention!( b, $rt_setup, $spawn_fn, $mutex_constructor, ITERATIONS ); }) .with_function("no_contention", |b| { no_contention!( b, $rt_setup, $spawn_fn, $mutex_constructor, ITERATIONS ); }), ); }; } fn tokio_rt_intrusive_fair_benchmarks(c: &mut Criterion) { benchmarks!( c, "tokio_rt", tokio::runtime::Runtime::new().unwrap(), tokio::spawn, "futures_intrusive(fair=true)", IntrusiveMutex::new((), true) ); } fn tokio_rt_intrusive_unfair_benchmarks(c: &mut Criterion) { benchmarks!( c, "tokio_rt", tokio::runtime::Runtime::new().unwrap(), tokio::spawn, "futures_intrusive(fair=false)", IntrusiveMutex::new((), false) ); } fn tokio_rt_async_std_benchmarks(c: &mut Criterion) { benchmarks!( c, "tokio_rt", tokio::runtime::Runtime::new().unwrap(), tokio::spawn, "async_std", AsyncStdMutex::new(()) ); } fn tokio_rt_tokio_benchmarks(c: &mut Criterion) { benchmarks!( c, "tokio_rt", tokio::runtime::Runtime::new().unwrap(), tokio::spawn, "tokio", 
TokioMutex::new(()) ); } fn async_std_intrusive_fair_benchmarks(c: &mut Criterion) { benchmarks!( c, "async_std_rt", FakeAsyncStdRuntime {}, task::spawn, "futures_intrusive(fair=true)", IntrusiveMutex::new((), true) ); } fn async_std_intrusive_unfair_benchmarks(c: &mut Criterion) { benchmarks!( c, "async_std_rt", FakeAsyncStdRuntime {}, task::spawn, "futures_intrusive(fair=false)", IntrusiveMutex::new((), false) ); } fn async_std_async_std_benchmarks(c: &mut Criterion) { benchmarks!( c, "async_std_rt", FakeAsyncStdRuntime {}, task::spawn, "async_std", AsyncStdMutex::new(()) ); } fn async_std_tokio_benchmarks(c: &mut Criterion) { benchmarks!( c, "async_std_rt", FakeAsyncStdRuntime {}, task::spawn, "tokio", TokioMutex::new(()) ); } criterion_group! { name = benches; config = Criterion::default().measurement_time(Duration::from_secs(10)); targets = // tokio tokio_rt_intrusive_fair_benchmarks, tokio_rt_intrusive_unfair_benchmarks, tokio_rt_async_std_benchmarks, tokio_rt_tokio_benchmarks, // async-std async_std_intrusive_fair_benchmarks, async_std_intrusive_unfair_benchmarks, async_std_async_std_benchmarks, async_std_tokio_benchmarks } criterion_main!(benches); futures-intrusive-0.5.0/benches/semaphore.rs000064400000000000000000000133220072674642500173420ustar 00000000000000//! 
Benchmarks for asynchronous Semaphore implementations use criterion::{criterion_group, criterion_main, Benchmark, Criterion}; use futures_intrusive::sync::{ Semaphore as IntrusiveSemaphore, SemaphoreReleaser as IntrusiveSemaphoreReleaser, }; use tokio::sync::{ Semaphore as TokioSemaphore, SemaphorePermit as TokioSemaphorePermit, }; use std::future::Future; use std::sync::Arc; use std::time::Duration; mod utils; use utils::Yield; /// How often each task should acquire the semaphore const NR_ACQUIRES: usize = 50; /// How many tasks are used const TASKS: usize = 200; /// The amount of available permits when we are testing strong contention const CONTENTION_PERMITS: usize = 100; /// The amount of available permits when testing light contention const NORMAL_PERMITS: usize = 180; /// The amount of available permits when testing no contention const UNCONTENDED_PERMITS: usize = TASKS; /// The number of yields we perform after the Semaphore was acquired const NR_YIELDS: usize = 4; /// Extension trait to add support for `block_on` for runtimes which not /// natively support it as member function trait Block { fn block_on>(&self, f: F); } fn create_intrusive_fair_semaphore(permits: usize) -> IntrusiveSemaphore { IntrusiveSemaphore::new(true, permits) } fn create_intrusive_unfair_semaphore(permits: usize) -> IntrusiveSemaphore { IntrusiveSemaphore::new(false, permits) } fn create_tokio_semaphore(permits: usize) -> TokioSemaphore { TokioSemaphore::new(permits) } async fn acquire_intrusive_semaphore( sem: &IntrusiveSemaphore, ) -> IntrusiveSemaphoreReleaser<'_> { sem.acquire(1).await } async fn acquire_tokio_semaphore( sem: &TokioSemaphore, ) -> TokioSemaphorePermit<'_> { sem.acquire().await.unwrap() } macro_rules! 
run_with_semaphore { ( $nr_tasks: expr, $nr_iterations: expr, $nr_permits: expr, $spawn_fn: expr, $create_semaphore_fn: ident, $acquire_fn: ident, ) => { let semaphore = Arc::new($create_semaphore_fn($nr_permits)); let mut tasks = Vec::new(); let sem = Arc::new(IntrusiveSemaphore::new(false, 0)); for _ in 0..$nr_tasks { let semaphore = semaphore.clone(); let s = sem.clone(); tasks.push($spawn_fn(async move { for _count in 0..$nr_iterations { let _releaser = $acquire_fn(&*semaphore).await; Yield::new(NR_YIELDS).await; } s.release(1); })); } sem.acquire($nr_tasks).await; }; } macro_rules! bench { ( $b: ident, $rt_setup: expr, $spawn_fn: expr, $nr_iterations: expr, $nr_permits: expr, $create_semaphore_fn: ident, $acquire_fn: ident, ) => { #[allow(unused_mut)] // mut is only required for some runtimes let mut rt = $rt_setup; $b.iter(|| { rt.block_on(async { run_with_semaphore!( TASKS, $nr_iterations, $nr_permits, $spawn_fn, $create_semaphore_fn, $acquire_fn, ); }) }); }; } macro_rules! benchmarks { ( $c: ident, $rt_name: literal, $rt_setup: expr, $spawn_fn: expr, $semaphore_name: literal, $create_semaphore_fn: ident, $acquire_fn: ident, ) => { $c.bench( concat!($rt_name, "/", $semaphore_name), Benchmark::new("heavy contention", |b| { bench!( b, $rt_setup, $spawn_fn, NR_ACQUIRES, CONTENTION_PERMITS, $create_semaphore_fn, $acquire_fn, ); }) .with_function("normal contention", |b| { bench!( b, $rt_setup, $spawn_fn, NR_ACQUIRES, NORMAL_PERMITS, $create_semaphore_fn, $acquire_fn, ); }) .with_function("no contention", |b| { bench!( b, $rt_setup, $spawn_fn, NR_ACQUIRES, UNCONTENDED_PERMITS, $create_semaphore_fn, $acquire_fn, ); }), ); }; } fn tokio_rt_intrusive_fair_benchmarks(c: &mut Criterion) { benchmarks!( c, "tokio_rt", tokio::runtime::Runtime::new().unwrap(), tokio::spawn, "futures_intrusive(fair=true)", create_intrusive_fair_semaphore, acquire_intrusive_semaphore, ); } fn tokio_rt_intrusive_unfair_benchmarks(c: &mut Criterion) { benchmarks!( c, "tokio_rt", 
tokio::runtime::Runtime::new().unwrap(), tokio::spawn, "futures_intrusive(fair=false)", create_intrusive_unfair_semaphore, acquire_intrusive_semaphore, ); } fn tokio_rt_tokio_benchmarks(c: &mut Criterion) { benchmarks!( c, "tokio_rt", tokio::runtime::Runtime::new().unwrap(), tokio::spawn, "tokio", create_tokio_semaphore, acquire_tokio_semaphore, ); } criterion_group! { name = benches; config = Criterion::default().measurement_time(Duration::from_secs(10)); targets = tokio_rt_intrusive_fair_benchmarks, tokio_rt_intrusive_unfair_benchmarks, tokio_rt_tokio_benchmarks, } criterion_main!(benches); futures-intrusive-0.5.0/benches/utils/mod.rs000064400000000000000000000011750072674642500173010ustar 00000000000000use std::{ future::Future, pin::Pin, task::{Context, Poll}, }; /// A Future which yields to the executor for a given amount of iterations /// and resolves after this pub struct Yield { iter: usize, } impl Yield { pub fn new(iter: usize) -> Yield { Yield { iter } } } impl Future for Yield { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { if self.iter == 0 { Poll::Ready(()) } else { self.iter -= 1; cx.waker().wake_by_ref(); Poll::Pending } } } futures-intrusive-0.5.0/examples/cancellation.rs000064400000000000000000000337400072674642500202300ustar 00000000000000//! This example demonstrates the application of structured concurrency and //! gracefully cancellation in an async Rust application. //! An async [`ManualResetEvent`] as provided by `futures-intrusive` is a used //! as the main signalization mechanism for cooperative cancellation. //! //! Usage: cargo run --example cancellation //! After some seconds, press Ctrl+C and observe the results //! //! Structured concurrency is an application model where the lifetime of any //! concurrent operation is strictly contained within the lifetime of it's //! parent operation. //! //! The concept is described in further detail within //! 
https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/ //! https://trio.discourse.group/t/structured-concurrency-kickoff/55 //! //! The application of structured concurrency principles simplifies concurrent //! program. It allows for an easier reasoning about which concurrent tasks run //! at a given point of time, since no subtask will ever run without it's original //! parent task already having finished. This makes it impossible for the subtask //! to wait on a certain condition that will no longer occur - or to modify the //! state of the program when we no longer expect it. //! //! One challenge for structured concurrency is the graceful cancellation of //! subtasks. Within Rusts `Future`s and `async/await` programming model it is //! generally easy to stop asynchronous subtasks: We can just `drop` their //! associated `Future`s, which will cancel those tasks. However this foceful //! cancellation comes with several downsides: //! - The subtasks can't perform any cleanup work anymore that might be helpful. //! Only code inside their destructors can run if the tasks are cancelled. //! - The subtasks can't return any value. //! //! Therefore a cooperative and graceful cancellation is sometimes preferred. In //! this example we implement graceful cancellation in order to allow a sub task //! to return it's calculated values. //! //! Graceful cancellation is implemented in 3 steps: //! 1. Signalling the cancellation: One component signals the sub-tasks that they //! should stop their work as soon as it is convenient for them. The //! cancellation signal can either originate from a parent task, the sub task //! itself, or one of the sibling tasks. In order to distribute cancellation //! signals we utilize an async `ManualResetEvent` as a cancellation token. //! This datastructure allows to signal an arbitrary amount of tasks. //! The signal can be emitted by any component which has access to //! `ManualResetEvent`. //! 2. 
Detecting the signal inside sub-tasks and shutting down. In order to //! support graceful cancellation, subtasks need to detect the condition that //! they are supposed to shut down. In order to do this we use the futures-rs //! `select!` macro to wait in parallel for either the async calculation on //! the "normal path" to complete or for the cancellation to get signalled. //! Not all subtasks have to explicitly support this. Some of them just need //! to forward the cancellation token to their child tasks. When these finish //! early due to cancellation, then the parent will also finish early. //! Child tasks can return an error result in order to indicate that they have //! returned due the explicit cancellation. E.g. `Err(Cancelled)` could be //! returned to the parent. //! 3. The parent tasks waits for all sub-tasks to shut down, via waiting on //! their wait-handles (which in our case are `Future`s that can be awaited //! via `await` or various `join` functions). //! //! After these steps have completed all sub tasks of a given parent have //! completed and the parent task can also finish. It can thereby return the //! results of the child tasks if required. //! //! The implementation is similar in spirit to cancellation in the Go programming //! language trough the Context parameter (https://blog.golang.org/context). //! The main difference is that a `ManualResetEvent` is used for signalling //! cancellation instead of a `Channel` - and that we can check for the //! cancellation signal on every `await` of a `Future`. Checking for cancellation //! is not constrained to interaction with `Channel` types. //! E.g. we can easily wait on receiving data on a socket while in parallel //! waiting for cancellation. This is not directly possible in Go. //! //! It also similar to the `CancellationToken` mechanism in .NET. There the //! `CancellationToken` also needs to get forwarded as a parameter. //! //! 
This example demonstrates the mechanisms via a distributed "FizzBuzz" checker. //! The "algorithm" uses a parent tasks which uses 2 child tasks for it's work. //! When the user cancels the program, a graceful shutdown as described should //! be performed. This allows the user to retrieve the results of the algorithm. use futures::{executor::block_on, join, select}; use futures_intrusive::{ channel::LocalUnbufferedChannel, sync::{LocalManualResetEvent, ManualResetEvent}, timer::{StdClock, Timer, TimerService}, }; use lazy_static::lazy_static; use signal_hook; use std::{ sync::{ atomic::{AtomicBool, Ordering}, Arc, }, thread::{sleep, spawn}, time::Duration, }; /// The result of our search for FizzBuzz values #[derive(Debug, Default)] struct SearchResult { highest_fizz: Option, highest_buzz: Option, highest_fizzbuzz: Option, } /// This is our main async function that supports cooperative cancellation. /// The purpose of this function is to check values up to `max` for their /// fizzbuzzness and return the highest values in each category. /// /// The method can be be cancelled by signalling the cancellation token. In this /// case the method will return its latest findings. /// This is in contrast to just cancelling a `Future` - which would not allow us /// to return any results. Cancellation tokens can be passed as `Arc` /// if multiple independent subtasks need to get cancelled, or as a plain reference /// if only subtasks of a single task need to get signalled. For tasks which run /// on a singlethreaded executor `LocalManualResetEvent` provides an even higher /// lower overhead solution which does not require any internal synchronization. async fn fizzbuzz_search( max: usize, cancellation_token: Arc, ) -> SearchResult { // We start two child-tasks: // - One produces values to check // - The other task will check the values and store the results in the // result data structure. // // Both tasks are connected via a channel. 
Since the tasks are running as // subtasks of the same task in a singlethreaded executor, we can use an // extremely efficient LocalChannel for this. // // In order to make things a bit more interesting we do not utilize the same // cancellation signal for both tasks (which would also be a valid solution). // Instead we implement a sequential shutdown: // - When the main `cancellation_token` is signalled from the outside, // only the producer task will shut down. // - Before the producer task exits, it will signal another cancellation // token. That one will lead the checker task to shut down. let channel = LocalUnbufferedChannel::::new(); let checker_cancellation_token = LocalManualResetEvent::new(false); let producer_future = producer_task( max, &channel, &cancellation_token, &checker_cancellation_token, ); let checker_future = check_task(&channel, &checker_cancellation_token); // Here we wait for both tasks to complete. Waiting for all subtasks to // complete is one important part of structured concurrency. let results = join!(producer_future, checker_future); println!("All subtasks have completed"); // Since we waited for all subtasks to complete we can return the search // result. // If the async subtasks had been forcefully instead of cooperatively // cancelled the results would not have been available. results.1 } /// The producing task produces all values that need to get checked for /// fizzbuzzness. /// The task will run until it either has generated all values to check or /// until the task gets cancelled. async fn producer_task( max: usize, channel: &LocalUnbufferedChannel, main_cancellation_token: &ManualResetEvent, consumer_cancellation_token: &LocalManualResetEvent, ) { for value in 1..max { select! { result = channel.send(value) => { if !result.is_ok() { unreachable!("This can not happen in this example"); } }, _ = main_cancellation_token.wait() => { // The operation was cancelled break; } }; } // No more values to check or we had been cancelled. 
// In this case we signal the `cancellation_token`, in order to let the // consumer shut down. // We should here have alternatively `.close()`d the channel to signal the // consumer to join. However we want mainly want to demonstrate the // cancellation concept here. println!("Goodbye from the producer. Now signalling the checker"); consumer_cancellation_token.set(); } /// The check task runs until it gets cancelled. That can happen either due /// to a cancellation being signalled, or due to the input channel getting /// closed. In a real application one of those strategies would be good sufficient. /// Since this example focusses on cancellation and structured concurrency, this /// task will **always** get shut down via the cancellation token. /// /// It is important that this tasks runs to completion instead of getting /// forcefully cancelled. Otherwise no results would be available. async fn check_task( channel: &LocalUnbufferedChannel, cancellation_token: &LocalManualResetEvent, ) -> SearchResult { // Initialize the result with `None`s let mut result: SearchResult = Default::default(); loop { select! { value = channel.receive() => { if let Some(value) = value { // Received a value that needs to get checked for fizzbuzzness println!("Checking {} of fizzbuzzness", value); match (value % 3 == 0, value % 5 == 0) { (true, true) => result.highest_fizzbuzz = Some(value), (true, false) => result.highest_fizz = Some(value), (false, true) => result.highest_buzz = Some(value), _ => {}, } } else { unreachable!("this is not allowed in this example"); // Otherwise just doing the following here would be ok: // break; } }, _ = cancellation_token.wait() => { // The operation was cancelled break; } }; // Waits until the timer elapses or the task gets cancelled - whatever // comes first. This slows down our consumer, and introduces another // cancellation point. 
Since we use an unbuffered channel to accept // values to check from the producer, the producer is slowed down by // the same amount of time. select! { _ = get_timer().delay(Duration::from_millis(1000)) => {}, _ = cancellation_token.wait() => { // The operation was cancelled break; }, } } println!("Goodbye from the checker"); result } fn main() { // Spawn a background thread which advances the timer let timer_join_handle = spawn(move || { timer_thread(); }); // This is the asynchronous ManualResetEvent that will be used as a cancellation // token. When the cancellation is requested, the token will be set. Thereby // all tasks which are waiting for cancellation will get signalled and awoken. let cancellation_token = Arc::new(ManualResetEvent::new(false)); // This sets up a signal listener. When SIGINT (Ctrl+C) is signalled, // the Cancellation Token is set - which will lead the async task to run // to completion. Since setting the cancellation token is not signal safe, // we apply a workaround and set only an atomic variable in the signal handler. // A background thread regularly checks the signal and sets the event once // the signal had been observed. let cloned_token = cancellation_token.clone(); // Clone for the background thread std::thread::spawn(move || { let term = Arc::new(AtomicBool::new(false)); signal_hook::flag::register(signal_hook::SIGINT, Arc::clone(&term)) .unwrap(); while !term.load(Ordering::Relaxed) { std::thread::sleep(Duration::from_millis(100)); } println!("Starting cancellation"); cloned_token.set(); }); // Start our async task. This gets the cancellation token passed as argument let result = block_on(fizzbuzz_search(std::usize::MAX, cancellation_token)); // At this point in time, the task has finished - either due to running to // completion or due to being cancelled. The task can return results in both // situations. 
println!("Discovered these awesome results: {:?}", result); // Stop the timer thread STOP_TIMER.store(true, Ordering::Relaxed); timer_join_handle.join().unwrap(); } // Some setup for the asynchronously awaitable timer lazy_static! { static ref STD_CLOCK: StdClock = StdClock::new(); static ref TIMER_SERVICE: TimerService = TimerService::new(&*STD_CLOCK); static ref STOP_TIMER: AtomicBool = AtomicBool::new(false); } /// Returns a reference to the global timer fn get_timer() -> &'static dyn Timer { &*TIMER_SERVICE } /// A background thread that drives the async timer service fn timer_thread() { while !STOP_TIMER.load(Ordering::Relaxed) { sleep(Duration::from_millis(25)); TIMER_SERVICE.check_expirations(); } } futures-intrusive-0.5.0/examples/philosophers.rs000064400000000000000000000135620072674642500203130ustar 00000000000000//! The example in this file demonstrates a solution for the //! [Dining Philosophers Problem](https://en.wikipedia.org/wiki/Dining_philosophers_problem), //! which uses async tasks and futures_intrusive primitives in order to //! simulate philosophers. #![recursion_limit = "256"] use futures::{executor::block_on, join, select}; use futures_intrusive::{ sync::LocalMutex, timer::{StdClock, Timer, TimerService}, }; use lazy_static::lazy_static; use pin_utils::pin_mut; use std::sync::atomic::{AtomicBool, Ordering}; use std::thread::{sleep, spawn}; use std::time::Duration; /// We simulate the ownership of a fork through an asynchronously awaitable mutex. /// In order to acquire a fork, the philosopher acquires the Mutex. /// In order to release a fork, the philosopher releases the LockGuard. This /// happens automatically, when the LockGuard goes out of scope. /// Since all philosophers are subtasks of the same top-level `async` task, /// a lightweight non thread-safe `LocalMutex` can be utilized. type Fork = LocalMutex<()>; // Some setup for the asynchronously awaitable timer lazy_static! 
{ static ref STD_CLOCK: StdClock = StdClock::new(); static ref TIMER_SERVICE: TimerService = TimerService::new(&*STD_CLOCK); static ref STOP_TIMER: AtomicBool = AtomicBool::new(false); } /// Returns a reference to the global timer fn get_timer() -> &'static dyn Timer { &*TIMER_SERVICE } /// Returns a random delay duration between `min` and `(min + max_extra)` fn rand_delay(min: Duration, max_extra: Duration) -> Duration { let extra_ms = rand::random::() % (max_extra.as_millis() as u64); min + Duration::from_millis(extra_ms) } /// How often a philosopher should eat const TO_EAT: usize = 5; /// Simulates a single philosopher async fn philosopher_task<'a>( name: &'a str, left_fork: &'a Fork, right_fork: &'a Fork, ) { println!("{} is ready to go", name); let mut eaten: usize = 0; while eaten != TO_EAT { println!("{} is thinking", name); get_timer() .delay(rand_delay( Duration::from_millis(1000), Duration::from_millis(1000), )) .await; { println!("{} is starting to pick up forks", name); // Create futures for acquiring both forks let get_left_fork_future = left_fork.lock(); pin_mut!(get_left_fork_future); let get_right_fork_future = right_fork.lock(); pin_mut!(get_right_fork_future); // This sets up a timer. If the philosopher can't obtain both forks // during that, they put back all acquired forks and start thinking // again. let abort_get_forks_future = get_timer().delay(Duration::from_millis(300)); pin_mut!(abort_get_forks_future); select! { _ = get_left_fork_future => { println!("{} got the left fork and tries to get the right fork", name); select! 
{ _ = get_right_fork_future => { println!("{} got the right fork and starts eating", name); get_timer().delay( rand_delay(Duration::from_millis(1000), Duration::from_millis(200))).await; eaten += 1; println!("{} has finished eating [ate {} times]", name, eaten); }, _ = abort_get_forks_future => { println!("{} could not acquire the right fork", name); }, } }, _ = get_right_fork_future => { println!("{} got the right fork and tries to get the left fork", name); select! { _ = get_left_fork_future => { println!("{} got the left fork and starts eating", name); get_timer().delay( rand_delay(Duration::from_millis(1000), Duration::from_millis(200))).await; eaten += 1; println!("{} has finished eating [ate {} times]", name, eaten); }, _ = abort_get_forks_future => { println!("{} could not acquire the left fork", name); }, } }, _ = abort_get_forks_future => { println!("{} could not acquire any fork", name); }, } } } println!("{} has finished", name); } async fn simulate_philosophers() { // Create the forks for the philosophers let forks: [Fork; 5] = [ Fork::new((), true), Fork::new((), true), Fork::new((), true), Fork::new((), true), Fork::new((), true), ]; // Create a task for each philosopher let p1 = philosopher_task("A", &forks[4], &forks[0]); let p2 = philosopher_task("B", &forks[0], &forks[1]); let p3 = philosopher_task("C", &forks[1], &forks[2]); let p4 = philosopher_task("D", &forks[2], &forks[3]); let p5 = philosopher_task("E", &forks[3], &forks[4]); // Wait until all philosophers have finished eating join!(p1, p2, p3, p4, p5); } fn main() { // Spawn a background thread which advances the timer let join_handle = spawn(move || { timer_thread(); }); // And simulate the philosophers block_on(simulate_philosophers()); // Stop the timer thread STOP_TIMER.store(true, Ordering::Relaxed); join_handle.join().unwrap(); } fn timer_thread() { while !STOP_TIMER.load(Ordering::Relaxed) { sleep(Duration::from_millis(25)); TIMER_SERVICE.check_expirations(); } } 
futures-intrusive-0.5.0/examples/readme.md000064400000000000000000000002470072674642500170010ustar 00000000000000# Examples This folder contains examples for the usage of this library. Examples can be started in the following fashion: ``` cargo run --example name_of_example ```futures-intrusive-0.5.0/readme.md000064400000000000000000000026110072674642500151600ustar 00000000000000futures-intrusive ================= This crate provides a variety of `Futures`-based and `async/await` compatible types that are based on the idea of intrusive collections: - Channels in a variety of flavors: - Oneshot - Multi-Producer Multi-Consumer (MPMC) - State Broadcast - Synchronization Primitives: - Manual Reset Event - Mutex - Semaphore - A timer Please refer to the [documentation](https://docs.rs/futures-intrusive) for details. In addition to the documentation the examples provide a quick overview on how the primitives can be used. ## Usage Add this to your `Cargo.toml`: ```toml [dependencies] futures-intrusive = "^0.5" ``` In order to use the crate in a `no-std` environment, it needs to be compiled without default features: ```toml [dependencies] futures-intrusive = { version = "^0.5", default-features = false } ``` The crate defines a feature `alloc`, which can be used in order to re-enable `alloc` features. Also defined is `std`, which can be used in order to re-enable `std` features. ## Minimum Rust version The minimum required Rust version is 1.36, due to reliance on stable `Future`, `Context` and `Waker` types. ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option.futures-intrusive-0.5.0/src/buffer/mod.rs000064400000000000000000000003720072674642500165700ustar 00000000000000//! 
Buffer types mod real_array; pub use real_array::RealArray; mod ring_buffer; pub use ring_buffer::{ArrayBuf, RingBuf}; #[cfg(feature = "alloc")] pub use ring_buffer::FixedHeapBuf; #[cfg(feature = "alloc")] pub use ring_buffer::GrowingHeapBuf; futures-intrusive-0.5.0/src/buffer/real_array.rs000064400000000000000000000035710072674642500201360ustar 00000000000000/// A marker trait which may only be implemented for native array types, like /// `[T; 2]`. The library incorporates several components that are parameterized /// by array types, but currently Rust provides no safe mechanism to express /// that. /// /// In order to work around the limitations, these methods only accept arrays /// which implement the `RealArray` type. The library provides an implementation /// of `RealArray` for arrays up to length 64, as well as for all powers of 2 /// up to 64k. /// /// In order to let the library accept arrays of bigger sizes, `RealArray` can /// be implemented by users via newtypes. A type as defined in the following /// example can be passed to the library: /// /// ``` /// use futures_intrusive::buffer::RealArray; /// use futures_intrusive::channel::LocalChannel; /// /// struct I32x384Array([i32; 384]); /// unsafe impl RealArray for I32x384Array { /// const LEN: usize = 384; /// } /// /// impl AsMut<[i32]> for I32x384Array { /// fn as_mut(&mut self) -> &mut [i32] { /// &mut self.0 /// } /// } /// /// impl AsRef<[i32]> for I32x384Array { /// fn as_ref(&self) -> &[i32] { /// &self.0 /// } /// } /// /// fn main() { /// let channel = LocalChannel::::new(); /// } /// /// ``` pub unsafe trait RealArray { /// The length of the array const LEN: usize; } macro_rules! 
real_array { ($($N:expr),+) => { $( unsafe impl RealArray for [T; $N] { const LEN: usize = $N; } )+ } } real_array!( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536 ); futures-intrusive-0.5.0/src/buffer/ring_buffer.rs000064400000000000000000000243470072674642500203110ustar 00000000000000use super::RealArray; use core::marker::PhantomData; use core::mem::MaybeUninit; /// A Ring Buffer of items pub trait RingBuf { /// The type of stored items inside the Ring Buffer type Item; /// Creates a new instance of the Ring Buffer fn new() -> Self; /// Creates a new instance of the Ring Buffer with the given capacity. /// `RingBuf` implementations are allowed to ignore the `capacity` hint and /// utilize their default capacity. fn with_capacity(cap: usize) -> Self; /// The capacity of the buffer fn capacity(&self) -> usize; /// The amount of stored items in the buffer fn len(&self) -> usize; /// Returns true if no item is stored inside the buffer. fn is_empty(&self) -> bool { self.len() == 0 } /// Returns true if there is enough space in the buffer to /// store another item. fn can_push(&self) -> bool; /// Stores the item at the end of the buffer. /// Panics if there is not enough free space. fn push(&mut self, item: Self::Item); /// Returns the oldest item inside the buffer. /// Panics if there is no available item. fn pop(&mut self) -> Self::Item; } /// An array-backed Ring Buffer /// /// `A` is the type of the backing array. The backing array must be a real /// array. In order to verify this it must satisfy the [`RealArray`] constraint. 
/// In order to create a Ring Buffer backed by an array of 5 integer elements, /// the following code can be utilized: /// /// ``` /// use futures_intrusive::buffer::{ArrayBuf, RingBuf}; /// /// type Buffer5 = ArrayBuf; /// let buffer = Buffer5::new(); /// ``` pub struct ArrayBuf where A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, { buffer: MaybeUninit, size: usize, recv_idx: usize, send_idx: usize, _phantom: PhantomData, } impl core::fmt::Debug for ArrayBuf where A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, { fn fmt( &self, f: &mut core::fmt::Formatter, ) -> Result<(), core::fmt::Error> { f.debug_struct("ArrayBuf") .field("size", &self.size) .field("cap", &self.capacity()) .finish() } } impl ArrayBuf where A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, { fn next_idx(&mut self, last_idx: usize) -> usize { if last_idx + 1 == self.capacity() { return 0; } last_idx + 1 } } impl RingBuf for ArrayBuf where A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, { type Item = T; fn new() -> Self { ArrayBuf { buffer: MaybeUninit::uninit(), send_idx: 0, recv_idx: 0, size: 0, _phantom: PhantomData, } } fn with_capacity(_cap: usize) -> Self { // The fixed size array backed Ring Buffer doesn't support an adjustable // capacity. Therefore only the default capacity is utilized. Self::new() } #[inline] fn capacity(&self) -> usize { A::LEN } #[inline] fn len(&self) -> usize { self.size } #[inline] fn can_push(&self) -> bool { self.len() != self.capacity() } #[inline] fn push(&mut self, value: Self::Item) { assert!(self.can_push()); // Safety: We asserted that there is available space for an item. // Therefore the memory address is valid. 
unsafe { let arr_ptr = self.buffer.as_mut_ptr() as *mut T; arr_ptr.add(self.send_idx).write(value); } self.send_idx = self.next_idx(self.send_idx); self.size += 1; } #[inline] fn pop(&mut self) -> Self::Item { assert!(self.size > 0); // Safety: We asserted that there is an element available, so it must // have been written before. let val = unsafe { let arr_ptr = self.buffer.as_mut_ptr() as *mut T; arr_ptr.add(self.recv_idx).read() }; self.recv_idx = self.next_idx(self.recv_idx); self.size -= 1; val } } impl Drop for ArrayBuf where A: core::convert::AsMut<[T]> + core::convert::AsRef<[T]> + RealArray, { fn drop(&mut self) { // Drop all elements which are still stored inside the buffer while self.size > 0 { // Safety: This drops only as many elements as have been written via // ptr::write and haven't read via ptr::read before unsafe { let arr_ptr = self.buffer.as_mut_ptr() as *mut T; arr_ptr.add(self.recv_idx).drop_in_place(); } self.recv_idx = self.next_idx(self.recv_idx); self.size -= 1; } } } #[cfg(feature = "alloc")] mod if_alloc { use super::*; use alloc::collections::VecDeque; /// A Ring Buffer which stores all items on the heap. /// /// The `FixedHeapBuf` will allocate its capacity ahead of time. This is good /// fit when you have a constant latency between two components. pub struct FixedHeapBuf { buffer: VecDeque, /// The capacity is stored extra, since VecDeque can allocate space for /// more elements than specified. 
cap: usize, } impl core::fmt::Debug for FixedHeapBuf { fn fmt( &self, f: &mut core::fmt::Formatter, ) -> Result<(), core::fmt::Error> { f.debug_struct("FixedHeapBuf") .field("size", &self.buffer.len()) .field("cap", &self.cap) .finish() } } impl RingBuf for FixedHeapBuf { type Item = T; fn new() -> Self { FixedHeapBuf { buffer: VecDeque::new(), cap: 0, } } fn with_capacity(cap: usize) -> Self { FixedHeapBuf { buffer: VecDeque::with_capacity(cap), cap, } } #[inline] fn capacity(&self) -> usize { self.cap } #[inline] fn len(&self) -> usize { self.buffer.len() } #[inline] fn can_push(&self) -> bool { self.buffer.len() != self.cap } #[inline] fn push(&mut self, value: Self::Item) { assert!(self.can_push()); self.buffer.push_back(value); } #[inline] fn pop(&mut self) -> Self::Item { assert!(self.buffer.len() > 0); self.buffer.pop_front().unwrap() } } /// A Ring Buffer which stores all items on the heap but grows dynamically. /// /// A `GrowingHeapBuf` does not allocate the capacity ahead of time, as /// opposed to the `FixedHeapBuf`. This makes it a good fit when you have /// unpredictable latency between two components, when you want to /// amortize your allocation costs or when you are using an external /// back-pressure mechanism. pub struct GrowingHeapBuf { buffer: VecDeque, /// The maximum number of elements in the buffer. 
limit: usize, } impl core::fmt::Debug for GrowingHeapBuf { fn fmt( &self, f: &mut core::fmt::Formatter, ) -> Result<(), core::fmt::Error> { f.debug_struct("GrowingHeapBuf") .field("size", &self.buffer.len()) .field("limit", &self.limit) .finish() } } impl RingBuf for GrowingHeapBuf { type Item = T; fn new() -> Self { GrowingHeapBuf { buffer: VecDeque::new(), limit: 0, } } fn with_capacity(limit: usize) -> Self { GrowingHeapBuf { buffer: VecDeque::new(), limit, } } #[inline] fn capacity(&self) -> usize { self.limit } #[inline] fn len(&self) -> usize { self.buffer.len() } #[inline] fn can_push(&self) -> bool { self.buffer.len() != self.limit } #[inline] fn push(&mut self, value: Self::Item) { debug_assert!(self.can_push()); self.buffer.push_back(value); } #[inline] fn pop(&mut self) -> Self::Item { debug_assert!(self.buffer.len() > 0); self.buffer.pop_front().unwrap() } } } #[cfg(feature = "alloc")] pub use if_alloc::*; #[cfg(test)] #[cfg(feature = "alloc")] mod tests { use super::*; use crate::buffer::ring_buffer::if_alloc::FixedHeapBuf; fn test_ring_buf>(mut buf: Buf) { assert_eq!(5, buf.capacity()); assert_eq!(0, buf.len()); assert_eq!(true, buf.is_empty()); assert_eq!(true, buf.can_push()); buf.push(1); buf.push(2); buf.push(3); assert_eq!(5, buf.capacity()); assert_eq!(3, buf.len()); assert_eq!(false, buf.is_empty()); assert_eq!(true, buf.can_push()); assert_eq!(1, buf.pop()); assert_eq!(2, buf.pop()); assert_eq!(1, buf.len()); assert_eq!(false, buf.is_empty()); assert_eq!(3, buf.pop()); assert_eq!(0, buf.len()); assert_eq!(true, buf.is_empty()); for (i, val) in [4, 5, 6, 7, 8].iter().enumerate() { buf.push(*val); assert_eq!(i + 1, buf.len()); assert_eq!(i != 4, buf.can_push()); assert_eq!(false, buf.is_empty()); } for (i, val) in [4, 5, 6, 7, 8].iter().enumerate() { assert_eq!(*val, buf.pop()); assert_eq!(4 - i, buf.len()); assert_eq!(true, buf.can_push()); assert_eq!(i == 4, buf.is_empty()); } } #[test] fn test_array_ring_buf() { let buf = ArrayBuf::::new(); 
test_ring_buf(buf); } #[test] fn test_heap_ring_buf() { let buf = FixedHeapBuf::::with_capacity(5); test_ring_buf(buf); } #[test] fn test_growing_ring_buf() { let buf = GrowingHeapBuf::::with_capacity(5); test_ring_buf(buf); } } futures-intrusive-0.5.0/src/channel/channel_future.rs000064400000000000000000000450450072674642500211600ustar 00000000000000use super::ChannelSendError; use crate::intrusive_double_linked_list::ListNode; use core::marker::PhantomData; use core::pin::Pin; use futures_core::future::{FusedFuture, Future}; use futures_core::task::{Context, Poll, Waker}; /// Conveys additional information regarding the status of a channel /// following a `close` operation. #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] pub enum CloseStatus { /// The channel has just been closed by the operation. NewlyClosed, /// The channel was already closed prior to the operation. AlreadyClosed, } impl CloseStatus { /// Returns whether the value is the `NewlyClosed` variant. pub fn is_newly_closed(self) -> bool { match self { Self::NewlyClosed => true, _ => false, } } /// Returns whether the value is the `AlreadyClosed` variant. pub fn is_already_closed(self) -> bool { match self { Self::AlreadyClosed => true, _ => false, } } } /// Tracks how the future had interacted with the channel #[derive(PartialEq, Debug)] pub enum RecvPollState { /// The task is not registered at the wait queue at the channel Unregistered, /// The task was added to the wait queue at the channel. Registered, /// The task was notified that a value is available or can be sent, /// but hasn't interacted with the channel since then Notified, } /// Tracks the channel futures waiting state. /// Access to this struct is synchronized through the channel. 
#[derive(Debug)] pub struct RecvWaitQueueEntry { /// The task handle of the waiting task pub task: Option, /// Current polling state pub state: RecvPollState, } impl RecvWaitQueueEntry { /// Creates a new RecvWaitQueueEntry pub fn new() -> RecvWaitQueueEntry { RecvWaitQueueEntry { task: None, state: RecvPollState::Unregistered, } } } /// Tracks how the future had interacted with the channel #[derive(PartialEq, Debug)] pub enum SendPollState { /// The task is not registered at the wait queue at the channel Unregistered, /// The task was added to the wait queue at the channel. Registered, /// The value has been transmitted to the other task SendComplete, } /// Tracks the channel futures waiting state. /// Access to this struct is synchronized through the channel. pub struct SendWaitQueueEntry { /// The task handle of the waiting task pub task: Option, /// Current polling state pub state: SendPollState, /// The value to send pub value: Option, } impl core::fmt::Debug for SendWaitQueueEntry { fn fmt( &self, fmt: &mut core::fmt::Formatter<'_>, ) -> core::result::Result<(), core::fmt::Error> { fmt.debug_struct("SendWaitQueueEntry") .field("task", &self.task) .field("state", &self.state) .finish() } } impl SendWaitQueueEntry { /// Creates a new SendWaitQueueEntry pub fn new(value: T) -> SendWaitQueueEntry { SendWaitQueueEntry { task: None, state: SendPollState::Unregistered, value: Some(value), } } } /// Adapter trait that allows Futures to generically interact with Channel /// implementations via dynamic dispatch. pub trait ChannelSendAccess { unsafe fn send_or_register( &self, wait_node: &mut ListNode>, cx: &mut Context<'_>, ) -> (Poll<()>, Option); fn remove_send_waiter( &self, wait_node: &mut ListNode>, ); } /// Adapter trait that allows Futures to generically interact with Channel /// implementations via dynamic dispatch. 
pub trait ChannelReceiveAccess { unsafe fn receive_or_register( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll>; fn remove_receive_waiter( &self, wait_node: &mut ListNode, ); } /// A Future that is returned by the `receive` function on a channel. /// The future gets resolved with `Some(value)` when a value could be /// received from the channel. /// If the channels gets closed and no items are still enqueued inside the /// channel, the future will resolve to `None`. #[must_use = "futures do nothing unless polled"] pub struct ChannelReceiveFuture<'a, MutexType, T> { /// The channel that is associated with this ChannelReceiveFuture pub(crate) channel: Option<&'a dyn ChannelReceiveAccess>, /// Node for waiting on the channel pub(crate) wait_node: ListNode, /// Marker for mutex type pub(crate) _phantom: PhantomData, } // Safety: Channel futures can be sent between threads as long as the underlying // channel is thread-safe (Sync), which allows to poll/register/unregister from // a different thread. unsafe impl<'a, MutexType: Sync, T: Send> Send for ChannelReceiveFuture<'a, MutexType, T> { } impl<'a, MutexType, T> core::fmt::Debug for ChannelReceiveFuture<'a, MutexType, T> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("ChannelReceiveFuture").finish() } } impl<'a, MutexType, T> Future for ChannelReceiveFuture<'a, MutexType, T> { type Output = Option; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs. // However this didn't seem to work for some borrow checker reasons // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside ChannelReceiveFuture is stable, // and we don't move any fields inside the future until it gets dropped. 
let mut_self: &mut ChannelReceiveFuture = unsafe { Pin::get_unchecked_mut(self) }; let channel = mut_self .channel .expect("polled ChannelReceiveFuture after completion"); let poll_res = unsafe { channel.receive_or_register(&mut mut_self.wait_node, cx) }; if poll_res.is_ready() { // A value was available mut_self.channel = None; } poll_res } } impl<'a, MutexType, T> FusedFuture for ChannelReceiveFuture<'a, MutexType, T> { fn is_terminated(&self) -> bool { self.channel.is_none() } } impl<'a, MutexType, T> Drop for ChannelReceiveFuture<'a, MutexType, T> { fn drop(&mut self) { // If this ChannelReceiveFuture has been polled and it was added to the // wait queue at the channel, it must be removed before dropping. // Otherwise the channel would access invalid memory. if let Some(channel) = self.channel { channel.remove_receive_waiter(&mut self.wait_node); } } } /// A Future that is returned by the `send` function on a channel. /// The future gets resolved with `None` when a value could be /// written to the channel. /// If the channel gets closed the send operation will fail, and the /// Future will resolve to `ChannelSendError(T)` and return the item to send. #[must_use = "futures do nothing unless polled"] pub struct ChannelSendFuture<'a, MutexType, T> { /// The Channel that is associated with this ChannelSendFuture pub(crate) channel: Option<&'a dyn ChannelSendAccess>, /// Node for waiting on the channel pub(crate) wait_node: ListNode>, /// Marker for mutex type pub(crate) _phantom: PhantomData, } // Safety: Channel futures can be sent between threads as long as the underlying // channel is thread-safe (Sync), which allows to poll/register/unregister from // a different thread. 
unsafe impl<'a, MutexType: Sync, T: Send> Send for ChannelSendFuture<'a, MutexType, T> { } impl<'a, MutexType, T> core::fmt::Debug for ChannelSendFuture<'a, MutexType, T> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("ChannelSendFuture").finish() } } impl<'a, MutexType, T> ChannelSendFuture<'a, MutexType, T> { /// Tries to cancel the ongoing send operation pub fn cancel(&mut self) -> Option { let channel = self.channel.take(); match channel { None => None, Some(channel) => { channel.remove_send_waiter(&mut self.wait_node); self.wait_node.value.take() } } } } impl<'a, MutexType, T> Future for ChannelSendFuture<'a, MutexType, T> { type Output = Result<(), ChannelSendError>; fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>> { // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs. // However this didn't seem to work for some borrow checker reasons // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside ChannelSendFuture is stable, // and we don't move any fields inside the future until it gets dropped. 
let mut_self: &mut ChannelSendFuture = unsafe { Pin::get_unchecked_mut(self) }; let channel = mut_self .channel .expect("polled ChannelSendFuture after completion"); let send_res = unsafe { channel.send_or_register(&mut mut_self.wait_node, cx) }; match send_res.0 { Poll::Ready(()) => { // Value has been transmitted or channel was closed mut_self.channel = None; match send_res.1 { Some(v) => { // Channel must have been closed Poll::Ready(Err(ChannelSendError(v))) } None => Poll::Ready(Ok(())), } } Poll::Pending => Poll::Pending, } } } impl<'a, MutexType, T> FusedFuture for ChannelSendFuture<'a, MutexType, T> { fn is_terminated(&self) -> bool { self.channel.is_none() } } impl<'a, MutexType, T> Drop for ChannelSendFuture<'a, MutexType, T> { fn drop(&mut self) { // If this ChannelSendFuture has been polled and it was added to the // wait queue at the channel, it must be removed before dropping. // Otherwise the channel would access invalid memory. if let Some(channel) = self.channel { channel.remove_send_waiter(&mut self.wait_node); } } } #[cfg(feature = "alloc")] mod if_alloc { use super::*; pub mod shared { use super::*; /// A Future that is returned by the `receive` function on a channel. /// The future gets resolved with `Some(value)` when a value could be /// received from the channel. /// If the channels gets closed and no items are still enqueued inside the /// channel, the future will resolve to `None`. #[must_use = "futures do nothing unless polled"] pub struct ChannelReceiveFuture { /// The Channel that is associated with this ChannelReceiveFuture pub(crate) channel: Option>>, /// Node for waiting on the channel pub(crate) wait_node: ListNode, /// Marker for mutex type pub(crate) _phantom: PhantomData, } // Safety: Channel futures can be sent between threads as long as the underlying // channel is thread-safe (Sync), which allows to poll/register/unregister from // a different thread. 
unsafe impl Send for ChannelReceiveFuture { } impl core::fmt::Debug for ChannelReceiveFuture { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("ChannelReceiveFuture").finish() } } impl Future for ChannelReceiveFuture { type Output = Option; fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs. // However this didn't seem to work for some borrow checker reasons // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside ChannelReceiveFuture is stable, // and we don't move any fields inside the future until it gets dropped. let mut_self: &mut ChannelReceiveFuture = unsafe { Pin::get_unchecked_mut(self) }; let channel = mut_self .channel .take() .expect("polled ChannelReceiveFuture after completion"); let poll_res = unsafe { channel.receive_or_register(&mut mut_self.wait_node, cx) }; if poll_res.is_ready() { // A value was available mut_self.channel = None; } else { mut_self.channel = Some(channel) } poll_res } } impl FusedFuture for ChannelReceiveFuture { fn is_terminated(&self) -> bool { self.channel.is_none() } } impl Drop for ChannelReceiveFuture { fn drop(&mut self) { // If this ChannelReceiveFuture has been polled and it was added to the // wait queue at the channel, it must be removed before dropping. // Otherwise the channel would access invalid memory. if let Some(channel) = &self.channel { channel.remove_receive_waiter(&mut self.wait_node); } } } /// A Future that is returned by the `send` function on a channel. /// The future gets resolved with `None` when a value could be /// written to the channel. /// If the channel gets closed the send operation will fail, and the /// Future will resolve to `ChannelSendError(T)` and return the item /// to send. 
#[must_use = "futures do nothing unless polled"] pub struct ChannelSendFuture { /// The LocalChannel that is associated with this ChannelSendFuture pub(crate) channel: Option>>, /// Node for waiting on the channel pub(crate) wait_node: ListNode>, /// Marker for mutex type pub(crate) _phantom: PhantomData, } impl ChannelSendFuture { /// Tries to cancel the ongoing send operation pub fn cancel(&mut self) -> Option { let channel = self.channel.take(); match channel { None => None, Some(channel) => { channel.remove_send_waiter(&mut self.wait_node); self.wait_node.value.take() } } } } // Safety: Channel futures can be sent between threads as long as the underlying // channel is thread-safe (Sync), which allows to poll/register/unregister from // a different thread. unsafe impl Send for ChannelSendFuture {} impl core::fmt::Debug for ChannelSendFuture { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("ChannelSendFuture").finish() } } impl Future for ChannelSendFuture { type Output = Result<(), ChannelSendError>; fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll>> { // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs. // However this didn't seem to work for some borrow checker reasons // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside ChannelSendFuture is stable, // and we don't move any fields inside the future until it gets dropped. 
let mut_self: &mut ChannelSendFuture = unsafe { Pin::get_unchecked_mut(self) }; let channel = mut_self .channel .take() .expect("polled ChannelSendFuture after completion"); let send_res = unsafe { channel.send_or_register(&mut mut_self.wait_node, cx) }; match send_res.0 { Poll::Ready(()) => { // Value has been transmitted or channel was closed match send_res.1 { Some(v) => { // Channel must have been closed Poll::Ready(Err(ChannelSendError(v))) } None => Poll::Ready(Ok(())), } } Poll::Pending => { mut_self.channel = Some(channel); Poll::Pending } } } } impl FusedFuture for ChannelSendFuture { fn is_terminated(&self) -> bool { self.channel.is_none() } } impl Drop for ChannelSendFuture { fn drop(&mut self) { // If this ChannelSendFuture has been polled and it was added to the // wait queue at the channel, it must be removed before dropping. // Otherwise the channel would access invalid memory. if let Some(channel) = &self.channel { channel.remove_send_waiter(&mut self.wait_node); } } } } } #[cfg(feature = "alloc")] pub use self::if_alloc::*; futures-intrusive-0.5.0/src/channel/error.rs000064400000000000000000000036400072674642500173020ustar 00000000000000/// The error which is returned when sending a value into a channel fails. /// /// The `send` operation can only fail if the channel has been closed, which /// would prevent the other actors to ever retrieve the value. /// /// The error recovers the value that has been sent. #[derive(PartialEq, Debug)] pub struct ChannelSendError(pub T); /// The error which is returned when trying to receive from a channel /// without waiting fails. #[derive(PartialEq, Debug, Copy, Clone)] pub enum TryReceiveError { /// The channel is empty. No value is available for reception. Empty, /// The channel had been closed and no more value is available for reception. Closed, } impl TryReceiveError { /// Returns whether the error is the `Empty` variant. 
pub fn is_empty(self) -> bool { match self { Self::Empty => true, _ => false, } } /// Returns whether the error is the `Closed` variant. pub fn is_closed(self) -> bool { match self { Self::Closed => true, _ => false, } } } /// The error which is returned when trying to send on a channel /// without waiting fails. #[derive(PartialEq, Debug)] pub enum TrySendError { /// The channel is full. Full(T), /// The channel was closed. Closed(T), } impl TrySendError { /// Converts the error into its inner value. pub fn into_inner(self) -> T { match self { Self::Closed(inner) => inner, Self::Full(inner) => inner, } } /// Returns whether the error is the `WouldBlock` variant. pub fn is_full(&self) -> bool { match self { Self::Full(_) => true, _ => false, } } /// Returns whether the error is the `Closed` variant. pub fn is_closed(&self) -> bool { match self { Self::Closed(_) => true, _ => false, } } } futures-intrusive-0.5.0/src/channel/mod.rs000064400000000000000000000034570072674642500167360ustar 00000000000000//! Asynchronous channels. //! //! This module provides various channels that can be used to communicate between //! asynchronous tasks. 
mod error; pub use self::error::{ChannelSendError, TryReceiveError, TrySendError}; mod channel_future; use channel_future::{ ChannelReceiveAccess, ChannelSendAccess, RecvPollState, RecvWaitQueueEntry, SendPollState, SendWaitQueueEntry, }; pub use channel_future::{ ChannelReceiveFuture, ChannelSendFuture, CloseStatus, }; mod oneshot; pub use self::oneshot::{GenericOneshotChannel, LocalOneshotChannel}; #[cfg(feature = "std")] pub use self::oneshot::OneshotChannel; mod oneshot_broadcast; pub use self::oneshot_broadcast::{ GenericOneshotBroadcastChannel, LocalOneshotBroadcastChannel, }; #[cfg(feature = "std")] pub use self::oneshot_broadcast::OneshotBroadcastChannel; mod state_broadcast; pub use state_broadcast::{ GenericStateBroadcastChannel, LocalStateBroadcastChannel, StateId, StateReceiveFuture, }; #[cfg(feature = "std")] pub use self::state_broadcast::StateBroadcastChannel; mod mpmc; pub use self::mpmc::{ ChannelStream, GenericChannel, LocalChannel, LocalUnbufferedChannel, }; #[cfg(feature = "std")] pub use self::mpmc::{Channel, UnbufferedChannel}; #[cfg(feature = "alloc")] mod if_alloc { /// Channel implementations where Sender and Receiver sides are cloneable /// and owned. /// The Futures produced by channels in this module don't require a lifetime /// parameter. pub mod shared { pub use super::super::channel_future::shared::*; pub use super::super::mpmc::shared::*; pub use super::super::oneshot::shared::*; pub use super::super::oneshot_broadcast::shared::*; pub use super::super::state_broadcast::shared::*; } } #[cfg(feature = "alloc")] pub use self::if_alloc::*; futures-intrusive-0.5.0/src/channel/mpmc.rs000064400000000000000000001226440072674642500171130ustar 00000000000000//! 
An asynchronously awaitable multi producer multi consumer channel use crate::intrusive_double_linked_list::{LinkedList, ListNode}; use crate::{ buffer::{ArrayBuf, RingBuf}, utils::update_waker_ref, NoopLock, }; use core::{marker::PhantomData, pin::Pin}; use futures_core::{ future::Future, stream::{FusedStream, Stream}, task::{Context, Poll, Waker}, }; use lock_api::{Mutex, RawMutex}; use super::{ ChannelReceiveAccess, ChannelReceiveFuture, ChannelSendAccess, ChannelSendFuture, CloseStatus, RecvPollState, RecvWaitQueueEntry, SendPollState, SendWaitQueueEntry, TryReceiveError, TrySendError, }; fn wake_recv_waiters(waiters: &mut LinkedList) { // Remove all waiters from the waiting list in reverse order and wake them. // We reverse the waiter list, so that the oldest waker (which is // at the end of the list), gets woken first and has the best // chance to grab the channel value. waiters.reverse_drain(|waiter| { if let Some(handle) = waiter.task.take() { handle.wake(); } // The only kind of waiter that could have been stored here are // registered waiters (with a value), since others are removed // whenever their value had been copied into the channel. waiter.state = RecvPollState::Unregistered; }); } fn wake_send_waiters(waiters: &mut LinkedList>) { // Remove all waiters from the waiting list in reverse order and wake them. // We reverse the waiter list, so that the oldest waker (which is // at the end of the list), gets woken first and has the best // chance to send. 
waiters.reverse_drain(|waiter| { if let Some(handle) = waiter.task.take() { handle.wake(); } waiter.state = SendPollState::Unregistered; }); } /// Wakes up the last waiter and removes it from the wait queue #[must_use] fn return_oldest_receive_waiter( waiters: &mut LinkedList, ) -> Option { let last_waiter = waiters.remove_last(); if let Some(last_waiter) = last_waiter { last_waiter.state = RecvPollState::Notified; last_waiter.task.take() } else { None } } /// Internal state of the channel struct ChannelState where A: RingBuf, { /// Whether the channel had been closed is_closed: bool, /// The value which is stored inside the channel buffer: A, /// Futures which are waiting on receive receive_waiters: LinkedList, /// Futures which are waiting on send send_waiters: LinkedList>, } impl ChannelState where A: RingBuf, { fn new(buffer: A) -> ChannelState { ChannelState:: { is_closed: false, buffer, receive_waiters: LinkedList::new(), send_waiters: LinkedList::new(), } } fn clear(&mut self) { while !self.buffer.is_empty() { self.buffer.pop(); } } fn close(&mut self) -> CloseStatus { if self.is_closed { return CloseStatus::AlreadyClosed; } self.is_closed = true; // Wakeup all send and receive waiters, since they are now guaranteed // to make progress. wake_recv_waiters(&mut self.receive_waiters); wake_send_waiters(&mut self.send_waiters); CloseStatus::NewlyClosed } /// Attempt to send a value without waiting. /// Returns a `Waker` if sending the value lead enabled a task to run. fn try_send(&mut self, value: T) -> Result, TrySendError> { debug_assert!( self.buffer.capacity() > 0, "try_send is not supported for unbuffered channels" ); if self.is_closed { Err(TrySendError::Closed(value)) } else if self.buffer.can_push() { self.buffer.push(value); // Return the oldest receive waiter Ok(return_oldest_receive_waiter(&mut self.receive_waiters)) } else { Err(TrySendError::Full(value)) } } /// Tries to send a value to the channel. 
/// If the value isn't available yet, the ChannelSendFuture gets added to the /// wait queue at the channel, and will be signalled once ready. /// If the channels is already closed, the value to send is returned. /// This function is only safe as long as the `wait_node`s address is guaranteed /// to be stable until it gets removed from the queue. /// If sending the value succeeded, the `Waker` for a task which can receive /// the value is returned. unsafe fn send_or_register( &mut self, wait_node: &mut ListNode>, cx: &mut Context<'_>, ) -> (Poll<()>, Option, Option) { match wait_node.state { SendPollState::Unregistered => { if self.is_closed { let value = wait_node.value.take(); return (Poll::Ready(()), value, None); } if !self.buffer.can_push() { // If the capacity is exhausted, register a waiter wait_node.task = Some(cx.waker().clone()); wait_node.state = SendPollState::Registered; self.send_waiters.add_front(wait_node); // Return the oldest receive waiter let waker = return_oldest_receive_waiter(&mut self.receive_waiters); return (Poll::Pending, None, waker); } else { // Otherwise copy the value directly into the channel let value = wait_node .value .take() .expect("wait_node must contain value"); self.buffer.push(value); // Return the oldest receive waiter let waker = return_oldest_receive_waiter(&mut self.receive_waiters); (Poll::Ready(()), None, waker) } } SendPollState::Registered => { // Since the channel wakes up all waiters and moves their states // to unregistered there can't be space available in the channel. // However the caller might have passed a different `Waker`. // In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); (Poll::Pending, None, None) } SendPollState::SendComplete => { // The transfer is complete, and the sender has already been removed from the // list of pending senders (Poll::Ready(()), None, None) } } } /// If there is a send waiter, copy it's value into the channel buffer and complete it. 
/// The method may only be called if there is space in the receive buffer. #[must_use] fn try_copy_value_from_oldest_waiter(&mut self) -> Option { let last_waiter = self.send_waiters.remove_last(); if let Some(last_waiter) = last_waiter { let value = last_waiter .value .take() .expect("wait_node must contain value"); self.buffer.push(value); last_waiter.state = SendPollState::SendComplete; last_waiter.task.take() } else { None } } /// Tries to extract a value from the sending waiter which has been waiting /// longest on the send operation to complete. fn try_take_value_from_sender(&mut self) -> Option<(T, Option)> { // Safety: The method is only called inside the lock on a consistent // list. match self.send_waiters.remove_last() { Some(last_sender) => { // This path should be only used for 0 capacity queues. // Since the list is not empty, a value is available. // Extract it from the sender in order to return it debug_assert_eq!(0, self.buffer.capacity()); // Safety: The sender can't be invalid, since we only add valid // senders to the queue let val = last_sender.value.take().expect("Value must be available"); last_sender.state = SendPollState::SendComplete; // Return the waiter Some((val, last_sender.task.take())) } None => None, } } /// Tries to receive a value from the channel without waiting. fn try_receive(&mut self) -> Result<(T, Option), TryReceiveError> { if !self.buffer.is_empty() { let val = self.buffer.pop(); // Since this means a space in the buffer had been freed, // try to copy a value from a potential waiter into the channel. let waker = self.try_copy_value_from_oldest_waiter(); Ok((val, waker)) } else if let Some((val, waker)) = self.try_take_value_from_sender() { Ok((val, waker)) } else if self.is_closed { Err(TryReceiveError::Closed) } else { Err(TryReceiveError::Empty) } } /// Tries to read the value from the channel. 
/// If the value isn't available yet, the ChannelReceiveFuture gets added to the /// wait queue at the channel, and will be signalled once ready. /// This function is only safe as long as the `wait_node`s address is guaranteed /// to be stable until it gets removed from the queue. unsafe fn receive_or_register( &mut self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll)>> { match wait_node.state { RecvPollState::Unregistered | RecvPollState::Notified => { wait_node.state = RecvPollState::Unregistered; match self.try_receive() { Ok(val) => Poll::Ready(Some(val)), Err(TryReceiveError::Closed) => Poll::Ready(None), Err(TryReceiveError::Empty) => { // Added the task to the wait queue wait_node.task = Some(cx.waker().clone()); wait_node.state = RecvPollState::Registered; self.receive_waiters.add_front(wait_node); Poll::Pending } } } RecvPollState::Registered => { // Since the channel wakes up all waiters and moves their states // to unregistered there can't be any value in the channel in // this state. However the caller might have passed a different `Waker`. // In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); Poll::Pending } } } fn remove_send_waiter( &mut self, wait_node: &mut ListNode>, ) { // ChannelSendFuture only needs to get removed if it had been added to // the wait queue of the channel. // This has happened in the SendPollState::Registered case. match wait_node.state { SendPollState::Registered => { // Safety: Due to the state, we know that the node must be part // of the waiter list if !unsafe { self.send_waiters.remove(wait_node) } { // Panic if the address isn't found. This can only happen if the contract was // violated, e.g. the WaitQueueEntry got moved after the initial poll. panic!("Future could not be removed from wait queue"); } wait_node.state = SendPollState::Unregistered; } SendPollState::Unregistered => {} SendPollState::SendComplete => { // Send was complete. 
In that case the queue item is not in the list } } } #[must_use] fn remove_receive_waiter( &mut self, wait_node: &mut ListNode, ) -> Option { // ChannelReceiveFuture only needs to get removed if it had been added to // the wait queue of the channel. This has happened in the RecvPollState::Registered case. match wait_node.state { RecvPollState::Registered => { // Safety: Due to the state, we know that the node must be part // of the waiter list if !unsafe { self.receive_waiters.remove(wait_node) } { // Panic if the address isn't found. This can only happen if the contract was // violated, e.g. the WaitQueueEntry got moved after the initial poll. panic!("Future could not be removed from wait queue"); } wait_node.state = RecvPollState::Unregistered; None } RecvPollState::Notified => { // wakeup another receive waiter instead wait_node.state = RecvPollState::Unregistered; return_oldest_receive_waiter(&mut self.receive_waiters) } RecvPollState::Unregistered => None, } } } /// A channel which can be used to exchange values of type `T` between /// concurrent tasks. /// /// `A` represents the backing buffer for a Channel. E.g. a channel which /// can buffer up to 4 `i32` values can be created via: /// /// ``` /// # use futures_intrusive::channel::LocalChannel; /// let channel: LocalChannel = LocalChannel::new(); /// ``` /// /// Tasks can receive values from the channel through the `receive` method. /// The returned Future will get resolved when a value is sent into the channel. /// Values can be sent into the channel through `send`. /// The returned Future will get resolved when the value has been stored /// inside the channel. pub struct GenericChannel where A: RingBuf, { inner: Mutex>, } // The channel can be sent to other threads as long as it's not borrowed and the // value in it can be sent to other threads. 
unsafe impl Send for GenericChannel where A: RingBuf + Send, { } // The channel is thread-safe as long as a thread-safe mutex is used unsafe impl Sync for GenericChannel where A: RingBuf, { } impl core::fmt::Debug for GenericChannel where A: RingBuf, { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("Channel").finish() } } impl GenericChannel where A: RingBuf, { /// Creates a new Channel, utilizing the default capacity that /// the RingBuffer in `A` provides. pub fn new() -> Self { GenericChannel { inner: Mutex::new(ChannelState::new(A::new())), } } /// Creates a new Channel, which has storage for a `capacity` items. /// Depending on the utilized `RingBuf` type, the capacity argument might /// be ignored and the default capacity might be utilized. pub fn with_capacity(capacity: usize) -> Self { GenericChannel { inner: Mutex::new(ChannelState::new(A::with_capacity(capacity))), } } /// Returns a future that gets fulfilled when the value has been written to /// the channel. /// If the channel gets closed while the send is in progress, sending the /// value will fail, and the future will deliver the value back. pub fn send(&self, value: T) -> ChannelSendFuture { ChannelSendFuture { channel: Some(self), wait_node: ListNode::new(SendWaitQueueEntry::new(value)), _phantom: PhantomData, } } /// Attempt to send the value without waiting. /// /// This operation is not supported for unbuffered channels and will /// panic if the capacity of the `RingBuf` is zero. The reason for this is /// that the actual value transfer on unbuffered channels always happens /// when a receiving task copies the value out of the sending task while it /// is waiting. If the sending task does not wait, the value can not be /// transferred. Since this method can therefore never yield a reasonable /// result with unbuffered channels, it panics in order to highlight the /// use of an inappropriate API. 
pub fn try_send(&self, value: T) -> Result<(), TrySendError> { let result = { self.inner.lock().try_send(value) }; match result { Ok(Some(waker)) => { waker.wake(); Ok(()) } Ok(None) => Ok(()), Err(e) => Err(e), } } /// Returns a future that gets fulfilled when a value is written to the channel. /// If the channels gets closed, the future will resolve to `None`. pub fn receive(&self) -> ChannelReceiveFuture { ChannelReceiveFuture { channel: Some(self), wait_node: ListNode::new(RecvWaitQueueEntry::new()), _phantom: PhantomData, } } /// Attempt to receive a value of the channel without waiting. pub fn try_receive(&self) -> Result { let result = { self.inner.lock().try_receive() }; match result { Ok((val, waker)) => { if let Some(waker) = waker { waker.wake(); } Ok(val) } Err(e) => Err(e), } } /// Returns a stream that will receive values from this channel. /// /// This stream does not yield `None` when the channel is empty, /// instead it yields `None` when it is terminated. pub fn stream(&self) -> ChannelStream { ChannelStream { channel: Some(self), future: None, } } /// Closes the channel. /// All pending and future send attempts will fail. /// Receive attempts will continue to succeed as long as there are items /// stored inside the channel. Further attempts will fail. 
pub fn close(&self) -> CloseStatus { self.inner.lock().close() } } impl ChannelSendAccess for GenericChannel where A: RingBuf, { unsafe fn send_or_register( &self, wait_node: &mut ListNode>, cx: &mut Context<'_>, ) -> (Poll<()>, Option) { let (poll_result, value, waker) = { self.inner.lock().send_or_register(wait_node, cx) }; if let Some(waker) = waker { waker.wake(); } (poll_result, value) } fn remove_send_waiter( &self, wait_node: &mut ListNode>, ) { self.inner.lock().remove_send_waiter(wait_node) } } impl ChannelReceiveAccess for GenericChannel where A: RingBuf, { unsafe fn receive_or_register( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { let result = { self.inner.lock().receive_or_register(wait_node, cx) }; match result { Poll::Ready(Some((val, waker))) => { if let Some(waker) = waker { waker.wake(); } Poll::Ready(Some(val)) } Poll::Ready(None) => Poll::Ready(None), Poll::Pending => Poll::Pending, } } fn remove_receive_waiter( &self, wait_node: &mut ListNode, ) { let waker = { self.inner.lock().remove_receive_waiter(wait_node) }; if let Some(waker) = waker { waker.wake(); } } } /// A stream that receives from a `GenericChannel`. /// /// Not driving the `ChannelStream` to completion after it has been polled /// might lead to lost wakeup notifications. #[derive(Debug)] pub struct ChannelStream<'a, MutexType: RawMutex, T, A> where A: RingBuf, { channel: Option<&'a GenericChannel>, future: Option>, } impl<'a, MutexType, T, A> Stream for ChannelStream<'a, MutexType, T, A> where A: RingBuf, MutexType: RawMutex, { type Item = T; fn poll_next( self: Pin<&mut Self>, cx: &mut Context, ) -> Poll> { // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs. 
// However this didn't seem to work for some borrow checker reasons // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside ChannelReceiveFuture is stable, // and we don't move any fields inside the future until it gets dropped. let mut_self: &mut Self = unsafe { Pin::get_unchecked_mut(self) }; match mut_self.channel.take() { Some(channel) => { // Poll the next element. if mut_self.future.is_none() { mut_self.future.replace(channel.receive()); } let fut = mut_self.future.as_mut().unwrap(); // Safety: We guarantee that the pinned future will not move until // it resolves by storing it as part of the pinned `Stream` let poll = unsafe { let pin_fut = Pin::new_unchecked(fut); pin_fut.poll(cx) }; // Future was resolved, drop it. if poll.is_ready() { mut_self.future.take(); // If the channel was terminated, we let it drop. if let Poll::Ready(None) = &poll { return poll; } } // The channel was not terminated, so we reuse it. mut_self.channel.replace(channel); poll } // Channel was terminated. None => Poll::Ready(None), } } } impl<'a, MutexType, T, A> FusedStream for ChannelStream<'a, MutexType, T, A> where A: RingBuf, MutexType: RawMutex, { fn is_terminated(&self) -> bool { self.channel.is_none() } } // Export a non thread-safe version using NoopLock /// A [`GenericChannel`] implementation which is not thread-safe. pub type LocalChannel = GenericChannel>; /// An unbuffered [`GenericChannel`] implementation which is not thread-safe. pub type LocalUnbufferedChannel = LocalChannel; #[cfg(feature = "std")] mod if_std { use super::*; // Export a thread-safe version using parking_lot::RawMutex // TODO: We might also want to bind Channel to GenericChannel<..., FixedHeapBuf>, // which performs less type-churn. // However since we can't bind LocalChannel to that too due to no-std compatibility, // this would to introduce some inconsistency between those types. 
// It's also bit unfortunate that there are now `new()` and `with_capacity` // methods on both types, but for the array backed implementation only // `new()` is meaningful, while for the heap backed implementation only // `with_capacity()` is meaningful. /// A [`GenericChannel`] implementation backed by [`parking_lot`]. pub type Channel = GenericChannel>; /// An unbuffered [`GenericChannel`] implementation backed by [`parking_lot`]. pub type UnbufferedChannel = Channel; } #[cfg(feature = "std")] pub use self::if_std::*; #[cfg(feature = "alloc")] mod if_alloc { use super::*; /// Channel implementations where Sender and Receiver sides are cloneable /// and owned. /// The Futures produced by channels in this module don't require a lifetime /// parameter. pub mod shared { use super::*; use crate::channel::shared::{ChannelReceiveFuture, ChannelSendFuture}; use core::sync::atomic::{AtomicUsize, Ordering}; /// Shared Channel State, which is referenced by Senders and Receivers struct GenericChannelSharedState where MutexType: RawMutex, T: 'static, A: RingBuf, { /// The amount of [`GenericSender`] instances which reference this state. senders: AtomicUsize, /// The amount of [`GenericReceiver`] instances which reference this state. receivers: AtomicUsize, /// The channel on which is acted. channel: GenericChannel, } // Implement ChannelAccess trait for SharedChannelState, so that it can // be used for dynamic dispatch in futures. impl ChannelReceiveAccess for GenericChannelSharedState where MutexType: RawMutex, A: RingBuf, { unsafe fn receive_or_register( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { self.channel.receive_or_register(wait_node, cx) } fn remove_receive_waiter( &self, wait_node: &mut ListNode, ) { self.channel.remove_receive_waiter(wait_node) } } // Implement ChannelAccess trait for SharedChannelState, so that it can // be used for dynamic dispatch in futures. 
impl ChannelSendAccess for GenericChannelSharedState where MutexType: RawMutex, A: RingBuf, { unsafe fn send_or_register( &self, wait_node: &mut ListNode>, cx: &mut Context<'_>, ) -> (Poll<()>, Option) { self.channel.send_or_register(wait_node, cx) } fn remove_send_waiter( &self, wait_node: &mut ListNode>, ) { self.channel.remove_send_waiter(wait_node) } } /// The sending side of a channel which can be used to exchange values /// between concurrent tasks. /// /// Values can be sent into the channel through `send`. /// The returned Future will get resolved when the value has been stored inside the channel. pub struct GenericSender where MutexType: RawMutex, A: RingBuf, T: 'static, { inner: alloc::sync::Arc>, } /// The receiving side of a channel which can be used to exchange values /// between concurrent tasks. /// /// Tasks can receive values from the channel through the `receive` method. /// The returned Future will get resolved when a value is sent into the channel. pub struct GenericReceiver where MutexType: RawMutex, A: RingBuf, T: 'static, { inner: alloc::sync::Arc>, } impl core::fmt::Debug for GenericSender where MutexType: RawMutex, A: RingBuf, { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("Sender").finish() } } impl core::fmt::Debug for GenericReceiver where MutexType: RawMutex, A: RingBuf, { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("Receiver").finish() } } impl Clone for GenericSender where MutexType: RawMutex, A: RingBuf, { fn clone(&self) -> Self { let old_size = self.inner.senders.fetch_add(1, Ordering::Relaxed); if old_size > (core::isize::MAX) as usize { panic!("Reached maximum refcount"); } GenericSender { inner: self.inner.clone(), } } } impl Drop for GenericSender where MutexType: RawMutex, A: RingBuf, { fn drop(&mut self) { if self.inner.senders.fetch_sub(1, Ordering::Release) != 1 { return; } core::sync::atomic::fence(Ordering::Acquire); // Close the channel, before 
last sender gets destroyed // TODO: We could potentially avoid this, if no receiver is left self.inner.channel.close(); } } impl Clone for GenericReceiver where MutexType: RawMutex, A: RingBuf, { fn clone(&self) -> Self { let old_size = self.inner.receivers.fetch_add(1, Ordering::Relaxed); if old_size > (core::isize::MAX) as usize { panic!("Reached maximum refcount"); } GenericReceiver { inner: self.inner.clone(), } } } impl Drop for GenericReceiver where MutexType: RawMutex, A: RingBuf, { fn drop(&mut self) { if self.inner.receivers.fetch_sub(1, Ordering::Release) != 1 { return; } core::sync::atomic::fence(Ordering::Acquire); // Close the channel, before last receiver gets destroyed // TODO: We could potentially avoid this, if no sender is left self.inner.channel.close(); // Now drop the content of the channel. This ensures that // the content of the channel is dropped even if a sender is held. self.inner.channel.inner.lock().clear(); } } /// Creates a new Channel which can be used to exchange values of type `T` between /// concurrent tasks. The ends of the Channel are represented through /// the returned Sender and Receiver. /// Both the Sender and Receiver can be cloned in order to let more tasks /// interact with the Channel. /// /// As soon es either all Senders or all Receivers are closed, the Channel /// itself will be closed. /// /// The channel can buffer up to `capacity` items internally. 
pub fn generic_channel( capacity: usize, ) -> ( GenericSender, GenericReceiver, ) where MutexType: RawMutex, A: RingBuf, T: Send, { let inner = alloc::sync::Arc::new(GenericChannelSharedState { channel: GenericChannel::with_capacity(capacity), senders: AtomicUsize::new(1), receivers: AtomicUsize::new(1), }); let sender = GenericSender { inner: inner.clone(), }; let receiver = GenericReceiver { inner }; (sender, receiver) } impl GenericSender where MutexType: 'static + RawMutex, A: 'static + RingBuf, { /// Returns a future that gets fulfilled when the value has been written to /// the channel. /// If the channel gets closed while the send is in progress, sending the /// value will fail, and the future will deliver the value back. pub fn send(&self, value: T) -> ChannelSendFuture { ChannelSendFuture { channel: Some(self.inner.clone()), wait_node: ListNode::new(SendWaitQueueEntry::new(value)), _phantom: PhantomData, } } /// Attempt to send the value without waiting. /// /// This operation is not supported for unbuffered channels and will /// panic if the capacity of the `RingBuf` is zero. The reason for this is /// that the actual value transfer on unbuffered channels always happens /// when a receiving task copies the value out of the sending task while it /// is waiting. If the sending task does not wait, the value can not be /// transferred. Since this method can therefore never yield a reasonable /// result with unbuffered channels, it panics in order to highlight the /// use of an inappropriate API. pub fn try_send(&self, value: T) -> Result<(), TrySendError> { self.inner.channel.try_send(value) } /// Closes the channel. /// All pending future send attempts will fail. /// Receive attempts will continue to succeed as long as there are items /// stored inside the channel. Further attempts will return `None`. 
pub fn close(&self) -> CloseStatus { self.inner.channel.close() } } impl GenericReceiver where MutexType: 'static + RawMutex, A: 'static + RingBuf, { /// Returns a future that gets fulfilled when a value is written to the channel. /// If the channels gets closed, the future will resolve to `None`. pub fn receive(&self) -> ChannelReceiveFuture { ChannelReceiveFuture { channel: Some(self.inner.clone()), wait_node: ListNode::new(RecvWaitQueueEntry::new()), _phantom: PhantomData, } } /// Attempt to receive form the channel without waiting. pub fn try_receive(&self) -> Result { self.inner.channel.try_receive() } /// Closes the channel. /// All pending future send attempts will fail. /// Receive attempts will continue to succeed as long as there are items /// stored inside the channel. Further attempts will return `None`. pub fn close(&self) -> CloseStatus { self.inner.channel.close() } /// Returns a stream that will receive values from this channel. /// /// This stream does not yield `None` when the channel is empty, /// instead it yields `None` when it is terminated. pub fn into_stream(self) -> SharedStream { SharedStream { receiver: self, future: None, is_terminated: false, } } } /// A stream that receives from channel using a `GenericReceiver`. /// /// Not driving the `SharedStream` to completion after it has been polled /// might lead to lost wakeup notifications. #[derive(Debug)] pub struct SharedStream where MutexType: 'static + RawMutex, T: 'static, A: 'static + RingBuf, { receiver: GenericReceiver, future: Option>, is_terminated: bool, } impl SharedStream where MutexType: RawMutex, A: 'static + RingBuf, { /// Closes the channel. /// All pending and future send attempts will fail. /// Receive attempts will continue to succeed as long as there are items /// stored inside the channel. Further attempts will fail. 
pub fn close(&self) -> CloseStatus { self.receiver.close() } } impl Stream for SharedStream where MutexType: RawMutex, A: 'static + RingBuf, { type Item = T; fn poll_next( mut self: Pin<&mut Self>, cx: &mut Context, ) -> Poll> { if self.is_terminated { return Poll::Ready(None); } // Safety: This is safe since this is a pinned projection // that lives as long as the scope. let mut pin_fut = unsafe { self.as_mut().map_unchecked_mut(|v| { // Poll the next element. if v.future.is_none() { v.future.replace(v.receiver.receive()); } &mut v.future }) }; let poll = pin_fut.as_mut().as_pin_mut().unwrap().poll(cx); // Future was resolved, drop it. if poll.is_ready() { pin_fut.set(None); if let Poll::Ready(None) = &poll { // Safety: This is safe because `is_terminated` is never // considered pinned (i.e. not structuraly pinned). unsafe { self.get_unchecked_mut().is_terminated = true }; } } poll } } impl FusedStream for SharedStream where MutexType: RawMutex, A: 'static + RingBuf, { fn is_terminated(&self) -> bool { self.is_terminated } } // Export parking_lot based shared channels in std mode #[cfg(feature = "std")] mod if_std { use super::*; use crate::buffer::GrowingHeapBuf; /// A [`GenericSender`] implementation backed by [`parking_lot`]. /// /// Uses a `GrowingHeapBuf` whose capacity grows dynamically up to /// the given limit. Refer to [`GrowingHeapBuf`] for more information. /// /// [`GrowingHeapBuf`]: ../../buffer/struct.GrowingHeapBuf.html pub type Sender = GenericSender>; /// A [`GenericReceiver`] implementation backed by [`parking_lot`]. /// /// Uses a `GrowingHeapBuf` whose capacity grows dynamically up to /// the given limit. Refer to [`GrowingHeapBuf`] for more information. /// /// [`GrowingHeapBuf`]: ../../buffer/struct.GrowingHeapBuf.html pub type Receiver = GenericReceiver>; /// Creates a new channel with the given buffering capacity /// /// Uses a `GrowingHeapBuf` whose capacity grows dynamically up to /// the given limit. 
Refer to [`generic_channel`] and [`GrowingHeapBuf`] for more information. /// /// [`GrowingHeapBuf`]: ../../buffer/struct.GrowingHeapBuf.html /// /// ``` /// # use futures_intrusive::channel::shared::channel; /// let (sender, receiver) = channel::(4); /// ``` pub fn channel(capacity: usize) -> (Sender, Receiver) where T: Send, { generic_channel::>( capacity, ) } /// A [`GenericSender`] implementation backed by [`parking_lot`]. pub type UnbufferedSender = GenericSender>; /// A [`GenericReceiver`] implementation backed by [`parking_lot`]. pub type UnbufferedReceiver = GenericReceiver>; /// Creates a new unbuffered channel. /// /// Refer to [`generic_channel`] for details. pub fn unbuffered_channel() -> (Sender, Receiver) where T: Send, { generic_channel::>( 0, ) } } #[cfg(feature = "std")] pub use self::if_std::*; } } #[cfg(feature = "alloc")] pub use self::if_alloc::*; futures-intrusive-0.5.0/src/channel/oneshot.rs000064400000000000000000000367720072674642500176440ustar 00000000000000//! An asynchronously awaitable oneshot channel use super::{ ChannelReceiveAccess, ChannelReceiveFuture, ChannelSendError, CloseStatus, RecvPollState, RecvWaitQueueEntry, }; use crate::{ intrusive_double_linked_list::{LinkedList, ListNode}, utils::update_waker_ref, NoopLock, }; use core::marker::PhantomData; use futures_core::task::{Context, Poll}; use lock_api::{Mutex, RawMutex}; fn wake_waiters(waiters: &mut LinkedList) { // Remove all waiters from the waiting list in reverse order and wake them. // We reverse the waiter list, so that the oldest waker (which is // at the end of the list), gets woken first and has the best // chance to grab the channel value. 
waiters.reverse_drain(|waiter| { if let Some(handle) = waiter.task.take() { handle.wake(); } waiter.state = RecvPollState::Unregistered; }); } /// Internal state of the oneshot channel struct ChannelState { /// Whether the channel had been fulfilled before is_fulfilled: bool, /// The value which is stored inside the channel value: Option, /// The list of waiters, which are waiting for the channel to get fulfilled waiters: LinkedList, } impl ChannelState { fn new() -> ChannelState { ChannelState:: { is_fulfilled: false, value: None, waiters: LinkedList::new(), } } /// Writes a single value to the channel. /// If a value had been written to the channel before, the new value will be rejected. fn send(&mut self, value: T) -> Result<(), ChannelSendError> { if self.is_fulfilled { return Err(ChannelSendError(value)); } self.value = Some(value); self.is_fulfilled = true; // Wakeup all waiters wake_waiters(&mut self.waiters); Ok(()) } fn close(&mut self) -> CloseStatus { if self.is_fulfilled { return CloseStatus::AlreadyClosed; } self.is_fulfilled = true; // Wakeup all waiters wake_waiters(&mut self.waiters); CloseStatus::NewlyClosed } /// Tries to read the value from the channel. /// If the value isn't available yet, the ChannelReceiveFuture gets added to the /// wait queue at the channel, and will be signalled once ready. /// This function is only safe as long as the `wait_node`s address is guaranteed /// to be stable until it gets removed from the queue. unsafe fn try_receive( &mut self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { match wait_node.state { RecvPollState::Unregistered => { let maybe_val = self.value.take(); match maybe_val { Some(v) => { // A value was available inside the channel and was fetched Poll::Ready(Some(v)) } None => { // Check if something was written into the channel before // or the channel was closed. 
if self.is_fulfilled { Poll::Ready(None) } else { // Added the task to the wait queue wait_node.task = Some(cx.waker().clone()); wait_node.state = RecvPollState::Registered; self.waiters.add_front(wait_node); Poll::Pending } } } } RecvPollState::Registered => { // Since the channel wakes up all waiters and moves their states // to unregistered there can't be any value in the channel in this state. // However the caller might have passed a different `Waker`. // In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); Poll::Pending } RecvPollState::Notified => { unreachable!("Not possible for Oneshot"); } } } fn remove_waiter(&mut self, wait_node: &mut ListNode) { // ChannelReceiveFuture only needs to get removed if it had been added to // the wait queue of the channel. This has happened in the RecvPollState::Waiting case. if let RecvPollState::Registered = wait_node.state { // Safety: Due to the state, we know that the node must be part // of the waiter list if !unsafe { self.waiters.remove(wait_node) } { // Panic if the address isn't found. This can only happen if the contract was // violated, e.g. the RecvWaitQueueEntry got moved after the initial poll. panic!("Future could not be removed from wait queue"); } wait_node.state = RecvPollState::Unregistered; } } } /// A channel which can be used to exchange a single value between two /// concurrent tasks. /// /// Tasks can wait for the value to get delivered via `receive`. /// The returned Future will get fulfilled when a value is sent into the channel. /// /// The value can only be extracted by a single receiving task. Once the value /// has been retrieved from the Channel, the Channel is closed and subsequent /// receive calls will return `None`. pub struct GenericOneshotChannel { inner: Mutex>, } // The channel can be sent to other threads as long as it's not borrowed and the // value in it can be sent to other threads. 
unsafe impl Send for GenericOneshotChannel { } // The channel is thread-safe as long as a thread-safe mutex is used unsafe impl Sync for GenericOneshotChannel { } impl core::fmt::Debug for GenericOneshotChannel { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericOneshotChannel").finish() } } impl GenericOneshotChannel { /// Creates a new OneshotChannel in the given state pub fn new() -> GenericOneshotChannel { GenericOneshotChannel { inner: Mutex::new(ChannelState::new()), } } /// Writes a single value to the channel. /// /// This will notify waiters about the availability of the value. /// If a value had been written to the channel before, or if the /// channel is closed, the new value will be rejected and /// returned inside the error variant. pub fn send(&self, value: T) -> Result<(), ChannelSendError> { self.inner.lock().send(value) } /// Closes the channel. /// /// This will notify waiters about closure, by fulfilling pending `Future`s /// with `None`. /// `send(value)` attempts which follow this call will fail with a /// [`ChannelSendError`]. pub fn close(&self) -> CloseStatus { self.inner.lock().close() } /// Returns a future that gets fulfilled when a value is written to the channel /// or the channel is closed. pub fn receive(&self) -> ChannelReceiveFuture { ChannelReceiveFuture { channel: Some(self), wait_node: ListNode::new(RecvWaitQueueEntry::new()), _phantom: PhantomData, } } } impl ChannelReceiveAccess for GenericOneshotChannel { unsafe fn receive_or_register( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { self.inner.lock().try_receive(wait_node, cx) } fn remove_receive_waiter( &self, wait_node: &mut ListNode, ) { self.inner.lock().remove_waiter(wait_node) } } // Export a non thread-safe version using NoopLock /// A [`GenericOneshotChannel`] which is not thread-safe. 
pub type LocalOneshotChannel = GenericOneshotChannel; #[cfg(feature = "std")] mod if_std { use super::*; // Export a thread-safe version using parking_lot::RawMutex /// A [`GenericOneshotChannel`] implementation backed by [`parking_lot`]. pub type OneshotChannel = GenericOneshotChannel; } #[cfg(feature = "std")] pub use self::if_std::*; #[cfg(feature = "alloc")] mod if_alloc { use super::*; pub mod shared { use super::*; use crate::channel::shared::ChannelReceiveFuture; struct GenericOneshotChannelSharedState where MutexType: RawMutex, T: 'static, { channel: GenericOneshotChannel, } // Implement ChannelReceiveAccess trait for SharedChannelState, so that it can // be used for dynamic dispatch in futures. impl ChannelReceiveAccess for GenericOneshotChannelSharedState where MutexType: RawMutex, { unsafe fn receive_or_register( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { self.channel.receive_or_register(wait_node, cx) } fn remove_receive_waiter( &self, wait_node: &mut ListNode, ) { self.channel.remove_receive_waiter(wait_node) } } /// The sending side of a channel which can be used to exchange values /// between concurrent tasks. /// /// Values can be sent into the channel through `send`. pub struct GenericOneshotSender where MutexType: RawMutex, T: 'static, { inner: alloc::sync::Arc< GenericOneshotChannelSharedState, >, } /// The receiving side of a channel which can be used to exchange values /// between concurrent tasks. /// /// Tasks can receive values from the channel through the `receive` method. /// The returned Future will get resolved when a value is sent into the channel. 
pub struct GenericOneshotReceiver where MutexType: RawMutex, T: 'static, { inner: alloc::sync::Arc< GenericOneshotChannelSharedState, >, } impl core::fmt::Debug for GenericOneshotSender where MutexType: RawMutex, { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("OneshotSender").finish() } } impl core::fmt::Debug for GenericOneshotReceiver where MutexType: RawMutex, { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("OneshotReceiver").finish() } } impl Drop for GenericOneshotSender where MutexType: RawMutex, { fn drop(&mut self) { // Close the channel, before last sender gets destroyed // TODO: We could potentially avoid this, if no receiver is left self.inner.channel.close(); } } impl Drop for GenericOneshotReceiver where MutexType: RawMutex, { fn drop(&mut self) { // Close the channel, before last receiver gets destroyed // TODO: We could potentially avoid this, if no sender is left self.inner.channel.close(); } } /// Creates a new oneshot channel which can be used to exchange values /// of type `T` between concurrent tasks. /// The ends of the Channel are represented through /// the returned Sender and Receiver. /// /// As soon es either the senders or receivers is closed, the channel /// itself will be closed. pub fn generic_oneshot_channel() -> ( GenericOneshotSender, GenericOneshotReceiver, ) where MutexType: RawMutex, T: Send, { let inner = alloc::sync::Arc::new(GenericOneshotChannelSharedState { channel: GenericOneshotChannel::new(), }); let sender = GenericOneshotSender { inner: inner.clone(), }; let receiver = GenericOneshotReceiver { inner }; (sender, receiver) } impl GenericOneshotSender where MutexType: RawMutex + 'static, { /// Writes a single value to the channel. /// /// This will notify waiters about the availability of the value. 
/// If a value had been written to the channel before, or if the /// channel is closed, the new value will be rejected and /// returned inside the error variant. pub fn send(&self, value: T) -> Result<(), ChannelSendError> { self.inner.channel.send(value) } } impl GenericOneshotReceiver where MutexType: RawMutex + 'static, { /// Returns a future that gets fulfilled when a value is written to the channel. /// If the channels gets closed, the future will resolve to `None`. pub fn receive(&self) -> ChannelReceiveFuture { ChannelReceiveFuture { channel: Some(self.inner.clone()), wait_node: ListNode::new(RecvWaitQueueEntry::new()), _phantom: PhantomData, } } } // Export parking_lot based shared channels in std mode #[cfg(feature = "std")] mod if_std { use super::*; /// A [`GenericOneshotSender`] implementation backed by [`parking_lot`]. pub type OneshotSender = GenericOneshotSender; /// A [`GenericOneshotReceiver`] implementation backed by [`parking_lot`]. pub type OneshotReceiver = GenericOneshotReceiver; /// Creates a new oneshot channel. /// /// Refer to [`generic_oneshot_channel`] for details. /// /// Example for creating a channel to transmit an integer value: /// /// ``` /// # use futures_intrusive::channel::shared::oneshot_channel; /// let (sender, receiver) = oneshot_channel::(); /// ``` pub fn oneshot_channel() -> (OneshotSender, OneshotReceiver) where T: Send, { generic_oneshot_channel::() } } #[cfg(feature = "std")] pub use self::if_std::*; } } #[cfg(feature = "alloc")] pub use self::if_alloc::*; futures-intrusive-0.5.0/src/channel/oneshot_broadcast.rs000064400000000000000000000417440072674642500216610ustar 00000000000000//! An asynchronously awaitable oneshot channel which can be awaited by //! multiple consumers. 
use super::{ ChannelReceiveAccess, ChannelReceiveFuture, ChannelSendError, CloseStatus, RecvPollState, RecvWaitQueueEntry, }; use crate::{ intrusive_double_linked_list::{LinkedList, ListNode}, utils::update_waker_ref, NoopLock, }; use core::marker::PhantomData; use futures_core::task::{Context, Poll}; use lock_api::{Mutex, RawMutex}; fn wake_waiters(waiters: &mut LinkedList) { // Remove all waiters from the waiting list in reverse order and wake them. // We reverse the waiter list, so that the oldest waker (which is // at the end of the list), gets woken first and has the best // chance to grab the channel value. waiters.reverse_drain(|waiter| { if let Some(handle) = waiter.task.take() { handle.wake(); } waiter.state = RecvPollState::Unregistered; }); } /// Internal state of the oneshot channel struct ChannelState { /// Whether the channel had been fulfilled before is_fulfilled: bool, /// The value which is stored inside the channel value: Option, /// The list of waiters, which are waiting for the channel to get fulfilled waiters: LinkedList, } impl ChannelState where T: Clone, { fn new() -> ChannelState { ChannelState:: { is_fulfilled: false, value: None, waiters: LinkedList::new(), } } /// Writes a single value to the channel. /// If a value had been written to the channel before, the new value will be rejected. fn send(&mut self, value: T) -> Result<(), ChannelSendError> { if self.is_fulfilled { return Err(ChannelSendError(value)); } self.value = Some(value); self.is_fulfilled = true; // Wakeup all waiters wake_waiters(&mut self.waiters); Ok(()) } fn close(&mut self) -> CloseStatus { if self.is_fulfilled { return CloseStatus::AlreadyClosed; } self.is_fulfilled = true; // Wakeup all waiters wake_waiters(&mut self.waiters); CloseStatus::NewlyClosed } /// Tries to read the value from the channel. /// If the value isn't available yet, the ChannelReceiveFuture gets added to the /// wait queue at the channel, and will be signalled once ready. 
/// This function is only safe as long as the `wait_node`s address is guaranteed /// to be stable until it gets removed from the queue. unsafe fn try_receive( &mut self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { match wait_node.state { RecvPollState::Unregistered => { match &self.value { Some(v) => { // A value was available inside the channel and was fetched. // TODO: If the same waiter asks again, they will always // get the same value, instead of `None`. Is that reasonable? Poll::Ready(Some(v.clone())) } None => { // Check if something was written into the channel before // or the channel was closed. if self.is_fulfilled { Poll::Ready(None) } else { // Added the task to the wait queue wait_node.task = Some(cx.waker().clone()); wait_node.state = RecvPollState::Registered; self.waiters.add_front(wait_node); Poll::Pending } } } } RecvPollState::Registered => { // Since the channel wakes up all waiters and moves their states // to unregistered there can't be any value in the channel in this state. // However the caller might have passed a different `Waker`. // In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); Poll::Pending } RecvPollState::Notified => { unreachable!("Not possible for Oneshot Broadcast"); } } } fn remove_waiter(&mut self, wait_node: &mut ListNode) { // ChannelReceiveFuture only needs to get removed if it had been added to // the wait queue of the channel. This has happened in the RecvPollState::Waiting case. if let RecvPollState::Registered = wait_node.state { // Safety: Due to the state, we know that the node must be part // of the waiter list if !unsafe { self.waiters.remove(wait_node) } { // Panic if the address isn't found. This can only happen if the contract was // violated, e.g. the RecvWaitQueueEntry got moved after the initial poll. 
panic!("Future could not be removed from wait queue"); } wait_node.state = RecvPollState::Unregistered; } } } /// A channel which can be used to exchange a single value between two or more /// concurrent tasks. /// /// The value which gets sent will get stored inside the Channel, and can be /// retrieved by an arbitrary number of tasks afterwards. /// /// Tasks can wait for the value to get delivered via `receive`. /// The returned Future will get fulfilled when a value is sent into the channel. pub struct GenericOneshotBroadcastChannel { inner: Mutex>, } // The channel can be sent to other threads as long as it's not borrowed and the // value in it can be sent to other threads. unsafe impl Send for GenericOneshotBroadcastChannel { } // The channel is thread-safe as long as a thread-safe mutex is used unsafe impl Sync for GenericOneshotBroadcastChannel { } impl core::fmt::Debug for GenericOneshotBroadcastChannel { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericOneshotBroadcastChannel").finish() } } impl GenericOneshotBroadcastChannel where T: Clone, { /// Creates a new OneshotBroadcastChannel in the given state pub fn new() -> GenericOneshotBroadcastChannel { GenericOneshotBroadcastChannel { inner: Mutex::new(ChannelState::new()), } } /// Writes a single value to the channel. /// /// This will notify waiters about the availability of the value. /// If a value had been written to the channel before, or if the /// channel is closed, the new value will be rejected and /// returned inside the error variant. pub fn send(&self, value: T) -> Result<(), ChannelSendError> { self.inner.lock().send(value) } /// Closes the channel. /// /// This will notify waiters about closure, by fulfilling pending `Future`s /// with `None`. /// `send(value)` attempts which follow this call will fail with a /// [`ChannelSendError`]. 
pub fn close(&self) -> CloseStatus { self.inner.lock().close() } /// Returns a future that gets fulfilled when a value is written to the channel /// or the channel is closed. pub fn receive(&self) -> ChannelReceiveFuture { ChannelReceiveFuture { channel: Some(self), wait_node: ListNode::new(RecvWaitQueueEntry::new()), _phantom: PhantomData, } } } impl ChannelReceiveAccess for GenericOneshotBroadcastChannel where T: Clone, { unsafe fn receive_or_register( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { self.inner.lock().try_receive(wait_node, cx) } fn remove_receive_waiter( &self, wait_node: &mut ListNode, ) { self.inner.lock().remove_waiter(wait_node) } } // Export a non thread-safe version using NoopLock /// A [`GenericOneshotBroadcastChannel`] which is not thread-safe. pub type LocalOneshotBroadcastChannel = GenericOneshotBroadcastChannel; #[cfg(feature = "std")] mod if_std { use super::*; // Export a thread-safe version using parking_lot::RawMutex /// A [`GenericOneshotBroadcastChannel`] implementation backed by [`parking_lot`]. pub type OneshotBroadcastChannel = GenericOneshotBroadcastChannel; } #[cfg(feature = "std")] pub use self::if_std::*; #[cfg(feature = "alloc")] mod if_alloc { use super::*; pub mod shared { use super::*; use crate::channel::shared::ChannelReceiveFuture; struct GenericOneshotChannelSharedState where MutexType: RawMutex, T: 'static, { channel: GenericOneshotBroadcastChannel, } // Implement ChannelReceiveAccess trait for SharedChannelState, so that it can // be used for dynamic dispatch in futures. 
impl ChannelReceiveAccess for GenericOneshotChannelSharedState where MutexType: RawMutex, T: Clone, { unsafe fn receive_or_register( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { self.channel.receive_or_register(wait_node, cx) } fn remove_receive_waiter( &self, wait_node: &mut ListNode, ) { self.channel.remove_receive_waiter(wait_node) } } /// The sending side of a channel which can be used to exchange values /// between concurrent tasks. /// /// Values can be sent into the channel through `send`. pub struct GenericOneshotBroadcastSender where MutexType: RawMutex, T: Clone + 'static, { inner: alloc::sync::Arc< GenericOneshotChannelSharedState, >, } /// The receiving side of a channel which can be used to exchange values /// between concurrent tasks. /// /// Tasks can receive values from the channel through the `receive` method. /// The returned Future will get resolved when a value is sent into the channel. pub struct GenericOneshotBroadcastReceiver where MutexType: RawMutex, T: Clone + 'static, { inner: alloc::sync::Arc< GenericOneshotChannelSharedState, >, } // Manual `Clone` implementation, since #[derive(Clone)] also requires // the Mutex to be `Clone` impl Clone for GenericOneshotBroadcastReceiver where MutexType: RawMutex, T: Clone + 'static, { fn clone(&self) -> Self { Self { inner: self.inner.clone(), } } } impl core::fmt::Debug for GenericOneshotBroadcastSender where MutexType: RawMutex, T: Clone, { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("OneshotBroadcastSender").finish() } } impl core::fmt::Debug for GenericOneshotBroadcastReceiver where MutexType: RawMutex, T: Clone, { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("OneshotBroadcastReceiver").finish() } } impl Drop for GenericOneshotBroadcastSender where MutexType: RawMutex, T: Clone, { fn drop(&mut self) { // Close the channel, before last sender gets destroyed // TODO: We could potentially avoid this, if 
no receiver is left self.inner.channel.close(); } } impl Drop for GenericOneshotBroadcastReceiver where MutexType: RawMutex, T: Clone, { fn drop(&mut self) { // TODO: This is broken, since it will already close the channel if only one receiver is closed. // We need to count receivers, as in mpmc queue. // Close the channel, before last receiver gets destroyed // TODO: We could potentially avoid this, if no sender is left self.inner.channel.close(); } } /// Creates a new oneshot broadcast channel which can be used to exchange values /// of type `T` between concurrent tasks. /// The ends of the Channel are represented through /// the returned `Sender` and `Receiver`. The `Receiver` can be cloned. /// /// As soon es either the senders or all receivers is closed, the channel /// itself will be closed. pub fn generic_oneshot_broadcast_channel() -> ( GenericOneshotBroadcastSender, GenericOneshotBroadcastReceiver, ) where MutexType: RawMutex, T: Send + Clone, { let inner = alloc::sync::Arc::new(GenericOneshotChannelSharedState { channel: GenericOneshotBroadcastChannel::new(), }); let sender = GenericOneshotBroadcastSender { inner: inner.clone(), }; let receiver = GenericOneshotBroadcastReceiver { inner }; (sender, receiver) } impl GenericOneshotBroadcastSender where MutexType: RawMutex + 'static, T: Clone, { /// Writes a single value to the channel. /// /// This will notify waiters about the availability of the value. /// If a value had been written to the channel before, or if the /// channel is closed, the new value will be rejected and /// returned inside the error variant. pub fn send(&self, value: T) -> Result<(), ChannelSendError> { self.inner.channel.send(value) } } impl GenericOneshotBroadcastReceiver where MutexType: RawMutex + 'static, T: Clone, { /// Returns a future that gets fulfilled when a value is written to the channel. /// If the channels gets closed, the future will resolve to `None`. 
pub fn receive(&self) -> ChannelReceiveFuture { ChannelReceiveFuture { channel: Some(self.inner.clone()), wait_node: ListNode::new(RecvWaitQueueEntry::new()), _phantom: PhantomData, } } } // Export parking_lot based shared channels in std mode #[cfg(feature = "std")] mod if_std { use super::*; /// A [`GenericOneshotBroadcastSender`] implementation backed by [`parking_lot`]. pub type OneshotBroadcastSender = GenericOneshotBroadcastSender; /// A [`GenericOneshotBroadcastReceiver`] implementation backed by [`parking_lot`]. pub type OneshotBroadcastReceiver = GenericOneshotBroadcastReceiver; /// Creates a new oneshot broadcast channel. /// /// Refer to [`generic_oneshot_broadcast_channel`] for details. /// /// Example for creating a channel to transmit an integer value: /// /// ``` /// # use futures_intrusive::channel::shared::oneshot_broadcast_channel; /// let (sender, receiver) = oneshot_broadcast_channel::(); /// ``` pub fn oneshot_broadcast_channel( ) -> (OneshotBroadcastSender, OneshotBroadcastReceiver) where T: Send + Clone, { generic_oneshot_broadcast_channel::() } } #[cfg(feature = "std")] pub use self::if_std::*; } } #[cfg(feature = "alloc")] pub use self::if_alloc::*; futures-intrusive-0.5.0/src/channel/state_broadcast.rs000064400000000000000000000707760072674642500213310ustar 00000000000000//! An asynchronously awaitable state broadcasting channel use super::{ChannelSendError, CloseStatus}; use crate::{ intrusive_double_linked_list::{LinkedList, ListNode}, utils::update_waker_ref, NoopLock, }; use core::marker::PhantomData; use core::pin::Pin; use futures_core::{ future::{FusedFuture, Future}, task::{Context, Poll, Waker}, }; use lock_api::{Mutex, RawMutex}; /// An ID, which allows to differentiate states received from a Channel. /// Elements with a bigger state ID (`id > otherId`) have been published more /// recently into the Channel. 
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Ord, PartialOrd)]
pub struct StateId(u64);

impl StateId {
    /// Returns the initial StateId, which is guaranteed to return the
    /// oldest buffered value available.
    pub fn new() -> Self {
        StateId(0)
    }
}

/// Tracks how the future had interacted with the channel
#[derive(PartialEq, Debug)]
pub enum RecvPollState {
    /// The task is not registered at the wait queue at the channel
    Unregistered,
    /// The task was added to the wait queue at the channel.
    Registered,
}

/// Tracks the channel futures waiting state.
/// Access to this struct is synchronized through the channel.
#[derive(Debug)]
pub struct RecvWaitQueueEntry {
    /// The task handle of the waiting task
    task: Option<Waker>,
    /// Current polling state
    state: RecvPollState,
    /// The minimum state ID we are waiting for
    state_id: StateId,
}

impl RecvWaitQueueEntry {
    /// Creates a new RecvWaitQueueEntry
    pub fn new(state_id: StateId) -> RecvWaitQueueEntry {
        RecvWaitQueueEntry {
            task: None,
            state_id,
            state: RecvPollState::Unregistered,
        }
    }
}

/// Adapter trait that allows Futures to generically interact with Channel
/// implementations via dynamic dispatch.
pub trait ChannelReceiveAccess<T> {
    unsafe fn receive_or_register(
        &self,
        wait_node: &mut ListNode<RecvWaitQueueEntry>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<(StateId, T)>>;

    fn remove_receive_waiter(
        &self,
        wait_node: &mut ListNode<RecvWaitQueueEntry>,
    );
}

/// A Future that is returned by the `receive` function on a state broadcast channel.
/// The future gets resolved with `Some((state_id, state))` when a value could be
/// received from the channel.
///
/// `state` represents the new state which had been retrieved from the channel.
///
/// `state_id` is the [`StateId`] which can be passed as a parameter to
/// `receive()` in order to fetch the next state from the channel.
///
/// If the channels gets closed and no items are still enqueued inside the
/// channel, the future will resolve to `None`.
#[must_use = "futures do nothing unless polled"]
pub struct StateReceiveFuture<'a, MutexType, T>
where
    T: Clone,
{
    /// The channel that is associated with this StateReceiveFuture
    channel: Option<&'a dyn ChannelReceiveAccess<T>>,
    /// Node for waiting on the channel
    wait_node: ListNode<RecvWaitQueueEntry>,
    /// Marker for mutex type
    _phantom: PhantomData<MutexType>,
}

// Safety: Channel futures can be sent between threads as long as the underlying
// channel is thread-safe (Sync), which allows to poll/register/unregister from
// a different thread.
unsafe impl<'a, MutexType: Sync, T: Clone + Send> Send
    for StateReceiveFuture<'a, MutexType, T>
{
}

impl<'a, MutexType, T: Clone> core::fmt::Debug
    for StateReceiveFuture<'a, MutexType, T>
{
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        f.debug_struct("StateReceiveFuture").finish()
    }
}

impl<'a, MutexType, T: Clone> Future for StateReceiveFuture<'a, MutexType, T> {
    type Output = Option<(StateId, T)>;

    fn poll(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<(StateId, T)>> {
        // It might be possible to use Pin::map_unchecked here instead of the
        // two unsafe APIs. However this didn't seem to work for some borrow
        // checker reasons.

        // Safety: The next operations are safe, because Pin promises us that
        // the address of the wait queue entry inside StateReceiveFuture is
        // stable, and we don't move any fields inside the future until it
        // gets dropped.
        let mut_self: &mut StateReceiveFuture<MutexType, T> =
            unsafe { Pin::get_unchecked_mut(self) };

        let channel = mut_self
            .channel
            .expect("polled StateReceiveFuture after completion");

        let poll_res =
            unsafe { channel.receive_or_register(&mut mut_self.wait_node, cx) };

        if poll_res.is_ready() {
            // A value was available
            mut_self.channel = None;
        }

        poll_res
    }
}

impl<'a, MutexType, T: Clone> FusedFuture
    for StateReceiveFuture<'a, MutexType, T>
{
    fn is_terminated(&self) -> bool {
        self.channel.is_none()
    }
}

impl<'a, MutexType, T: Clone> Drop for StateReceiveFuture<'a, MutexType, T> {
    fn drop(&mut self) {
        // If this StateReceiveFuture has been polled and it was added to the
        // wait queue at the channel, it must be removed before dropping.
        // Otherwise the channel would access invalid memory.
        if let Some(channel) = self.channel {
            channel.remove_receive_waiter(&mut self.wait_node);
        }
    }
}

/// Wakes up all registered waiters and resets their registration state.
fn wake_waiters(waiters: &mut LinkedList<RecvWaitQueueEntry>) {
    // Remove all waiters from the waiting list in reverse order and wake them.
    // We reverse the waiter list, so that the oldest waker (which is
    // at the end of the list), gets woken first and has the best
    // chance to grab the channel value.
    waiters.reverse_drain(|waiter| {
        if let Some(handle) = waiter.task.take() {
            handle.wake();
        }
        waiter.state = RecvPollState::Unregistered;
    });
}

/// Internal state of the state broadcast channel
struct ChannelState<T> {
    /// Whether the channel was actively closed
    is_closed: bool,
    /// The ID of the next state.
    state_id: StateId,
    /// The value which is stored inside the channel
    value: Option<T>,
    /// The list of waiters, which are waiting for the channel to get fulfilled
    waiters: LinkedList<RecvWaitQueueEntry>,
}

impl<T> ChannelState<T>
where
    T: Clone,
{
    fn new() -> ChannelState<T> {
        ChannelState::<T> {
            is_closed: false,
            state_id: StateId(0),
            value: None,
            waiters: LinkedList::new(),
        }
    }

    /// Writes a single value to the channel.
    /// If the maximum amount of values had been written, the new value will be rejected.
fn send(&mut self, value: T) -> Result<(), ChannelSendError> { if self.is_closed || self.state_id.0 == core::u64::MAX { return Err(ChannelSendError(value)); } self.value = Some(value); self.state_id.0 += 1; // Wakeup all waiters wake_waiters(&mut self.waiters); Ok(()) } fn close(&mut self) -> CloseStatus { if self.is_closed { return CloseStatus::AlreadyClosed; } self.is_closed = true; // Wakeup all waiters wake_waiters(&mut self.waiters); CloseStatus::NewlyClosed } fn try_receive(&mut self, state_id: StateId) -> Option<(StateId, T)> { let val = self.value.as_ref()?; if state_id < self.state_id { Some((self.state_id, val.clone())) } else { None } } /// Tries to read the value from the channel. /// If the value isn't available yet, the StateReceiveFuture gets added to the /// wait queue at the channel, and will be signalled once ready. /// This function is only safe as long as the `wait_node`s address is guaranteed /// to be stable until it gets removed from the queue. unsafe fn receive_or_register( &mut self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { match wait_node.state { RecvPollState::Unregistered => { // The caller must wait for a value if either there is no value // available yet, or if the value isn't newer than what the // caller requested. let val_to_deliver = match &self.value { Some(ref v) if wait_node.state_id < self.state_id => { Some(v.clone()) } Some(_) | None => None, }; match val_to_deliver { Some(v) => { // A value that satisfies the caller is available. Poll::Ready(Some((self.state_id, v))) } None => { // Check if something was written into the channel before // or the channel was closed. 
if self.is_closed { Poll::Ready(None) } else { // Added the task to the wait queue wait_node.task = Some(cx.waker().clone()); wait_node.state = RecvPollState::Registered; self.waiters.add_front(wait_node); Poll::Pending } } } } RecvPollState::Registered => { // Since the channel wakes up all waiters and moves their states // to unregistered there can't be any value in the channel in this state. // However the caller might have passed a different `Waker`. // In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); Poll::Pending } } } fn remove_waiter(&mut self, wait_node: &mut ListNode) { // StateReceiveFuture only needs to get removed if it had been added to // the wait queue of the channel. This has happened in the RecvPollState::Waiting case. if let RecvPollState::Registered = wait_node.state { // Safety: Due to the state, we know that the node must be part // of the waiter list if !unsafe { self.waiters.remove(wait_node) } { // Panic if the address isn't found. This can only happen if the contract was // violated, e.g. the RecvWaitQueueEntry got moved after the initial poll. panic!("Future could not be removed from wait queue"); } wait_node.state = RecvPollState::Unregistered; } } } /// A channel which can be used to synchronize the state between a sender an /// arbitrary number of receivers. /// /// The sender can publish its state. /// /// The receivers can wait for state updates by announcing the most recent state /// that is already known to them. pub struct GenericStateBroadcastChannel { inner: Mutex>, } // The channel can be sent to other threads as long as it's not borrowed and the // value in it can be sent to other threads. 
unsafe impl Send for GenericStateBroadcastChannel { } // The channel is thread-safe as long as a thread-safe mutex is used unsafe impl Sync for GenericStateBroadcastChannel { } impl core::fmt::Debug for GenericStateBroadcastChannel { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericStateBroadcastChannel").finish() } } impl GenericStateBroadcastChannel where T: Clone, { /// Creates a new State Broadcast Channel in the given state pub fn new() -> GenericStateBroadcastChannel where T: Clone, { GenericStateBroadcastChannel { inner: Mutex::new(ChannelState::new()), } } /// Writes a single value to the channel. /// /// This will notify waiters about the availability of the value. /// If the maximum amount of values had been written to the channel, /// or if the channel is closed, the new value will be rejected and /// returned inside the error variant. pub fn send(&self, value: T) -> Result<(), ChannelSendError> { self.inner.lock().send(value) } /// Closes the channel. /// /// This will notify waiters about closure, by fulfilling pending `Future`s /// with `None`. /// `send(value)` attempts which follow this call will fail with a /// [`ChannelSendError`]. pub fn close(&self) -> CloseStatus { self.inner.lock().close() } /// Returns a future that gets fulfilled when a value is written to the channel /// or the channel is closed. /// `state_id` specifies the minimum state ID that should be retrieved /// by the `receive` operation. /// /// The returned [`StateReceiveFuture`] will get fulfilled with the /// retrieved value as well as the [`StateId`] which is required to retrieve /// the following state. pub fn receive( &self, state_id: StateId, ) -> StateReceiveFuture { StateReceiveFuture { channel: Some(self), wait_node: ListNode::new(RecvWaitQueueEntry::new(state_id)), _phantom: PhantomData, } } /// Attempt to retrieve a value whose `StateId` is greater than the one provided. 
/// /// Returns `None` if no value is found in the channel, or if the current `StateId` /// of the value is less or equal to the one provided. pub fn try_receive(&self, state_id: StateId) -> Option<(StateId, T)> { self.inner.lock().try_receive(state_id) } } impl ChannelReceiveAccess for GenericStateBroadcastChannel { unsafe fn receive_or_register( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { self.inner.lock().receive_or_register(wait_node, cx) } fn remove_receive_waiter( &self, wait_node: &mut ListNode, ) { self.inner.lock().remove_waiter(wait_node) } } // Export a non thread-safe version using NoopLock /// A [`GenericStateBroadcastChannel`] which is not thread-safe. pub type LocalStateBroadcastChannel = GenericStateBroadcastChannel; #[cfg(feature = "std")] mod if_std { use super::*; // Export a thread-safe version using parking_lot::RawMutex /// A [`GenericStateBroadcastChannel`] implementation backed by [`parking_lot`]. pub type StateBroadcastChannel = GenericStateBroadcastChannel; } #[cfg(feature = "std")] pub use self::if_std::*; #[cfg(feature = "alloc")] mod if_alloc { use super::*; pub mod shared { use super::*; use core::sync::atomic::{AtomicUsize, Ordering}; struct GenericStateBroadcastChannelSharedState where MutexType: RawMutex, T: Clone + 'static, { /// The amount of [`GenericSender`] instances which reference this state. senders: AtomicUsize, /// The amount of [`GenericReceiver`] instances which reference this state. receivers: AtomicUsize, /// The channel on which is acted. channel: GenericStateBroadcastChannel, } // Implement ChannelReceiveAccess trait for SharedChannelState, so that it can // be used for dynamic dispatch in futures. 
impl ChannelReceiveAccess for GenericStateBroadcastChannelSharedState where MutexType: RawMutex, T: Clone + 'static, { unsafe fn receive_or_register( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll> { self.channel.receive_or_register(wait_node, cx) } fn remove_receive_waiter( &self, wait_node: &mut ListNode, ) { self.channel.remove_receive_waiter(wait_node) } } /// A Future that is returned by the `receive` function on a state broadcast channel. /// The future gets resolved with `Some((state_id, state))` when a value could be /// received from the channel. /// /// `state` represents the new state which had been retrieved from the channel. /// /// `state_id` is the [`StateId`] which can be passed as a parameter to /// `receive()` in order to fetch the next state from the channel. /// /// If the channels gets closed and no items are still enqueued inside the /// channel, the future will resolve to `None`. #[must_use = "futures do nothing unless polled"] pub struct StateReceiveFuture { /// The Channel that is associated with this StateReceiveFuture channel: Option>>, /// Node for waiting on the channel wait_node: ListNode, /// Marker for mutex type _phantom: PhantomData, } // Safety: Channel futures can be sent between threads as long as the underlying // channel is thread-safe (Sync), which allows to poll/register/unregister from // a different thread. unsafe impl Send for StateReceiveFuture { } impl core::fmt::Debug for StateReceiveFuture { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("StateReceiveFuture").finish() } } impl Future for StateReceiveFuture { type Output = Option<(StateId, T)>; fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs. 
// However this didn't seem to work for some borrow checker reasons // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside StateReceiveFuture is stable, // and we don't move any fields inside the future until it gets dropped. let mut_self: &mut StateReceiveFuture = unsafe { Pin::get_unchecked_mut(self) }; let channel = mut_self .channel .take() .expect("polled StateReceiveFuture after completion"); let poll_res = unsafe { channel.receive_or_register(&mut mut_self.wait_node, cx) }; if poll_res.is_ready() { // A value was available mut_self.channel = None; } else { mut_self.channel = Some(channel) } poll_res } } impl FusedFuture for StateReceiveFuture { fn is_terminated(&self) -> bool { self.channel.is_none() } } impl Drop for StateReceiveFuture { fn drop(&mut self) { // If this StateReceiveFuture has been polled and it was added to the // wait queue at the channel, it must be removed before dropping. // Otherwise the channel would access invalid memory. if let Some(channel) = &self.channel { channel.remove_receive_waiter(&mut self.wait_node); } } } /// The sending side of a channel which can be used to exchange values /// between concurrent tasks. /// /// Values can be sent into the channel through `send`. pub struct GenericStateSender where MutexType: RawMutex, T: Clone + 'static, { inner: alloc::sync::Arc< GenericStateBroadcastChannelSharedState, >, } /// The receiving side of a channel which can be used to exchange values /// between concurrent tasks. /// /// Tasks can receive values from the channel through the `receive` method. /// The returned Future will get resolved when a value is sent into the channel. 
pub struct GenericStateReceiver where MutexType: RawMutex, T: Clone + 'static, { inner: alloc::sync::Arc< GenericStateBroadcastChannelSharedState, >, } impl core::fmt::Debug for GenericStateSender where MutexType: RawMutex, T: Clone, { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("StateSender").finish() } } impl core::fmt::Debug for GenericStateReceiver where MutexType: RawMutex, T: Clone, { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("StateReceiver").finish() } } impl Clone for GenericStateSender where MutexType: RawMutex, T: Clone, { fn clone(&self) -> Self { let old_size = self.inner.senders.fetch_add(1, Ordering::Relaxed); if old_size > (core::isize::MAX) as usize { panic!("Reached maximum refcount"); } GenericStateSender { inner: self.inner.clone(), } } } impl Drop for GenericStateSender where MutexType: RawMutex, T: Clone, { fn drop(&mut self) { if self.inner.senders.fetch_sub(1, Ordering::Release) != 1 { return; } core::sync::atomic::fence(Ordering::Acquire); // Close the channel, before last sender gets destroyed // TODO: We could potentially avoid this, if no receiver is left self.inner.channel.close(); } } impl Clone for GenericStateReceiver where MutexType: RawMutex, T: Clone, { fn clone(&self) -> Self { let old_size = self.inner.receivers.fetch_add(1, Ordering::Relaxed); if old_size > (core::isize::MAX) as usize { panic!("Reached maximum refcount"); } GenericStateReceiver { inner: self.inner.clone(), } } } impl Drop for GenericStateReceiver where MutexType: RawMutex, T: Clone, { fn drop(&mut self) { if self.inner.receivers.fetch_sub(1, Ordering::Release) != 1 { return; } core::sync::atomic::fence(Ordering::Acquire); // Close the channel, before last receiver gets destroyed // TODO: We could potentially avoid this, if no sender is left self.inner.channel.close(); } } /// Creates a new state broadcast channel which can be used to exchange values /// of type `T` between concurrent 
tasks. /// The ends of the Channel are represented through /// the returned Sender and Receiver. /// /// As soon es either the senders or receivers is closed, the channel /// itself will be closed. pub fn generic_state_broadcast_channel() -> ( GenericStateSender, GenericStateReceiver, ) where MutexType: RawMutex, T: Clone + Send, { let inner = alloc::sync::Arc::new( GenericStateBroadcastChannelSharedState { channel: GenericStateBroadcastChannel::new(), senders: AtomicUsize::new(1), receivers: AtomicUsize::new(1), }, ); let sender = GenericStateSender { inner: inner.clone(), }; let receiver = GenericStateReceiver { inner }; (sender, receiver) } impl GenericStateSender where MutexType: RawMutex + 'static, T: Clone, { /// Writes a single value to the channel. /// /// This will notify waiters about the availability of the value. /// If a value had been written to the channel before, or if the /// channel is closed, the new value will be rejected and /// returned inside the error variant. pub fn send(&self, value: T) -> Result<(), ChannelSendError> { self.inner.channel.send(value) } } impl GenericStateReceiver where MutexType: RawMutex + 'static, T: Clone, { /// Returns a future that gets fulfilled when a value is written to the channel /// or the channel is closed. /// `state_id` specifies the minimum state ID that should be retrieved /// by the `receive` operation. /// /// The returned [`StateReceiveFuture`] will get fulfilled with the /// retrieved value as well as the [`StateId`] which is required to retrieve /// the following state pub fn receive( &self, state_id: StateId, ) -> StateReceiveFuture { StateReceiveFuture { channel: Some(self.inner.clone()), wait_node: ListNode::new(RecvWaitQueueEntry::new(state_id)), _phantom: PhantomData, } } /// Attempt to retrieve a value whose `StateId` is greater than the one provided. /// /// Returns `None` if no value is found in the channel, or if the current `StateId` /// of the value is less or equal to the one provided. 
pub fn try_receive( &self, state_id: StateId, ) -> Option<(StateId, T)> { self.inner.channel.try_receive(state_id) } } // Export parking_lot based shared channels in std mode #[cfg(feature = "std")] mod if_std { use super::*; /// A [`GenericStateSender`] implementation backed by [`parking_lot`]. pub type StateSender = GenericStateSender; /// A [`GenericStateReceiver`] implementation backed by [`parking_lot`]. pub type StateReceiver = GenericStateReceiver; /// Creates a new state broadcast channel. /// /// Refer to [`generic_state_broadcast_channel`] for details. /// /// Example for creating a channel to transmit an integer value: /// /// ``` /// # use futures_intrusive::channel::shared::state_broadcast_channel; /// let (sender, receiver) = state_broadcast_channel::(); /// ``` pub fn state_broadcast_channel( ) -> (StateSender, StateReceiver) where T: Clone + Send, { generic_state_broadcast_channel::() } } #[cfg(feature = "std")] pub use self::if_std::*; } } #[cfg(feature = "alloc")] pub use self::if_alloc::*; futures-intrusive-0.5.0/src/intrusive_double_linked_list.rs000064400000000000000000000631670072674642500225160ustar 00000000000000//! An intrusive double linked list of data use core::{ marker::PhantomPinned, ops::{Deref, DerefMut}, ptr::NonNull, }; /// A node which carries data of type `T` and is stored in an intrusive list #[derive(Debug)] pub struct ListNode { /// The previous node in the list. `None` if there is no previous node. prev: Option>>, /// The next node in the list. `None` if there is no previous node. next: Option>>, /// The data which is associated to this list item data: T, /// Prevents `ListNode`s from being `Unpin`. They may never be moved, since /// the list semantics require addresses to be stable. 
_pin: PhantomPinned, } impl ListNode { /// Creates a new node with the associated data pub fn new(data: T) -> ListNode { ListNode:: { prev: None, next: None, data, _pin: PhantomPinned, } } } impl Deref for ListNode { type Target = T; fn deref(&self) -> &T { &self.data } } impl DerefMut for ListNode { fn deref_mut(&mut self) -> &mut T { &mut self.data } } /// An intrusive linked list of nodes, where each node carries associated data /// of type `T`. #[derive(Debug)] pub struct LinkedList { head: Option>>, tail: Option>>, } impl LinkedList { /// Creates an empty linked list pub fn new() -> Self { LinkedList:: { head: None, tail: None, } } /// Adds a node at the front of the linked list. /// Safety: This function is only safe as long as `node` is guaranteed to /// get removed from the list before it gets moved or dropped. /// In addition to this `node` may not be added to another other list before /// it is removed from the current one. pub unsafe fn add_front(&mut self, node: &mut ListNode) { node.next = self.head; node.prev = None; match self.head { Some(mut head) => head.as_mut().prev = Some(node.into()), None => {} }; self.head = Some(node.into()); if self.tail.is_none() { self.tail = Some(node.into()); } } /// Returns a reference to the first node in the linked list /// The function is only safe as long as valid pointers are stored inside /// the linked list. /// The returned pointer is only guaranteed to be valid as long as the list /// is not mutated pub fn peek_first(&self) -> Option<&ListNode> { // Safety: When the node was inserted it was promised that it is alive // until it gets removed from the list. // The returned node has a pointer which constrains it to the lifetime // of the list. This is ok, since the Node is supposed to outlive // its insertion in the list. 
unsafe { self.head .map(|node| &*(node.as_ptr() as *const ListNode)) } } /// Returns a mutable reference to the first node in the linked list /// The function is only safe as long as valid pointers are stored inside /// the linked list. /// The returned pointer is only guaranteed to be valid as long as the list /// is not mutated pub fn peek_first_mut(&mut self) -> Option<&mut ListNode> { // Safety: When the node was inserted it was promised that it is alive // until it gets removed from the list. // The returned node has a pointer which constrains it to the lifetime // of the list. This is ok, since the Node is supposed to outlive // its insertion in the list. unsafe { self.head .map(|mut node| &mut *(node.as_mut() as *mut ListNode)) } } /// Returns a reference to the last node in the linked list /// The function is only safe as long as valid pointers are stored inside /// the linked list. /// The returned pointer is only guaranteed to be valid as long as the list /// is not mutated pub fn peek_last(&self) -> Option<&ListNode> { // Safety: When the node was inserted it was promised that it is alive // until it gets removed from the list. // The returned node has a pointer which constrains it to the lifetime // of the list. This is ok, since the Node is supposed to outlive // its insertion in the list. unsafe { self.tail .map(|node| &*(node.as_ptr() as *const ListNode)) } } /// Returns a mutable reference to the last node in the linked list /// The function is only safe as long as valid pointers are stored inside /// the linked list. /// The returned pointer is only guaranteed to be valid as long as the list /// is not mutated pub fn peek_last_mut(&mut self) -> Option<&mut ListNode> { // Safety: When the node was inserted it was promised that it is alive // until it gets removed from the list. // The returned node has a pointer which constrains it to the lifetime // of the list. This is ok, since the Node is supposed to outlive // its insertion in the list. 
unsafe { self.tail .map(|mut node| &mut *(node.as_mut() as *mut ListNode)) } } /// Removes the first node from the linked list pub fn remove_first(&mut self) -> Option<&mut ListNode> { // Safety: When the node was inserted it was promised that it is alive // until it gets removed from the list unsafe { let mut head = self.head?; self.head = head.as_mut().next; let first_ref = head.as_mut(); match first_ref.next { None => { // This was the only node in the list debug_assert_eq!(Some(first_ref.into()), self.tail); self.tail = None; } Some(mut next) => { next.as_mut().prev = None; } } first_ref.prev = None; first_ref.next = None; Some(&mut *(first_ref as *mut ListNode)) } } /// Removes the last node from the linked list and returns it pub fn remove_last(&mut self) -> Option<&mut ListNode> { // Safety: When the node was inserted it was promised that it is alive // until it gets removed from the list unsafe { let mut tail = self.tail?; self.tail = tail.as_mut().prev; let last_ref = tail.as_mut(); match last_ref.prev { None => { // This was the last node in the list debug_assert_eq!(Some(last_ref.into()), self.head); self.head = None; } Some(mut prev) => { prev.as_mut().next = None; } } last_ref.prev = None; last_ref.next = None; Some(&mut *(last_ref as *mut ListNode)) } } /// Returns whether the linked list doesn not contain any node pub fn is_empty(&self) -> bool { if !self.head.is_none() { return false; } debug_assert!(self.tail.is_none()); true } /// Removes the given `node` from the linked list. /// Returns whether the `node` was removed. /// It is also only save if it is known that the `node` is either part of this /// list, or of no list at all. If `node` is part of another list, the /// behavior is undefined. pub unsafe fn remove(&mut self, node: &mut ListNode) -> bool { match node.prev { None => { // This might be the first node in the list. If it is not, the // node is not in the list at all. 
Since our precondition is that // the node must either be in this list or in no list, we check that // the node is really in no list. if self.head != Some(node.into()) { debug_assert!(node.next.is_none()); return false; } self.head = node.next; } Some(mut prev) => { debug_assert_eq!(prev.as_ref().next, Some(node.into())); prev.as_mut().next = node.next; } } match node.next { None => { // This must be the last node in our list. Otherwise the list // is inconsistent. debug_assert_eq!(self.tail, Some(node.into())); self.tail = node.prev; } Some(mut next) => { debug_assert_eq!(next.as_mut().prev, Some(node.into())); next.as_mut().prev = node.prev; } } node.next = None; node.prev = None; true } /// Drains the list iby calling a callback on each list node /// /// The method does not return an iterator since stopping or deferring /// draining the list is not permitted. If the method would push nodes to /// an iterator we could not guarantee that the nodes do not get utilized /// after having been removed from the list anymore. pub fn drain(&mut self, mut func: F) where F: FnMut(&mut ListNode), { let mut current = self.head; self.head = None; self.tail = None; while let Some(mut node) = current { // Safety: The nodes have not been removed from the list yet and must // therefore contain valid data. The nodes can also not be added to // the list again during iteration, since the list is mutably borrowed. unsafe { let node_ref = node.as_mut(); current = node_ref.next; node_ref.next = None; node_ref.prev = None; // Note: We do not reset the pointers from the next element in the // list to the current one since we will iterate over the whole // list anyway, and therefore clean up all pointers. func(node_ref); } } } /// Drains the list in reverse order by calling a callback on each list node /// /// The method does not return an iterator since stopping or deferring /// draining the list is not permitted. 
If the method would push nodes to /// an iterator we could not guarantee that the nodes do not get utilized /// after having been removed from the list anymore. pub fn reverse_drain(&mut self, mut func: F) where F: FnMut(&mut ListNode), { let mut current = self.tail; self.head = None; self.tail = None; while let Some(mut node) = current { // Safety: The nodes have not been removed from the list yet and must // therefore contain valid data. The nodes can also not be added to // the list again during iteration, since the list is mutably borrowed. unsafe { let node_ref = node.as_mut(); current = node_ref.prev; node_ref.next = None; node_ref.prev = None; // Note: We do not reset the pointers from the next element in the // list to the current one since we will iterate over the whole // list anyway, and therefore clean up all pointers. func(node_ref); } } } } #[cfg(all(test, feature = "alloc"))] // Tests make use of Vec at the moment mod tests { use super::*; use alloc::vec::Vec; fn collect_list(mut list: LinkedList) -> Vec { let mut result = Vec::new(); list.drain(|node| { result.push(**node); }); result } fn collect_reverse_list(mut list: LinkedList) -> Vec { let mut result = Vec::new(); list.reverse_drain(|node| { result.push(**node); }); result } unsafe fn add_nodes( list: &mut LinkedList, nodes: &mut [&mut ListNode], ) { for node in nodes.iter_mut() { list.add_front(node); } } unsafe fn assert_clean(node: &mut ListNode) { assert!(node.next.is_none()); assert!(node.prev.is_none()); } #[test] fn insert_and_iterate() { unsafe { let mut a = ListNode::new(5); let mut b = ListNode::new(7); let mut c = ListNode::new(31); let mut setup = |list: &mut LinkedList| { assert_eq!(true, list.is_empty()); list.add_front(&mut c); assert_eq!(31, **list.peek_first().unwrap()); assert_eq!(false, list.is_empty()); list.add_front(&mut b); assert_eq!(7, **list.peek_first().unwrap()); list.add_front(&mut a); assert_eq!(5, **list.peek_first().unwrap()); }; let mut list = LinkedList::new(); 
setup(&mut list); let items: Vec = collect_list(list); assert_eq!([5, 7, 31].to_vec(), items); let mut list = LinkedList::new(); setup(&mut list); let items: Vec = collect_reverse_list(list); assert_eq!([31, 7, 5].to_vec(), items); } } #[test] fn drain_and_collect() { unsafe { let mut a = ListNode::new(5); let mut b = ListNode::new(7); let mut c = ListNode::new(31); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); let taken_items: Vec = collect_list(list); assert_eq!([5, 7, 31].to_vec(), taken_items); } } #[test] fn peek_last() { unsafe { let mut a = ListNode::new(5); let mut b = ListNode::new(7); let mut c = ListNode::new(31); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); let last = list.peek_last(); assert_eq!(31, **last.unwrap()); list.remove_last(); let last = list.peek_last(); assert_eq!(7, **last.unwrap()); list.remove_last(); let last = list.peek_last(); assert_eq!(5, **last.unwrap()); list.remove_last(); let last = list.peek_last(); assert!(last.is_none()); } } #[test] fn remove_first() { unsafe { // We iterate forward and backwards through the manipulated lists // to make sure pointers in both directions are still ok. 
let mut a = ListNode::new(5); let mut b = ListNode::new(7); let mut c = ListNode::new(31); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); let removed = list.remove_first().unwrap(); assert_clean(removed); assert!(!list.is_empty()); let items: Vec = collect_list(list); assert_eq!([7, 31].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); let removed = list.remove_first().unwrap(); assert_clean(removed); assert!(!list.is_empty()); let items: Vec = collect_reverse_list(list); assert_eq!([31, 7].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut b, &mut a]); let removed = list.remove_first().unwrap(); assert_clean(removed); assert!(!list.is_empty()); let items: Vec = collect_list(list); assert_eq!([7].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut b, &mut a]); let removed = list.remove_first().unwrap(); assert_clean(removed); assert!(!list.is_empty()); let items: Vec = collect_reverse_list(list); assert_eq!([7].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut a]); let removed = list.remove_first().unwrap(); assert_clean(removed); assert!(list.is_empty()); let items: Vec = collect_list(list); assert!(items.is_empty()); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut a]); let removed = list.remove_first().unwrap(); assert_clean(removed); assert!(list.is_empty()); let items: Vec = collect_reverse_list(list); assert!(items.is_empty()); } } #[test] fn remove_last() { unsafe { // We iterate forward and backwards through the manipulated lists // to make sure pointers in both directions are still ok. 
let mut a = ListNode::new(5); let mut b = ListNode::new(7); let mut c = ListNode::new(31); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); let removed = list.remove_last().unwrap(); assert_clean(removed); assert!(!list.is_empty()); let items: Vec = collect_list(list); assert_eq!([5, 7].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); let removed = list.remove_last().unwrap(); assert_clean(removed); assert!(!list.is_empty()); let items: Vec = collect_reverse_list(list); assert_eq!([7, 5].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut b, &mut a]); let removed = list.remove_last().unwrap(); assert_clean(removed); assert!(!list.is_empty()); let items: Vec = collect_list(list); assert_eq!([5].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut b, &mut a]); let removed = list.remove_last().unwrap(); assert_clean(removed); assert!(!list.is_empty()); let items: Vec = collect_reverse_list(list); assert_eq!([5].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut a]); let removed = list.remove_last().unwrap(); assert_clean(removed); assert!(list.is_empty()); let items: Vec = collect_list(list); assert!(items.is_empty()); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut a]); let removed = list.remove_last().unwrap(); assert_clean(removed); assert!(list.is_empty()); let items: Vec = collect_reverse_list(list); assert!(items.is_empty()); } } #[test] fn remove_by_address() { unsafe { let mut a = ListNode::new(5); let mut b = ListNode::new(7); let mut c = ListNode::new(31); { // Remove first let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); assert_eq!(true, list.remove(&mut a)); assert_clean((&mut a).into()); // a should be no longer there and can't be removed twice assert_eq!(false, list.remove(&mut a)); assert_eq!(Some((&mut 
b).into()), list.head); assert_eq!(Some((&mut c).into()), b.next); assert_eq!(Some((&mut b).into()), c.prev); let items: Vec = collect_list(list); assert_eq!([7, 31].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); assert_eq!(true, list.remove(&mut a)); assert_clean((&mut a).into()); // a should be no longer there and can't be removed twice assert_eq!(false, list.remove(&mut a)); assert_eq!(Some((&mut c).into()), b.next); assert_eq!(Some((&mut b).into()), c.prev); let items: Vec = collect_reverse_list(list); assert_eq!([31, 7].to_vec(), items); } { // Remove middle let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); assert_eq!(true, list.remove(&mut b)); assert_clean((&mut b).into()); assert_eq!(Some((&mut c).into()), a.next); assert_eq!(Some((&mut a).into()), c.prev); let items: Vec = collect_list(list); assert_eq!([5, 31].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); assert_eq!(true, list.remove(&mut b)); assert_clean((&mut b).into()); assert_eq!(Some((&mut c).into()), a.next); assert_eq!(Some((&mut a).into()), c.prev); let items: Vec = collect_reverse_list(list); assert_eq!([31, 5].to_vec(), items); } { // Remove last let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); assert_eq!(true, list.remove(&mut c)); assert_clean((&mut c).into()); assert!(b.next.is_none()); assert_eq!(Some((&mut b).into()), list.tail); let items: Vec = collect_list(list); assert_eq!([5, 7].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut c, &mut b, &mut a]); assert_eq!(true, list.remove(&mut c)); assert_clean((&mut c).into()); assert!(b.next.is_none()); assert_eq!(Some((&mut b).into()), list.tail); let items: Vec = collect_reverse_list(list); assert_eq!([7, 5].to_vec(), items); } { // Remove first of two let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut b, 
&mut a]); assert_eq!(true, list.remove(&mut a)); assert_clean((&mut a).into()); // a should be no longer there and can't be removed twice assert_eq!(false, list.remove(&mut a)); assert_eq!(Some((&mut b).into()), list.head); assert_eq!(Some((&mut b).into()), list.tail); assert!(b.next.is_none()); assert!(b.prev.is_none()); let items: Vec = collect_list(list); assert_eq!([7].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut b, &mut a]); assert_eq!(true, list.remove(&mut a)); assert_clean((&mut a).into()); // a should be no longer there and can't be removed twice assert_eq!(false, list.remove(&mut a)); assert_eq!(Some((&mut b).into()), list.head); assert_eq!(Some((&mut b).into()), list.tail); assert!(b.next.is_none()); assert!(b.prev.is_none()); let items: Vec = collect_reverse_list(list); assert_eq!([7].to_vec(), items); } { // Remove last of two let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut b, &mut a]); assert_eq!(true, list.remove(&mut b)); assert_clean((&mut b).into()); assert_eq!(Some((&mut a).into()), list.head); assert_eq!(Some((&mut a).into()), list.tail); assert!(a.next.is_none()); assert!(a.prev.is_none()); let items: Vec = collect_list(list); assert_eq!([5].to_vec(), items); let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut b, &mut a]); assert_eq!(true, list.remove(&mut b)); assert_clean((&mut b).into()); assert_eq!(Some((&mut a).into()), list.head); assert_eq!(Some((&mut a).into()), list.tail); assert!(a.next.is_none()); assert!(a.prev.is_none()); let items: Vec = collect_reverse_list(list); assert_eq!([5].to_vec(), items); } { // Remove last item let mut list = LinkedList::new(); add_nodes(&mut list, &mut [&mut a]); assert_eq!(true, list.remove(&mut a)); assert_clean((&mut a).into()); assert!(list.head.is_none()); assert!(list.tail.is_none()); let items: Vec = collect_list(list); assert!(items.is_empty()); } { // Remove missing let mut list = LinkedList::new(); list.add_front(&mut b); 
list.add_front(&mut a); assert_eq!(false, list.remove(&mut c)); } } } } futures-intrusive-0.5.0/src/intrusive_pairing_heap.rs000064400000000000000000000344150072674642500213030ustar 00000000000000//! Implements an intrusive priority queue based on a pairing heap. //! //! A [pairing heap] is a heap data structure (i.e. a tree whose nodes carry //! values, with the property that every node's value is lesser or equal to its //! children's) that supports the following operations: //! //! - finding a minimum element in `O(1)` //! - This is trivial: the heap property guarantees that the root is a //! minimum element. //! - insertion of a new node in `O(1)` //! - deletion in `O(log n)`, _amortized_ //! - However, note that any _individual_ deletion may take `O(N)` time. For //! example, if we take an empty heap and insert N elements, the tree will //! have a very degenerate (shallow) shape. Then, deleting the root will //! take `O(N)` time, but it will also reorganize the tree to make //! successive deletes cheaper. //! //! [pairing heap]: https://en.wikipedia.org/wiki/Pairing_heap use core::{ marker::PhantomPinned, mem, ops::{Deref, DerefMut, Drop}, ptr::NonNull, }; /// Compares `a` and `b` without unwinding. /// This is necessary to avoid reentrancy in the heap. fn safe_lesser(a: &T, b: &T) -> bool { struct DropBomb; impl Drop for DropBomb { fn drop(&mut self) { panic!("Panicked while comparing"); } } // If `T::cmp` panics, force a double-panic (and therefore an abort). let bomb = DropBomb; let ordering = a < b; mem::forget(bomb); ordering } /// A node which carries data of type `T` and is stored in an intrusive heap. /// /// Nodes will be compared based on `T`'s [`Ord`] impl. Those comparisons must /// not panic - otherwise, the program will abort. #[derive(Debug)] pub struct HeapNode { /// The parent. `None` if this is the root. parent: Option>>, /// The previous sibling. `None` if there is no previous sibling. prev: Option>>, /// The next sibling. 
`None` if there is no next sibling. next: Option>>, /// The first child. `None` if there are no children. first_child: Option>>, /// The data which is associated to this heap item. data: T, /// Prevents `HeapNode`s from being `Unpin`. They may never be moved, since /// the heap semantics require addresses to be stable. _pin: PhantomPinned, } impl HeapNode { /// Creates a new node with the associated data pub fn new(data: T) -> HeapNode { HeapNode:: { parent: None, prev: None, next: None, first_child: None, data, _pin: PhantomPinned, } } fn is_root(&self) -> bool { if self.parent.is_none() { debug_assert_eq!(self.prev, None); debug_assert_eq!(self.next, None); true } else { false } } } impl Deref for HeapNode { type Target = T; fn deref(&self) -> &T { &self.data } } impl DerefMut for HeapNode { fn deref_mut(&mut self) -> &mut T { &mut self.data } } /// Add a child to a node. unsafe fn add_child( mut parent: NonNull>, mut child: NonNull>, ) { // require parent <= child debug_assert!(!safe_lesser(&child.as_ref().data, &parent.as_ref().data)); if let Some(mut old_first_child) = parent.as_mut().first_child.take() { child.as_mut().next = Some(old_first_child); debug_assert_eq!(old_first_child.as_ref().prev, None); old_first_child.as_mut().prev = Some(child); } parent.as_mut().first_child = Some(child); child.as_mut().parent = Some(parent); } /// Merge two root heaps. Returns the new root. unsafe fn meld( left: NonNull>, right: NonNull>, ) -> NonNull> { debug_assert!(left.as_ref().is_root()); debug_assert!(right.as_ref().is_root()); // The lesser node should become the root. if safe_lesser(&left.as_ref().data, &right.as_ref().data) { add_child(left, right); left } else { add_child(right, left); right } } /// Merge two root heaps, where the left might be empty. Returns the new root. 
unsafe fn maybe_meld( left: Option>>, right: NonNull>, ) -> NonNull> { if let Some(left) = left { meld(left, right) } else { right } } /// Given the first child in a child list, traverse and find the last child. unsafe fn last_child( first_child: NonNull>, ) -> NonNull> { let mut cur = first_child; while let Some(next) = cur.as_ref().next { cur = next; } cur } /// Given a pointer to the last node in a child list, unlink it and return the /// previous node (which has become the last node in its list). /// /// That is, given a list `A <-> B <-> C`, `unlink_prev(C)` will return `B` and /// also unlink `C` to become `A <-> B C`. /// /// If the node was a lone child, returns `None`. /// /// Parent/child pointers are untouched. unsafe fn unlink_prev( mut node: NonNull>, ) -> Option>> { debug_assert_eq!(node.as_ref().next, None); let mut prev = node.as_mut().prev.take()?; debug_assert_eq!(prev.as_ref().next, Some(node)); prev.as_mut().next = None; Some(prev) } /// Merge together a child list. Each child in the child list is expected to /// have an equal `parent`. Returns the new merged root, whose `parent` will be unset. unsafe fn merge_children( first_child: NonNull>, ) -> NonNull> { let common_parent = first_child.as_ref().parent; debug_assert!(common_parent.is_some()); // Traverse the children right-to-left. This is important for the analysis // to work. Reading: "Pairing heaps: the forward variant", // https://arxiv.org/pdf/1709.01152.pdf let mut node = last_child(first_child); let mut current = None; // Loop invariant: `node` is the first unprocessed child, `current` // is the merged result of all processed children. loop { // All nodes in the list should have the same parent. let node_parent = node.as_mut().parent.take(); debug_assert_eq!(node_parent, common_parent); // Grab the last two unprocessed elements. let mut prev = if let Some(prev) = unlink_prev(node) { prev } else { // Odd case. 
return maybe_meld(current, node); }; // All nodes in the list should have the same parent. let prev_parent = prev.as_mut().parent.take(); debug_assert_eq!(prev_parent, common_parent); // Unlink `prev` from `prev.prev`. let prev_prev = unlink_prev(prev); // Meld the pair, then meld it into the accumulator. let cur = maybe_meld(current, meld(prev, node)); if let Some(prev_prev) = prev_prev { node = prev_prev; current = Some(cur); continue; } else { // Even case. return cur; } } } /// An intrusive min-heap of nodes, where each node carries associated data /// of type `T`. #[derive(Debug)] pub struct PairingHeap { root: Option>>, } impl PairingHeap { /// Creates an empty heap pub fn new() -> Self { PairingHeap:: { root: None } } /// Adds a node to the heap. /// Safety: This function is only safe as long as `node` is guaranteed to /// get removed from the list before it gets moved or dropped. /// In addition to this `node` may not be added to another other heap before /// it is removed from the current one. pub unsafe fn insert(&mut self, node: &mut HeapNode) { // The node should not already be in a heap. debug_assert!(node.is_root()); debug_assert_eq!(node.first_child, None); if let Some(root) = self.root { self.root = Some(meld(root, node.into())); } else { self.root = Some(node.into()); } } /// Returns the smallest element in the heap without removing it. /// The function is only safe as long as valid pointers are stored inside /// the heap. /// The returned pointer is only guaranteed to be valid as long as the heap /// is not mutated pub fn peek_min(&self) -> Option>> { self.root } /// Removes the given node from the heap. /// The node must be a member of this heap, and not a member of any other /// heap. pub unsafe fn remove(&mut self, node: &mut HeapNode) { let parent = node.parent.take(); if let Some(mut parent) = parent { // Unlink this node from its parent. 
if let Some(mut prev) = node.prev { prev.as_mut().next = node.next; } else { parent.as_mut().first_child = node.next; } if let Some(mut next) = node.next { next.as_mut().prev = node.prev; } node.next = None; node.prev = None; } else { debug_assert_eq!(node.next, None); debug_assert_eq!(node.prev, None); debug_assert_eq!(self.root, Some(node.into())); self.root = None; } if let Some(first_child) = node.first_child.take() { // Merge together the children. let children = merge_children(first_child); // Add the children back into the parent. if let Some(parent) = parent { // The heap property is preserved because we had `parent.data` // <= `node.data`, and `node.data` <= `child.data` for all // children. add_child(parent, children); } else { self.root = Some(children); } } } } #[cfg(all(test, feature = "std"))] mod tests { use super::{HeapNode, PairingHeap}; use core::ptr::NonNull; // Recursively check the provided node and all descendants for: // - pointer consistency: parent pointers and next/prev // - the heap property: `node.data <= child.data` for all children unsafe fn validate_heap_node( node: &HeapNode, parent: Option<&HeapNode>, ) { assert_eq!(node.parent, parent.map(NonNull::from)); if let Some(p) = parent { assert!(p.data <= node.data); } if let Some(prev) = node.prev { assert_eq!(prev.as_ref().next, Some(node.into())); } if let Some(next) = node.next { assert_eq!(next.as_ref().prev, Some(node.into())); } let mut child = node.first_child; while let Some(c) = child { validate_heap_node(c.as_ref(), Some(node)); child = c.as_ref().next; } } fn validate_heap(heap: &PairingHeap) { if let Some(root) = heap.root { // This is also sufficient to check that `heap.root` is indeed a // minimum element of the heap. unsafe { validate_heap_node(root.as_ref(), None); } } } #[test] fn insert_and_remove() { // This test exhaustively covers every possible schedule of inserting, // then removing, each of five different nodes from the heap. 
#[derive(Copy, Clone, Debug)] enum Action { Insert(u8), Remove(u8), } fn generate_schedules( current: &mut Vec, available: &mut Vec, f: fn(&[Action]), ) { for i in 0..available.len() { let action = available.swap_remove(i); current.push(action); f(current); if let Action::Insert(j) = action { available.push(Action::Remove(j)); } generate_schedules(current, available, f); if let Action::Insert(_) = action { available.pop(); } current.pop(); // the opposite of `swap_remove` available.push(action); let len = available.len(); available.swap(i, len - 1); } } let max = if cfg!(miri) { // Miri is really slow, make things easier. 3 } else { // 5 runs in a reasonable amount of time but still exercises // interesting cases. 5 }; generate_schedules( &mut vec![], &mut (0..max).map(Action::Insert).collect(), |schedule| unsafe { let mut nodes = [ HeapNode::new(0u8), HeapNode::new(1), HeapNode::new(2), HeapNode::new(3), HeapNode::new(4), ]; let mut heap = PairingHeap::new(); for action in schedule { match *action { Action::Insert(n) => { heap.insert(&mut nodes[n as usize]); validate_heap(&heap); } Action::Remove(n) => { heap.remove(&mut nodes[n as usize]); assert!(nodes[n as usize].is_root()); assert_eq!(nodes[n as usize].first_child, None); validate_heap(&heap); } } } }, ); } #[test] fn equal_values() { // Check that things behave properly in the presence of equal values. unsafe { let mut nodes = [ HeapNode::new(0u8), HeapNode::new(0), HeapNode::new(0), HeapNode::new(0), HeapNode::new(0), ]; let mut heap = PairingHeap::new(); for node in &mut nodes { heap.insert(node); validate_heap(&heap); } for _ in 0..5 { heap.remove(heap.peek_min().unwrap().as_mut()); validate_heap(&heap); } assert_eq!(heap.peek_min(), None); } } } futures-intrusive-0.5.0/src/lib.rs000064400000000000000000000226600072674642500153120ustar 00000000000000//! Synchronization primitives and utilities based on intrusive collections. //! //! 
This crate provides a variety of `Futures`-based and `async/await` compatible //! types that are based on the idea of intrusive collections: //! - Channels in a variety of flavors: //! - Oneshot //! - Multi-Producer Multi-Consumer (MPMC) //! - State Broadcast //! - Synchronization Primitives: //! - Manual Reset Event //! - Mutex //! - Semaphore //! - A timer //! //! ## Intrusive collections? //! //! In an intrusive collection, the elements that want to get stored inside the //! collection provide the means to store themselves inside the collection. //! E.g. in an intrusive linked list, each element that gets stored inside the //! list contains a pointer field that points to the next list element. E.g. //! //! ``` //! // The element which is intended to be stored inside an intrusive container //! struct ListElement { //! data: u32, //! next: *mut ListElement, //! } //! //! // The intrusive container //! struct List { //! head: *mut ListElement, //! } //! ``` //! //! The advantage here is that the intrusive collection (here: the list) requires //! only a fixed amount of memory. In this case it only needs a pointer to the //! first element. //! //! The list container itself has a fixed size of a single pointer independent //! of the number of stored elements. //! //! Intrusive lists are often used in low-level code like in operating system //! kernels. E.g. they can be used for storing elements that represent threads //! that are blocked and waiting on queue. In that case the stored elements can //! be on the call stack of the caller of each blocked thread, since the //! call stack won't change as long as the thread is blocked. //! //! ### Application in Futures //! //! This library brings this idea into the world of Rusts `Future`s. Due to the //! addition of `Pin`ning, the address of a certain `Future` is not allowed to //! change between the first call to `poll()` and when the `Future` is dropped. //! 
This means the data inside the `Future` itself can be inserted into an //! intrusive container. If the the call to `Future::poll()` is not immedately //! ready, some parts of the `Future` itself are registered in the type which //! yielded the `Future`. Each `Future` can store a `Waker`. When the original //! type becomes ready, it can iterate through the list of registered `Future`s, //! wakeup associated tasks, and potentially remove them from its queue. //! //! The result is that the future-yielding type is not required to copy an //! arbitrary number of `Waker` objects into itself, and thereby does not require //! dynamic memory for this task. //! //! When a `Future` gets destructed/dropped, it must make sure to remove itself //! from any collections that refer to it to avoid invalid memory accesses. //! //! This library implements common synchronization primitives for the usage in //! asychronous code based on this concept. //! //! The implementation requires the usage of a fair chunk of `unsafe` //! annotations. However the provided user-level API is intended to be fully safe. //! //! ## Features of this library //! //! The following types are currently implemented: //! - Channels (oneshot and multi-producer-multi-consumer) //! - Synchronization primitives (async mutexes and events) //! - Timers //! //! ## Design goals for the library //! //! - Provide implementations of common synchronization primitives in a platform //! independent fashion. //! - Support `no-std` environments. As many types as possible are also provided //! for `no-std` environments. The library should boost the ability to use //! async Rust code in environments like: //! - Microcontrollers (RTOS and bare-metal) //! - Kernels //! - Drivers //! - Avoid dynamic memory allocations at runtime. After objects from this //! library have been created, they should not require allocation of any //! further memory at runtime. E.g. they should not need to allocate memory //! 
for each call to an asynchronous function or each time a new task accesses //! the same object in parallel. //! - Offer familiar APIs. //! The library tries to mimic the APIs of existing Rust libraries like the //! standard library and `futures-rs` as closely as possible. //! //! ## Non goals //! //! - Provide IO primitives (like sockets), or platform specific implementations. //! - Reach the highest possible performance in terms of throughput and latency. //! While code in this library is optimized for performance, portability //! and deterministic memory usage are more important goals. //! - Provide future wrappers for platform-specific APIs. //! //! ## Local, Non-local and shared flavors //! //! The library provides types in a variety of flavors: //! //! - A local flavor (e.g. [`channel::LocalChannel`]) //! - A non-local flavor (e.g. [`channel::Channel`]) //! - A shared flavor (e.g. [`channel::shared::Sender`]) //! - A generic flavor (e.g. [`channel::GenericChannel`] and //! [`channel::shared::GenericSender`]) //! //! The difference between these types lie in their thread-safety. The non-local //! flavors of types can be accessed from multiple threads (and thereby also //! futures tasks) concurrently. This means they implement the `Sync` trait in //! addition to the `Send` trait. //! The local flavors only implement the `Send` trait. //! //! ### Local flavor //! //! The local flavors will require no internal synchronization (e.g. internal //! Mutexes) and can therefore be provided for all platforms (including `no-std`). //! Due the lack of required synchronization, they are also very fast. //! //! It might seem counter-intuitive to provide synchronization primitives that //! only work within a single task. However there are a variety of applications //! where these can be used to coordinate sub-tasks (futures that are polled on //! a single task concurrently). //! //! The following example demonstrates this use-case: //! //! ``` //! # use futures::join; //! 
# use futures_intrusive::sync::LocalManualResetEvent; //! async fn async_fn() { //! let event = LocalManualResetEvent::new(false); //! let task_a = async { //! // Wait for the event //! event.wait().await; //! // Do something with the knowledge that task_b reached a certain state //! }; //! let task_b = async { //! // Some complex asynchronous workflow here //! // ... //! // Signal task_a //! event.set(); //! }; //! join!(task_a, task_b); //! } //! ``` //! //! ### Non-local flavor //! //! The non-local flavors can be used between arbitrary tasks and threads. They //! use internal synchronization for this in form of an embedded `Mutex` of //! [`parking_lot::Mutex`] type. //! //! The non-local flavors are only available in `alloc` environments. //! //! ### Shared flavor //! //! For some types a shared flavor is provided. Non-local flavors of types are //! `Sync`, but they still can only be shared by reference between various tasks. //! Shared flavors are also `Sync`, but the types additionally implement the //! `Clone` trait, which allows duplicating the object, and passing ownership of //! it to a different task. These types allow avoiding references (and thereby //! lifetimes) in some scenarios, which makes them more convenient to use. The //! types also return `Future`s which do not have an associated lifetime. This //! allows using those types as implementations of traits without the need for //! generic associated types (GATs). //! //! Due to the requirement of atomic reference counting, these types are //! currently only available for `alloc` environments. //! //! ### Generic flavor //! //! The generic flavors of provided types are parameterized around a //! [`lock_api::RawMutex`] type. These form the base for the non-local and shared //! flavors which simply parameterize the generic flavor in either a //! non-thread-safe or thread-safe fashion. //! //! Users can directly use the generic flavors to adapt the provided thread-safe //! 
types for use in `no-std` environments. //! //! E.g. by providing a custom [`lock_api::RawMutex`] //! implementation, the following platforms can be supported: //! //! - For RTOS platforms, RTOS-specific mutexes can be wrapped. //! - For kernel development, spinlock based mutexes can be created. //! - For embedded development, mutexes which just disable interrupts can be //! utilized. //! //! //! ## Relation to types in other libraries //! //! Other libraries (e.g. `futures-rs` and `tokio`) provide many primitives that //! are comparable feature-wise to the types in this library. //! //! The most important differences are: //! - This library has a bigger focus on `no-std` environments, and does not //! only try to provide an implementation for `alloc` or `std`. //! - The types in this library do not require dynamic memory allocation for //! waking up an arbitrary number of tasks waiting on a particular //! `Future`. Other libraries typically require heap-allocated nodes of //! growing vectors for handling a varying number of tasks. //! - The `Future`s produced by this library are all `!Unpin`, which might make //! them less ergonomic to use. //! #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs, missing_debug_implementations)] #![deny(bare_trait_objects)] #[cfg(feature = "alloc")] extern crate alloc; mod noop_lock; use noop_lock::NoopLock; pub mod buffer; #[allow(dead_code)] mod intrusive_double_linked_list; mod intrusive_pairing_heap; pub mod channel; pub mod sync; pub mod timer; mod utils; futures-intrusive-0.5.0/src/noop_lock.rs000064400000000000000000000011040072674642500165150ustar 00000000000000//! 
An unsafe (non-thread-safe) lock, equivalent to UnsafeCell use core::marker::PhantomData; use lock_api::{GuardSend, RawMutex}; /// An unsafe (non-thread-safe) lock, equivalent to UnsafeCell #[derive(Debug)] pub struct NoopLock { /// Assigned in order to make the type !Sync _phantom: PhantomData<*mut ()>, } unsafe impl RawMutex for NoopLock { const INIT: NoopLock = NoopLock { _phantom: PhantomData, }; type GuardMarker = GuardSend; fn lock(&self) {} fn try_lock(&self) -> bool { true } unsafe fn unlock(&self) {} } futures-intrusive-0.5.0/src/sync/manual_reset_event.rs000064400000000000000000000244540072674642500214030ustar 00000000000000//! An asynchronously awaitable event for signalization between tasks use crate::{ intrusive_double_linked_list::{LinkedList, ListNode}, utils::update_waker_ref, NoopLock, }; use core::pin::Pin; use futures_core::{ future::{FusedFuture, Future}, task::{Context, Poll, Waker}, }; use lock_api::{Mutex, RawMutex}; /// Tracks how the future had interacted with the event #[derive(PartialEq)] enum PollState { /// The task has never interacted with the event. New, /// The task was added to the wait queue at the event. Waiting, /// The task has been polled to completion. Done, } /// Tracks the WaitForEventFuture waiting state. /// Access to this struct is synchronized through the mutex in the Event. 
struct WaitQueueEntry { /// The task handle of the waiting task task: Option, /// Current polling state state: PollState, } impl WaitQueueEntry { /// Creates a new WaitQueueEntry fn new() -> WaitQueueEntry { WaitQueueEntry { task: None, state: PollState::New, } } } /// Internal state of the `ManualResetEvent` pair above struct EventState { is_set: bool, waiters: LinkedList, } impl EventState { fn new(is_set: bool) -> EventState { EventState { is_set, waiters: LinkedList::new(), } } fn reset(&mut self) { self.is_set = false; } fn set(&mut self) { if self.is_set != true { self.is_set = true; // Wakeup all waiters // This happens inside the lock to make cancellation reliable // If we would access waiters outside of the lock, the pointers // may no longer be valid. // Typically this shouldn't be an issue, since waking a task should // only move it from the blocked into the ready state and not have // further side effects. // Use a reverse iterator, so that the oldest waiter gets // scheduled first self.waiters.reverse_drain(|waiter| { if let Some(handle) = waiter.task.take() { handle.wake(); } waiter.state = PollState::Done; }); } } fn is_set(&self) -> bool { self.is_set } /// Checks if the event is set. If it is this returns immediately. /// If the event isn't set, the WaitForEventFuture gets added to the wait /// queue at the event, and will be signalled once ready. /// This function is only safe as long as the `wait_node`s address is guaranteed /// to be stable until it gets removed from the queue. 
unsafe fn try_wait( &mut self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll<()> { match wait_node.state { PollState::New => { if self.is_set { // The event is already signaled wait_node.state = PollState::Done; Poll::Ready(()) } else { // Added the task to the wait queue wait_node.task = Some(cx.waker().clone()); wait_node.state = PollState::Waiting; self.waiters.add_front(wait_node); Poll::Pending } } PollState::Waiting => { // The WaitForEventFuture is already in the queue. // The event can't have been set, since this would change the // waitstate inside the mutex. However the caller might have // passed a different `Waker`. In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); Poll::Pending } PollState::Done => { // We have been woken up by the event. // This does not guarantee that the event is still set. It could // have been reset it in the meantime. Poll::Ready(()) } } } fn remove_waiter(&mut self, wait_node: &mut ListNode) { // WaitForEventFuture only needs to get removed if it has been added to // the wait queue of the Event. This has happened in the PollState::Waiting case. if let PollState::Waiting = wait_node.state { // Safety: Due to the state, we know that the node must be part // of the waiter list if !unsafe { self.waiters.remove(wait_node) } { // Panic if the address isn't found. This can only happen if the contract was // violated, e.g. the WaitQueueEntry got moved after the initial poll. panic!("Future could not be removed from wait queue"); } wait_node.state = PollState::Done; } } } /// A synchronization primitive which can be either in the set or reset state. /// /// Tasks can wait for the event to get set by obtaining a Future via `wait`. /// This Future will get fulfilled when the event has been set. 
pub struct GenericManualResetEvent { inner: Mutex, } // The Event is can be sent to other threads as long as it's not borrowed unsafe impl Send for GenericManualResetEvent { } // The Event is thread-safe as long as the utilized Mutex is thread-safe unsafe impl Sync for GenericManualResetEvent { } impl core::fmt::Debug for GenericManualResetEvent { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("ManualResetEvent").finish() } } impl GenericManualResetEvent { /// Creates a new ManualResetEvent in the given state pub fn new(is_set: bool) -> GenericManualResetEvent { GenericManualResetEvent { inner: Mutex::::new(EventState::new(is_set)), } } /// Sets the event. /// /// Setting the event will notify all pending waiters. pub fn set(&self) { self.inner.lock().set() } /// Resets the event. pub fn reset(&self) { self.inner.lock().reset() } /// Returns whether the event is set pub fn is_set(&self) -> bool { self.inner.lock().is_set() } /// Returns a future that gets fulfilled when the event is set. pub fn wait(&self) -> GenericWaitForEventFuture { GenericWaitForEventFuture { event: Some(self), wait_node: ListNode::new(WaitQueueEntry::new()), } } unsafe fn try_wait( &self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll<()> { self.inner.lock().try_wait(wait_node, cx) } fn remove_waiter(&self, wait_node: &mut ListNode) { self.inner.lock().remove_waiter(wait_node) } } /// A Future that is resolved once the corresponding ManualResetEvent has been set #[must_use = "futures do nothing unless polled"] pub struct GenericWaitForEventFuture<'a, MutexType: RawMutex> { /// The ManualResetEvent that is associated with this WaitForEventFuture event: Option<&'a GenericManualResetEvent>, /// Node for waiting at the event wait_node: ListNode, } // Safety: Futures can be sent between threads as long as the underlying // event is thread-safe (Sync), which allows to poll/register/unregister from // a different thread. 
unsafe impl<'a, MutexType: RawMutex + Sync> Send for GenericWaitForEventFuture<'a, MutexType> { } impl<'a, MutexType: RawMutex> core::fmt::Debug for GenericWaitForEventFuture<'a, MutexType> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericWaitForEventFuture").finish() } } impl<'a, MutexType: RawMutex> Future for GenericWaitForEventFuture<'a, MutexType> { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs. // However this didn't seem to work for some borrow checker reasons // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside MutexLocalFuture is stable, // and we don't move any fields inside the future until it gets dropped. let mut_self: &mut GenericWaitForEventFuture = unsafe { Pin::get_unchecked_mut(self) }; let event = mut_self .event .expect("polled WaitForEventFuture after completion"); let poll_res = unsafe { event.try_wait(&mut mut_self.wait_node, cx) }; if let Poll::Ready(()) = poll_res { // The event was set mut_self.event = None; } poll_res } } impl<'a, MutexType: RawMutex> FusedFuture for GenericWaitForEventFuture<'a, MutexType> { fn is_terminated(&self) -> bool { self.event.is_none() } } impl<'a, MutexType: RawMutex> Drop for GenericWaitForEventFuture<'a, MutexType> { fn drop(&mut self) { // If this WaitForEventFuture has been polled and it was added to the // wait queue at the event, it must be removed before dropping. // Otherwise the event would access invalid memory. if let Some(ev) = self.event { ev.remove_waiter(&mut self.wait_node); } } } // Export a non thread-safe version using NoopLock /// A [`GenericManualResetEvent`] which is not thread-safe. pub type LocalManualResetEvent = GenericManualResetEvent; /// A [`GenericWaitForEventFuture`] for [`LocalManualResetEvent`]. 
pub type LocalWaitForEventFuture<'a> = GenericWaitForEventFuture<'a, NoopLock>; #[cfg(feature = "std")] mod if_std { use super::*; // Export a thread-safe version using parking_lot::RawMutex /// A [`GenericManualResetEvent`] implementation backed by [`parking_lot`]. pub type ManualResetEvent = GenericManualResetEvent; /// A [`GenericWaitForEventFuture`] for [`ManualResetEvent`]. pub type WaitForEventFuture<'a> = GenericWaitForEventFuture<'a, parking_lot::RawMutex>; } #[cfg(feature = "std")] pub use self::if_std::*; futures-intrusive-0.5.0/src/sync/mod.rs000064400000000000000000000023230072674642500162710ustar 00000000000000//! Asynchronous synchronization primitives based on intrusive collections. //! //! This module provides various primitives for synchronizing concurrently //! executing futures. mod manual_reset_event; pub use self::manual_reset_event::{ GenericManualResetEvent, GenericWaitForEventFuture, LocalManualResetEvent, LocalWaitForEventFuture, }; #[cfg(feature = "std")] pub use self::manual_reset_event::{ManualResetEvent, WaitForEventFuture}; mod mutex; pub use self::mutex::{ GenericMutex, GenericMutexGuard, GenericMutexLockFuture, LocalMutex, LocalMutexGuard, LocalMutexLockFuture, }; #[cfg(feature = "std")] pub use self::mutex::{Mutex, MutexGuard, MutexLockFuture}; mod semaphore; pub use self::semaphore::{ GenericSemaphore, GenericSemaphoreAcquireFuture, GenericSemaphoreReleaser, LocalSemaphore, LocalSemaphoreAcquireFuture, LocalSemaphoreReleaser, }; #[cfg(feature = "alloc")] pub use self::semaphore::{ GenericSharedSemaphore, GenericSharedSemaphoreAcquireFuture, GenericSharedSemaphoreReleaser, }; #[cfg(feature = "std")] pub use self::semaphore::{ Semaphore, SemaphoreAcquireFuture, SemaphoreReleaser, SharedSemaphore, SharedSemaphoreAcquireFuture, SharedSemaphoreReleaser, }; futures-intrusive-0.5.0/src/sync/mutex.rs000064400000000000000000000433140072674642500166610ustar 00000000000000//! 
An asynchronously awaitable mutex for synchronization between concurrently //! executing futures. use crate::{ intrusive_double_linked_list::{LinkedList, ListNode}, utils::update_waker_ref, NoopLock, }; use core::{ cell::UnsafeCell, ops::{Deref, DerefMut}, pin::Pin, }; use futures_core::{ future::{FusedFuture, Future}, task::{Context, Poll, Waker}, }; use lock_api::{Mutex as LockApiMutex, RawMutex}; /// Tracks how the future had interacted with the mutex #[derive(PartialEq)] enum PollState { /// The task has never interacted with the mutex. New, /// The task was added to the wait queue at the mutex. Waiting, /// The task had previously waited on the mutex, but was notified /// that the mutex was released in the meantime. Notified, /// The task had been polled to completion. Done, } /// Tracks the MutexLockFuture waiting state. /// Access to this struct is synchronized through the mutex in the Event. struct WaitQueueEntry { /// The task handle of the waiting task task: Option, /// Current polling state state: PollState, } impl WaitQueueEntry { /// Creates a new WaitQueueEntry fn new() -> WaitQueueEntry { WaitQueueEntry { task: None, state: PollState::New, } } } /// Internal state of the `Mutex` struct MutexState { is_fair: bool, is_locked: bool, waiters: LinkedList, } impl MutexState { fn new(is_fair: bool) -> Self { MutexState { is_fair, is_locked: false, waiters: LinkedList::new(), } } /// Returns the `Waker` associated with the up the last waiter /// /// If the Mutex is not fair, removes the associated wait node also from /// the wait queue fn return_last_waiter(&mut self) -> Option { let last_waiter = if self.is_fair { self.waiters.peek_last_mut() } else { self.waiters.remove_last() }; if let Some(last_waiter) = last_waiter { // Notify the waiter that it can try to lock the mutex again. // The notification gets tracked inside the waiter. // If the waiter aborts it's wait (drops the future), another task // must be woken. 
last_waiter.state = PollState::Notified; let task = &mut last_waiter.task; return task.take(); } None } fn is_locked(&self) -> bool { self.is_locked } /// Unlocks the mutex /// /// This is expected to be only called from the current holder of the mutex. /// The method returns the `Waker` which is associated with the task that /// needs to get woken due to the unlock. fn unlock(&mut self) -> Option { if self.is_locked { self.is_locked = false; // TODO: Does this require a memory barrier for the actual data, // or is this covered by unlocking the mutex which protects the data? // Wakeup the last waiter self.return_last_waiter() } else { None } } /// Tries to lock the mutex synchronously. /// /// Returns true if the lock obtained and false otherwise. fn try_lock_sync(&mut self) -> bool { // The lock can only be obtained synchronously if // - it is not locked // - the Semaphore is either not fair, or there are no waiters // - required_permits == 0 if !self.is_locked && (!self.is_fair || self.waiters.is_empty()) { self.is_locked = true; true } else { false } } /// Tries to acquire the Mutex from a WaitQueueEntry. /// /// If it isn't available, the WaitQueueEntry gets added to the wait /// queue at the Mutex, and will be signalled once ready. /// This function is only safe as long as the `wait_node`s address is guaranteed /// to be stable until it gets removed from the queue. unsafe fn try_lock( &mut self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll<()> { match wait_node.state { PollState::New => { // The fast path - the Mutex isn't locked by anyone else. // If the mutex is fair, noone must be in the wait list before us. if self.try_lock_sync() { wait_node.state = PollState::Done; Poll::Ready(()) } else { // Add the task to the wait queue wait_node.task = Some(cx.waker().clone()); wait_node.state = PollState::Waiting; self.waiters.add_front(wait_node); Poll::Pending } } PollState::Waiting => { // The MutexLockFuture is already in the queue. 
if self.is_fair { // The task needs to wait until it gets notified in order to // maintain the ordering. However the caller might have // passed a different `Waker`. In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); Poll::Pending } else { // For throughput improvement purposes, grab the lock immediately // if it's available. if !self.is_locked { self.is_locked = true; wait_node.state = PollState::Done; // Since this waiter has been registered before, it must // get removed from the waiter list. // Safety: Due to the state, we know that the node must be part // of the waiter list self.force_remove_waiter(wait_node); Poll::Ready(()) } else { // The caller might have passed a different `Waker`. // In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); Poll::Pending } } } PollState::Notified => { // We had been woken by the mutex, since the mutex is available again. // The mutex thereby removed us from the waiters list. // Just try to lock again. If the mutex isn't available, // we need to add it to the wait queue again. if !self.is_locked { if self.is_fair { // In a fair Mutex, the WaitQueueEntry is kept in the // linked list and must be removed here // Safety: Due to the state, we know that the node must be part // of the waiter list self.force_remove_waiter(wait_node); } self.is_locked = true; wait_node.state = PollState::Done; Poll::Ready(()) } else { // Fair mutexes should always be able to acquire the lock // after they had been notified debug_assert!(!self.is_fair); // Add to queue wait_node.task = Some(cx.waker().clone()); wait_node.state = PollState::Waiting; self.waiters.add_front(wait_node); Poll::Pending } } PollState::Done => { // The future had been polled to completion before panic!("polled Mutex after completion"); } } } /// Tries to remove a waiter from the wait queue, and panics if the /// waiter is no longer valid. 
unsafe fn force_remove_waiter( &mut self, wait_node: &mut ListNode, ) { if !self.waiters.remove(wait_node) { // Panic if the address isn't found. This can only happen if the contract was // violated, e.g. the WaitQueueEntry got moved after the initial poll. panic!("Future could not be removed from wait queue"); } } /// Removes the waiter from the list. /// /// This function is only safe as long as the reference that is passed here /// equals the reference/address under which the waiter was added. /// The waiter must not have been moved in between. /// /// Returns the `Waker` of another task which might get ready to run due to /// this. fn remove_waiter( &mut self, wait_node: &mut ListNode, ) -> Option { // MutexLockFuture only needs to get removed if it had been added to // the wait queue of the Mutex. This has happened in the PollState::Waiting case. // If the current waiter was notified, another waiter must get notified now. match wait_node.state { PollState::Notified => { if self.is_fair { // In a fair Mutex, the WaitQueueEntry is kept in the // linked list and must be removed here // Safety: Due to the state, we know that the node must be part // of the waiter list unsafe { self.force_remove_waiter(wait_node) }; } wait_node.state = PollState::Done; // Since the task was notified but did not lock the Mutex, // another task gets the chance to run. self.return_last_waiter() } PollState::Waiting => { // Remove the WaitQueueEntry from the linked list // Safety: Due to the state, we know that the node must be part // of the waiter list unsafe { self.force_remove_waiter(wait_node) }; wait_node.state = PollState::Done; None } PollState::New | PollState::Done => None, } } } /// An RAII guard returned by the `lock` and `try_lock` methods. /// When this structure is dropped (falls out of scope), the lock will be /// unlocked. 
pub struct GenericMutexGuard<'a, MutexType: RawMutex, T: 'a> { /// The Mutex which is associated with this Guard mutex: &'a GenericMutex, } impl core::fmt::Debug for GenericMutexGuard<'_, MutexType, T> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericMutexGuard").finish() } } impl Drop for GenericMutexGuard<'_, MutexType, T> { fn drop(&mut self) { // Release the mutex let waker = { self.mutex.state.lock().unlock() }; if let Some(waker) = waker { waker.wake(); } } } impl Deref for GenericMutexGuard<'_, MutexType, T> { type Target = T; fn deref(&self) -> &T { unsafe { &*self.mutex.value.get() } } } impl DerefMut for GenericMutexGuard<'_, MutexType, T> { fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.mutex.value.get() } } } // Safety: GenericMutexGuard may only be used across threads if the underlying // type is Sync. unsafe impl Sync for GenericMutexGuard<'_, MutexType, T> { } /// A future which resolves when the target mutex has been successfully acquired. #[must_use = "futures do nothing unless polled"] pub struct GenericMutexLockFuture<'a, MutexType: RawMutex, T: 'a> { /// The Mutex which should get locked trough this Future mutex: Option<&'a GenericMutex>, /// Node for waiting at the mutex wait_node: ListNode, } // Safety: Futures can be sent between threads as long as the underlying // mutex is thread-safe (Sync), which allows to poll/register/unregister from // a different thread. 
unsafe impl<'a, MutexType: RawMutex + Sync, T: 'a> Send for GenericMutexLockFuture<'a, MutexType, T> { } impl<'a, MutexType: RawMutex, T: core::fmt::Debug> core::fmt::Debug for GenericMutexLockFuture<'a, MutexType, T> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericMutexLockFuture").finish() } } impl<'a, MutexType: RawMutex, T> Future for GenericMutexLockFuture<'a, MutexType, T> { type Output = GenericMutexGuard<'a, MutexType, T>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside GenericMutexLockFuture is stable, // and we don't move any fields inside the future until it gets dropped. let mut_self: &mut GenericMutexLockFuture = unsafe { Pin::get_unchecked_mut(self) }; let mutex = mut_self .mutex .expect("polled GenericMutexLockFuture after completion"); let mut mutex_state = mutex.state.lock(); let poll_res = unsafe { mutex_state.try_lock(&mut mut_self.wait_node, cx) }; match poll_res { Poll::Pending => Poll::Pending, Poll::Ready(()) => { // The mutex was acquired mut_self.mutex = None; Poll::Ready(GenericMutexGuard::<'a, MutexType, T> { mutex }) } } } } impl<'a, MutexType: RawMutex, T> FusedFuture for GenericMutexLockFuture<'a, MutexType, T> { fn is_terminated(&self) -> bool { self.mutex.is_none() } } impl<'a, MutexType: RawMutex, T> Drop for GenericMutexLockFuture<'a, MutexType, T> { fn drop(&mut self) { // If this GenericMutexLockFuture has been polled and it was added to the // wait queue at the mutex, it must be removed before dropping. // Otherwise the mutex would access invalid memory. let waker = if let Some(mutex) = self.mutex { let mut mutex_state = mutex.state.lock(); mutex_state.remove_waiter(&mut self.wait_node) } else { None }; if let Some(waker) = waker { waker.wake(); } } } /// A futures-aware mutex. 
pub struct GenericMutex { value: UnsafeCell, state: LockApiMutex, } // It is safe to send mutexes between threads, as long as they are not used and // thereby borrowed unsafe impl Send for GenericMutex { } // The mutex is thread-safe as long as the utilized mutex is thread-safe unsafe impl Sync for GenericMutex { } impl core::fmt::Debug for GenericMutex { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("Mutex") .field("is_locked", &self.is_locked()) .finish() } } impl GenericMutex { /// Creates a new futures-aware mutex. /// /// `is_fair` defines whether the `Mutex` should behave be fair regarding the /// order of waiters. A fair `Mutex` will only allow the first waiter which /// tried to lock but failed to lock the `Mutex` once it's available again. /// Other waiters must wait until either this locking attempt completes, and /// the `Mutex` gets unlocked again, or until the `MutexLockFuture` which /// tried to gain the lock is dropped. pub fn new(value: T, is_fair: bool) -> GenericMutex { GenericMutex:: { value: UnsafeCell::new(value), state: LockApiMutex::new(MutexState::new(is_fair)), } } /// Acquire the mutex asynchronously. /// /// This method returns a future that will resolve once the mutex has been /// successfully acquired. pub fn lock(&self) -> GenericMutexLockFuture<'_, MutexType, T> { GenericMutexLockFuture:: { mutex: Some(&self), wait_node: ListNode::new(WaitQueueEntry::new()), } } /// Tries to acquire the mutex /// /// If acquiring the mutex is successful, a [`GenericMutexGuard`] /// will be returned, which allows to access the contained data. /// /// Otherwise `None` will be returned. pub fn try_lock(&self) -> Option> { if self.state.lock().try_lock_sync() { Some(GenericMutexGuard { mutex: self }) } else { None } } /// Returns whether the mutex is locked. 
pub fn is_locked(&self) -> bool { self.state.lock().is_locked() } } // Export a non thread-safe version using NoopLock /// A [`GenericMutex`] which is not thread-safe. pub type LocalMutex = GenericMutex; /// A [`GenericMutexGuard`] for [`LocalMutex`]. pub type LocalMutexGuard<'a, T> = GenericMutexGuard<'a, NoopLock, T>; /// A [`GenericMutexLockFuture`] for [`LocalMutex`]. pub type LocalMutexLockFuture<'a, T> = GenericMutexLockFuture<'a, NoopLock, T>; #[cfg(feature = "std")] mod if_std { use super::*; // Export a thread-safe version using parking_lot::RawMutex /// A [`GenericMutex`] backed by [`parking_lot`]. pub type Mutex = GenericMutex; /// A [`GenericMutexGuard`] for [`Mutex`]. pub type MutexGuard<'a, T> = GenericMutexGuard<'a, parking_lot::RawMutex, T>; /// A [`GenericMutexLockFuture`] for [`Mutex`]. pub type MutexLockFuture<'a, T> = GenericMutexLockFuture<'a, parking_lot::RawMutex, T>; } #[cfg(feature = "std")] pub use self::if_std::*; futures-intrusive-0.5.0/src/sync/semaphore.rs000064400000000000000000001013570072674642500175040ustar 00000000000000//! An asynchronously awaitable semaphore for synchronization between concurrently //! executing futures. use crate::{ intrusive_double_linked_list::{LinkedList, ListNode}, utils::update_waker_ref, NoopLock, }; use core::pin::Pin; use futures_core::{ future::{FusedFuture, Future}, task::{Context, Poll, Waker}, }; use lock_api::{Mutex as LockApiMutex, RawMutex}; /// Tracks how the future had interacted with the semaphore #[derive(PartialEq)] enum PollState { /// The task has never interacted with the semaphore. New, /// The task was added to the wait queue at the semaphore. Waiting, /// The task had previously waited on the semaphore, but was notified /// that the semaphore was released in the meantime and that the task /// thereby could retry. Notified, /// The task had been polled to completion. Done, } /// Tracks the SemaphoreAcquireFuture waiting state. 
struct WaitQueueEntry { /// The task handle of the waiting task task: Option, /// Current polling state state: PollState, /// The amount of permits that should be obtained required_permits: usize, } impl WaitQueueEntry { /// Creates a new WaitQueueEntry fn new(required_permits: usize) -> WaitQueueEntry { WaitQueueEntry { task: None, state: PollState::New, required_permits, } } } /// Internal state of the `Semaphore` struct SemaphoreState { is_fair: bool, permits: usize, waiters: LinkedList, } impl SemaphoreState { fn new(is_fair: bool, permits: usize) -> Self { SemaphoreState { is_fair, permits, waiters: LinkedList::new(), } } /// Wakes up the last waiter and removes it from the wait queue fn wakeup_waiters(&mut self) { // Wake as many tasks as the permits allow let mut available = self.permits; loop { match self.waiters.peek_last_mut() { None => return, Some(last_waiter) => { // Check if enough permits are available for this waiter. // If not then a wakeup attempt won't be successful. if available < last_waiter.required_permits { return; } available -= last_waiter.required_permits; // Notify the waiter that it can try to acquire the semaphore again. // The notification gets tracked inside the waiter. // If the waiter aborts it's wait (drops the future), another task // must be woken. if last_waiter.state != PollState::Notified { last_waiter.state = PollState::Notified; let task = &last_waiter.task; if let Some(ref handle) = task { handle.wake_by_ref(); } } // In the case of a non-fair semaphore, the waiters are directly // removed from the semaphores wait queue when woken. // That avoids having to remove the wait element later. if !self.is_fair { self.waiters.remove_last(); } else { // For a fair Semaphore we never wake more than 1 task. // That one needs to acquire the Semaphore. // TODO: We actually should be able to wake more, since // it's guaranteed that both tasks could make progress. // However the we currently can't peek iterate in reverse order. 
return; } } } } } fn permits(&self) -> usize { self.permits } /// Releases a certain amount of permits back to the semaphore fn release(&mut self, permits: usize) { if permits == 0 { return; } // TODO: Overflow check self.permits += permits; // Wakeup the last waiter self.wakeup_waiters(); } /// Tries to acquire the given amount of permits synchronously. /// /// Returns true if the permits were obtained and false otherwise. fn try_acquire_sync(&mut self, required_permits: usize) -> bool { // Permits can only be obtained synchronously if there are // - enough permits available // - the Semaphore is either not fair, or there are no waiters // - required_permits == 0 if (self.permits >= required_permits) && (!self.is_fair || self.waiters.is_empty() || required_permits == 0) { self.permits -= required_permits; true } else { false } } /// Tries to acquire the Semaphore from a WaitQueueEntry. /// If it isn't available, the WaitQueueEntry gets added to the wait /// queue at the Semaphore, and will be signalled once ready. /// This function is only safe as long as the `wait_node`s address is guaranteed /// to be stable until it gets removed from the queue. unsafe fn try_acquire( &mut self, wait_node: &mut ListNode, cx: &mut Context<'_>, ) -> Poll<()> { match wait_node.state { PollState::New => { // The fast path - enough permits are available if self.try_acquire_sync(wait_node.required_permits) { wait_node.state = PollState::Done; Poll::Ready(()) } else { // Add the task to the wait queue wait_node.task = Some(cx.waker().clone()); wait_node.state = PollState::Waiting; self.waiters.add_front(wait_node); Poll::Pending } } PollState::Waiting => { // The SemaphoreAcquireFuture is already in the queue. if self.is_fair { // The task needs to wait until it gets notified in order to // maintain the ordering. // However the caller might have passed a different `Waker`. // In this case we need to update it. 
update_waker_ref(&mut wait_node.task, cx); Poll::Pending } else { // For throughput improvement purposes, check immediately // if enough permits are available if self.permits >= wait_node.required_permits { self.permits -= wait_node.required_permits; wait_node.state = PollState::Done; // Since this waiter has been registered before, it must // get removed from the waiter list. // Safety: Due to the state, we know that the node must be part // of the waiter list self.force_remove_waiter(wait_node); Poll::Ready(()) } else { // The caller might have passed a different `Waker`. // In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); Poll::Pending } } } PollState::Notified => { // We had been woken by the semaphore, since the semaphore is available again. // The semaphore thereby removed us from the waiters list. // Just try to lock again. If the semaphore isn't available, // we need to add it to the wait queue again. if self.permits >= wait_node.required_permits { if self.is_fair { // In a fair Semaphore, the WaitQueueEntry is kept in the // linked list and must be removed here // Safety: Due to the state, we know that the node must be part // of the waiter list self.force_remove_waiter(wait_node); } self.permits -= wait_node.required_permits; if self.is_fair { // There might be another task which is ready to run, // but couldn't, since it was blocked behind the fair waiter. self.wakeup_waiters(); } wait_node.state = PollState::Done; Poll::Ready(()) } else { // A fair semaphore should never end up in that branch, since // it's only notified when it's permits are guaranteed to // be available. assert! 
in order to find logic bugs assert!( !self.is_fair, "Fair semaphores should always be ready when notified" ); // Add to queue wait_node.task = Some(cx.waker().clone()); wait_node.state = PollState::Waiting; self.waiters.add_front(wait_node); Poll::Pending } } PollState::Done => { // The future had been polled to completion before panic!("polled Mutex after completion"); } } } /// Tries to remove a waiter from the wait queue, and panics if the /// waiter is no longer valid. unsafe fn force_remove_waiter( &mut self, wait_node: &mut ListNode, ) { if !self.waiters.remove(wait_node) { // Panic if the address isn't found. This can only happen if the contract was // violated, e.g. the WaitQueueEntry got moved after the initial poll. panic!("Future could not be removed from wait queue"); } } /// Removes the waiter from the list. /// This function is only safe as long as the reference that is passed here /// equals the reference/address under which the waiter was added. /// The waiter must not have been moved in between. fn remove_waiter(&mut self, wait_node: &mut ListNode) { // SemaphoreAcquireFuture only needs to get removed if it had been added to // the wait queue of the Semaphore. This has happened in the PollState::Waiting case. // If the current waiter was notified, another waiter must get notified now. 
match wait_node.state { PollState::Notified => { if self.is_fair { // In a fair Mutex, the WaitQueueEntry is kept in the // linked list and must be removed here // Safety: Due to the state, we know that the node must be part // of the waiter list unsafe { self.force_remove_waiter(wait_node) }; } wait_node.state = PollState::Done; // Wakeup more waiters self.wakeup_waiters(); } PollState::Waiting => { // Remove the WaitQueueEntry from the linked list // Safety: Due to the state, we know that the node must be part // of the waiter list unsafe { self.force_remove_waiter(wait_node) }; wait_node.state = PollState::Done; } PollState::New | PollState::Done => {} } } } /// An RAII guard returned by the `acquire` and `try_acquire` methods. /// /// When this structure is dropped (falls out of scope), /// the amount of permits that was used in the `acquire()` call will be released /// back to the Semaphore. pub struct GenericSemaphoreReleaser<'a, MutexType: RawMutex> { /// The Semaphore which is associated with this Releaser semaphore: &'a GenericSemaphore, /// The amount of permits to release permits: usize, } impl core::fmt::Debug for GenericSemaphoreReleaser<'_, MutexType> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericSemaphoreReleaser").finish() } } impl GenericSemaphoreReleaser<'_, MutexType> { /// Prevents the SemaphoreReleaser from automatically releasing the permits /// when it gets dropped. /// This is helpful if the permits must be acquired for a longer lifetime /// than the one of the SemaphoreReleaser. /// If this method is used it is important to release the acquired permits /// manually back to the Semaphore. 
pub fn disarm(&mut self) -> usize { let permits = self.permits; self.permits = 0; permits } } impl Drop for GenericSemaphoreReleaser<'_, MutexType> { fn drop(&mut self) { // Release the requested amount of permits to the semaphore if self.permits != 0 { self.semaphore.state.lock().release(self.permits); } } } /// A future which resolves when the target semaphore has been successfully acquired. #[must_use = "futures do nothing unless polled"] pub struct GenericSemaphoreAcquireFuture<'a, MutexType: RawMutex> { /// The Semaphore which should get acquired trough this Future semaphore: Option<&'a GenericSemaphore>, /// Node for waiting at the semaphore wait_node: ListNode, /// Whether the obtained permits should automatically be released back /// to the semaphore. auto_release: bool, } // Safety: Futures can be sent between threads as long as the underlying // semaphore is thread-safe (Sync), which allows to poll/register/unregister from // a different thread. unsafe impl<'a, MutexType: RawMutex + Sync> Send for GenericSemaphoreAcquireFuture<'a, MutexType> { } impl<'a, MutexType: RawMutex> core::fmt::Debug for GenericSemaphoreAcquireFuture<'a, MutexType> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericSemaphoreAcquireFuture").finish() } } impl<'a, MutexType: RawMutex> Future for GenericSemaphoreAcquireFuture<'a, MutexType> { type Output = GenericSemaphoreReleaser<'a, MutexType>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside GenericSemaphoreAcquireFuture is stable, // and we don't move any fields inside the future until it gets dropped. 
let mut_self: &mut GenericSemaphoreAcquireFuture = unsafe { Pin::get_unchecked_mut(self) }; let semaphore = mut_self .semaphore .expect("polled GenericSemaphoreAcquireFuture after completion"); let mut semaphore_state = semaphore.state.lock(); let poll_res = unsafe { semaphore_state.try_acquire(&mut mut_self.wait_node, cx) }; match poll_res { Poll::Pending => Poll::Pending, Poll::Ready(()) => { // The semaphore was acquired. mut_self.semaphore = None; let to_release = match mut_self.auto_release { true => mut_self.wait_node.required_permits, false => 0, }; Poll::Ready(GenericSemaphoreReleaser::<'a, MutexType> { semaphore, permits: to_release, }) } } } } impl<'a, MutexType: RawMutex> FusedFuture for GenericSemaphoreAcquireFuture<'a, MutexType> { fn is_terminated(&self) -> bool { self.semaphore.is_none() } } impl<'a, MutexType: RawMutex> Drop for GenericSemaphoreAcquireFuture<'a, MutexType> { fn drop(&mut self) { // If this GenericSemaphoreAcquireFuture has been polled and it was added to the // wait queue at the semaphore, it must be removed before dropping. // Otherwise the semaphore would access invalid memory. if let Some(semaphore) = self.semaphore { let mut semaphore_state = semaphore.state.lock(); // Analysis: Does the number of permits play a role here? // The future was notified because there was a certain amount of permits // available. // Removing the waiter will wake up as many tasks as there are permits // available inside the Semaphore now. If this is bigger than the // amount of permits required for this task, then additional new // tasks might get woken. However that isn't bad, since // those tasks should get into the wait state anyway. semaphore_state.remove_waiter(&mut self.wait_node); } } } /// A futures-aware semaphore. 
pub struct GenericSemaphore { state: LockApiMutex, } // It is safe to send semaphores between threads, as long as they are not used and // thereby borrowed unsafe impl Send for GenericSemaphore {} // The Semaphore is thread-safe as long as the utilized Mutex is thread-safe unsafe impl Sync for GenericSemaphore {} impl core::fmt::Debug for GenericSemaphore { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("Semaphore") .field("permits", &self.permits()) .finish() } } impl GenericSemaphore { /// Creates a new futures-aware semaphore. /// /// `is_fair` defines whether the `Semaphore` should behave be fair regarding the /// order of waiters. A fair `Semaphore` will only allow the oldest waiter on /// a `Semaphore` to retry acquiring it once it's available again. /// Other waiters must wait until either this acquire attempt completes, and /// the `Semaphore` has enough permits after that, or until the /// [`SemaphoreAcquireFuture`] which tried to acquire the `Semaphore` is dropped. /// /// If the `Semaphore` isn't fair, waiters that wait for a high amount of /// permits might never succeed since the permits might be stolen in between /// by other waiters. Therefore use-cases which make use of very different /// amount of permits per acquire should use fair semaphores. /// For use-cases where each `acquire()` tries to acquire the same amount of /// permits an unfair `Semaphore` might provide throughput advantages. /// /// `permits` is the amount of permits that a semaphore should hold when /// created. pub fn new(is_fair: bool, permits: usize) -> GenericSemaphore { GenericSemaphore:: { state: LockApiMutex::new(SemaphoreState::new(is_fair, permits)), } } /// Acquire a certain amount of permits on a semaphore asynchronously. /// /// This method returns a future that will resolve once the given amount of /// permits have been acquired. 
/// The Future will resolve to a [`GenericSemaphoreReleaser`], which will /// release all acquired permits automatically when dropped. pub fn acquire( &self, nr_permits: usize, ) -> GenericSemaphoreAcquireFuture<'_, MutexType> { GenericSemaphoreAcquireFuture:: { semaphore: Some(&self), wait_node: ListNode::new(WaitQueueEntry::new(nr_permits)), auto_release: true, } } /// Tries to acquire a certain amount of permits on a semaphore. /// /// If acquiring the permits is successful, a [`GenericSemaphoreReleaser`] /// will be returned, which will release all acquired permits automatically /// when dropped. /// /// Otherwise `None` will be returned. pub fn try_acquire( &self, nr_permits: usize, ) -> Option> { if self.state.lock().try_acquire_sync(nr_permits) { Some(GenericSemaphoreReleaser { semaphore: self, permits: nr_permits, }) } else { None } } /// Releases the given amount of permits back to the semaphore. /// /// This method should in most cases not be used, since the /// [`GenericSemaphoreReleaser`] which is obtained when acquiring a Semaphore /// will automatically release the obtained permits again. /// /// Therefore this method should only be used if the automatic release was /// disabled by calling [`GenericSemaphoreReleaser::disarm`], /// or when the amount of permits in the Semaphore /// should increase from the initial amount. pub fn release(&self, nr_permits: usize) { self.state.lock().release(nr_permits) } /// Returns the amount of permits that are available on the semaphore pub fn permits(&self) -> usize { self.state.lock().permits() } } // Export a non thread-safe version using NoopLock /// A [`GenericSemaphore`] which is not thread-safe. pub type LocalSemaphore = GenericSemaphore; /// A [`GenericSemaphoreReleaser`] for [`LocalSemaphore`]. pub type LocalSemaphoreReleaser<'a> = GenericSemaphoreReleaser<'a, NoopLock>; /// A [`GenericSemaphoreAcquireFuture`] for [`LocalSemaphore`]. 
pub type LocalSemaphoreAcquireFuture<'a> = GenericSemaphoreAcquireFuture<'a, NoopLock>; #[cfg(feature = "std")] mod if_std { use super::*; // Export a thread-safe version using parking_lot::RawMutex /// A [`GenericSemaphore`] backed by [`parking_lot`]. pub type Semaphore = GenericSemaphore; /// A [`GenericSemaphoreReleaser`] for [`Semaphore`]. pub type SemaphoreReleaser<'a> = GenericSemaphoreReleaser<'a, parking_lot::RawMutex>; /// A [`GenericSemaphoreAcquireFuture`] for [`Semaphore`]. pub type SemaphoreAcquireFuture<'a> = GenericSemaphoreAcquireFuture<'a, parking_lot::RawMutex>; } #[cfg(feature = "std")] pub use self::if_std::*; #[cfg(feature = "alloc")] mod if_alloc { use super::*; use alloc::sync::Arc; /// An RAII guard returned by the `acquire` and `try_acquire` methods. /// /// When this structure is dropped (falls out of scope), /// the amount of permits that was used in the `acquire()` call will be released /// back to the Semaphore. pub struct GenericSharedSemaphoreReleaser { /// The Semaphore which is associated with this Releaser semaphore: GenericSharedSemaphore, /// The amount of permits to release permits: usize, } impl core::fmt::Debug for GenericSharedSemaphoreReleaser { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericSharedSemaphoreReleaser").finish() } } impl GenericSharedSemaphoreReleaser { /// Prevents the SharedSemaphoreReleaser from automatically releasing the permits /// when it gets dropped. /// /// This is helpful if the permits must be acquired for a longer lifetime /// than the one of the SemaphoreReleaser. /// /// If this method is used it is important to release the acquired permits /// manually back to the Semaphore. 
pub fn disarm(&mut self) -> usize { let permits = self.permits; self.permits = 0; permits } } impl Drop for GenericSharedSemaphoreReleaser { fn drop(&mut self) { // Release the requested amount of permits to the semaphore if self.permits != 0 { self.semaphore.state.lock().release(self.permits); } } } /// A future which resolves when the target semaphore has been successfully acquired. #[must_use = "futures do nothing unless polled"] pub struct GenericSharedSemaphoreAcquireFuture { /// The Semaphore which should get acquired trough this Future semaphore: Option>, /// Node for waiting at the semaphore wait_node: ListNode, /// Whether the obtained permits should automatically be released back /// to the semaphore. auto_release: bool, } // Safety: Futures can be sent between threads as long as the underlying // semaphore is thread-safe (Sync), which allows to poll/register/unregister from // a different thread. unsafe impl Send for GenericSharedSemaphoreAcquireFuture { } impl core::fmt::Debug for GenericSharedSemaphoreAcquireFuture { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("GenericSharedSemaphoreAcquireFuture") .finish() } } impl Future for GenericSharedSemaphoreAcquireFuture { type Output = GenericSharedSemaphoreReleaser; fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll { // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside // GenericSharedSemaphoreAcquireFuture is stable, // and we don't move any fields inside the future until it gets dropped. 
let mut_self: &mut GenericSharedSemaphoreAcquireFuture = unsafe { Pin::get_unchecked_mut(self) }; let semaphore = mut_self.semaphore.take().expect( "polled GenericSharedSemaphoreAcquireFuture after completion", ); let poll_res = unsafe { let mut semaphore_state = semaphore.state.lock(); semaphore_state.try_acquire(&mut mut_self.wait_node, cx) }; match poll_res { Poll::Pending => { mut_self.semaphore.replace(semaphore); Poll::Pending } Poll::Ready(()) => { let to_release = match mut_self.auto_release { true => mut_self.wait_node.required_permits, false => 0, }; Poll::Ready(GenericSharedSemaphoreReleaser:: { semaphore, permits: to_release, }) } } } } impl FusedFuture for GenericSharedSemaphoreAcquireFuture { fn is_terminated(&self) -> bool { self.semaphore.is_none() } } impl Drop for GenericSharedSemaphoreAcquireFuture { fn drop(&mut self) { // If this GenericSharedSemaphoreAcquireFuture has been polled and it was added to the // wait queue at the semaphore, it must be removed before dropping. // Otherwise the semaphore would access invalid memory. if let Some(semaphore) = self.semaphore.take() { let mut semaphore_state = semaphore.state.lock(); // Analysis: Does the number of permits play a role here? // The future was notified because there was a certain amount of permits // available. // Removing the waiter will wake up as many tasks as there are permits // available inside the Semaphore now. If this is bigger than the // amount of permits required for this task, then additional new // tasks might get woken. However that isn't bad, since // those tasks should get into the wait state anyway. semaphore_state.remove_waiter(&mut self.wait_node); } } } /// A futures-aware shared semaphore. 
pub struct GenericSharedSemaphore { state: Arc>, } impl Clone for GenericSharedSemaphore { fn clone(&self) -> Self { Self { state: self.state.clone(), } } } // It is safe to send semaphores between threads, as long as they are not used and // thereby borrowed unsafe impl Send for GenericSharedSemaphore { } // The Semaphore is thread-safe as long as the utilized Mutex is thread-safe unsafe impl Sync for GenericSharedSemaphore { } impl core::fmt::Debug for GenericSharedSemaphore { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("Semaphore") .field("permits", &self.permits()) .finish() } } impl GenericSharedSemaphore { /// Creates a new futures-aware shared semaphore. /// /// See `GenericSharedSemaphore` for more information. pub fn new( is_fair: bool, permits: usize, ) -> GenericSharedSemaphore { GenericSharedSemaphore:: { state: Arc::new(LockApiMutex::new(SemaphoreState::new( is_fair, permits, ))), } } /// Acquire a certain amount of permits on a semaphore asynchronously. /// /// This method returns a future that will resolve once the given amount of /// permits have been acquired. /// The Future will resolve to a [`GenericSharedSemaphoreReleaser`], which will /// release all acquired permits automatically when dropped. pub fn acquire( &self, nr_permits: usize, ) -> GenericSharedSemaphoreAcquireFuture { GenericSharedSemaphoreAcquireFuture:: { semaphore: Some(self.clone()), wait_node: ListNode::new(WaitQueueEntry::new(nr_permits)), auto_release: true, } } /// Tries to acquire a certain amount of permits on a semaphore. /// /// If acquiring the permits is successful, a [`GenericSharedSemaphoreReleaser`] /// will be returned, which will release all acquired permits automatically /// when dropped. /// /// Otherwise `None` will be returned. 
pub fn try_acquire( &self, nr_permits: usize, ) -> Option> { if self.state.lock().try_acquire_sync(nr_permits) { Some(GenericSharedSemaphoreReleaser { semaphore: self.clone(), permits: nr_permits, }) } else { None } } /// Releases the given amount of permits back to the semaphore. /// /// This method should in most cases not be used, since the /// [`GenericSharedSemaphoreReleaser`] which is obtained when acquiring a Semaphore /// will automatically release the obtained permits again. /// /// Therefore this method should only be used if the automatic release was /// disabled by calling [`GenericSharedSemaphoreReleaser::disarm`], /// or when the amount of permits in the Semaphore /// should increase from the initial amount. pub fn release(&self, nr_permits: usize) { self.state.lock().release(nr_permits) } /// Returns the amount of permits that are available on the semaphore pub fn permits(&self) -> usize { self.state.lock().permits() } } // Export parking_lot based shared semaphores in std mode #[cfg(feature = "std")] mod if_std { use super::*; /// A [`GenericSharedSemaphore`] backed by [`parking_lot`]. pub type SharedSemaphore = GenericSharedSemaphore; /// A [`GenericSharedSemaphoreReleaser`] for [`SharedSemaphore`]. pub type SharedSemaphoreReleaser = GenericSharedSemaphoreReleaser; /// A [`GenericSharedSemaphoreAcquireFuture`] for [`SharedSemaphore`]. pub type SharedSemaphoreAcquireFuture = GenericSharedSemaphoreAcquireFuture; } #[cfg(feature = "std")] pub use self::if_std::*; } #[cfg(feature = "alloc")] pub use self::if_alloc::*; futures-intrusive-0.5.0/src/timer/clock.rs000064400000000000000000000054100072674642500167510ustar 00000000000000//! Monotonic clocks use core::sync::atomic::{AtomicUsize, Ordering}; /// A monotonic source of time. /// /// Clocks must always returning increasing timestamps. pub trait Clock: Sync { /// Returns a timestamp in milliseconds which represents the current time /// according to the clock. 
/// /// Clocks must only return timestamps that are bigger or equal than what /// they returned on the last call to `now()`. fn now(&self) -> u64; } /// A [`Clock`] which can be set to arbitrary timestamps for testing purposes. /// /// It can be used in a test case as demonstrated in the following example: /// ``` /// use futures_intrusive::timer::MockClock; /// # #[cfg(feature = "std")] /// # use futures_intrusive::timer::TimerService; /// /// static TEST_CLOCK: MockClock = MockClock::new(); /// TEST_CLOCK.set_time(2300); // Set the current time /// # #[cfg(feature = "std")] /// let timer = TimerService::new(&TEST_CLOCK); /// ``` pub struct MockClock { now: core::sync::atomic::AtomicUsize, } impl core::fmt::Debug for MockClock { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { let now = self.now(); f.debug_struct("MockClock").field("now", &now).finish() } } impl MockClock { /// Creates a new instance of the [`MockClock`], which is initialized to /// timestamp 0. pub const fn new() -> MockClock { MockClock { now: AtomicUsize::new(0), } } /// Sets the current timestamp inside to [`MockClock`] to the given value pub fn set_time(&self, timestamp: u64) { if timestamp > (core::usize::MAX as u64) { panic!("timestamps bigger than usize::MAX are not supported") } let to_set = timestamp as usize; self.now.store(to_set, Ordering::Release); } } impl Clock for MockClock { fn now(&self) -> u64 { self.now.load(Ordering::Relaxed) as u64 } } #[cfg(feature = "std")] mod if_std { use super::*; use std::time::Instant; /// A Clock that makes use of the Standard libraries [`std::time::Instant`] /// functionality in order to generate monotonically increasing timestamps. 
pub struct StdClock { start: Instant, } impl core::fmt::Debug for StdClock { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("StdClock").finish() } } impl StdClock { /// Creates a new [`StdClock`] pub fn new() -> StdClock { StdClock { start: Instant::now(), } } } impl Clock for StdClock { fn now(&self) -> u64 { let elapsed = Instant::now() - self.start; elapsed.as_millis() as u64 } } } #[cfg(feature = "std")] pub use self::if_std::*; futures-intrusive-0.5.0/src/timer/mod.rs000064400000000000000000000010670072674642500164410ustar 00000000000000//! Asynchronous timers. //! //! This module provides a timer implementation which returns awaitable //! `Future`s. //! The timer can work with a configurable clock source. In order to utilize //! the system clock, a global instance `StdClock` can be utilized. mod clock; pub use self::clock::{Clock, MockClock}; #[cfg(feature = "std")] pub use self::clock::StdClock; mod timer; pub use self::timer::{ GenericTimerService, LocalTimer, LocalTimerFuture, LocalTimerService, Timer, TimerFuture, }; #[cfg(feature = "std")] pub use self::timer::TimerService; futures-intrusive-0.5.0/src/timer/timer.rs000064400000000000000000000365520072674642500170110ustar 00000000000000//! An asynchronously awaitable timer use super::clock::Clock; use crate::{ intrusive_pairing_heap::{HeapNode, PairingHeap}, utils::update_waker_ref, NoopLock, }; use core::{pin::Pin, time::Duration}; use futures_core::{ future::{FusedFuture, Future}, task::{Context, Poll, Waker}, }; use lock_api::{Mutex, RawMutex}; /// Tracks how the future had interacted with the timer #[derive(PartialEq)] enum PollState { /// The task is not registered at the wait queue at the timer Unregistered, /// The task was added to the wait queue at the timer Registered, /// The timer has expired and was thereby removed from the wait queue at /// the timer. Having this extra state avoids to query the clock for an /// extra time. 
Expired, } /// Tracks the timer futures waiting state. struct TimerQueueEntry { /// Timestamp when the timer expires expiry: u64, /// The task handle of the waiting task task: Option, /// Current polling state state: PollState, } impl TimerQueueEntry { /// Creates a new TimerQueueEntry fn new(expiry: u64) -> TimerQueueEntry { TimerQueueEntry { expiry, task: None, state: PollState::Unregistered, } } } impl PartialEq for TimerQueueEntry { fn eq(&self, other: &TimerQueueEntry) -> bool { // This is technically not correct. However for the usage in this module // we only need to compare timers by expiration. self.expiry == other.expiry } } impl Eq for TimerQueueEntry {} impl PartialOrd for TimerQueueEntry { fn partial_cmp( &self, other: &TimerQueueEntry, ) -> Option { // Compare timer queue entries by expiration time self.expiry.partial_cmp(&other.expiry) } } impl Ord for TimerQueueEntry { fn cmp(&self, other: &TimerQueueEntry) -> core::cmp::Ordering { self.expiry.cmp(&other.expiry) } } /// Internal state of the timer struct TimerState { /// The clock which is utilized clock: &'static dyn Clock, /// The heap of waiters, which are waiting for their timer to expire waiters: PairingHeap, } impl TimerState { fn new(clock: &'static dyn Clock) -> TimerState { TimerState { clock, waiters: PairingHeap::new(), } } /// Registers the timer future at the Timer. /// This function is only safe as long as the `wait_node`s address is guaranteed /// to be stable until it gets removed from the queue. 
unsafe fn try_wait( &mut self, wait_node: &mut HeapNode, cx: &mut Context<'_>, ) -> Poll<()> { match wait_node.state { PollState::Unregistered => { let now = self.clock.now(); if now >= wait_node.expiry { // The timer is already expired wait_node.state = PollState::Expired; Poll::Ready(()) } else { // Added the task to the wait queue wait_node.task = Some(cx.waker().clone()); wait_node.state = PollState::Registered; self.waiters.insert(wait_node); Poll::Pending } } PollState::Registered => { // Since the timer wakes up all waiters and moves their states to // Expired when the timer expired, it can't be expired here yet. // However the caller might have passed a different `Waker`. // In this case we need to update it. update_waker_ref(&mut wait_node.task, cx); Poll::Pending } PollState::Expired => Poll::Ready(()), } } fn remove_waiter(&mut self, wait_node: &mut HeapNode) { // TimerFuture only needs to get removed if it had been added to // the wait queue of the timer. This has happened in the PollState::Registered case. if let PollState::Registered = wait_node.state { // Safety: Due to the state, we know that the node must be part // of the waiter heap unsafe { self.waiters.remove(wait_node) }; wait_node.state = PollState::Unregistered; } } /// Returns a timestamp when the next timer expires. /// /// For thread-safe timers, the returned value is not precise and subject to /// race-conditions, since other threads can add timer in the meantime. fn next_expiration(&self) -> Option { // Safety: We ensure that any node in the heap remains alive unsafe { self.waiters.peek_min().map(|first| first.as_ref().expiry) } } /// Checks whether any of the attached Futures is expired fn check_expirations(&mut self) { let now = self.clock.now(); while let Some(mut first) = self.waiters.peek_min() { // Safety: We ensure that any node in the heap remains alive unsafe { let entry = first.as_mut(); let first_expiry = entry.expiry; if now >= first_expiry { // The timer is expired. 
entry.state = PollState::Expired; if let Some(task) = entry.task.take() { task.wake(); } } else { // Remaining timers are not expired break; } // Remove the expired timer self.waiters.remove(entry); } } } } /// Adapter trait that allows Futures to generically interact with timer /// implementations via dynamic dispatch. trait TimerAccess { unsafe fn try_wait( &self, wait_node: &mut HeapNode, cx: &mut Context<'_>, ) -> Poll<()>; fn remove_waiter(&self, wait_node: &mut HeapNode); } /// An asynchronously awaitable timer which is bound to a thread. /// /// The timer operates on millisecond precision and makes use of a configurable /// clock source. /// /// The timer allows to wait asynchronously either for a certain duration, /// or until the provided [`Clock`] reaches a certain timestamp. pub trait LocalTimer { /// Returns a future that gets fulfilled after the given `Duration` fn delay(&self, delay: Duration) -> LocalTimerFuture; /// Returns a future that gets fulfilled when the utilized [`Clock`] reaches /// the given timestamp. fn deadline(&self, timestamp: u64) -> LocalTimerFuture; } /// An asynchronously awaitable thread-safe timer. /// /// The timer operates on millisecond precision and makes use of a configurable /// clock source. /// /// The timer allows to wait asynchronously either for a certain duration, /// or until the provided [`Clock`] reaches a certain timestamp. pub trait Timer { /// Returns a future that gets fulfilled after the given `Duration` fn delay(&self, delay: Duration) -> TimerFuture; /// Returns a future that gets fulfilled when the utilized [`Clock`] reaches /// the given timestamp. fn deadline(&self, timestamp: u64) -> TimerFuture; } /// An asynchronously awaitable timer. /// /// The timer operates on millisecond precision and makes use of a configurable /// clock source. /// /// The timer allows to wait asynchronously either for a certain duration, /// or until the provided [`Clock`] reaches a certain timestamp. 
/// /// In order to unblock tasks that are waiting on the timer, /// [`check_expirations`](GenericTimerService::check_expirations) /// must be called in regular intervals on this timer service. /// /// The timer can either be running on a separate timer thread (in case a /// thread-safe timer type is utilize), or it can be integrated into an executor /// in order to minimize context switches. pub struct GenericTimerService { inner: Mutex, } // The timer can be sent to other threads as long as it's not borrowed unsafe impl Send for GenericTimerService { } // The timer is thread-safe as long as it uses a thread-safe mutex unsafe impl Sync for GenericTimerService { } impl core::fmt::Debug for GenericTimerService { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("TimerService").finish() } } impl GenericTimerService { /// Creates a new Timer in the given state. /// /// The Timer will query the provided [`Clock`] instance for the current /// time whenever required. /// /// In order to create a create a clock which utilizes system time, /// [`StdClock`](super::StdClock) can be utilized. /// In order to simulate time for test purposes, /// [`MockClock`](super::MockClock) can be utilized. pub fn new(clock: &'static dyn Clock) -> GenericTimerService { GenericTimerService:: { inner: Mutex::new(TimerState::new(clock)), } } /// Returns a timestamp when the next timer expires. /// /// For thread-safe timers, the returned value is not precise and subject to /// race-conditions, since other threads can add timer in the meantime. /// /// Therefore adding any timer to the [`GenericTimerService`] should also /// make sure to wake up the executor which polls for timeouts, in order to /// let it capture the latest change. pub fn next_expiration(&self) -> Option { self.inner.lock().next_expiration() } /// Checks whether any of the attached [`TimerFuture`]s has expired. /// In this case the associated task is woken up. 
pub fn check_expirations(&self) { self.inner.lock().check_expirations() } /// Returns a deadline based on the current timestamp plus the given Duration fn deadline_from_now(&self, duration: Duration) -> u64 { let now = self.inner.lock().clock.now(); let duration_ms = core::cmp::min(duration.as_millis(), core::u64::MAX as u128) as u64; now.saturating_add(duration_ms) } } impl LocalTimer for GenericTimerService { /// Returns a future that gets fulfilled after the given [`Duration`] fn delay(&self, delay: Duration) -> LocalTimerFuture { let deadline = self.deadline_from_now(delay); LocalTimer::deadline(&*self, deadline) } /// Returns a future that gets fulfilled when the utilized [`Clock`] reaches /// the given timestamp. fn deadline(&self, timestamp: u64) -> LocalTimerFuture { LocalTimerFuture { timer: Some(self), wait_node: HeapNode::new(TimerQueueEntry::new(timestamp)), } } } impl Timer for GenericTimerService where MutexType: Sync, { /// Returns a future that gets fulfilled after the given [`Duration`] fn delay(&self, delay: Duration) -> TimerFuture { let deadline = self.deadline_from_now(delay); Timer::deadline(&*self, deadline) } /// Returns a future that gets fulfilled when the utilized [`Clock`] reaches /// the given timestamp. fn deadline(&self, timestamp: u64) -> TimerFuture { TimerFuture { timer_future: LocalTimerFuture { timer: Some(self), wait_node: HeapNode::new(TimerQueueEntry::new(timestamp)), }, } } } impl TimerAccess for GenericTimerService { unsafe fn try_wait( &self, wait_node: &mut HeapNode, cx: &mut Context<'_>, ) -> Poll<()> { self.inner.lock().try_wait(wait_node, cx) } fn remove_waiter(&self, wait_node: &mut HeapNode) { self.inner.lock().remove_waiter(wait_node) } } /// A Future that is resolved once the requested time has elapsed. 
#[must_use = "futures do nothing unless polled"] pub struct LocalTimerFuture<'a> { /// The Timer that is associated with this TimerFuture timer: Option<&'a dyn TimerAccess>, /// Node for waiting on the timer wait_node: HeapNode, } impl<'a> core::fmt::Debug for LocalTimerFuture<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("LocalTimerFuture").finish() } } impl<'a> Future for LocalTimerFuture<'a> { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { // It might be possible to use Pin::map_unchecked here instead of the two unsafe APIs. // However this didn't seem to work for some borrow checker reasons // Safety: The next operations are safe, because Pin promises us that // the address of the wait queue entry inside TimerFuture is stable, // and we don't move any fields inside the future until it gets dropped. let mut_self: &mut LocalTimerFuture = unsafe { Pin::get_unchecked_mut(self) }; let timer = mut_self.timer.expect("polled TimerFuture after completion"); let poll_res = unsafe { timer.try_wait(&mut mut_self.wait_node, cx) }; if poll_res.is_ready() { // A value was available mut_self.timer = None; } poll_res } } impl<'a> FusedFuture for LocalTimerFuture<'a> { fn is_terminated(&self) -> bool { self.timer.is_none() } } impl<'a> Drop for LocalTimerFuture<'a> { fn drop(&mut self) { // If this TimerFuture has been polled and it was added to the // wait queue at the timer, it must be removed before dropping. // Otherwise the timer would access invalid memory. if let Some(timer) = self.timer { timer.remove_waiter(&mut self.wait_node); } } } /// A Future that is resolved once the requested time has elapsed. #[must_use = "futures do nothing unless polled"] pub struct TimerFuture<'a> { /// The Timer that is associated with this TimerFuture timer_future: LocalTimerFuture<'a>, } // Safety: TimerFutures are only returned by GenericTimerService instances which // are thread-safe (RawMutex: Sync). 
unsafe impl<'a> Send for TimerFuture<'a> {} impl<'a> core::fmt::Debug for TimerFuture<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("TimerFuture").finish() } } impl<'a> Future for TimerFuture<'a> { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { // Safety: TimerFuture is a pure wrapper around LocalTimerFuture. // The inner value is never moved let inner_pin = unsafe { Pin::map_unchecked_mut(self, |fut| &mut fut.timer_future) }; inner_pin.poll(cx) } } impl<'a> FusedFuture for TimerFuture<'a> { fn is_terminated(&self) -> bool { self.timer_future.is_terminated() } } // Export a non thread-safe version using NoopLock /// A [`GenericTimerService`] implementation which is not thread-safe. pub type LocalTimerService = GenericTimerService; #[cfg(feature = "std")] mod if_std { use super::*; // Export a thread-safe version using parking_lot::RawMutex /// A [`GenericTimerService`] implementation backed by [`parking_lot`]. pub type TimerService = GenericTimerService; } #[cfg(feature = "std")] pub use self::if_std::*; futures-intrusive-0.5.0/src/utils/mod.rs000064400000000000000000000007020072674642500164540ustar 00000000000000//! Utilities which are used within the library use core::task::{Context, Waker}; /// Updates a `Waker` which is stored inside a `Option` to the newest value /// which is delivered via a `Context`. pub fn update_waker_ref(waker_option: &mut Option, cx: &Context) { if waker_option .as_ref() .map_or(true, |stored_waker| !stored_waker.will_wake(cx.waker())) { *waker_option = Some(cx.waker().clone()); } } futures-intrusive-0.5.0/tests/manual_reset_event.rs000064400000000000000000000162030072674642500207730ustar 00000000000000use futures::future::{FusedFuture, Future}; use futures::task::Context; use futures_intrusive::sync::LocalManualResetEvent; use futures_test::task::{new_count_waker, panic_waker}; use pin_utils::pin_mut; macro_rules! 
gen_event_tests { ($mod_name:ident, $event_type:ident) => { mod $mod_name { use super::*; #[test] fn synchronous() { let event = $event_type::new(false); assert!(!event.is_set()); event.set(); assert!(event.is_set()); event.reset(); assert!(!event.is_set()); } #[test] fn immediately_ready_event() { let event = $event_type::new(true); let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); assert!(event.is_set()); let poll = event.wait(); pin_mut!(poll); assert!(!poll.as_mut().is_terminated()); assert!(poll.as_mut().poll(cx).is_ready()); assert!(poll.as_mut().is_terminated()); } #[test] fn cancel_mid_wait() { let event = $event_type::new(false); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); { // Cancel a wait in between other waits // In order to arbitrarily drop a non movable future we have to box and pin it let mut poll1 = Box::pin(event.wait()); let mut poll2 = Box::pin(event.wait()); let mut poll3 = Box::pin(event.wait()); let mut poll4 = Box::pin(event.wait()); let mut poll5 = Box::pin(event.wait()); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(!poll1.is_terminated()); assert!(!poll2.is_terminated()); assert!(!poll3.is_terminated()); assert!(!poll4.is_terminated()); assert!(!poll5.is_terminated()); // Cancel 2 futures. 
Only the remaining ones should get completed drop(poll2); drop(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); event.set(); assert!(poll1.as_mut().poll(cx).is_ready()); assert!(poll3.as_mut().poll(cx).is_ready()); assert!(poll5.as_mut().poll(cx).is_ready()); assert!(poll1.is_terminated()); assert!(poll3.is_terminated()); assert!(poll5.is_terminated()); } assert_eq!(count, 3); } #[test] fn cancel_end_wait() { let event = $event_type::new(false); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let poll1 = event.wait(); let poll2 = event.wait(); let poll3 = event.wait(); let poll4 = event.wait(); pin_mut!(poll1); pin_mut!(poll2); pin_mut!(poll3); pin_mut!(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); // Start polling some wait handles which get cancelled // before new ones are attached { let poll5 = event.wait(); let poll6 = event.wait(); pin_mut!(poll5); pin_mut!(poll6); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(poll6.as_mut().poll(cx).is_pending()); } assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); event.set(); assert!(poll1.as_mut().poll(cx).is_ready()); assert!(poll2.as_mut().poll(cx).is_ready()); assert!(poll3.as_mut().poll(cx).is_ready()); assert!(poll4.as_mut().poll(cx).is_ready()); assert_eq!(count, 4); } #[test] fn poll_from_multiple_executors() { let (waker_1, count_1) = new_count_waker(); let (waker_2, count_2) = new_count_waker(); let event = $event_type::new(false); let cx_1 = &mut Context::from_waker(&waker_1); let cx_2 = &mut Context::from_waker(&waker_2); let fut = event.wait(); pin_mut!(fut); assert!(fut.as_mut().poll(cx_1).is_pending()); assert!(fut.as_mut().poll(cx_2).is_pending()); event.set(); assert!(event.is_set()); assert_eq!(count_1, 0); assert_eq!(count_2, 1); 
assert!(fut.as_mut().poll(cx_2).is_ready()); assert!(fut.as_mut().is_terminated()); } } }; } gen_event_tests!(local_manual_reset_event_tests, LocalManualResetEvent); #[cfg(feature = "std")] mod if_std { use super::*; use futures::executor::block_on; use futures_intrusive::sync::ManualResetEvent; use std::sync::Arc; use std::thread; use std::time; gen_event_tests!(manual_reset_event_tests, ManualResetEvent); fn is_send(_: &T) {} fn is_send_value(_: T) {} fn is_sync(_: &T) {} #[test] fn event_futures_are_send() { let event = ManualResetEvent::new(false); is_sync(&event); { let wait_fut = event.wait(); is_send(&wait_fut); pin_mut!(wait_fut); is_send(&wait_fut); } is_send_value(event); } #[test] fn multithreaded_smoke() { let event = Arc::new(ManualResetEvent::new(false)); let waiters: Vec> = [1..4] .iter() .map(|_| { let ev = event.clone(); thread::spawn(move || { block_on(ev.wait()); time::Instant::now() }) }) .collect(); let start = time::Instant::now(); thread::sleep(time::Duration::from_millis(100)); event.set(); for waiter in waiters.into_iter() { let end_time = waiter.join().unwrap(); let diff = end_time - start; assert!(diff > time::Duration::from_millis(50)); } } } futures-intrusive-0.5.0/tests/mpmc_channel.rs000064400000000000000000001542300072674642500175420ustar 00000000000000use futures::task::{Context, Poll}; use futures::{ future::{FusedFuture, Future}, stream::{FusedStream, Stream}, }; use futures_intrusive::channel::{ ChannelSendError, LocalChannel, LocalUnbufferedChannel, }; use futures_test::task::{new_count_waker, panic_waker}; use pin_utils::pin_mut; #[derive(Debug)] struct DropCounterInner { count: std::collections::HashMap, } #[derive(Clone, Debug)] struct DropCounter { inner: std::sync::Arc>, } impl DropCounter { fn new() -> DropCounter { DropCounter { inner: std::sync::Arc::new(std::sync::Mutex::new( DropCounterInner { count: std::collections::HashMap::new(), }, )), } } fn register_drop(&self, id: usize) { let mut guard = 
self.inner.lock().unwrap(); *guard.count.entry(id).or_insert(0) += 1; } fn clear(&self) { let mut guard = self.inner.lock().unwrap(); guard.count.clear(); } fn drops(&self, id: usize) -> usize { let guard = self.inner.lock().unwrap(); *(guard.count.get(&id).unwrap_or(&0)) } } #[derive(Debug, PartialEq)] struct CountedElemInner { id: usize, } #[derive(Debug, Clone)] struct CountedElem { drop_counter: DropCounter, inner: std::sync::Arc>, } impl PartialEq for CountedElem { fn eq(&self, other: &CountedElem) -> bool { self.id() == other.id() } } impl CountedElem { fn new(id: usize, drop_counter: DropCounter) -> CountedElem { CountedElem { inner: std::sync::Arc::new(std::sync::Mutex::new( CountedElemInner { id }, )), drop_counter, } } fn id(&self) -> usize { let guard = self.inner.lock().unwrap(); guard.id } fn strong_count(&self) -> usize { std::sync::Arc::strong_count(&self.inner) } } impl Drop for CountedElem { fn drop(&mut self) { self.drop_counter.register_drop(self.id()) } } fn assert_send_done( cx: &mut Context, send_fut: &mut core::pin::Pin<&mut FutureType>, expected: Result<(), ChannelSendError>, ) where FutureType: Future>> + FusedFuture, T: PartialEq + core::fmt::Debug, { match send_fut.as_mut().poll(cx) { Poll::Pending => panic!("future is not ready"), Poll::Ready(res) => { if res != expected { panic!("Unexpected send result: {:?}", res); } } }; assert!(send_fut.as_mut().is_terminated()); } fn assert_next_done( cx: &mut Context, stream_fut: &mut core::pin::Pin<&mut S>, value: Option, ) where S: Stream, T: PartialEq + core::fmt::Debug, { match stream_fut.as_mut().poll_next(cx) { Poll::Pending => panic!("future is not ready"), Poll::Ready(res) => { if res != value { panic!("Unexpected value {:?}", res); } } }; } // A stream future shouldn't terminate until the stream // terminates. 
fn assert_receive_done( cx: &mut Context, receive_fut: &mut core::pin::Pin<&mut FutureType>, value: Option, ) where FutureType: Future> + FusedFuture, T: PartialEq + core::fmt::Debug, { match receive_fut.as_mut().poll(cx) { Poll::Pending => panic!("future is not ready"), Poll::Ready(res) => { if res != value { panic!("Unexpected value {:?}", res); } } }; assert!(receive_fut.as_mut().is_terminated()); } macro_rules! gen_mpmc_tests { ($mod_name:ident, $channel_type:ident, $unbuffered_channel_type:ident) => { mod $mod_name { use super::*; type ChannelType = $channel_type; type UnbufferedChannelType = $unbuffered_channel_type; fn assert_send( cx: &mut Context, channel: &ChannelType, value: i32, ) { let send_fut = channel.send(value); pin_mut!(send_fut); assert!(!send_fut.as_mut().is_terminated()); assert_send_done(cx, &mut send_fut, Ok(())); } macro_rules! assert_receive { ($cx:ident, $channel:expr, $value: expr) => { let receive_fut = $channel.receive(); pin_mut!(receive_fut); assert!(!receive_fut.as_mut().is_terminated()); assert_receive_done($cx, &mut receive_fut, $value); }; } #[test] fn send_on_closed_channel() { let channel = ChannelType::new(); let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); assert!(channel.close().is_newly_closed()); let fut = channel.send(5); pin_mut!(fut); assert_send_done(cx, &mut fut, Err(ChannelSendError(5))); } #[test] fn unbuffered_try_receive() { let channel = UnbufferedChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let fut = channel.send(5); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); channel.try_receive().unwrap(); assert_eq!(count, 1); assert_send_done(cx, &mut fut, Ok(())); } #[test] fn try_send_recv_smoke_test() { let channel = ChannelType::with_capacity(3); for _ in 0..3 { channel.try_send(5).unwrap(); } channel.try_send(5).unwrap_err(); for _ in 0..3 { channel.try_receive().unwrap(); } let err = 
channel.try_receive().unwrap_err(); assert!(err.is_empty()); } #[test] fn close_state() { let channel = ChannelType::with_capacity(3); assert!(channel.close().is_newly_closed()); assert!(channel.close().is_already_closed()); assert!(channel.close().is_already_closed()); assert!(channel.close().is_already_closed()); } #[test] fn try_send_full_channel() { let channel = ChannelType::with_capacity(3); for _ in 0..3 { channel.try_send(5).unwrap(); } let err = channel.try_send(5).unwrap_err(); assert!(err.is_full()); } #[test] fn try_send_on_closed_channel() { let channel = ChannelType::new(); assert!(channel.close().is_newly_closed()); let err = channel.try_send(5).unwrap_err(); assert!(err.is_closed()); } #[test] fn try_receive_empty_channel() { let channel = ChannelType::with_capacity(3); let err = channel.try_receive().unwrap_err(); assert!(err.is_empty()); } #[test] fn try_recv_on_closed_channel() { let channel = ChannelType::new(); channel.try_send(5).unwrap(); assert!(channel.close().is_newly_closed()); channel.try_receive().unwrap(); let err = channel.try_receive().unwrap_err(); assert!(err.is_closed()); } #[test] #[should_panic] fn try_send_unbuffered_panics() { let channel = UnbufferedChannelType::new(); let _ = channel.try_send(5); } #[test] fn buffered_close_unblocks_send() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); // Fill the channel assert_send(cx, &channel, 5); assert_send(cx, &channel, 6); assert_send(cx, &channel, 7); let fut = channel.send(8); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = channel.send(9); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert!(channel.close().is_newly_closed()); assert_eq!(count, 2); assert_send_done(cx, &mut fut, Err(ChannelSendError(8))); assert_send_done(cx, &mut fut2, Err(ChannelSendError(9))); } #[test] fn unbuffered_close_unblocks_send() { let channel = 
UnbufferedChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let fut = channel.send(8); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = channel.send(9); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert!(channel.close().is_newly_closed()); assert_eq!(count, 2); assert_send_done(cx, &mut fut, Err(ChannelSendError(8))); assert_send_done(cx, &mut fut2, Err(ChannelSendError(9))); } #[test] fn close_unblocks_receive() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let fut = channel.receive(); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = channel.receive(); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert!(channel.close().is_newly_closed()); assert_eq!(count, 2); assert_receive_done(cx, &mut fut, None); assert_receive_done(cx, &mut fut2, None); } #[test] fn receive_after_send() { let channel = ChannelType::new(); let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); assert_send(cx, &channel, 1); assert_send(cx, &channel, 2); assert_receive!(cx, &channel, Some(1)); assert_receive!(cx, &channel, Some(2)); assert_send(cx, &channel, 5); assert_send(cx, &channel, 6); assert_send(cx, &channel, 7); assert!(channel.close().is_newly_closed()); assert_receive!(cx, &channel, Some(5)); assert_receive!(cx, &channel, Some(6)); assert_receive!(cx, &channel, Some(7)); assert_receive!(cx, &channel, None); } #[test] fn buffered_send_unblocks_receive() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let fut = channel.receive(); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); let fut2 = channel.receive(); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert_send(cx, &channel, 99); 
assert_eq!(count, 1); assert_receive_done(cx, &mut fut, Some(99)); assert!(fut2.as_mut().poll(cx).is_pending()); assert_send(cx, &channel, 111); assert_eq!(count, 2); assert_receive_done(cx, &mut fut2, Some(111)); } #[test] fn unbuffered_send_unblocks_receive() { let channel = UnbufferedChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let fut = channel.receive(); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); let fut2 = channel.receive(); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); // In the unbuffered send case the send can't complete from the // Sender point-of-view until a receiver actively catches the // values. Therefore this returns pending. let futs1 = channel.send(99); let futs2 = channel.send(111); pin_mut!(futs1, futs2); assert!(futs1.as_mut().poll(cx).is_pending()); assert_eq!(count, 1); assert_receive_done(cx, &mut fut, Some(99)); assert_eq!(count, 2); assert_send_done(cx, &mut futs1, Ok(())); assert!(fut2.as_mut().poll(cx).is_pending()); assert!(futs2.as_mut().poll(cx).is_pending()); assert_eq!(count, 3); assert_receive_done(cx, &mut fut2, Some(111)); assert_eq!(count, 4); assert_send_done(cx, &mut futs2, Ok(())); } #[test] fn buffered_receive_unblocks_send() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); // Fill the channel assert_send(cx, &channel, 1); assert_send(cx, &channel, 2); assert_send(cx, &channel, 3); let fut = channel.send(4); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = channel.send(5); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert_receive!(cx, &channel, Some(1)); assert_eq!(count, 1); assert_send_done(cx, &mut fut, Ok(())); assert!(fut.is_terminated()); assert!(fut2.as_mut().poll(cx).is_pending()); assert_receive!(cx, &channel, Some(2)); assert_eq!(count, 2); 
assert_send_done(cx, &mut fut2, Ok(())); assert!(fut2.is_terminated()); } #[test] fn unbuffered_receive_unblocks_send() { let channel = UnbufferedChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let fut = channel.send(4); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = channel.send(5); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert_receive!(cx, &channel, Some(4)); assert_eq!(count, 1); assert_send_done(cx, &mut fut, Ok(())); assert!(fut.is_terminated()); assert!(fut2.as_mut().poll(cx).is_pending()); assert_receive!(cx, &channel, Some(5)); assert_eq!(count, 2); assert_send_done(cx, &mut fut2, Ok(())); assert!(fut2.is_terminated()); } #[test] fn cancel_send_mid_wait() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); assert_send(cx, &channel, 5); assert_send(cx, &channel, 6); assert_send(cx, &channel, 7); { // Cancel a wait in between other waits // In order to arbitrarily drop a non movable future we have to box and pin it let mut poll1 = Box::pin(channel.send(8)); let mut poll2 = Box::pin(channel.send(9)); let mut poll3 = Box::pin(channel.send(10)); let mut poll4 = Box::pin(channel.send(11)); let mut poll5 = Box::pin(channel.send(12)); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(!poll1.is_terminated()); assert!(!poll2.is_terminated()); assert!(!poll3.is_terminated()); assert!(!poll4.is_terminated()); assert!(!poll5.is_terminated()); // Cancel 2 futures. 
Only the remaining ones should get completed drop(poll2); drop(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_receive!(cx, &channel, Some(5)); assert_eq!(count, 1); assert_send_done(cx, &mut poll1.as_mut(), Ok(())); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_receive!(cx, &channel, Some(6)); assert_receive!(cx, &channel, Some(7)); assert_eq!(count, 3); assert_send_done(cx, &mut poll3.as_mut(), Ok(())); assert_send_done(cx, &mut poll5.as_mut(), Ok(())); } assert_eq!(count, 3); } #[test] fn cancel_send_end_wait() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); assert_send(cx, &channel, 100); assert_send(cx, &channel, 101); assert_send(cx, &channel, 102); let poll1 = channel.send(1); let poll2 = channel.send(2); let poll3 = channel.send(3); let poll4 = channel.send(4); pin_mut!(poll1); pin_mut!(poll2); pin_mut!(poll3); pin_mut!(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); // Start polling some wait handles which get cancelled // before new ones are attached { let poll5 = channel.send(5); let poll6 = channel.send(6); pin_mut!(poll5); pin_mut!(poll6); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(poll6.as_mut().poll(cx).is_pending()); } assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert_receive!(cx, &channel, Some(100)); assert_receive!(cx, &channel, Some(101)); assert_receive!(cx, &channel, Some(102)); assert_send_done(cx, &mut poll1, Ok(())); assert_send_done(cx, &mut poll2, Ok(())); assert_send_done(cx, &mut poll3, Ok(())); assert!(channel.close().is_newly_closed()); assert_receive!(cx, &channel, Some(1)); assert_receive!(cx, &channel, Some(2)); assert_receive!(cx, &channel, Some(3)); assert_send_done(cx, &mut poll4, 
Err(ChannelSendError(4))); assert_eq!(count, 4); } #[test] fn cancel_receive_mid_wait() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); { let mut poll1 = Box::pin(channel.receive()); let mut poll2 = Box::pin(channel.receive()); let mut poll3 = Box::pin(channel.receive()); let mut poll4 = Box::pin(channel.receive()); let mut poll5 = Box::pin(channel.receive()); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(!poll1.is_terminated()); assert!(!poll2.is_terminated()); assert!(!poll3.is_terminated()); assert!(!poll4.is_terminated()); assert!(!poll5.is_terminated()); // Cancel 2 futures. Only the remaining ones should get completed drop(poll2); drop(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_send(cx, &channel, 1); assert_eq!(count, 1); assert_receive_done(cx, &mut poll1.as_mut(), Some(1)); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_send(cx, &channel, 2); assert_send(cx, &channel, 3); assert_eq!(count, 3); assert_receive_done(cx, &mut poll3.as_mut(), Some(2)); assert_receive_done(cx, &mut poll5.as_mut(), Some(3)); } assert_eq!(count, 3); } #[test] fn cancel_receive_end_wait() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let poll1 = channel.receive(); let poll2 = channel.receive(); let poll3 = channel.receive(); let poll4 = channel.receive(); pin_mut!(poll1); pin_mut!(poll2); pin_mut!(poll3); pin_mut!(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); // Start polling some wait handles which get cancelled // before new 
ones are attached { let poll5 = channel.receive(); let poll6 = channel.receive(); pin_mut!(poll5); pin_mut!(poll6); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(poll6.as_mut().poll(cx).is_pending()); } assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert_send(cx, &channel, 0); assert_send(cx, &channel, 1); assert_send(cx, &channel, 2); assert_receive_done(cx, &mut poll1, Some(0)); assert_receive_done(cx, &mut poll2, Some(1)); assert_receive_done(cx, &mut poll3, Some(2)); assert_send(cx, &channel, 3); assert_receive_done(cx, &mut poll4, Some(3)); assert_eq!(count, 4); } #[test] fn drops_unread_elements() { let (waker, _) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let drop_counter = DropCounter::new(); let elem1 = CountedElem::new(1, drop_counter.clone()); let elem2 = CountedElem::new(2, drop_counter.clone()); let elem3 = CountedElem::new(3, drop_counter.clone()); { let channel = $channel_type::::new(); // Fill the channel let fut1 = channel.send(elem1.clone()); let fut2 = channel.send(elem2.clone()); let fut3 = channel.send(elem3.clone()); assert_eq!(2, elem1.strong_count()); assert_eq!(2, elem2.strong_count()); assert_eq!(2, elem3.strong_count()); pin_mut!(fut1, fut2, fut3); assert_send_done(cx, &mut fut1, Ok(())); assert_send_done(cx, &mut fut2, Ok(())); assert_send_done(cx, &mut fut3, Ok(())); assert_eq!(2, elem1.strong_count()); assert_eq!(2, elem2.strong_count()); assert_eq!(2, elem3.strong_count()); } assert_eq!(1, drop_counter.drops(1)); assert_eq!(1, drop_counter.drops(2)); assert_eq!(1, drop_counter.drops(3)); assert_eq!(1, elem1.strong_count()); assert_eq!(1, elem2.strong_count()); assert_eq!(1, elem3.strong_count()); drop_counter.clear(); { let channel = $channel_type::::new(); // Fill the channel let fut1 = channel.send(elem1.clone()); let fut2 = channel.send(elem2.clone()); let futr1 = channel.receive(); let futr2 = channel.receive(); pin_mut!(fut1, fut2, futr1, futr2); 
assert_send_done(cx, &mut fut1, Ok(())); assert_send_done(cx, &mut fut2, Ok(())); let fut3 = channel.send(elem3.clone()); let fut4 = channel.send(elem2.clone()); pin_mut!(fut3, fut4); assert_receive_done(cx, &mut futr1, Some(elem1.clone())); assert_receive_done(cx, &mut futr2, Some(elem2.clone())); assert_eq!(1, elem1.strong_count()); assert_eq!(2, elem2.strong_count()); assert_send_done(cx, &mut fut3, Ok(())); assert_send_done(cx, &mut fut4, Ok(())); assert_eq!(1, elem1.strong_count()); assert_eq!(2, elem2.strong_count()); assert_eq!(2, elem3.strong_count()); // 1 and 2 are dropped twice, since we create a copy // through Option assert_eq!(2, drop_counter.drops(1)); assert_eq!(2, drop_counter.drops(2)); assert_eq!(0, drop_counter.drops(3)); drop_counter.clear(); } assert_eq!(0, drop_counter.drops(1)); assert_eq!(1, drop_counter.drops(2)); assert_eq!(1, drop_counter.drops(3)); assert_eq!(1, elem1.strong_count()); assert_eq!(1, elem2.strong_count()); assert_eq!(1, elem3.strong_count()); } #[test] fn poll_from_multiple_executors_on_receive() { let (waker_1, count_1) = new_count_waker(); let (waker_2, count_2) = new_count_waker(); let channel = ChannelType::new(); let cx_1 = &mut Context::from_waker(&waker_1); let cx_2 = &mut Context::from_waker(&waker_2); let fut = channel.receive(); pin_mut!(fut); assert!(fut.as_mut().poll(cx_1).is_pending()); assert!(fut.as_mut().poll(cx_2).is_pending()); assert_send(cx_1, &channel, 99); assert_eq!(count_1, 0); assert_eq!(count_2, 1); assert_receive_done(cx_2, &mut fut, Some(99)); } #[test] fn poll_from_multiple_executors_on_send() { let (waker_1, count_1) = new_count_waker(); let (waker_2, count_2) = new_count_waker(); let cx_1 = &mut Context::from_waker(&waker_1); let cx_2 = &mut Context::from_waker(&waker_2); let channel = ChannelType::new(); // Fill the channel, so that send blocks assert_send(cx_1, &channel, 1); assert_send(cx_1, &channel, 2); assert_send(cx_1, &channel, 3); let fut = channel.send(4); pin_mut!(fut); 
assert!(fut.as_mut().poll(cx_1).is_pending()); assert!(fut.as_mut().poll(cx_2).is_pending()); assert_receive!(cx_2, &channel, Some(1)); assert_eq!(count_2, 1); assert_eq!(count_1, 0); assert_send_done(cx_2, &mut fut, Ok(())); } #[test] fn buffered_starved_recv_does_not_deadlock() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); // Create enough futures to starve the permits. let recv_fut1 = channel.receive(); let recv_fut2 = channel.receive(); let recv_fut3 = channel.receive(); let recv_fut4 = channel.receive(); let recv_fut5 = channel.receive(); pin_mut!(recv_fut1, recv_fut2, recv_fut3, recv_fut4, recv_fut5); assert!(recv_fut1.as_mut().poll(cx).is_pending()); assert!(recv_fut2.as_mut().poll(cx).is_pending()); assert!(recv_fut3.as_mut().poll(cx).is_pending()); assert!(recv_fut3.as_mut().poll(cx).is_pending()); assert!(recv_fut3.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); // Now send & recv while being starved. assert_send(cx, &channel, 1); assert_eq!(count, 1); assert_send(cx, &channel, 2); assert_eq!(count, 2); assert_send(cx, &channel, 3); assert_eq!(count, 3); let send_fut4 = channel.send(4); let send_fut5 = channel.send(5); pin_mut!(send_fut4, send_fut5); assert!(send_fut4.as_mut().poll(cx).is_pending()); assert!(send_fut5.as_mut().poll(cx).is_pending()); let recv_fut6 = channel.receive(); let recv_fut7 = channel.receive(); let recv_fut8 = channel.receive(); let recv_fut9 = channel.receive(); let recv_fut10 = channel.receive(); pin_mut!( recv_fut6, recv_fut7, recv_fut8, recv_fut9, recv_fut10 ); // Grab the buffered data. assert_receive_done(cx, &mut recv_fut6, Some(1)); assert_eq!(count, 4); assert_receive_done(cx, &mut recv_fut7, Some(2)); assert_eq!(count, 5); assert_receive_done(cx, &mut recv_fut8, Some(3)); // Grab the pending data. 
assert_receive_done(cx, &mut recv_fut9, Some(4)); assert_receive_done(cx, &mut recv_fut10, Some(5)); assert_eq!(count, 5); // Do one last send & recv. let recv_fut11 = channel.receive(); pin_mut!(recv_fut11); assert!(recv_fut11.as_mut().poll(cx).is_pending()); assert_send(cx, &channel, 6); assert_receive_done(cx, &mut recv_fut11, Some(6)); assert_eq!(count, 6); // Now resolve the starved futures. assert_send(cx, &channel, 7); assert_receive_done(cx, &mut recv_fut1, Some(7)); assert_send(cx, &channel, 8); assert_receive_done(cx, &mut recv_fut2, Some(8)); assert_send(cx, &channel, 9); assert_receive_done(cx, &mut recv_fut3, Some(9)); assert_send(cx, &channel, 10); assert_receive_done(cx, &mut recv_fut4, Some(10)); assert_send(cx, &channel, 11); assert_receive_done(cx, &mut recv_fut5, Some(11)); assert_eq!(count, 6); } #[test] fn unbuffered_starved_send_does_not_deadlock() { let channel = UnbufferedChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); // Create enough futures to starve the permits. let send_fut1 = channel.send(99); let send_fut2 = channel.send(111); let send_fut3 = channel.send(69); pin_mut!(send_fut1, send_fut2, send_fut3); assert!(send_fut1.as_mut().poll(cx).is_pending()); assert!(send_fut2.as_mut().poll(cx).is_pending()); assert!(send_fut3.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); // Now send & recv while being starved. 
let recv_fut = channel.receive(); pin_mut!(recv_fut); assert_receive_done(cx, &mut recv_fut, Some(99)); assert_eq!(count, 1); let recv_fut = channel.receive(); pin_mut!(recv_fut); assert_receive_done(cx, &mut recv_fut, Some(111)); assert_eq!(count, 2); let recv_fut = channel.receive(); pin_mut!(recv_fut); assert_receive_done(cx, &mut recv_fut, Some(69)); assert_eq!(count, 3); assert_send_done(cx, &mut send_fut1, Ok(())); assert_send_done(cx, &mut send_fut2, Ok(())); assert_send_done(cx, &mut send_fut3, Ok(())); } #[test] fn buffered_stream_smoke_test() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let stream = channel.stream(); pin_mut!(stream); // Receive after send is immediately ready. for i in 0..10 { let send_fut = channel.send(i); pin_mut!(send_fut); assert_send_done(cx, &mut send_fut, Ok(())); assert_next_done(cx, &mut stream, Some(i)); // The should be not wakups since its immediate. assert_eq!(count, 0); } // Send after receive is immediately ready. for i in 0..10 { assert!(stream.as_mut().poll_next(cx).is_pending()); assert_eq!(count, i as usize); assert_send(cx, &channel, i); assert_next_done(cx, &mut stream, Some(i)); assert_eq!(count, i as usize + 1); } // This should block. assert!(stream.as_mut().poll_next(cx).is_pending()); // This should terminate the stream. assert!(channel.close().is_newly_closed()); // This should unblock. assert_next_done(cx, &mut stream, None); assert_eq!(count, 11); assert!(stream.is_terminated()); // Future calls should all return `None`. for _ in 0..10 { assert_next_done(cx, &mut stream, None); assert_eq!(count, 11); } } #[test] fn unbuffered_stream_smoke_test() { let channel = UnbufferedChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let stream = channel.stream(); pin_mut!(stream); // Send waits for a receive. 
for i in 0..10 { let send_fut = channel.send(i); pin_mut!(send_fut); assert!(send_fut.as_mut().poll(cx).is_pending()); assert_next_done(cx, &mut stream, Some(i)); assert_send_done(cx, &mut send_fut, Ok(())); assert_eq!(count, i as usize + 1); } // Receive waits for a send. for i in 0..10 { assert!(stream.as_mut().poll_next(cx).is_pending()); assert_eq!(count, 10 + i as usize * 2); let send_fut = channel.send(i); pin_mut!(send_fut); assert!(send_fut.as_mut().poll(cx).is_pending()); assert_next_done(cx, &mut stream, Some(i)); assert_send_done(cx, &mut send_fut, Ok(())); assert_eq!(count, 10 + i as usize * 2 + 2); } // This should block. assert!(stream.as_mut().poll_next(cx).is_pending()); // This should terminate the stream. assert!(channel.close().is_newly_closed()); // This should unblock. assert_next_done(cx, &mut stream, None); assert_eq!(count, 31); assert!(stream.is_terminated()); // Future calls should all return `None`. for _ in 0..10 { assert_next_done(cx, &mut stream, None); assert_eq!(count, 31); } } } }; } gen_mpmc_tests!( local_mpmc_channel_tests, LocalChannel, LocalUnbufferedChannel ); #[cfg(feature = "std")] mod if_std { use super::*; use futures_intrusive::channel::{ shared::channel, shared::ChannelReceiveFuture, shared::ChannelSendFuture, shared::Receiver, shared::Sender, Channel, UnbufferedChannel, }; gen_mpmc_tests!(mpmc_channel_tests, Channel, UnbufferedChannel); macro_rules! assert_receive { ($cx:ident, $receiver:expr, $value: expr) => { let receive_fut = $receiver.receive(); pin_mut!(receive_fut); assert!(!receive_fut.as_mut().is_terminated()); assert_receive_done($cx, &mut receive_fut, $value); }; } macro_rules! 
assert_send { ($cx:ident, $sender:expr, $value: expr) => { let send_fut = $sender.send($value); pin_mut!(send_fut); assert!(!send_fut.as_mut().is_terminated()); assert_send_done($cx, &mut send_fut, Ok(())); }; } fn is_send(_: &T) {} fn is_send_value(_: T) {} fn is_sync(_: &T) {} #[test] fn channel_futures_are_send() { let channel = Channel::::new(); is_sync(&channel); { let recv_fut = channel.receive(); is_send(&recv_fut); pin_mut!(recv_fut); is_send(&recv_fut); let send_fut = channel.send(3); is_send(&send_fut); pin_mut!(send_fut); is_send(&send_fut); } is_send_value(channel); } #[test] fn shared_channel_futures_are_send() { let (sender, receiver) = channel::(1); is_sync(&sender); is_sync(&receiver); is_send_value(sender.clone()); is_send_value(receiver.clone()); let recv_fut = receiver.receive(); is_send(&recv_fut); pin_mut!(recv_fut); is_send(&recv_fut); let send_fut = sender.send(3); is_send(&send_fut); pin_mut!(send_fut); is_send(&send_fut); } // Check if SharedChannel can be used in traits pub trait StreamTrait { type Output; type Next: Future; fn next(&self) -> Self::Next; } pub trait Sink { type Input; type Error; type Next: Future>; fn send(&self, value: Self::Input) -> Self::Next; } impl StreamTrait for Receiver where T: 'static, { type Output = Option; type Next = ChannelReceiveFuture; fn next(&self) -> Self::Next { self.receive() } } impl Sink for Sender where T: 'static, { type Input = T; type Error = ChannelSendError; type Next = ChannelSendFuture; fn send(&self, value: T) -> Self::Next { self.send(value) } } async fn send_stream>(stream: &S, value: i32) -> () { assert!(stream.send(value).await.is_ok()); } async fn read_stream>>( stream: &S, ) -> Option { stream.next().await } #[test] fn shared_channel_can_be_used_in_trait() { let (waker, _) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let (sender, receiver) = channel::(5); let stream = || async move { send_stream(&sender, 2).await; send_stream(&sender, 7).await; 
assert!(sender.close().is_newly_closed()); }; let drain = || async move { let mut sum = 0; loop { match read_stream(&receiver).await { None => return sum, Some(v) => sum += v, } } }; let stream_fut = stream(); pin_mut!(stream_fut); let drain_fut = drain(); pin_mut!(drain_fut); let mut do_drain = true; let mut do_stream = true; while do_drain || do_stream { if do_stream { match stream_fut.as_mut().poll(cx) { Poll::Ready(_) => { do_stream = false; } Poll::Pending => { if !do_drain { panic!("Expected channel to be closed"); } } } } if do_drain { match drain_fut.as_mut().poll(cx) { Poll::Ready(res) => { assert_eq!(9, res); do_drain = false; } Poll::Pending => {} } } } } #[test] fn try_send_recv_smoke_test() { let (sender, receiver) = channel::(3); for _ in 0..3 { sender.try_send(5).unwrap(); } sender.try_send(5).unwrap_err(); for _ in 0..3 { receiver.try_receive().unwrap(); } let err = receiver.try_receive().unwrap_err(); assert!(err.is_empty()); } #[test] fn try_send_full_channel() { let (sender, _receiver) = channel::(3); for _ in 0..3 { sender.try_send(5).unwrap(); } let err = sender.try_send(5).unwrap_err(); assert!(err.is_full()); } #[test] fn try_send_on_closed_channel() { let (sender, receiver) = channel::(3); assert!(receiver.close().is_newly_closed()); let err = sender.try_send(5).unwrap_err(); assert!(err.is_closed()); } #[test] fn try_receive_empty_channel() { let (_sender, receiver) = channel::(3); let err = receiver.try_receive().unwrap_err(); assert!(err.is_empty()); } #[test] fn try_recv_on_closed_channel() { let (sender, receiver) = channel::(3); sender.try_send(5).unwrap(); assert!(sender.close().is_newly_closed()); receiver.try_receive().unwrap(); let err = receiver.try_receive().unwrap_err(); assert!(err.is_closed()); } #[test] fn dropping_shared_channel_receivers_but_not_senders_drops_content() { use std::sync::{ atomic::{AtomicU64, Ordering}, Arc, }; #[derive(Debug, Clone)] struct Count(Arc); impl Count { fn new() -> Self { 
Self(Arc::new(AtomicU64::new(0))) } fn load(&self) -> u64 { self.0.load(Ordering::Acquire) } } impl Drop for Count { fn drop(&mut self) { self.0.fetch_add(1, Ordering::Relaxed); } } let count = Count::new(); assert_eq!(count.load(), 0); let (sender, receiver) = channel::(10); sender.try_send(count.clone()).unwrap(); sender.try_send(count.clone()).unwrap(); sender.try_send(count.clone()).unwrap(); drop(receiver); assert_eq!(count.load(), 3); } #[test] fn dropping_shared_channel_senders_closes_channel() { let (waker, _) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let (sender, receiver) = channel::(5); let sender2 = sender.clone(); let receiver2 = receiver.clone(); let fut = receiver.receive(); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = receiver2.receive(); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); drop(sender); assert!(fut.as_mut().poll(cx).is_pending()); assert!(fut2.as_mut().poll(cx).is_pending()); drop(sender2); match fut.as_mut().poll(cx) { Poll::Ready(None) => {} Poll::Ready(Some(_)) => panic!("Expected no value"), Poll::Pending => panic!("Expected channel to be closed"), } match fut2.as_mut().poll(cx) { Poll::Ready(None) => {} Poll::Ready(Some(_)) => panic!("Expected no value"), Poll::Pending => panic!("Expected channel to be closed"), } } #[test] fn dropping_shared_channel_receivers_closes_channel() { let (waker, _) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let (sender, receiver) = channel::(3); let sender2 = sender.clone(); let receiver2 = receiver.clone(); // Fill the channel for _ in 0..3 { let send_fut = sender.send(3); pin_mut!(send_fut); match send_fut.as_mut().poll(cx) { Poll::Pending => panic!("future is not ready"), Poll::Ready(res) => { if res.is_err() { panic!("Unexpected send result: {:?}", res); } } }; } let fut = sender.send(27); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = sender2.send(49); pin_mut!(fut2); 
assert!(fut2.as_mut().poll(cx).is_pending()); drop(receiver); assert!(fut.as_mut().poll(cx).is_pending()); assert!(fut2.as_mut().poll(cx).is_pending()); drop(receiver2); match fut.as_mut().poll(cx) { Poll::Ready(Err(ChannelSendError(27))) => {} Poll::Ready(v) => panic!("Unexpected value {:?}", v), Poll::Pending => panic!("Expected channel to be closed"), } match fut2.as_mut().poll(cx) { Poll::Ready(Err(ChannelSendError(49))) => {} Poll::Ready(v) => panic!("Unexpected value {:?}", v), Poll::Pending => panic!("Expected channel to be closed"), } } #[test] fn shared_stream_smoke_test() { let (sender, receiver) = channel::(3); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let stream = receiver.into_stream(); pin_mut!(stream); // Receive after send is immediately ready. for i in 0..10 { let send_fut = sender.send(i); pin_mut!(send_fut); assert_send_done(cx, &mut send_fut, Ok(())); assert_next_done(cx, &mut stream, Some(i)); // The should be not wakups since its immediate. assert_eq!(count, 0); } // Send after receive is immediately ready. for i in 0..10 { assert!(stream.as_mut().poll_next(cx).is_pending()); assert_eq!(count, i as usize); let send_fut = sender.send(i); pin_mut!(send_fut); assert_send_done(cx, &mut send_fut, Ok(())); assert_next_done(cx, &mut stream, Some(i)); assert_eq!(count, i as usize + 1); } // This should block. assert!(stream.as_mut().poll_next(cx).is_pending()); // This should terminate the stream. assert!(stream.close().is_newly_closed()); // This should unblock. assert_next_done(cx, &mut stream, None); assert_eq!(count, 11); assert!(stream.is_terminated()); // Future calls should all return `None`. 
for _ in 0..10 { assert_next_done(cx, &mut stream, None); assert_eq!(count, 11); } } #[test] fn cancel_send_mid_wait() { let (sender, receiver) = channel::(3); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); assert_send!(cx, &sender, 5); assert_send!(cx, &sender, 6); assert_send!(cx, &sender, 7); { // Cancel a wait in between other waits // In order to arbitrarily drop a non movable future we have to box and pin it let mut poll1 = Box::pin(sender.send(8)); let mut poll2 = Box::pin(sender.send(9)); let mut poll3 = Box::pin(sender.send(10)); let mut poll4 = Box::pin(sender.send(11)); let mut poll5 = Box::pin(sender.send(12)); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(!poll1.is_terminated()); assert!(!poll2.is_terminated()); assert!(!poll3.is_terminated()); assert!(!poll4.is_terminated()); assert!(!poll5.is_terminated()); // Cancel 2 futures. Only the remaining ones should get completed // Safety: We are not using these pins again so this is safe. 
unsafe { core::pin::Pin::into_inner_unchecked(poll2).cancel() }; unsafe { core::pin::Pin::into_inner_unchecked(poll4).cancel() }; assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_receive!(cx, &receiver, Some(5)); assert_eq!(count, 1); assert_send_done(cx, &mut poll1.as_mut(), Ok(())); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_receive!(cx, &receiver, Some(6)); assert_receive!(cx, &receiver, Some(7)); assert_eq!(count, 3); assert_send_done(cx, &mut poll3.as_mut(), Ok(())); assert_send_done(cx, &mut poll5.as_mut(), Ok(())); } assert_eq!(count, 3); } #[test] fn cancel_send_end_wait() { let (sender, receiver) = channel::(3); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); assert_send!(cx, &sender, 100); assert_send!(cx, &sender, 101); assert_send!(cx, &sender, 102); let poll1 = sender.send(1); let poll2 = sender.send(2); let poll3 = sender.send(3); let poll4 = sender.send(4); pin_mut!(poll1); pin_mut!(poll2); pin_mut!(poll3); pin_mut!(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); // Start polling some wait handles which get cancelled // before new ones are attached let poll5 = sender.send(5); let poll6 = sender.send(6); pin_mut!(poll5); pin_mut!(poll6); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(poll6.as_mut().poll(cx).is_pending()); // Cancel 2 futures. Only the remaining ones should get completed // Safety: We are not using these pins again so this is safe. 
unsafe { core::pin::Pin::into_inner_unchecked(poll5).cancel() }; unsafe { core::pin::Pin::into_inner_unchecked(poll6).cancel() }; assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert_receive!(cx, &receiver, Some(100)); assert_receive!(cx, &receiver, Some(101)); assert_receive!(cx, &receiver, Some(102)); assert_send_done(cx, &mut poll1, Ok(())); assert_send_done(cx, &mut poll2, Ok(())); assert_send_done(cx, &mut poll3, Ok(())); assert!(receiver.close().is_newly_closed()); assert_receive!(cx, &receiver, Some(1)); assert_receive!(cx, &receiver, Some(2)); assert_receive!(cx, &receiver, Some(3)); assert_send_done(cx, &mut poll4, Err(ChannelSendError(4))); assert_eq!(count, 4); } } futures-intrusive-0.5.0/tests/mutex.rs000064400000000000000000000446720072674642500162700ustar 00000000000000use futures::future::{FusedFuture, Future}; use futures::task::{Context, Poll}; use futures_intrusive::sync::LocalMutex; use futures_test::task::{new_count_waker, panic_waker}; use pin_utils::pin_mut; macro_rules! 
gen_mutex_tests { ($mod_name:ident, $mutex_type:ident) => { mod $mod_name { use super::*; #[test] fn uncontended_lock() { for is_fair in &[true, false] { let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, *is_fair); assert_eq!(false, mtx.is_locked()); { let mutex_fut = mtx.lock(); pin_mut!(mutex_fut); match mutex_fut.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(mut guard) => { assert_eq!(true, mtx.is_locked()); assert_eq!(5, *guard); *guard += 7; assert_eq!(12, *guard); } }; assert!(mutex_fut.as_mut().is_terminated()); } assert_eq!(false, mtx.is_locked()); { let mutex_fut = mtx.lock(); pin_mut!(mutex_fut); match mutex_fut.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(guard) => { assert_eq!(true, mtx.is_locked()); assert_eq!(12, *guard); } }; } assert_eq!(false, mtx.is_locked()); } } #[test] #[should_panic] fn poll_after_completion_should_panic() { for is_fair in &[true, false] { let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, *is_fair); let mutex_fut = mtx.lock(); pin_mut!(mutex_fut); let guard = match mutex_fut.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(guard) => guard, }; assert_eq!(5, *guard); assert!(mutex_fut.as_mut().is_terminated()); let _ = mutex_fut.poll(cx); } } #[test] fn contended_lock() { for is_fair in &[true, false] { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, *is_fair); let mutex_fut1 = mtx.lock(); pin_mut!(mutex_fut1); // Lock the mutex let mut guard1 = match mutex_fut1.poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(guard) => guard, }; *guard1 = 27; // The second and third lock attempt must fail let mutex_fut2 = mtx.lock(); pin_mut!(mutex_fut2); assert!(mutex_fut2.as_mut().poll(cx).is_pending()); 
assert!(!mutex_fut2.as_mut().is_terminated()); let mutex_fut3 = mtx.lock(); pin_mut!(mutex_fut3); assert!(mutex_fut3.as_mut().poll(cx).is_pending()); assert!(!mutex_fut3.as_mut().is_terminated()); assert_eq!(count, 0); // Unlock - mutex should be available again drop(guard1); assert_eq!(count, 1); let mut guard2 = match mutex_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(guard) => guard, }; assert_eq!(27, *guard2); *guard2 = 72; assert!(mutex_fut2.as_mut().is_terminated()); assert!(mutex_fut3.as_mut().poll(cx).is_pending()); assert!(!mutex_fut3.as_mut().is_terminated()); assert_eq!(count, 1); // Unlock - mutex should be available again drop(guard2); assert_eq!(count, 2); let guard3 = match mutex_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(guard) => guard, }; assert_eq!(72, *guard3); assert!(mutex_fut3.as_mut().is_terminated()); drop(guard3); assert_eq!(count, 2); } } #[test] fn lock_synchronously() { for is_fair in &[true] { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, *is_fair); let mutex_fut1 = mtx.lock(); pin_mut!(mutex_fut1); // Lock the mutex let mut guard1 = match mutex_fut1.poll(cx) { Poll::Pending => panic!("Expect mutex to get acquired 1"), Poll::Ready(guard) => guard, }; *guard1 = 7; assert_eq!(true, mtx.is_locked()); // Synchronous lock attempt fails assert!(mtx.try_lock().is_none()); // Add an async waiter let mut mutex_fut2 = Box::pin(mtx.lock()); assert!(mutex_fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); // Release - mutex should be available again drop(guard1); assert_eq!(false, mtx.is_locked()); // In the fair case we shouldn't be able to obtain the // mutex asynchronously. In the unfair case it should // be possible. 
if *is_fair { assert!(mtx.try_lock().is_none()); // Cancel async lock attempt drop(mutex_fut2); // Now the mutex should be lockable } let guard = mtx.try_lock().unwrap(); assert_eq!(true, mtx.is_locked()); assert_eq!(*guard, 7); drop(guard); } } #[test] fn cancel_wait_for_mutex() { for is_fair in &[true, false] { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, *is_fair); // Lock the mutex let mut guard1 = mtx.try_lock().unwrap(); *guard1 = 27; // The second and third lock attempt must fail let mut mutex_fut2 = Box::pin(mtx.lock()); let mut mutex_fut3 = Box::pin(mtx.lock()); assert!(mutex_fut2.as_mut().poll(cx).is_pending()); assert!(mutex_fut3.as_mut().poll(cx).is_pending()); // Before the mutex gets available, cancel one lock attempt drop(mutex_fut2); // Unlock - mutex should be available again. Mutex2 should have been notified drop(guard1); assert_eq!(count, 1); // Unlock - mutex should be available again match mutex_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(guard) => guard, }; } } #[test] fn unlock_next_when_notification_is_not_used() { for is_fair in &[true, false] { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, *is_fair); // Lock the mutex let mut guard1 = mtx.try_lock().unwrap(); *guard1 = 27; // The second and third lock attempt must fail let mut mutex_fut2 = Box::pin(mtx.lock()); let mut mutex_fut3 = Box::pin(mtx.lock()); assert!(mutex_fut2.as_mut().poll(cx).is_pending()); assert!(!mutex_fut2.as_mut().is_terminated()); assert!(mutex_fut3.as_mut().poll(cx).is_pending()); assert!(!mutex_fut3.as_mut().is_terminated()); assert_eq!(count, 0); // Unlock - mutex should be available again. Mutex2 should have been notified drop(guard1); assert_eq!(count, 1); // We don't use the notification. 
Expect the next waiting task to be woken up drop(mutex_fut2); assert_eq!(count, 2); // Unlock - mutex should be available again match mutex_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(guard) => guard, }; } } #[test] fn new_waiters_on_unfair_mutex_can_acquire_future_while_one_task_is_notified() { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, false); // Lock the mutex let mut guard1 = mtx.try_lock().unwrap(); *guard1 = 27; // The second and third lock attempt must fail let mut mutex_fut2 = Box::pin(mtx.lock()); let mut mutex_fut3 = Box::pin(mtx.lock()); assert!(mutex_fut2.as_mut().poll(cx).is_pending()); // Unlock - mutex should be available again. fut2 should have been notified drop(guard1); assert_eq!(count, 1); // Lock fut3 in between. This should succeed let guard3 = match mutex_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(guard) => guard, }; // Now fut2 can't use it's notification and is still pending assert!(mutex_fut2.as_mut().poll(cx).is_pending()); // When we drop fut3, the mutex should signal that it's available for fut2, // which needs to have re-registered drop(guard3); assert_eq!(count, 2); match mutex_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(_guard) => {} }; } #[test] fn waiters_on_unfair_mutex_can_acquire_future_through_repolling_if_one_task_is_notified( ) { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, false); // Lock the mutex let mut guard1 = mtx.try_lock().unwrap(); *guard1 = 27; // The second and third lock attempt must fail let mut mutex_fut2 = Box::pin(mtx.lock()); let mut mutex_fut3 = Box::pin(mtx.lock()); assert!(mutex_fut2.as_mut().poll(cx).is_pending()); assert!(mutex_fut3.as_mut().poll(cx).is_pending()); // Unlock - mutex should be available again. 
fut2 should have been notified drop(guard1); assert_eq!(count, 1); // Lock fut3 in between. This should succeed let guard3 = match mutex_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(guard) => guard, }; // Now fut2 can't use it's notification and is still pending assert!(mutex_fut2.as_mut().poll(cx).is_pending()); // When we drop fut3, the mutex should signal that it's available for fut2, // which needs to have re-registered drop(guard3); assert_eq!(count, 2); match mutex_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(_guard) => {} }; } #[test] fn new_waiters_on_fair_mutex_cant_acquire_future_while_one_task_is_notified() { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, true); // Lock the mutex let mut guard1 = mtx.try_lock().unwrap(); *guard1 = 27; // The second and third lock attempt must fail let mut mutex_fut2 = Box::pin(mtx.lock()); let mut mutex_fut3 = Box::pin(mtx.lock()); assert!(mutex_fut2.as_mut().poll(cx).is_pending()); // Unlock - mutex should be available again. fut2 should have been notified drop(guard1); assert_eq!(count, 1); // Lock fut3 in between. 
This should fail assert!(mutex_fut3.as_mut().poll(cx).is_pending()); // fut2 should be lockable match mutex_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(_guard) => {} }; // Now fut3 should have been signaled and be lockable assert_eq!(count, 2); match mutex_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(_guard) => {} }; } #[test] fn waiters_on_fair_mutex_cant_acquire_future_through_repolling_if_one_task_is_notified() { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let mtx = $mutex_type::new(5, true); // Lock the mutex let mut guard1 = mtx.try_lock().unwrap(); *guard1 = 27; // The second and third lock attempt must fail let mut mutex_fut2 = Box::pin(mtx.lock()); let mut mutex_fut3 = Box::pin(mtx.lock()); assert!(mutex_fut2.as_mut().poll(cx).is_pending()); assert!(mutex_fut3.as_mut().poll(cx).is_pending()); // Unlock - mutex should be available again. fut2 should have been notified drop(guard1); assert_eq!(count, 1); // Lock fut3 in between. 
This should fail, since fut2 should get the mutex first assert!(mutex_fut3.as_mut().poll(cx).is_pending()); // fut2 should be lockable match mutex_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(_guard) => {} }; // Now fut3 should be lockable assert_eq!(count, 2); match mutex_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect mutex to get locked"), Poll::Ready(_guard) => {} }; } #[test] fn poll_from_multiple_executors() { for is_fair in &[true, false] { let (waker_1, count_1) = new_count_waker(); let (waker_2, count_2) = new_count_waker(); let mtx = $mutex_type::new(5, *is_fair); // Lock the mutex let mut guard1 = mtx.try_lock().unwrap(); *guard1 = 27; let cx_1 = &mut Context::from_waker(&waker_1); let cx_2 = &mut Context::from_waker(&waker_2); let fut = mtx.lock(); pin_mut!(fut); assert!(fut.as_mut().poll(cx_1).is_pending()); assert!(fut.as_mut().poll(cx_2).is_pending()); drop(guard1); assert_eq!(count_1, 0); assert_eq!(count_2, 1); assert!(fut.as_mut().poll(cx_2).is_ready()); assert!(fut.as_mut().is_terminated()); } } } }; } gen_mutex_tests!(local_mutex_tests, LocalMutex); #[cfg(feature = "std")] mod if_std { use super::*; use futures::FutureExt; use futures_intrusive::sync::Mutex; gen_mutex_tests!(mutex_tests, Mutex); fn is_send(_: &T) {} fn is_send_value(_: T) {} fn is_sync(_: &T) {} #[test] fn mutex_futures_are_send() { let mutex = Mutex::new(true, true); is_sync(&mutex); { let lock_fut = mutex.lock(); is_send(&lock_fut); pin_mut!(lock_fut); is_send(&lock_fut); let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); pin_mut!(lock_fut); let res = lock_fut.poll_unpin(cx); let guard = match res { Poll::Ready(v) => v, Poll::Pending => panic!("Expected to be ready"), }; is_send(&guard); is_send_value(guard); } is_send_value(mutex); } } futures-intrusive-0.5.0/tests/oneshot_channel.rs000064400000000000000000000275640072674642500202760ustar 00000000000000use futures::future::{FusedFuture, Future}; use 
futures::task::{Context, Poll}; use futures_intrusive::channel::{ChannelSendError, LocalOneshotChannel}; use futures_test::task::{new_count_waker, panic_waker}; use pin_utils::pin_mut; macro_rules! gen_oneshot_tests { ($mod_name:ident, $channel_type:ident) => { mod $mod_name { use super::*; fn assert_receive_done( cx: &mut Context, receive_fut: &mut core::pin::Pin<&mut FutureType>, value: Option, ) where FutureType: Future> + FusedFuture, T: PartialEq + core::fmt::Debug, { match receive_fut.as_mut().poll(cx) { Poll::Pending => panic!("future is not ready"), Poll::Ready(res) => { if res != value { panic!("Unexpected value {:?}", res); } } }; assert!(receive_fut.as_mut().is_terminated()); } #[test] fn send_on_closed_channel() { let channel = $channel_type::::new(); assert!(channel.close().is_newly_closed()); assert_eq!(Err(ChannelSendError(5)), channel.send(5)); } #[test] fn close_status() { let channel = $channel_type::::new(); assert!(channel.close().is_newly_closed()); assert!(channel.close().is_already_closed()); assert!(channel.close().is_already_closed()); } #[test] fn close_unblocks_receive() { let channel = $channel_type::::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let fut = channel.receive(); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = channel.receive(); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert!(channel.close().is_newly_closed()); assert_eq!(count, 2); assert_receive_done(cx, &mut fut, None); assert_receive_done(cx, &mut fut2, None); } #[test] fn receive_after_send() { let channel = $channel_type::::new(); let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); channel.send(5).unwrap(); let receive_fut = channel.receive(); pin_mut!(receive_fut); assert!(!receive_fut.as_mut().is_terminated()); assert_receive_done(cx, &mut receive_fut, Some(5)); // A second receive attempt must yield None, since the // value was taken out 
of the channel let receive_fut2 = channel.receive(); pin_mut!(receive_fut2); assert_receive_done(cx, &mut receive_fut2, None); } #[test] fn send_after_receive() { let channel = $channel_type::::new(); let (waker, _) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let receive_fut1 = channel.receive(); let receive_fut2 = channel.receive(); pin_mut!(receive_fut1); pin_mut!(receive_fut2); assert!(!receive_fut1.as_mut().is_terminated()); assert!(!receive_fut2.as_mut().is_terminated()); let poll_res1 = receive_fut1.as_mut().poll(cx); let poll_res2 = receive_fut2.as_mut().poll(cx); assert!(poll_res1.is_pending()); assert!(poll_res2.is_pending()); channel.send(5).unwrap(); assert_receive_done(cx, &mut receive_fut1, Some(5)); // receive_fut2 isn't terminated, since it hasn't been polled assert!(!receive_fut2.as_mut().is_terminated()); // When it gets polled, it must evaluate to None assert_receive_done(cx, &mut receive_fut2, None); } #[test] fn second_send_rejects_value() { let channel = $channel_type::::new(); let (waker, _) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let receive_fut1 = channel.receive(); pin_mut!(receive_fut1); assert!(!receive_fut1.as_mut().is_terminated()); assert!(receive_fut1.as_mut().poll(cx).is_pending()); // First send channel.send(5).unwrap(); assert!(receive_fut1.as_mut().poll(cx).is_ready()); // Second send let send_res = channel.send(7); match send_res { Err(ChannelSendError(7)) => {} // expected _ => panic!("Second second should reject"), } } #[test] fn cancel_mid_wait() { let channel = $channel_type::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); { // Cancel a wait in between other waits // In order to arbitrarily drop a non movable future we have to box and pin it let mut poll1 = Box::pin(channel.receive()); let mut poll2 = Box::pin(channel.receive()); let mut poll3 = Box::pin(channel.receive()); let mut poll4 = Box::pin(channel.receive()); let mut poll5 = 
Box::pin(channel.receive()); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(!poll1.is_terminated()); assert!(!poll2.is_terminated()); assert!(!poll3.is_terminated()); assert!(!poll4.is_terminated()); assert!(!poll5.is_terminated()); // Cancel 2 futures. Only the remaining ones should get completed drop(poll2); drop(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); channel.send(7).unwrap(); assert_eq!(count, 3); assert!(poll1.as_mut().poll(cx).is_ready()); assert!(poll3.as_mut().poll(cx).is_ready()); assert!(poll5.as_mut().poll(cx).is_ready()); assert!(poll1.is_terminated()); assert!(poll3.is_terminated()); assert!(poll5.is_terminated()); } assert_eq!(count, 3) } #[test] fn cancel_end_wait() { let channel = $channel_type::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let poll1 = channel.receive(); let poll2 = channel.receive(); let poll3 = channel.receive(); let poll4 = channel.receive(); pin_mut!(poll1); pin_mut!(poll2); pin_mut!(poll3); pin_mut!(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); // Start polling some wait handles which get cancelled // before new ones are attached { let poll5 = channel.receive(); let poll6 = channel.receive(); pin_mut!(poll5); pin_mut!(poll6); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(poll6.as_mut().poll(cx).is_pending()); } assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); channel.send(99).unwrap(); assert!(poll1.as_mut().poll(cx).is_ready()); assert!(poll2.as_mut().poll(cx).is_ready()); assert!(poll3.as_mut().poll(cx).is_ready()); 
assert!(poll4.as_mut().poll(cx).is_ready()); assert_eq!(count, 4) } #[test] fn poll_from_multiple_executors() { let (waker_1, count_1) = new_count_waker(); let (waker_2, count_2) = new_count_waker(); let channel = $channel_type::new(); let cx_1 = &mut Context::from_waker(&waker_1); let cx_2 = &mut Context::from_waker(&waker_2); let fut = channel.receive(); pin_mut!(fut); assert!(fut.as_mut().poll(cx_1).is_pending()); assert!(fut.as_mut().poll(cx_2).is_pending()); channel.send(99).unwrap(); assert_eq!(count_1, 0); assert_eq!(count_2, 1); assert_receive_done(cx_2, &mut fut, Some(99)); } } }; } gen_oneshot_tests!(local_oneshot_channel_tests, LocalOneshotChannel); #[cfg(feature = "std")] mod if_std { use super::*; use futures_intrusive::channel::shared::oneshot_channel; use futures_intrusive::channel::OneshotChannel; gen_oneshot_tests!(oneshot_channel_tests, OneshotChannel); fn is_send(_: &T) {} fn is_send_value(_: T) {} fn is_sync(_: &T) {} #[test] fn channel_futures_are_send() { let channel = OneshotChannel::::new(); is_sync(&channel); { let recv_fut = channel.receive(); is_send(&recv_fut); pin_mut!(recv_fut); is_send(&recv_fut); let send_fut = channel.send(3); is_send(&send_fut); pin_mut!(send_fut); is_send(&send_fut); } is_send_value(channel); } #[test] fn shared_channel_futures_are_send() { let (sender, receiver) = oneshot_channel::(); is_sync(&sender); is_sync(&receiver); let recv_fut = receiver.receive(); is_send(&recv_fut); pin_mut!(recv_fut); is_send(&recv_fut); let send_fut = sender.send(3); is_send(&send_fut); pin_mut!(send_fut); is_send(&send_fut); is_send_value(sender); is_send_value(receiver); } #[test] fn dropping_shared_channel_senders_closes_channel() { let (waker, _) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let (sender, receiver) = oneshot_channel::(); let fut = receiver.receive(); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); drop(sender); match fut.as_mut().poll(cx) { Poll::Ready(None) => {} Poll::Ready(Some(_)) 
=> panic!("Expected no value"), Poll::Pending => panic!("Expected channel to be closed"), } } #[test] fn dropping_shared_channel_receivers_closes_channel() { let (sender, receiver) = oneshot_channel::(); drop(receiver); assert_eq!(Err(ChannelSendError(5)), sender.send(5)); } } futures-intrusive-0.5.0/tests/semaphore.rs000064400000000000000000000602700072674642500171010ustar 00000000000000use futures::future::{FusedFuture, Future}; use futures::task::{Context, Poll}; use futures_intrusive::sync::LocalSemaphore; use futures_test::task::{new_count_waker, panic_waker}; use pin_utils::pin_mut; macro_rules! gen_semaphore_tests { ($mod_name:ident, $semaphore_type:ident) => { mod $mod_name { use super::*; #[test] fn uncontended_acquire() { for is_fair in &[true, false] { let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(*is_fair, 2); assert_eq!(2, sem.permits()); { let sem_fut = sem.acquire(1); pin_mut!(sem_fut); match sem_fut.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(_guard) => { assert_eq!(1, sem.permits()); }, }; assert!(sem_fut.as_mut().is_terminated()); assert_eq!(2, sem.permits()); } assert_eq!(2, sem.permits()); { let sem_fut = sem.acquire(2); pin_mut!(sem_fut); match sem_fut.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(_guard) => { assert_eq!(0, sem.permits()); }, }; assert!(sem_fut.as_mut().is_terminated()); } assert_eq!(2, sem.permits()); } } #[test] fn manual_release_via_disarm() { for is_fair in &[true, false] { let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(*is_fair, 2); assert_eq!(2, sem.permits()); { let sem_fut = sem.acquire(1); pin_mut!(sem_fut); match sem_fut.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(mut guard) => { assert_eq!(1, sem.permits()); guard.disarm(); }, }; 
assert!(sem_fut.as_mut().is_terminated()); assert_eq!(1, sem.permits()); } assert_eq!(1, sem.permits()); { let sem_fut = sem.acquire(1); pin_mut!(sem_fut); match sem_fut.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(mut guard) => { assert_eq!(0, sem.permits()); guard.disarm(); }, }; assert!(sem_fut.as_mut().is_terminated()); } assert_eq!(0, sem.permits()); sem.release(2); assert_eq!(2, sem.permits()); } } #[test] #[should_panic] fn poll_after_completion_should_panic() { for is_fair in &[true, false] { let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(*is_fair, 2); let sem_fut = sem.acquire(2); pin_mut!(sem_fut); match sem_fut.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(guard) => guard, }; assert!(sem_fut.as_mut().is_terminated()); let _ = sem_fut.poll(cx); } } #[test] fn contended_acquire() { for is_fair in &[false, true] { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(*is_fair, 3); let sem_fut1 = sem.acquire(3); pin_mut!(sem_fut1); // Acquire the semaphore let guard1 = match sem_fut1.poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired 1"), Poll::Ready(guard) => guard }; // The next acquire attempts must fail let sem_fut2 = sem.acquire(1); pin_mut!(sem_fut2); assert!(sem_fut2.as_mut().poll(cx).is_pending()); assert!(!sem_fut2.as_mut().is_terminated()); let sem_fut3 = sem.acquire(2); pin_mut!(sem_fut3); assert!(sem_fut3.as_mut().poll(cx).is_pending()); assert!(!sem_fut3.as_mut().is_terminated()); let sem_fut4 = sem.acquire(2); pin_mut!(sem_fut4); assert!(sem_fut4.as_mut().poll(cx).is_pending()); assert!(!sem_fut4.as_mut().is_terminated()); assert_eq!(count, 0); // Release - semaphore should be available again and allow // fut2 and fut3 to complete assert_eq!(0, sem.permits()); drop(guard1); assert_eq!(3, sem.permits()); // At least one 
task should be awoken. if *is_fair { assert_eq!(count, 1); } else { assert_eq!(count, 2); } let guard2 = match sem_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired 2"), Poll::Ready(guard) => guard }; assert!(sem_fut2.as_mut().is_terminated()); assert_eq!(2, sem.permits()); // In the fair case, the next task should be woken up here assert_eq!(count, 2); let guard3 = match sem_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired 3"), Poll::Ready(guard) => guard }; assert!(sem_fut3.as_mut().is_terminated()); assert_eq!(0, sem.permits()); assert!(sem_fut4.as_mut().poll(cx).is_pending()); assert!(!sem_fut4.as_mut().is_terminated()); // Release - some permits should be available again drop(guard2); assert_eq!(1, sem.permits()); assert_eq!(count, 2); assert!(sem_fut4.as_mut().poll(cx).is_pending()); assert!(!sem_fut4.as_mut().is_terminated()); // After releasing the permits from fut3, there should be // enough permits for fut4 getting woken. 
drop(guard3); assert_eq!(3, sem.permits()); assert_eq!(count, 3); let guard4 = match sem_fut4.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired 4"), Poll::Ready(guard) => guard }; assert!(sem_fut4.as_mut().is_terminated()); drop(guard4); assert_eq!(3, sem.permits()); assert_eq!(count, 3); } } #[test] fn acquire_synchronously() { for is_fair in &[true] { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(*is_fair, 3); let sem_fut1 = sem.acquire(3); pin_mut!(sem_fut1); // Acquire the semaphore let guard1 = match sem_fut1.poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired 1"), Poll::Ready(guard) => guard }; // Some failing acquire attempts assert!(sem.try_acquire(1).is_none()); // Add an async waiter let mut sem_fut2 = Box::pin(sem.acquire(1)); assert!(sem_fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); // Release - semaphore should be available again drop(guard1); assert_eq!(3, sem.permits()); // In the fair case we shouldn't be able to obtain the // semaphore asynchronously. In the unfair case it should // be possible. 
if *is_fair { assert!(sem.try_acquire(1).is_none()); // Cancel async acquire attempt drop(sem_fut2); // Now the semaphore should be acquireable } let guard = sem.try_acquire(1).unwrap(); assert_eq!(2, sem.permits()); let mut guard2 = sem.try_acquire(2).unwrap(); assert_eq!(0, sem.permits()); guard2.disarm(); sem.release(2); drop(guard); } } #[test] fn acquire_0_permits_without_other_waiters() { for is_fair in &[false, true] { let (waker, _count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(*is_fair, 3); // Acquire the semaphore let guard1 = sem.try_acquire(3).unwrap(); assert_eq!(0, sem.permits()); let sem_fut2 = sem.acquire(0); pin_mut!(sem_fut2); let guard2 = match sem_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired 2"), Poll::Ready(guard) => guard }; drop(guard2); assert_eq!(0, sem.permits()); drop(guard1); assert_eq!(3, sem.permits()); } } #[test] fn acquire_0_permits_with_other_waiters() { for is_fair in &[false, true] { let (waker, _count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(*is_fair, 3); // Acquire the semaphore let guard1 = sem.try_acquire(3).unwrap(); assert_eq!(0, sem.permits()); let sem_fut2 = sem.acquire(1); pin_mut!(sem_fut2); assert!(sem_fut2.as_mut().poll(cx).is_pending()); let sem_fut3 = sem.acquire(0); pin_mut!(sem_fut3); let guard3 = match sem_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired 3"), Poll::Ready(guard) => guard }; drop(guard3); assert_eq!(0, sem.permits()); drop(guard1); assert_eq!(3, sem.permits()); let guard2 = match sem_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired 2"), Poll::Ready(guard) => guard }; assert_eq!(2, sem.permits()); drop(guard2); } } #[test] fn cancel_wait_for_semaphore() { for is_fair in &[true, false] { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = 
$semaphore_type::new(*is_fair, 5); // Acquire the semaphore let guard1 = sem.try_acquire(5).unwrap(); // The second and third lock attempt must fail let mut sem_fut2 = Box::pin(sem.acquire(1)); let mut sem_fut3 = Box::pin(sem.acquire(1)); assert!(sem_fut2.as_mut().poll(cx).is_pending()); assert!(sem_fut3.as_mut().poll(cx).is_pending()); // Before the semaphore gets available, cancel one acquire attempt drop(sem_fut2); // Unlock - semaphore should be available again. // fut2 should have been notified drop(guard1); assert_eq!(count, 1); // Unlock - semaphore should be available again match sem_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(guard) => guard }; } } #[test] fn unlock_next_when_notification_is_not_used() { for is_fair in &[true, false] { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(*is_fair, 2); let guard1 = sem.try_acquire(2).unwrap(); // The second and third acquire attempt must fail let mut sem_fut2 = Box::pin(sem.acquire(1)); let mut sem_fut3 = Box::pin(sem.acquire(1)); assert!(sem_fut2.as_mut().poll(cx).is_pending()); assert!(!sem_fut2.as_mut().is_terminated()); assert!(sem_fut3.as_mut().poll(cx).is_pending()); assert!(!sem_fut3.as_mut().is_terminated()); assert_eq!(count, 0); // Release - semaphore should be available again. fut2 should have been notified drop(guard1); if *is_fair { assert_eq!(count, 1); } else { assert_eq!(count, 2); } // We don't use the notification. 
Expect the next waiting task to be woken up drop(sem_fut2); assert_eq!(count, 2); match sem_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(guard) => guard }; } } #[test] fn new_waiters_on_unfair_semaphore_can_acquire_future_while_one_task_is_notified() { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(false, 3); // Acquire the semaphore let guard1 = sem.try_acquire(3).unwrap(); // The second and third acquire attempt must fail let mut sem_fut2 = Box::pin(sem.acquire(3)); let mut sem_fut3 = Box::pin(sem.acquire(3)); assert!(sem_fut2.as_mut().poll(cx).is_pending()); // Release - Semaphore should be available again. fut2 should have been notified drop(guard1); assert_eq!(count, 1); // Acquire fut3 in between. This should succeed let guard3 = match sem_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(guard) => guard }; // Now fut2 can't use it's notification and is still pending assert!(sem_fut2.as_mut().poll(cx).is_pending()); // When we drop fut3, the semaphore should signal that it's available for fut2, // which needs to have re-registered drop(guard3); assert_eq!(count, 2); match sem_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(_guard) => {}, }; } #[test] fn waiters_on_unfair_semaphore_can_acquire_future_through_repolling_if_one_task_is_notified() { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(false, 3); // Acquire the semaphore let guard1 = sem.try_acquire(3).unwrap(); // The second and third acquire attempt must fail let mut sem_fut2 = Box::pin(sem.acquire(3)); let mut sem_fut3 = Box::pin(sem.acquire(3)); // Start polling both futures, which means both are waiters assert!(sem_fut2.as_mut().poll(cx).is_pending()); assert!(sem_fut3.as_mut().poll(cx).is_pending()); // Release - 
semaphore should be available again. fut2 should have been notified drop(guard1); assert_eq!(count, 1); // Acquire fut3 in between. This should succeed let guard3 = match sem_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(guard) => guard }; // Now fut2 can't use it's notification and is still pending assert!(sem_fut2.as_mut().poll(cx).is_pending()); // When we drop fut3, the mutex should signal that it's available for fut2, // which needs to have re-registered drop(guard3); assert_eq!(count, 2); match sem_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(_guard) => {}, }; } #[test] fn new_waiters_on_fair_semaphore_cant_acquire_future_while_one_task_is_notified() { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(true, 3); // Acquire the semaphore let guard1 = sem.try_acquire(3).unwrap(); // The second and third acquire attempt must fail let mut sem_fut2 = Box::pin(sem.acquire(3)); let mut sem_fut3 = Box::pin(sem.acquire(3)); assert!(sem_fut2.as_mut().poll(cx).is_pending()); // Release - semaphore should be available again. fut2 should have been notified drop(guard1); assert_eq!(count, 1); // Try to acquire fut3 in between. 
This should fail assert!(sem_fut3.as_mut().poll(cx).is_pending()); // fut2 should be be able to get acquired match sem_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(_guard) => {}, }; // Now fut3 should have been signaled and should be able to get acquired assert_eq!(count, 2); match sem_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(_guard) => {}, }; } #[test] fn waiters_on_fair_semaphore_cant_acquire_future_through_repolling_if_one_task_is_notified() { let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let sem = $semaphore_type::new(true, 3); // Acquire the semaphore let guard1 = sem.try_acquire(3).unwrap(); // The second and third acquire attempt must fail let mut sem_fut2 = Box::pin(sem.acquire(3)); let mut sem_fut3 = Box::pin(sem.acquire(3)); assert!(sem_fut2.as_mut().poll(cx).is_pending()); assert!(sem_fut3.as_mut().poll(cx).is_pending()); // Release - semaphore should be available again. fut2 should have been notified drop(guard1); assert_eq!(count, 1); // Acquire fut3 in between. 
This should fail, since fut2 should get the permits first assert!(sem_fut3.as_mut().poll(cx).is_pending()); // fut2 should be acquired match sem_fut2.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(_guard) => {}, }; // Now fut3 should be able to get acquired assert_eq!(count, 2); match sem_fut3.as_mut().poll(cx) { Poll::Pending => panic!("Expect semaphore to get acquired"), Poll::Ready(_guard) => {}, }; } #[test] fn poll_from_multiple_executors() { for is_fair in &[true, false] { let (waker_1, count_1) = new_count_waker(); let (waker_2, count_2) = new_count_waker(); let sem = $semaphore_type::new(*is_fair, 3); // Acquire the semaphore let guard = sem.try_acquire(3).unwrap(); let fut = sem.acquire(1); pin_mut!(fut); let cx_1 = &mut Context::from_waker(&waker_1); let cx_2 = &mut Context::from_waker(&waker_2); assert!(fut.as_mut().poll(cx_1).is_pending()); assert!(fut.as_mut().poll(cx_2).is_pending()); drop(guard); assert_eq!(count_1, 0); assert_eq!(count_2, 1); assert!(fut.as_mut().poll(cx_2).is_ready()); assert!(fut.as_mut().is_terminated()); } } } } } gen_semaphore_tests!(local_semaphore_tests, LocalSemaphore); #[cfg(feature = "std")] mod if_std { use super::*; use futures::FutureExt; use futures_intrusive::sync::{Semaphore, SharedSemaphore}; gen_semaphore_tests!(semaphore_tests, Semaphore); gen_semaphore_tests!(shared_semaphore_tests, SharedSemaphore); fn is_send(_: &T) {} fn is_send_value(_: T) {} fn is_sync(_: &T) {} #[test] fn semaphore_futures_are_send() { let sem = Semaphore::new(true, 3); is_sync(&sem); { let wait_fut = sem.acquire(3); is_send(&wait_fut); pin_mut!(wait_fut); is_send(&wait_fut); let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); pin_mut!(wait_fut); let res = wait_fut.poll_unpin(cx); let releaser = match res { Poll::Ready(v) => v, Poll::Pending => panic!("Expected to be ready"), }; is_send(&releaser); is_send_value(releaser); } is_send_value(sem); } } 
futures-intrusive-0.5.0/tests/state_broadcast_channel.rs000064400000000000000000000432060072674642500217500ustar 00000000000000use futures::future::{FusedFuture, Future}; use futures::task::{Context, Poll}; use futures_intrusive::channel::{ ChannelSendError, LocalStateBroadcastChannel, StateId, }; use futures_test::task::{new_count_waker, panic_waker}; use pin_utils::pin_mut; macro_rules! gen_state_broadcast_tests { ($mod_name:ident, $channel_type:ident) => { mod $mod_name { use super::*; type ChannelType = $channel_type; fn assert_send(channel: &ChannelType, value: i32) { assert_eq!(Ok(()), channel.send(value)); } fn assert_receive_value( cx: &mut Context, receive_fut: &mut core::pin::Pin<&mut FutureType>, expected: T, ) -> StateId where FutureType: Future> + FusedFuture, T: PartialEq + core::fmt::Debug, { let id = match receive_fut.as_mut().poll(cx) { Poll::Pending => panic!("future is not ready"), Poll::Ready(None) => panic!("channel is closed"), Poll::Ready(Some((id, val))) => { if val != expected { panic!("Unexpected value {:?}", val); } id } }; assert!(receive_fut.as_mut().is_terminated()); id } fn assert_receive_closed( cx: &mut Context, receive_fut: &mut core::pin::Pin<&mut FutureType>, ) where FutureType: Future> + FusedFuture, T: PartialEq + core::fmt::Debug, { match receive_fut.as_mut().poll(cx) { Poll::Pending => panic!("future is not ready"), Poll::Ready(None) => {} Poll::Ready(Some(_)) => panic!("future has a value"), }; assert!(receive_fut.as_mut().is_terminated()); } macro_rules! 
assert_receive { ($cx:ident, $channel:expr, $expected: expr, $state_id: expr) => {{ let receive_fut = $channel.receive($state_id); pin_mut!(receive_fut); assert!(!receive_fut.as_mut().is_terminated()); assert_receive_value($cx, &mut receive_fut, $expected) }}; } #[test] fn close_status() { let channel = ChannelType::new(); assert!(channel.close().is_newly_closed()); assert!(channel.close().is_already_closed()); assert!(channel.close().is_already_closed()); assert!(channel.close().is_already_closed()); } #[test] fn send_on_closed_channel() { let channel = ChannelType::new(); assert!(channel.close().is_newly_closed()); assert_eq!(Err(ChannelSendError(5)), channel.send(5)); } #[test] fn close_unblocks_receive() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let fut = channel.receive(Default::default()); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = channel.receive(Default::default()); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert!(channel.close().is_newly_closed()); assert_eq!(count, 2); assert_receive_closed(cx, &mut fut); assert_receive_closed(cx, &mut fut2); } #[test] fn receive_after_send() { let channel = ChannelType::new(); let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); let mut state_id = StateId::new(); assert_send(&channel, 1); assert_send(&channel, 2); assert_receive!(cx, &channel, 2, state_id); state_id = assert_receive!(cx, &channel, 2, state_id); assert_send(&channel, 5); assert_send(&channel, 6); assert_send(&channel, 7); assert!(channel.close().is_newly_closed()); assert_receive!(cx, &channel, 7, state_id); assert_receive!(cx, &channel, 7, state_id); state_id = assert_receive!(cx, &channel, 7, state_id); let receive_fut = channel.receive(state_id); pin_mut!(receive_fut); assert_receive_closed(cx, &mut receive_fut); } #[test] fn try_receive() { let channel = ChannelType::new(); let state_id = 
StateId::new(); assert!(channel.try_receive(state_id).is_none()); assert_send(&channel, 0); let (state_id, _) = channel.try_receive(state_id).unwrap(); assert!(channel.try_receive(state_id).is_none()); } #[test] fn send_unblocks_receive() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let fut = channel.receive(Default::default()); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); let fut2 = channel.receive(Default::default()); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert_send(&channel, 99); assert_eq!(count, 2); let next_state_id = assert_receive_value(cx, &mut fut, 99); assert_eq!(next_state_id, assert_receive_value(cx, &mut fut2, 99)); } #[test] fn get_increasing_state_id() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let state_id_0 = StateId::new(); let fut01 = channel.receive(state_id_0); let fut02 = channel.receive(state_id_0); pin_mut!(fut01, fut02); assert!(fut01.as_mut().poll(cx).is_pending()); assert!(fut02.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); assert_send(&channel, 99); let state_id_1 = assert_receive_value(cx, &mut fut01, 99); assert_eq!(state_id_1, assert_receive_value(cx, &mut fut02, 99)); assert!(state_id_1 != state_id_0); let fut11 = channel.receive(state_id_1); let fut12 = channel.receive(state_id_1); pin_mut!(fut11, fut12); assert!(fut11.as_mut().poll(cx).is_pending()); assert!(fut12.as_mut().poll(cx).is_pending()); assert_eq!(count, 2); assert_send(&channel, 100); let state_id_2 = assert_receive_value(cx, &mut fut11, 100); assert_eq!(state_id_2, assert_receive_value(cx, &mut fut12, 100)); assert!(state_id_2 != state_id_1); let fut21 = channel.receive(state_id_2); let fut22 = channel.receive(state_id_2); pin_mut!(fut21, fut22); assert!(fut21.as_mut().poll(cx).is_pending()); 
assert!(fut22.as_mut().poll(cx).is_pending()); assert_eq!(count, 4); assert_send(&channel, 101); let state_id_3 = assert_receive_value(cx, &mut fut21, 101); assert_eq!(state_id_3, assert_receive_value(cx, &mut fut22, 101)); assert!(state_id_3 != state_id_2); let fut31 = channel.receive(state_id_3); pin_mut!(fut31); assert!(fut31.as_mut().poll(cx).is_pending()); assert!(channel.close().is_newly_closed()); assert_receive_closed(cx, &mut fut31); } #[test] fn get_same_element_for_same_state_id() { let channel = ChannelType::new(); let (waker, _count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let state_id = StateId::new(); assert_send(&channel, 1); let receive_fut = channel.receive(state_id); pin_mut!(receive_fut); let (state_id_21, val) = match receive_fut.as_mut().poll(cx) { Poll::Ready(Some(res)) => res, _ => panic!("future is not ready or closed"), }; assert_eq!(1, val); assert!(state_id != state_id_21); let receive_fut_2 = channel.receive(state_id); pin_mut!(receive_fut_2); let (state_id_22, val) = match receive_fut_2.as_mut().poll(cx) { Poll::Ready(Some(res)) => res, _ => panic!("future is not ready or closed"), }; assert_eq!(1, val); assert!(state_id != state_id_22); assert_eq!(state_id_21, state_id_22); } #[test] fn cancel_receive_mid_wait() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); { let mut poll1 = Box::pin(channel.receive(Default::default())); let mut poll2 = Box::pin(channel.receive(Default::default())); let mut poll3 = Box::pin(channel.receive(Default::default())); let mut poll4 = Box::pin(channel.receive(Default::default())); let mut poll5 = Box::pin(channel.receive(Default::default())); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(!poll1.is_terminated()); 
assert!(!poll2.is_terminated()); assert!(!poll3.is_terminated()); assert!(!poll4.is_terminated()); assert!(!poll5.is_terminated()); // Cancel 2 futures. Only the remaining ones should get completed drop(poll2); drop(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_send(&channel, 1); assert_eq!(count, 3); assert_receive_value(cx, &mut poll1.as_mut(), 1); assert_receive_value(cx, &mut poll3.as_mut(), 1); assert_receive_value(cx, &mut poll5.as_mut(), 1); } } #[test] fn cancel_receive_end_wait() { let channel = ChannelType::new(); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let poll1 = channel.receive(Default::default()); let poll2 = channel.receive(Default::default()); let poll3 = channel.receive(Default::default()); let poll4 = channel.receive(Default::default()); pin_mut!(poll1); pin_mut!(poll2); pin_mut!(poll3); pin_mut!(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); // Start polling some wait handles which get cancelled // before new ones are attached { let poll5 = channel.receive(Default::default()); let poll6 = channel.receive(Default::default()); pin_mut!(poll5); pin_mut!(poll6); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(poll6.as_mut().poll(cx).is_pending()); } assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert_send(&channel, 0); assert_send(&channel, 1); assert_send(&channel, 2); assert_receive_value(cx, &mut poll1, 2); assert_receive_value(cx, &mut poll2, 2); assert_receive_value(cx, &mut poll3, 2); assert_send(&channel, 3); assert_receive_value(cx, &mut poll4, 3); assert_eq!(count, 4); } #[test] fn poll_from_multiple_executors() { let (waker_1, count_1) = new_count_waker(); let (waker_2, count_2) = new_count_waker(); let channel = ChannelType::new(); let cx_1 = &mut Context::from_waker(&waker_1); 
let cx_2 = &mut Context::from_waker(&waker_2); let fut = channel.receive(Default::default()); pin_mut!(fut); assert!(fut.as_mut().poll(cx_1).is_pending()); assert!(fut.as_mut().poll(cx_2).is_pending()); assert_send(&channel, 99); assert_eq!(count_1, 0); assert_eq!(count_2, 1); let _next_state_id = assert_receive_value(cx_2, &mut fut, 99); } } }; } gen_state_broadcast_tests!( local_state_broadcast_channel_tests, LocalStateBroadcastChannel ); #[cfg(feature = "std")] mod if_std { use super::*; use futures_intrusive::channel::{ shared::state_broadcast_channel, StateBroadcastChannel, }; gen_state_broadcast_tests!( state_broadcast_channel_tests, StateBroadcastChannel ); fn is_send(_: &T) {} fn is_send_value(_: T) {} fn is_sync(_: &T) {} #[test] fn channel_futures_are_send() { let channel = StateBroadcastChannel::::new(); is_sync(&channel); { let state_id = StateId::new(); let recv_fut = channel.receive(state_id); is_send(&recv_fut); pin_mut!(recv_fut); is_send(&recv_fut); let send_fut = channel.send(3); is_send(&send_fut); pin_mut!(send_fut); is_send(&send_fut); } is_send_value(channel); } #[test] fn shared_channel_futures_are_send() { let (sender, receiver) = state_broadcast_channel::(); is_sync(&sender); is_sync(&receiver); is_send_value(sender.clone()); is_send_value(receiver.clone()); let state_id = StateId::new(); let recv_fut = receiver.receive(state_id); is_send(&recv_fut); pin_mut!(recv_fut); is_send(&recv_fut); let send_fut = sender.send(3); is_send(&send_fut); pin_mut!(send_fut); is_send(&send_fut); } #[test] fn dropping_shared_channel_senders_closes_channel() { let (waker, _) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let state_id = StateId::new(); let (sender, receiver) = state_broadcast_channel::(); let sender2 = sender.clone(); let receiver2 = receiver.clone(); let fut = receiver.receive(state_id); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); let fut2 = receiver2.receive(state_id); pin_mut!(fut2); 
assert!(fut2.as_mut().poll(cx).is_pending()); drop(sender); assert!(fut.as_mut().poll(cx).is_pending()); assert!(fut2.as_mut().poll(cx).is_pending()); drop(sender2); match fut.as_mut().poll(cx) { Poll::Ready(None) => {} Poll::Ready(Some(_)) => panic!("Expected no value"), Poll::Pending => panic!("Expected channel to be closed"), } match fut2.as_mut().poll(cx) { Poll::Ready(None) => {} Poll::Ready(Some(_)) => panic!("Expected no value"), Poll::Pending => panic!("Expected channel to be closed"), } } #[test] fn dropping_shared_channel_receivers_closes_channel() { let (sender, receiver) = state_broadcast_channel::(); let sender2 = sender.clone(); let receiver2 = receiver.clone(); drop(receiver); assert_eq!(Ok(()), sender.send(5)); assert_eq!(Ok(()), sender2.send(7)); drop(receiver2); assert_eq!(Err(ChannelSendError(5)), sender.send(5)); assert_eq!(Err(ChannelSendError(7)), sender2.send(7)); } #[test] fn try_receive() { let (sender, receiver) = state_broadcast_channel::(); let state_id = StateId::new(); assert!(receiver.try_receive(state_id).is_none()); sender.send(1).unwrap(); let (state_id, _) = receiver.try_receive(state_id).unwrap(); assert!(receiver.try_receive(state_id).is_none()); } } futures-intrusive-0.5.0/tests/timer.rs000064400000000000000000000237620072674642500162430ustar 00000000000000use core::time::Duration; use futures::future::{FusedFuture, Future}; use futures::task::Context; use futures_intrusive::timer::{LocalTimerService, MockClock}; use futures_test::task::{new_count_waker, panic_waker}; use pin_utils::pin_mut; macro_rules! 
gen_timer_tests { ($mod_name:ident, $timer_type:ident, $timer_trait_type:ident) => { mod $mod_name { use super::*; use futures_intrusive::timer::$timer_trait_type; #[test] fn start_and_expire_timers() { static TEST_CLOCK: MockClock = MockClock::new(); TEST_CLOCK.set_time(200); let timer = $timer_type::new(&TEST_CLOCK); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); assert!(timer.next_expiration().is_none()); let fut = timer.deadline(999); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); assert_eq!(Some(999), timer.next_expiration()); let fut2 = timer.delay(Duration::from_millis(300)); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_pending()); assert_eq!(Some(500), timer.next_expiration()); let fut3 = timer.delay(Duration::from_millis(500)); pin_mut!(fut3); assert!(fut3.as_mut().poll(cx).is_pending()); assert_eq!(Some(500), timer.next_expiration()); TEST_CLOCK.set_time(500); timer.check_expirations(); assert_eq!(count, 1); assert!(fut.as_mut().poll(cx).is_pending()); assert!(fut2.as_mut().poll(cx).is_ready()); assert!(fut3.as_mut().poll(cx).is_pending()); assert_eq!(Some(700), timer.next_expiration()); TEST_CLOCK.set_time(699); timer.check_expirations(); assert_eq!(count, 1); TEST_CLOCK.set_time(700); timer.check_expirations(); assert_eq!(count, 2); assert!(fut.as_mut().poll(cx).is_pending()); assert!(fut3.as_mut().poll(cx).is_ready()); assert_eq!(Some(999), timer.next_expiration()); TEST_CLOCK.set_time(1000); timer.check_expirations(); assert_eq!(count, 3); assert!(fut.as_mut().poll(cx).is_ready()); assert_eq!(None, timer.next_expiration()); } #[test] fn immediately_ready_timer() { static TEST_CLOCK: MockClock = MockClock::new(); TEST_CLOCK.set_time(400); let timer = $timer_type::new(&TEST_CLOCK); let waker = &panic_waker(); let cx = &mut Context::from_waker(&waker); let fut = timer.delay(Duration::from_millis(0)); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_ready()); for ts in 389..=400 { let fut2 = 
timer.deadline(ts); pin_mut!(fut2); assert!(fut2.as_mut().poll(cx).is_ready()); } } #[test] fn can_use_timer_as_trait_object() { static TEST_CLOCK: MockClock = MockClock::new(); TEST_CLOCK.set_time(340); let timer = $timer_type::new(&TEST_CLOCK); let (waker, _count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let mut inner = |dyn_timer: &dyn $timer_trait_type| { let fut = dyn_timer.delay(Duration::from_millis(10)); pin_mut!(fut); assert!(fut.as_mut().poll(cx).is_pending()); TEST_CLOCK.set_time(350); timer.check_expirations(); assert!(fut.as_mut().poll(cx).is_ready()); }; inner(&timer); } #[test] fn cancel_mid_wait() { static TEST_CLOCK: MockClock = MockClock::new(); TEST_CLOCK.set_time(1300); let timer = $timer_type::new(&TEST_CLOCK); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); { // Cancel a wait in between other waits // In order to arbitrarily drop a non movable future we have to box and pin it let mut poll1 = Box::pin(timer.deadline(1400)); let mut poll2 = Box::pin(timer.deadline(1500)); let mut poll3 = Box::pin(timer.deadline(1600)); let mut poll4 = Box::pin(timer.deadline(1700)); let mut poll5 = Box::pin(timer.deadline(1800)); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(!poll1.is_terminated()); assert!(!poll2.is_terminated()); assert!(!poll3.is_terminated()); assert!(!poll4.is_terminated()); assert!(!poll5.is_terminated()); // Cancel 2 futures. 
Only the remaining ones should get completed drop(poll2); drop(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll5.as_mut().poll(cx).is_pending()); assert_eq!(count, 0); TEST_CLOCK.set_time(1800); timer.check_expirations(); assert!(poll1.as_mut().poll(cx).is_ready()); assert!(poll3.as_mut().poll(cx).is_ready()); assert!(poll5.as_mut().poll(cx).is_ready()); assert!(poll1.is_terminated()); assert!(poll3.is_terminated()); assert!(poll5.is_terminated()); } assert_eq!(count, 3); } #[test] fn cancel_end_wait() { static TEST_CLOCK: MockClock = MockClock::new(); TEST_CLOCK.set_time(2300); let timer = $timer_type::new(&TEST_CLOCK); let (waker, count) = new_count_waker(); let cx = &mut Context::from_waker(&waker); let poll1 = timer.deadline(2400); let poll2 = timer.deadline(2500); let poll3 = timer.deadline(2600); let poll4 = timer.deadline(2700); pin_mut!(poll1); pin_mut!(poll2); pin_mut!(poll3); pin_mut!(poll4); assert!(poll1.as_mut().poll(cx).is_pending()); assert!(poll2.as_mut().poll(cx).is_pending()); // Start polling some wait handles which get cancelled // before new ones are attached { let poll5 = timer.deadline(2350); let poll6 = timer.deadline(2650); pin_mut!(poll5); pin_mut!(poll6); assert!(poll5.as_mut().poll(cx).is_pending()); assert!(poll6.as_mut().poll(cx).is_pending()); } assert!(poll3.as_mut().poll(cx).is_pending()); assert!(poll4.as_mut().poll(cx).is_pending()); TEST_CLOCK.set_time(2700); timer.check_expirations(); assert!(poll1.as_mut().poll(cx).is_ready()); assert!(poll2.as_mut().poll(cx).is_ready()); assert!(poll3.as_mut().poll(cx).is_ready()); assert!(poll4.as_mut().poll(cx).is_ready()); assert_eq!(count, 4); } #[test] fn poll_from_multiple_executors() { static TEST_CLOCK: MockClock = MockClock::new(); TEST_CLOCK.set_time(2300); let timer = $timer_type::new(&TEST_CLOCK); let (waker_1, count_1) = new_count_waker(); let (waker_2, count_2) = new_count_waker(); let cx_1 = &mut 
Context::from_waker(&waker_1); let cx_2 = &mut Context::from_waker(&waker_2); let fut = timer.deadline(2400); pin_mut!(fut); assert!(fut.as_mut().poll(cx_1).is_pending()); assert!(fut.as_mut().poll(cx_2).is_pending()); TEST_CLOCK.set_time(2700); timer.check_expirations(); assert_eq!(count_1, 0); assert_eq!(count_2, 1); assert!(fut.as_mut().poll(cx_2).is_ready()); assert!(fut.as_mut().is_terminated()); } } }; } gen_timer_tests!(local_timer_service_tests, LocalTimerService, LocalTimer); #[cfg(feature = "std")] mod if_std { use super::*; use futures_intrusive::timer::{Timer, TimerService}; gen_timer_tests!(timer_service_tests, TimerService, Timer); fn is_send(_: &T) {} fn is_send_value(_: T) {} fn is_sync(_: &T) {} #[test] fn timer_futures_are_send() { static TEST_CLOCK: MockClock = MockClock::new(); TEST_CLOCK.set_time(2300); let timer = TimerService::new(&TEST_CLOCK); is_sync(&timer); { let deadline = timer.deadline(2400); is_send(&deadline); pin_mut!(deadline); is_send(&deadline); let delay_fut = timer.delay(Duration::from_millis(1000)); is_send(&delay_fut); pin_mut!(delay_fut); is_send(&delay_fut); } is_send_value(timer); } }