atomic_float-1.1.0/.cargo_vcs_info.json0000644000000001360000000000100134740ustar { "git": { "sha1": "691277c8553b6709507d7bd8d6d411e0ec3e7a2b" }, "path_in_vcs": "" }atomic_float-1.1.0/.github/workflows/ci.yml000064400000000000000000000043761046102023000170110ustar 00000000000000name: CI on: pull_request: push: branches: - main env: RUST_BACKTRACE: 1 jobs: test: name: Test Rust ${{ matrix.rust }} on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: include: - { rust: 1.60.0, os: ubuntu-latest } - { rust: stable, os: ubuntu-latest } - { rust: stable, os: macos-latest } - { rust: stable, os: windows-latest } - { rust: stable-i686-msvc, os: windows-latest } - { rust: beta, os: ubuntu-latest } - { rust: nightly, os: ubuntu-latest } steps: - uses: actions/checkout@v4 - uses: hecrj/setup-rust-action@v2 with: rust-version: ${{ matrix.rust }} - run: cargo test --verbose --workspace cross-test: name: Test on ${{ matrix.target }} (using cross) runs-on: ubuntu-latest strategy: fail-fast: false matrix: target: - i686-unknown-linux-gnu - armv7-linux-androideabi - aarch64-unknown-linux-gnu - powerpc-unknown-linux-gnu - powerpc64-unknown-linux-gnu steps: - uses: actions/checkout@v4 - uses: hecrj/setup-rust-action@v2 - run: cargo install cross - run: cross test --verbose --target=${{ matrix.target }} check: name: Check warnings runs-on: ubuntu-latest env: RUSTFLAGS: -Dwarnings steps: - uses: actions/checkout@v4 - uses: hecrj/setup-rust-action@v2 - run: cargo check --workspace --all-targets --verbose rustfmt: name: Verify code formatting runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: hecrj/setup-rust-action@v2 with: components: rustfmt - run: cargo fmt --all -- --check codecov-tarpaulin: name: coverage runs-on: ubuntu-latest container: image: xd009642/tarpaulin:develop-nightly options: --security-opt seccomp=unconfined steps: - uses: actions/checkout@v4 - run: cargo tarpaulin --verbose --doc --all-features --all-targets --engine llvm --out xml - uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} atomic_float-1.1.0/.gitignore000064400000000000000000000000231046102023000142470ustar 00000000000000/target Cargo.lock atomic_float-1.1.0/Cargo.toml0000644000000026710000000000100115000ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package]
edition = "2021"
rust-version = "1.60.0"
name = "atomic_float"
version = "1.1.0"
authors = ["Thom Chiovoloni "]
build = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Floating point types which can be safely shared between threads"
homepage = "https://github.com/thomcc/atomic_float"
documentation = "https://docs.rs/atomic_float"
readme = "README.md"
keywords = [
    "atomic",
    "float",
    "atomicf32",
    "atomicf64",
]
categories = [
    "concurrency",
    "data-structures",
    "no-std",
    "rust-patterns",
]
license = "Apache-2.0 OR MIT OR Unlicense"
repository = "https://github.com/thomcc/atomic_float"

[lib]
name = "atomic_float"
path = "src/lib.rs"

[[test]]
name = "test"
path = "tests/test.rs"

[dependencies.serde]
version = "1"
optional = true
default-features = false

[dev-dependencies.serde_test]
version = "1"
default-features = false

[features]
atomic_f64 = []
default = ["atomic_f64"]
atomic_float-1.1.0/Cargo.toml.orig000064400000000000000000000014251046102023000151550ustar 00000000000000[package]
name = "atomic_float"
version = "1.1.0"
authors = ["Thom Chiovoloni "]
edition = "2021"
rust-version = "1.60.0"
license = "Apache-2.0 OR MIT OR Unlicense"
readme = "README.md"
description = "Floating point types which can be safely shared between threads"
keywords = ["atomic", "float", "atomicf32", "atomicf64"]
categories = ["concurrency", "data-structures", "no-std", "rust-patterns"]
repository = "https://github.com/thomcc/atomic_float"
documentation = "https://docs.rs/atomic_float"
homepage = "https://github.com/thomcc/atomic_float"

[features]
default = ["atomic_f64"]
atomic_f64 = []

[dependencies]
serde = { version = "1", optional = true, default-features = false }

[dev-dependencies]
serde_test = { version = "1", default-features = false }
atomic_float-1.1.0/LICENSE-APACHE000064400000000000000000000251261046102023000142160ustar 00000000000000Apache License, Version 2.0, January 2004, http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2024 Thom Chiovoloni Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. atomic_float-1.1.0/LICENSE-MIT000064400000000000000000000020431046102023000137170ustar 00000000000000Copyright (c) 2024 Thom Chiovoloni Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
atomic_float-1.1.0/README.md000064400000000000000000000026051046102023000135460ustar 00000000000000# `atomic_float`

[![Build Status](https://github.com/thomcc/atomic_float/workflows/CI/badge.svg)](https://github.com/thomcc/atomic_float/actions)
[![codecov](https://codecov.io/gh/thomcc/atomic_float/branch/main/graph/badge.svg)](https://codecov.io/gh/thomcc/atomic_float)
[![Docs](https://docs.rs/atomic_float/badge.svg)](https://docs.rs/atomic_float)
[![Latest Version](https://img.shields.io/crates/v/atomic_float.svg)](https://crates.io/crates/atomic_float)

This crate provides `AtomicF32` and `AtomicF64` types that behave almost
identically to the integer atomics in the stdlib.

## Usage

```rust
use atomic_float::AtomicF32;
use core::sync::atomic::Ordering::Relaxed;

static A_STATIC: AtomicF32 = AtomicF32::new(800.0);

// Should support the full std::sync::atomic::AtomicFoo API
A_STATIC.fetch_add(30.0, Relaxed);
A_STATIC.fetch_sub(-55.0, Relaxed);

// But also supports things that can be implemented
// efficiently and easily, like sign-bit operations.
A_STATIC.fetch_neg(Relaxed);

assert_eq!(A_STATIC.load(Relaxed), -885.0);
```

## License

Licensed under either of

- Apache License, Version 2.0, ([LICENSE-APACHE](./LICENSE-APACHE) or
  http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](./LICENSE-MIT) or
  http://opensource.org/licenses/MIT)
- Public domain, as explained by the Unlicense ([UNLICENSE](./UNLICENSE) or
  http://opensource.org/licenses/Unlicense)

at your option.
atomic_float-1.1.0/UNLICENSE000064400000000000000000000022721046102023000135370ustar 00000000000000This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means.

In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
atomic_float-1.1.0/src/atomic_f32.rs000064400000000000000000000721231046102023000153540ustar 00000000000000use core::cell::UnsafeCell;
use core::sync::atomic::{
    AtomicU32,
    Ordering::{self, *},
};

/// A floating point type which can be safely shared between threads.
///
/// This type has the same in-memory representation as the underlying floating
/// point type, [`f32`].
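///
/// For example, the layout claim can be checked directly; these equalities
/// follow from the `#[repr(transparent)]` wrapper:
///
/// ```
/// # use atomic_float::AtomicF32;
/// assert_eq!(core::mem::size_of::<AtomicF32>(), core::mem::size_of::<f32>());
/// assert_eq!(core::mem::align_of::<AtomicF32>(), core::mem::align_of::<f32>());
/// ```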
///
/// See the module documentation for [core::sync::atomic] for information about
/// the portability of various atomics (this one is mostly as portable as
/// [`AtomicU32`](core::sync::atomic::AtomicU32), with the caveat that we
/// additionally require that the platform supports 32-bit floats).
///
/// # Example
///
/// The intended use case is situations like this:
///
/// ```
/// # use atomic_float::AtomicF32;
/// # use std::sync::atomic::Ordering;
/// static DELTA_TIME: AtomicF32 = AtomicF32::new(1.0);
///
/// // In some main simulation loop:
/// # fn compute_delta_time() -> f32 { 1.0 / 60.0 }
/// DELTA_TIME.store(compute_delta_time(), Ordering::Release);
///
/// // elsewhere, perhaps on other threads:
/// let dt = DELTA_TIME.load(Ordering::Acquire);
/// // Use `dt` to compute simulation...
/// ```
///
/// As well as any other cases where the overhead of locking would be too
/// substantial.
///
/// Note that when used like this (with Acquire and Release orderings), on
/// x86_64 this compiles to the same code as you would get from a C++ global
/// (or a Rust `static mut`), while offering full synchronization.
///
/// (While caveats exist, the cases where you'd need the total order guaranteed
/// by [SeqCst](core::sync::atomic::Ordering::SeqCst) for something like
/// [`AtomicF32`] seem very rare).
///
/// # Implementation
///
/// Note: These details are not part of the stability guarantee of this crate,
/// and are subject to change without a semver-breaking change.
///
/// Under the hood we use a transparent `UnsafeCell<f32>`, and cast the
/// `&UnsafeCell<f32>` to an [`&AtomicU32`](core::sync::atomic::AtomicU32) in
/// order to perform atomic operations.
///
/// This means that we have the same ABI and layout as f32, and that some
/// operations have a minimal cost (for example: on x86 all operations of
/// equivalent or weaker ordering than `Release` stores/`Acquire` loads are
/// essentially equivalent to non-atomic f32).
///
/// However, operations like [`fetch_add`](AtomicF32::fetch_add) are
/// considerably slower than would be the case for integer atomics.
#[repr(transparent)]
pub struct AtomicF32(
    // FIXME: Once we can do `f32::from_bits` in const fn, this should be an
    // `AtomicU32` (or at least `UnsafeCell<u32>`).
    UnsafeCell<f32>,
);

// SAFETY: We only ever access the underlying data by refcasting to AtomicU32,
// which guarantees no data races.
unsafe impl Send for AtomicF32 {}
unsafe impl Sync for AtomicF32 {}

// Static assertions that the layout is identical; we cite these in a safety
// comment in `AtomicF32::as_atomic_bits()`. Note that the alignment check is
// stricter than we need, as it would still be safe if `AtomicU32` is less
// strictly aligned than our `f32`. Unlike with `AtomicF64`, this is unlikely
// to occur.
const _: [(); core::mem::size_of::<AtomicU32>()] = [(); core::mem::size_of::<UnsafeCell<f32>>()];
const _: [(); core::mem::align_of::<AtomicU32>()] = [(); core::mem::align_of::<UnsafeCell<f32>>()];

impl AtomicF32 {
    /// Initialize an `AtomicF32` from an `f32`.
/// /// # Example /// /// Use as a variable /// /// ``` /// # use atomic_float::AtomicF32; /// # use std::sync::atomic::Ordering::Relaxed; /// let x = AtomicF32::new(3.0f32); /// assert_eq!(x.load(Relaxed), 3.0f32); /// ``` /// /// Use as a static: /// /// ``` /// # use atomic_float::AtomicF32; /// # use std::sync::atomic::Ordering::Relaxed; /// static A_STATIC: AtomicF32 = AtomicF32::new(800.0); /// assert_eq!(A_STATIC.load(Relaxed), 800.0); /// ``` #[inline] pub const fn new(float: f32) -> Self { Self(UnsafeCell::new(float)) } /// Returns a mutable reference to the underlying float. /// /// This is safe because the mutable reference guarantees that no other /// threads are concurrently accessing the atomic data. /// /// # Example /// /// ``` /// # use atomic_float::AtomicF32; /// # use std::sync::atomic::Ordering; /// let mut some_float = AtomicF32::new(1.0); /// assert_eq!(*some_float.get_mut(), 1.0); /// *some_float.get_mut() += 1.0; /// assert_eq!(some_float.load(Ordering::SeqCst), 2.0); /// ``` #[inline] pub fn get_mut(&mut self) -> &mut f32 { // SAFETY: the mutable reference guarantees unique ownership. unsafe { &mut *self.0.get() } } /// Consumes the atomic and returns the contained value. /// /// This is safe because passing `self` by value guarantees that no other /// threads are concurrently accessing the atomic data. /// /// # Example /// /// ``` /// # use atomic_float::AtomicF32; /// let v = AtomicF32::new(6.0); /// assert_eq!(v.into_inner(), 6.0f32); /// ``` #[inline] pub fn into_inner(self) -> f32 { self.0.into_inner() } /// Loads a value from the atomic float. /// /// `load` takes an [`Ordering`] argument which describes the memory /// ordering of this operation. Possible values are [`SeqCst`], [`Acquire`] /// and [`Relaxed`]. /// /// # Panics /// /// Panics if `ordering` is [`Release`] or [`AcqRel`]. /// /// [`Ordering`]: core::sync::atomic::Ordering /// [`Relaxed`]: core::sync::atomic::Ordering::Relaxed /// [`Release`]: core::sync::atomic::Ordering::Release /// [`Acquire`]: core::sync::atomic::Ordering::Acquire /// [`AcqRel`]: core::sync::atomic::Ordering::AcqRel /// [`SeqCst`]: core::sync::atomic::Ordering::SeqCst /// /// # Example /// /// ``` /// use atomic_float::AtomicF32; /// use std::sync::atomic::Ordering; /// let v = AtomicF32::new(22.5); /// assert_eq!(v.load(Ordering::SeqCst), 22.5); /// ``` #[inline] pub fn load(&self, ordering: Ordering) -> f32 { f32::from_bits(self.as_atomic_bits().load(ordering)) } /// Store a value into the atomic float. /// /// `store` takes an [`Ordering`] argument which describes the memory /// ordering of this operation. Possible values are [`SeqCst`], [`Release`] /// and [`Relaxed`]. /// /// # Panics /// /// Panics if `ordering` is [`Acquire`] or [`AcqRel`]. /// /// [`Ordering`]: core::sync::atomic::Ordering /// [`Relaxed`]: core::sync::atomic::Ordering::Relaxed /// [`Release`]: core::sync::atomic::Ordering::Release /// [`Acquire`]: core::sync::atomic::Ordering::Acquire /// [`AcqRel`]: core::sync::atomic::Ordering::AcqRel /// [`SeqCst`]: core::sync::atomic::Ordering::SeqCst /// /// # Example /// /// ``` /// use atomic_float::AtomicF32; /// use std::sync::atomic::Ordering; /// let v = AtomicF32::new(22.5); /// assert_eq!(v.load(Ordering::SeqCst), 22.5); /// v.store(30.0, Ordering::SeqCst); /// assert_eq!(v.load(Ordering::SeqCst), 30.0); /// ``` #[inline] pub fn store(&self, value: f32, ordering: Ordering) { self.as_atomic_bits().store(value.to_bits(), ordering); } /// Stores a value into the atomic float, returning the previous value. 
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. All ordering modes are possible. Note that
    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
    /// and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    /// [`Relaxed`]: core::sync::atomic::Ordering::Relaxed
    /// [`Release`]: core::sync::atomic::Ordering::Release
    /// [`Acquire`]: core::sync::atomic::Ordering::Acquire
    ///
    /// # Example
    ///
    /// ```
    /// use atomic_float::AtomicF32;
    /// use std::sync::atomic::Ordering;
    /// let v = AtomicF32::new(4.5);
    /// assert_eq!(v.swap(100.0, Ordering::Relaxed), 4.5);
    /// assert_eq!(v.load(Ordering::Relaxed), 100.0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn swap(&self, new_value: f32, ordering: Ordering) -> f32 {
        f32::from_bits(self.as_atomic_bits().swap(new_value.to_bits(), ordering))
    }

    /// Stores a value into the atomic float if the current value is *bitwise
    /// identical* to the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to
    /// `current`, then the value was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes
    /// the memory ordering of this operation. Notice that even when using
    /// `AcqRel`, the operation might fail and hence just perform an `Acquire`
    /// load, but not have `Release` semantics. Using `Acquire` makes the store
    /// part of this operation `Relaxed` if it happens, and using `Release`
    /// makes the load part `Relaxed`.
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    ///
    /// # Caveats
    ///
    /// As the `current` must be bitwise identical to the previous value, you
    /// should not get the `current` value using any sort of arithmetic (both
    /// because of rounding, and to avoid any situation where -0.0 and +0.0
    /// would be compared). Additionally, on some platforms (WASM and ASM.js
    /// currently) LLVM will canonicalize NaNs during loads, which can cause
    /// unexpected behavior here — typically in the other direction (two values
    /// being unexpectedly equal).
    ///
    /// In practice, typical patterns for CaS tend to avoid these issues, but
    /// you're encouraged to avoid relying on the behavior of cas-family APIs in
    /// the face of rounding, signed zero, and NaNs.
    ///
    /// # Example
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering;
    /// let v = AtomicF32::new(5.0);
    /// assert_eq!(v.compare_and_swap(5.0, 10.0, Ordering::Relaxed), 5.0);
    /// assert_eq!(v.load(Ordering::Relaxed), 10.0);
    ///
    /// assert_eq!(v.compare_and_swap(6.0, 12.0, Ordering::Relaxed), 10.0);
    /// assert_eq!(v.load(Ordering::Relaxed), 10.0);
    /// ```
    #[inline]
    #[allow(deprecated)]
    #[cfg(target_has_atomic = "32")]
    pub fn compare_and_swap(&self, current: f32, new: f32, order: Ordering) -> f32 {
        f32::from_bits(self.as_atomic_bits().compare_and_swap(
            current.to_bits(),
            new.to_bits(),
            order,
        ))
    }

    /// Stores a value into the atomic float if the current value is bitwise
    /// identical to the `current` value.
    ///
    /// The return value is a result indicating whether the new value was
    /// written and containing the previous value. On success this value is
    /// guaranteed to be bitwise identical to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the
    /// memory ordering of this operation. The first describes the required
    /// ordering if the operation succeeds while the second describes the
    /// required ordering when the operation fails.
    /// Using `Acquire` as success ordering makes the store part of this
    /// operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or
    /// `Relaxed` and must be equivalent to or weaker than the success ordering.
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    ///
    /// # Notes
    ///
    /// Note that in many cases, `while` loops where the condition contains a
    /// `compare_exchange` operation are better written to use a
    /// [`compare_exchange_weak`](AtomicF32::compare_exchange_weak) in the
    /// condition instead (as on weakly ordered platforms like ARM, the
    /// `compare_exchange` operation itself can require a loop to perform).
    ///
    /// ## Caveats
    ///
    /// As the `current` parameter must be bitwise identical to the previous
    /// value, you should not get the `current` value using any sort of
    /// arithmetic (both because of rounding, and to avoid any situation where
    /// -0.0 and +0.0 would be compared). Additionally, on Wasm, in some cases
    /// `NaN` values have been known to cause problems for non-typical usage of
    /// this API. See [`AtomicF32::as_atomic_bits`] if performing the
    /// `compare_exchange` on the raw bits of this atomic float would solve an
    /// issue for you.
    ///
    /// # Example
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::*;
    /// let v = AtomicF32::new(5.0);
    /// assert_eq!(v.compare_exchange(5.0, 10.0, Acquire, Relaxed), Ok(5.0));
    /// assert_eq!(v.load(Relaxed), 10.0);
    ///
    /// assert_eq!(v.compare_exchange(6.0, 12.0, SeqCst, Relaxed), Err(10.0));
    /// assert_eq!(v.load(Relaxed), 10.0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn compare_exchange(
        &self,
        current: f32,
        new: f32,
        success: Ordering,
        failure: Ordering,
    ) -> Result<f32, f32> {
        convert_result(self.as_atomic_bits().compare_exchange(
            current.to_bits(),
            new.to_bits(),
            success,
            failure,
        ))
    }

    /// Stores a value into the atomic float if the current value is bitwise
    /// identical to the `current` value.
    ///
    /// Unlike [`compare_exchange`](Self::compare_exchange), this function is
    /// allowed to spuriously fail even when the comparison succeeds, which can
    /// result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the
    /// memory ordering of this operation. The first describes the required
    /// ordering if the operation succeeds while the second describes the
    /// required ordering when the operation fails. Using `Acquire` as success
    /// ordering makes the store part of this operation `Relaxed`, and using
    /// `Release` makes the successful load `Relaxed`. The failure ordering can
    /// only be `SeqCst`, `Acquire` or `Relaxed` and must be equivalent to or
    /// weaker than the success ordering.
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    ///
    /// ## Caveats
    ///
    /// As the `current` parameter must be bitwise identical to the previous
    /// value, you should not get the `current` value using any sort of
    /// arithmetic (both because of rounding, and to avoid any situation where
    /// -0.0 and +0.0 would be compared). Additionally, on Wasm, in some cases
    /// `NaN` values have been known to cause problems for non-typical usage of
    /// this API. See [`AtomicF32::as_atomic_bits`] if performing the
    /// `compare_exchange` on the raw bits of this atomic float would solve an
    /// issue for you.
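    ///
    /// If that NaN caveat matters for your use, one option (a sketch using
    /// only this crate's public API) is to perform the exchange on the raw
    /// bits, where no float round-trip (and therefore no NaN canonicalization)
    /// is involved:
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::*;
    /// let v = AtomicF32::new(f32::NAN);
    /// // Read and compare the `u32` representation directly.
    /// let bits = v.as_atomic_bits().load(Relaxed);
    /// assert!(v
    ///     .as_atomic_bits()
    ///     .compare_exchange(bits, 1.0f32.to_bits(), AcqRel, Acquire)
    ///     .is_ok());
    /// assert_eq!(v.load(Relaxed), 1.0);
    /// ```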
    ///
    /// # Example
    ///
    /// Note that this sort of CaS loop should generally use [`fetch_update`]
    /// instead.
    ///
    /// [`fetch_update`]: AtomicF32::fetch_update
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::*;
    /// let v = AtomicF32::new(5.0);
    /// let mut old = v.load(Relaxed);
    /// loop {
    ///     let new = old * 2.0;
    ///     match v.compare_exchange_weak(old, new, SeqCst, Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn compare_exchange_weak(
        &self,
        current: f32,
        new: f32,
        success: Ordering,
        failure: Ordering,
    ) -> Result<f32, f32> {
        convert_result(self.as_atomic_bits().compare_exchange_weak(
            current.to_bits(),
            new.to_bits(),
            success,
            failure,
        ))
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
    /// returned `Some(_)`, else `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been
    /// changed from other threads in the meantime, as long as the function
    /// returns `Some(_)`, but the function will have been applied only once to
    /// the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering
    /// for when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`compare_exchange`][AtomicF32::compare_exchange]
    /// respectively.
    ///
    /// Using `Acquire` as success ordering makes the store part of this
    /// operation `Relaxed`, and using `Release` makes the final successful load
    /// `Relaxed`. The (failed) load ordering can only be `SeqCst`, `Acquire` or
    /// `Relaxed` and must be equivalent to or weaker than the success ordering.
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    ///
    /// # Example
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF32::new(7.0);
    ///
    /// assert_eq!(x.fetch_update(SeqCst, SeqCst, |_| None), Err(7.0));
    /// assert_eq!(x.fetch_update(SeqCst, SeqCst, |x| Some(x + 1.0)), Ok(7.0));
    /// assert_eq!(x.fetch_update(SeqCst, SeqCst, |x| Some(x + 1.0)), Ok(8.0));
    /// assert_eq!(x.load(SeqCst), 9.0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut update: F,
    ) -> Result<f32, f32>
    where
        F: FnMut(f32) -> Option<f32>,
    {
        let res = self
            .as_atomic_bits()
            .fetch_update(set_order, fetch_order, |prev| {
                update(f32::from_bits(prev)).map(f32::to_bits)
            });
        convert_result(res)
    }

    /// A (nonstandard) convenience wrapper around [`fetch_update`](Self::fetch_update).
    ///
    /// A call like:
    ///
    /// ```
    /// # const _: &str = stringify!{
    /// let res = atom.update_with(order, |f| update f...);
    /// # };
    /// ```
    ///
    /// Is morally equivalent to:
    ///
    /// ```
    /// # const _: &str = stringify!{
    /// let res = atom.fetch_update(
    ///     order,
    ///     failure_order_for(order),
    ///     |f| Some(update f...),
    /// ).unwrap();
    /// # };
    /// ```
    ///
    /// Where `failure_order_for` returns the strongest failure order you'd be
    /// allowed to pass into `fetch_update` given the success order, that is:
    ///
    /// ```
    /// # const _: &str = stringify!{
    /// fn failure_order_for(order: Ordering) -> Ordering {
    ///     match order {
    ///         Release | Relaxed => Relaxed,
    ///         Acquire | AcqRel => Acquire,
    ///         SeqCst => SeqCst,
    ///     }
    /// }
    /// # };
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    fn update_with<F>(&self, order: Ordering, mut update: F) -> f32
    where
        F: FnMut(f32) -> f32,
    {
        self.fetch_update(order, super::fail_order_for(order), |f| Some(update(f)))
            .unwrap()
    }

    /// Adds to the current value, returning the previous value.
    ///
    /// Because this returns the previous value, you may want to call it like:
    /// `atom.fetch_add(x, order) + x`
    ///
    /// # Examples
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF32::new(7.0);
    ///
    /// assert_eq!(x.fetch_add(2.0, Relaxed), 7.0);
    /// assert_eq!(x.fetch_add(1.0, SeqCst), 9.0);
    /// assert_eq!(x.fetch_add(-100.0, AcqRel), 10.0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn fetch_add(&self, val: f32, order: Ordering) -> f32 {
        self.update_with(order, |f| f + val)
    }

    /// Subtract from the current value, returning the previous value.
    ///
    /// Because this returns the previous value, you may want to call it like:
    /// `atom.fetch_sub(x, order) - x`
    ///
    /// Note: This operation uses [`fetch_update`](Self::fetch_update) under the
    /// hood, and is likely to be slower than the equivalent operation for
    /// atomic integers.
    ///
    /// # Examples
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF32::new(7.0);
    /// assert_eq!(x.fetch_sub(2.0, Relaxed), 7.0);
    /// assert_eq!(x.fetch_sub(-1.0, SeqCst), 5.0);
    /// assert_eq!(x.fetch_sub(0.5, AcqRel), 6.0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn fetch_sub(&self, val: f32, order: Ordering) -> f32 {
        self.update_with(order, |f| f - val)
    }

    /// Produce the absolute value of the current value, returning the previous
    /// value.
    ///
    /// # Examples
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF32::new(-7.0);
    /// assert_eq!(x.fetch_abs(Relaxed), -7.0);
    /// assert_eq!(x.fetch_abs(SeqCst), 7.0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn fetch_abs(&self, order: Ordering) -> f32 {
        f32::from_bits(self.as_atomic_bits().fetch_and(0x7fff_ffff, order))
    }

    /// Negates the current value, returning the previous value.
    ///
    /// As a result of returning the previous value, you may want to invoke it
    /// like: `-atom.fetch_neg(Relaxed)`.
    ///
    /// # Examples
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF32::new(-7.0);
    /// assert_eq!(x.fetch_neg(Relaxed), -7.0);
    /// assert_eq!(x.fetch_neg(SeqCst), 7.0);
    /// assert_eq!(x.fetch_neg(AcqRel), -7.0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn fetch_neg(&self, order: Ordering) -> f32 {
        f32::from_bits(self.as_atomic_bits().fetch_xor(0x8000_0000, order))
    }

    /// Minimum with the current value.
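    ///
    /// Since this is implemented in terms of [`f32::min`] (visible in the
    /// method body below), a NaN on either side is ignored in favor of the
    /// other value. A quick sketch of that consequence:
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::Relaxed;
    /// let x = AtomicF32::new(f32::NAN);
    /// assert!(x.fetch_min(3.0, Relaxed).is_nan()); // previous value
    /// assert_eq!(x.load(Relaxed), 3.0); // `f32::min` discards the NaN
    /// ```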
    ///
    /// Finds the minimum of the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value. Because of this, you may want to call it
    /// like: `atom.fetch_min(x, order).min(x)`
    ///
    /// Note: This operation uses [`fetch_update`](Self::fetch_update) under the
    /// hood, and is likely to be slower than the equivalent operation for
    /// atomic integers.
    ///
    /// # Examples
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering;
    ///
    /// let foo = AtomicF32::new(23.0);
    /// assert_eq!(foo.fetch_min(42.0, Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.load(Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.fetch_min(22.0, Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.load(Ordering::Relaxed), 22.0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn fetch_min(&self, value: f32, order: Ordering) -> f32 {
        self.update_with(order, |f| f.min(value))
    }

    /// Maximum with the current value.
    ///
    /// Finds the maximum of the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value. Because of this, you may want to call it
    /// like: `atom.fetch_max(x, order).max(x)`
    ///
    /// Note: This operation uses [`fetch_update`](Self::fetch_update) under the
    /// hood, and is likely to be slower than the equivalent operation for
    /// atomic integers.
    ///
    /// # Examples
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering;
    ///
    /// let foo = AtomicF32::new(23.0);
    /// assert_eq!(foo.fetch_max(22.0, Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.load(Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.fetch_max(42.0, Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.load(Ordering::Relaxed), 42.0);
    /// ```
    #[inline]
    #[cfg(target_has_atomic = "32")]
    pub fn fetch_max(&self, value: f32, order: Ordering) -> f32 {
        self.update_with(order, |f| f.max(value))
    }

    /// Returns a reference to an atomic integer which can be used to access the
    /// atomic float's underlying bits in a thread safe manner.
    ///
    /// This is essentially a `transmute::<&Self, &AtomicU32>(self)`, and is
    /// zero cost.
    ///
    /// # Motivation
    ///
    /// This is exposed as an escape hatch because of the caveats around the
    /// `AtomicF32` CaS-family APIs ([`compare_and_swap`], [`compare_exchange`],
    /// [`compare_exchange_weak`], ...) and the notion of bitwise identicality
    /// which they require being somewhat problematic for NaNs, especially on
    /// targets like Wasm (see [rust-lang/rust#73328]).
    ///
    /// [`compare_and_swap`]: AtomicF32::compare_and_swap
    /// [`compare_exchange`]: AtomicF32::compare_exchange
    /// [`compare_exchange_weak`]: AtomicF32::compare_exchange_weak
    /// [rust-lang/rust#73328]: https://github.com/rust-lang/rust/issues/73328
    ///
    /// In general, despite how bad this might sound, in practice we're fairly
    /// safe: LLVM almost never optimizes through atomic operations, this
    /// library is written to try to avoid potential issues from most naive
    /// usage, and I'm optimistic the situation will clean itself up in the
    /// short-to-medium-term future.
    ///
    /// However, if you need peace of mind, or find yourself in a case where you
    /// suspect you're hitting this issue, you can access the underlying atomic
    /// value using this function.
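    ///
    /// For instance (a small sketch): a store performed through the raw bits
    /// is observed by subsequent loads through the float API, since both views
    /// alias the same atomic storage:
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering::Relaxed;
    /// let v = AtomicF32::new(0.0);
    /// v.as_atomic_bits().store(1.5f32.to_bits(), Relaxed);
    /// assert_eq!(v.load(Relaxed), 1.5);
    /// ```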
    ///
    /// # Examples
    ///
    /// ```
    /// # use atomic_float::AtomicF32;
    /// # use std::sync::atomic::Ordering;
    /// let v = AtomicF32::new(22.5);
    /// assert_eq!(v.as_atomic_bits().load(Ordering::Relaxed), 22.5f32.to_bits());
    /// ```
    #[inline]
    pub fn as_atomic_bits(&self) -> &AtomicU32 {
        // Safety: All potentially shared reads/writes go through this, and the
        // static assertions above ensure that AtomicU32 and UnsafeCell<f32> are
        // compatible as pointers.
        unsafe { &*(&self.0 as *const _ as *const AtomicU32) }
    }
}

/// Return a zero-initialized atomic.
///
/// # Example
///
/// ```
/// # use atomic_float::AtomicF32;
/// # use core::sync::atomic::Ordering;
/// let x = AtomicF32::default();
/// assert_eq!(x.load(Ordering::SeqCst), 0.0);
/// ```
impl Default for AtomicF32 {
    #[inline]
    fn default() -> Self {
        Self::from(0.0)
    }
}

/// Equivalent to `<f32 as core::fmt::Debug>::fmt`.
///
/// # Example
///
/// ```
/// # use atomic_float::AtomicF32;
/// # use core::sync::atomic::Ordering;
/// let v = AtomicF32::new(40.0);
/// assert_eq!(format!("{:?}", v), format!("{:?}", 40.0f32));
/// ```
impl core::fmt::Debug for AtomicF32 {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.load(SeqCst).fmt(f)
    }
}

/// Equivalent to `AtomicF32::new`.
///
/// # Example
///
/// ```
/// # use atomic_float::AtomicF32;
/// # use core::sync::atomic::Ordering;
/// let v = AtomicF32::from(10.0);
/// assert_eq!(v.load(Ordering::SeqCst), 10.0);
/// ```
impl From<f32> for AtomicF32 {
    #[inline]
    fn from(f: f32) -> Self {
        Self::new(f)
    }
}

#[cfg(feature = "serde")]
/// Serializes the AtomicF32
///
/// The value is loaded with `Ordering::SeqCst` and then serialized as a
/// normal `f32`. The information about the object being atomic is lost.
impl serde::Serialize for AtomicF32 {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_f32(self.load(Ordering::SeqCst))
    }
}

#[cfg(feature = "serde")]
/// Deserializes the AtomicF32
///
/// Attempts to deserialize an `f32` and, if successful, creates a new
/// AtomicF32 with this value.
impl<'de> serde::Deserialize<'de> for AtomicF32 {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        f32::deserialize(deserializer).map(AtomicF32::new)
    }
}

#[inline(always)]
fn convert_result(r: Result<u32, u32>) -> Result<f32, f32> {
    r.map(f32::from_bits).map_err(f32::from_bits)
}

// XXX: This is dubious since the actual atomic types don't implement this, but
// I need it to use `serde_test`, so I might as well add it.
/// Compare two [`AtomicF32`]s.
///
/// ```
/// # use atomic_float::AtomicF32;
/// # use std::sync::atomic::Ordering;
/// let a = AtomicF32::new(1.0);
/// a.fetch_add(1.0, Ordering::Relaxed);
/// assert_ne!(a, AtomicF32::new(1.0));
/// assert_eq!(a, AtomicF32::new(2.0));
/// ```
///
/// # Caveats
/// Relaxed ordering is used for each load, so additional fencing (or avoiding
/// the use of this `PartialEq` implementation) may be desirable.
///
/// Additionally, this is implemented in terms of `f32`'s `PartialEq`, so NaNs
/// will compare as unequal. For example:
/// ```
/// # use atomic_float::AtomicF32;
/// let a = AtomicF32::new(f32::NAN);
/// assert_ne!(a, a);
/// ```
impl PartialEq for AtomicF32 {
    #[inline]
    fn eq(&self, o: &AtomicF32) -> bool {
        self.load(Relaxed) == o.load(Relaxed)
    }
}
atomic_float-1.1.0/src/atomic_f64.rs000064400000000000000000000714021046102023000153600ustar 00000000000000use core::cell::UnsafeCell;
use core::sync::atomic::{
    AtomicU64,
    Ordering::{self, *},
};

/// A floating point type which can be safely shared between threads.
///
/// This type has the same in-memory representation as the underlying floating
/// point type, [`f64`].
///
/// See the module documentation for [core::sync::atomic] for information about
/// the portability of various atomics (this one is mostly as portable as
/// [`AtomicU64`](core::sync::atomic::AtomicU64), with the caveat that we
/// additionally require that the platform supports 64-bit floats).
///
/// # Example
///
/// The intended use case is situations like this:
///
/// ```
/// # use atomic_float::AtomicF64;
/// # use std::sync::atomic::Ordering;
/// static DELTA_TIME: AtomicF64 = AtomicF64::new(1.0);
///
/// // In some main simulation loop:
/// # fn compute_delta_time() -> f64 { 1.0 / 60.0 }
/// DELTA_TIME.store(compute_delta_time(), Ordering::Release);
///
/// // elsewhere, perhaps on other threads:
/// let dt = DELTA_TIME.load(Ordering::Acquire);
/// // Use `dt` to compute simulation...
/// ```
///
/// As well as any other cases where the overhead of locking would be too
/// substantial.
///
/// Note that when used like this (with Acquire and Release orderings), on
/// x86_64 this compiles to the same code as you would get from a C++ global
/// (or a Rust `static mut`), while offering full synchronization.
///
/// (While caveats exist, the cases where you'd need the total order guaranteed
/// by [SeqCst](core::sync::atomic::Ordering::SeqCst) for something like
/// [`AtomicF64`] seem very rare).
///
/// # Implementation
///
/// Note: These details are not part of the stability guarantee of this crate,
/// and are subject to change without a semver-breaking change.
///
/// Under the hood we use a transparent `UnsafeCell<f64>`, and cast the
/// `&UnsafeCell<f64>` to an [`&AtomicU64`](core::sync::atomic::AtomicU64) in
/// order to perform atomic operations.
///
/// However, operations like [`fetch_add`](AtomicF64::fetch_add) are
/// considerably slower than would be the case for integer atomics.
#[cfg_attr(target_arch = "x86", repr(C, align(8)))]
#[cfg_attr(not(target_arch = "x86"), repr(transparent))]
pub struct AtomicF64(
    // FIXME: Once we can do `f64::from_bits` in const fn, this should be an
    // `AtomicU64` (or at least `UnsafeCell<u64>`).
    UnsafeCell<f64>,
);

// SAFETY: We only ever access the underlying data by refcasting to AtomicU64,
// which guarantees no data races.
unsafe impl Send for AtomicF64 {}
unsafe impl Sync for AtomicF64 {}

// Static assertions that the layout is identical; we cite these in a safety
// comment in `AtomicF64::as_atomic_bits()`. A mismatch is possible on some
// targets (like 32-bit x86, which has different alignments for `AtomicU64`
// and `u64`, for example), so please file a bug for your target if you hit
// these.
const _: [(); core::mem::size_of::<AtomicU64>()] = [(); core::mem::size_of::<AtomicF64>()];
const _: [(); 1] =
    [(); (core::mem::align_of::<AtomicF64>() >= core::mem::align_of::<AtomicU64>()) as usize];

impl AtomicF64 {
    /// Initialize an `AtomicF64` from an `f64`.
    ///
    /// # Example
    ///
    /// Use as a variable:
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::Relaxed;
    /// let x = AtomicF64::new(3.0f64);
    /// assert_eq!(x.load(Relaxed), 3.0f64);
    /// ```
    ///
    /// Use as a static:
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::Relaxed;
    /// static A_STATIC: AtomicF64 = AtomicF64::new(800.0);
    /// assert_eq!(A_STATIC.load(Relaxed), 800.0);
    /// ```
    #[inline]
    pub const fn new(float: f64) -> Self {
        Self(UnsafeCell::new(float))
    }

    /// Returns a mutable reference to the underlying float.
/// /// This is safe because the mutable reference guarantees that no other /// threads are concurrently accessing the atomic data. /// /// # Example /// /// ``` /// # use atomic_float::AtomicF64; /// # use std::sync::atomic::Ordering; /// let mut some_float = AtomicF64::new(1.0); /// assert_eq!(*some_float.get_mut(), 1.0); /// *some_float.get_mut() += 1.0; /// assert_eq!(some_float.load(Ordering::SeqCst), 2.0); /// ``` #[inline] pub fn get_mut(&mut self) -> &mut f64 { // SAFETY: the mutable reference guarantees unique ownership. unsafe { &mut *self.0.get() } } /// Consumes the atomic and returns the contained value. /// /// This is safe because passing `self` by value guarantees that no other /// threads are concurrently accessing the atomic data. /// /// # Example /// /// ``` /// # use atomic_float::AtomicF64; /// let v = AtomicF64::new(6.0); /// assert_eq!(v.into_inner(), 6.0f64); /// ``` #[inline] pub fn into_inner(self) -> f64 { self.0.into_inner() } /// Loads a value from the atomic float. /// /// `load` takes an [`Ordering`] argument which describes the memory /// ordering of this operation. Possible values are [`SeqCst`], [`Acquire`] /// and [`Relaxed`]. /// /// # Panics /// /// Panics if `ordering` is [`Release`] or [`AcqRel`]. /// /// [`Ordering`]: core::sync::atomic::Ordering /// [`Relaxed`]: core::sync::atomic::Ordering::Relaxed /// [`Release`]: core::sync::atomic::Ordering::Release /// [`Acquire`]: core::sync::atomic::Ordering::Acquire /// [`AcqRel`]: core::sync::atomic::Ordering::AcqRel /// [`SeqCst`]: core::sync::atomic::Ordering::SeqCst /// /// # Example /// /// ``` /// use atomic_float::AtomicF64; /// use std::sync::atomic::Ordering; /// let v = AtomicF64::new(22.5); /// assert_eq!(v.load(Ordering::SeqCst), 22.5); /// ``` #[inline] pub fn load(&self, ordering: Ordering) -> f64 { f64::from_bits(self.as_atomic_bits().load(ordering)) } /// Store a value into the atomic float. /// /// `store` takes an [`Ordering`] argument which describes the memory /// ordering of this operation. Possible values are [`SeqCst`], [`Release`] /// and [`Relaxed`]. /// /// # Panics /// /// Panics if `ordering` is [`Acquire`] or [`AcqRel`]. /// /// [`Ordering`]: core::sync::atomic::Ordering /// [`Relaxed`]: core::sync::atomic::Ordering::Relaxed /// [`Release`]: core::sync::atomic::Ordering::Release /// [`Acquire`]: core::sync::atomic::Ordering::Acquire /// [`AcqRel`]: core::sync::atomic::Ordering::AcqRel /// [`SeqCst`]: core::sync::atomic::Ordering::SeqCst /// /// # Example /// /// ``` /// use atomic_float::AtomicF64; /// use std::sync::atomic::Ordering; /// let v = AtomicF64::new(22.5); /// assert_eq!(v.load(Ordering::SeqCst), 22.5); /// v.store(30.0, Ordering::SeqCst); /// assert_eq!(v.load(Ordering::SeqCst), 30.0); /// ``` #[inline] pub fn store(&self, value: f64, ordering: Ordering) { self.as_atomic_bits().store(value.to_bits(), ordering); } /// Stores a value into the atomic float, returning the previous value. /// /// `swap` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. 
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    /// [`Relaxed`]: core::sync::atomic::Ordering::Relaxed
    /// [`Release`]: core::sync::atomic::Ordering::Release
    /// [`Acquire`]: core::sync::atomic::Ordering::Acquire
    ///
    /// # Example
    ///
    /// ```
    /// use atomic_float::AtomicF64;
    /// use std::sync::atomic::Ordering;
    /// let v = AtomicF64::new(4.5);
    /// assert_eq!(v.swap(100.0, Ordering::Relaxed), 4.5);
    /// assert_eq!(v.load(Ordering::Relaxed), 100.0);
    /// ```
    #[inline]
    pub fn swap(&self, new_value: f64, ordering: Ordering) -> f64 {
        f64::from_bits(self.as_atomic_bits().swap(new_value.to_bits(), ordering))
    }

    /// Stores a value into the atomic float if the current value is *bitwise
    /// identical* to the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to
    /// `current`, then the value was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes
    /// the memory ordering of this operation. Notice that even when using
    /// `AcqRel`, the operation might fail and hence just perform an `Acquire`
    /// load, but not have `Release` semantics. Using `Acquire` makes the store
    /// part of this operation `Relaxed` if it happens, and using `Release`
    /// makes the load part `Relaxed`.
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    ///
    /// # Caveats
    ///
    /// As the `current` must be bitwise identical to the previous value, you
    /// should not get the `current` value using any sort of arithmetic (both
    /// because of rounding, and to avoid any situation where -0.0 and +0.0
    /// would be compared). Additionally, on some platforms (WASM and ASM.js
    /// currently) LLVM will canonicalize NaNs during loads, which can cause
    /// unexpected behavior here — typically in the other direction (two values
    /// being unexpectedly equal).
    ///
    /// In practice, typical patterns for CaS tend to avoid these issues, but
    /// you're encouraged to avoid relying on the behavior of cas-family APIs in
    /// the face of rounding, signed zero, and NaNs.
    ///
    /// # Example
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering;
    /// let v = AtomicF64::new(5.0);
    /// assert_eq!(v.compare_and_swap(5.0, 10.0, Ordering::Relaxed), 5.0);
    /// assert_eq!(v.load(Ordering::Relaxed), 10.0);
    ///
    /// assert_eq!(v.compare_and_swap(6.0, 12.0, Ordering::Relaxed), 10.0);
    /// assert_eq!(v.load(Ordering::Relaxed), 10.0);
    /// ```
    #[inline]
    #[allow(deprecated)]
    pub fn compare_and_swap(&self, current: f64, new: f64, order: Ordering) -> f64 {
        f64::from_bits(self.as_atomic_bits().compare_and_swap(
            current.to_bits(),
            new.to_bits(),
            order,
        ))
    }

    /// Stores a value into the atomic float if the current value is bitwise
    /// identical to the `current` value.
    ///
    /// The return value is a result indicating whether the new value was
    /// written and containing the previous value. On success this value is
    /// guaranteed to be bitwise identical to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the
    /// memory ordering of this operation. The first describes the required
    /// ordering if the operation succeeds while the second describes the
    /// required ordering when the operation fails. Using `Acquire` as success
    /// ordering makes the store part of this operation `Relaxed`, and using
    /// `Release` makes the successful load `Relaxed`. The failure ordering can
    /// only be `SeqCst`, `Acquire` or `Relaxed` and must be equivalent to or
    /// weaker than the success ordering.
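    ///
    /// As a sketch of what "bitwise identical" means here: `-0.0` and `+0.0`
    /// compare equal as floats but have different bit patterns, so an exchange
    /// expecting `0.0` fails against a stored `-0.0`:
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::*;
    /// let v = AtomicF64::new(-0.0);
    /// assert!(v.compare_exchange(0.0, 1.0, AcqRel, Acquire).is_err());
    /// assert_eq!(v.load(Relaxed), 0.0); // unchanged (and still negative zero)
    /// ```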
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    ///
    /// # Notes
    ///
    /// Note that in many cases, `while` loops where the condition contains a
    /// `compare_exchange` operation are better written to use a
    /// [`compare_exchange_weak`](AtomicF64::compare_exchange_weak) in the
    /// condition instead (as on weakly ordered platforms like ARM, the
    /// `compare_exchange` operation itself can require a loop to perform).
    ///
    /// ## Caveats
    ///
    /// As the `current` parameter must be bitwise identical to the previous
    /// value, you should not get the `current` value using any sort of
    /// arithmetic (both because of rounding, and to avoid any situation where
    /// -0.0 and +0.0 would be compared). Additionally, on Wasm, in some cases
    /// `NaN` values have been known to cause problems for non-typical usage of
    /// this API. See [`AtomicF64::as_atomic_bits`] if performing the
    /// `compare_exchange` on the raw bits of this atomic float would solve an
    /// issue for you.
    ///
    /// # Example
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::*;
    /// let v = AtomicF64::new(5.0);
    /// assert_eq!(v.compare_exchange(5.0, 10.0, Acquire, Relaxed), Ok(5.0));
    /// assert_eq!(v.load(Relaxed), 10.0);
    ///
    /// assert_eq!(v.compare_exchange(6.0, 12.0, SeqCst, Relaxed), Err(10.0));
    /// assert_eq!(v.load(Relaxed), 10.0);
    /// ```
    #[inline]
    pub fn compare_exchange(
        &self,
        current: f64,
        new: f64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<f64, f64> {
        convert_result(self.as_atomic_bits().compare_exchange(
            current.to_bits(),
            new.to_bits(),
            success,
            failure,
        ))
    }

    /// Stores a value into the atomic float if the current value is bitwise
    /// identical to the `current` value.
    ///
    /// Unlike [`compare_exchange`](Self::compare_exchange), this function is
    /// allowed to spuriously fail even when the comparison succeeds, which can
    /// result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the
    /// memory ordering of this operation. The first describes the required
    /// ordering if the operation succeeds while the second describes the
    /// required ordering when the operation fails. Using `Acquire` as success
    /// ordering makes the store part of this operation `Relaxed`, and using
    /// `Release` makes the successful load `Relaxed`. The failure ordering can
    /// only be `SeqCst`, `Acquire` or `Relaxed` and must be equivalent to or
    /// weaker than the success ordering.
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    ///
    /// ## Caveats
    ///
    /// As the `current` parameter must be bitwise identical to the previous
    /// value, you should not get the `current` value using any sort of
    /// arithmetic (both because of rounding, and to avoid any situation where
    /// -0.0 and +0.0 would be compared). Additionally, on Wasm, in some cases
    /// `NaN` values have been known to cause problems for non-typical usage of
    /// this API. See [`AtomicF64::as_atomic_bits`] if performing the
    /// `compare_exchange` on the raw bits of this atomic float would solve an
    /// issue for you.
    ///
    /// # Example
    ///
    /// Note that this sort of CaS loop should generally use [`fetch_update`]
    /// instead.
    ///
    /// [`fetch_update`]: AtomicF64::fetch_update
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::*;
    /// let v = AtomicF64::new(5.0);
    /// let mut old = v.load(Relaxed);
    /// loop {
    ///     let new = old * 2.0;
    ///     match v.compare_exchange_weak(old, new, SeqCst, Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    pub fn compare_exchange_weak(
        &self,
        current: f64,
        new: f64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<f64, f64> {
        convert_result(self.as_atomic_bits().compare_exchange_weak(
            current.to_bits(),
            new.to_bits(),
            success,
            failure,
        ))
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
    /// returned `Some(_)`, else `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been
    /// changed from other threads in the meantime, as long as the function
    /// returns `Some(_)`, but the function will have been applied only once to
    /// the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering
    /// for when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`compare_exchange`][AtomicF64::compare_exchange]
    /// respectively.
    ///
    /// Using `Acquire` as success ordering makes the store part of this
    /// operation `Relaxed`, and using `Release` makes the final successful load
    /// `Relaxed`. The (failed) load ordering can only be `SeqCst`, `Acquire` or
    /// `Relaxed` and must be equivalent to or weaker than the success ordering.
    ///
    /// [`Ordering`]: core::sync::atomic::Ordering
    ///
    /// # Example
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF64::new(7.0);
    ///
    /// assert_eq!(x.fetch_update(SeqCst, SeqCst, |_| None), Err(7.0));
    /// assert_eq!(x.fetch_update(SeqCst, SeqCst, |x| Some(x + 1.0)), Ok(7.0));
    /// assert_eq!(x.fetch_update(SeqCst, SeqCst, |x| Some(x + 1.0)), Ok(8.0));
    /// assert_eq!(x.load(SeqCst), 9.0);
    /// ```
    #[inline]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut update: F,
    ) -> Result<f64, f64>
    where
        F: FnMut(f64) -> Option<f64>,
    {
        let res = self
            .as_atomic_bits()
            .fetch_update(set_order, fetch_order, |prev| {
                update(f64::from_bits(prev)).map(f64::to_bits)
            });
        convert_result(res)
    }

    // Not exposing this unless I can come up with a better name...
    /// A (nonstandard) convenience wrapper around [`fetch_update`](Self::fetch_update).
    ///
    /// A call like:
    ///
    /// ```
    /// # const _: &str = stringify!{
    /// let res = atom.update_with(order, |f| update f...);
    /// # };
    /// ```
    ///
    /// is morally equivalent to:
    ///
    /// ```
    /// # const _: &str = stringify!{
    /// let res = atom.fetch_update(
    ///     order,
    ///     failure_order_for(order),
    ///     |f| Some(update f...),
    /// ).unwrap();
    /// # };
    /// ```
    ///
    /// where `failure_order_for` returns the strongest failure order you'd be
    /// allowed to pass into `fetch_update` given the success order, that is:
    ///
    /// ```
    /// # const _: &str = stringify!{
    /// fn failure_order_for(order: Ordering) -> Ordering {
    ///     match order {
    ///         Release | Relaxed => Relaxed,
    ///         Acquire | AcqRel => Acquire,
    ///         SeqCst => SeqCst,
    ///     }
    /// }
    /// # };
    /// ```
    #[inline]
    fn update_with<F>(&self, order: Ordering, mut update: F) -> f64
    where
        F: FnMut(f64) -> f64,
    {
        self.fetch_update(order, super::fail_order_for(order), |f| Some(update(f)))
            .unwrap()
    }

    /// Adds to the current value, returning the previous value.
    ///
    /// Because this returns the previous value, you may want to call it like:
    /// `atom.fetch_add(x, order) + x`
    ///
    /// Note: This operation uses [`fetch_update`](Self::fetch_update) under the
    /// hood, and is likely to be slower than the equivalent operation for
    /// atomic integers.
    ///
    /// # Examples
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF64::new(7.0);
    ///
    /// assert_eq!(x.fetch_add(2.0, Relaxed), 7.0);
    /// assert_eq!(x.fetch_add(1.0, SeqCst), 9.0);
    /// assert_eq!(x.fetch_add(-100.0, AcqRel), 10.0);
    /// ```
    #[inline]
    pub fn fetch_add(&self, val: f64, order: Ordering) -> f64 {
        self.update_with(order, |f| f + val)
    }

    /// Subtracts from the current value, returning the previous value.
    ///
    /// Because this returns the previous value, you may want to call it like:
    /// `atom.fetch_sub(x, order) - x`
    ///
    /// Note: This operation uses [`fetch_update`](Self::fetch_update) under the
    /// hood, and is likely to be slower than the equivalent operation for
    /// atomic integers.
    ///
    /// # Examples
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF64::new(7.0);
    /// assert_eq!(x.fetch_sub(2.0, Relaxed), 7.0);
    /// assert_eq!(x.fetch_sub(-1.0, SeqCst), 5.0);
    /// assert_eq!(x.fetch_sub(0.5, AcqRel), 6.0);
    /// ```
    #[inline]
    pub fn fetch_sub(&self, val: f64, order: Ordering) -> f64 {
        self.update_with(order, |f| f - val)
    }

    /// Computes the absolute value of the current value, storing the result
    /// and returning the previous value.
    ///
    /// Because this returns the previous value, you may want to call it like:
    /// `atom.fetch_abs(order).abs()`
    ///
    /// # Examples
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF64::new(-7.0);
    /// assert_eq!(x.fetch_abs(Relaxed), -7.0);
    /// assert_eq!(x.fetch_abs(SeqCst), 7.0);
    /// ```
    #[inline]
    pub fn fetch_abs(&self, order: Ordering) -> f64 {
        f64::from_bits(
            self.as_atomic_bits()
                .fetch_and(0x7fff_ffff_ffff_ffff, order),
        )
    }

    /// Negates the current value, returning the previous value.
    ///
    /// Because this returns the previous value, you may want to call it like:
    /// `-atom.fetch_neg(Relaxed)`.
    ///
    /// # Examples
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering::*;
    /// let x = AtomicF64::new(-7.0);
    /// assert_eq!(x.fetch_neg(Relaxed), -7.0);
    /// assert_eq!(x.fetch_neg(SeqCst), 7.0);
    /// assert_eq!(x.fetch_neg(AcqRel), -7.0);
    /// ```
    #[inline]
    pub fn fetch_neg(&self, order: Ordering) -> f64 {
        f64::from_bits(
            self.as_atomic_bits()
                .fetch_xor(0x8000_0000_0000_0000, order),
        )
    }

    /// Minimum with the current value.
    ///
    /// Finds the minimum of the current value and the argument `value`, and
    /// sets the new value to the result.
    ///
    /// Returns the previous value. Because of this, you may want to call it
    /// like: `atom.fetch_min(x, order).min(x)`
    ///
    /// Note: This operation uses [`fetch_update`](Self::fetch_update) under the
    /// hood, and is likely to be slower than the equivalent operation for
    /// atomic integers.
    ///
    /// # Examples
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering;
    ///
    /// let foo = AtomicF64::new(23.0);
    /// assert_eq!(foo.fetch_min(42.0, Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.load(Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.fetch_min(22.0, Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.load(Ordering::Relaxed), 22.0);
    /// ```
    #[inline]
    pub fn fetch_min(&self, value: f64, order: Ordering) -> f64 {
        self.update_with(order, |f| f.min(value))
    }

    /// Maximum with the current value.
    ///
    /// Finds the maximum of the current value and the argument `value`, and
    /// sets the new value to the result.
    ///
    /// Returns the previous value. Because of this, you may want to call it
    /// like: `atom.fetch_max(x, order).max(x)`
    ///
    /// Note: This operation uses [`fetch_update`](Self::fetch_update) under the
    /// hood, and is likely to be slower than the equivalent operation for
    /// atomic integers.
    ///
    /// # Examples
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering;
    ///
    /// let foo = AtomicF64::new(23.0);
    /// assert_eq!(foo.fetch_max(22.0, Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.load(Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.fetch_max(42.0, Ordering::Relaxed), 23.0);
    /// assert_eq!(foo.load(Ordering::Relaxed), 42.0);
    /// ```
    #[inline]
    pub fn fetch_max(&self, value: f64, order: Ordering) -> f64 {
        self.update_with(order, |f| f.max(value))
    }

    /// Returns a reference to an atomic integer which can be used to access the
    /// atomic float's underlying bits in a thread-safe manner.
    ///
    /// This is essentially a `transmute::<&Self, &AtomicU64>(self)`, and is
    /// zero cost.
    ///
    /// # Motivation
    ///
    /// This is exposed as an escape hatch because of the caveats around the
    /// `AtomicF64` CAS-family APIs ([`compare_and_swap`], [`compare_exchange`],
    /// [`compare_exchange_weak`], ...) and the notion of bitwise identicality
    /// which they require being somewhat problematic for NaNs, especially on
    /// targets like Wasm (see [rust-lang/rust#73328]).
    ///
    /// [`compare_and_swap`]: AtomicF64::compare_and_swap
    /// [`compare_exchange`]: AtomicF64::compare_exchange
    /// [`compare_exchange_weak`]: AtomicF64::compare_exchange_weak
    /// [rust-lang/rust#73328]: https://github.com/rust-lang/rust/issues/73328
    ///
    /// Despite how bad this might sound, in practice we're fairly safe in
    /// general: LLVM almost never optimizes through atomic operations, this
    /// library is written to try to avoid potential issues from most naive
    /// usage, and I'm optimistic the situation will clean itself up in the
    /// short-to-medium-term future.
    ///
    /// However, if you need peace of mind, or find yourself in a case where you
    /// suspect you're hitting this issue, you can access the underlying atomic
    /// value using this function.
    ///
    /// # Examples
    ///
    /// ```
    /// # use atomic_float::AtomicF64;
    /// # use std::sync::atomic::Ordering;
    /// let v = AtomicF64::new(22.5);
    /// assert_eq!(v.as_atomic_bits().load(Ordering::Relaxed), 22.5f64.to_bits());
    /// ```
    #[inline]
    pub fn as_atomic_bits(&self) -> &AtomicU64 {
        // Safety: All potentially shared reads/writes go through this, and the
        // static assertions above ensure that AtomicU64 and UnsafeCell<f64> are
        // compatible as pointers.
        unsafe { &*(&self.0 as *const _ as *const AtomicU64) }
    }
}

/// Returns a zero-initialized atomic.
///
/// # Example
///
/// ```
/// # use atomic_float::AtomicF64;
/// # use core::sync::atomic::Ordering;
/// let x = AtomicF64::default();
/// assert_eq!(x.load(Ordering::SeqCst), 0.0);
/// ```
impl Default for AtomicF64 {
    #[inline]
    fn default() -> Self {
        Self::from(0.0)
    }
}

/// Equivalent to `<f64 as core::fmt::Debug>::fmt` on the value loaded with
/// `SeqCst`.
///
/// # Example
///
/// ```
/// # use atomic_float::AtomicF64;
/// # use core::sync::atomic::Ordering;
/// let v = AtomicF64::new(40.0);
/// assert_eq!(format!("{:?}", v), format!("{:?}", 40.0));
/// ```
impl core::fmt::Debug for AtomicF64 {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        self.load(SeqCst).fmt(f)
    }
}

/// Equivalent to `AtomicF64::new`.
///
/// # Example
///
/// ```
/// # use atomic_float::AtomicF64;
/// # use core::sync::atomic::Ordering;
/// let v = AtomicF64::from(10.0);
/// assert_eq!(v.load(Ordering::SeqCst), 10.0);
/// ```
impl From<f64> for AtomicF64 {
    #[inline]
    fn from(f: f64) -> Self {
        Self::new(f)
    }
}

#[cfg(feature = "serde")]
/// Serializes the `AtomicF64`.
///
/// The value is loaded with `Ordering::SeqCst` and then serialized as a
/// normal `f64`. The information about the object being atomic is lost.
impl serde::Serialize for AtomicF64 {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_f64(self.load(Ordering::SeqCst))
    }
}

#[cfg(feature = "serde")]
/// Deserializes the `AtomicF64`.
///
/// Attempts to deserialize an `f64` and, if successful, creates a new
/// `AtomicF64` with this value.
impl<'de> serde::Deserialize<'de> for AtomicF64 {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        f64::deserialize(deserializer).map(AtomicF64::new)
    }
}

#[inline(always)]
fn convert_result(r: Result<u64, u64>) -> Result<f64, f64> {
    r.map(f64::from_bits).map_err(f64::from_bits)
}

// XXX: This is dubious since the actual atomic types don't implement this, but
// I need it to use `serde_test`, so I might as well add it.
/// Compare two [`AtomicF64`]s.
///
/// ```
/// # use atomic_float::AtomicF64;
/// # use std::sync::atomic::Ordering;
/// let a = AtomicF64::new(1.0);
/// a.fetch_add(1.0, Ordering::Relaxed);
/// assert_ne!(a, AtomicF64::new(1.0));
/// assert_eq!(a, AtomicF64::new(2.0));
/// ```
///
/// # Caveats
/// Relaxed ordering is used for each load, so additional fencing (or avoiding
/// the use of this `PartialEq` implementation) may be desirable.
///
/// Additionally, this is implemented in terms of `f64`'s `PartialEq`, so NaNs
/// will compare as unequal. For example:
/// ```
/// # use atomic_float::AtomicF64;
/// let a = AtomicF64::new(f64::NAN);
/// assert_ne!(a, a);
/// ```
impl PartialEq for AtomicF64 {
    #[inline]
    fn eq(&self, o: &AtomicF64) -> bool {
        self.load(Relaxed) == o.load(Relaxed)
    }
}
atomic_float-1.1.0/src/lib.rs000064400000000000000000000064701046102023000141760ustar 00000000000000
//! This crate provides [`AtomicF32`] and [`AtomicF64`] types. They're
//! implemented on top of `AtomicU32` and `AtomicU64` respectively.
//!
//! ```
//! # use atomic_float::AtomicF32;
//! # use std::sync::atomic::Ordering;
//! static DELTA_TIME: AtomicF32 = AtomicF32::new(1.0);
//!
//! // In some main simulation loop:
//! # fn compute_delta_time() -> f32 { 1.0 / 60.0 }
//! DELTA_TIME.store(compute_delta_time(), Ordering::Release);
//!
//! // elsewhere, perhaps on other threads:
//! let dt = DELTA_TIME.load(Ordering::Acquire);
//! // Use `dt` to compute simulation...
//! ```
//!
//! # Portability
//!
//! In general, this library is as portable as [`AtomicU32`]/[`AtomicU64`]
//! (fairly portable). See the module documentation for [`core::sync::atomic`]
//! for information about the portability of atomic operations as a whole.
//!
//! [`AtomicU32`]: core::sync::atomic::AtomicU32
//! [`AtomicU64`]: core::sync::atomic::AtomicU64
//!
//! Some architectures do not support 64-bit atomics, so [`AtomicF64`] is not
//! available on such architectures. Examples include 32-bit PowerPC, MIPS, and
//! Arm M-Profile.
//!
//! # Potential Use Cases
//!
//! The motivating cases for this were:
//!
//! - Tunable parameters loaded from a file that otherwise behave as global
//!   constants (still compelling to me).
//!
//! - Global variables like time deltas (see the example above) which would
//!   otherwise need to be threaded through a large amount of code. (Not as
//!   compelling.)
//!
//! But really it was another 90%-finished project that I had meant to get out
//! the door.
//!
//! # Performance
//!
//! On x86 and x86_64: basically zero cost if you pick the right orderings and
//! stick to load/store.
//!
//! On everything else: acceptable if you pick the right orderings.
//!
//! In general, this depends on your architecture. If you're on x86{,_64}, you
//! can get away with a lot of dodgy atomic code. Even `SeqCst` usage won't
//! bite you too badly, so long as stores are rare. That said, I'd try to stick
//! to `Acquire`/`Release` even on x86. For load/store, this has roughly zero
//! cost compared to reading or writing a global variable directly. Also, if
//! you just need atomicity, and not any global ordering, feel free to use
//! `Relaxed`.
//!
//! (I normally wouldn't give this advice, but you're probably not using
//! floating point in a situation where the exact value you get must follow
//! absolute rules.)
//!
//! Beyond all of this, we provide a few convenient RMW operations. Ones that
//! have to perform actual float operations, such as `fetch_add`/`fetch_sub`
//! (but not ones that operate solely on the binary representation, like
//! `fetch_abs` or `fetch_neg`), need to perform a CAS loop. That means they're
//! much slower than `fetch_add`/`fetch_sub` are for `AtomicU32`, for example.
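//!
//! To make the cost of that CAS loop concrete, here is a minimal sketch of
//! what a float `fetch_add` over an `AtomicU64` of raw bits looks like. This
//! is an illustration, not this crate's exact implementation, and
//! `fetch_add_via_cas` is a hypothetical helper that exists only for this
//! example:
//!
//! ```
//! use core::sync::atomic::{AtomicU64, Ordering};
//!
//! fn fetch_add_via_cas(bits: &AtomicU64, val: f64, order: Ordering) -> f64 {
//!     let mut prev = bits.load(Ordering::Relaxed);
//!     loop {
//!         // Reinterpret the bits as a float, add, and go back to bits.
//!         let next = (f64::from_bits(prev) + val).to_bits();
//!         // `Relaxed` is always a legal failure ordering here.
//!         match bits.compare_exchange_weak(prev, next, order, Ordering::Relaxed) {
//!             Ok(_) => return f64::from_bits(prev),
//!             Err(p) => prev = p, // another thread won the race; retry
//!         }
//!     }
//! }
//!
//! let a = AtomicU64::new(1.5f64.to_bits());
//! assert_eq!(fetch_add_via_cas(&a, 2.0, Ordering::AcqRel), 1.5);
//! assert_eq!(f64::from_bits(a.load(Ordering::Relaxed)), 3.5);
//! ```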
#![no_std] #![deny(missing_docs)] mod atomic_f32; pub use atomic_f32::AtomicF32; #[cfg(target_has_atomic = "64")] mod atomic_f64; #[cfg(target_has_atomic = "64")] pub use atomic_f64::AtomicF64; use core::sync::atomic::Ordering; #[inline] fn fail_order_for(order: Ordering) -> Ordering { match order { Ordering::Release | Ordering::Relaxed => Ordering::Relaxed, Ordering::Acquire | Ordering::AcqRel => Ordering::Acquire, Ordering::SeqCst => Ordering::SeqCst, o => unreachable!("Unknown ordering: {:?} (file a bug with atomic_float)", o), } } atomic_float-1.1.0/tests/test.rs000064400000000000000000000016311046102023000147540ustar 00000000000000// Note: most of our tests are doctests use atomic_float::AtomicF32; use core::sync::atomic::Ordering::*; #[test] fn readme_test() { static A_STATIC: AtomicF32 = AtomicF32::new(800.0); // Should support the full std::sync::atomic::AtomicFoo API A_STATIC.fetch_add(30.0, Relaxed); A_STATIC.fetch_sub(-55.0, Relaxed); // But also supports things that can be implemented // efficiently easily, like sign-bit operations. A_STATIC.fetch_neg(Relaxed); assert_eq!(A_STATIC.load(Relaxed), -885.0); } #[cfg(feature = "serde")] #[test] fn test_serde_f32() { serde_test::assert_tokens( &atomic_float::AtomicF32::new(1.0), &[serde_test::Token::F32(1.0)], ); } #[cfg(all(feature = "serde", target_has_atomic = "64"))] #[test] fn test_serde_f64() { serde_test::assert_tokens( &atomic_float::AtomicF64::new(1.0), &[serde_test::Token::F64(1.0)], ); }
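
// A small extra smoke test for the CAS-family and min/max APIs. This is a
// sketch assuming `AtomicF32` mirrors the `AtomicF64` API documented in this
// crate; the expected values follow from the "returns the previous value"
// convention used throughout.
#[test]
fn cas_and_min_max_smoke() {
    let v = AtomicF32::new(5.0);
    // On success, the previous value (5.0) comes back inside `Ok`.
    assert_eq!(v.compare_exchange(5.0, 10.0, SeqCst, Relaxed), Ok(5.0));
    // `fetch_min`/`fetch_max` also return the previous value, not the result.
    assert_eq!(v.fetch_min(3.0, Relaxed), 10.0);
    assert_eq!(v.fetch_max(100.0, Relaxed), 3.0);
    assert_eq!(v.load(Relaxed), 100.0);
}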