num-order-1.2.0/.cargo_vcs_info.json0000644000000001360000000000100127440ustar { "git": { "sha1": "b0a5322e5c731f64bfc16fe03356ade92fdb499b" }, "path_in_vcs": "" }num-order-1.2.0/.gitignore000064400000000000000000000000250072674642500135510ustar 00000000000000/target Cargo.lock num-order-1.2.0/CHANGELOG.md000064400000000000000000000025340072674642500134010ustar 00000000000000# Changelog ## [1.2.0] - 2023-08-30 - Bump the version of `num-modular` to prevent a bug. - The hash values of negative infinity and NaN are changed. - The hash values of rationals whose denominator is a multiple of M127 are changed. > This is not released as a new major version because the previous hash values were never intended by design; they were set by mistake. ## [1.1.0] - 2023-08-29 The crate `num-traits` is now an optional dependency, and the `libm` dependency is removed. The dependency version of `num-modular` is updated to v0.6. ## [1.0.4] - 2022-05-23 Bump the version of dependency `num-modular`. ## [1.0.3] - 2022-04-17 Bump the version of dependency `num-modular`. ## [1.0.2] - 2022-04-06 Bump the version of dependency `num-modular`, use `MersenneInt` for more efficient hashing. ## [1.0.1] - 2022-03-31 First public stable version of `num-order`! Numerically consistent ordering and hash comparison are fully supported for the following types: - Unsigned integers: `u8`, `u16`, `u32`, `u64`, `u128` - Signed integers: `i8`, `i16`, `i32`, `i64`, `i128` - Float numbers: `f32`, `f64` - (`num-rational`) Rational numbers: `Ratio`, `Ratio`, `Ratio`, `Ratio`, `Ratio` - (`num-complex`) Complex numbers: `Complex`, `Complex` > v1.0.0 was yanked because `num-rational` was accidentally added as a default feature. num-order-1.2.0/Cargo.toml0000644000000027550000000000100107530ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. 
[package] edition = "2018" name = "num-order" version = "1.2.0" description = "Numerically consistent `Eq`, `Ord` and `Hash` implementations for various `num` types (`u32`, `f64`, `num_bigint::BigInt`, etc.)" documentation = "https://docs.rs/num-order" readme = "README.md" keywords = [ "numeric", "comparison", "hash", "order", "equality", ] categories = [ "mathematics", "algorithms", "no-std", ] license = "Apache-2.0" repository = "https://github.com/cmpute/num-order" [package.metadata.docs.rs] features = [ "num-rational", "num-bigint", "num-complex", ] [dependencies.num-bigint] version = "0.4.0" optional = true [dependencies.num-complex] version = "0.4.0" optional = true [dependencies.num-modular] version = "0.6.1" [dependencies.num-rational] version = "0.4.0" optional = true [dependencies.num-traits] version = "0.2.0" optional = true [features] default = [] num-bigint = [ "dep:num-bigint", "num-traits", ] num-rational = [ "dep:num-rational", "num-traits", ] std = [] num-order-1.2.0/Cargo.toml.orig000064400000000000000000000020600072674642500144510ustar 00000000000000[package] name = "num-order" version = "1.2.0" edition = "2018" license = "Apache-2.0" description = "Numerically consistent `Eq`, `Ord` and `Hash` implementations for various `num` types (`u32`, `f64`, `num_bigint::BigInt`, etc.)" repository = "https://github.com/cmpute/num-order" keywords = ["numeric", "comparison", "hash", "order", "equality"] categories = ["mathematics", "algorithms", "no-std"] documentation = "https://docs.rs/num-order" readme = "README.md" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] num-modular = { version = "0.6.1" } num-traits = { version = "0.2.0", optional = true } num-bigint = { version = "0.4.0", optional = true } num-rational = { version = "0.4.0", optional = true } num-complex = { version = "0.4.0", optional = true } [features] default = [] num-bigint = ["dep:num-bigint", "num-traits"] num-rational = ["dep:num-rational", "num-traits"] std = [] [package.metadata.docs.rs] features = ["num-rational", "num-bigint", "num-complex"] num-order-1.2.0/LICENSE000064400000000000000000000254320072674642500125770ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [2022] [Jacob Zhong] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.num-order-1.2.0/README.md000064400000000000000000000017600072674642500130470ustar 00000000000000Numerically consistent `Eq`, `Ord` and `Hash` implementations for various `num` types (`u32`, `f64`, `num_bigint::BigInt`, etc.). 
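For instance, a plain `as` cast silently rounds any value that is not exactly representable in the target type, so a hand-written mixed-type comparison can report equality where none exists. A small sketch of the difference (illustrative only, using the crate's `NumOrd` trait):

```rust
use num_order::NumOrd;

// u64::MAX (2^64 - 1) is not exactly representable in f64: the cast rounds it
// up to 2^64, so the cast-based comparison claims the two values are equal.
let a: u64 = u64::MAX;
let b: f64 = 18_446_744_073_709_551_616.0; // exactly 2^64
assert_eq!(a as f64, b); // lossy: the cast changed the value of `a`

// num-order compares the exact mathematical values, preserving the inequality.
assert!(NumOrd::num_lt(&a, &b));
assert!(NumOrd::num_gt(&b, &a));
```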
# Example ```rust use std::cmp::Ordering; use std::hash::Hasher; use std::collections::hash_map::DefaultHasher; use num_order::{NumOrd, NumHash}; assert!(NumOrd::num_eq(&3u64, &3.0f32)); assert!(NumOrd::num_lt(&-4.7f64, &-4i8)); assert!(!NumOrd::num_ge(&-3i8, &1u16)); // 40_000_000 can be exactly represented in f32, 40_000_001 cannot // 40_000_001 becomes 40_000_000.0 in f32 assert_eq!(NumOrd::num_cmp(&40_000_000f32, &40_000_000u32), Ordering::Equal); assert_ne!(NumOrd::num_cmp(&40_000_001f32, &40_000_001u32), Ordering::Equal); assert_eq!(NumOrd::num_partial_cmp(&f32::NAN, &40_000_002u32), None); // same hash values are guaranteed for equal numbers let mut hasher1 = DefaultHasher::new(); 3u64.num_hash(&mut hasher1); let mut hasher2 = DefaultHasher::new(); 3.0f32.num_hash(&mut hasher2); assert_eq!(hasher1.finish(), hasher2.finish()) ``` num-order-1.2.0/src/hash.rs000064400000000000000000000242270072674642500136520ustar 00000000000000//! We use the Mersenne prime 2^127-1 (i128::MAX) as the main modulus, which maximizes the space of available hashing slots. //! (The largest Mersenne prime under 2^64 is only 2^61-1, so we use u128 for hashing, which is also future-proof.) //! //! The basic algorithm is similar to what is used in Python (see https://docs.python.org/3.8/library/stdtypes.html#hashing-of-numeric-types), //! specifically, if the numerically consistent hash function is denoted as num_hash, then: //! - for an integer n: num_hash(n) = sgn(n) * (|n| % M127) //! - for a rational number n/d (including floating numbers): sgn(n/d) * num_hash(|n|) * (num_hash(|d|)^-1 mod M127) //! - for special values: num_hash(NaN) and num_hash(±∞) are specially chosen such that they won't overlap with normal numbers. use crate::NumHash; use core::hash::{Hash, Hasher}; use num_modular::{FixedMersenneInt, ModularAbs, ModularInteger}; // we use 2^127 - 1 (a Mersenne prime) as modulus type MInt = FixedMersenneInt<127, 1>; const M127: i128 = i128::MAX; const M127U: u128 = M127 as u128; const M127D: u128 = M127U + M127U; const HASH_INF: i128 = i128::MAX; // 2^127 - 1 const HASH_NEGINF: i128 = i128::MIN + 1; // -(2^127 - 1) const HASH_NAN: i128 = i128::MIN; // -2^127 #[cfg(feature = "num-complex")] const PROOT: u128 = i32::MAX as u128; // a Mersenne prime // TODO (v2.0): Use the coefficients of the characteristic polynomial to represent a number. This way, // all algebraic numbers can be represented, including complex and quadratic numbers. // Case1: directly hash the i128 and u128 number (mod M127) impl NumHash for i128 { #[inline] fn num_hash<H: Hasher>(&self, state: &mut H) { const MINP1: i128 = i128::MIN + 1; match *self { i128::MAX | MINP1 => 0i128.hash(state), i128::MIN => (-1i128).hash(state), u => u.hash(state), } } } impl NumHash for u128 { #[inline] fn num_hash<H: Hasher>(&self, state: &mut H) { match *self { u128::MAX => 1i128.hash(state), M127D => 0i128.hash(state), u if u >= M127U => ((u - M127U) as i128).hash(state), u => (u as i128).hash(state), } } } // Case2: convert other small integers to i128 macro_rules! impl_hash_for_small_int { ($($signed:ty)*) => ($( impl NumHash for $signed { #[inline] fn num_hash<H: Hasher>(&self, state: &mut H) { (&(*self as i128)).hash(state) // these integers are always smaller than M127 } } )*); } impl_hash_for_small_int!
{ i8 i16 i32 i64 u8 u16 u32 u64} impl NumHash for usize { #[inline] fn num_hash(&self, state: &mut H) { #[cfg(target_pointer_width = "32")] return (&(*self as u32)).num_hash(state); #[cfg(target_pointer_width = "64")] return (&(*self as u64)).num_hash(state); } } impl NumHash for isize { #[inline] fn num_hash(&self, state: &mut H) { #[cfg(target_pointer_width = "32")] return (&(*self as i32)).num_hash(state); #[cfg(target_pointer_width = "64")] return (&(*self as i64)).num_hash(state); } } #[cfg(feature = "num-bigint")] mod _num_bigint { use super::*; use num_bigint::{BigInt, BigUint}; use num_traits::ToPrimitive; impl NumHash for BigUint { fn num_hash(&self, state: &mut H) { (self % BigUint::from(M127U)).to_i128().unwrap().hash(state) } } impl NumHash for BigInt { fn num_hash(&self, state: &mut H) { (self % BigInt::from(M127)).to_i128().unwrap().hash(state) } } } // Case3: for rational(a, b) including floating numbers, the hash is `hash(a * b^-1 mod M127)` (b > 0) trait FloatHash { // Calculate mantissa * exponent^-1 mod M127 fn fhash(&self) -> i128; } impl FloatHash for f32 { fn fhash(&self) -> i128 { let bits = self.to_bits(); let sign_bit = bits >> 31; let mantissa_bits = bits & 0x7fffff; let mut exponent: i16 = ((bits >> 23) & 0xff) as i16; if exponent == 0xff { // deal with special floats if mantissa_bits != 0 { // nan HASH_NAN } else if sign_bit > 0 { HASH_NEGINF // -inf } else { HASH_INF // inf } } else { // then deal with normal floats let mantissa = if exponent == 0 { mantissa_bits << 1 } else { mantissa_bits | 0x800000 }; exponent -= 0x7f + 23; // calculate hash let mantissa = MInt::new(mantissa as u128, &M127U); // m * 2^e mod M127 = m * 2^(e mod 127) mod M127 let pow = mantissa.convert(1 << exponent.absm(&127)); let v = mantissa * pow; v.residue() as i128 * if sign_bit == 0 { 1 } else { -1 } } } } impl NumHash for f32 { fn num_hash(&self, state: &mut H) { self.fhash().num_hash(state) } } impl FloatHash for f64 { fn fhash(&self) -> i128 { let bits = self.to_bits(); let sign_bit = bits >> 63; let mantissa_bits = bits & 0xfffffffffffff; let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16; if exponent == 0x7ff { // deal with special floats if mantissa_bits != 0 { // nan HASH_NAN } else if sign_bit > 0 { HASH_NEGINF // -inf } else { HASH_INF // inf } } else { // deal with normal floats let mantissa = if exponent == 0 { mantissa_bits << 1 } else { mantissa_bits | 0x10000000000000 }; // Exponent bias + mantissa shift exponent -= 0x3ff + 52; // calculate hash let mantissa = MInt::new(mantissa as u128, &M127U); // m * 2^e mod M127 = m * 2^(e mod 127) mod M127 let pow = mantissa.convert(1 << exponent.absm(&127)); let v = mantissa * pow; v.residue() as i128 * if sign_bit == 0 { 1 } else { -1 } } } } impl NumHash for f64 { fn num_hash(&self, state: &mut H) { self.fhash().num_hash(state) } } #[cfg(feature = "num-rational")] mod _num_rational { use super::*; use core::ops::Neg; use num_rational::Ratio; macro_rules! 
impl_hash_for_ratio { ($($int:ty)*) => ($( impl NumHash for Ratio<$int> { fn num_hash(&self, state: &mut H) { let ub = *self.denom() as u128; // denom is always positive in Ratio let binv = if ub != M127U { MInt::new(ub, &M127U).inv().unwrap() } else { // no modular inverse, use INF or NEGINF as the result return if self.numer() > &0 { HASH_INF.num_hash(state) } else { HASH_NEGINF.num_hash(state) } }; let ua = if self.numer() < &0 { (*self.numer() as u128).wrapping_neg() } else { *self.numer() as u128 }; // essentially calculate |self.numer()| let ua = binv.convert(ua); let ab = (ua * binv).residue() as i128; if self.numer() >= &0 { ab.num_hash(state) } else { ab.neg().num_hash(state) } } } )*); } impl_hash_for_ratio!(i8 i16 i32 i64 i128 isize); #[cfg(feature = "num-bigint")] mod _num_bigint { use super::*; use num_bigint::{BigInt, BigUint}; use num_traits::{Signed, ToPrimitive, Zero}; impl NumHash for Ratio { fn num_hash(&self, state: &mut H) { let ub = (self.denom().magnitude() % BigUint::from(M127U)) .to_u128() .unwrap(); let binv = if !ub.is_zero() { MInt::new(ub, &M127U).inv().unwrap() } else { // no modular inverse, use INF or NEGINF as the result return if self.numer().is_negative() { HASH_NEGINF.num_hash(state) } else { HASH_INF.num_hash(state) }; }; let ua = (self.numer().magnitude() % BigUint::from(M127U)) .to_u128() .unwrap(); let ua = binv.convert(ua); let ab = (ua * binv).residue() as i128; if self.numer().is_negative() { ab.neg().num_hash(state) } else { ab.num_hash(state) } } } } } // Case4: for a + b*sqrt(r) where a, b are rational numbers, the hash is // - `hash(a + PROOT^2*b^2*r)` if b > 0 // - `hash(a - PROOT^2*b^2*r)` if b < 0 // The generalized version is that, hash of (a + b*r^(1/k)) will be `hash(a + PROOT^k*b^k*r)` // Some Caveats: // 1. if r = 1, the hash is not consistent with normal integer, but r = 1 is forbidden in QuadraticSurd // 2. a - b*sqrt(r) and a + b*sqrt(-r) has the same hash, which is usually not a problem #[cfg(feature = "num-complex")] mod _num_complex { use super::*; use num_complex::Complex; macro_rules! impl_complex_hash_for_float { ($($float:ty)*) => ($( impl NumHash for Complex<$float> { fn num_hash(&self, state: &mut H) { let a = self.re.fhash(); let b = self.im.fhash(); let bterm = if b >= 0 { let pb = MInt::new(b as u128, &M127U) * PROOT; -((pb * pb).residue() as i128) } else { let pb = MInt::new((-b) as u128, &M127U) * PROOT; (pb * pb).residue() as i128 }; (a + bterm).num_hash(state) } } )*); } impl_complex_hash_for_float!(f32 f64); } num-order-1.2.0/src/lib.rs000064400000000000000000000104100072674642500134630ustar 00000000000000//! //! `num-order` implements numerically consistent [Eq][core::cmp::Eq], [Ord][core::cmp::Ord] and //! [Hash][core::hash::Hash] for various `num` types. //! //! ```rust //! use std::cmp::Ordering; //! use std::hash::Hasher; //! use std::collections::hash_map::DefaultHasher; //! use num_order::NumOrd; //! //! assert!(NumOrd::num_eq(&3u64, &3.0f32)); //! assert!(NumOrd::num_lt(&-4.7f64, &-4i8)); //! assert!(!NumOrd::num_ge(&-3i8, &1u16)); //! //! // 40_000_000 can be exactly represented in f32, 40_000_001 cannot //! // 40_000_001 becames 40_000_000.0 in f32 //! assert_eq!(NumOrd::num_cmp(&40_000_000f32, &40_000_000u32), Ordering::Equal); //! assert_ne!(NumOrd::num_cmp(&40_000_001f32, &40_000_001u32), Ordering::Equal); //! assert_eq!(NumOrd::num_partial_cmp(&f32::NAN, &40_000_002u32), None); //! //! use num_order::NumHash; //! // same hash values are guaranteed for equal numbers //! 
let mut hasher1 = DefaultHasher::new(); //! 3u64.num_hash(&mut hasher1); //! let mut hasher2 = DefaultHasher::new(); //! 3.0f32.num_hash(&mut hasher2); //! assert_eq!(hasher1.finish(), hasher2.finish()) //! ``` //! //! This crate can serve applications where [float-ord](https://crates.io/crates/float-ord), //! [num-cmp](https://crates.io/crates/num-cmp), [num-ord](https://crates.io/crates/num-ord) are used. //! Meanwhile it also supports hashing and more numeric types (`num-bigint`, etc.). //! //! # Optional Features //! - `std`: enable the std library //! - `num-bigint`: Support comparing against and hashing `num-bigint::{BigInt, BigUint}` //! - `num-rational`: Support comparing against and hashing `num-rational::Ratio<I>`, where `I` can be //! `i8`, `i16`, `i32`, `i64`, `i128` and `isize`. `Ratio<BigInt>` is supported when both `num-bigint` //! and `num-rational` are enabled //! - `num-complex`: Support comparing against and hashing `num-complex::{Complex32, Complex64}` //! #![no_std] #[cfg(any(feature = "std", test))] extern crate std; use core::cmp::Ordering; use core::hash::Hasher; /// Consistent comparison among different numeric types. pub trait NumOrd<Other> { /// [PartialOrd::partial_cmp] on different numeric types fn num_partial_cmp(&self, other: &Other) -> Option<Ordering>; #[inline] /// [PartialEq::eq] on different numeric types fn num_eq(&self, other: &Other) -> bool { matches!(self.num_partial_cmp(other), Some(Ordering::Equal)) } #[inline] /// [PartialEq::ne] on different numeric types fn num_ne(&self, other: &Other) -> bool { !self.num_eq(other) } #[inline] /// [PartialOrd::lt] on different numeric types fn num_lt(&self, other: &Other) -> bool { matches!(self.num_partial_cmp(other), Some(Ordering::Less)) } #[inline] /// [PartialOrd::le] on different numeric types fn num_le(&self, other: &Other) -> bool { matches!( self.num_partial_cmp(other), Some(Ordering::Equal) | Some(Ordering::Less) ) } #[inline] /// [PartialOrd::gt] on different numeric types fn num_gt(&self, other: &Other) -> bool { matches!(self.num_partial_cmp(other), Some(Ordering::Greater)) } #[inline] /// [PartialOrd::ge] on different numeric types fn num_ge(&self, other: &Other) -> bool { matches!( self.num_partial_cmp(other), Some(Ordering::Equal) | Some(Ordering::Greater) ) } #[inline] /// [Ord::cmp] on different numeric types. It panics if either of the numeric values contains NaN. fn num_cmp(&self, other: &Other) -> Ordering { self.num_partial_cmp(other).unwrap() } } /// Consistent hash implementation among different numeric types. /// /// It's ensured that if `a.num_eq(b)`, then `a` and `b` will result in the same hash. Although the other direction is /// not ensured because it's infeasible, the hash function is still designed to be as sparse as possible. pub trait NumHash { /// Consistent [Hash::hash][core::hash::Hash::hash] on different numeric types. /// /// This function ensures that if `a.num_eq(b)`, then `a.num_hash()` and `b.num_hash()` manipulate the state in the same way. fn num_hash<H: Hasher>(&self, state: &mut H); } mod hash; mod ord; #[cfg(test)] mod tests; // TODO: support num-irrational::{QuadraticSurd, QuadraticInt} when their API is stabilized num-order-1.2.0/src/ord.rs000064400000000000000000001237030072674642500135130ustar 00000000000000use crate::NumOrd; use core::cmp::Ordering; use core::convert::TryFrom; #[cfg(feature = "num-rational")] use num_modular::udouble; // Case0: swap operand, this introduces overhead so only used for non-primitive types #[allow(unused_macros)] macro_rules!
impl_ord_by_swap { ($($t1:ty | $t2:ty;)*) => ($( impl NumOrd<$t2> for $t1 { #[inline] fn num_partial_cmp(&self, other: &$t2) -> Option { other.num_partial_cmp(self).map(Ordering::reverse) } } )*); } // Case1: forward to builtin operator for same types macro_rules! impl_ord_equal_types { ($($t:ty)*) => ($( impl NumOrd<$t> for $t { #[inline] fn num_partial_cmp(&self, other: &$t) -> Option { self.partial_cmp(&other) } } )*); } impl_ord_equal_types! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize f32 f64 } // Case2: forward to same types by safe casting macro_rules! impl_ord_by_casting { ($($small:ty => $big:ty;)*) => ($( impl NumOrd<$small> for $big { #[inline] fn num_partial_cmp(&self, other: &$small) -> Option { self.partial_cmp(&<$big>::from(*other)) } } impl NumOrd<$big> for $small { #[inline] fn num_partial_cmp(&self, other: &$big) -> Option { <$big>::from(*self).partial_cmp(other) } } )*); } impl_ord_by_casting! { // uN, uM for N < M u8 => u128; u8 => u64; u8 => u32; u8 => u16; u16 => u128; u16 => u64; u16 => u32; u32 => u128; u32 => u64; u64 => u128; // iN, iM for N > M i8 => i128; i8 => i64; i8 => i32; i8 => i16; i16 => i128; i16 => i64; i16 => i32; i32 => i128; i32 => i64; i64 => i128; // iN, uM for N > M u8 => i128; u8 => i64; u8 => i32; u8 => i16; u16 => i128; u16 => i64; u16 => i32; u32 => i128; u32 => i64; u64 => i128; // fN, fM for N > M f32 => f64; // f32, uM for 24 >= M, since f32 can exactly represent all integers (-2^24,2^24) // f64, uM for 53 >= M, since f64 can exactly represent all integers (-2^53,2^53) u8 => f32; u16 => f32; u8 => f64; u16 => f64; u32 => f64; // f32, iM for 24 >= M // f64, iM for 53 >= M // since iM's range [-2^(M-1),2^(M-1)) includes -2^(M-1), bounds do not change i8 => f32; i16 => f32; i8 => f64; i16 => f64; i32 => f64; } // Case3: trivial logic for comparing signed and unsigned integers macro_rules! impl_ord_between_diff_sign { ($($signed:ty => $unsigned:ty;)*) => ($( impl NumOrd<$signed> for $unsigned { #[inline] fn num_partial_cmp(&self, other: &$signed) -> Option { if other < &0 { Some(Ordering::Greater) } else { self.partial_cmp(&<$unsigned>::try_from(*other).unwrap()) } } } impl NumOrd<$unsigned> for $signed { #[inline] fn num_partial_cmp(&self, other: &$unsigned) -> Option { if self < &0 { Some(Ordering::Less) } else { <$unsigned>::try_from(*self).unwrap().partial_cmp(other) } } } )*); } impl_ord_between_diff_sign! { i8 => u128; i8 => u64; i8 => u32 ; i8 => u16; i8 => u8; i16 => u128; i16 => u64; i16 => u32 ; i16 => u16; i32 => u128; i32 => u64; i32 => u32 ; i64 => u128; i64 => u64; i128 => u128; isize => usize; } // Case4: special handling for comparing float and integer types // Note: if `a` is an integer, `a cmp b` equals to `(a, trunc(b)) cmp (trunc(b), b)` (lexicographically) trait FloatExp { /// Get the exponent of a float number fn e(self) -> i16; } impl FloatExp for f32 { #[inline] fn e(self) -> i16 { (self.to_bits() >> 23 & 0xff) as i16 - (0x7f + 23) } } impl FloatExp for f64 { #[inline] fn e(self) -> i16 { (self.to_bits() >> 52 & 0x7ff) as i16 - (0x3ff + 52) } } macro_rules! 
impl_ord_between_int_float { ($($float:ty | $int:ty;)*) => ($( impl NumOrd<$float> for $int { #[inline] fn num_partial_cmp(&self, other: &$float) -> Option { if other.is_nan() { None } else if other < &(<$int>::MIN as $float) { // integer min is on binary boundary Some(Ordering::Greater) } else if other >= &(<$int>::MAX as $float) { // integer max is not on binary boundary Some(Ordering::Less) } else if other.e() >= 0 { // the float has no fractional part self.partial_cmp(&(*other as $int)) } else { let trunc = *other as $int; (*self, trunc as $float).partial_cmp(&(trunc, *other)) } } } impl NumOrd<$int> for $float { #[inline] fn num_partial_cmp(&self, other: &$int) -> Option { if self.is_nan() { None } else if self < &(<$int>::MIN as $float) { // integer min is on binary boundary Some(Ordering::Less) } else if self >= &(<$int>::MAX as $float) { // integer max is not on binary boundary Some(Ordering::Greater) } else if self.e() >= 0 { // the float has no fractional part (*self as $int).partial_cmp(other) } else { let trunc = *other as $int; (trunc, *self).partial_cmp(&(*other, trunc as $float)) } } } )*); } impl_ord_between_int_float! { f32|u128; f32|i128; f32|u64; f32|i64; f32|u32; f32|i32; f64|u128; f64|i128; f64|u64; f64|i64; } // Case5: forward size integers to corresponding concrete types macro_rules! impl_ord_with_size_types { ($($t:ty)*) => ($( impl NumOrd<$t> for usize { #[inline] fn num_partial_cmp(&self, other: &$t) -> Option { #[cfg(target_pointer_width = "32")] { (*self as u32).num_partial_cmp(other) } #[cfg(target_pointer_width = "64")] { (*self as u64).num_partial_cmp(other) } } } impl NumOrd for $t { #[inline] fn num_partial_cmp(&self, other: &usize) -> Option { #[cfg(target_pointer_width = "32")] { self.num_partial_cmp(&(*other as u32)) } #[cfg(target_pointer_width = "64")] { self.num_partial_cmp(&(*other as u64)) } } } impl NumOrd<$t> for isize { #[inline] fn num_partial_cmp(&self, other: &$t) -> Option { #[cfg(target_pointer_width = "32")] { (*self as i32).num_partial_cmp(other) } #[cfg(target_pointer_width = "64")] { (*self as i64).num_partial_cmp(other) } } } impl NumOrd for $t { #[inline] fn num_partial_cmp(&self, other: &isize) -> Option { #[cfg(target_pointer_width = "32")] { self.num_partial_cmp(&(*other as i32)) } #[cfg(target_pointer_width = "64")] { self.num_partial_cmp(&(*other as i64)) } } } )*); } impl_ord_with_size_types!(u8 u16 u32 u64 u128 i8 i16 i32 i64 i128 f32 f64); // Case6: separate handling for special types #[cfg(feature = "num-bigint")] mod _num_bigint { use super::*; use num_bigint::{BigInt, BigUint}; use num_traits::{FromPrimitive, Signed}; impl_ord_equal_types!(BigInt BigUint); impl_ord_by_casting! { u8 => BigUint; u16 => BigUint; u32 => BigUint; u64 => BigUint; u128 => BigUint; i8 => BigInt; i16 => BigInt; i32 => BigInt; i64 => BigInt; i128 => BigInt; u8 => BigInt; u16 => BigInt; u32 => BigInt; u64 => BigInt; u128 => BigInt; } impl_ord_between_diff_sign! { i8 => BigUint; i16 => BigUint; i32 => BigUint; i64 => BigUint; i128 => BigUint; } impl_ord_with_size_types! (BigInt BigUint); // specialized implementations impl NumOrd for BigUint { #[inline] fn num_partial_cmp(&self, other: &f32) -> Option { if other.is_nan() { None } else if other < &0. 
{ Some(Ordering::Greater) } else if other.is_infinite() && other.is_sign_positive() { Some(Ordering::Less) } else { let trunc = other.trunc(); (self, &trunc).partial_cmp(&(&BigUint::from_f32(trunc).unwrap(), other)) } } } impl NumOrd for BigUint { #[inline] fn num_partial_cmp(&self, other: &f64) -> Option { if other.is_nan() { None } else if other < &0. { Some(Ordering::Greater) } else if other.is_infinite() && other.is_sign_positive() { Some(Ordering::Less) } else { let trunc = other.trunc(); (self, &trunc).partial_cmp(&(&BigUint::from_f64(trunc).unwrap(), other)) } } } impl NumOrd for BigInt { #[inline] fn num_partial_cmp(&self, other: &f32) -> Option { if other.is_nan() { None } else if other.is_infinite() { if other.is_sign_positive() { Some(Ordering::Less) } else { Some(Ordering::Greater) } } else { let trunc = other.trunc(); (self, &trunc).partial_cmp(&(&BigInt::from_f32(trunc).unwrap(), other)) } } } impl NumOrd for BigInt { #[inline] fn num_partial_cmp(&self, other: &f64) -> Option { if other.is_nan() { None } else if other.is_infinite() { if other.is_sign_positive() { Some(Ordering::Less) } else { Some(Ordering::Greater) } } else { let trunc = other.trunc(); (self, &trunc).partial_cmp(&(&BigInt::from_f64(trunc).unwrap(), other)) } } } impl NumOrd for BigUint { #[inline] fn num_partial_cmp(&self, other: &BigInt) -> Option { if other.is_negative() { Some(Ordering::Greater) } else { self.partial_cmp(other.magnitude()) } } } impl_ord_by_swap! { f32|BigInt; f32|BigUint; f64|BigInt; f64|BigUint; BigInt|BigUint; } } // FIXME: Implementations for templated numeric types are directly specialized, because there is no // negative impl or specialization support yet in rust. We could have a generalized way to implement // the comparsion if the specialization is supported. #[cfg(feature = "num-rational")] mod _num_rational { use super::*; use num_rational::Ratio; use num_traits::{float::FloatCore, CheckedMul, Signed, Zero}; impl_ord_equal_types!( Ratio Ratio Ratio Ratio Ratio Ratio Ratio Ratio Ratio Ratio Ratio Ratio ); macro_rules! impl_ratio_ord_with_int { ($($t:ty)*) => ($( impl NumOrd> for $t { #[inline] fn num_partial_cmp(&self, other: &Ratio<$t>) -> Option { (self * other.denom()).partial_cmp(other.numer()) } } impl NumOrd<$t> for Ratio<$t> { #[inline] fn num_partial_cmp(&self, other: &$t) -> Option { self.numer().partial_cmp(&(other * self.denom())) } } )*); } impl_ratio_ord_with_int!(i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize); macro_rules! 
impl_ratio_ord_by_casting { ($($small:ty => $big:ty;)*) => ($( // between ratios impl NumOrd> for Ratio<$big> { #[inline] fn num_partial_cmp(&self, other: &Ratio<$small>) -> Option { let r = Ratio::new(<$big>::from(*other.numer()), <$big>::from(*other.denom())); self.partial_cmp(&r) } } impl NumOrd> for Ratio<$small> { #[inline] fn num_partial_cmp(&self, other: &Ratio<$big>) -> Option { let r = Ratio::new(<$big>::from(*self.numer()), <$big>::from(*self.denom())); r.partial_cmp(other) } } // between ratio and ints impl NumOrd<$small> for Ratio<$big> { #[inline] fn num_partial_cmp(&self, other: &$small) -> Option { if let Some(prod) = self.denom().checked_mul(&<$big>::from(*other)) { self.numer().partial_cmp(&prod) } else { Some(Ordering::Less) } } } impl NumOrd> for $small { #[inline] fn num_partial_cmp(&self, other: &Ratio<$big>) -> Option { if let Some(prod) = other.denom().checked_mul(&<$big>::from(*self)) { prod.partial_cmp(other.numer()) } else { Some(Ordering::Greater) } } } impl NumOrd<$big> for Ratio<$small> { #[inline] fn num_partial_cmp(&self, other: &$big) -> Option { if let Some(prod) = other.checked_mul(&<$big>::from(*self.denom())) { <$big>::from(*self.numer()).partial_cmp(&prod) } else { Some(Ordering::Less) } } } impl NumOrd> for $big { #[inline] fn num_partial_cmp(&self, other: &Ratio<$small>) -> Option { if let Some(prod) = self.checked_mul(&<$big>::from(*other.denom())) { prod.partial_cmp(&<$big>::from(*other.numer())) } else { Some(Ordering::Greater) } } } )*); } impl_ratio_ord_by_casting! { // uN, uM for N < M u8 => u128; u8 => u64; u8 => u32; u8 => u16; u16 => u128; u16 => u64; u16 => u32; u32 => u128; u32 => u64; u64 => u128; // iN, iM for N > M i8 => i128; i8 => i64; i8 => i32; i8 => i16; i16 => i128; i16 => i64; i16 => i32; i32 => i128; i32 => i64; i64 => i128; // iN, uM for N > M u8 => i128; u8 => i64; u8 => i32; u8 => i16; u16 => i128; u16 => i64; u16 => i32; u32 => i128; u32 => i64; u64 => i128; } // cast unsigned integers for comparison macro_rules! 
impl_ratio_ord_between_diff_sign { ($($int:ty => $uint:ty;)*) => ($( // between ratios impl NumOrd> for Ratio<$int> { #[inline] fn num_partial_cmp(&self, other: &Ratio<$uint>) -> Option { if self.is_negative() { Some(Ordering::Less) } else { let r = Ratio::<$uint>::new(<$uint>::try_from(*self.numer()).unwrap(), <$uint>::try_from(*self.denom()).unwrap()); r.partial_cmp(other) } } } impl NumOrd> for Ratio<$uint> { #[inline] fn num_partial_cmp(&self, other: &Ratio<$int>) -> Option { if other.is_negative() { Some(Ordering::Greater) } else { let r = Ratio::<$uint>::new(<$uint>::try_from(*other.numer()).unwrap(), <$uint>::try_from(*other.denom()).unwrap()); self.partial_cmp(&r) } } } // between ratio and integers impl NumOrd<$uint> for Ratio<$int> { #[inline] fn num_partial_cmp(&self, other: &$uint) -> Option { if self.is_negative() { Some(Ordering::Less) } else { <$uint>::try_from(*self.numer()).unwrap().partial_cmp(&(<$uint>::try_from(*self.denom()).unwrap() * other)) } } } impl NumOrd> for $uint { #[inline] fn num_partial_cmp(&self, other: &Ratio<$int>) -> Option { if other.is_negative() { Some(Ordering::Greater) } else { (<$uint>::try_from(*other.denom()).unwrap() * self).partial_cmp(&<$uint>::try_from(*other.numer()).unwrap()) } } } impl NumOrd<$int> for Ratio<$uint> { #[inline] fn num_partial_cmp(&self, other: &$int) -> Option { if other.is_negative() { Some(Ordering::Greater) } else { self.numer().partial_cmp(&(<$uint>::try_from(*other).unwrap() * self.denom())) } } } impl NumOrd> for $int { #[inline] fn num_partial_cmp(&self, other: &Ratio<$uint>) -> Option { if self.is_negative() { Some(Ordering::Less) } else { (<$uint>::try_from(*self).unwrap() * other.denom()).partial_cmp(other.numer()) } } } )*); } impl_ratio_ord_between_diff_sign! { i8 => u128; i8 => u64; i8 => u32; i8 => u16; i8 => u8; i16 => u128; i16 => u64; i16 => u32; i16 => u16; i32 => u128; i32 => u64; i32 => u32; i64 => u128; i64 => u64; i128 => u128; isize => usize; } macro_rules! float_cmp_shortcuts { ($ratio:tt, $float:tt) => { // shortcut for comparing zeros if $ratio.is_zero() { return 0f64.partial_cmp($float); } if $float.is_zero() { return $ratio.numer().partial_cmp(&0); } // shortcut for nan and inf if $float.is_nan() { return None; } else if $float.is_infinite() { if $float.is_sign_positive() { return Some(Ordering::Less); } else { // negative return Some(Ordering::Greater); } } }; } // special handling for f64 against u64/i64 and u128/i128 impl NumOrd for Ratio { fn num_partial_cmp(&self, other: &f64) -> Option { float_cmp_shortcuts!(self, other); // other = sign * man * 2^exp let (man, exp, sign) = other.integer_decode(); if sign < 0 { return Some(Ordering::Greater); } // self = a / b let a = *self.numer(); let b = *self.denom(); let result = if exp >= 0 { // r / f = a / (man * 2^exp * b) if exp >= 0 if let Some(den) = || -> Option<_> { 1u64.checked_shl(exp as u32)? .checked_mul(man)? 
.checked_mul(b) }() { a.partial_cmp(&den).unwrap() } else { Ordering::Less } } else { // r / f = (a * 2^(-exp)) / (man * b) if exp < 0 let den = man as u128 * b as u128; if let Some(num) = || -> Option<_> { 1u128.checked_shl((-exp) as u32)?.checked_mul(a as u128) }() { num.partial_cmp(&den).unwrap() } else { Ordering::Greater } }; Some(result) } } impl NumOrd for Ratio { fn num_partial_cmp(&self, other: &f64) -> Option { float_cmp_shortcuts!(self, other); // other = sign * man * 2^exp let (man, exp, sign) = other.integer_decode(); let reverse = match (!self.is_negative(), sign >= 0) { (true, false) => return Some(Ordering::Greater), (false, true) => return Some(Ordering::Less), (true, true) => false, (false, false) => true, }; // self = a / b, using safe absolute operation let a = if self.numer() < &0 { (*self.numer() as u64).wrapping_neg() } else { *self.numer() as u64 }; let b = if self.denom() < &0 { (*self.denom() as u64).wrapping_neg() } else { *self.denom() as u64 }; let result = if exp >= 0 { // r / f = a / (man * 2^exp * b) if exp >= 0 if let Some(den) = || -> Option<_> { 1u64.checked_shl(exp as u32)? .checked_mul(man)? .checked_mul(b) }() { a.partial_cmp(&den).unwrap() } else { Ordering::Less } } else { // r / f = (a * 2^(-exp)) / (man * b) if exp < 0 let den = man as u128 * b as u128; if let Some(num) = || -> Option<_> { 1u128.checked_shl((-exp) as u32)?.checked_mul(a as u128) }() { num.partial_cmp(&den).unwrap() } else { Ordering::Greater } }; if reverse { Some(result.reverse()) } else { Some(result) } } } impl NumOrd for Ratio { fn num_partial_cmp(&self, other: &f64) -> Option { float_cmp_shortcuts!(self, other); // other = sign * man * 2^exp let (man, exp, sign) = other.integer_decode(); if sign < 0 { return Some(Ordering::Greater); } // self = a / b let a = *self.numer(); let b = *self.denom(); let result = if exp >= 0 { // r / f = a / (man * 2^exp * b) if exp >= 0 if let Some(num) = || -> Option<_> { 1u128 .checked_shl(exp as u32)? .checked_mul(man as u128)? .checked_mul(b) }() { a.partial_cmp(&num).unwrap() } else { Ordering::Less } } else { // r / f = (a * 2^(-exp)) / (man * b) if exp < 0 let den = udouble::widening_mul(man as u128, b); if let Some(num) = || -> Option<_> { let (v, o) = udouble { lo: 1, hi: 0 } .checked_shl((-exp) as u32)? .overflowing_mul1(a); if !o { Some(v) } else { None } }() { num.partial_cmp(&den).unwrap() } else { Ordering::Greater } }; Some(result) } } impl NumOrd for Ratio { fn num_partial_cmp(&self, other: &f64) -> Option { float_cmp_shortcuts!(self, other); // other = sign * man * 2^exp let (man, exp, sign) = other.integer_decode(); let reverse = match (!self.is_negative(), sign >= 0) { (true, false) => return Some(Ordering::Greater), (false, true) => return Some(Ordering::Less), (true, true) => false, (false, false) => true, }; // self = a / b, using safe absolute operation let a = if self.numer() < &0 { (*self.numer() as u128).wrapping_neg() } else { *self.numer() as u128 }; let b = if self.denom() < &0 { (*self.denom() as u128).wrapping_neg() } else { *self.denom() as u128 }; let result = if exp >= 0 { // r / f = a / (man * 2^exp * b) if exp >= 0 if let Some(num) = || -> Option<_> { 1u128 .checked_shl(exp as u32)? .checked_mul(man as u128)? .checked_mul(b) }() { a.partial_cmp(&num).unwrap() } else { Ordering::Less } } else { // r / f = (a * 2^(-exp)) / (man * b) if exp < 0 let den = udouble::widening_mul(man as u128, b); if let Some(num) = || -> Option<_> { let (v, o) = udouble { lo: 1, hi: 0 } .checked_shl((-exp) as u32)? 
.overflowing_mul1(a); if !o { Some(v) } else { None } }() { num.partial_cmp(&den).unwrap() } else { Ordering::Greater } }; if reverse { Some(result.reverse()) } else { Some(result) } } } impl_ord_by_swap!(f64|Ratio; f64|Ratio; f64|Ratio; f64|Ratio;); // cast to f64 and i64 for comparison macro_rules! impl_ratio_ord_with_floats_by_casting { ($($float:ty => $bfloat:ty | $int:ty => $bint:ty;)*) => ($( impl NumOrd<$float> for Ratio<$int> { #[inline] fn num_partial_cmp(&self, other: &$float) -> Option { let bratio = Ratio::<$bint>::new(*self.numer() as $bint, *self.denom() as $bint); bratio.num_partial_cmp(&(*other as $bfloat)) } } impl NumOrd> for $float { #[inline] fn num_partial_cmp(&self, other: &Ratio<$int>) -> Option { let bratio = Ratio::<$bint>::new(*other.numer() as $bint, *other.denom() as $bint); (*self as $bfloat).num_partial_cmp(&bratio) } } )*); } impl_ratio_ord_with_floats_by_casting! { f32 => f64|i8 => i64; f32 => f64|i16 => i64; f32 => f64|i32 => i64; f32 => f64|i64 => i64; f64 => f64|i8 => i64; f64 => f64|i16 => i64; f64 => f64|i32 => i64; f32 => f64|u8 => u64; f32 => f64|u16 => u64; f32 => f64|u32 => u64; f32 => f64|u64 => u64; f64 => f64|u8 => u64; f64 => f64|u16 => u64; f64 => f64|u32 => u64; f32 => f64|u128 => u128; f32 => f64|i128 => i128; } // deal with size types macro_rules! impl_ratio_with_size_types_ord { ($($t:ty)*) => ($( impl NumOrd<$t> for Ratio { #[inline] fn num_partial_cmp(&self, other: &$t) -> Option { #[cfg(target_pointer_width = "32")] let r = Ratio::::new(*self.numer() as i32, *self.denom() as i32); #[cfg(target_pointer_width = "64")] let r = Ratio::::new(*self.numer() as i64, *self.denom() as i64); r.num_partial_cmp(other) } } impl NumOrd<$t> for Ratio { #[inline] fn num_partial_cmp(&self, other: &$t) -> Option { #[cfg(target_pointer_width = "32")] let r = Ratio::::new(*self.numer() as u32, *self.denom() as u32); #[cfg(target_pointer_width = "64")] let r = Ratio::::new(*self.numer() as u64, *self.denom() as u64); r.num_partial_cmp(other) } } impl_ord_by_swap!($t|Ratio; $t|Ratio;); )*); } impl_ratio_with_size_types_ord!(i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64); macro_rules! 
impl_ratio_ord_with_size_types { ($($t:ty)*) => ($( impl NumOrd> for isize { #[inline] fn num_partial_cmp(&self, other: &Ratio<$t>) -> Option { #[cfg(target_pointer_width = "32")] return (*self as i32).num_partial_cmp(other); #[cfg(target_pointer_width = "64")] return (*self as i64).num_partial_cmp(other); } } impl NumOrd> for usize { #[inline] fn num_partial_cmp(&self, other: &Ratio<$t>) -> Option { #[cfg(target_pointer_width = "32")] return (*self as u32).num_partial_cmp(other); #[cfg(target_pointer_width = "64")] return (*self as u64).num_partial_cmp(other); } } impl NumOrd> for Ratio { #[inline] fn num_partial_cmp(&self, other: &Ratio<$t>) -> Option { #[cfg(target_pointer_width = "32")] let r = Ratio::::new(*self.numer() as i32, *self.denom() as i32); #[cfg(target_pointer_width = "64")] let r = Ratio::::new(*self.numer() as i64, *self.denom() as i64); r.num_partial_cmp(other) } } impl NumOrd> for Ratio { #[inline] fn num_partial_cmp(&self, other: &Ratio<$t>) -> Option { #[cfg(target_pointer_width = "32")] let r = Ratio::::new(*self.numer() as u32, *self.denom() as u32); #[cfg(target_pointer_width = "64")] let r = Ratio::::new(*self.numer() as u64, *self.denom() as u64); r.num_partial_cmp(other) } } impl_ord_by_swap!(Ratio<$t>|isize; Ratio<$t>|usize; Ratio<$t>|Ratio; Ratio<$t>|Ratio;); )*); } impl_ratio_ord_with_size_types!(i8 i16 i32 i64 i128 u8 u16 u32 u64 u128); #[cfg(feature = "num-bigint")] mod _num_bigint { use super::*; use num_bigint::{BigInt, BigUint}; use num_traits::One; impl_ord_equal_types!(Ratio Ratio); impl_ratio_ord_by_casting! { u8 => BigUint; u16 => BigUint; u32 => BigUint; u64 => BigUint; u128 => BigUint; i8 => BigInt; i16 => BigInt; i32 => BigInt; i64 => BigInt; i128 => BigInt; u8 => BigInt; u16 => BigInt; u32 => BigInt; u64 => BigInt; u128 => BigInt; } impl_ratio_ord_between_diff_sign! 
{ i8 => BigUint; i16 => BigUint; i32 => BigUint; i64 => BigUint; i128 => BigUint; } impl_ratio_ord_with_int!(BigInt BigUint); impl_ratio_with_size_types_ord!(BigInt BigUint); impl_ratio_ord_with_size_types!(BigInt BigUint); // specialized implementations impl NumOrd for Ratio { #[inline] fn num_partial_cmp(&self, other: &f64) -> Option { // shortcut for comparing zeros if self.is_zero() { return 0f64.partial_cmp(other); } if other.is_zero() { return self.numer().partial_cmp(&BigUint::zero()); } // shortcut for nan and inf if other.is_nan() { return None; } else if other.is_infinite() { if other.is_sign_positive() { return Some(Ordering::Less); } else { // negative return Some(Ordering::Greater); } } // other = sign * man * 2^exp let (man, exp, sign) = other.integer_decode(); if sign < 0 { return Some(Ordering::Greater); } // self = a / b let a = self.numer(); let b = self.denom(); let result = if exp >= 0 { // r / f = a / (man * 2^exp * b) if exp >= 0 a.partial_cmp(&((BigUint::one() << exp as u32) * man)) .unwrap() } else { // r / f = (a * 2^(-exp)) / (man * b) if exp < 0 let den = BigUint::from(man) * b; let num = (BigUint::one() << ((-exp) as u32)) * a; num.partial_cmp(&den).unwrap() }; Some(result) } } impl NumOrd for Ratio { #[inline] fn num_partial_cmp(&self, other: &f64) -> Option { // shortcut for comparing zeros if self.is_zero() { return 0f64.partial_cmp(other); } if other.is_zero() { return self.numer().partial_cmp(&BigInt::zero()); } // shortcut for nan and inf if other.is_nan() { return None; } else if other.is_infinite() { if other.is_sign_positive() { return Some(Ordering::Less); } else { // negative return Some(Ordering::Greater); } } // other = sign * man * 2^exp let (man, exp, sign) = other.integer_decode(); let reverse = match (!self.is_negative(), sign >= 0) { (true, false) => return Some(Ordering::Greater), (false, true) => return Some(Ordering::Less), (true, true) => false, (false, false) => true, }; // self = a / b, using safe absolute operation let a = self.numer().magnitude(); let b = self.denom().magnitude(); let result = if exp >= 0 { // r / f = a / (man * 2^exp * b) if exp >= 0 a.partial_cmp(&((BigUint::one() << exp as u32) * man)) .unwrap() } else { // r / f = (a * 2^(-exp)) / (man * b) if exp < 0 let den = BigUint::from(man) * b; let num = (BigUint::one() << ((-exp) as u32)) * a; num.partial_cmp(&den).unwrap() }; if reverse { Some(result.reverse()) } else { Some(result) } } } impl NumOrd for Ratio { #[inline] fn num_partial_cmp(&self, other: &f32) -> Option { self.num_partial_cmp(&(*other as f64)) } } impl NumOrd for Ratio { #[inline] fn num_partial_cmp(&self, other: &f32) -> Option { self.num_partial_cmp(&(*other as f64)) } } impl NumOrd> for Ratio { #[inline] fn num_partial_cmp(&self, other: &Ratio) -> Option { if other.is_negative() { Some(Ordering::Greater) } else { let rnum = other.numer().magnitude(); let rden = other.denom().magnitude(); (self.numer() * rden).partial_cmp(&(self.denom() * rnum)) } } } impl NumOrd> for BigUint { #[inline] fn num_partial_cmp(&self, other: &Ratio) -> Option { if other.is_negative() { Some(Ordering::Greater) } else { let rnum = other.numer().magnitude(); let rden = other.denom().magnitude(); (self * rden).partial_cmp(&rnum) } } } impl_ord_by_swap! { f32|Ratio; f32|Ratio; f64|Ratio; f64|Ratio; Ratio|Ratio; Ratio|BigUint; } } } // Order of complex numbers is implemented as lexicographic order #[cfg(feature = "num-complex")] mod _num_complex { use super::*; use num_complex::{Complex, Complex32, Complex64}; macro_rules! 
impl_complex_ord_lexical { ($($t1:ty | $t2:ty;)*) => ($( impl NumOrd> for Complex<$t1> { #[inline] fn num_partial_cmp(&self, other: &Complex<$t2>) -> Option { let re_cmp = self.re.num_partial_cmp(&other.re); if matches!(re_cmp, Some(o) if o == Ordering::Equal) { self.im.num_partial_cmp(&other.im) } else { re_cmp } } } )*); ($($t:ty)*) => ($( impl NumOrd> for Complex<$t> { #[inline] fn num_partial_cmp(&self, other: &Complex<$t>) -> Option { let re_cmp = self.re.partial_cmp(&other.re); if matches!(re_cmp, Some(o) if o == Ordering::Equal) { self.im.partial_cmp(&other.im) } else { re_cmp } } } )*); } impl_complex_ord_lexical!(f32 f64); impl_complex_ord_lexical!(f32|f64; f64|f32;); macro_rules! impl_complex_ord_with_real { ($($t:ty)*) => ($( impl NumOrd<$t> for Complex32 { #[inline] fn num_partial_cmp(&self, other: &$t) -> Option { if self.im.is_nan() { // shortcut nan tests return None; } let re_cmp = self.re.num_partial_cmp(other); if matches!(re_cmp, Some(o) if o == Ordering::Equal) { self.im.num_partial_cmp(&0f32) } else { re_cmp } } } impl NumOrd for $t { #[inline] fn num_partial_cmp(&self, other: &Complex32) -> Option { if other.im.is_nan() { // shortcut nan tests return None; } let re_cmp = self.num_partial_cmp(&other.re); if matches!(re_cmp, Some(o) if o == Ordering::Equal) { 0f32.num_partial_cmp(&other.im) } else { re_cmp } } } impl NumOrd<$t> for Complex64 { #[inline] fn num_partial_cmp(&self, other: &$t) -> Option { if self.im.is_nan() { // shortcut nan tests return None; } let re_cmp = self.re.num_partial_cmp(other); if matches!(re_cmp, Some(o) if o == Ordering::Equal) { self.im.num_partial_cmp(&0f64) } else { re_cmp } } } impl NumOrd for $t { #[inline] fn num_partial_cmp(&self, other: &Complex64) -> Option { if other.im.is_nan() { // shortcut nan tests return None; } let re_cmp = self.num_partial_cmp(&other.re); if matches!(re_cmp, Some(o) if o == Ordering::Equal) { 0f64.num_partial_cmp(&other.im) } else { re_cmp } } } )*); } impl_complex_ord_with_real! ( i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize f32 f64 ); #[cfg(feature = "num-bigint")] mod _num_bigint { use super::*; use num_bigint::{BigInt, BigUint}; impl_complex_ord_with_real! ( BigInt BigUint ); } #[cfg(feature = "num-rational")] mod _num_rational { use super::*; use num_rational::Ratio; impl_complex_ord_with_real! ( Ratio Ratio Ratio Ratio Ratio Ratio ); #[cfg(feature = "num-bigint")] mod _num_bigint { use super::*; use num_bigint::{BigInt, BigUint}; impl_complex_ord_with_real! 
( Ratio Ratio ); } } } num-order-1.2.0/src/tests.rs000064400000000000000000001036550072674642500140750ustar 00000000000000use super::*; use std::cmp::Ordering::{self, *}; use std::collections::hash_map::DefaultHasher; use std::fmt; use std::hash::{Hash, Hasher}; use std::vec::Vec; #[cfg(feature = "num-bigint")] use num_bigint::{BigInt, BigUint}; #[cfg(feature = "num-complex")] use num_complex::Complex; #[cfg(feature = "num-rational")] use num_rational::Ratio; #[derive(Clone, Debug)] #[allow(non_camel_case_types)] enum N { u8(u8), u16(u16), u32(u32), u64(u64), u128(u128), usize(usize), i8(i8), i16(i16), i32(i32), i64(i64), i128(i128), isize(isize), f32(f32), f64(f64), #[cfg(feature = "num-bigint")] ubig(BigUint), #[cfg(feature = "num-bigint")] ibig(BigInt), #[cfg(feature = "num-rational")] r8(Ratio), #[cfg(feature = "num-rational")] r16(Ratio), #[cfg(feature = "num-rational")] r32(Ratio), #[cfg(feature = "num-rational")] r64(Ratio), #[cfg(feature = "num-rational")] r128(Ratio), #[cfg(feature = "num-rational")] rsize(Ratio), #[cfg(all(feature = "num-bigint", feature = "num-rational"))] rbig(Ratio), #[cfg(feature = "num-complex")] c32(Complex), #[cfg(feature = "num-complex")] c64(Complex), } macro_rules! repeat_arms { ($e:expr; $v:ident => $arm:expr) => { match $e { N::u8($v) => $arm, N::u16($v) => $arm, N::u32($v) => $arm, N::u64($v) => $arm, N::u128($v) => $arm, N::usize($v) => $arm, N::i8($v) => $arm, N::i16($v) => $arm, N::i32($v) => $arm, N::i64($v) => $arm, N::i128($v) => $arm, N::isize($v) => $arm, N::f32($v) => $arm, N::f64($v) => $arm, #[cfg(feature = "num-bigint")] N::ubig($v) => $arm, #[cfg(feature = "num-bigint")] N::ibig($v) => $arm, #[cfg(feature = "num-rational")] N::r8($v) => $arm, #[cfg(feature = "num-rational")] N::r16($v) => $arm, #[cfg(feature = "num-rational")] N::r32($v) => $arm, #[cfg(feature = "num-rational")] N::r64($v) => $arm, #[cfg(feature = "num-rational")] N::r128($v) => $arm, #[cfg(feature = "num-rational")] N::rsize($v) => $arm, #[cfg(all(feature = "num-bigint", feature = "num-rational"))] N::rbig($v) => $arm, #[cfg(feature = "num-complex")] N::c32($v) => $arm, #[cfg(feature = "num-complex")] N::c64($v) => $arm, } }; } impl Hash for N { fn hash(&self, state: &mut H) { repeat_arms! { self; v => v.num_hash(state) } } } // create list of `N` objects with given value (arg1) and types (arg2) macro_rules! 
n { ($e:expr; $($t:ident),*) => (&[$(N::$t($e as $t)),*]); } const B32: f64 = (1u64 << 32) as f64; const F32_SUBNORMAL_MIN: f32 = 1.4e-45; const F64_SUBNORMAL_MIN: f64 = 4.9e-324; // list of selected numbers ordered ascendingly // some numbers will be removed to reduce test time if extra feature is enabled const NUMBERS: &'static [&'static [N]] = &[ // f64 min boundary and infinity n!(f64::NEG_INFINITY; f32, f64), n!(f64::MIN; f64), // f32 min boundary n!((-B32 * B32 - 0x1000 as f64) * B32 * B32; f64), // -2^128 - 2^76 n!(-B32 * B32 * B32 * B32; f64), // -2^128 n!((-B32 * B32 + 0x800 as f64) * B32 * B32; f64), // -2^128 + 2^75 n!(f32::MIN; f32), // -2^128 + 2^104 // i128/f32 min boundary n!(-(0x8000_0100_0000_0000_0000_0000_0000_0000u128 as f32); f32), n!(-(0x8000_0000_0000_0800_0000_0000_0000_0000u128 as f64); f64), n!(-0x8000_0000_0000_0000_0000_0000_0000_0000i128; i128, f32), n!(-0x7fff_ffff_ffff_ffff_ffff_ffff_ffff_ffffi128; i128), n!(-0x7fff_ffff_ffff_fc00_0000_0000_0000_0000i128; i128, f64), n!(-0x7fff_ff80_0000_0000_0000_0000_0000_0000i128; i128, f32), // i64 min boundary n!(-0x8000_0100_0000_0000i128; i128, f32), n!(-0x8000_0000_0000_0800i128; i128, f64), n!(-0x8000_0000_0000_0001i128; i128), n!(-0x8000_0000_0000_0000i64; i64, f32), n!(-0x7fff_ffff_ffff_ffffi64; i64), n!(-0x7fff_ffff_ffff_fc00i64; i64, f64), n!(-0x7fff_ff80_0000_0000i64; i64, f32), // f64 min exact int boundary n!(-0x20_0000_4000_0000i64; i64, f32), n!(-0x20_0000_0000_0002i64; i64, f64), n!(-0x20_0000_0000_0001i64; i64), n!(-0x20_0000_0000_0000i64; i64, f32), n!(-0x1f_ffff_ffff_ffffi64; i64, f64), n!(-0x1f_ffff_e000_0000i64; i64, f32), // f64 min exact half int boundary n!(-0x10_0000_2000_0000i64; i64, f32), n!(-0x10_0000_0000_0002i64; i64, f64), n!(-0x10_0000_0000_0001i64; i64, f64), n!(-0x10_0000_0000_0000i64; i64, f32), n!(-0xf_ffff_ffff_ffffi64 as f64 - 0.5; f64), n!(-0xf_ffff_ffff_ffffi64; i64, f64), n!(-0xf_ffff_f000_0000i64; i64, f32), // i32 min boundary n!(-0x8000_0100i64; i64, f32), n!(-0x8000_0001i64; i64, f64), n!(-0x8000_0000i64 as f64 - 0.5; f64), n!(-0x8000_0000i64; i64, f32), n!(-0x7fff_ffff as f64 - 0.5; f64), n!(-0x7fff_ffff; i32, f64), n!(-0x7fff_ff80; i32, f32), // f32 min exact int boundary n!(-0x100_0002; i32, f32), n!(-0x100_0001 as f64 - 0.5; f64), n!(-0x100_0001; i32, f64), n!(-0x100_0000 as f64 - 0.5; f64), n!(-0x100_0000; i32, f32), n!(-0xff_ffff as f64 - 0.5; f64), n!(-0xff_ffff; i32, f32), n!(-0xff_fffe as f64 - 0.5; f64), n!(-0xff_fffe; i32, f32), // f32 min exact half int boundary n!(-0x80_0002; i32, f32), n!(-0x80_0001 as f64 - 0.5; f64), n!(-0x80_0001; i32, f32), n!(-0x80_0000 as f64 - 0.5; f64), n!(-0x80_0000; i32, f32), n!(-0x7f_ffff as f64 - 0.5; f32), n!(-0x7f_ffff; i32, f32), n!(-0x7f_fffe as f64 - 0.5; f32), n!(-0x7f_fffe; i32, f32), // i16 min boundary #[cfg(not(any(feature = "num-bigint")))] n!(-0x8002; i32, f32), #[cfg(not(any(feature = "num-bigint")))] n!(-0x8001 as f32 - 0.5; f32), #[cfg(not(any(feature = "num-bigint")))] n!(-0x8001; i32, f32), #[cfg(not(any(feature = "num-bigint")))] n!(-0x8000 as f32 - 0.5; f32), #[cfg(not(any(feature = "num-bigint")))] n!(-0x8000; i16, f32), #[cfg(not(any(feature = "num-bigint")))] n!(-0x7fff as f32 - 0.5; f32), #[cfg(not(any(feature = "num-bigint")))] n!(-0x7fff; i16, f32), // i8 min boundary #[cfg(not(any(feature = "num-bigint")))] n!(-0x82; i16, f32), #[cfg(not(any(feature = "num-bigint")))] n!(-0x81 as f32 - 0.5; f32), #[cfg(not(any(feature = "num-bigint")))] n!(-0x81; i16, f32), #[cfg(not(any(feature = "num-bigint")))] n!(-0x80 as 
    #[cfg(not(any(feature = "num-bigint")))] n!(-0x80 as f32 - 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(-0x80; i8, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(-0x7f as f32 - 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(-0x7f; i8, f32),
    // around zero
    n!(-2; i8, f32),
    n!(-1.5; f32),
    n!(-1.0 - f32::EPSILON * 2.0; f32),
    n!(-1.0 - f32::EPSILON; f32),
    n!(-1.0 - f64::EPSILON * 2.0; f64),
    n!(-1.0 - f64::EPSILON; f64),
    n!(-1; i8, f32),
    n!(-1.0 + f64::EPSILON / 2.0; f64),
    n!(-1.0 + f64::EPSILON; f64),
    n!(-1.0 + f32::EPSILON / 2.0; f32),
    n!(-1.0 + f32::EPSILON; f32),
    n!(-0.5; f32),
    n!(-0.1; f32),
    n!(-f32::MIN_POSITIVE; f32),
    n!(-F32_SUBNORMAL_MIN; f32),
    n!(-f64::MIN_POSITIVE; f64),
    n!(-F64_SUBNORMAL_MIN; f64),
    &[N::u8(0), N::i8(0), N::f32(0.0), N::f32(-0.0)], // negative zeros should be handled!
    n!(F64_SUBNORMAL_MIN; f64),
    n!(f64::MIN_POSITIVE; f64),
    n!(F32_SUBNORMAL_MIN; f32),
    n!(f32::MIN_POSITIVE; f32),
    n!(0.1; f32),
    n!(0.5; f32),
    n!(1.0 - f32::EPSILON; f32),
    n!(1.0 - f32::EPSILON / 2.0; f32),
    n!(1.0 - f64::EPSILON; f64),
    n!(1.0 - f64::EPSILON / 2.0; f64),
    n!(1; u8, i8, f32),
    n!(1.0 + f64::EPSILON; f64),
    n!(1.0 + f64::EPSILON * 2.0; f64),
    n!(1.0 + f32::EPSILON; f32),
    n!(1.0 + f32::EPSILON * 2.0; f32),
    n!(1.5; f32),
    n!(2; u8, i8, f32),
    // i8 max boundary
    #[cfg(not(any(feature = "num-bigint")))] n!(0x7e; u8, i8, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x7f as f32 - 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x7f; u8, i8, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x7f as f32 + 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x80; u8, i16, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x80 as f32 + 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x81; u8, i16, f32),
    // u8 max boundary
    #[cfg(not(any(feature = "num-bigint")))] n!(0xfe; u8, i16, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0xff as f32 - 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0xff; u8, i16, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0xff as f32 + 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x100; u16, i16, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x100 as f32 + 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x101; u16, i16, f32),
    // i16 max boundary
    #[cfg(not(any(feature = "num-bigint")))] n!(0x7ffe; u16, i16, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x7fff as f32 - 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x7fff; u16, i16, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x7fff as f32 + 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x8000; u16, i32, f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x8000 as f32 + 0.5; f32),
    #[cfg(not(any(feature = "num-bigint")))] n!(0x8001; u16, i32, f32),
    // u16 max boundary
    n!(0xfffe; u16, i32, f32),
    n!(0xffff as f32 - 0.5; f32),
    n!(0xffff; u16, i32, f32),
    n!(0xffff as f32 + 0.5; f32),
    n!(0x1_0000; u32, i32, f32),
    n!(0x1_0000 as f32 + 0.5; f32),
    n!(0x1_0001; u32, i32, f32),
    // f32 max exact half int boundary
    n!(0x7f_fffe; u32, i32, f32),
    n!(0x7f_ffff as f64 - 0.5; f32),
    n!(0x7f_ffff; u32, i32, f32),
    n!(0x7f_ffff as f64 + 0.5; f32),
    n!(0x80_0000; u32, i32, f32),
    n!(0x80_0000 as f64 + 0.5; f64),
    n!(0x80_0001; u32, i32, f32),
    n!(0x80_0001 as f64 + 0.5; f64),
    n!(0x80_0002; u32, i32, f32),
    // f32 max exact int boundary
    n!(0xff_fffe; u32, i32, f32),
    n!(0xff_ffff as f64 - 0.5; f64),
    n!(0xff_ffff; u32, i32, f32),
    n!(0xff_ffff as f64 + 0.5; f64),
    n!(0x100_0000; u32, i32, f32),
    n!(0x100_0000 as f64 + 0.5; f64),
    n!(0x100_0001; u32, i32, f64),
    n!(0x100_0001 as f64 + 0.5; f64),
    n!(0x100_0002; u32, i32, f32),
    // i32 max boundary
    n!(0x7fff_ff80; u32, i32, f32),
    n!(0x7fff_ffff; u32, i32, f64),
    n!(0x7fff_ffff as f64 + 0.5; f64),
    n!(0x8000_0000u64; u32, i64, f32),
    n!(0x8000_0000u64 as f64 + 0.5; f64),
    n!(0x8000_0001u64; u32, i64, f64),
    n!(0x8000_0100u64; u32, i64, f32),
    // u32 max boundary
    n!(0xffff_ff00u64; u32, i64, f32),
    n!(0xffff_ffffu64; u32, i64, f64),
    n!(0xffff_ffffu64 as f64 + 0.5; f64),
    n!(0x1_0000_0000u64; u64, i64, f32),
    n!(0x1_0000_0000u64 as f64 + 0.5; f64),
    n!(0x1_0000_0001u64; u64, i64, f64),
    n!(0x1_0000_0200u64; u64, i64, f32),
    // f64 max exact half int boundary
    n!(0xf_ffff_f000_0000u64; u64, i64, f32),
    n!(0xf_ffff_ffff_ffffu64; u64, i64, f64),
    n!(0xf_ffff_ffff_ffffu64 as f64 + 0.5; f64),
    n!(0x10_0000_0000_0000u64; u64, i64, f32),
    n!(0x10_0000_0000_0001u64; u64, i64, f64),
    n!(0x10_0000_0000_0002u64; u64, i64, f64),
    n!(0x10_0000_2000_0000u64; u64, i64, f32),
    // f64 max exact int boundary
    n!(0x1f_ffff_e000_0000u64; u64, i64, f32),
    n!(0x1f_ffff_ffff_ffffu64; u64, i64, f64),
    n!(0x20_0000_0000_0000u64; u64, i64, f32),
    n!(0x20_0000_0000_0001u64; u64, i64),
    n!(0x20_0000_0000_0002u64; u64, i64, f64),
    n!(0x20_0000_4000_0000u64; u64, i64, f32),
    // i64 max boundary
    n!(0x7fff_ff80_0000_0000u64; u64, i64, f32),
    n!(0x7fff_ffff_ffff_fc00u64; u64, i64, f64),
    n!(0x7fff_ffff_ffff_ffffu64; u64, i64),
    n!(0x8000_0000_0000_0000u64; u64, i128, f32),
    n!(0x8000_0000_0000_0001u64; u64, i128),
    n!(0x8000_0000_0000_0800u64; u64, i128, f64),
    n!(0x8000_0100_0000_0000u64; u64, i128, f32),
    // u64 max boundary
    n!(0xffff_ff00_0000_0000u64; u64, i128, f32),
    n!(0xffff_ffff_ffff_f800u64; u64, i128, f64),
    n!(0xffff_ffff_ffff_ffffu64; u64, i128),
    n!(0x1_0000_0000_0000_0000u128; u128, i128, f32),
    n!(0x1_0000_0000_0000_0001u128; u128, i128),
    n!(0x1_0000_0000_0000_1000u128; u128, i128, f64),
    n!(0x1_0000_0200_0000_0000u128; u128, i128, f32),
    // i128 max boundary
    n!(0x7fff_ff80_0000_0000_0000_0000_0000_0000u128; u128, i128, f32),
    n!(0x7fff_ffff_ffff_fc00_0000_0000_0000_0000u128; u128, i128, f64),
    n!(0x7fff_ffff_ffff_ffff_ffff_ffff_ffff_ffffu128; u128, i128),
    n!(0x8000_0000_0000_0000_0000_0000_0000_0000u128; u128, f32),
    n!(0x8000_0000_0000_0000_0000_0000_0000_0001u128; u128),
    n!(0x8000_0000_0000_0800_0000_0000_0000_0000u128; u128, f64),
    n!(0x8000_0100_0000_0000_0000_0000_0000_0000u128; u128, f32),
    // u128/f32 max boundary
    n!(0xffff_ff00_0000_0000_0000_0000_0000_0000u128; u128, f32),
    n!(0xffff_ffff_ffff_f800_0000_0000_0000_0000u128; u128, f64), // 2^128 - 2^75
    n!(0xffff_ffff_ffff_ffff_ffff_ffff_ffff_ffffu128; u128),
    n!(B32 * B32 * B32 * B32; f64), // 2^128
    n!((B32 * B32 + 0x1000 as f64) * B32 * B32; f64), // 2^128 + 2^76
    // f64 max boundary and infinity
    n!(f64::MAX; f64),
    n!(f64::INFINITY; f32, f64),
];
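// Illustrative sketch (not from the original test suite): why some of the classes
// above list only integer types. 2^53 + 1 (0x20_0000_0000_0001) has no exact f64
// representation, so it cannot be numerically equal to any f64. Only APIs already
// used in this file (`num_eq` from the crate's ordering trait) are assumed.
#[test]
fn illustrative_exact_boundary() {
    let above_f64_exact = 0x20_0000_0000_0001u64; // 2^53 + 1
    // the round trip through f64 loses the +1
    assert_ne!(above_f64_exact as f64 as u64, above_f64_exact);
    // hence the integer is not numerically equal to its nearest f64
    assert!(!above_f64_exact.num_eq(&(above_f64_exact as f64)));
}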
fn expand_equiv_class(cls: &[N]) -> Vec<N> {
    let mut ret = Vec::new();
    for e in cls {
        // size extension
        match e {
            N::u8(v) => ret.extend_from_slice(&[N::u8(*v), N::u16(*v as u16), N::u32(*v as u32), N::u64(*v as u64), N::u128(*v as u128)]),
            N::u16(v) => ret.extend_from_slice(&[N::u16(*v), N::u32(*v as u32), N::u64(*v as u64), N::u128(*v as u128)]),
            N::u32(v) => ret.extend_from_slice(&[N::u32(*v), N::u64(*v as u64), N::u128(*v as u128)]),
            N::u64(v) => ret.extend_from_slice(&[N::u64(*v), N::u128(*v as u128)]),
            N::u128(v) => ret.push(N::u128(*v)),
            N::usize(v) => ret.push(N::usize(*v)),
            N::i8(v) => ret.extend_from_slice(&[N::i8(*v), N::i16(*v as i16), N::i32(*v as i32), N::i64(*v as i64), N::i128(*v as i128)]),
            N::i16(v) => ret.extend_from_slice(&[N::i16(*v), N::i32(*v as i32), N::i64(*v as i64), N::i128(*v as i128)]),
            N::i32(v) => ret.extend_from_slice(&[N::i32(*v), N::i64(*v as i64), N::i128(*v as i128)]),
            N::i64(v) => ret.extend_from_slice(&[N::i64(*v), N::i128(*v as i128)]),
            N::i128(v) => ret.push(N::i128(*v)),
            N::isize(v) => ret.push(N::isize(*v)),
            N::f32(v) => ret.extend_from_slice(&[N::f32(*v), N::f64(*v as f64)]),
            N::f64(v) => ret.push(N::f64(*v)),
            #[cfg(any(feature = "num-rational", feature = "num-bigint", feature = "num-complex"))]
            _ => {}
        }
        // size extension for bigints
        #[cfg(feature = "num-bigint")]
        match e {
            N::u8(v) => ret.push(N::ubig(BigUint::from(*v))),
            N::u16(v) => ret.push(N::ubig(BigUint::from(*v))),
            N::u32(v) => ret.push(N::ubig(BigUint::from(*v))),
            N::u64(v) => ret.push(N::ubig(BigUint::from(*v))),
            N::u128(v) => ret.push(N::ubig(BigUint::from(*v))),
            N::i8(v) => ret.push(N::ibig(BigInt::from(*v))),
            N::i16(v) => ret.push(N::ibig(BigInt::from(*v))),
            N::i32(v) => ret.push(N::ibig(BigInt::from(*v))),
            N::i64(v) => ret.push(N::ibig(BigInt::from(*v))),
            N::i128(v) => ret.push(N::ibig(BigInt::from(*v))),
            _ => {}
        }
        // insert equivalent usize/isize
        match e {
            N::u8(v) => ret.push(N::usize(*v as usize)),
            N::u16(v) => ret.push(N::usize(*v as usize)),
            N::u32(v) => ret.push(N::usize(*v as usize)),
            #[cfg(target_pointer_width = "64")]
            N::u64(v) => ret.push(N::usize(*v as usize)),
            N::i8(v) => ret.push(N::isize(*v as isize)),
            N::i16(v) => ret.push(N::isize(*v as isize)),
            N::i32(v) => ret.push(N::isize(*v as isize)),
            #[cfg(target_pointer_width = "64")]
            N::i64(v) => ret.push(N::isize(*v as isize)),
            _ => {}
        }
    }
    ret
}

fn assert_cmp<T: Into<Option<Ordering>>>(lhs: &N, rhs: &N, expected: T) {
    #[derive(PartialEq)]
    struct Result {
        ord: Option<Ordering>,
        eq: bool,
        ne: bool,
        lt: bool,
        gt: bool,
        le: bool,
        ge: bool,
    }
    impl fmt::Debug for Result {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            if let Some(ord) = self.ord {
                write!(f, "<{:?} (", ord)?;
            } else {
                write!(f, "<_ (")?;
            }
            let neg = |b: bool| if b { "" } else { "!" };
            write!(f, "{}eq {}ne {}lt {}gt {}le {}ge)>",
                neg(self.eq), neg(self.ne), neg(self.lt), neg(self.gt), neg(self.le), neg(self.ge))
        }
    }

    let expected: Option<Ordering> = expected.into();
    let expected = match expected {
        Some(Less) => {
            Result { ord: expected, eq: false, ne: true, lt: true, gt: false, le: true, ge: false }
        },
        Some(Equal) => {
            Result { ord: expected, eq: true, ne: false, lt: false, gt: false, le: true, ge: true }
        },
        Some(Greater) => {
            Result { ord: expected, eq: false, ne: true, lt: false, gt: true, le: false, ge: true }
        },
        None => {
            Result { ord: expected, eq: false, ne: true, lt: false, gt: false, le: false, ge: false }
        },
    };
    let actual = repeat_arms! {
        lhs; x => {
            repeat_arms! {
                rhs; y => Result {
                    ord: x.num_partial_cmp(y),
                    eq: x.num_eq(y), ne: x.num_ne(y),
                    lt: x.num_lt(y), gt: x.num_gt(y),
                    le: x.num_le(y), ge: x.num_ge(y)
                }
            }
        }
    };
    assert_eq!(expected, actual, "failed to compare {:?} against {:?}", lhs, rhs);
}

fn hash(num: &N) -> u64 {
    let mut hasher = DefaultHasher::new();
    num.hash(&mut hasher);
    hasher.finish()
}

#[test]
fn test_ordering() {
    let numbers: Vec<_> = NUMBERS.iter().map(|cls| expand_equiv_class(cls)).collect();

    // comparison between numbers
    for icls in 0..numbers.len() {
        for jcls in 0..numbers.len() {
            let expected = icls.cmp(&jcls);
            for i in &numbers[icls] {
                for j in &numbers[jcls] {
                    assert_cmp(i, j, expected);
                }
            }
        }
    }
}

#[test]
fn test_nan() {
    let numbers: Vec<_> = NUMBERS.iter().map(|cls| expand_equiv_class(cls)).collect();

    // comparison between numbers and NaNs
    for cls in &numbers {
        for i in cls {
            assert_cmp(i, &N::f32(f32::NAN), None);
            assert_cmp(i, &N::f64(f64::NAN), None);
            assert_cmp(&N::f32(f32::NAN), i, None);
            assert_cmp(&N::f64(f64::NAN), i, None);

            #[cfg(feature = "num-complex")]
            {
                assert_cmp(i, &N::c32(Complex::new(f32::NAN, 0.)), None);
                assert_cmp(i, &N::c32(Complex::new(0., f32::NAN)), None);
                assert_cmp(i, &N::c32(Complex::new(f32::NAN, f32::NAN)), None);
                assert_cmp(&N::c32(Complex::new(f32::NAN, 0.)), i, None);
                assert_cmp(&N::c32(Complex::new(0., f32::NAN)), i, None);
                assert_cmp(&N::c32(Complex::new(f32::NAN, f32::NAN)), i, None);
            }
        }
    }

    // comparison between NaNs themselves
    assert_cmp(&N::f32(f32::NAN), &N::f32(f32::NAN), None);
    assert_cmp(&N::f32(f32::NAN), &N::f64(f64::NAN), None);
    assert_cmp(&N::f64(f64::NAN), &N::f32(f32::NAN), None);
    assert_cmp(&N::f64(f64::NAN), &N::f64(f64::NAN), None);

    #[cfg(feature = "num-complex")]
    {
        let cnan0 = N::c32(Complex::new(f32::NAN, 0.));
        let c0nan = N::c32(Complex::new(0., f32::NAN));
        let cnannan = N::c32(Complex::new(f32::NAN, f32::NAN));
        assert_cmp(&cnan0, &cnan0, None);
        assert_cmp(&cnan0, &c0nan, None);
        assert_cmp(&cnan0, &cnannan, None);
        assert_cmp(&c0nan, &cnan0, None);
        assert_cmp(&c0nan, &c0nan, None);
        assert_cmp(&c0nan, &cnannan, None);
        assert_cmp(&cnannan, &cnan0, None);
        assert_cmp(&cnannan, &c0nan, None);
        assert_cmp(&cnannan, &cnannan, None);
    }
}

#[test]
fn test_hash() {
    for &equiv in NUMBERS {
        let hashes: Vec<u64> = equiv.iter().map(|n| hash(n)).collect();
        for i in 1..equiv.len() {
            assert_eq!(hashes[0], hashes[i], "Hash mismatch between {:?} and {:?}", equiv[0], equiv[i]);
        }
    }
}
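// Illustrative sketch (not from the original test suite): two concrete consequences
// of the equivalence classes above, written directly against the comparison methods
// that `assert_cmp` already exercises.
#[test]
fn illustrative_zero_and_nan() {
    // 0u8, 0i8, 0.0f32 and -0.0f32 sit in one equivalence class
    assert!(0u8.num_eq(&-0.0f32));
    // NaN is not comparable with anything, including itself
    assert!(f64::NAN.num_partial_cmp(&f64::NAN).is_none());
}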
#[test]
#[cfg(feature = "num-rational")]
fn test_rational_using_primitives() {
    fn expand_equiv_class_ratio(cls: &[N]) -> Vec<N> {
        let mut ret = Vec::new();
        for e in cls {
            // size extension
            match e {
                N::u8(v) => ret.push(N::u8(*v)),
                N::u16(v) => ret.push(N::u16(*v)),
                N::u32(v) => ret.push(N::u32(*v)),
                N::u64(v) => ret.push(N::u64(*v)),
                N::u128(v) => ret.push(N::u128(*v)),
                N::f64(v) => ret.push(N::f64(*v)),
                N::f32(v) => ret.extend_from_slice(&[N::f32(*v), N::f64(*v as f64)]),
                N::i8(v) => ret.extend_from_slice(&[N::i8(*v), N::r8(Ratio::from(*v))]),
                N::i16(v) => ret.extend_from_slice(&[N::i16(*v), N::r16(Ratio::from(*v))]),
                N::i32(v) => ret.extend_from_slice(&[N::i32(*v), N::r32(Ratio::from(*v))]),
                N::i64(v) => ret.extend_from_slice(&[N::i64(*v), N::r64(Ratio::from(*v))]),
                #[cfg(not(feature = "num-bigint"))]
                N::i128(v) => ret.extend_from_slice(&[N::i128(*v), N::r128(Ratio::from(*v))]),
                #[cfg(feature = "num-bigint")]
                N::i128(v) => ret.extend_from_slice(&[N::i128(*v), N::r128(Ratio::from(*v)), N::rbig(Ratio::from(BigInt::from(*v)))]),
                N::isize(v) => ret.extend_from_slice(&[N::isize(*v), N::rsize(Ratio::from(*v))]),
                #[cfg(feature = "num-bigint")]
                N::ibig(v) => ret.extend_from_slice(&[N::ibig(v.clone()), N::rbig(Ratio::from(v.clone()))]),
                _ => {}
            }
        }
        ret
    }

    let numbers: Vec<_> = NUMBERS.iter().map(|cls| expand_equiv_class_ratio(cls)).collect();

    // comparison between numbers
    for icls in 0..numbers.len() {
        for jcls in 0..numbers.len() {
            let expected = icls.cmp(&jcls);
            for i in &numbers[icls] {
                for j in &numbers[jcls] {
                    assert_cmp(i, j, expected);
                }
            }
        }
    }

    for &equiv in NUMBERS {
        let equiv = expand_equiv_class_ratio(equiv);
        let hashes: Vec<u64> = equiv.iter().map(hash).collect();
        for i in 1..equiv.len() {
            assert_eq!(hashes[0], hashes[i], "Hash mismatch between {:?} and {:?}", equiv[0], equiv[i]);
        }
    }
}

#[test]
#[cfg(feature = "num-rational")]
fn test_rational() {
    #[cfg(feature = "num-bigint")]
    let big = || BigInt::from(1u8) << 200u8;

    // additional test cases for rational numbers: (numer, denom, float value)
    let ratio_coeffs = [
        (N::i8(-2), N::i8(1), Some(N::f32(-2.))),
        // near -1
        (N::i32(i32::MIN), N::i32(i32::MAX), None),
        (N::i64(i64::MIN), N::i64(i64::MAX), None),
        #[cfg(feature = "num-bigint")]
        (N::ibig(-big()), N::ibig(big() - 1), None),
        (N::i8(-1), N::i8(1), Some(N::f32(-1.))),
        #[cfg(feature = "num-bigint")]
        (N::ibig(-big()), N::ibig(big() + 1), None),
        (N::i64(i64::MIN + 2), N::i64(i64::MAX), None),
        (N::i32(i32::MIN + 2), N::i32(i32::MAX), None),
        // near -0.5
        (N::i32(-(1 << 22) - 1), N::i32(1 << 23), Some(N::f32(-0.5 - 2f32.powi(-23)))),
        (N::i64(-(1 << 52) - 1), N::i64(1 << 53), Some(N::f64(-0.5 - 2f64.powi(-53)))),
        (N::i8(-1), N::i8(2), Some(N::f32(-0.5))),
        (N::i64(-(1 << 52) + 1), N::i64(1 << 53), Some(N::f64(-0.5 + 2f64.powi(-53)))),
        (N::i32(-(1 << 22) + 1), N::i32(1 << 23), Some(N::f32(-0.5 + 2f32.powi(-23)))),
        // near 0
        (N::i32(-1), N::i32(i32::MAX), None),
        (N::i64(-1), N::i64(i64::MAX), None),
        #[cfg(feature = "num-bigint")]
        (N::ibig(-BigInt::from(1u8)), N::ibig(big()), None),
        (N::i8(0), N::i8(1), Some(N::f32(0.))),
        #[cfg(feature = "num-bigint")]
        (N::ibig(BigInt::from(1u8)), N::ibig(big()), None),
        (N::i64(1), N::i64(i64::MAX), None),
        (N::i32(1), N::i32(i32::MAX), None),
        // near 0.5
        (N::i32((1 << 22) - 1), N::i32(1 << 23), Some(N::f32(0.5 - 2f32.powi(-23)))),
        (N::i64((1 << 52) - 1), N::i64(1 << 53), Some(N::f64(0.5 - 2f64.powi(-53)))),
        (N::i8(1), N::i8(2), Some(N::f32(0.5))),
        (N::i64((1 << 52) + 1), N::i64(1 << 53), Some(N::f64(0.5 + 2f64.powi(-53)))),
        (N::i32((1 << 22) + 1), N::i32(1 << 23), Some(N::f32(0.5 + 2f32.powi(-23)))),
        // near 1
        (N::i32(i32::MAX - 1), N::i32(i32::MAX), None),
        (N::i64(i64::MAX - 1), N::i64(i64::MAX), None),
        #[cfg(feature = "num-bigint")]
        (N::ibig(big()), N::ibig(big() + 1), None),
        (N::i8(1), N::i8(1), Some(N::f32(1.))),
        #[cfg(feature = "num-bigint")]
        (N::ibig(big()), N::ibig(big() - 1), None),
        (N::i64(i64::MAX), N::i64(i64::MAX - 1), None),
        (N::i32(i32::MAX), N::i32(i32::MAX - 1), None),
        (N::i8(2), N::i8(1), Some(N::f32(2.))),
    ];

    fn expand_equiv_class_ratio(coeffs: &(N, N, Option<N>)) -> Vec<N> {
        let mut ret = Vec::new();
        #[cfg(not(feature = "num-bigint"))]
        match coeffs {
            (N::i8(num), N::i8(den), _) => ret.extend_from_slice(&[
                N::r8(Ratio::new(*num, *den)),
                N::r16(Ratio::new(*num as i16, *den as i16)),
                N::r32(Ratio::new(*num as i32, *den as i32)),
                N::r64(Ratio::new(*num as i64, *den as i64)),
                N::r128(Ratio::new(*num as i128, *den as i128))]),
            (N::i16(num), N::i16(den), _) => ret.extend_from_slice(&[
                N::r16(Ratio::new(*num as i16, *den as i16)),
                N::r32(Ratio::new(*num as i32, *den as i32)),
                N::r64(Ratio::new(*num as i64, *den as i64)),
                N::r128(Ratio::new(*num as i128, *den as i128))]),
            (N::i32(num), N::i32(den), _) => ret.extend_from_slice(&[
                N::r32(Ratio::new(*num as i32, *den as i32)),
                N::r64(Ratio::new(*num as i64, *den as i64)),
                N::r128(Ratio::new(*num as i128, *den as i128))]),
            (N::i64(num), N::i64(den), _) => ret.extend_from_slice(&[
                N::r64(Ratio::new(*num as i64, *den as i64)),
                N::r128(Ratio::new(*num as i128, *den as i128))]),
            _ => unreachable!()
        };

        #[cfg(feature = "num-bigint")]
        match coeffs {
            (N::i8(num), N::i8(den), _) => ret.extend_from_slice(&[
                N::r8(Ratio::new(*num, *den)),
                N::r16(Ratio::new(*num as i16, *den as i16)),
                N::r32(Ratio::new(*num as i32, *den as i32)),
                N::r64(Ratio::new(*num as i64, *den as i64)),
                N::rbig(Ratio::new((*num).into(), (*den).into()))]),
            (N::i16(num), N::i16(den), _) => ret.extend_from_slice(&[
                N::r16(Ratio::new(*num as i16, *den as i16)),
                N::r32(Ratio::new(*num as i32, *den as i32)),
                N::r64(Ratio::new(*num as i64, *den as i64)),
                N::rbig(Ratio::new((*num).into(), (*den).into()))]),
            (N::i32(num), N::i32(den), _) => ret.extend_from_slice(&[
                N::r32(Ratio::new(*num as i32, *den as i32)),
                N::r64(Ratio::new(*num as i64, *den as i64)),
                N::rbig(Ratio::new((*num).into(), (*den).into()))]),
            (N::i64(num), N::i64(den), _) => ret.extend_from_slice(&[
                N::r64(Ratio::new(*num as i64, *den as i64)),
                N::rbig(Ratio::new((*num).into(), (*den).into()))]),
            (N::ibig(num), N::ibig(den), _) => ret.extend_from_slice(&[
                N::rbig(Ratio::new(num.clone(), den.clone()))]),
            _ => unreachable!()
        };

        match coeffs.2 {
            Some(N::f32(v)) => ret.extend_from_slice(&[ N::f32(v), N::f64(v as f64) ]),
            Some(N::f64(v)) => ret.push(N::f64(v as f64)),
            _ => {}
        }
        ret
    }

    // test comparison and hashing
    for icls in 0..ratio_coeffs.len() {
        let iequiv = expand_equiv_class_ratio(&ratio_coeffs[icls]);

        // test hashing
        let hashes: Vec<u64> = iequiv.iter().map(hash).collect();
        for i in 1..iequiv.len() {
            assert_eq!(hashes[0], hashes[i], "Hash mismatch between {:?} and {:?}", iequiv[0], iequiv[i]);
        }

        for jcls in 0..ratio_coeffs.len() {
            let jequiv = expand_equiv_class_ratio(&ratio_coeffs[jcls]);
            let expected = icls.cmp(&jcls);
            for i in &iequiv {
                for j in &jequiv {
                    assert_cmp(i, j, expected);
                }
            }
        }
    }
}

#[test]
#[cfg(feature = "num-complex")]
fn test_complex_using_primitives() {
    fn expand_equiv_class_ratio(cls: &[N]) -> Vec<N> {
        let mut ret = Vec::new();
        for e in cls {
            // size extension
            match e {
                N::u8(v) => ret.push(N::u8(*v)),
                N::u16(v) => ret.push(N::u16(*v)),
                N::u32(v) => ret.push(N::u32(*v)),
                N::u64(v) => ret.push(N::u64(*v)),
                N::u128(v) => ret.push(N::u128(*v)),
                N::i8(v) => ret.push(N::i8(*v)),
                N::i16(v) => ret.push(N::i16(*v)),
                N::i32(v) => ret.push(N::i32(*v)),
                N::i64(v) => ret.push(N::i64(*v)),
                N::i128(v) => ret.push(N::i128(*v)),
                N::f64(v) => ret.extend_from_slice(&[N::f64(*v), N::c64(Complex::from(*v))]),
                N::f32(v) => ret.extend_from_slice(&[N::f32(*v), N::c32(Complex::from(*v)), N::f64(*v as f64), N::c64(Complex::from(*v as f64))]),
                _ => {}
            }
        }
        ret
    }

    let numbers: Vec<_> = NUMBERS.iter().map(|cls| expand_equiv_class_ratio(cls)).collect();

    // comparison between numbers
    for icls in 0..numbers.len() {
        for jcls in 0..numbers.len() {
            let expected = icls.cmp(&jcls);
            for i in &numbers[icls] {
                for j in &numbers[jcls] {
                    assert_cmp(i, j, expected);
                }
            }
        }
    }

    for &equiv in NUMBERS {
        let equiv = expand_equiv_class_ratio(equiv);
        let hashes: Vec<u64> = equiv.iter().map(hash).collect();
        for i in 1..equiv.len() {
            assert_eq!(hashes[0], hashes[i], "Hash mismatch between {:?} and {:?}", equiv[0], equiv[i]);
        }
    }
}
#[test]
#[cfg(feature = "num-complex")]
fn test_complex() {
    // additional test cases for complex numbers: (real, imaginary)
    let ratio_coeffs = [
        (N::f32(-1.), N::f32(-1.)),
        (N::f32(-1.), N::f32(0.)),
        (N::f32(-1.), N::f32(1.)),
        (N::f32(0.), N::f32(-1.)),
        (N::f32(0.), N::f32(0.)),
        (N::f32(0.), N::f32(1.)),
        (N::f32(1.), N::f32(-1.)),
        (N::f32(1.), N::f32(0.)),
        (N::f32(1.), N::f32(1.)),
    ];

    fn expand_equiv_class_ratio(coeffs: &(N, N)) -> Vec<N> {
        let mut ret = Vec::new();
        match coeffs {
            (N::f32(re), N::f32(im)) if im == &0. => ret.extend_from_slice(&[
                N::c32(Complex::new(*re, *im)),
                N::c64(Complex::new(*re as f64, *im as f64)),
                N::f32(*re),
                N::f64(*re as f64)]),
            (N::f32(re), N::f32(im)) => ret.extend_from_slice(&[
                N::c32(Complex::new(*re, *im)),
                N::c64(Complex::new(*re as f64, *im as f64))]),
            (N::f64(re), N::f64(im)) if im == &0. => ret.extend_from_slice(&[
                N::c64(Complex::new(*re, *im)),
                N::f64(*re as f64)]),
            (N::f64(re), N::f64(im)) => ret.push(N::c64(Complex::new(*re, *im))),
            (_, _) => unreachable!()
        };
        ret
    }

    // test comparison and hashing
    for icls in 0..ratio_coeffs.len() {
        let iequiv = expand_equiv_class_ratio(&ratio_coeffs[icls]);

        // test hashing
        let hashes: Vec<u64> = iequiv.iter().map(hash).collect();
        for i in 1..iequiv.len() {
            assert_eq!(hashes[0], hashes[i], "Hash mismatch between {:?} and {:?}", iequiv[0], iequiv[i]);
        }

        for jcls in 0..ratio_coeffs.len() {
            let jequiv = expand_equiv_class_ratio(&ratio_coeffs[jcls]);
            let expected = icls.cmp(&jcls);
            for i in &iequiv {
                for j in &jequiv {
                    assert_cmp(i, j, expected);
                }
            }
        }
    }
}
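// Illustrative sketch (not from the original test suite): rationals, floats and
// integers compare consistently through the same cross-type comparison methods
// used throughout this file, e.g. 1/2 == 0.5 while 1/3 < 0.5. Only type pairs that
// the tests above already compare (Ratio<iN> vs f32/f64) are assumed here.
#[test]
#[cfg(feature = "num-rational")]
fn illustrative_rational_vs_float() {
    assert!(Ratio::new(1i8, 2i8).num_eq(&0.5f64));
    assert!(Ratio::new(1i64, 3i64).num_lt(&0.5f32));
    assert!(0.5f64.num_gt(&Ratio::new(1i32, 3i32)));
}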