diff --git a/CHANGELOG.md b/CHANGELOG.md index ae8ca611c..6380d7d59 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ customers cannot upgrade their bootloader, its changes are recorded separately. ## Firmware ### [Unreleased] -- Bitcoin: warn if the transaction fee is higher than 10% of the coins sent +- Bitcoin, Ethereum: warn if the transaction fee is higher than 10% of the coins sent - ETH Testnets: add Goerli and remove deprecated Rinkeby and Ropsten ### 9.13.1 diff --git a/src/rust/Cargo.lock b/src/rust/Cargo.lock index a71dc30a3..5b06a818d 100644 --- a/src/rust/Cargo.lock +++ b/src/rust/Cargo.lock @@ -104,6 +104,7 @@ dependencies = [ "lazy_static", "minicbor", "num-bigint", + "num-traits", "prost", "sha2", "sha3", @@ -384,9 +385,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.3.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e9a41747ae4633fce5adffb4d2e81ffc5e89593cb19917f8fb2cc5ff76507bf" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg", "num-integer", diff --git a/src/rust/bitbox02-rust/Cargo.toml b/src/rust/bitbox02-rust/Cargo.toml index d2021a9e6..8de2b1276 100644 --- a/src/rust/bitbox02-rust/Cargo.toml +++ b/src/rust/bitbox02-rust/Cargo.toml @@ -34,7 +34,8 @@ hex = { version = "0.4", default-features = false } sha2 = { version = "0.9.2", default-features = false } sha3 = { version = "0.9.1", default-features = false, optional = true } zeroize = "1.5.5" -num-bigint = { version = "0.3.1", default-features = false, optional = true } +num-bigint = { version = "0.4.3", default-features = false, optional = true } +num-traits = { version = "0.2", default-features = false, optional = true } bip32-ed25519 = { git = "https://github.com/digitalbitbox/rust-bip32-ed25519", tag = "v0.1.0", optional = true } bs58 = { version = "0.4.0", default-features = false, features = ["alloc", "check"], optional = true } bech32 = { version = "0.8.1", default-features = false, optional = true } @@ -70,6 +71,7 @@ app-ethereum = [ # enable these dependencies "sha3", "num-bigint", + "num-traits", # enable this feature in the deps "bitbox02/app-ethereum", ] diff --git a/src/rust/bitbox02-rust/src/hww/api/ethereum/amount.rs b/src/rust/bitbox02-rust/src/hww/api/ethereum/amount.rs index 48b573a6d..83cb649e4 100644 --- a/src/rust/bitbox02-rust/src/hww/api/ethereum/amount.rs +++ b/src/rust/bitbox02-rust/src/hww/api/ethereum/amount.rs @@ -14,6 +14,7 @@ use alloc::string::String; use num_bigint::BigUint; +use num_traits::{ToPrimitive, Zero}; pub struct Amount<'a> { pub unit: &'a str, @@ -45,6 +46,15 @@ impl<'a> Amount<'a> { } } +/// Computes the percentage of the fee of the amount, up to one decimal point. +/// Returns None if the amount is 0 or either fee or amount cannot be represented by `f64`. +pub fn calculate_percentage(fee: &BigUint, amount: &BigUint) -> Option<f64> { + if amount.is_zero() { + return None; + } + Some(100. * fee.to_f64()? / amount.to_f64()?)
+} + #[cfg(test)] mod tests { use super::*; @@ -136,4 +146,26 @@ mod tests { ); } } + + #[test] + pub fn test_calculate_percentage() { + let p = |f: u64, a: u64| calculate_percentage(&f.into(), &a.into()); + assert_eq!(p(1, 0), None); + assert_eq!(p(3, 4), Some(75.)); + assert_eq!(p(0, 100), Some(0.)); + assert_eq!(p(1, 100), Some(1.)); + assert_eq!(p(9, 100), Some(9.)); + assert_eq!(p(10, 100), Some(10.)); + assert_eq!(p(99, 100), Some(99.)); + assert_eq!(p(909, 1000), Some(90.9)); + assert_eq!( + calculate_percentage( + // 63713280000000000 + &BigUint::from_bytes_be(b"\xe2\x5a\xe3\xfd\xe0\x00\x00"), + // 530564000000000000 + &BigUint::from_bytes_be(b"\x07\x5c\xf1\x25\x9e\x9c\x40\x00"), + ), + Some(12.008594627603833) + ); + } } diff --git a/src/rust/bitbox02-rust/src/hww/api/ethereum/sign.rs b/src/rust/bitbox02-rust/src/hww/api/ethereum/sign.rs index a1745faba..ebf7c1bd1 100644 --- a/src/rust/bitbox02-rust/src/hww/api/ethereum/sign.rs +++ b/src/rust/bitbox02-rust/src/hww/api/ethereum/sign.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::amount::Amount; +use super::amount::{calculate_percentage, Amount}; use super::params::Params; use super::pb; use super::Error; @@ -174,9 +174,10 @@ async fn verify_standard_transaction( let total = Amount { unit: params.unit, decimals: WEI_DECIMALS, - value: amount.value.add(&fee.value), + value: (&amount.value).add(&fee.value), }; - transaction::verify_total_fee(&total.format(), &fee.format(), None).await?; + let percentage = calculate_percentage(&fee.value, &amount.value); + transaction::verify_total_fee(&total.format(), &fee.format(), percentage).await?; Ok(()) } @@ -384,7 +385,58 @@ mod tests { ); } - /// Standard ETH transaction on an unusual keypath (Goerly on mainnet keypath) + /// Test a transaction with an unusually high fee. 
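The core of this change is the new `calculate_percentage` helper in `amount.rs` plus the extra argument passed to `transaction::verify_total_fee` in `sign.rs`. The following is a minimal, self-contained sketch of how the pieces combine; the `maybe_warn` helper, the `> 10.` threshold comparison, and the `main` harness are illustrative assumptions rather than firmware API (the real flow runs through `verify_total_fee` and the UI workflow exercised in the test that follows), while the percentage math and the dialog text mirror the diff.

```rust
// Minimal sketch of the high-fee warning; `maybe_warn` and the `> 10.` check are
// assumptions for illustration, not firmware API.
use num_bigint::BigUint;
use num_traits::{ToPrimitive, Zero};

/// Fee as a percentage of the amount, or None if the amount is zero or a value
/// cannot be converted to f64 (mirrors the helper added in amount.rs).
fn calculate_percentage(fee: &BigUint, amount: &BigUint) -> Option<f64> {
    if amount.is_zero() {
        return None;
    }
    Some(100. * fee.to_f64()? / amount.to_f64()?)
}

/// Returns the warning text to confirm before signing, or None if no warning is needed.
fn maybe_warn(fee: &BigUint, amount: &BigUint) -> Option<String> {
    let percentage = calculate_percentage(fee, amount)?;
    if percentage > 10. {
        // One decimal point, matching the body asserted in test_high_fee_warning below.
        Some(format!("The fee rate\nis {:.1}%.\nProceed?", percentage))
    } else {
        None
    }
}

fn main() {
    // Values from the test below: fee = gas_price * gas_limit, amount = tx value.
    let fee: BigUint = 63_713_280_000_000_000u64.into();
    let amount: BigUint = 530_564_000_000_000_000u64.into();
    assert_eq!(
        maybe_warn(&fee, &amount).as_deref(),
        Some("The fee rate\nis 12.0%.\nProceed?")
    );
}
```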
+ #[test] + fn test_high_fee_warning() { + const KEYPATH: &[u32] = &[44 + HARDENED, 60 + HARDENED, 0 + HARDENED, 0, 0]; + + static mut UI_COUNTER: u32 = 0; + mock(Data { + ui_transaction_address_create: Some(Box::new(|_amount, _address| true)), + ui_transaction_fee_create: Some(Box::new(|total, fee, longtouch| { + assert_eq!(total, "0.59427728 ETH"); + assert_eq!(fee, "0.06371328 ETH"); + assert!(!longtouch); + true + })), + ui_confirm_create: Some(Box::new(move |params| { + match unsafe { + UI_COUNTER += 1; + UI_COUNTER + } { + 1 => { + assert_eq!(params.title, "High fee"); + assert_eq!(params.body, "The fee rate\nis 12.0%.\nProceed?"); + assert!(params.longtouch); + true + } + _ => panic!("too many user confirmations"), + } + })), + ..Default::default() + }); + mock_unlocked(); + assert!(block_on(process(&pb::EthSignRequest { + coin: pb::EthCoin::Eth as _, + keypath: KEYPATH.to_vec(), + nonce: b"\x1f\xdc".to_vec(), + // fee=gas_price*gas_limit=63713280000000000 + gas_price: b"\x01\x65\xa0\xbc\x00\x00".to_vec(), + gas_limit: b"\xa2\x08".to_vec(), + recipient: + b"\x04\xf2\x64\xcf\x34\x44\x03\x13\xb4\xa0\x19\x2a\x35\x28\x14\xfb\xe9\x27\xb8\x85" + .to_vec(), + // 530564000000000000 + value: b"\x07\x5c\xf1\x25\x9e\x9c\x40\x00".to_vec(), + data: b"".to_vec(), + host_nonce_commitment: None, + chain_id: 0, + })) + .is_ok()); + assert_eq!(unsafe { UI_COUNTER }, 1); + } + + /// Standard ETH transaction on an unusual keypath (Goerli on mainnet keypath) #[test] pub fn test_process_warn_unusual_keypath() { const KEYPATH: &[u32] = &[44 + HARDENED, 60 + HARDENED, 0 + HARDENED, 0, 0]; diff --git a/src/rust/util/Cargo.toml b/src/rust/util/Cargo.toml index 30a5908a1..7e7fcbf65 100644 --- a/src/rust/util/Cargo.toml +++ b/src/rust/util/Cargo.toml @@ -20,4 +20,4 @@ edition = "2018" license = "Apache-2.0" [dependencies] -num-bigint = { version = "0.3.1", default-features = false } +num-bigint = { version = "0.4.3", default-features = false } diff --git a/src/rust/vendor/num-bigint/.cargo-checksum.json b/src/rust/vendor/num-bigint/.cargo-checksum.json index 92714288e..563286e6c 100644 --- a/src/rust/vendor/num-bigint/.cargo-checksum.json +++ b/src/rust/vendor/num-bigint/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"7365a7f8541f6d8a889f42e4d8084786a942238ecd6688605fa6320e51e6a1db","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"d8a18a6efa61a27d8d8e454d4aa9d03808119a7be7c378da9e200d941a74c17b","RELEASES.md":"1a0da1010ea8a1c5eca9a9566bdaae639a1bb3b450a987e98c28c03875981dfd","benches/bigint.rs":"e0388d1880c4ff508b2f871c5b70f058999dd8c6a703c16e8ea69f0a8e1ba50d","benches/factorial.rs":"ed1d276a780e7e5fe79121b941c22a00c2854dbf92fd8a5372619853ba0c13b7","benches/gcd.rs":"2b433e5699b45e5fb23e77ab025a07e16e3eb9a49c47207b477551542fc4ff1e","benches/roots.rs":"967161d58d1977452ec7fa988a41848d575008a3e148eb048bc049c884d98f5f","benches/shootout-pidigits.rs":"c2a48133f5b679928f7e3f4e764c78aaa8c5b811f58b86fe57fae8c63cb07136","build.rs":"4955639b370d3636b8c44cb7743e6c5fb129077b069d78becbc135eba37e1ece","src/algorithms.rs":"5850d2931c34b43a79047d66c8ce98093299eeb3e8cb6dd761ee2bd1a6a50e07","src/bigint.rs":"1c73cb3bf03fab04a24a0e4a1e131a2aab2d06f233ce760786502fc96dab3f23","src/bigrand.rs":"579f4da36b8378267ef4b8a73a792437eaf1b39c48b5f2d50b66fefb4f9a63d7","src/biguint.rs":"db580af66ab88990d3a060077b6f229e4029f487a7d6ccceb8e1528d1603e02d","src/lib.rs":"483bc6f1a6159df10ec9a55b886e19e9f79c7cc1becca82cc00bd46b846feed2","src/macros.rs":"800239723d637c3ea1d6beb6a62b38a2300bd4c69c20dc0d50855ad6a8b31e70","src/monty.rs":"91688835e0fd409df72c3df5e07e2a114982578f03dd62721c02f36d5fc64ac6","tests/bigint.rs":"32fc36ebbfe97f7f4b050dd787cf86df2a36b47854fae5366c47098200026b0a","tests/bigint_bitwise.rs":"e6a2f76fa1eb919e7c513d7e30a8a2a963841a295a71103462fb8ab9792419b5","tests/bigint_scalar.rs":"5d6131e021f96d476f7949fa2b302581bd9254e91efde1bf2926cdd5e8dffcdb","tests/biguint.rs":"3dbd9fc4b341e7f8106cbeb2e765dab08643fb8691c27a5719216c0c13662493","tests/biguint_scalar.rs":"f16450c0dfcaf23b6fb85669b3de7c2bb6f594a65e3cdf91014b2e49c941cc95","tests/consts/mod.rs":"e20bc49a7cc95077242cbe4016b37745ea986c779d2385cb367fbfe44f15ff94","tests/macros/mod.rs":"1a8f9f015e5caaac60ce9ccff01a75ae489801c3ede6e7b9b3c5079b6efefc9c","tests/modpow.rs":"f1e4ed4fe466b544d7c4e57d0a0dc7d1c97b430b4805cae12f0915b8c40ab66f","tests/roots.rs":"a3bc2de170a0f6297cc8d8830d608db537ca102ccf204fd4fb8e2d92675622d8"},"package":"5e9a41747ae4633fce5adffb4d2e81ffc5e89593cb19917f8fb2cc5ff76507bf"} \ No newline at end of file 
+{"files":{"Cargo.toml":"0b84600b6ebbf302c71ee1984c54477ddb94142d17f82bb3601386b57bf49922","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"d830b9c7aa3ee607cdb87bcbd3f7c930c3c7faad7fa79312806efa0dc3bda577","RELEASES.md":"bf590b1e9d5c3235cd872fa442cb9041b4c8ea3db3622a95edcd66b554ac7cc8","benches/bigint.rs":"7efd4741f53c786bae63d1196492b5657fd0d928b37a59a08629f6efdc35c845","benches/factorial.rs":"ed1d276a780e7e5fe79121b941c22a00c2854dbf92fd8a5372619853ba0c13b7","benches/gcd.rs":"3cc1a3356f680a6fa625f0ece0c8dd778f4091a53a31177f2870ef9a6c858c7d","benches/rng/mod.rs":"38144fc8283955db4be72a1533328fded98986d6f9d0bc7da0b306f7d4b5ca43","benches/roots.rs":"b31846852a7215c26df228940f2e469aff32fa8805eccc2b5ee5e7280ef0eeb4","benches/shootout-pidigits.rs":"c2a48133f5b679928f7e3f4e764c78aaa8c5b811f58b86fe57fae8c63cb07136","build.rs":"4955639b370d3636b8c44cb7743e6c5fb129077b069d78becbc135eba37e1ece","src/bigint.rs":"0ba1f025b130517a1ce94008a4ace8e1e337a419c91f9eee181a6672e4985ecd","src/bigint/addition.rs":"440f67a80de748f418adc1f3bfbcc4d428e21bcf5ae6962e2b9f3bb82ed958a2","src/bigint/arbitrary.rs":"6679833dffb38fa81f251bf9dd35b0d5b4cecb2a368e82aac92b00cef4dfc21b","src/bigint/bits.rs":"6bfdd854de8daf5c98b8eb8f0f29aa76ae6098a39dbe12eec35fbc9b8c247602","src/bigint/convert.rs":"331f635497d429ec62d829d8d6b476ba402e6530ddbbf9d5b117ed3042e41beb","src/bigint/division.rs":"a0197386b4c6f1465db7ac13bda956dad6bf0ac75accd8f75755f5c8cd05d5fd","src/bigint/multiplication.rs":"0e3ea5982ea0748420d36381f633656ddc6b4c4bee5b97d8e7b2550e67aa3e4d","src/bigint/power.rs":"7391588452764440ae01bbcdfb5b94776018e15bb966b448fbeed693a484ddea","src/bigint/serde.rs":"8240ed79ac11ec0ec2dfc85d4657693d5b03379bdd60a42dccee4764b000e5b6","src/bigint/shift.rs":"3aca826b132a95394e16161708bb6067985a25fef684b25f9f662cf3be12d672","src/bigint/subtraction.rs":"9411b9f59bda0060d286c798e3e765a64def44ad29d9cd6879b73149b9ea4369","src/bigrand.rs":"1e3a9fea94f3be4d052d0ceb1a8de13c580028ee26695fbba1da9de51289c858","src/biguint.rs":"df931fdd6becfde3ab392218fe57b8eab215ab0a8435129e451c9d6013c506b6","src/biguint/addition.rs":"88c02a33ed47ac091b199c102a991ef494bb291771f8b38358d22bb791e66618","src/biguint/arbitrary.rs":"895fe5a9bbcf40824d1a342e089fb2aec78cb9bad0dd489cfef489a3323f6c3b","src/biguint/bits.rs":"05f56e1cd494a3cd63e418ce3d797b9e979f34c4fbacb882e977548a1f69be65","src/biguint/convert.rs":"1071e03f57fa56070e8f696417f33a2fb738afcac329984be6236e33bca37dfd","src/biguint/division.rs":"3b05da7dddeceefaa67c62f016411fa82ce2dcaf1678fdb1ee70e7d170870d9f","src/biguint/iter.rs":"c21e30f573bdf1936e72dd56a89ee662329d25e8b55e37e88e9b1aea2da48abd","src/biguint/monty.rs":"2382e59abf592d009f3f0aefcd2cfd541f21b861aac109931bca2fbc3ee37c62","src/biguint/multiplication.rs":"ee3f611add01239d4e7fc023afc87f1e8b79cb4cb3326e69a120bbee5b014fdf","src/biguint/power.rs":"729d6c4a7f3686711e4f7a86a634ddb920a02be3de1667dac8a0dc85c3b7a854","src/biguint/serde.rs":"fc16ef8f5d036085ca408e3abfef53646499959cc77b03af622e97636f03f778","src/biguint/shift.rs":"b023fad4f86516660d8c4c9328215139fbe2f13afb86ab7ce0206d0c0e04ae00","src/biguint/subtraction.rs":"abbc6e8aa7fcbf58d8444ead208a07171b377f81cab509eaba5c71509b2e472e","src/lib.rs":"113da969e9dd905b8d2b3c3f0571f79971a4517d9c96d9d6b2e4a0873d1e51a6","src/macros.rs":"800239723d637c3ea1d6beb6a62b38a2300bd4c69c20dc0d50855ad6a8b31e70","tests/bigint.rs":"267b907cdb66e62050922b68367e1135517ba0afbd453b7bec807836e9d1d2f3","tests/b
igint_bitwise.rs":"e6a2f76fa1eb919e7c513d7e30a8a2a963841a295a71103462fb8ab9792419b5","tests/bigint_scalar.rs":"a87e801e370686985d44e1f020c69fceca72b9f048e0f7301d2b8d38469e5636","tests/biguint.rs":"b8109cae66582c34f2838125063ef2c7293eb31549429119eadd7fd8f95376ee","tests/biguint_scalar.rs":"b09cda9d4fe6ec519e93282653f69b57d70db73b9cb59c0ea5cd0861ca2de266","tests/consts/mod.rs":"e20bc49a7cc95077242cbe4016b37745ea986c779d2385cb367fbfe44f15ff94","tests/fuzzed.rs":"f60a84c446ea2f45d87eb4ee64682ea63fdef05bc74f482739d4e968960e8f4e","tests/macros/mod.rs":"1a8f9f015e5caaac60ce9ccff01a75ae489801c3ede6e7b9b3c5079b6efefc9c","tests/modpow.rs":"f1e4ed4fe466b544d7c4e57d0a0dc7d1c97b430b4805cae12f0915b8c40ab66f","tests/roots.rs":"a3bc2de170a0f6297cc8d8830d608db537ca102ccf204fd4fb8e2d92675622d8"},"package":"f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"} \ No newline at end of file diff --git a/src/rust/vendor/num-bigint/Cargo.toml b/src/rust/vendor/num-bigint/Cargo.toml index da5dce705..1c15d09be 100644 --- a/src/rust/vendor/num-bigint/Cargo.toml +++ b/src/rust/vendor/num-bigint/Cargo.toml @@ -3,17 +3,16 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies +# to registry (e.g., crates.io) dependencies. # -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "num-bigint" -version = "0.3.1" +version = "0.4.3" authors = ["The Rust Project Developers"] build = "build.rs" exclude = ["/bors.toml", "/ci/*", "/.github/*"] @@ -44,7 +43,7 @@ name = "roots" name = "shootout-pidigits" harness = false [dependencies.arbitrary] -version = "0.4" +version = "1" optional = true default-features = false @@ -59,12 +58,12 @@ features = ["i128"] default-features = false [dependencies.quickcheck] -version = "0.9" +version = "1" optional = true default-features = false [dependencies.rand] -version = "0.7" +version = "0.8" optional = true default-features = false diff --git a/src/rust/vendor/num-bigint/README.md b/src/rust/vendor/num-bigint/README.md index d9df19bc4..d1cedad11 100644 --- a/src/rust/vendor/num-bigint/README.md +++ b/src/rust/vendor/num-bigint/README.md @@ -13,7 +13,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -num-bigint = "0.3" +num-bigint = "0.4" ``` ## Features @@ -29,12 +29,12 @@ if your compiler is not new enough. feature is enabled. To enable it include rand as ```toml -rand = "0.7" -num-bigint = { version = "0.3", features = ["rand"] } +rand = "0.8" +num-bigint = { version = "0.4", features = ["rand"] } ``` Note that you must use the version of `rand` that `num-bigint` is compatible -with: `0.7`. +with: `0.8`. 
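For orientation, a minimal host-side sketch of the `rand` integration the README describes, assuming `num-bigint = { version = "0.4", features = ["rand"] }` together with `rand = "0.8"`; the firmware itself builds `num-bigint` with `default-features = false` and does not enable this feature.

```rust
// Host-side sketch of the README's `rand` feature with the bumped versions
// (num-bigint 0.4 + rand 0.8); not used by the firmware build.
use num_bigint::RandBigInt;

fn main() {
    let mut rng = rand::thread_rng();
    // Uniformly random 256-bit unsigned integer.
    let n = rng.gen_biguint(256);
    println!("{}", n);
}
```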
## Releases diff --git a/src/rust/vendor/num-bigint/RELEASES.md b/src/rust/vendor/num-bigint/RELEASES.md index 24d0eae87..cd1432f40 100644 --- a/src/rust/vendor/num-bigint/RELEASES.md +++ b/src/rust/vendor/num-bigint/RELEASES.md @@ -1,3 +1,77 @@ +# Release 0.4.3 (2021-11-02) + +- [GHSA-v935-pqmr-g8v9]: [Fix unexpected panics in multiplication.][228] + +**Contributors**: @arvidn, @cuviper, @guidovranken + +[228]: https://github.com/rust-num/num-bigint/pull/228 +[GHSA-v935-pqmr-g8v9]: https://github.com/rust-num/num-bigint/security/advisories/GHSA-v935-pqmr-g8v9 + +# Release 0.4.2 (2021-09-03) + +- [Use explicit `Integer::div_ceil` to avoid the new unstable method.][219] + +**Contributors**: @catenacyber, @cuviper + +[219]: https://github.com/rust-num/num-bigint/pull/219 + +# Release 0.4.1 (2021-08-27) + +- [Fixed scalar divide-by-zero panics.][200] +- [Implemented `DoubleEndedIterator` for `U32Digits` and `U64Digits`.][208] +- [Optimized multiplication to avoid unnecessary allocations.][199] +- [Optimized string formatting for very large values.][216] + +**Contributors**: @cuviper, @PatrickNorton + +[199]: https://github.com/rust-num/num-bigint/pull/199 +[200]: https://github.com/rust-num/num-bigint/pull/200 +[208]: https://github.com/rust-num/num-bigint/pull/208 +[216]: https://github.com/rust-num/num-bigint/pull/216 + +# Release 0.4.0 (2021-03-05) + +### Breaking Changes + +- Updated public dependences on [arbitrary, quickcheck][194], and [rand][185]: + - `arbitrary` support has been updated to 1.0, requiring Rust 1.40. + - `quickcheck` support has been updated to 1.0, requiring Rust 1.46. + - `rand` support has been updated to 0.8, requiring Rust 1.36. +- [`Debug` now shows plain numeric values for `BigInt` and `BigUint`][195], + rather than the raw list of internal digits. + +**Contributors**: @cuviper, @Gelbpunkt + +[185]: https://github.com/rust-num/num-bigint/pull/185 +[194]: https://github.com/rust-num/num-bigint/pull/194 +[195]: https://github.com/rust-num/num-bigint/pull/195 + +# Release 0.3.3 (2021-09-03) + +- [Use explicit `Integer::div_ceil` to avoid the new unstable method.][219] + +**Contributors**: @catenacyber, @cuviper + +# Release 0.3.2 (2021-03-04) + +- [The new `BigUint` methods `count_ones` and `trailing_ones`][175] return the + number of `1` bits in the entire value or just its least-significant tail, + respectively. +- [The new `BigInt` and `BigUint` methods `bit` and `set_bit`][183] will read + and write individual bits of the value. For negative `BigInt`, bits are + determined as if they were in the two's complement representation. +- [The `from_radix_le` and `from_radix_be` methods][187] now accept empty + buffers to represent zero. +- [`BigInt` and `BigUint` can now iterate digits as `u32` or `u64`][192], + regardless of the actual internal digit size. 
+ +**Contributors**: @BartMassey, @cuviper, @janmarthedal, @sebastianv89, @Speedy37 + +[175]: https://github.com/rust-num/num-bigint/pull/175 +[183]: https://github.com/rust-num/num-bigint/pull/183 +[187]: https://github.com/rust-num/num-bigint/pull/187 +[192]: https://github.com/rust-num/num-bigint/pull/192 + # Release 0.3.1 (2020-11-03) - [Addition and subtraction now uses intrinsics][141] for performance on `x86` diff --git a/src/rust/vendor/num-bigint/benches/bigint.rs b/src/rust/vendor/num-bigint/benches/bigint.rs index 591e9a992..80ec191ce 100644 --- a/src/rust/vendor/num-bigint/benches/bigint.rs +++ b/src/rust/vendor/num-bigint/benches/bigint.rs @@ -5,18 +5,11 @@ extern crate test; use num_bigint::{BigInt, BigUint, RandBigInt}; use num_traits::{FromPrimitive, Num, One, Zero}; -use rand::rngs::StdRng; -use rand::SeedableRng; use std::mem::replace; use test::Bencher; -fn get_rng() -> StdRng { - let mut seed = [0; 32]; - for i in 1..32 { - seed[usize::from(i)] = i; - } - SeedableRng::from_seed(seed) -} +mod rng; +use rng::get_rng; fn multiply_bench(b: &mut Bencher, xbits: u64, ybits: u64) { let mut rng = get_rng(); @@ -46,7 +39,7 @@ fn factorial(n: usize) -> BigUint { let mut f: BigUint = One::one(); for i in 1..=n { let bu: BigUint = FromPrimitive::from_usize(i).unwrap(); - f += bu; + f *= bu; } f } @@ -181,35 +174,40 @@ fn fib_to_string(b: &mut Bencher) { b.iter(|| fib.to_string()); } -fn to_str_radix_bench(b: &mut Bencher, radix: u32) { +fn to_str_radix_bench(b: &mut Bencher, radix: u32, bits: u64) { let mut rng = get_rng(); - let x = rng.gen_bigint(1009); + let x = rng.gen_bigint(bits); b.iter(|| x.to_str_radix(radix)); } #[bench] fn to_str_radix_02(b: &mut Bencher) { - to_str_radix_bench(b, 2); + to_str_radix_bench(b, 2, 1009); } #[bench] fn to_str_radix_08(b: &mut Bencher) { - to_str_radix_bench(b, 8); + to_str_radix_bench(b, 8, 1009); } #[bench] fn to_str_radix_10(b: &mut Bencher) { - to_str_radix_bench(b, 10); + to_str_radix_bench(b, 10, 1009); +} + +#[bench] +fn to_str_radix_10_2(b: &mut Bencher) { + to_str_radix_bench(b, 10, 10009); } #[bench] fn to_str_radix_16(b: &mut Bencher) { - to_str_radix_bench(b, 16); + to_str_radix_bench(b, 16, 1009); } #[bench] fn to_str_radix_36(b: &mut Bencher) { - to_str_radix_bench(b, 36); + to_str_radix_bench(b, 36, 1009); } fn from_str_radix_bench(b: &mut Bencher, radix: u32) { @@ -358,6 +356,21 @@ fn pow_bench_bigexp(b: &mut Bencher) { }); } +#[bench] +fn pow_bench_1e1000(b: &mut Bencher) { + b.iter(|| BigUint::from(10u32).pow(1_000)); +} + +#[bench] +fn pow_bench_1e10000(b: &mut Bencher) { + b.iter(|| BigUint::from(10u32).pow(10_000)); +} + +#[bench] +fn pow_bench_1e100000(b: &mut Bencher) { + b.iter(|| BigUint::from(10u32).pow(100_000)); +} + /// This modulus is the prime from the 2048-bit MODP DH group: /// https://tools.ietf.org/html/rfc3526#section-3 const RFC3526_2048BIT_MODP_GROUP: &str = "\ @@ -393,3 +406,35 @@ fn modpow_even(b: &mut Bencher) { b.iter(|| base.modpow(&e, &m)); } + +#[bench] +fn to_u32_digits(b: &mut Bencher) { + let mut rng = get_rng(); + let n = rng.gen_biguint(2048); + + b.iter(|| n.to_u32_digits()); +} + +#[bench] +fn iter_u32_digits(b: &mut Bencher) { + let mut rng = get_rng(); + let n = rng.gen_biguint(2048); + + b.iter(|| n.iter_u32_digits().max()); +} + +#[bench] +fn to_u64_digits(b: &mut Bencher) { + let mut rng = get_rng(); + let n = rng.gen_biguint(2048); + + b.iter(|| n.to_u64_digits()); +} + +#[bench] +fn iter_u64_digits(b: &mut Bencher) { + let mut rng = get_rng(); + let n = rng.gen_biguint(2048); + + 
b.iter(|| n.iter_u64_digits().max()); +} diff --git a/src/rust/vendor/num-bigint/benches/gcd.rs b/src/rust/vendor/num-bigint/benches/gcd.rs index 1a65654ca..c211b6ef2 100644 --- a/src/rust/vendor/num-bigint/benches/gcd.rs +++ b/src/rust/vendor/num-bigint/benches/gcd.rs @@ -6,17 +6,10 @@ extern crate test; use num_bigint::{BigUint, RandBigInt}; use num_integer::Integer; use num_traits::Zero; -use rand::rngs::StdRng; -use rand::SeedableRng; use test::Bencher; -fn get_rng() -> StdRng { - let mut seed = [0; 32]; - for i in 1..32 { - seed[usize::from(i)] = i; - } - SeedableRng::from_seed(seed) -} +mod rng; +use rng::get_rng; fn bench(b: &mut Bencher, bits: u64, gcd: fn(&BigUint, &BigUint) -> BigUint) { let mut rng = get_rng(); diff --git a/src/rust/vendor/num-bigint/benches/rng/mod.rs b/src/rust/vendor/num-bigint/benches/rng/mod.rs new file mode 100644 index 000000000..33e4f0fad --- /dev/null +++ b/src/rust/vendor/num-bigint/benches/rng/mod.rs @@ -0,0 +1,38 @@ +use rand::RngCore; + +pub(crate) fn get_rng() -> impl RngCore { + XorShiftStar { + a: 0x0123_4567_89AB_CDEF, + } +} + +/// Simple `Rng` for benchmarking without additional dependencies +struct XorShiftStar { + a: u64, +} + +impl RngCore for XorShiftStar { + fn next_u32(&mut self) -> u32 { + self.next_u64() as u32 + } + + fn next_u64(&mut self) -> u64 { + // https://en.wikipedia.org/wiki/Xorshift#xorshift* + self.a ^= self.a >> 12; + self.a ^= self.a << 25; + self.a ^= self.a >> 27; + self.a.wrapping_mul(0x2545_F491_4F6C_DD1D) + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + for chunk in dest.chunks_mut(8) { + let bytes = self.next_u64().to_le_bytes(); + let slice = &bytes[..chunk.len()]; + chunk.copy_from_slice(slice) + } + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> { + Ok(self.fill_bytes(dest)) + } +} diff --git a/src/rust/vendor/num-bigint/benches/roots.rs b/src/rust/vendor/num-bigint/benches/roots.rs index 2433cf0a4..7afc4f763 100644 --- a/src/rust/vendor/num-bigint/benches/roots.rs +++ b/src/rust/vendor/num-bigint/benches/roots.rs @@ -4,10 +4,11 @@ extern crate test; use num_bigint::{BigUint, RandBigInt}; -use rand::rngs::StdRng; -use rand::SeedableRng; use test::Bencher; +mod rng; +use rng::get_rng; + // The `big64` cases demonstrate the speed of cases where the value // can be converted to a `u64` primitive for faster calculation. // @@ -16,14 +17,6 @@ use test::Bencher; // // The `big2k` and `big4k` cases are too big for `f64`, and use a simpler guess. 
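The new `to_u32_digits`/`iter_u32_digits`/`to_u64_digits`/`iter_u64_digits` benchmarks above exercise num-bigint's digit-access API (the iteration forms are the ones called out in the 0.3.2 release notes earlier in this diff). A small host-side sketch of what these calls return, assuming num-bigint 0.4; the concrete value is arbitrary.

```rust
// Sketch of the digit-access API covered by the new benchmarks; assumes num-bigint 0.4.
use num_bigint::BigUint;

fn main() {
    let n = BigUint::parse_bytes(b"112210f47de98115", 16).unwrap();
    // Digits come out least-significant first, independent of the internal digit size.
    assert_eq!(n.to_u32_digits(), vec![0x7de9_8115, 0x1122_10f4]);
    assert_eq!(
        n.iter_u64_digits().collect::<Vec<u64>>(),
        vec![0x1122_10f4_7de9_8115]
    );
}
```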
-fn get_rng() -> StdRng { - let mut seed = [0; 32]; - for i in 1..32 { - seed[usize::from(i)] = i; - } - SeedableRng::from_seed(seed) -} - fn check(x: &BigUint, n: u32) { let root = x.nth_root(n); if n == 2 { diff --git a/src/rust/vendor/num-bigint/src/algorithms.rs b/src/rust/vendor/num-bigint/src/algorithms.rs deleted file mode 100644 index 5d12c45b1..000000000 --- a/src/rust/vendor/num-bigint/src/algorithms.rs +++ /dev/null @@ -1,935 +0,0 @@ -use crate::std_alloc::{Cow, Vec}; -use core::cmp; -use core::cmp::Ordering::{self, Equal, Greater, Less}; -use core::iter::repeat; -use core::mem; -use num_traits::{One, PrimInt, Zero}; - -#[cfg(all(use_addcarry, target_arch = "x86_64"))] -use core::arch::x86_64 as arch; - -#[cfg(all(use_addcarry, target_arch = "x86"))] -use core::arch::x86 as arch; - -use crate::biguint::biguint_from_vec; -use crate::biguint::BigUint; - -use crate::bigint::BigInt; -use crate::bigint::Sign; -use crate::bigint::Sign::{Minus, NoSign, Plus}; - -use crate::big_digit::{self, BigDigit, DoubleBigDigit}; - -// only needed for the fallback implementation of `sbb` -#[cfg(not(use_addcarry))] -use crate::big_digit::SignedDoubleBigDigit; - -// Generic functions for add/subtract/multiply with carry/borrow. These are specialized -// for some platforms to take advantage of intrinsics, etc. - -// Add with carry: -#[cfg(all(use_addcarry, u64_digit))] -#[inline] -fn adc(carry: u8, a: u64, b: u64, out: &mut u64) -> u8 { - // Safety: There are absolutely no safety concerns with calling `_addcarry_u64`. - // It's just unsafe for API consistency with other intrinsics. - unsafe { arch::_addcarry_u64(carry, a, b, out) } -} - -#[cfg(all(use_addcarry, not(u64_digit)))] -#[inline] -fn adc(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 { - // Safety: There are absolutely no safety concerns with calling `_addcarry_u32`. - // It's just unsafe for API consistency with other intrinsics. - unsafe { arch::_addcarry_u32(carry, a, b, out) } -} - -// fallback for environments where we don't have an addcarry intrinsic -#[cfg(not(use_addcarry))] -#[inline] -fn adc(carry: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 { - let sum = DoubleBigDigit::from(a) + DoubleBigDigit::from(b) + DoubleBigDigit::from(carry); - *out = sum as BigDigit; - (sum >> big_digit::BITS) as u8 -} - -// Subtract with borrow: -#[cfg(all(use_addcarry, u64_digit))] -#[inline] -fn sbb(borrow: u8, a: u64, b: u64, out: &mut u64) -> u8 { - // Safety: There are absolutely no safety concerns with calling `_subborrow_u64`. - // It's just unsafe for API consistency with other intrinsics. - unsafe { arch::_subborrow_u64(borrow, a, b, out) } -} - -#[cfg(all(use_addcarry, not(u64_digit)))] -#[inline] -fn sbb(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 { - // Safety: There are absolutely no safety concerns with calling `_subborrow_u32`. - // It's just unsafe for API consistency with other intrinsics. 
- unsafe { arch::_subborrow_u32(borrow, a, b, out) } -} - -// fallback for environments where we don't have a subborrow intrinsic -#[cfg(not(use_addcarry))] -#[inline] -fn sbb(borrow: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 { - let difference = SignedDoubleBigDigit::from(a) - - SignedDoubleBigDigit::from(b) - - SignedDoubleBigDigit::from(borrow); - *out = difference as BigDigit; - u8::from(difference < 0) -} - -#[inline] -pub(crate) fn mac_with_carry( - a: BigDigit, - b: BigDigit, - c: BigDigit, - acc: &mut DoubleBigDigit, -) -> BigDigit { - *acc += DoubleBigDigit::from(a); - *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c); - let lo = *acc as BigDigit; - *acc >>= big_digit::BITS; - lo -} - -#[inline] -pub(crate) fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { - *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b); - let lo = *acc as BigDigit; - *acc >>= big_digit::BITS; - lo -} - -/// Divide a two digit numerator by a one digit divisor, returns quotient and remainder: -/// -/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit. -/// This is _not_ true for an arbitrary numerator/denominator. -/// -/// (This function also matches what the x86 divide instruction does). -#[inline] -fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { - debug_assert!(hi < divisor); - - let lhs = big_digit::to_doublebigdigit(hi, lo); - let rhs = DoubleBigDigit::from(divisor); - ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit) -} - -/// For small divisors, we can divide without promoting to `DoubleBigDigit` by -/// using half-size pieces of digit, like long-division. -#[inline] -fn div_half(rem: BigDigit, digit: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { - use crate::big_digit::{HALF, HALF_BITS}; - use num_integer::Integer; - - debug_assert!(rem < divisor && divisor <= HALF); - let (hi, rem) = ((rem << HALF_BITS) | (digit >> HALF_BITS)).div_rem(&divisor); - let (lo, rem) = ((rem << HALF_BITS) | (digit & HALF)).div_rem(&divisor); - ((hi << HALF_BITS) | lo, rem) -} - -#[inline] -pub(crate) fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) { - let mut rem = 0; - - if b <= big_digit::HALF { - for d in a.data.iter_mut().rev() { - let (q, r) = div_half(rem, *d, b); - *d = q; - rem = r; - } - } else { - for d in a.data.iter_mut().rev() { - let (q, r) = div_wide(rem, *d, b); - *d = q; - rem = r; - } - } - - (a.normalized(), rem) -} - -#[inline] -pub(crate) fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit { - let mut rem = 0; - - if b <= big_digit::HALF { - for &digit in a.data.iter().rev() { - let (_, r) = div_half(rem, digit, b); - rem = r; - } - } else { - for &digit in a.data.iter().rev() { - let (_, r) = div_wide(rem, digit, b); - rem = r; - } - } - - rem -} - -/// Two argument addition of raw slices, `a += b`, returning the carry. -/// -/// This is used when the data `Vec` might need to resize to push a non-zero carry, so we perform -/// the addition first hoping that it will fit. -/// -/// The caller _must_ ensure that `a` is at least as long as `b`. 
-#[inline] -pub(crate) fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit { - debug_assert!(a.len() >= b.len()); - - let mut carry = 0; - let (a_lo, a_hi) = a.split_at_mut(b.len()); - - for (a, b) in a_lo.iter_mut().zip(b) { - carry = adc(carry, *a, *b, a); - } - - if carry != 0 { - for a in a_hi { - carry = adc(carry, *a, 0, a); - if carry == 0 { - break; - } - } - } - - carry as BigDigit -} - -/// Two argument addition of raw slices: -/// a += b -/// -/// The caller _must_ ensure that a is big enough to store the result - typically this means -/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry. -pub(crate) fn add2(a: &mut [BigDigit], b: &[BigDigit]) { - let carry = __add2(a, b); - - debug_assert!(carry == 0); -} - -pub(crate) fn sub2(a: &mut [BigDigit], b: &[BigDigit]) { - let mut borrow = 0; - - let len = cmp::min(a.len(), b.len()); - let (a_lo, a_hi) = a.split_at_mut(len); - let (b_lo, b_hi) = b.split_at(len); - - for (a, b) in a_lo.iter_mut().zip(b_lo) { - borrow = sbb(borrow, *a, *b, a); - } - - if borrow != 0 { - for a in a_hi { - borrow = sbb(borrow, *a, 0, a); - if borrow == 0 { - break; - } - } - } - - // note: we're _required_ to fail on underflow - assert!( - borrow == 0 && b_hi.iter().all(|x| *x == 0), - "Cannot subtract b from a because b is larger than a." - ); -} - -// Only for the Sub impl. `a` and `b` must have same length. -#[inline] -pub(crate) fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> u8 { - debug_assert!(b.len() == a.len()); - - let mut borrow = 0; - - for (ai, bi) in a.iter().zip(b) { - borrow = sbb(borrow, *ai, *bi, bi); - } - - borrow -} - -pub(crate) fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) { - debug_assert!(b.len() >= a.len()); - - let len = cmp::min(a.len(), b.len()); - let (a_lo, a_hi) = a.split_at(len); - let (b_lo, b_hi) = b.split_at_mut(len); - - let borrow = __sub2rev(a_lo, b_lo); - - assert!(a_hi.is_empty()); - - // note: we're _required_ to fail on underflow - assert!( - borrow == 0 && b_hi.iter().all(|x| *x == 0), - "Cannot subtract b from a because b is larger than a." - ); -} - -pub(crate) fn sub_sign(a: &[BigDigit], b: &[BigDigit]) -> (Sign, BigUint) { - // Normalize: - let a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; - let b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; - - match cmp_slice(a, b) { - Greater => { - let mut a = a.to_vec(); - sub2(&mut a, b); - (Plus, biguint_from_vec(a)) - } - Less => { - let mut b = b.to_vec(); - sub2(&mut b, a); - (Minus, biguint_from_vec(b)) - } - _ => (NoSign, Zero::zero()), - } -} - -/// Three argument multiply accumulate: -/// acc += b * c -pub(crate) fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) { - if c == 0 { - return; - } - - let mut carry = 0; - let (a_lo, a_hi) = acc.split_at_mut(b.len()); - - for (a, &b) in a_lo.iter_mut().zip(b) { - *a = mac_with_carry(*a, b, c, &mut carry); - } - - let (carry_hi, carry_lo) = big_digit::from_doublebigdigit(carry); - - let final_carry = if carry_hi == 0 { - __add2(a_hi, &[carry_lo]) - } else { - __add2(a_hi, &[carry_hi, carry_lo]) - }; - assert_eq!(final_carry, 0, "carry overflow during multiplication!"); -} - -/// Subtract a multiple. -/// a -= b * c -/// Returns a borrow (if a < b then borrow > 0). 
-fn sub_mul_digit_same_len(a: &mut [BigDigit], b: &[BigDigit], c: BigDigit) -> BigDigit { - debug_assert!(a.len() == b.len()); - - // carry is between -big_digit::MAX and 0, so to avoid overflow we store - // offset_carry = carry + big_digit::MAX - let mut offset_carry = big_digit::MAX; - - for (x, y) in a.iter_mut().zip(b) { - // We want to calculate sum = x - y * c + carry. - // sum >= -(big_digit::MAX * big_digit::MAX) - big_digit::MAX - // sum <= big_digit::MAX - // Offsetting sum by (big_digit::MAX << big_digit::BITS) puts it in DoubleBigDigit range. - let offset_sum = big_digit::to_doublebigdigit(big_digit::MAX, *x) - - big_digit::MAX as DoubleBigDigit - + offset_carry as DoubleBigDigit - - *y as DoubleBigDigit * c as DoubleBigDigit; - - let (new_offset_carry, new_x) = big_digit::from_doublebigdigit(offset_sum); - offset_carry = new_offset_carry; - *x = new_x; - } - - // Return the borrow. - big_digit::MAX - offset_carry -} - -fn bigint_from_slice(slice: &[BigDigit]) -> BigInt { - BigInt::from(biguint_from_vec(slice.to_vec())) -} - -/// Three argument multiply accumulate: -/// acc += b * c -fn mac3(acc: &mut [BigDigit], b: &[BigDigit], c: &[BigDigit]) { - let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) }; - - // We use three algorithms for different input sizes. - // - // - For small inputs, long multiplication is fastest. - // - Next we use Karatsuba multiplication (Toom-2), which we have optimized - // to avoid unnecessary allocations for intermediate values. - // - For the largest inputs we use Toom-3, which better optimizes the - // number of operations, but uses more temporary allocations. - // - // The thresholds are somewhat arbitrary, chosen by evaluating the results - // of `cargo bench --bench bigint multiply`. - - if x.len() <= 32 { - // Long multiplication: - for (i, xi) in x.iter().enumerate() { - mac_digit(&mut acc[i..], y, *xi); - } - } else if x.len() <= 256 { - // Karatsuba multiplication: - // - // The idea is that we break x and y up into two smaller numbers that each have about half - // as many digits, like so (note that multiplying by b is just a shift): - // - // x = x0 + x1 * b - // y = y0 + y1 * b - // - // With some algebra, we can compute x * y with three smaller products, where the inputs to - // each of the smaller products have only about half as many digits as x and y: - // - // x * y = (x0 + x1 * b) * (y0 + y1 * b) - // - // x * y = x0 * y0 - // + x0 * y1 * b - // + x1 * y0 * b - // + x1 * y1 * b^2 - // - // Let p0 = x0 * y0 and p2 = x1 * y1: - // - // x * y = p0 - // + (x0 * y1 + x1 * y0) * b - // + p2 * b^2 - // - // The real trick is that middle term: - // - // x0 * y1 + x1 * y0 - // - // = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2 - // - // = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2 - // - // Now we complete the square: - // - // = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2 - // - // = -((x1 - x0) * (y1 - y0)) + p0 + p2 - // - // Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula: - // - // x * y = p0 - // + (p0 + p2 - p1) * b - // + p2 * b^2 - // - // Where the three intermediate products are: - // - // p0 = x0 * y0 - // p1 = (x1 - x0) * (y1 - y0) - // p2 = x1 * y1 - // - // In doing the computation, we take great care to avoid unnecessary temporary variables - // (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a - // bit so we can use the same temporary variable for all the intermediate products: - // - // x * y = p2 * b^2 + p2 * b - // + p0 * b + p0 - // - p1 
* b - // - // The other trick we use is instead of doing explicit shifts, we slice acc at the - // appropriate offset when doing the add. - - // When x is smaller than y, it's significantly faster to pick b such that x is split in - // half, not y: - let b = x.len() / 2; - let (x0, x1) = x.split_at(b); - let (y0, y1) = y.split_at(b); - - // We reuse the same BigUint for all the intermediate multiplies and have to size p - // appropriately here: x1.len() >= x0.len and y1.len() >= y0.len(): - let len = x1.len() + y1.len() + 1; - let mut p = BigUint { data: vec![0; len] }; - - // p2 = x1 * y1 - mac3(&mut p.data[..], x1, y1); - - // Not required, but the adds go faster if we drop any unneeded 0s from the end: - p.normalize(); - - add2(&mut acc[b..], &p.data[..]); - add2(&mut acc[b * 2..], &p.data[..]); - - // Zero out p before the next multiply: - p.data.truncate(0); - p.data.extend(repeat(0).take(len)); - - // p0 = x0 * y0 - mac3(&mut p.data[..], x0, y0); - p.normalize(); - - add2(&mut acc[..], &p.data[..]); - add2(&mut acc[b..], &p.data[..]); - - // p1 = (x1 - x0) * (y1 - y0) - // We do this one last, since it may be negative and acc can't ever be negative: - let (j0_sign, j0) = sub_sign(x1, x0); - let (j1_sign, j1) = sub_sign(y1, y0); - - match j0_sign * j1_sign { - Plus => { - p.data.truncate(0); - p.data.extend(repeat(0).take(len)); - - mac3(&mut p.data[..], &j0.data[..], &j1.data[..]); - p.normalize(); - - sub2(&mut acc[b..], &p.data[..]); - } - Minus => { - mac3(&mut acc[b..], &j0.data[..], &j1.data[..]); - } - NoSign => (), - } - } else { - // Toom-3 multiplication: - // - // Toom-3 is like Karatsuba above, but dividing the inputs into three parts. - // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively. - // - // The general idea is to treat the large integers digits as - // polynomials of a certain degree and determine the coefficients/digits - // of the product of the two via interpolation of the polynomial product. - let i = y.len() / 3 + 1; - - let x0_len = cmp::min(x.len(), i); - let x1_len = cmp::min(x.len() - x0_len, i); - - let y0_len = i; - let y1_len = cmp::min(y.len() - y0_len, i); - - // Break x and y into three parts, representating an order two polynomial. - // t is chosen to be the size of a digit so we can use faster shifts - // in place of multiplications. - // - // x(t) = x2*t^2 + x1*t + x0 - let x0 = bigint_from_slice(&x[..x0_len]); - let x1 = bigint_from_slice(&x[x0_len..x0_len + x1_len]); - let x2 = bigint_from_slice(&x[x0_len + x1_len..]); - - // y(t) = y2*t^2 + y1*t + y0 - let y0 = bigint_from_slice(&y[..y0_len]); - let y1 = bigint_from_slice(&y[y0_len..y0_len + y1_len]); - let y2 = bigint_from_slice(&y[y0_len + y1_len..]); - - // Let w(t) = x(t) * y(t) - // - // This gives us the following order-4 polynomial. - // - // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0 - // - // We need to find the coefficients w4, w3, w2, w1 and w0. Instead - // of simply multiplying the x and y in total, we can evaluate w - // at 5 points. An n-degree polynomial is uniquely identified by (n + 1) - // points. - // - // It is arbitrary as to what points we evaluate w at but we use the - // following. 
- // - // w(t) at t = 0, 1, -1, -2 and inf - // - // The values for w(t) in terms of x(t)*y(t) at these points are: - // - // let a = w(0) = x0 * y0 - // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0) - // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0) - // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0) - // let e = w(inf) = x2 * y2 as t -> inf - - // x0 + x2, avoiding temporaries - let p = &x0 + &x2; - - // y0 + y2, avoiding temporaries - let q = &y0 + &y2; - - // x2 - x1 + x0, avoiding temporaries - let p2 = &p - &x1; - - // y2 - y1 + y0, avoiding temporaries - let q2 = &q - &y1; - - // w(0) - let r0 = &x0 * &y0; - - // w(inf) - let r4 = &x2 * &y2; - - // w(1) - let r1 = (p + x1) * (q + y1); - - // w(-1) - let r2 = &p2 * &q2; - - // w(-2) - let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0); - - // Evaluating these points gives us the following system of linear equations. - // - // 0 0 0 0 1 | a - // 1 1 1 1 1 | b - // 1 -1 1 -1 1 | c - // 16 -8 4 -2 1 | d - // 1 0 0 0 0 | e - // - // The solved equation (after gaussian elimination or similar) - // in terms of its coefficients: - // - // w0 = w(0) - // w1 = w(0)/2 + w(1)/3 - w(-1) + w(2)/6 - 2*w(inf) - // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf) - // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(1)/6 - // w4 = w(inf) - // - // This particular sequence is given by Bodrato and is an interpolation - // of the above equations. - let mut comp3: BigInt = (r3 - &r1) / 3; - let mut comp1: BigInt = (r1 - &r2) / 2; - let mut comp2: BigInt = r2 - &r0; - comp3 = (&comp2 - comp3) / 2 + &r4 * 2; - comp2 += &comp1 - &r4; - comp1 -= &comp3; - - // Recomposition. The coefficients of the polynomial are now known. - // - // Evaluate at w(t) where t is our given base to get the result. - let bits = u64::from(big_digit::BITS) * i as u64; - let result = r0 - + (comp1 << bits) - + (comp2 << (2 * bits)) - + (comp3 << (3 * bits)) - + (r4 << (4 * bits)); - let result_pos = result.to_biguint().unwrap(); - add2(&mut acc[..], &result_pos.data); - } -} - -pub(crate) fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint { - let len = x.len() + y.len() + 1; - let mut prod = BigUint { data: vec![0; len] }; - - mac3(&mut prod.data[..], x, y); - prod.normalized() -} - -pub(crate) fn scalar_mul(a: &mut [BigDigit], b: BigDigit) -> BigDigit { - let mut carry = 0; - for a in a.iter_mut() { - *a = mul_with_carry(*a, b, &mut carry); - } - carry as BigDigit -} - -pub(crate) fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) { - if d.is_zero() { - panic!("attempt to divide by zero") - } - if u.is_zero() { - return (Zero::zero(), Zero::zero()); - } - - if d.data.len() == 1 { - if d.data == [1] { - return (u, Zero::zero()); - } - let (div, rem) = div_rem_digit(u, d.data[0]); - // reuse d - d.data.clear(); - d += rem; - return (div, d); - } - - // Required or the q_len calculation below can underflow: - match u.cmp(&d) { - Less => return (Zero::zero(), u), - Equal => { - u.set_one(); - return (u, Zero::zero()); - } - Greater => {} // Do nothing - } - - // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: - // - // First, normalize the arguments so the highest bit in the highest digit of the divisor is - // set: the main loop uses the highest digit of the divisor for generating guesses, so we - // want it to be the largest number we can efficiently divide by. 
- // - let shift = d.data.last().unwrap().leading_zeros() as usize; - - let (q, r) = if shift == 0 { - // no need to clone d - div_rem_core(u, &d) - } else { - div_rem_core(u << shift, &(d << shift)) - }; - // renormalize the remainder - (q, r >> shift) -} - -pub(crate) fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) { - if d.is_zero() { - panic!("attempt to divide by zero") - } - if u.is_zero() { - return (Zero::zero(), Zero::zero()); - } - - if d.data.len() == 1 { - if d.data == [1] { - return (u.clone(), Zero::zero()); - } - - let (div, rem) = div_rem_digit(u.clone(), d.data[0]); - return (div, rem.into()); - } - - // Required or the q_len calculation below can underflow: - match u.cmp(d) { - Less => return (Zero::zero(), u.clone()), - Equal => return (One::one(), Zero::zero()), - Greater => {} // Do nothing - } - - // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: - // - // First, normalize the arguments so the highest bit in the highest digit of the divisor is - // set: the main loop uses the highest digit of the divisor for generating guesses, so we - // want it to be the largest number we can efficiently divide by. - // - let shift = d.data.last().unwrap().leading_zeros() as usize; - - let (q, r) = if shift == 0 { - // no need to clone d - div_rem_core(u.clone(), d) - } else { - div_rem_core(u << shift, &(d << shift)) - }; - // renormalize the remainder - (q, r >> shift) -} - -/// An implementation of the base division algorithm. -/// Knuth, TAOCP vol 2 section 4.3.1, algorithm D, with an improvement from exercises 19-21. -fn div_rem_core(mut a: BigUint, b: &BigUint) -> (BigUint, BigUint) { - debug_assert!( - a.data.len() >= b.data.len() - && b.data.len() > 1 - && b.data.last().unwrap().leading_zeros() == 0 - ); - - // The algorithm works by incrementally calculating "guesses", q0, for the next digit of the - // quotient. Once we have any number q0 such that (q0 << j) * b <= a, we can set - // - // q += q0 << j - // a -= (q0 << j) * b - // - // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder. - // - // q0, our guess, is calculated by dividing the last three digits of a by the last two digits of - // b - this will give us a guess that is close to the actual quotient, but is possibly greater. - // It can only be greater by 1 and only in rare cases, with probability at most - // 2^-(big_digit::BITS-1) for random a, see TAOCP 4.3.1 exercise 21. - // - // If the quotient turns out to be too large, we adjust it by 1: - // q -= 1 << j - // a += b << j - - // a0 stores an additional extra most significant digit of the dividend, not stored in a. - let mut a0 = 0; - - // [b1, b0] are the two most significant digits of the divisor. They never change. - let b0 = *b.data.last().unwrap(); - let b1 = b.data[b.data.len() - 2]; - - let q_len = a.data.len() - b.data.len() + 1; - let mut q = BigUint { - data: vec![0; q_len], - }; - - for j in (0..q_len).rev() { - debug_assert!(a.data.len() == b.data.len() + j); - - let a1 = *a.data.last().unwrap(); - let a2 = a.data[a.data.len() - 2]; - - // The first q0 estimate is [a1,a0] / b0. It will never be too small, it may be too large - // by at most 2. - let (mut q0, mut r) = if a0 < b0 { - let (q0, r) = div_wide(a0, a1, b0); - (q0, r as DoubleBigDigit) - } else { - debug_assert!(a0 == b0); - // Avoid overflowing q0, we know the quotient fits in BigDigit. - // [a1,a0] = b0 * (1< a0 { - // q0 is too large. We need to add back one multiple of b. 
- q0 -= 1; - borrow -= __add2(&mut a.data[j..], &b.data); - } - // The top digit of a, stored in a0, has now been zeroed. - debug_assert!(borrow == a0); - - q.data[j] = q0; - - // Pop off the next top digit of a. - a0 = a.data.pop().unwrap(); - } - - a.data.push(a0); - a.normalize(); - - debug_assert!(a < *b); - - (q.normalized(), a) -} - -/// Find last set bit -/// fls(0) == 0, fls(u32::MAX) == 32 -pub(crate) fn fls(v: T) -> u8 { - mem::size_of::() as u8 * 8 - v.leading_zeros() as u8 -} - -pub(crate) fn ilog2(v: T) -> u8 { - fls(v) - 1 -} - -#[inline] -pub(crate) fn biguint_shl(n: Cow<'_, BigUint>, shift: T) -> BigUint { - if shift < T::zero() { - panic!("attempt to shift left with negative"); - } - if n.is_zero() { - return n.into_owned(); - } - let bits = T::from(big_digit::BITS).unwrap(); - let digits = (shift / bits).to_usize().expect("capacity overflow"); - let shift = (shift % bits).to_u8().unwrap(); - biguint_shl2(n, digits, shift) -} - -fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint { - let mut data = match digits { - 0 => n.into_owned().data, - _ => { - let len = digits.saturating_add(n.data.len() + 1); - let mut data = Vec::with_capacity(len); - data.extend(repeat(0).take(digits)); - data.extend(n.data.iter()); - data - } - }; - - if shift > 0 { - let mut carry = 0; - let carry_shift = big_digit::BITS as u8 - shift; - for elem in data[digits..].iter_mut() { - let new_carry = *elem >> carry_shift; - *elem = (*elem << shift) | carry; - carry = new_carry; - } - if carry != 0 { - data.push(carry); - } - } - - biguint_from_vec(data) -} - -#[inline] -pub(crate) fn biguint_shr(n: Cow<'_, BigUint>, shift: T) -> BigUint { - if shift < T::zero() { - panic!("attempt to shift right with negative"); - } - if n.is_zero() { - return n.into_owned(); - } - let bits = T::from(big_digit::BITS).unwrap(); - let digits = (shift / bits).to_usize().unwrap_or(core::usize::MAX); - let shift = (shift % bits).to_u8().unwrap(); - biguint_shr2(n, digits, shift) -} - -fn biguint_shr2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint { - if digits >= n.data.len() { - let mut n = n.into_owned(); - n.set_zero(); - return n; - } - let mut data = match n { - Cow::Borrowed(n) => n.data[digits..].to_vec(), - Cow::Owned(mut n) => { - n.data.drain(..digits); - n.data - } - }; - - if shift > 0 { - let mut borrow = 0; - let borrow_shift = big_digit::BITS as u8 - shift; - for elem in data.iter_mut().rev() { - let new_borrow = *elem << borrow_shift; - *elem = (*elem >> shift) | borrow; - borrow = new_borrow; - } - } - - biguint_from_vec(data) -} - -pub(crate) fn cmp_slice(a: &[BigDigit], b: &[BigDigit]) -> Ordering { - debug_assert!(a.last() != Some(&0)); - debug_assert!(b.last() != Some(&0)); - - match Ord::cmp(&a.len(), &b.len()) { - Equal => Iterator::cmp(a.iter().rev(), b.iter().rev()), - other => other, - } -} - -#[cfg(test)] -mod algorithm_tests { - use crate::big_digit::BigDigit; - use crate::{BigInt, BigUint}; - use num_traits::Num; - - #[test] - fn test_sub_sign() { - use super::sub_sign; - - fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt { - let (sign, val) = sub_sign(a, b); - BigInt::from_biguint(sign, val) - } - - let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap(); - let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap(); - let a_i = BigInt::from(a.clone()); - let b_i = BigInt::from(b.clone()); - - assert_eq!(sub_sign_i(&a.data[..], &b.data[..]), &a_i - &b_i); - assert_eq!(sub_sign_i(&b.data[..], &a.data[..]), 
&b_i - &a_i); - } -} diff --git a/src/rust/vendor/num-bigint/src/bigint.rs b/src/rust/vendor/num-bigint/src/bigint.rs index d1ff4e416..891eeb460 100644 --- a/src/rust/vendor/num-bigint/src/bigint.rs +++ b/src/rust/vendor/num-bigint/src/bigint.rs @@ -1,43 +1,40 @@ // `Add`/`Sub` ops may flip from `BigInt` to its `BigUint` magnitude #![allow(clippy::suspicious_arithmetic_impl)] -#[cfg(any(feature = "quickcheck", feature = "arbitrary"))] -use crate::std_alloc::Box; use crate::std_alloc::{String, Vec}; -use core::cmp::Ordering::{self, Equal, Greater, Less}; -#[cfg(has_try_from)] -use core::convert::TryFrom; +use core::cmp::Ordering::{self, Equal}; use core::default::Default; use core::fmt; use core::hash; -use core::iter::{Product, Sum}; -use core::mem; -use core::ops::{ - Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign, - Mul, MulAssign, Neg, Not, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign, -}; -use core::str::{self, FromStr}; +use core::ops::{Neg, Not}; +use core::str; use core::{i128, u128}; use core::{i64, u64}; use num_integer::{Integer, Roots}; -use num_traits::{ - CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, PrimInt, Signed, - ToPrimitive, Zero, -}; +use num_traits::{Num, One, Pow, Signed, Zero}; use self::Sign::{Minus, NoSign, Plus}; -use crate::big_digit::{self, BigDigit, DoubleBigDigit}; -use crate::biguint; +use crate::big_digit::BigDigit; use crate::biguint::to_str_radix_reversed; -use crate::biguint::{BigUint, IntDigits}; -use crate::ParseBigIntError; -#[cfg(has_try_from)] -use crate::TryFromBigIntError; +use crate::biguint::{BigUint, IntDigits, U32Digits, U64Digits}; + +mod addition; +mod division; +mod multiplication; +mod subtraction; + +mod bits; +mod convert; +mod power; +mod shift; + +#[cfg(any(feature = "quickcheck", feature = "arbitrary"))] +mod arbitrary; -use crate::IsizePromotion; -use crate::UsizePromotion; +#[cfg(feature = "serde")] +mod serde; /// A Sign is a `BigInt`'s composing element. #[derive(PartialEq, PartialOrd, Eq, Ord, Copy, Clone, Debug, Hash)] @@ -61,59 +58,7 @@ impl Neg for Sign { } } -impl Mul for Sign { - type Output = Sign; - - #[inline] - fn mul(self, other: Sign) -> Sign { - match (self, other) { - (NoSign, _) | (_, NoSign) => NoSign, - (Plus, Plus) | (Minus, Minus) => Plus, - (Plus, Minus) | (Minus, Plus) => Minus, - } - } -} - -#[cfg(feature = "serde")] -impl serde::Serialize for Sign { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - // Note: do not change the serialization format, or it may break - // forward and backward compatibility of serialized data! - match *self { - Sign::Minus => (-1i8).serialize(serializer), - Sign::NoSign => 0i8.serialize(serializer), - Sign::Plus => 1i8.serialize(serializer), - } - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for Sign { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - use serde::de::Error; - use serde::de::Unexpected; - - let sign: i8 = serde::Deserialize::deserialize(deserializer)?; - match sign { - -1 => Ok(Sign::Minus), - 0 => Ok(Sign::NoSign), - 1 => Ok(Sign::Plus), - _ => Err(D::Error::invalid_value( - Unexpected::Signed(sign.into()), - &"a sign of -1, 0, or 1", - )), - } - } -} - /// A big signed integer type. 
-#[derive(Debug)] pub struct BigInt { sign: Sign, data: BigUint, @@ -137,41 +82,6 @@ impl Clone for BigInt { } } -#[cfg(feature = "quickcheck")] -impl quickcheck::Arbitrary for BigInt { - fn arbitrary(g: &mut G) -> Self { - let positive = bool::arbitrary(g); - let sign = if positive { Sign::Plus } else { Sign::Minus }; - Self::from_biguint(sign, BigUint::arbitrary(g)) - } - - fn shrink(&self) -> Box> { - let sign = self.sign(); - let unsigned_shrink = self.data.shrink(); - Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x))) - } -} - -#[cfg(feature = "arbitrary")] -mod abitrary_impl { - use super::*; - use arbitrary::{Arbitrary, Result, Unstructured}; - - impl Arbitrary for BigInt { - fn arbitrary(u: &mut Unstructured<'_>) -> Result { - let positive = bool::arbitrary(u)?; - let sign = if positive { Sign::Plus } else { Sign::Minus }; - Ok(Self::from_biguint(sign, BigUint::arbitrary(u)?)) - } - - fn shrink(&self) -> Box> { - let sign = self.sign(); - let unsigned_shrink = self.data.shrink(); - Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x))) - } - } -} - impl hash::Hash for BigInt { #[inline] fn hash(&self, state: &mut H) { @@ -226,6 +136,12 @@ impl Default for BigInt { } } +impl fmt::Debug for BigInt { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + impl fmt::Display for BigInt { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad_integral(!self.is_negative(), "", &self.data.to_str_radix(10)) @@ -258,30 +174,6 @@ impl fmt::UpperHex for BigInt { } } -// Negation in two's complement. -// acc must be initialized as 1 for least-significant digit. -// -// When negating, a carry (acc == 1) means that all the digits -// considered to this point were zero. This means that if all the -// digits of a negative BigInt have been considered, carry must be -// zero as we cannot have negative zero. -// -// 01 -> ...f ff -// ff -> ...f 01 -// 01 00 -> ...f ff 00 -// 01 01 -> ...f fe ff -// 01 ff -> ...f fe 01 -// ff 00 -> ...f 01 00 -// ff 01 -> ...f 00 ff -// ff ff -> ...f 00 01 -#[inline] -fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { - *acc += DoubleBigDigit::from(!a); - let lo = *acc as BigDigit; - *acc >>= big_digit::BITS; - lo -} - // !-2 = !...f fe = ...0 01 = +1 // !-1 = !...f ff = ...0 00 = 0 // ! 
0 = !...0 00 = ...f ff = -1 @@ -316,553 +208,6 @@ impl<'a> Not for &'a BigInt { } } -// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1 -// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff -// answer is pos, has length of a -fn bitand_pos_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_b = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_b = negate_carry(bi, &mut carry_b); - *ai &= twos_b; - } - debug_assert!(b.len() > a.len() || carry_b == 0); -} - -// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff -// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1 -// answer is pos, has length of b -fn bitand_neg_pos(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = twos_a & bi; - } - debug_assert!(a.len() > b.len() || carry_a == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => a.truncate(b.len()), - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().cloned()); - } - } -} - -// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff -// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff -// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100 -// answer is neg, has length of longest with a possible carry -fn bitand_neg_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_b = 1; - let mut carry_and = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - let twos_b = negate_carry(bi, &mut carry_b); - *ai = negate_carry(twos_a & twos_b, &mut carry_and); - } - debug_assert!(a.len() > b.len() || carry_a == 0); - debug_assert!(b.len() > a.len() || carry_b == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - for ai in a[b.len()..].iter_mut() { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a, &mut carry_and); - } - debug_assert!(carry_a == 0); - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_b = negate_carry(bi, &mut carry_b); - negate_carry(twos_b, &mut carry_and) - })); - debug_assert!(carry_b == 0); - } - } - if carry_and != 0 { - a.push(1); - } -} - -forward_val_val_binop!(impl BitAnd for BigInt, bitand); -forward_ref_val_binop!(impl BitAnd for BigInt, bitand); - -// do not use forward_ref_ref_binop_commutative! 
for bitand so that we can -// clone as needed, avoiding over-allocation -impl<'a, 'b> BitAnd<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn bitand(self, other: &BigInt) -> BigInt { - match (self.sign, other.sign) { - (NoSign, _) | (_, NoSign) => BigInt::zero(), - (Plus, Plus) => BigInt::from(&self.data & &other.data), - (Plus, Minus) => self.clone() & other, - (Minus, Plus) => other.clone() & self, - (Minus, Minus) => { - // forward to val-ref, choosing the larger to clone - if self.len() >= other.len() { - self.clone() & other - } else { - other.clone() & self - } - } - } - } -} - -impl<'a> BitAnd<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn bitand(mut self, other: &BigInt) -> BigInt { - self &= other; - self - } -} - -forward_val_assign!(impl BitAndAssign for BigInt, bitand_assign); - -impl<'a> BitAndAssign<&'a BigInt> for BigInt { - fn bitand_assign(&mut self, other: &BigInt) { - match (self.sign, other.sign) { - (NoSign, _) => {} - (_, NoSign) => self.set_zero(), - (Plus, Plus) => { - self.data &= &other.data; - if self.data.is_zero() { - self.sign = NoSign; - } - } - (Plus, Minus) => { - bitand_pos_neg(self.digits_mut(), other.digits()); - self.normalize(); - } - (Minus, Plus) => { - bitand_neg_pos(self.digits_mut(), other.digits()); - self.sign = Plus; - self.normalize(); - } - (Minus, Minus) => { - bitand_neg_neg(self.digits_mut(), other.digits()); - self.normalize(); - } - } - } -} - -// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff -// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1 -// answer is neg, has length of b -fn bitor_pos_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_b = 1; - let mut carry_or = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_b = negate_carry(bi, &mut carry_b); - *ai = negate_carry(*ai | twos_b, &mut carry_or); - } - debug_assert!(b.len() > a.len() || carry_b == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - a.truncate(b.len()); - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_b = negate_carry(bi, &mut carry_b); - negate_carry(twos_b, &mut carry_or) - })); - debug_assert!(carry_b == 0); - } - } - // for carry_or to be non-zero, we would need twos_b == 0 - debug_assert!(carry_or == 0); -} - -// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1 -// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff -// answer is neg, has length of a -fn bitor_neg_pos(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_or = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a | bi, &mut carry_or); - } - debug_assert!(a.len() > b.len() || carry_a == 0); - if a.len() > b.len() { - for ai in a[b.len()..].iter_mut() { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a, &mut carry_or); - } - debug_assert!(carry_a == 0); - } - // for carry_or to be non-zero, we would need twos_a == 0 - debug_assert!(carry_or == 0); -} - -// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1 -// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1 -// answer is neg, has length of shortest -fn bitor_neg_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_b = 1; - let mut carry_or = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - let twos_b = negate_carry(bi, &mut carry_b); - *ai = negate_carry(twos_a | twos_b, &mut carry_or); - } - debug_assert!(a.len() > b.len() || carry_a == 0); - 
debug_assert!(b.len() > a.len() || carry_b == 0); - if a.len() > b.len() { - a.truncate(b.len()); - } - // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0 - debug_assert!(carry_or == 0); -} - -forward_val_val_binop!(impl BitOr for BigInt, bitor); -forward_ref_val_binop!(impl BitOr for BigInt, bitor); - -// do not use forward_ref_ref_binop_commutative! for bitor so that we can -// clone as needed, avoiding over-allocation -impl<'a, 'b> BitOr<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn bitor(self, other: &BigInt) -> BigInt { - match (self.sign, other.sign) { - (NoSign, _) => other.clone(), - (_, NoSign) => self.clone(), - (Plus, Plus) => BigInt::from(&self.data | &other.data), - (Plus, Minus) => other.clone() | self, - (Minus, Plus) => self.clone() | other, - (Minus, Minus) => { - // forward to val-ref, choosing the smaller to clone - if self.len() <= other.len() { - self.clone() | other - } else { - other.clone() | self - } - } - } - } -} - -impl<'a> BitOr<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn bitor(mut self, other: &BigInt) -> BigInt { - self |= other; - self - } -} - -forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign); - -impl<'a> BitOrAssign<&'a BigInt> for BigInt { - fn bitor_assign(&mut self, other: &BigInt) { - match (self.sign, other.sign) { - (_, NoSign) => {} - (NoSign, _) => self.clone_from(other), - (Plus, Plus) => self.data |= &other.data, - (Plus, Minus) => { - bitor_pos_neg(self.digits_mut(), other.digits()); - self.sign = Minus; - self.normalize(); - } - (Minus, Plus) => { - bitor_neg_pos(self.digits_mut(), other.digits()); - self.normalize(); - } - (Minus, Minus) => { - bitor_neg_neg(self.digits_mut(), other.digits()); - self.normalize(); - } - } - } -} - -// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100 -// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100 -// answer is neg, has length of longest with a possible carry -fn bitxor_pos_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_b = 1; - let mut carry_xor = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_b = negate_carry(bi, &mut carry_b); - *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); - } - debug_assert!(b.len() > a.len() || carry_b == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - for ai in a[b.len()..].iter_mut() { - let twos_b = !0; - *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); - } - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_b = negate_carry(bi, &mut carry_b); - negate_carry(twos_b, &mut carry_xor) - })); - debug_assert!(carry_b == 0); - } - } - if carry_xor != 0 { - a.push(1); - } -} - -// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100 -// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100 -// answer is neg, has length of longest with a possible carry -fn bitxor_neg_pos(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_xor = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a ^ bi, &mut carry_xor); - } - debug_assert!(a.len() > b.len() || carry_a == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - for ai in a[b.len()..].iter_mut() { - let twos_a = negate_carry(*ai, &mut carry_a); - *ai = negate_carry(twos_a, &mut carry_xor); - } - debug_assert!(carry_a == 0); - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_a = !0; - negate_carry(twos_a ^ bi, &mut carry_xor) - })); - } - } 
- if carry_xor != 0 { - a.push(1); - } -} - -// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe -// -ff & - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe -// answer is pos, has length of longest -fn bitxor_neg_neg(a: &mut Vec, b: &[BigDigit]) { - let mut carry_a = 1; - let mut carry_b = 1; - for (ai, &bi) in a.iter_mut().zip(b.iter()) { - let twos_a = negate_carry(*ai, &mut carry_a); - let twos_b = negate_carry(bi, &mut carry_b); - *ai = twos_a ^ twos_b; - } - debug_assert!(a.len() > b.len() || carry_a == 0); - debug_assert!(b.len() > a.len() || carry_b == 0); - match Ord::cmp(&a.len(), &b.len()) { - Greater => { - for ai in a[b.len()..].iter_mut() { - let twos_a = negate_carry(*ai, &mut carry_a); - let twos_b = !0; - *ai = twos_a ^ twos_b; - } - debug_assert!(carry_a == 0); - } - Equal => {} - Less => { - let extra = &b[a.len()..]; - a.extend(extra.iter().map(|&bi| { - let twos_a = !0; - let twos_b = negate_carry(bi, &mut carry_b); - twos_a ^ twos_b - })); - debug_assert!(carry_b == 0); - } - } -} - -forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor); - -impl<'a> BitXor<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn bitxor(mut self, other: &BigInt) -> BigInt { - self ^= other; - self - } -} - -forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign); - -impl<'a> BitXorAssign<&'a BigInt> for BigInt { - fn bitxor_assign(&mut self, other: &BigInt) { - match (self.sign, other.sign) { - (_, NoSign) => {} - (NoSign, _) => self.clone_from(other), - (Plus, Plus) => { - self.data ^= &other.data; - if self.data.is_zero() { - self.sign = NoSign; - } - } - (Plus, Minus) => { - bitxor_pos_neg(self.digits_mut(), other.digits()); - self.sign = Minus; - self.normalize(); - } - (Minus, Plus) => { - bitxor_neg_pos(self.digits_mut(), other.digits()); - self.normalize(); - } - (Minus, Minus) => { - bitxor_neg_neg(self.digits_mut(), other.digits()); - self.sign = Plus; - self.normalize(); - } - } - } -} - -impl FromStr for BigInt { - type Err = ParseBigIntError; - - #[inline] - fn from_str(s: &str) -> Result { - BigInt::from_str_radix(s, 10) - } -} - -impl Num for BigInt { - type FromStrRadixErr = ParseBigIntError; - - /// Creates and initializes a BigInt. - #[inline] - fn from_str_radix(mut s: &str, radix: u32) -> Result { - let sign = if s.starts_with('-') { - let tail = &s[1..]; - if !tail.starts_with('+') { - s = tail - } - Minus - } else { - Plus - }; - let bu = BigUint::from_str_radix(s, radix)?; - Ok(BigInt::from_biguint(sign, bu)) - } -} - -macro_rules! 
impl_shift { - (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => { - impl<'b> $Shx<&'b $rhs> for BigInt { - type Output = BigInt; - - #[inline] - fn $shx(self, rhs: &'b $rhs) -> BigInt { - $Shx::$shx(self, *rhs) - } - } - impl<'a, 'b> $Shx<&'b $rhs> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn $shx(self, rhs: &'b $rhs) -> BigInt { - $Shx::$shx(self, *rhs) - } - } - impl<'b> $ShxAssign<&'b $rhs> for BigInt { - #[inline] - fn $shx_assign(&mut self, rhs: &'b $rhs) { - $ShxAssign::$shx_assign(self, *rhs); - } - } - }; - ($($rhs:ty),+) => {$( - impl Shl<$rhs> for BigInt { - type Output = BigInt; - - #[inline] - fn shl(self, rhs: $rhs) -> BigInt { - BigInt::from_biguint(self.sign, self.data << rhs) - } - } - impl<'a> Shl<$rhs> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn shl(self, rhs: $rhs) -> BigInt { - BigInt::from_biguint(self.sign, &self.data << rhs) - } - } - impl ShlAssign<$rhs> for BigInt { - #[inline] - fn shl_assign(&mut self, rhs: $rhs) { - self.data <<= rhs - } - } - impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs } - - impl Shr<$rhs> for BigInt { - type Output = BigInt; - - #[inline] - fn shr(self, rhs: $rhs) -> BigInt { - let round_down = shr_round_down(&self, rhs); - let data = self.data >> rhs; - let data = if round_down { data + 1u8 } else { data }; - BigInt::from_biguint(self.sign, data) - } - } - impl<'a> Shr<$rhs> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn shr(self, rhs: $rhs) -> BigInt { - let round_down = shr_round_down(self, rhs); - let data = &self.data >> rhs; - let data = if round_down { data + 1u8 } else { data }; - BigInt::from_biguint(self.sign, data) - } - } - impl ShrAssign<$rhs> for BigInt { - #[inline] - fn shr_assign(&mut self, rhs: $rhs) { - let round_down = shr_round_down(self, rhs); - self.data >>= rhs; - if round_down { - self.data += 1u8; - } else if self.data.is_zero() { - self.sign = NoSign; - } - } - } - impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs } - )*}; -} - -impl_shift! { u8, u16, u32, u64, u128, usize } -impl_shift! { i8, i16, i32, i64, i128, isize } - -// Negative values need a rounding adjustment if there are any ones in the -// bits that are getting shifted out. -fn shr_round_down(i: &BigInt, shift: T) -> bool { - if i.is_negative() { - let zeros = i.trailing_zeros().expect("negative values are non-zero"); - shift > T::zero() && shift.to_u64().map(|shift| zeros < shift).unwrap_or(true) - } else { - false - } -} - impl Zero for BigInt { #[inline] fn zero() -> BigInt { @@ -943,68 +288,6 @@ impl Signed for BigInt { } } -/// Help function for pow -/// -/// Computes the effect of the exponent on the sign. -#[inline] -fn powsign(sign: Sign, other: &T) -> Sign { - if other.is_zero() { - Plus - } else if sign != Minus || other.is_odd() { - sign - } else { - -sign - } -} - -macro_rules! 
pow_impl { - ($T:ty) => { - impl Pow<$T> for BigInt { - type Output = BigInt; - - #[inline] - fn pow(self, rhs: $T) -> BigInt { - BigInt::from_biguint(powsign(self.sign, &rhs), self.data.pow(rhs)) - } - } - - impl<'b> Pow<&'b $T> for BigInt { - type Output = BigInt; - - #[inline] - fn pow(self, rhs: &$T) -> BigInt { - BigInt::from_biguint(powsign(self.sign, rhs), self.data.pow(rhs)) - } - } - - impl<'a> Pow<$T> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn pow(self, rhs: $T) -> BigInt { - BigInt::from_biguint(powsign(self.sign, &rhs), Pow::pow(&self.data, rhs)) - } - } - - impl<'a, 'b> Pow<&'b $T> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn pow(self, rhs: &$T) -> BigInt { - BigInt::from_biguint(powsign(self.sign, rhs), Pow::pow(&self.data, rhs)) - } - } - }; -} - -pow_impl!(u8); -pow_impl!(u16); -pow_impl!(u32); -pow_impl!(u64); -pow_impl!(usize); -pow_impl!(u128); -pow_impl!(BigUint); - trait UnsignedAbs { type Unsigned; @@ -1046,1100 +329,8 @@ impl_unsigned_abs!(i8, u8); impl_unsigned_abs!(i16, u16); impl_unsigned_abs!(i32, u32); impl_unsigned_abs!(i64, u64); -impl_unsigned_abs!(i128, u128); -impl_unsigned_abs!(isize, usize); - -// We want to forward to BigUint::add, but it's not clear how that will go until -// we compare both sign and magnitude. So we duplicate this body for every -// val/ref combination, deferring that decision to BigUint's own forwarding. -macro_rules! bigint_add { - ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { - match ($a.sign, $b.sign) { - (_, NoSign) => $a_owned, - (NoSign, _) => $b_owned, - // same sign => keep the sign with the sum of magnitudes - (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data), - // opposite signs => keep the sign of the larger with the difference of magnitudes - (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) { - Less => BigInt::from_biguint($b.sign, $b_data - $a_data), - Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), - Equal => Zero::zero(), - }, - } - }; -} - -impl<'a, 'b> Add<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: &BigInt) -> BigInt { - bigint_add!( - self, - self.clone(), - &self.data, - other, - other.clone(), - &other.data - ) - } -} - -impl<'a> Add for &'a BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: BigInt) -> BigInt { - bigint_add!(self, self.clone(), &self.data, other, other, other.data) - } -} - -impl<'a> Add<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: &BigInt) -> BigInt { - bigint_add!(self, self, self.data, other, other.clone(), &other.data) - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: BigInt) -> BigInt { - bigint_add!(self, self, self.data, other, other, other.data) - } -} - -impl<'a> AddAssign<&'a BigInt> for BigInt { - #[inline] - fn add_assign(&mut self, other: &BigInt) { - let n = mem::replace(self, BigInt::zero()); - *self = n + other; - } -} -forward_val_assign!(impl AddAssign for BigInt, add_assign); - -promote_all_scalars!(impl Add for BigInt, add); -promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: u32) -> 
BigInt { - match self.sign { - NoSign => From::from(other), - Plus => BigInt::from(self.data + other), - Minus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Less => BigInt::from(other - self.data), - Greater => -BigInt::from(self.data - other), - }, - } - } -} - -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: u32) { - let n = mem::replace(self, BigInt::zero()); - *self = n + other; - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: u64) -> BigInt { - match self.sign { - NoSign => From::from(other), - Plus => BigInt::from(self.data + other), - Minus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Less => BigInt::from(other - self.data), - Greater => -BigInt::from(self.data - other), - }, - } - } -} - -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: u64) { - let n = mem::replace(self, BigInt::zero()); - *self = n + other; - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: u128) -> BigInt { - match self.sign { - NoSign => BigInt::from(other), - Plus => BigInt::from(self.data + other), - Minus => match self.data.cmp(&From::from(other)) { - Equal => BigInt::zero(), - Less => BigInt::from(other - self.data), - Greater => -BigInt::from(self.data - other), - }, - } - } -} -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: u128) { - let n = mem::replace(self, BigInt::zero()); - *self = n + other; - } -} - -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: i32) -> BigInt { - match other.checked_uabs() { - Positive(u) => self + u, - Negative(u) => self - u, - } - } -} -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: i32) { - match other.checked_uabs() { - Positive(u) => *self += u, - Negative(u) => *self -= u, - } - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: i64) -> BigInt { - match other.checked_uabs() { - Positive(u) => self + u, - Negative(u) => self - u, - } - } -} -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: i64) { - match other.checked_uabs() { - Positive(u) => *self += u, - Negative(u) => *self -= u, - } - } -} - -impl Add for BigInt { - type Output = BigInt; - - #[inline] - fn add(self, other: i128) -> BigInt { - match other.checked_uabs() { - Positive(u) => self + u, - Negative(u) => self - u, - } - } -} -impl AddAssign for BigInt { - #[inline] - fn add_assign(&mut self, other: i128) { - match other.checked_uabs() { - Positive(u) => *self += u, - Negative(u) => *self -= u, - } - } -} - -// We want to forward to BigUint::sub, but it's not clear how that will go until -// we compare both sign and magnitude. So we duplicate this body for every -// val/ref combination, deferring that decision to BigUint's own forwarding. -macro_rules! 
bigint_sub { - ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { - match ($a.sign, $b.sign) { - (_, NoSign) => $a_owned, - (NoSign, _) => -$b_owned, - // opposite signs => keep the sign of the left with the sum of magnitudes - (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data), - // same sign => keep or toggle the sign of the left with the difference of magnitudes - (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) { - Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data), - Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), - Equal => Zero::zero(), - }, - } - }; -} - -impl<'a, 'b> Sub<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: &BigInt) -> BigInt { - bigint_sub!( - self, - self.clone(), - &self.data, - other, - other.clone(), - &other.data - ) - } -} - -impl<'a> Sub for &'a BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - bigint_sub!(self, self.clone(), &self.data, other, other, other.data) - } -} - -impl<'a> Sub<&'a BigInt> for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: &BigInt) -> BigInt { - bigint_sub!(self, self, self.data, other, other.clone(), &other.data) - } -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - bigint_sub!(self, self, self.data, other, other, other.data) - } -} - -impl<'a> SubAssign<&'a BigInt> for BigInt { - #[inline] - fn sub_assign(&mut self, other: &BigInt) { - let n = mem::replace(self, BigInt::zero()); - *self = n - other; - } -} -forward_val_assign!(impl SubAssign for BigInt, sub_assign); - -promote_all_scalars!(impl Sub for BigInt, sub); -promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign); -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: u32) -> BigInt { - match self.sign { - NoSign => -BigInt::from(other), - Minus => -BigInt::from(self.data + other), - Plus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Greater => BigInt::from(self.data - other), - Less => -BigInt::from(other - self.data), - }, - } - } -} -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: u32) { - let n = mem::replace(self, BigInt::zero()); - *self = n - other; - } -} - -impl Sub for u32 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - -(other - self) - } -} - -impl Sub for u64 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - -(other - self) - } -} - -impl Sub for u128 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - -(other - self) - } -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: u64) -> BigInt { - match self.sign { - NoSign => -BigInt::from(other), - Minus => -BigInt::from(self.data + other), - Plus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Greater => BigInt::from(self.data - other), - Less => -BigInt::from(other - self.data), - }, - } - } -} - -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: u64) { - let n = mem::replace(self, BigInt::zero()); - *self = n - other; - } -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn 
sub(self, other: u128) -> BigInt { - match self.sign { - NoSign => -BigInt::from(other), - Minus => -BigInt::from(self.data + other), - Plus => match self.data.cmp(&From::from(other)) { - Equal => Zero::zero(), - Greater => BigInt::from(self.data - other), - Less => -BigInt::from(other - self.data), - }, - } - } -} - -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: u128) { - let n = mem::replace(self, BigInt::zero()); - *self = n - other; - } -} - -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: i32) -> BigInt { - match other.checked_uabs() { - Positive(u) => self - u, - Negative(u) => self + u, - } - } -} -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: i32) { - match other.checked_uabs() { - Positive(u) => *self -= u, - Negative(u) => *self += u, - } - } -} - -impl Sub for i32 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u - other, - Negative(u) => -other - u, - } - } -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: i64) -> BigInt { - match other.checked_uabs() { - Positive(u) => self - u, - Negative(u) => self + u, - } - } -} -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: i64) { - match other.checked_uabs() { - Positive(u) => *self -= u, - Negative(u) => *self += u, - } - } -} - -impl Sub for i64 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u - other, - Negative(u) => -other - u, - } - } -} - -impl Sub for BigInt { - type Output = BigInt; - - #[inline] - fn sub(self, other: i128) -> BigInt { - match other.checked_uabs() { - Positive(u) => self - u, - Negative(u) => self + u, - } - } -} - -impl SubAssign for BigInt { - #[inline] - fn sub_assign(&mut self, other: i128) { - match other.checked_uabs() { - Positive(u) => *self -= u, - Negative(u) => *self += u, - } - } -} - -impl Sub for i128 { - type Output = BigInt; - - #[inline] - fn sub(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u - other, - Negative(u) => -other - u, - } - } -} - -forward_all_binop_to_ref_ref!(impl Mul for BigInt, mul); - -impl<'a, 'b> Mul<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: &BigInt) -> BigInt { - BigInt::from_biguint(self.sign * other.sign, &self.data * &other.data) - } -} - -impl<'a> MulAssign<&'a BigInt> for BigInt { - #[inline] - fn mul_assign(&mut self, other: &BigInt) { - *self = &*self * other; - } -} -forward_val_assign!(impl MulAssign for BigInt, mul_assign); - -promote_all_scalars!(impl Mul for BigInt, mul); -promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: u32) -> BigInt { - BigInt::from_biguint(self.sign, self.data * other) - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: u32) { - self.data *= other; - if self.data.is_zero() { - self.sign = NoSign; 
- } - } -} - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: u64) -> BigInt { - BigInt::from_biguint(self.sign, self.data * other) - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: u64) { - self.data *= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: u128) -> BigInt { - BigInt::from_biguint(self.sign, self.data * other) - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: u128) { - self.data *= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: i32) -> BigInt { - match other.checked_uabs() { - Positive(u) => self * u, - Negative(u) => -self * u, - } - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: i32) { - match other.checked_uabs() { - Positive(u) => *self *= u, - Negative(u) => { - self.sign = -self.sign; - self.data *= u; - } - } - } -} - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: i64) -> BigInt { - match other.checked_uabs() { - Positive(u) => self * u, - Negative(u) => -self * u, - } - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: i64) { - match other.checked_uabs() { - Positive(u) => *self *= u, - Negative(u) => { - self.sign = -self.sign; - self.data *= u; - } - } - } -} - -impl Mul for BigInt { - type Output = BigInt; - - #[inline] - fn mul(self, other: i128) -> BigInt { - match other.checked_uabs() { - Positive(u) => self * u, - Negative(u) => -self * u, - } - } -} - -impl MulAssign for BigInt { - #[inline] - fn mul_assign(&mut self, other: i128) { - match other.checked_uabs() { - Positive(u) => *self *= u, - Negative(u) => { - self.sign = -self.sign; - self.data *= u; - } - } - } -} - -forward_all_binop_to_ref_ref!(impl Div for BigInt, div); - -impl<'a, 'b> Div<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: &BigInt) -> BigInt { - let (q, _) = self.div_rem(other); - q - } -} - -impl<'a> DivAssign<&'a BigInt> for BigInt { - #[inline] - fn div_assign(&mut self, other: &BigInt) { - *self = &*self / other; - } -} -forward_val_assign!(impl DivAssign for BigInt, div_assign); - -promote_all_scalars!(impl Div for BigInt, div); -promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: u32) -> BigInt { - BigInt::from_biguint(self.sign, self.data / other) - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: u32) { - self.data /= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Div for u32 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - BigInt::from_biguint(other.sign, self / other.data) - } -} - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: u64) -> BigInt { - BigInt::from_biguint(self.sign, self.data 
/ other) - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: u64) { - self.data /= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Div for u64 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - BigInt::from_biguint(other.sign, self / other.data) - } -} - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: u128) -> BigInt { - BigInt::from_biguint(self.sign, self.data / other) - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: u128) { - self.data /= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Div for u128 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - BigInt::from_biguint(other.sign, self / other.data) - } -} - -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: i32) -> BigInt { - match other.checked_uabs() { - Positive(u) => self / u, - Negative(u) => -self / u, - } - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: i32) { - match other.checked_uabs() { - Positive(u) => *self /= u, - Negative(u) => { - self.sign = -self.sign; - *self /= u; - } - } - } -} - -impl Div for i32 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u / other, - Negative(u) => u / -other, - } - } -} - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: i64) -> BigInt { - match other.checked_uabs() { - Positive(u) => self / u, - Negative(u) => -self / u, - } - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: i64) { - match other.checked_uabs() { - Positive(u) => *self /= u, - Negative(u) => { - self.sign = -self.sign; - *self /= u; - } - } - } -} - -impl Div for i64 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u / other, - Negative(u) => u / -other, - } - } -} - -impl Div for BigInt { - type Output = BigInt; - - #[inline] - fn div(self, other: i128) -> BigInt { - match other.checked_uabs() { - Positive(u) => self / u, - Negative(u) => -self / u, - } - } -} - -impl DivAssign for BigInt { - #[inline] - fn div_assign(&mut self, other: i128) { - match other.checked_uabs() { - Positive(u) => *self /= u, - Negative(u) => { - self.sign = -self.sign; - *self /= u; - } - } - } -} - -impl Div for i128 { - type Output = BigInt; - - #[inline] - fn div(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u / other, - Negative(u) => u / -other, - } - } -} - -forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem); - -impl<'a, 'b> Rem<&'b BigInt> for &'a BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: &BigInt) -> BigInt { - if let Some(other) = other.to_u32() { - self % other - } else if let Some(other) = other.to_i32() { - self % other - } else { - let (_, r) = self.div_rem(other); - r - } - } -} - -impl<'a> RemAssign<&'a BigInt> for BigInt { - #[inline] - fn rem_assign(&mut self, other: &BigInt) { - *self = &*self % other; - } -} -forward_val_assign!(impl RemAssign for BigInt, rem_assign); - -promote_all_scalars!(impl Rem for BigInt, rem); -promote_all_scalars_assign!(impl RemAssign 
for BigInt, rem_assign); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: u32) -> BigInt { - BigInt::from_biguint(self.sign, self.data % other) - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: u32) { - self.data %= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Rem for u32 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - BigInt::from(self % other.data) - } -} - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: u64) -> BigInt { - BigInt::from_biguint(self.sign, self.data % other) - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: u64) { - self.data %= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Rem for u64 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - BigInt::from(self % other.data) - } -} - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: u128) -> BigInt { - BigInt::from_biguint(self.sign, self.data % other) - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: u128) { - self.data %= other; - if self.data.is_zero() { - self.sign = NoSign; - } - } -} - -impl Rem for u128 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - BigInt::from(self % other.data) - } -} - -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: i32) -> BigInt { - self % other.uabs() - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: i32) { - *self %= other.uabs(); - } -} - -impl Rem for i32 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u % other, - Negative(u) => -(u % other), - } - } -} - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: i64) -> BigInt { - self % other.uabs() - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: i64) { - *self %= other.uabs(); - } -} - -impl Rem for i64 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u % other, - Negative(u) => -(u % other), - } - } -} - -impl Rem for BigInt { - type Output = BigInt; - - #[inline] - fn rem(self, other: i128) -> BigInt { - self % other.uabs() - } -} - -impl RemAssign for BigInt { - #[inline] - fn rem_assign(&mut self, other: i128) { - *self %= other.uabs(); - } -} - -impl Rem for i128 { - type Output = BigInt; - - #[inline] - fn rem(self, other: BigInt) -> BigInt { - match self.checked_uabs() { - Positive(u) => u % other, - Negative(u) => -(u % other), - } - } -} +impl_unsigned_abs!(i128, u128); +impl_unsigned_abs!(isize, usize); impl Neg for BigInt { type Output = BigInt; @@ -2160,37 +351,6 @@ impl<'a> Neg for &'a BigInt { } } -impl CheckedAdd for BigInt { - #[inline] - fn checked_add(&self, v: &BigInt) -> Option { - Some(self.add(v)) - } -} - -impl CheckedSub for BigInt { - #[inline] - fn checked_sub(&self, v: &BigInt) -> 
Option { - Some(self.sub(v)) - } -} - -impl CheckedMul for BigInt { - #[inline] - fn checked_mul(&self, v: &BigInt) -> Option { - Some(self.mul(v)) - } -} - -impl CheckedDiv for BigInt { - #[inline] - fn checked_div(&self, v: &BigInt) -> Option { - if v.is_zero() { - return None; - } - Some(self.div(v)) - } -} - impl Integer for BigInt { #[inline] fn div_rem(&self, other: &BigInt) -> (BigInt, BigInt) { @@ -2372,246 +532,6 @@ impl Roots for BigInt { } } -impl ToPrimitive for BigInt { - #[inline] - fn to_i64(&self) -> Option { - match self.sign { - Plus => self.data.to_i64(), - NoSign => Some(0), - Minus => { - let n = self.data.to_u64()?; - let m: u64 = 1 << 63; - match n.cmp(&m) { - Less => Some(-(n as i64)), - Equal => Some(i64::MIN), - Greater => None, - } - } - } - } - - #[inline] - fn to_i128(&self) -> Option { - match self.sign { - Plus => self.data.to_i128(), - NoSign => Some(0), - Minus => { - let n = self.data.to_u128()?; - let m: u128 = 1 << 127; - match n.cmp(&m) { - Less => Some(-(n as i128)), - Equal => Some(i128::MIN), - Greater => None, - } - } - } - } - - #[inline] - fn to_u64(&self) -> Option { - match self.sign { - Plus => self.data.to_u64(), - NoSign => Some(0), - Minus => None, - } - } - - #[inline] - fn to_u128(&self) -> Option { - match self.sign { - Plus => self.data.to_u128(), - NoSign => Some(0), - Minus => None, - } - } - - #[inline] - fn to_f32(&self) -> Option { - let n = self.data.to_f32()?; - Some(if self.sign == Minus { -n } else { n }) - } - - #[inline] - fn to_f64(&self) -> Option { - let n = self.data.to_f64()?; - Some(if self.sign == Minus { -n } else { n }) - } -} - -macro_rules! impl_try_from_bigint { - ($T:ty, $to_ty:path) => { - #[cfg(has_try_from)] - impl TryFrom<&BigInt> for $T { - type Error = TryFromBigIntError<()>; - - #[inline] - fn try_from(value: &BigInt) -> Result<$T, TryFromBigIntError<()>> { - $to_ty(value).ok_or(TryFromBigIntError::new(())) - } - } - - #[cfg(has_try_from)] - impl TryFrom for $T { - type Error = TryFromBigIntError; - - #[inline] - fn try_from(value: BigInt) -> Result<$T, TryFromBigIntError> { - <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) - } - } - }; -} - -impl_try_from_bigint!(u8, ToPrimitive::to_u8); -impl_try_from_bigint!(u16, ToPrimitive::to_u16); -impl_try_from_bigint!(u32, ToPrimitive::to_u32); -impl_try_from_bigint!(u64, ToPrimitive::to_u64); -impl_try_from_bigint!(usize, ToPrimitive::to_usize); -impl_try_from_bigint!(u128, ToPrimitive::to_u128); - -impl_try_from_bigint!(i8, ToPrimitive::to_i8); -impl_try_from_bigint!(i16, ToPrimitive::to_i16); -impl_try_from_bigint!(i32, ToPrimitive::to_i32); -impl_try_from_bigint!(i64, ToPrimitive::to_i64); -impl_try_from_bigint!(isize, ToPrimitive::to_isize); -impl_try_from_bigint!(i128, ToPrimitive::to_i128); - -impl FromPrimitive for BigInt { - #[inline] - fn from_i64(n: i64) -> Option { - Some(BigInt::from(n)) - } - - #[inline] - fn from_i128(n: i128) -> Option { - Some(BigInt::from(n)) - } - - #[inline] - fn from_u64(n: u64) -> Option { - Some(BigInt::from(n)) - } - - #[inline] - fn from_u128(n: u128) -> Option { - Some(BigInt::from(n)) - } - - #[inline] - fn from_f64(n: f64) -> Option { - if n >= 0.0 { - BigUint::from_f64(n).map(BigInt::from) - } else { - let x = BigUint::from_f64(-n)?; - Some(-BigInt::from(x)) - } - } -} - -impl From for BigInt { - #[inline] - fn from(n: i64) -> Self { - if n >= 0 { - BigInt::from(n as u64) - } else { - let u = u64::MAX - (n as u64) + 1; - BigInt { - sign: Minus, - data: BigUint::from(u), - } - } - } -} - -impl From for 
BigInt { - #[inline] - fn from(n: i128) -> Self { - if n >= 0 { - BigInt::from(n as u128) - } else { - let u = u128::MAX - (n as u128) + 1; - BigInt { - sign: Minus, - data: BigUint::from(u), - } - } - } -} - -macro_rules! impl_bigint_from_int { - ($T:ty) => { - impl From<$T> for BigInt { - #[inline] - fn from(n: $T) -> Self { - BigInt::from(n as i64) - } - } - }; -} - -impl_bigint_from_int!(i8); -impl_bigint_from_int!(i16); -impl_bigint_from_int!(i32); -impl_bigint_from_int!(isize); - -impl From for BigInt { - #[inline] - fn from(n: u64) -> Self { - if n > 0 { - BigInt { - sign: Plus, - data: BigUint::from(n), - } - } else { - BigInt::zero() - } - } -} - -impl From for BigInt { - #[inline] - fn from(n: u128) -> Self { - if n > 0 { - BigInt { - sign: Plus, - data: BigUint::from(n), - } - } else { - BigInt::zero() - } - } -} - -macro_rules! impl_bigint_from_uint { - ($T:ty) => { - impl From<$T> for BigInt { - #[inline] - fn from(n: $T) -> Self { - BigInt::from(n as u64) - } - } - }; -} - -impl_bigint_from_uint!(u8); -impl_bigint_from_uint!(u16); -impl_bigint_from_uint!(u32); -impl_bigint_from_uint!(usize); - -impl From for BigInt { - #[inline] - fn from(n: BigUint) -> Self { - if n.is_zero() { - BigInt::zero() - } else { - BigInt { - sign: Plus, - data: n, - } - } - } -} - impl IntDigits for BigInt { #[inline] fn digits(&self) -> &[BigDigit] { @@ -2638,29 +558,6 @@ impl IntDigits for BigInt { } } -#[cfg(feature = "serde")] -impl serde::Serialize for BigInt { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - // Note: do not change the serialization format, or it may break - // forward and backward compatibility of serialized data! - (self.sign, &self.data).serialize(serializer) - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for BigInt { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let (sign, data) = serde::Deserialize::deserialize(deserializer)?; - Ok(BigInt::from_biguint(sign, data)) - } -} - /// A generic trait for converting a value to a `BigInt`. This may return /// `None` when converting from `f32` or `f64`, and will always succeed /// when converting from any integer or unsigned primitive, or `BigUint`. @@ -2669,90 +566,6 @@ pub trait ToBigInt { fn to_bigint(&self) -> Option; } -impl ToBigInt for BigInt { - #[inline] - fn to_bigint(&self) -> Option { - Some(self.clone()) - } -} - -impl ToBigInt for BigUint { - #[inline] - fn to_bigint(&self) -> Option { - if self.is_zero() { - Some(Zero::zero()) - } else { - Some(BigInt { - sign: Plus, - data: self.clone(), - }) - } - } -} - -impl biguint::ToBigUint for BigInt { - #[inline] - fn to_biguint(&self) -> Option { - match self.sign() { - Plus => Some(self.data.clone()), - NoSign => Some(Zero::zero()), - Minus => None, - } - } -} - -#[cfg(has_try_from)] -impl TryFrom<&BigInt> for BigUint { - type Error = TryFromBigIntError<()>; - - #[inline] - fn try_from(value: &BigInt) -> Result> { - value.to_biguint().ok_or(TryFromBigIntError::new(())) - } -} - -#[cfg(has_try_from)] -impl TryFrom for BigUint { - type Error = TryFromBigIntError; - - #[inline] - fn try_from(value: BigInt) -> Result> { - if value.sign() == Sign::Minus { - Err(TryFromBigIntError::new(value)) - } else { - Ok(value.data) - } - } -} - -macro_rules! 
impl_to_bigint { - ($T:ty, $from_ty:path) => { - impl ToBigInt for $T { - #[inline] - fn to_bigint(&self) -> Option { - $from_ty(*self) - } - } - }; -} - -impl_to_bigint!(isize, FromPrimitive::from_isize); -impl_to_bigint!(i8, FromPrimitive::from_i8); -impl_to_bigint!(i16, FromPrimitive::from_i16); -impl_to_bigint!(i32, FromPrimitive::from_i32); -impl_to_bigint!(i64, FromPrimitive::from_i64); -impl_to_bigint!(i128, FromPrimitive::from_i128); - -impl_to_bigint!(usize, FromPrimitive::from_usize); -impl_to_bigint!(u8, FromPrimitive::from_u8); -impl_to_bigint!(u16, FromPrimitive::from_u16); -impl_to_bigint!(u32, FromPrimitive::from_u32); -impl_to_bigint!(u64, FromPrimitive::from_u64); -impl_to_bigint!(u128, FromPrimitive::from_u128); - -impl_to_bigint!(f32, FromPrimitive::from_f32); -impl_to_bigint!(f64, FromPrimitive::from_f64); - impl BigInt { /// Creates and initializes a BigInt. /// @@ -2834,20 +647,7 @@ impl BigInt { /// The digits are in big-endian base 28. #[inline] pub fn from_signed_bytes_be(digits: &[u8]) -> BigInt { - let sign = match digits.first() { - Some(v) if *v > 0x7f => Sign::Minus, - Some(_) => Sign::Plus, - None => return BigInt::zero(), - }; - - if sign == Sign::Minus { - // two's-complement the content to retrieve the magnitude - let mut digits = Vec::from(digits); - twos_complement_be(&mut digits); - BigInt::from_biguint(sign, BigUint::from_bytes_be(&*digits)) - } else { - BigInt::from_biguint(sign, BigUint::from_bytes_be(digits)) - } + convert::from_signed_bytes_be(digits) } /// Creates and initializes a `BigInt` from an array of bytes in two's complement. @@ -2855,20 +655,7 @@ impl BigInt { /// The digits are in little-endian base 28. #[inline] pub fn from_signed_bytes_le(digits: &[u8]) -> BigInt { - let sign = match digits.last() { - Some(v) if *v > 0x7f => Sign::Minus, - Some(_) => Sign::Plus, - None => return BigInt::zero(), - }; - - if sign == Sign::Minus { - // two's-complement the content to retrieve the magnitude - let mut digits = Vec::from(digits); - twos_complement_le(&mut digits); - BigInt::from_biguint(sign, BigUint::from_bytes_le(&*digits)) - } else { - BigInt::from_biguint(sign, BigUint::from_bytes_le(digits)) - } + convert::from_signed_bytes_le(digits) } /// Creates and initializes a `BigInt`. @@ -2979,6 +766,65 @@ impl BigInt { (self.sign, self.data.to_u32_digits()) } + /// Returns the sign and the `u64` digits representation of the `BigInt` ordered least + /// significant digit first. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::{BigInt, Sign}; + /// + /// assert_eq!(BigInt::from(-1125).to_u64_digits(), (Sign::Minus, vec![1125])); + /// assert_eq!(BigInt::from(4294967295u32).to_u64_digits(), (Sign::Plus, vec![4294967295])); + /// assert_eq!(BigInt::from(4294967296u64).to_u64_digits(), (Sign::Plus, vec![4294967296])); + /// assert_eq!(BigInt::from(-112500000000i64).to_u64_digits(), (Sign::Minus, vec![112500000000])); + /// assert_eq!(BigInt::from(112500000000i64).to_u64_digits(), (Sign::Plus, vec![112500000000])); + /// assert_eq!(BigInt::from(1u128 << 64).to_u64_digits(), (Sign::Plus, vec![0, 1])); + /// ``` + #[inline] + pub fn to_u64_digits(&self) -> (Sign, Vec) { + (self.sign, self.data.to_u64_digits()) + } + + /// Returns an iterator of `u32` digits representation of the `BigInt` ordered least + /// significant digit first. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigInt; + /// + /// assert_eq!(BigInt::from(-1125).iter_u32_digits().collect::>(), vec![1125]); + /// assert_eq!(BigInt::from(4294967295u32).iter_u32_digits().collect::>(), vec![4294967295]); + /// assert_eq!(BigInt::from(4294967296u64).iter_u32_digits().collect::>(), vec![0, 1]); + /// assert_eq!(BigInt::from(-112500000000i64).iter_u32_digits().collect::>(), vec![830850304, 26]); + /// assert_eq!(BigInt::from(112500000000i64).iter_u32_digits().collect::>(), vec![830850304, 26]); + /// ``` + #[inline] + pub fn iter_u32_digits(&self) -> U32Digits<'_> { + self.data.iter_u32_digits() + } + + /// Returns an iterator of `u64` digits representation of the `BigInt` ordered least + /// significant digit first. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigInt; + /// + /// assert_eq!(BigInt::from(-1125).iter_u64_digits().collect::>(), vec![1125u64]); + /// assert_eq!(BigInt::from(4294967295u32).iter_u64_digits().collect::>(), vec![4294967295u64]); + /// assert_eq!(BigInt::from(4294967296u64).iter_u64_digits().collect::>(), vec![4294967296u64]); + /// assert_eq!(BigInt::from(-112500000000i64).iter_u64_digits().collect::>(), vec![112500000000u64]); + /// assert_eq!(BigInt::from(112500000000i64).iter_u64_digits().collect::>(), vec![112500000000u64]); + /// assert_eq!(BigInt::from(1u128 << 64).iter_u64_digits().collect::>(), vec![0, 1]); + /// ``` + #[inline] + pub fn iter_u64_digits(&self) -> U64Digits<'_> { + self.data.iter_u64_digits() + } + /// Returns the two's-complement byte representation of the `BigInt` in big-endian byte order. /// /// # Examples @@ -2991,20 +837,7 @@ impl BigInt { /// ``` #[inline] pub fn to_signed_bytes_be(&self) -> Vec { - let mut bytes = self.data.to_bytes_be(); - let first_byte = bytes.first().cloned().unwrap_or(0); - if first_byte > 0x7f - && !(first_byte == 0x80 - && bytes.iter().skip(1).all(Zero::is_zero) - && self.sign == Sign::Minus) - { - // msb used by magnitude, extend by 1 byte - bytes.insert(0, 0); - } - if self.sign == Sign::Minus { - twos_complement_be(&mut bytes); - } - bytes + convert::to_signed_bytes_be(self) } /// Returns the two's-complement byte representation of the `BigInt` in little-endian byte order. @@ -3019,20 +852,7 @@ impl BigInt { /// ``` #[inline] pub fn to_signed_bytes_le(&self) -> Vec { - let mut bytes = self.data.to_bytes_le(); - let last_byte = bytes.last().cloned().unwrap_or(0); - if last_byte > 0x7f - && !(last_byte == 0x80 - && bytes.iter().rev().skip(1).all(Zero::is_zero) - && self.sign == Sign::Minus) - { - // msb used by magnitude, extend by 1 byte - bytes.push(0); - } - if self.sign == Sign::Minus { - twos_complement_le(&mut bytes); - } - bytes + convert::to_signed_bytes_le(self) } /// Returns the integer formatted as a string in the given radix. @@ -3167,17 +987,17 @@ impl BigInt { #[inline] pub fn checked_add(&self, v: &BigInt) -> Option { - Some(self.add(v)) + Some(self + v) } #[inline] pub fn checked_sub(&self, v: &BigInt) -> Option { - Some(self.sub(v)) + Some(self - v) } #[inline] pub fn checked_mul(&self, v: &BigInt) -> Option { - Some(self.mul(v)) + Some(self * v) } #[inline] @@ -3185,7 +1005,7 @@ impl BigInt { if v.is_zero() { return None; } - Some(self.div(v)) + Some(self / v) } /// Returns `self ^ exponent`. @@ -3202,31 +1022,7 @@ impl BigInt { /// /// Panics if the exponent is negative or the modulus is zero. 
pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self { - assert!( - !exponent.is_negative(), - "negative exponentiation is not supported!" - ); - assert!( - !modulus.is_zero(), - "attempt to calculate with zero modulus!" - ); - - let result = self.data.modpow(&exponent.data, &modulus.data); - if result.is_zero() { - return BigInt::zero(); - } - - // The sign of the result follows the modulus, like `mod_floor`. - let (sign, mag) = match ( - self.is_negative() && exponent.is_odd(), - modulus.is_negative(), - ) { - (false, false) => (Plus, result), - (true, false) => (Plus, &modulus.data - result), - (false, true) => (Minus, &modulus.data - result), - (true, true) => (Minus, result), - }; - BigInt::from_biguint(sign, mag) + power::modpow(self, exponent, modulus) } /// Returns the truncated principal square root of `self` -- @@ -3252,49 +1048,62 @@ impl BigInt { pub fn trailing_zeros(&self) -> Option { self.data.trailing_zeros() } -} - -impl_sum_iter_type!(BigInt); -impl_product_iter_type!(BigInt); - -/// Perform in-place two's complement of the given binary representation, -/// in little-endian byte order. -#[inline] -fn twos_complement_le(digits: &mut [u8]) { - twos_complement(digits) -} -/// Perform in-place two's complement of the given binary representation -/// in big-endian byte order. -#[inline] -fn twos_complement_be(digits: &mut [u8]) { - twos_complement(digits.iter_mut().rev()) -} + /// Returns whether the bit in position `bit` is set, + /// using the two's complement for negative numbers + pub fn bit(&self, bit: u64) -> bool { + if self.is_negative() { + // Let the binary representation of a number be + // ... 0 x 1 0 ... 0 + // Then the two's complement is + // ... 1 !x 1 0 ... 0 + // where !x is obtained from x by flipping each bit + if bit >= u64::from(crate::big_digit::BITS) * self.len() as u64 { + true + } else { + let trailing_zeros = self.data.trailing_zeros().unwrap(); + match Ord::cmp(&bit, &trailing_zeros) { + Ordering::Less => false, + Ordering::Equal => true, + Ordering::Greater => !self.data.bit(bit), + } + } + } else { + self.data.bit(bit) + } + } -/// Perform in-place two's complement of the given digit iterator -/// starting from the least significant byte. 
-#[inline] -fn twos_complement<'a, I>(digits: I) -where - I: IntoIterator, -{ - let mut carry = true; - for d in digits { - *d = d.not(); - if carry { - *d = d.wrapping_add(1); - carry = d.is_zero(); + /// Sets or clears the bit in the given position, + /// using the two's complement for negative numbers + /// + /// Note that setting/clearing a bit (for positive/negative numbers, + /// respectively) greater than the current bit length, a reallocation + /// may be needed to store the new digits + pub fn set_bit(&mut self, bit: u64, value: bool) { + match self.sign { + Sign::Plus => self.data.set_bit(bit, value), + Sign::Minus => bits::set_negative_bit(self, bit, value), + Sign::NoSign => { + if value { + self.data.set_bit(bit, true); + self.sign = Sign::Plus; + } else { + // Clearing a bit for zero is a no-op + } + } } + // The top bit may have been cleared, so normalize + self.normalize(); } } #[test] fn test_from_biguint() { fn check(inp_s: Sign, inp_n: usize, ans_s: Sign, ans_n: usize) { - let inp = BigInt::from_biguint(inp_s, FromPrimitive::from_usize(inp_n).unwrap()); + let inp = BigInt::from_biguint(inp_s, BigUint::from(inp_n)); let ans = BigInt { sign: ans_s, - data: FromPrimitive::from_usize(ans_n).unwrap(), + data: BigUint::from(ans_n), }; assert_eq!(inp, ans); } @@ -3310,7 +1119,7 @@ fn test_from_slice() { let inp = BigInt::from_slice(inp_s, &[inp_n]); let ans = BigInt { sign: ans_s, - data: FromPrimitive::from_u32(ans_n).unwrap(), + data: BigUint::from(ans_n), }; assert_eq!(inp, ans); } @@ -3327,7 +1136,7 @@ fn test_assign_from_slice() { inp.assign_from_slice(inp_s, &[inp_n]); let ans = BigInt { sign: ans_s, - data: FromPrimitive::from_u32(ans_n).unwrap(), + data: BigUint::from(ans_n), }; assert_eq!(inp, ans); } diff --git a/src/rust/vendor/num-bigint/src/bigint/addition.rs b/src/rust/vendor/num-bigint/src/bigint/addition.rs new file mode 100644 index 000000000..b999f6251 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/bigint/addition.rs @@ -0,0 +1,239 @@ +use super::CheckedUnsignedAbs::{Negative, Positive}; +use super::Sign::{Minus, NoSign, Plus}; +use super::{BigInt, UnsignedAbs}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::cmp::Ordering::{Equal, Greater, Less}; +use core::iter::Sum; +use core::mem; +use core::ops::{Add, AddAssign}; +use num_traits::{CheckedAdd, Zero}; + +// We want to forward to BigUint::add, but it's not clear how that will go until +// we compare both sign and magnitude. So we duplicate this body for every +// val/ref combination, deferring that decision to BigUint's own forwarding. +macro_rules! 
bigint_add { + ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { + match ($a.sign, $b.sign) { + (_, NoSign) => $a_owned, + (NoSign, _) => $b_owned, + // same sign => keep the sign with the sum of magnitudes + (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data), + // opposite signs => keep the sign of the larger with the difference of magnitudes + (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) { + Less => BigInt::from_biguint($b.sign, $b_data - $a_data), + Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), + Equal => Zero::zero(), + }, + } + }; +} + +impl<'a, 'b> Add<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: &BigInt) -> BigInt { + bigint_add!( + self, + self.clone(), + &self.data, + other, + other.clone(), + &other.data + ) + } +} + +impl<'a> Add for &'a BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: BigInt) -> BigInt { + bigint_add!(self, self.clone(), &self.data, other, other, other.data) + } +} + +impl<'a> Add<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: &BigInt) -> BigInt { + bigint_add!(self, self, self.data, other, other.clone(), &other.data) + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: BigInt) -> BigInt { + bigint_add!(self, self, self.data, other, other, other.data) + } +} + +impl<'a> AddAssign<&'a BigInt> for BigInt { + #[inline] + fn add_assign(&mut self, other: &BigInt) { + let n = mem::replace(self, BigInt::zero()); + *self = n + other; + } +} +forward_val_assign!(impl AddAssign for BigInt, add_assign); + +promote_all_scalars!(impl Add for BigInt, add); +promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: u32) -> BigInt { + match self.sign { + NoSign => From::from(other), + Plus => BigInt::from(self.data + other), + Minus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Less => BigInt::from(other - self.data), + Greater => -BigInt::from(self.data - other), + }, + } + } +} + +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: u32) { + let n = mem::replace(self, BigInt::zero()); + *self = n + other; + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: u64) -> BigInt { + match self.sign { + NoSign => From::from(other), + Plus => BigInt::from(self.data + other), + Minus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Less => BigInt::from(other - self.data), + Greater => -BigInt::from(self.data - other), + }, + } + } +} + +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: u64) { + let n = mem::replace(self, BigInt::zero()); + *self = n + other; + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: u128) -> BigInt { + match self.sign { + NoSign => BigInt::from(other), + Plus => BigInt::from(self.data + other), + Minus => match self.data.cmp(&From::from(other)) { + Equal => BigInt::zero(), + Less => BigInt::from(other - self.data), + Greater => -BigInt::from(self.data - other), + }, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut 
self, other: u128) { + let n = mem::replace(self, BigInt::zero()); + *self = n + other; + } +} + +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigInt, add); + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self + u, + Negative(u) => self - u, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self += u, + Negative(u) => *self -= u, + } + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self + u, + Negative(u) => self - u, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self += u, + Negative(u) => *self -= u, + } + } +} + +impl Add for BigInt { + type Output = BigInt; + + #[inline] + fn add(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self + u, + Negative(u) => self - u, + } + } +} +impl AddAssign for BigInt { + #[inline] + fn add_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self += u, + Negative(u) => *self -= u, + } + } +} + +impl CheckedAdd for BigInt { + #[inline] + fn checked_add(&self, v: &BigInt) -> Option { + Some(self.add(v)) + } +} + +impl_sum_iter_type!(BigInt); diff --git a/src/rust/vendor/num-bigint/src/bigint/arbitrary.rs b/src/rust/vendor/num-bigint/src/bigint/arbitrary.rs new file mode 100644 index 000000000..df66050e7 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/bigint/arbitrary.rs @@ -0,0 +1,39 @@ +use super::{BigInt, Sign}; + +#[cfg(feature = "quickcheck")] +use crate::std_alloc::Box; +use crate::BigUint; + +#[cfg(feature = "quickcheck")] +impl quickcheck::Arbitrary for BigInt { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + let positive = bool::arbitrary(g); + let sign = if positive { Sign::Plus } else { Sign::Minus }; + Self::from_biguint(sign, BigUint::arbitrary(g)) + } + + fn shrink(&self) -> Box> { + let sign = self.sign(); + let unsigned_shrink = self.data.shrink(); + Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x))) + } +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary<'_> for BigInt { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + let positive = bool::arbitrary(u)?; + let sign = if positive { Sign::Plus } else { Sign::Minus }; + Ok(Self::from_biguint(sign, BigUint::arbitrary(u)?)) + } + + fn arbitrary_take_rest(mut u: arbitrary::Unstructured<'_>) -> arbitrary::Result { + let positive = bool::arbitrary(&mut u)?; + let sign = if positive { Sign::Plus } else { Sign::Minus }; + Ok(Self::from_biguint(sign, BigUint::arbitrary_take_rest(u)?)) + } + + fn size_hint(depth: usize) -> (usize, Option) { + arbitrary::size_hint::and(bool::size_hint(depth), BigUint::size_hint(depth)) + } +} diff --git a/src/rust/vendor/num-bigint/src/bigint/bits.rs b/src/rust/vendor/num-bigint/src/bigint/bits.rs new file mode 100644 index 000000000..686def4d4 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/bigint/bits.rs @@ -0,0 +1,531 @@ +use super::BigInt; +use super::Sign::{Minus, NoSign, Plus}; + +use crate::big_digit::{self, BigDigit, DoubleBigDigit}; +use crate::biguint::IntDigits; +use 
crate::std_alloc::Vec; + +use core::cmp::Ordering::{Equal, Greater, Less}; +use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign}; +use num_traits::{ToPrimitive, Zero}; + +// Negation in two's complement. +// acc must be initialized as 1 for least-significant digit. +// +// When negating, a carry (acc == 1) means that all the digits +// considered to this point were zero. This means that if all the +// digits of a negative BigInt have been considered, carry must be +// zero as we cannot have negative zero. +// +// 01 -> ...f ff +// ff -> ...f 01 +// 01 00 -> ...f ff 00 +// 01 01 -> ...f fe ff +// 01 ff -> ...f fe 01 +// ff 00 -> ...f 01 00 +// ff 01 -> ...f 00 ff +// ff ff -> ...f 00 01 +#[inline] +fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { + *acc += DoubleBigDigit::from(!a); + let lo = *acc as BigDigit; + *acc >>= big_digit::BITS; + lo +} + +// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1 +// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff +// answer is pos, has length of a +fn bitand_pos_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_b = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_b = negate_carry(bi, &mut carry_b); + *ai &= twos_b; + } + debug_assert!(b.len() > a.len() || carry_b == 0); +} + +// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff +// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1 +// answer is pos, has length of b +fn bitand_neg_pos(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = twos_a & bi; + } + debug_assert!(a.len() > b.len() || carry_a == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => a.truncate(b.len()), + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().cloned()); + } + } +} + +// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff +// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff +// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100 +// answer is neg, has length of longest with a possible carry +fn bitand_neg_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_b = 1; + let mut carry_and = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(twos_a & twos_b, &mut carry_and); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a, &mut carry_and); + } + debug_assert!(carry_a == 0); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_b = negate_carry(bi, &mut carry_b); + negate_carry(twos_b, &mut carry_and) + })); + debug_assert!(carry_b == 0); + } + } + if carry_and != 0 { + a.push(1); + } +} + +forward_val_val_binop!(impl BitAnd for BigInt, bitand); +forward_ref_val_binop!(impl BitAnd for BigInt, bitand); + +// do not use forward_ref_ref_binop_commutative! 
for bitand so that we can +// clone as needed, avoiding over-allocation +impl<'a, 'b> BitAnd<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn bitand(self, other: &BigInt) -> BigInt { + match (self.sign, other.sign) { + (NoSign, _) | (_, NoSign) => BigInt::zero(), + (Plus, Plus) => BigInt::from(&self.data & &other.data), + (Plus, Minus) => self.clone() & other, + (Minus, Plus) => other.clone() & self, + (Minus, Minus) => { + // forward to val-ref, choosing the larger to clone + if self.len() >= other.len() { + self.clone() & other + } else { + other.clone() & self + } + } + } + } +} + +impl<'a> BitAnd<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn bitand(mut self, other: &BigInt) -> BigInt { + self &= other; + self + } +} + +forward_val_assign!(impl BitAndAssign for BigInt, bitand_assign); + +impl<'a> BitAndAssign<&'a BigInt> for BigInt { + fn bitand_assign(&mut self, other: &BigInt) { + match (self.sign, other.sign) { + (NoSign, _) => {} + (_, NoSign) => self.set_zero(), + (Plus, Plus) => { + self.data &= &other.data; + if self.data.is_zero() { + self.sign = NoSign; + } + } + (Plus, Minus) => { + bitand_pos_neg(self.digits_mut(), other.digits()); + self.normalize(); + } + (Minus, Plus) => { + bitand_neg_pos(self.digits_mut(), other.digits()); + self.sign = Plus; + self.normalize(); + } + (Minus, Minus) => { + bitand_neg_neg(self.digits_mut(), other.digits()); + self.normalize(); + } + } + } +} + +// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff +// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1 +// answer is neg, has length of b +fn bitor_pos_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_b = 1; + let mut carry_or = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(*ai | twos_b, &mut carry_or); + } + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + a.truncate(b.len()); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_b = negate_carry(bi, &mut carry_b); + negate_carry(twos_b, &mut carry_or) + })); + debug_assert!(carry_b == 0); + } + } + // for carry_or to be non-zero, we would need twos_b == 0 + debug_assert!(carry_or == 0); +} + +// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1 +// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff +// answer is neg, has length of a +fn bitor_neg_pos(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_or = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a | bi, &mut carry_or); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + if a.len() > b.len() { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a, &mut carry_or); + } + debug_assert!(carry_a == 0); + } + // for carry_or to be non-zero, we would need twos_a == 0 + debug_assert!(carry_or == 0); +} + +// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1 +// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1 +// answer is neg, has length of shortest +fn bitor_neg_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_b = 1; + let mut carry_or = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(twos_a | twos_b, &mut carry_or); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + 
debug_assert!(b.len() > a.len() || carry_b == 0); + if a.len() > b.len() { + a.truncate(b.len()); + } + // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0 + debug_assert!(carry_or == 0); +} + +forward_val_val_binop!(impl BitOr for BigInt, bitor); +forward_ref_val_binop!(impl BitOr for BigInt, bitor); + +// do not use forward_ref_ref_binop_commutative! for bitor so that we can +// clone as needed, avoiding over-allocation +impl<'a, 'b> BitOr<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn bitor(self, other: &BigInt) -> BigInt { + match (self.sign, other.sign) { + (NoSign, _) => other.clone(), + (_, NoSign) => self.clone(), + (Plus, Plus) => BigInt::from(&self.data | &other.data), + (Plus, Minus) => other.clone() | self, + (Minus, Plus) => self.clone() | other, + (Minus, Minus) => { + // forward to val-ref, choosing the smaller to clone + if self.len() <= other.len() { + self.clone() | other + } else { + other.clone() | self + } + } + } + } +} + +impl<'a> BitOr<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn bitor(mut self, other: &BigInt) -> BigInt { + self |= other; + self + } +} + +forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign); + +impl<'a> BitOrAssign<&'a BigInt> for BigInt { + fn bitor_assign(&mut self, other: &BigInt) { + match (self.sign, other.sign) { + (_, NoSign) => {} + (NoSign, _) => self.clone_from(other), + (Plus, Plus) => self.data |= &other.data, + (Plus, Minus) => { + bitor_pos_neg(self.digits_mut(), other.digits()); + self.sign = Minus; + self.normalize(); + } + (Minus, Plus) => { + bitor_neg_pos(self.digits_mut(), other.digits()); + self.normalize(); + } + (Minus, Minus) => { + bitor_neg_neg(self.digits_mut(), other.digits()); + self.normalize(); + } + } + } +} + +// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100 +// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100 +// answer is neg, has length of longest with a possible carry +fn bitxor_pos_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_b = 1; + let mut carry_xor = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_b = negate_carry(bi, &mut carry_b); + *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); + } + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_b = !0; + *ai = negate_carry(*ai ^ twos_b, &mut carry_xor); + } + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_b = negate_carry(bi, &mut carry_b); + negate_carry(twos_b, &mut carry_xor) + })); + debug_assert!(carry_b == 0); + } + } + if carry_xor != 0 { + a.push(1); + } +} + +// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100 +// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100 +// answer is neg, has length of longest with a possible carry +fn bitxor_neg_pos(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_xor = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a ^ bi, &mut carry_xor); + } + debug_assert!(a.len() > b.len() || carry_a == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + *ai = negate_carry(twos_a, &mut carry_xor); + } + debug_assert!(carry_a == 0); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_a = !0; + negate_carry(twos_a ^ bi, &mut carry_xor) + })); + } + } 
+ if carry_xor != 0 { + a.push(1); + } +} + +// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe +// -ff & - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe +// answer is pos, has length of longest +fn bitxor_neg_neg(a: &mut Vec, b: &[BigDigit]) { + let mut carry_a = 1; + let mut carry_b = 1; + for (ai, &bi) in a.iter_mut().zip(b.iter()) { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = negate_carry(bi, &mut carry_b); + *ai = twos_a ^ twos_b; + } + debug_assert!(a.len() > b.len() || carry_a == 0); + debug_assert!(b.len() > a.len() || carry_b == 0); + match Ord::cmp(&a.len(), &b.len()) { + Greater => { + for ai in a[b.len()..].iter_mut() { + let twos_a = negate_carry(*ai, &mut carry_a); + let twos_b = !0; + *ai = twos_a ^ twos_b; + } + debug_assert!(carry_a == 0); + } + Equal => {} + Less => { + let extra = &b[a.len()..]; + a.extend(extra.iter().map(|&bi| { + let twos_a = !0; + let twos_b = negate_carry(bi, &mut carry_b); + twos_a ^ twos_b + })); + debug_assert!(carry_b == 0); + } + } +} + +forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor); + +impl<'a> BitXor<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn bitxor(mut self, other: &BigInt) -> BigInt { + self ^= other; + self + } +} + +forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign); + +impl<'a> BitXorAssign<&'a BigInt> for BigInt { + fn bitxor_assign(&mut self, other: &BigInt) { + match (self.sign, other.sign) { + (_, NoSign) => {} + (NoSign, _) => self.clone_from(other), + (Plus, Plus) => { + self.data ^= &other.data; + if self.data.is_zero() { + self.sign = NoSign; + } + } + (Plus, Minus) => { + bitxor_pos_neg(self.digits_mut(), other.digits()); + self.sign = Minus; + self.normalize(); + } + (Minus, Plus) => { + bitxor_neg_pos(self.digits_mut(), other.digits()); + self.normalize(); + } + (Minus, Minus) => { + bitxor_neg_neg(self.digits_mut(), other.digits()); + self.sign = Plus; + self.normalize(); + } + } + } +} + +pub(super) fn set_negative_bit(x: &mut BigInt, bit: u64, value: bool) { + debug_assert_eq!(x.sign, Minus); + let data = &mut x.data; + + let bits_per_digit = u64::from(big_digit::BITS); + if bit >= bits_per_digit * data.len() as u64 { + if !value { + data.set_bit(bit, true); + } + } else { + // If the Uint number is + // ... 0 x 1 0 ... 0 + // then the two's complement is + // ... 1 !x 1 0 ... 0 + // |-- bit at position 'trailing_zeros' + // where !x is obtained from x by flipping each bit + let trailing_zeros = data.trailing_zeros().unwrap(); + if bit > trailing_zeros { + data.set_bit(bit, !value); + } else if bit == trailing_zeros && !value { + // Clearing the bit at position `trailing_zeros` is dealt with by doing + // similarly to what `bitand_neg_pos` does, except we start at digit + // `bit_index`. All digits below `bit_index` are guaranteed to be zero, + // so initially we have `carry_in` = `carry_out` = 1. Furthermore, we + // stop traversing the digits when there are no more carries. 
+ let bit_index = (bit / bits_per_digit).to_usize().unwrap(); + let bit_mask = (1 as BigDigit) << (bit % bits_per_digit); + let mut digit_iter = data.digits_mut().iter_mut().skip(bit_index); + let mut carry_in = 1; + let mut carry_out = 1; + + let digit = digit_iter.next().unwrap(); + let twos_in = negate_carry(*digit, &mut carry_in); + let twos_out = twos_in & !bit_mask; + *digit = negate_carry(twos_out, &mut carry_out); + + for digit in digit_iter { + if carry_in == 0 && carry_out == 0 { + // Exit the loop since no more digits can change + break; + } + let twos = negate_carry(*digit, &mut carry_in); + *digit = negate_carry(twos, &mut carry_out); + } + + if carry_out != 0 { + // All digits have been traversed and there is a carry + debug_assert_eq!(carry_in, 0); + data.digits_mut().push(1); + } + } else if bit < trailing_zeros && value { + // Flip each bit from position 'bit' to 'trailing_zeros', both inclusive + // ... 1 !x 1 0 ... 0 ... 0 + // |-- bit at position 'bit' + // |-- bit at position 'trailing_zeros' + // bit_mask: 1 1 ... 1 0 .. 0 + // This is done by xor'ing with the bit_mask + let index_lo = (bit / bits_per_digit).to_usize().unwrap(); + let index_hi = (trailing_zeros / bits_per_digit).to_usize().unwrap(); + let bit_mask_lo = big_digit::MAX << (bit % bits_per_digit); + let bit_mask_hi = + big_digit::MAX >> (bits_per_digit - 1 - (trailing_zeros % bits_per_digit)); + let digits = data.digits_mut(); + + if index_lo == index_hi { + digits[index_lo] ^= bit_mask_lo & bit_mask_hi; + } else { + digits[index_lo] = bit_mask_lo; + for digit in &mut digits[index_lo + 1..index_hi] { + *digit = big_digit::MAX; + } + digits[index_hi] ^= bit_mask_hi; + } + } else { + // We end up here in two cases: + // bit == trailing_zeros && value: Bit is already set + // bit < trailing_zeros && !value: Bit is already cleared + } + } +} diff --git a/src/rust/vendor/num-bigint/src/bigint/convert.rs b/src/rust/vendor/num-bigint/src/bigint/convert.rs new file mode 100644 index 000000000..ff8e04ef4 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/bigint/convert.rs @@ -0,0 +1,469 @@ +use super::Sign::{self, Minus, NoSign, Plus}; +use super::{BigInt, ToBigInt}; + +use crate::std_alloc::Vec; +#[cfg(has_try_from)] +use crate::TryFromBigIntError; +use crate::{BigUint, ParseBigIntError, ToBigUint}; + +use core::cmp::Ordering::{Equal, Greater, Less}; +#[cfg(has_try_from)] +use core::convert::TryFrom; +use core::str::{self, FromStr}; +use num_traits::{FromPrimitive, Num, ToPrimitive, Zero}; + +impl FromStr for BigInt { + type Err = ParseBigIntError; + + #[inline] + fn from_str(s: &str) -> Result { + BigInt::from_str_radix(s, 10) + } +} + +impl Num for BigInt { + type FromStrRadixErr = ParseBigIntError; + + /// Creates and initializes a BigInt. 
+ #[inline] + fn from_str_radix(mut s: &str, radix: u32) -> Result { + let sign = if s.starts_with('-') { + let tail = &s[1..]; + if !tail.starts_with('+') { + s = tail + } + Minus + } else { + Plus + }; + let bu = BigUint::from_str_radix(s, radix)?; + Ok(BigInt::from_biguint(sign, bu)) + } +} + +impl ToPrimitive for BigInt { + #[inline] + fn to_i64(&self) -> Option { + match self.sign { + Plus => self.data.to_i64(), + NoSign => Some(0), + Minus => { + let n = self.data.to_u64()?; + let m: u64 = 1 << 63; + match n.cmp(&m) { + Less => Some(-(n as i64)), + Equal => Some(core::i64::MIN), + Greater => None, + } + } + } + } + + #[inline] + fn to_i128(&self) -> Option { + match self.sign { + Plus => self.data.to_i128(), + NoSign => Some(0), + Minus => { + let n = self.data.to_u128()?; + let m: u128 = 1 << 127; + match n.cmp(&m) { + Less => Some(-(n as i128)), + Equal => Some(core::i128::MIN), + Greater => None, + } + } + } + } + + #[inline] + fn to_u64(&self) -> Option { + match self.sign { + Plus => self.data.to_u64(), + NoSign => Some(0), + Minus => None, + } + } + + #[inline] + fn to_u128(&self) -> Option { + match self.sign { + Plus => self.data.to_u128(), + NoSign => Some(0), + Minus => None, + } + } + + #[inline] + fn to_f32(&self) -> Option { + let n = self.data.to_f32()?; + Some(if self.sign == Minus { -n } else { n }) + } + + #[inline] + fn to_f64(&self) -> Option { + let n = self.data.to_f64()?; + Some(if self.sign == Minus { -n } else { n }) + } +} + +macro_rules! impl_try_from_bigint { + ($T:ty, $to_ty:path) => { + #[cfg(has_try_from)] + impl TryFrom<&BigInt> for $T { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: &BigInt) -> Result<$T, TryFromBigIntError<()>> { + $to_ty(value).ok_or(TryFromBigIntError::new(())) + } + } + + #[cfg(has_try_from)] + impl TryFrom for $T { + type Error = TryFromBigIntError; + + #[inline] + fn try_from(value: BigInt) -> Result<$T, TryFromBigIntError> { + <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) + } + } + }; +} + +impl_try_from_bigint!(u8, ToPrimitive::to_u8); +impl_try_from_bigint!(u16, ToPrimitive::to_u16); +impl_try_from_bigint!(u32, ToPrimitive::to_u32); +impl_try_from_bigint!(u64, ToPrimitive::to_u64); +impl_try_from_bigint!(usize, ToPrimitive::to_usize); +impl_try_from_bigint!(u128, ToPrimitive::to_u128); + +impl_try_from_bigint!(i8, ToPrimitive::to_i8); +impl_try_from_bigint!(i16, ToPrimitive::to_i16); +impl_try_from_bigint!(i32, ToPrimitive::to_i32); +impl_try_from_bigint!(i64, ToPrimitive::to_i64); +impl_try_from_bigint!(isize, ToPrimitive::to_isize); +impl_try_from_bigint!(i128, ToPrimitive::to_i128); + +impl FromPrimitive for BigInt { + #[inline] + fn from_i64(n: i64) -> Option { + Some(BigInt::from(n)) + } + + #[inline] + fn from_i128(n: i128) -> Option { + Some(BigInt::from(n)) + } + + #[inline] + fn from_u64(n: u64) -> Option { + Some(BigInt::from(n)) + } + + #[inline] + fn from_u128(n: u128) -> Option { + Some(BigInt::from(n)) + } + + #[inline] + fn from_f64(n: f64) -> Option { + if n >= 0.0 { + BigUint::from_f64(n).map(BigInt::from) + } else { + let x = BigUint::from_f64(-n)?; + Some(-BigInt::from(x)) + } + } +} + +impl From for BigInt { + #[inline] + fn from(n: i64) -> Self { + if n >= 0 { + BigInt::from(n as u64) + } else { + let u = core::u64::MAX - (n as u64) + 1; + BigInt { + sign: Minus, + data: BigUint::from(u), + } + } + } +} + +impl From for BigInt { + #[inline] + fn from(n: i128) -> Self { + if n >= 0 { + BigInt::from(n as u128) + } else { + let u = core::u128::MAX - (n as u128) 
+ 1; + BigInt { + sign: Minus, + data: BigUint::from(u), + } + } + } +} + +macro_rules! impl_bigint_from_int { + ($T:ty) => { + impl From<$T> for BigInt { + #[inline] + fn from(n: $T) -> Self { + BigInt::from(n as i64) + } + } + }; +} + +impl_bigint_from_int!(i8); +impl_bigint_from_int!(i16); +impl_bigint_from_int!(i32); +impl_bigint_from_int!(isize); + +impl From for BigInt { + #[inline] + fn from(n: u64) -> Self { + if n > 0 { + BigInt { + sign: Plus, + data: BigUint::from(n), + } + } else { + BigInt::zero() + } + } +} + +impl From for BigInt { + #[inline] + fn from(n: u128) -> Self { + if n > 0 { + BigInt { + sign: Plus, + data: BigUint::from(n), + } + } else { + BigInt::zero() + } + } +} + +macro_rules! impl_bigint_from_uint { + ($T:ty) => { + impl From<$T> for BigInt { + #[inline] + fn from(n: $T) -> Self { + BigInt::from(n as u64) + } + } + }; +} + +impl_bigint_from_uint!(u8); +impl_bigint_from_uint!(u16); +impl_bigint_from_uint!(u32); +impl_bigint_from_uint!(usize); + +impl From for BigInt { + #[inline] + fn from(n: BigUint) -> Self { + if n.is_zero() { + BigInt::zero() + } else { + BigInt { + sign: Plus, + data: n, + } + } + } +} + +impl ToBigInt for BigInt { + #[inline] + fn to_bigint(&self) -> Option { + Some(self.clone()) + } +} + +impl ToBigInt for BigUint { + #[inline] + fn to_bigint(&self) -> Option { + if self.is_zero() { + Some(Zero::zero()) + } else { + Some(BigInt { + sign: Plus, + data: self.clone(), + }) + } + } +} + +impl ToBigUint for BigInt { + #[inline] + fn to_biguint(&self) -> Option { + match self.sign() { + Plus => Some(self.data.clone()), + NoSign => Some(Zero::zero()), + Minus => None, + } + } +} + +#[cfg(has_try_from)] +impl TryFrom<&BigInt> for BigUint { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: &BigInt) -> Result> { + value + .to_biguint() + .ok_or_else(|| TryFromBigIntError::new(())) + } +} + +#[cfg(has_try_from)] +impl TryFrom for BigUint { + type Error = TryFromBigIntError; + + #[inline] + fn try_from(value: BigInt) -> Result> { + if value.sign() == Sign::Minus { + Err(TryFromBigIntError::new(value)) + } else { + Ok(value.data) + } + } +} + +macro_rules! 
impl_to_bigint { + ($T:ty, $from_ty:path) => { + impl ToBigInt for $T { + #[inline] + fn to_bigint(&self) -> Option { + $from_ty(*self) + } + } + }; +} + +impl_to_bigint!(isize, FromPrimitive::from_isize); +impl_to_bigint!(i8, FromPrimitive::from_i8); +impl_to_bigint!(i16, FromPrimitive::from_i16); +impl_to_bigint!(i32, FromPrimitive::from_i32); +impl_to_bigint!(i64, FromPrimitive::from_i64); +impl_to_bigint!(i128, FromPrimitive::from_i128); + +impl_to_bigint!(usize, FromPrimitive::from_usize); +impl_to_bigint!(u8, FromPrimitive::from_u8); +impl_to_bigint!(u16, FromPrimitive::from_u16); +impl_to_bigint!(u32, FromPrimitive::from_u32); +impl_to_bigint!(u64, FromPrimitive::from_u64); +impl_to_bigint!(u128, FromPrimitive::from_u128); + +impl_to_bigint!(f32, FromPrimitive::from_f32); +impl_to_bigint!(f64, FromPrimitive::from_f64); + +#[inline] +pub(super) fn from_signed_bytes_be(digits: &[u8]) -> BigInt { + let sign = match digits.first() { + Some(v) if *v > 0x7f => Sign::Minus, + Some(_) => Sign::Plus, + None => return BigInt::zero(), + }; + + if sign == Sign::Minus { + // two's-complement the content to retrieve the magnitude + let mut digits = Vec::from(digits); + twos_complement_be(&mut digits); + BigInt::from_biguint(sign, BigUint::from_bytes_be(&*digits)) + } else { + BigInt::from_biguint(sign, BigUint::from_bytes_be(digits)) + } +} + +#[inline] +pub(super) fn from_signed_bytes_le(digits: &[u8]) -> BigInt { + let sign = match digits.last() { + Some(v) if *v > 0x7f => Sign::Minus, + Some(_) => Sign::Plus, + None => return BigInt::zero(), + }; + + if sign == Sign::Minus { + // two's-complement the content to retrieve the magnitude + let mut digits = Vec::from(digits); + twos_complement_le(&mut digits); + BigInt::from_biguint(sign, BigUint::from_bytes_le(&*digits)) + } else { + BigInt::from_biguint(sign, BigUint::from_bytes_le(digits)) + } +} + +#[inline] +pub(super) fn to_signed_bytes_be(x: &BigInt) -> Vec { + let mut bytes = x.data.to_bytes_be(); + let first_byte = bytes.first().cloned().unwrap_or(0); + if first_byte > 0x7f + && !(first_byte == 0x80 && bytes.iter().skip(1).all(Zero::is_zero) && x.sign == Sign::Minus) + { + // msb used by magnitude, extend by 1 byte + bytes.insert(0, 0); + } + if x.sign == Sign::Minus { + twos_complement_be(&mut bytes); + } + bytes +} + +#[inline] +pub(super) fn to_signed_bytes_le(x: &BigInt) -> Vec { + let mut bytes = x.data.to_bytes_le(); + let last_byte = bytes.last().cloned().unwrap_or(0); + if last_byte > 0x7f + && !(last_byte == 0x80 + && bytes.iter().rev().skip(1).all(Zero::is_zero) + && x.sign == Sign::Minus) + { + // msb used by magnitude, extend by 1 byte + bytes.push(0); + } + if x.sign == Sign::Minus { + twos_complement_le(&mut bytes); + } + bytes +} + +/// Perform in-place two's complement of the given binary representation, +/// in little-endian byte order. +#[inline] +fn twos_complement_le(digits: &mut [u8]) { + twos_complement(digits) +} + +/// Perform in-place two's complement of the given binary representation +/// in big-endian byte order. +#[inline] +fn twos_complement_be(digits: &mut [u8]) { + twos_complement(digits.iter_mut().rev()) +} + +/// Perform in-place two's complement of the given digit iterator +/// starting from the least significant byte. 
+#[inline] +fn twos_complement<'a, I>(digits: I) +where + I: IntoIterator, +{ + let mut carry = true; + for d in digits { + *d = !*d; + if carry { + *d = d.wrapping_add(1); + carry = d.is_zero(); + } + } +} diff --git a/src/rust/vendor/num-bigint/src/bigint/division.rs b/src/rust/vendor/num-bigint/src/bigint/division.rs new file mode 100644 index 000000000..a702b8f24 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/bigint/division.rs @@ -0,0 +1,448 @@ +use super::CheckedUnsignedAbs::{Negative, Positive}; +use super::Sign::NoSign; +use super::{BigInt, UnsignedAbs}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::ops::{Div, DivAssign, Rem, RemAssign}; +use num_integer::Integer; +use num_traits::{CheckedDiv, ToPrimitive, Zero}; + +forward_all_binop_to_ref_ref!(impl Div for BigInt, div); + +impl<'a, 'b> Div<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: &BigInt) -> BigInt { + let (q, _) = self.div_rem(other); + q + } +} + +impl<'a> DivAssign<&'a BigInt> for BigInt { + #[inline] + fn div_assign(&mut self, other: &BigInt) { + *self = &*self / other; + } +} +forward_val_assign!(impl DivAssign for BigInt, div_assign); + +promote_all_scalars!(impl Div for BigInt, div); +promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: u32) -> BigInt { + BigInt::from_biguint(self.sign, self.data / other) + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: u32) { + self.data /= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Div for u32 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + BigInt::from_biguint(other.sign, self / other.data) + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: u64) -> BigInt { + BigInt::from_biguint(self.sign, self.data / other) + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: u64) { + self.data /= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Div for u64 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + BigInt::from_biguint(other.sign, self / other.data) + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: u128) -> BigInt { + BigInt::from_biguint(self.sign, self.data / other) + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: u128) { + self.data /= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Div for u128 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + BigInt::from_biguint(other.sign, self / other.data) + } +} + +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigInt, div); + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self / u, + Negative(u) => -self / u, + } + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self /= u, + Negative(u) => { + self.sign = 
-self.sign; + *self /= u; + } + } + } +} + +impl Div for i32 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u / other, + Negative(u) => u / -other, + } + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self / u, + Negative(u) => -self / u, + } + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self /= u, + Negative(u) => { + self.sign = -self.sign; + *self /= u; + } + } + } +} + +impl Div for i64 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u / other, + Negative(u) => u / -other, + } + } +} + +impl Div for BigInt { + type Output = BigInt; + + #[inline] + fn div(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self / u, + Negative(u) => -self / u, + } + } +} + +impl DivAssign for BigInt { + #[inline] + fn div_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self /= u, + Negative(u) => { + self.sign = -self.sign; + *self /= u; + } + } + } +} + +impl Div for i128 { + type Output = BigInt; + + #[inline] + fn div(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u / other, + Negative(u) => u / -other, + } + } +} + +forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem); + +impl<'a, 'b> Rem<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: &BigInt) -> BigInt { + if let Some(other) = other.to_u32() { + self % other + } else if let Some(other) = other.to_i32() { + self % other + } else { + let (_, r) = self.div_rem(other); + r + } + } +} + +impl<'a> RemAssign<&'a BigInt> for BigInt { + #[inline] + fn rem_assign(&mut self, other: &BigInt) { + *self = &*self % other; + } +} +forward_val_assign!(impl RemAssign for BigInt, rem_assign); + +promote_all_scalars!(impl Rem for BigInt, rem); +promote_all_scalars_assign!(impl RemAssign for BigInt, rem_assign); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: u32) -> BigInt { + BigInt::from_biguint(self.sign, self.data % other) + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: u32) { + self.data %= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Rem for u32 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + BigInt::from(self % other.data) + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: u64) -> BigInt { + BigInt::from_biguint(self.sign, self.data % other) + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: u64) { + self.data %= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Rem for u64 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + BigInt::from(self % other.data) + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: u128) -> BigInt { + BigInt::from_biguint(self.sign, self.data % other) + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: u128) { + 
self.data %= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Rem for u128 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + BigInt::from(self % other.data) + } +} + +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigInt, rem); + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: i32) -> BigInt { + self % other.uabs() + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: i32) { + *self %= other.uabs(); + } +} + +impl Rem for i32 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u % other, + Negative(u) => -(u % other), + } + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: i64) -> BigInt { + self % other.uabs() + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: i64) { + *self %= other.uabs(); + } +} + +impl Rem for i64 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u % other, + Negative(u) => -(u % other), + } + } +} + +impl Rem for BigInt { + type Output = BigInt; + + #[inline] + fn rem(self, other: i128) -> BigInt { + self % other.uabs() + } +} + +impl RemAssign for BigInt { + #[inline] + fn rem_assign(&mut self, other: i128) { + *self %= other.uabs(); + } +} + +impl Rem for i128 { + type Output = BigInt; + + #[inline] + fn rem(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u % other, + Negative(u) => -(u % other), + } + } +} + +impl CheckedDiv for BigInt { + #[inline] + fn checked_div(&self, v: &BigInt) -> Option { + if v.is_zero() { + return None; + } + Some(self.div(v)) + } +} diff --git a/src/rust/vendor/num-bigint/src/bigint/multiplication.rs b/src/rust/vendor/num-bigint/src/bigint/multiplication.rs new file mode 100644 index 000000000..a2d970819 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/bigint/multiplication.rs @@ -0,0 +1,217 @@ +use super::CheckedUnsignedAbs::{Negative, Positive}; +use super::Sign::{self, Minus, NoSign, Plus}; +use super::{BigInt, UnsignedAbs}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::iter::Product; +use core::ops::{Mul, MulAssign}; +use num_traits::{CheckedMul, One, Zero}; + +impl Mul for Sign { + type Output = Sign; + + #[inline] + fn mul(self, other: Sign) -> Sign { + match (self, other) { + (NoSign, _) | (_, NoSign) => NoSign, + (Plus, Plus) | (Minus, Minus) => Plus, + (Plus, Minus) | (Minus, Plus) => Minus, + } + } +} + +macro_rules! impl_mul { + ($(impl<$($a:lifetime),*> Mul<$Other:ty> for $Self:ty;)*) => {$( + impl<$($a),*> Mul<$Other> for $Self { + type Output = BigInt; + + #[inline] + fn mul(self, other: $Other) -> BigInt { + // automatically match value/ref + let BigInt { data: x, .. } = self; + let BigInt { data: y, .. } = other; + BigInt::from_biguint(self.sign * other.sign, x * y) + } + } + )*} +} +impl_mul! { + impl<> Mul for BigInt; + impl<'b> Mul<&'b BigInt> for BigInt; + impl<'a> Mul for &'a BigInt; + impl<'a, 'b> Mul<&'b BigInt> for &'a BigInt; +} + +macro_rules! 
impl_mul_assign { + ($(impl<$($a:lifetime),*> MulAssign<$Other:ty> for BigInt;)*) => {$( + impl<$($a),*> MulAssign<$Other> for BigInt { + #[inline] + fn mul_assign(&mut self, other: $Other) { + // automatically match value/ref + let BigInt { data: y, .. } = other; + self.data *= y; + if self.data.is_zero() { + self.sign = NoSign; + } else { + self.sign = self.sign * other.sign; + } + } + } + )*} +} +impl_mul_assign! { + impl<> MulAssign for BigInt; + impl<'a> MulAssign<&'a BigInt> for BigInt; +} + +promote_all_scalars!(impl Mul for BigInt, mul); +promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: u32) -> BigInt { + BigInt::from_biguint(self.sign, self.data * other) + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: u32) { + self.data *= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: u64) -> BigInt { + BigInt::from_biguint(self.sign, self.data * other) + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: u64) { + self.data *= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: u128) -> BigInt { + BigInt::from_biguint(self.sign, self.data * other) + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: u128) { + self.data *= other; + if self.data.is_zero() { + self.sign = NoSign; + } + } +} + +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigInt, mul); + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self * u, + Negative(u) => -self * u, + } + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self *= u, + Negative(u) => { + self.sign = -self.sign; + self.data *= u; + } + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self * u, + Negative(u) => -self * u, + } + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self *= u, + Negative(u) => { + self.sign = -self.sign; + self.data *= u; + } + } + } +} + +impl Mul for BigInt { + type Output = BigInt; + + #[inline] + fn mul(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self * u, + Negative(u) => -self * u, + } + } +} + +impl MulAssign for BigInt { + #[inline] + fn mul_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self *= u, + Negative(u) => { + self.sign = -self.sign; + self.data *= u; + } + } + } +} + +impl CheckedMul for BigInt { + #[inline] + fn checked_mul(&self, v: &BigInt) -> Option { + Some(self.mul(v)) + } +} + +impl_product_iter_type!(BigInt); diff --git a/src/rust/vendor/num-bigint/src/bigint/power.rs 
b/src/rust/vendor/num-bigint/src/bigint/power.rs
new file mode 100644
index 000000000..a4dd8063d
--- /dev/null
+++ b/src/rust/vendor/num-bigint/src/bigint/power.rs
@@ -0,0 +1,94 @@
+use super::BigInt;
+use super::Sign::{self, Minus, Plus};
+
+use crate::BigUint;
+
+use num_integer::Integer;
+use num_traits::{Pow, Signed, Zero};
+
+/// Help function for pow
+///
+/// Computes the effect of the exponent on the sign.
+#[inline]
+fn powsign<T: Integer>(sign: Sign, other: &T) -> Sign {
+    if other.is_zero() {
+        Plus
+    } else if sign != Minus || other.is_odd() {
+        sign
+    } else {
+        -sign
+    }
+}
+
+macro_rules! pow_impl {
+    ($T:ty) => {
+        impl Pow<$T> for BigInt {
+            type Output = BigInt;
+
+            #[inline]
+            fn pow(self, rhs: $T) -> BigInt {
+                BigInt::from_biguint(powsign(self.sign, &rhs), self.data.pow(rhs))
+            }
+        }
+
+        impl<'b> Pow<&'b $T> for BigInt {
+            type Output = BigInt;
+
+            #[inline]
+            fn pow(self, rhs: &$T) -> BigInt {
+                BigInt::from_biguint(powsign(self.sign, rhs), self.data.pow(rhs))
+            }
+        }
+
+        impl<'a> Pow<$T> for &'a BigInt {
+            type Output = BigInt;
+
+            #[inline]
+            fn pow(self, rhs: $T) -> BigInt {
+                BigInt::from_biguint(powsign(self.sign, &rhs), Pow::pow(&self.data, rhs))
+            }
+        }
+
+        impl<'a, 'b> Pow<&'b $T> for &'a BigInt {
+            type Output = BigInt;
+
+            #[inline]
+            fn pow(self, rhs: &$T) -> BigInt {
+                BigInt::from_biguint(powsign(self.sign, rhs), Pow::pow(&self.data, rhs))
+            }
+        }
+    };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+pow_impl!(u128);
+pow_impl!(BigUint);
+
+pub(super) fn modpow(x: &BigInt, exponent: &BigInt, modulus: &BigInt) -> BigInt {
+    assert!(
+        !exponent.is_negative(),
+        "negative exponentiation is not supported!"
+    );
+    assert!(
+        !modulus.is_zero(),
+        "attempt to calculate with zero modulus!"
+    );
+
+    let result = x.data.modpow(&exponent.data, &modulus.data);
+    if result.is_zero() {
+        return BigInt::zero();
+    }
+
+    // The sign of the result follows the modulus, like `mod_floor`.
+    let (sign, mag) = match (x.is_negative() && exponent.is_odd(), modulus.is_negative()) {
+        (false, false) => (Plus, result),
+        (true, false) => (Plus, &modulus.data - result),
+        (false, true) => (Minus, &modulus.data - result),
+        (true, true) => (Minus, result),
+    };
+    BigInt::from_biguint(sign, mag)
+}
diff --git a/src/rust/vendor/num-bigint/src/bigint/serde.rs b/src/rust/vendor/num-bigint/src/bigint/serde.rs
new file mode 100644
index 000000000..5c232f943
--- /dev/null
+++ b/src/rust/vendor/num-bigint/src/bigint/serde.rs
@@ -0,0 +1,58 @@
+use super::{BigInt, Sign};
+
+use serde::de::{Error, Unexpected};
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+impl Serialize for Sign {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        // Note: do not change the serialization format, or it may break
+        // forward and backward compatibility of serialized data!
+ match *self { + Sign::Minus => (-1i8).serialize(serializer), + Sign::NoSign => 0i8.serialize(serializer), + Sign::Plus => 1i8.serialize(serializer), + } + } +} + +impl<'de> Deserialize<'de> for Sign { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let sign = i8::deserialize(deserializer)?; + match sign { + -1 => Ok(Sign::Minus), + 0 => Ok(Sign::NoSign), + 1 => Ok(Sign::Plus), + _ => Err(D::Error::invalid_value( + Unexpected::Signed(sign.into()), + &"a sign of -1, 0, or 1", + )), + } + } +} + +impl Serialize for BigInt { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Note: do not change the serialization format, or it may break + // forward and backward compatibility of serialized data! + (self.sign, &self.data).serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for BigInt { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let (sign, data) = Deserialize::deserialize(deserializer)?; + Ok(BigInt::from_biguint(sign, data)) + } +} diff --git a/src/rust/vendor/num-bigint/src/bigint/shift.rs b/src/rust/vendor/num-bigint/src/bigint/shift.rs new file mode 100644 index 000000000..b816e1265 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/bigint/shift.rs @@ -0,0 +1,107 @@ +use super::BigInt; +use super::Sign::NoSign; + +use core::ops::{Shl, ShlAssign, Shr, ShrAssign}; +use num_traits::{PrimInt, Signed, Zero}; + +macro_rules! impl_shift { + (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => { + impl<'b> $Shx<&'b $rhs> for BigInt { + type Output = BigInt; + + #[inline] + fn $shx(self, rhs: &'b $rhs) -> BigInt { + $Shx::$shx(self, *rhs) + } + } + impl<'a, 'b> $Shx<&'b $rhs> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn $shx(self, rhs: &'b $rhs) -> BigInt { + $Shx::$shx(self, *rhs) + } + } + impl<'b> $ShxAssign<&'b $rhs> for BigInt { + #[inline] + fn $shx_assign(&mut self, rhs: &'b $rhs) { + $ShxAssign::$shx_assign(self, *rhs); + } + } + }; + ($($rhs:ty),+) => {$( + impl Shl<$rhs> for BigInt { + type Output = BigInt; + + #[inline] + fn shl(self, rhs: $rhs) -> BigInt { + BigInt::from_biguint(self.sign, self.data << rhs) + } + } + impl<'a> Shl<$rhs> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn shl(self, rhs: $rhs) -> BigInt { + BigInt::from_biguint(self.sign, &self.data << rhs) + } + } + impl ShlAssign<$rhs> for BigInt { + #[inline] + fn shl_assign(&mut self, rhs: $rhs) { + self.data <<= rhs + } + } + impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs } + + impl Shr<$rhs> for BigInt { + type Output = BigInt; + + #[inline] + fn shr(self, rhs: $rhs) -> BigInt { + let round_down = shr_round_down(&self, rhs); + let data = self.data >> rhs; + let data = if round_down { data + 1u8 } else { data }; + BigInt::from_biguint(self.sign, data) + } + } + impl<'a> Shr<$rhs> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn shr(self, rhs: $rhs) -> BigInt { + let round_down = shr_round_down(self, rhs); + let data = &self.data >> rhs; + let data = if round_down { data + 1u8 } else { data }; + BigInt::from_biguint(self.sign, data) + } + } + impl ShrAssign<$rhs> for BigInt { + #[inline] + fn shr_assign(&mut self, rhs: $rhs) { + let round_down = shr_round_down(self, rhs); + self.data >>= rhs; + if round_down { + self.data += 1u8; + } else if self.data.is_zero() { + self.sign = NoSign; + } + } + } + impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs } + )*}; +} + +impl_shift! { u8, u16, u32, u64, u128, usize } +impl_shift! 
{ i8, i16, i32, i64, i128, isize } + +// Negative values need a rounding adjustment if there are any ones in the +// bits that are getting shifted out. +fn shr_round_down(i: &BigInt, shift: T) -> bool { + if i.is_negative() { + let zeros = i.trailing_zeros().expect("negative values are non-zero"); + shift > T::zero() && shift.to_u64().map(|shift| zeros < shift).unwrap_or(true) + } else { + false + } +} diff --git a/src/rust/vendor/num-bigint/src/bigint/subtraction.rs b/src/rust/vendor/num-bigint/src/bigint/subtraction.rs new file mode 100644 index 000000000..a12a844ae --- /dev/null +++ b/src/rust/vendor/num-bigint/src/bigint/subtraction.rs @@ -0,0 +1,300 @@ +use super::CheckedUnsignedAbs::{Negative, Positive}; +use super::Sign::{Minus, NoSign, Plus}; +use super::{BigInt, UnsignedAbs}; + +use crate::{IsizePromotion, UsizePromotion}; + +use core::cmp::Ordering::{Equal, Greater, Less}; +use core::mem; +use core::ops::{Sub, SubAssign}; +use num_traits::{CheckedSub, Zero}; + +// We want to forward to BigUint::sub, but it's not clear how that will go until +// we compare both sign and magnitude. So we duplicate this body for every +// val/ref combination, deferring that decision to BigUint's own forwarding. +macro_rules! bigint_sub { + ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => { + match ($a.sign, $b.sign) { + (_, NoSign) => $a_owned, + (NoSign, _) => -$b_owned, + // opposite signs => keep the sign of the left with the sum of magnitudes + (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data), + // same sign => keep or toggle the sign of the left with the difference of magnitudes + (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) { + Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data), + Greater => BigInt::from_biguint($a.sign, $a_data - $b_data), + Equal => Zero::zero(), + }, + } + }; +} + +impl<'a, 'b> Sub<&'b BigInt> for &'a BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: &BigInt) -> BigInt { + bigint_sub!( + self, + self.clone(), + &self.data, + other, + other.clone(), + &other.data + ) + } +} + +impl<'a> Sub for &'a BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + bigint_sub!(self, self.clone(), &self.data, other, other, other.data) + } +} + +impl<'a> Sub<&'a BigInt> for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: &BigInt) -> BigInt { + bigint_sub!(self, self, self.data, other, other.clone(), &other.data) + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + bigint_sub!(self, self, self.data, other, other, other.data) + } +} + +impl<'a> SubAssign<&'a BigInt> for BigInt { + #[inline] + fn sub_assign(&mut self, other: &BigInt) { + let n = mem::replace(self, BigInt::zero()); + *self = n - other; + } +} +forward_val_assign!(impl SubAssign for BigInt, sub_assign); + +promote_all_scalars!(impl Sub for BigInt, sub); +promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: u32) -> BigInt { + match self.sign { + NoSign => -BigInt::from(other), + Minus => -BigInt::from(self.data + other), + Plus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Greater => 
BigInt::from(self.data - other), + Less => -BigInt::from(other - self.data), + }, + } + } +} +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: u32) { + let n = mem::replace(self, BigInt::zero()); + *self = n - other; + } +} + +impl Sub for u32 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + -(other - self) + } +} + +impl Sub for u64 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + -(other - self) + } +} + +impl Sub for u128 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + -(other - self) + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: u64) -> BigInt { + match self.sign { + NoSign => -BigInt::from(other), + Minus => -BigInt::from(self.data + other), + Plus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Greater => BigInt::from(self.data - other), + Less => -BigInt::from(other - self.data), + }, + } + } +} + +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: u64) { + let n = mem::replace(self, BigInt::zero()); + *self = n - other; + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: u128) -> BigInt { + match self.sign { + NoSign => -BigInt::from(other), + Minus => -BigInt::from(self.data + other), + Plus => match self.data.cmp(&From::from(other)) { + Equal => Zero::zero(), + Greater => BigInt::from(self.data - other), + Less => -BigInt::from(other - self.data), + }, + } + } +} + +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: u128) { + let n = mem::replace(self, BigInt::zero()); + *self = n - other; + } +} + +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigInt, sub); + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: i32) -> BigInt { + match other.checked_uabs() { + Positive(u) => self - u, + Negative(u) => self + u, + } + } +} +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: i32) { + match other.checked_uabs() { + Positive(u) => *self -= u, + Negative(u) => *self += u, + } + } +} + +impl Sub for i32 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u - other, + Negative(u) => -other - u, + } + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: i64) -> BigInt { + match other.checked_uabs() { + Positive(u) => self - u, + Negative(u) => self + u, + } + } +} +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: i64) { + match other.checked_uabs() { + Positive(u) => *self -= u, + Negative(u) => *self += u, + } + } +} + +impl Sub for i64 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> BigInt { + match self.checked_uabs() { + Positive(u) => u - other, + Negative(u) => -other - u, + } + } +} + +impl Sub for BigInt { + type Output = BigInt; + + #[inline] + fn sub(self, other: i128) -> BigInt { + match other.checked_uabs() { + Positive(u) => self - u, + Negative(u) => self + u, + } + } +} + +impl SubAssign for BigInt { + #[inline] + fn sub_assign(&mut self, other: i128) { + match other.checked_uabs() { + Positive(u) => *self -= u, + Negative(u) => *self += u, + } + } +} + +impl Sub for i128 { + type Output = BigInt; + + #[inline] + fn sub(self, other: BigInt) -> 
BigInt { + match self.checked_uabs() { + Positive(u) => u - other, + Negative(u) => -other - u, + } + } +} + +impl CheckedSub for BigInt { + #[inline] + fn checked_sub(&self, v: &BigInt) -> Option { + Some(self.sub(v)) + } +} diff --git a/src/rust/vendor/num-bigint/src/bigrand.rs b/src/rust/vendor/num-bigint/src/bigrand.rs index cb4403277..8f0ce5b32 100644 --- a/src/rust/vendor/num-bigint/src/bigrand.rs +++ b/src/rust/vendor/num-bigint/src/bigrand.rs @@ -66,7 +66,7 @@ impl RandBigInt for R { let len = (digits + (rem > 0) as u64) .to_usize() .expect("capacity overflow"); - let native_digits = bit_size.div_ceil(&64); + let native_digits = Integer::div_ceil(&bit_size, &64); let native_len = native_digits.to_usize().expect("capacity overflow"); let mut data = vec![0u64; native_len]; unsafe { diff --git a/src/rust/vendor/num-bigint/src/biguint.rs b/src/rust/vendor/num-bigint/src/biguint.rs index c4a63f408..623823c8f 100644 --- a/src/rust/vendor/num-bigint/src/biguint.rs +++ b/src/rust/vendor/num-bigint/src/biguint.rs @@ -1,52 +1,40 @@ -#[cfg(any(feature = "quickcheck", feature = "arbitrary"))] -use crate::std_alloc::Box; -use crate::std_alloc::{Cow, String, Vec}; +use crate::big_digit::{self, BigDigit}; +use crate::std_alloc::{String, Vec}; + use core::cmp; -use core::cmp::Ordering::{self, Equal, Greater, Less}; -#[cfg(has_try_from)] -use core::convert::TryFrom; +use core::cmp::Ordering; use core::default::Default; use core::fmt; use core::hash; -use core::iter::{Product, Sum}; use core::mem; -use core::ops::{ - Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign, - Mul, MulAssign, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign, -}; -use core::str::{self, FromStr}; -use core::{f32, f64}; +use core::str; use core::{u32, u64, u8}; use num_integer::{Integer, Roots}; -use num_traits::float::FloatCore; -use num_traits::{ - CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, ToPrimitive, - Unsigned, Zero, -}; +use num_traits::{Num, One, Pow, ToPrimitive, Unsigned, Zero}; -use crate::big_digit::{self, BigDigit}; +mod addition; +mod division; +mod multiplication; +mod subtraction; -#[path = "algorithms.rs"] -mod algorithms; -#[path = "monty.rs"] +mod bits; +mod convert; +mod iter; mod monty; +mod power; +mod shift; -use self::algorithms::{__add2, __sub2rev, add2, sub2, sub2rev}; -use self::algorithms::{biguint_shl, biguint_shr}; -use self::algorithms::{cmp_slice, fls, ilog2}; -use self::algorithms::{div_rem, div_rem_digit, div_rem_ref, rem_digit}; -use self::algorithms::{mac_with_carry, mul3, scalar_mul}; -use self::monty::monty_modpow; +#[cfg(any(feature = "quickcheck", feature = "arbitrary"))] +mod arbitrary; -use crate::UsizePromotion; +#[cfg(feature = "serde")] +mod serde; -use crate::ParseBigIntError; -#[cfg(has_try_from)] -use crate::TryFromBigIntError; +pub(crate) use self::convert::to_str_radix_reversed; +pub use self::iter::{U32Digits, U64Digits}; /// A big unsigned integer type. 
-#[derive(Debug)] pub struct BigUint { data: Vec, } @@ -67,35 +55,6 @@ impl Clone for BigUint { } } -#[cfg(feature = "quickcheck")] -impl quickcheck::Arbitrary for BigUint { - fn arbitrary(g: &mut G) -> Self { - // Use arbitrary from Vec - biguint_from_vec(Vec::::arbitrary(g)) - } - - fn shrink(&self) -> Box> { - // Use shrinker from Vec - Box::new(self.data.shrink().map(biguint_from_vec)) - } -} - -#[cfg(feature = "arbitrary")] -mod abitrary_impl { - use super::*; - use arbitrary::{Arbitrary, Result, Unstructured}; - - impl Arbitrary for BigUint { - fn arbitrary(u: &mut Unstructured<'_>) -> Result { - Ok(biguint_from_vec(Vec::::arbitrary(u)?)) - } - - fn shrink(&self) -> Box> { - Box::new(self.data.shrink().map(biguint_from_vec)) - } - } -} - impl hash::Hash for BigUint { #[inline] fn hash(&self, state: &mut H) { @@ -128,6 +87,17 @@ impl Ord for BigUint { } } +#[inline] +fn cmp_slice(a: &[BigDigit], b: &[BigDigit]) -> Ordering { + debug_assert!(a.last() != Some(&0)); + debug_assert!(b.last() != Some(&0)); + + match Ord::cmp(&a.len(), &b.len()) { + Ordering::Equal => Iterator::cmp(a.iter().rev(), b.iter().rev()), + other => other, + } +} + impl Default for BigUint { #[inline] fn default() -> BigUint { @@ -135,6 +105,12 @@ impl Default for BigUint { } } +impl fmt::Debug for BigUint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + impl fmt::Display for BigUint { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad_integral(true, "", &self.to_str_radix(10)) @@ -167,350 +143,6 @@ impl fmt::Octal for BigUint { } } -impl FromStr for BigUint { - type Err = ParseBigIntError; - - #[inline] - fn from_str(s: &str) -> Result { - BigUint::from_str_radix(s, 10) - } -} - -// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides -// BigDigit::BITS -fn from_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { - debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0); - debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); - - let digits_per_big_digit = big_digit::BITS / bits; - - let data = v - .chunks(digits_per_big_digit.into()) - .map(|chunk| { - chunk - .iter() - .rev() - .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c)) - }) - .collect(); - - biguint_from_vec(data) -} - -// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide -// BigDigit::BITS -fn from_inexact_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { - debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0); - debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); - - let big_digits = (v.len() as u64) - .saturating_mul(bits.into()) - .div_ceil(&big_digit::BITS.into()) - .to_usize() - .unwrap_or(core::usize::MAX); - let mut data = Vec::with_capacity(big_digits); - - let mut d = 0; - let mut dbits = 0; // number of bits we currently have in d - - // walk v accumululating bits in d; whenever we accumulate big_digit::BITS in d, spit out a - // big_digit: - for &c in v { - d |= BigDigit::from(c) << dbits; - dbits += bits; - - if dbits >= big_digit::BITS { - data.push(d); - dbits -= big_digit::BITS; - // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit - // in d) - grab the bits we lost here: - d = BigDigit::from(c) >> (bits - dbits); - } - } - - if dbits > 0 { - debug_assert!(dbits < big_digit::BITS); - data.push(d as BigDigit); - } - - biguint_from_vec(data) -} - -// Read little-endian radix digits -fn 
from_radix_digits_be(v: &[u8], radix: u32) -> BigUint { - debug_assert!(!v.is_empty() && !radix.is_power_of_two()); - debug_assert!(v.iter().all(|&c| u32::from(c) < radix)); - - #[cfg(feature = "std")] - let radix_log2 = f64::from(radix).log2(); - #[cfg(not(feature = "std"))] - let radix_log2 = ilog2(radix.next_power_of_two()) as f64; - - // Estimate how big the result will be, so we can pre-allocate it. - let bits = radix_log2 * v.len() as f64; - let big_digits = (bits / big_digit::BITS as f64).ceil(); - let mut data = Vec::with_capacity(big_digits.to_usize().unwrap_or(0)); - - let (base, power) = get_radix_base(radix, big_digit::BITS); - let radix = radix as BigDigit; - - let r = v.len() % power; - let i = if r == 0 { power } else { r }; - let (head, tail) = v.split_at(i); - - let first = head - .iter() - .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); - data.push(first); - - debug_assert!(tail.len() % power == 0); - for chunk in tail.chunks(power) { - if data.last() != Some(&0) { - data.push(0); - } - - let mut carry = 0; - for d in data.iter_mut() { - *d = mac_with_carry(0, *d, base, &mut carry); - } - debug_assert!(carry == 0); - - let n = chunk - .iter() - .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); - add2(&mut data, &[n]); - } - - biguint_from_vec(data) -} - -impl Num for BigUint { - type FromStrRadixErr = ParseBigIntError; - - /// Creates and initializes a `BigUint`. - fn from_str_radix(s: &str, radix: u32) -> Result { - assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); - let mut s = s; - if s.starts_with('+') { - let tail = &s[1..]; - if !tail.starts_with('+') { - s = tail - } - } - - if s.is_empty() { - return Err(ParseBigIntError::empty()); - } - - if s.starts_with('_') { - // Must lead with a real digit! - return Err(ParseBigIntError::invalid()); - } - - // First normalize all characters to plain digit values - let mut v = Vec::with_capacity(s.len()); - for b in s.bytes() { - let d = match b { - b'0'..=b'9' => b - b'0', - b'a'..=b'z' => b - b'a' + 10, - b'A'..=b'Z' => b - b'A' + 10, - b'_' => continue, - _ => u8::MAX, - }; - if d < radix as u8 { - v.push(d); - } else { - return Err(ParseBigIntError::invalid()); - } - } - - let res = if radix.is_power_of_two() { - // Powers of two can use bitwise masks and shifting instead of multiplication - let bits = ilog2(radix); - v.reverse(); - if big_digit::BITS % bits == 0 { - from_bitwise_digits_le(&v, bits) - } else { - from_inexact_bitwise_digits_le(&v, bits) - } - } else { - from_radix_digits_be(&v, radix) - }; - Ok(res) - } -} - -forward_val_val_binop!(impl BitAnd for BigUint, bitand); -forward_ref_val_binop!(impl BitAnd for BigUint, bitand); - -// do not use forward_ref_ref_binop_commutative! 
for bitand so that we can -// clone the smaller value rather than the larger, avoiding over-allocation -impl<'a, 'b> BitAnd<&'b BigUint> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn bitand(self, other: &BigUint) -> BigUint { - // forward to val-ref, choosing the smaller to clone - if self.data.len() <= other.data.len() { - self.clone() & other - } else { - other.clone() & self - } - } -} - -forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign); - -impl<'a> BitAnd<&'a BigUint> for BigUint { - type Output = BigUint; - - #[inline] - fn bitand(mut self, other: &BigUint) -> BigUint { - self &= other; - self - } -} -impl<'a> BitAndAssign<&'a BigUint> for BigUint { - #[inline] - fn bitand_assign(&mut self, other: &BigUint) { - for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { - *ai &= bi; - } - self.data.truncate(other.data.len()); - self.normalize(); - } -} - -forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor); -forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign); - -impl<'a> BitOr<&'a BigUint> for BigUint { - type Output = BigUint; - - fn bitor(mut self, other: &BigUint) -> BigUint { - self |= other; - self - } -} -impl<'a> BitOrAssign<&'a BigUint> for BigUint { - #[inline] - fn bitor_assign(&mut self, other: &BigUint) { - for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { - *ai |= bi; - } - if other.data.len() > self.data.len() { - let extra = &other.data[self.data.len()..]; - self.data.extend(extra.iter().cloned()); - } - } -} - -forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor); -forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign); - -impl<'a> BitXor<&'a BigUint> for BigUint { - type Output = BigUint; - - fn bitxor(mut self, other: &BigUint) -> BigUint { - self ^= other; - self - } -} -impl<'a> BitXorAssign<&'a BigUint> for BigUint { - #[inline] - fn bitxor_assign(&mut self, other: &BigUint) { - for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { - *ai ^= bi; - } - if other.data.len() > self.data.len() { - let extra = &other.data[self.data.len()..]; - self.data.extend(extra.iter().cloned()); - } - self.normalize(); - } -} - -macro_rules! impl_shift { - (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => { - impl<'b> $Shx<&'b $rhs> for BigUint { - type Output = BigUint; - - #[inline] - fn $shx(self, rhs: &'b $rhs) -> BigUint { - $Shx::$shx(self, *rhs) - } - } - impl<'a, 'b> $Shx<&'b $rhs> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn $shx(self, rhs: &'b $rhs) -> BigUint { - $Shx::$shx(self, *rhs) - } - } - impl<'b> $ShxAssign<&'b $rhs> for BigUint { - #[inline] - fn $shx_assign(&mut self, rhs: &'b $rhs) { - $ShxAssign::$shx_assign(self, *rhs); - } - } - }; - ($($rhs:ty),+) => {$( - impl Shl<$rhs> for BigUint { - type Output = BigUint; - - #[inline] - fn shl(self, rhs: $rhs) -> BigUint { - biguint_shl(Cow::Owned(self), rhs) - } - } - impl<'a> Shl<$rhs> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn shl(self, rhs: $rhs) -> BigUint { - biguint_shl(Cow::Borrowed(self), rhs) - } - } - impl ShlAssign<$rhs> for BigUint { - #[inline] - fn shl_assign(&mut self, rhs: $rhs) { - let n = mem::replace(self, BigUint::zero()); - *self = n << rhs; - } - } - impl_shift! 
{ @ref Shl::shl, ShlAssign::shl_assign, $rhs } - - impl Shr<$rhs> for BigUint { - type Output = BigUint; - - #[inline] - fn shr(self, rhs: $rhs) -> BigUint { - biguint_shr(Cow::Owned(self), rhs) - } - } - impl<'a> Shr<$rhs> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn shr(self, rhs: $rhs) -> BigUint { - biguint_shr(Cow::Borrowed(self), rhs) - } - } - impl ShrAssign<$rhs> for BigUint { - #[inline] - fn shr_assign(&mut self, rhs: $rhs) { - let n = mem::replace(self, BigUint::zero()); - *self = n >> rhs; - } - } - impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs } - )*}; -} - -impl_shift! { u8, u16, u32, u64, u128, usize } -impl_shift! { i8, i16, i32, i64, i128, isize } - impl Zero for BigUint { #[inline] fn zero() -> BigUint { @@ -541,987 +173,39 @@ impl One for BigUint { } #[inline] - fn is_one(&self) -> bool { - self.data[..] == [1] - } -} - -impl Unsigned for BigUint {} - -impl<'b> Pow<&'b BigUint> for BigUint { - type Output = BigUint; - - #[inline] - fn pow(self, exp: &BigUint) -> BigUint { - if self.is_one() || exp.is_zero() { - BigUint::one() - } else if self.is_zero() { - BigUint::zero() - } else if let Some(exp) = exp.to_u64() { - self.pow(exp) - } else if let Some(exp) = exp.to_u128() { - self.pow(exp) - } else { - // At this point, `self >= 2` and `exp >= 2¹²⁸`. The smallest possible result given - // `2.pow(2¹²⁸)` would require far more memory than 64-bit targets can address! - panic!("memory overflow") - } - } -} - -impl Pow for BigUint { - type Output = BigUint; - - #[inline] - fn pow(self, exp: BigUint) -> BigUint { - Pow::pow(self, &exp) - } -} - -impl<'a, 'b> Pow<&'b BigUint> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn pow(self, exp: &BigUint) -> BigUint { - if self.is_one() || exp.is_zero() { - BigUint::one() - } else if self.is_zero() { - BigUint::zero() - } else { - self.clone().pow(exp) - } - } -} - -impl<'a> Pow for &'a BigUint { - type Output = BigUint; - - #[inline] - fn pow(self, exp: BigUint) -> BigUint { - Pow::pow(self, &exp) - } -} - -macro_rules! 
pow_impl { - ($T:ty) => { - impl Pow<$T> for BigUint { - type Output = BigUint; - - fn pow(self, mut exp: $T) -> BigUint { - if exp == 0 { - return BigUint::one(); - } - let mut base = self; - - while exp & 1 == 0 { - base = &base * &base; - exp >>= 1; - } - - if exp == 1 { - return base; - } - - let mut acc = base.clone(); - while exp > 1 { - exp >>= 1; - base = &base * &base; - if exp & 1 == 1 { - acc = &acc * &base; - } - } - acc - } - } - - impl<'b> Pow<&'b $T> for BigUint { - type Output = BigUint; - - #[inline] - fn pow(self, exp: &$T) -> BigUint { - Pow::pow(self, *exp) - } - } - - impl<'a> Pow<$T> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn pow(self, exp: $T) -> BigUint { - if exp == 0 { - return BigUint::one(); - } - Pow::pow(self.clone(), exp) - } - } - - impl<'a, 'b> Pow<&'b $T> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn pow(self, exp: &$T) -> BigUint { - Pow::pow(self, *exp) - } - } - }; -} - -pow_impl!(u8); -pow_impl!(u16); -pow_impl!(u32); -pow_impl!(u64); -pow_impl!(usize); -pow_impl!(u128); - -forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add); -forward_val_assign!(impl AddAssign for BigUint, add_assign); - -impl<'a> Add<&'a BigUint> for BigUint { - type Output = BigUint; - - fn add(mut self, other: &BigUint) -> BigUint { - self += other; - self - } -} -impl<'a> AddAssign<&'a BigUint> for BigUint { - #[inline] - fn add_assign(&mut self, other: &BigUint) { - let self_len = self.data.len(); - let carry = if self_len < other.data.len() { - let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]); - self.data.extend_from_slice(&other.data[self_len..]); - __add2(&mut self.data[self_len..], &[lo_carry]) - } else { - __add2(&mut self.data[..], &other.data[..]) - }; - if carry != 0 { - self.data.push(carry); - } - } -} - -promote_unsigned_scalars!(impl Add for BigUint, add); -promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); -forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); - -impl Add for BigUint { - type Output = BigUint; - - #[inline] - fn add(mut self, other: u32) -> BigUint { - self += other; - self - } -} - -impl AddAssign for BigUint { - #[inline] - fn add_assign(&mut self, other: u32) { - if other != 0 { - if self.data.is_empty() { - self.data.push(0); - } - - let carry = __add2(&mut self.data, &[other as BigDigit]); - if carry != 0 { - self.data.push(carry); - } - } - } -} - -impl Add for BigUint { - type Output = BigUint; - - #[inline] - fn add(mut self, other: u64) -> BigUint { - self += other; - self - } -} - -impl AddAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn add_assign(&mut self, other: u64) { - let (hi, lo) = big_digit::from_doublebigdigit(other); - if hi == 0 { - *self += lo; - } else { - while self.data.len() < 2 { - self.data.push(0); - } - - let carry = __add2(&mut self.data, &[lo, hi]); - if carry != 0 { - self.data.push(carry); - } - } - } - - #[cfg(u64_digit)] - #[inline] - fn add_assign(&mut self, other: u64) { - if other != 0 { - if self.data.is_empty() { - self.data.push(0); - } - - let carry = __add2(&mut self.data, &[other as BigDigit]); - if carry != 0 { - self.data.push(carry); - } - } - } -} - -impl Add for BigUint { - type Output = BigUint; - - #[inline] - fn add(mut self, other: u128) -> BigUint { - self += other; - self - } -} - -impl AddAssign for BigUint { - 
#[cfg(not(u64_digit))] - #[inline] - fn add_assign(&mut self, other: u128) { - if other <= u128::from(u64::max_value()) { - *self += other as u64 - } else { - let (a, b, c, d) = u32_from_u128(other); - let carry = if a > 0 { - while self.data.len() < 4 { - self.data.push(0); - } - __add2(&mut self.data, &[d, c, b, a]) - } else { - debug_assert!(b > 0); - while self.data.len() < 3 { - self.data.push(0); - } - __add2(&mut self.data, &[d, c, b]) - }; - - if carry != 0 { - self.data.push(carry); - } - } - } - - #[cfg(u64_digit)] - #[inline] - fn add_assign(&mut self, other: u128) { - let (hi, lo) = big_digit::from_doublebigdigit(other); - if hi == 0 { - *self += lo; - } else { - while self.data.len() < 2 { - self.data.push(0); - } - - let carry = __add2(&mut self.data, &[lo, hi]); - if carry != 0 { - self.data.push(carry); - } - } - } -} - -forward_val_val_binop!(impl Sub for BigUint, sub); -forward_ref_ref_binop!(impl Sub for BigUint, sub); -forward_val_assign!(impl SubAssign for BigUint, sub_assign); - -impl<'a> Sub<&'a BigUint> for BigUint { - type Output = BigUint; - - fn sub(mut self, other: &BigUint) -> BigUint { - self -= other; - self - } -} -impl<'a> SubAssign<&'a BigUint> for BigUint { - fn sub_assign(&mut self, other: &'a BigUint) { - sub2(&mut self.data[..], &other.data[..]); - self.normalize(); - } -} - -impl<'a> Sub for &'a BigUint { - type Output = BigUint; - - fn sub(self, mut other: BigUint) -> BigUint { - let other_len = other.data.len(); - if other_len < self.data.len() { - let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data); - other.data.extend_from_slice(&self.data[other_len..]); - if lo_borrow != 0 { - sub2(&mut other.data[other_len..], &[1]) - } - } else { - sub2rev(&self.data[..], &mut other.data[..]); - } - other.normalized() - } -} - -promote_unsigned_scalars!(impl Sub for BigUint, sub); -promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign); -forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); -forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); - -impl Sub for BigUint { - type Output = BigUint; - - #[inline] - fn sub(mut self, other: u32) -> BigUint { - self -= other; - self - } -} - -impl SubAssign for BigUint { - fn sub_assign(&mut self, other: u32) { - sub2(&mut self.data[..], &[other as BigDigit]); - self.normalize(); - } -} - -impl Sub for u32 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - if other.data.len() == 0 { - other.data.push(self); - } else { - sub2rev(&[self], &mut other.data[..]); - } - other.normalized() - } - - #[cfg(u64_digit)] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - if other.data.is_empty() { - other.data.push(self as BigDigit); - } else { - sub2rev(&[self as BigDigit], &mut other.data[..]); - } - other.normalized() - } -} - -impl Sub for BigUint { - type Output = BigUint; - - #[inline] - fn sub(mut self, other: u64) -> BigUint { - self -= other; - self - } -} - -impl SubAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn sub_assign(&mut self, other: u64) { - let (hi, lo) = big_digit::from_doublebigdigit(other); - sub2(&mut self.data[..], &[lo, hi]); - self.normalize(); - } - - #[cfg(u64_digit)] - #[inline] - fn sub_assign(&mut self, other: u64) { - sub2(&mut self.data[..], &[other as BigDigit]); - self.normalize(); - } -} - -impl Sub for u64 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn sub(self, 
mut other: BigUint) -> BigUint { - while other.data.len() < 2 { - other.data.push(0); - } - - let (hi, lo) = big_digit::from_doublebigdigit(self); - sub2rev(&[lo, hi], &mut other.data[..]); - other.normalized() - } - - #[cfg(u64_digit)] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - if other.data.is_empty() { - other.data.push(self); - } else { - sub2rev(&[self], &mut other.data[..]); - } - other.normalized() - } -} - -impl Sub for BigUint { - type Output = BigUint; - - #[inline] - fn sub(mut self, other: u128) -> BigUint { - self -= other; - self - } -} - -impl SubAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn sub_assign(&mut self, other: u128) { - let (a, b, c, d) = u32_from_u128(other); - sub2(&mut self.data[..], &[d, c, b, a]); - self.normalize(); - } - - #[cfg(u64_digit)] - #[inline] - fn sub_assign(&mut self, other: u128) { - let (hi, lo) = big_digit::from_doublebigdigit(other); - sub2(&mut self.data[..], &[lo, hi]); - self.normalize(); - } -} - -impl Sub for u128 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - while other.data.len() < 4 { - other.data.push(0); - } - - let (a, b, c, d) = u32_from_u128(self); - sub2rev(&[d, c, b, a], &mut other.data[..]); - other.normalized() - } - - #[cfg(u64_digit)] - #[inline] - fn sub(self, mut other: BigUint) -> BigUint { - while other.data.len() < 2 { - other.data.push(0); - } - - let (hi, lo) = big_digit::from_doublebigdigit(self); - sub2rev(&[lo, hi], &mut other.data[..]); - other.normalized() - } -} - -forward_all_binop_to_ref_ref!(impl Mul for BigUint, mul); -forward_val_assign!(impl MulAssign for BigUint, mul_assign); - -impl<'a, 'b> Mul<&'b BigUint> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn mul(self, other: &BigUint) -> BigUint { - mul3(&self.data[..], &other.data[..]) - } -} -impl<'a> MulAssign<&'a BigUint> for BigUint { - #[inline] - fn mul_assign(&mut self, other: &'a BigUint) { - *self = &*self * other - } -} - -promote_unsigned_scalars!(impl Mul for BigUint, mul); -promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); -forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); - -impl Mul for BigUint { - type Output = BigUint; - - #[inline] - fn mul(mut self, other: u32) -> BigUint { - self *= other; - self - } -} -impl MulAssign for BigUint { - #[inline] - fn mul_assign(&mut self, other: u32) { - if other == 0 { - self.data.clear(); - } else { - let carry = scalar_mul(&mut self.data[..], other as BigDigit); - if carry != 0 { - self.data.push(carry); - } - } - } -} - -impl Mul for BigUint { - type Output = BigUint; - - #[inline] - fn mul(mut self, other: u64) -> BigUint { - self *= other; - self - } -} -impl MulAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn mul_assign(&mut self, other: u64) { - if other == 0 { - self.data.clear(); - } else if other <= u64::from(BigDigit::max_value()) { - *self *= other as BigDigit - } else { - let (hi, lo) = big_digit::from_doublebigdigit(other); - *self = mul3(&self.data[..], &[lo, hi]) - } - } - - #[cfg(u64_digit)] - #[inline] - fn mul_assign(&mut self, other: u64) { - if other == 0 { - self.data.clear(); - } else { - let carry = scalar_mul(&mut self.data[..], other as BigDigit); - if carry != 0 { - self.data.push(carry); - } - } - } -} - -impl Mul for BigUint { - type Output 
= BigUint; - - #[inline] - fn mul(mut self, other: u128) -> BigUint { - self *= other; - self - } -} - -impl MulAssign for BigUint { - #[cfg(not(u64_digit))] - #[inline] - fn mul_assign(&mut self, other: u128) { - if other == 0 { - self.data.clear(); - } else if other <= u128::from(BigDigit::max_value()) { - *self *= other as BigDigit - } else { - let (a, b, c, d) = u32_from_u128(other); - *self = mul3(&self.data[..], &[d, c, b, a]) - } - } - - #[cfg(u64_digit)] - #[inline] - fn mul_assign(&mut self, other: u128) { - if other == 0 { - self.data.clear(); - } else if other <= BigDigit::max_value() as u128 { - *self *= other as BigDigit - } else { - let (hi, lo) = big_digit::from_doublebigdigit(other); - *self = mul3(&self.data[..], &[lo, hi]) - } - } -} - -forward_val_ref_binop!(impl Div for BigUint, div); -forward_ref_val_binop!(impl Div for BigUint, div); -forward_val_assign!(impl DivAssign for BigUint, div_assign); - -impl Div for BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: BigUint) -> BigUint { - let (q, _) = div_rem(self, other); - q - } -} - -impl<'a, 'b> Div<&'b BigUint> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: &BigUint) -> BigUint { - let (q, _) = self.div_rem(other); - q - } -} -impl<'a> DivAssign<&'a BigUint> for BigUint { - #[inline] - fn div_assign(&mut self, other: &'a BigUint) { - *self = &*self / other; - } -} - -promote_unsigned_scalars!(impl Div for BigUint, div); -promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign); -forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); -forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); - -impl Div for BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: u32) -> BigUint { - let (q, _) = div_rem_digit(self, other as BigDigit); - q - } -} -impl DivAssign for BigUint { - #[inline] - fn div_assign(&mut self, other: u32) { - *self = &*self / other; - } -} - -impl Div for u32 { - type Output = BigUint; - - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self as BigDigit / other.data[0]), - _ => Zero::zero(), - } - } -} - -impl Div for BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: u64) -> BigUint { - let (q, _) = div_rem(self, From::from(other)); - q - } -} -impl DivAssign for BigUint { - #[inline] - fn div_assign(&mut self, other: u64) { - // a vec of size 0 does not allocate, so this is fairly cheap - let temp = mem::replace(self, Zero::zero()); - *self = temp / other; - } -} - -impl Div for u64 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self / u64::from(other.data[0])), - 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), - _ => Zero::zero(), - } - } - - #[cfg(u64_digit)] - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self / other.data[0]), - _ => Zero::zero(), - } - } -} - -impl Div for BigUint { - type Output = BigUint; - - #[inline] - fn div(self, other: u128) -> BigUint { - let (q, _) = div_rem(self, From::from(other)); - q - } -} - -impl DivAssign for BigUint { - #[inline] - fn div_assign(&mut self, other: u128) { - *self = &*self / other; - } -} - 
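For orientation in this long vendored hunk: the scalar Mul/Div operator impls being reshuffled here stay reachable through num-bigint's ordinary public API. A minimal standalone sketch, illustrative only and not part of the vendored file (assumes num-bigint and num-integer as dependencies):

use num_bigint::BigUint;
use num_integer::Integer;

fn main() {
    let x = BigUint::from(1_000_000_007u64);
    // BigUint * u32 resolves through the scalar Mul impls in this file.
    let y = x * 12u32; // 12_000_000_084
    // Integer::div_rem exercises the BigUint-by-BigUint division path.
    let (q, r) = y.div_rem(&BigUint::from(10u32));
    assert_eq!(q, BigUint::from(1_200_000_008u64));
    assert_eq!(r, BigUint::from(4u32));
    // Dividing a scalar by a BigUint is also defined and yields a BigUint.
    assert_eq!(100u32 / BigUint::from(7u32), BigUint::from(14u32));
}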
-impl Div for u128 { - type Output = BigUint; - - #[cfg(not(u64_digit))] - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self / u128::from(other.data[0])), - 2 => From::from( - self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])), - ), - 3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])), - 4 => From::from( - self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]), - ), - _ => Zero::zero(), - } - } - - #[cfg(u64_digit)] - #[inline] - fn div(self, other: BigUint) -> BigUint { - match other.data.len() { - 0 => panic!("attempt to divide by zero"), - 1 => From::from(self / other.data[0] as u128), - 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), - _ => Zero::zero(), - } - } -} - -forward_val_ref_binop!(impl Rem for BigUint, rem); -forward_ref_val_binop!(impl Rem for BigUint, rem); -forward_val_assign!(impl RemAssign for BigUint, rem_assign); - -impl Rem for BigUint { - type Output = BigUint; - - #[inline] - fn rem(self, other: BigUint) -> BigUint { - if let Some(other) = other.to_u32() { - &self % other - } else { - let (_, r) = div_rem(self, other); - r - } - } -} - -impl<'a, 'b> Rem<&'b BigUint> for &'a BigUint { - type Output = BigUint; - - #[inline] - fn rem(self, other: &BigUint) -> BigUint { - if let Some(other) = other.to_u32() { - self % other - } else { - let (_, r) = self.div_rem(other); - r - } - } -} -impl<'a> RemAssign<&'a BigUint> for BigUint { - #[inline] - fn rem_assign(&mut self, other: &BigUint) { - *self = &*self % other; - } -} - -promote_unsigned_scalars!(impl Rem for BigUint, rem); -promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign); -forward_all_scalar_binop_to_ref_val!(impl Rem for BigUint, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); -forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); - -impl<'a> Rem for &'a BigUint { - type Output = BigUint; - - #[inline] - fn rem(self, other: u32) -> BigUint { - rem_digit(self, other as BigDigit).into() - } -} -impl RemAssign for BigUint { - #[inline] - fn rem_assign(&mut self, other: u32) { - *self = &*self % other; - } -} - -impl<'a> Rem<&'a BigUint> for u32 { - type Output = BigUint; - - #[inline] - fn rem(mut self, other: &'a BigUint) -> BigUint { - self %= other; - From::from(self) - } -} - -macro_rules! 
impl_rem_assign_scalar { - ($scalar:ty, $to_scalar:ident) => { - forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign); - impl<'a> RemAssign<&'a BigUint> for $scalar { - #[inline] - fn rem_assign(&mut self, other: &BigUint) { - *self = match other.$to_scalar() { - None => *self, - Some(0) => panic!("attempt to divide by zero"), - Some(v) => *self % v - }; - } - } - } -} - -// we can scalar %= BigUint for any scalar, including signed types -impl_rem_assign_scalar!(u128, to_u128); -impl_rem_assign_scalar!(usize, to_usize); -impl_rem_assign_scalar!(u64, to_u64); -impl_rem_assign_scalar!(u32, to_u32); -impl_rem_assign_scalar!(u16, to_u16); -impl_rem_assign_scalar!(u8, to_u8); -impl_rem_assign_scalar!(i128, to_i128); -impl_rem_assign_scalar!(isize, to_isize); -impl_rem_assign_scalar!(i64, to_i64); -impl_rem_assign_scalar!(i32, to_i32); -impl_rem_assign_scalar!(i16, to_i16); -impl_rem_assign_scalar!(i8, to_i8); - -impl Rem for BigUint { - type Output = BigUint; - - #[inline] - fn rem(self, other: u64) -> BigUint { - let (_, r) = div_rem(self, From::from(other)); - r - } -} -impl RemAssign for BigUint { - #[inline] - fn rem_assign(&mut self, other: u64) { - *self = &*self % other; - } -} - -impl Rem for u64 { - type Output = BigUint; - - #[inline] - fn rem(mut self, other: BigUint) -> BigUint { - self %= other; - From::from(self) - } -} - -impl Rem for BigUint { - type Output = BigUint; - - #[inline] - fn rem(self, other: u128) -> BigUint { - let (_, r) = div_rem(self, From::from(other)); - r - } -} - -impl RemAssign for BigUint { - #[inline] - fn rem_assign(&mut self, other: u128) { - *self = &*self % other; - } -} - -impl Rem for u128 { - type Output = BigUint; - - #[inline] - fn rem(mut self, other: BigUint) -> BigUint { - self %= other; - From::from(self) - } -} - -impl CheckedAdd for BigUint { - #[inline] - fn checked_add(&self, v: &BigUint) -> Option { - Some(self.add(v)) - } -} - -impl CheckedSub for BigUint { - #[inline] - fn checked_sub(&self, v: &BigUint) -> Option { - match self.cmp(v) { - Less => None, - Equal => Some(Zero::zero()), - Greater => Some(self.sub(v)), - } - } -} - -impl CheckedMul for BigUint { - #[inline] - fn checked_mul(&self, v: &BigUint) -> Option { - Some(self.mul(v)) - } -} - -impl CheckedDiv for BigUint { - #[inline] - fn checked_div(&self, v: &BigUint) -> Option { - if v.is_zero() { - return None; - } - Some(self.div(v)) + fn is_one(&self) -> bool { + self.data[..] == [1] } } +impl Unsigned for BigUint {} + impl Integer for BigUint { #[inline] fn div_rem(&self, other: &BigUint) -> (BigUint, BigUint) { - div_rem_ref(self, other) + division::div_rem_ref(self, other) } #[inline] fn div_floor(&self, other: &BigUint) -> BigUint { - let (d, _) = div_rem_ref(self, other); + let (d, _) = division::div_rem_ref(self, other); d } #[inline] fn mod_floor(&self, other: &BigUint) -> BigUint { - let (_, m) = div_rem_ref(self, other); + let (_, m) = division::div_rem_ref(self, other); m } #[inline] fn div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint) { - div_rem_ref(self, other) + division::div_rem_ref(self, other) } #[inline] fn div_ceil(&self, other: &BigUint) -> BigUint { - let (d, m) = div_rem_ref(self, other); + let (d, m) = division::div_rem_ref(self, other); if m.is_zero() { d } else { @@ -1685,586 +369,142 @@ impl Roots for BigUint { _ => (), } - // The root of non-zero values less than 2ⁿ can only be 1. 
- let bits = self.bits(); - let n64 = u64::from(n); - if bits <= n64 { - return BigUint::one(); - } - - // If we fit in `u64`, compute the root that way. - if let Some(x) = self.to_u64() { - return x.nth_root(n).into(); - } - - let max_bits = bits / n64 + 1; - - #[cfg(feature = "std")] - let guess = match self.to_f64() { - Some(f) if f.is_finite() => { - // We fit in `f64` (lossy), so get a better initial guess from that. - BigUint::from_f64((f.ln() / f64::from(n)).exp()).unwrap() - } - _ => { - // Try to guess by scaling down such that it does fit in `f64`. - // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ) - let extra_bits = bits - (f64::MAX_EXP as u64 - 1); - let root_scale = extra_bits.div_ceil(&n64); - let scale = root_scale * n64; - if scale < bits && bits - scale > n64 { - (self >> scale).nth_root(n) << root_scale - } else { - BigUint::one() << max_bits - } - } - }; - - #[cfg(not(feature = "std"))] - let guess = BigUint::one() << max_bits; - - let n_min_1 = n - 1; - fixpoint(guess, max_bits, move |s| { - let q = self / s.pow(n_min_1); - let t = n_min_1 * s + q; - t / n - }) - } - - // Reference: - // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13 - fn sqrt(&self) -> Self { - if self.is_zero() || self.is_one() { - return self.clone(); - } - - // If we fit in `u64`, compute the root that way. - if let Some(x) = self.to_u64() { - return x.sqrt().into(); - } - - let bits = self.bits(); - let max_bits = bits / 2 + 1; - - #[cfg(feature = "std")] - let guess = match self.to_f64() { - Some(f) if f.is_finite() => { - // We fit in `f64` (lossy), so get a better initial guess from that. - BigUint::from_f64(f.sqrt()).unwrap() - } - _ => { - // Try to guess by scaling down such that it does fit in `f64`. - // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ) - let extra_bits = bits - (f64::MAX_EXP as u64 - 1); - let root_scale = (extra_bits + 1) / 2; - let scale = root_scale * 2; - (self >> scale).sqrt() << root_scale - } - }; - - #[cfg(not(feature = "std"))] - let guess = BigUint::one() << max_bits; - - fixpoint(guess, max_bits, move |s| { - let q = self / s; - let t = s + q; - t >> 1 - }) - } - - fn cbrt(&self) -> Self { - if self.is_zero() || self.is_one() { - return self.clone(); - } - - // If we fit in `u64`, compute the root that way. - if let Some(x) = self.to_u64() { - return x.cbrt().into(); - } - - let bits = self.bits(); - let max_bits = bits / 3 + 1; - - #[cfg(feature = "std")] - let guess = match self.to_f64() { - Some(f) if f.is_finite() => { - // We fit in `f64` (lossy), so get a better initial guess from that. - BigUint::from_f64(f.cbrt()).unwrap() - } - _ => { - // Try to guess by scaling down such that it does fit in `f64`. 
- // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ) - let extra_bits = bits - (f64::MAX_EXP as u64 - 1); - let root_scale = (extra_bits + 2) / 3; - let scale = root_scale * 3; - (self >> scale).cbrt() << root_scale - } - }; - - #[cfg(not(feature = "std"))] - let guess = BigUint::one() << max_bits; - - fixpoint(guess, max_bits, move |s| { - let q = self / (s * s); - let t = (s << 1) + q; - t / 3u32 - }) - } -} - -fn high_bits_to_u64(v: &BigUint) -> u64 { - match v.data.len() { - 0 => 0, - 1 => u64::from(v.data[0]), - _ => { - let mut bits = v.bits(); - let mut ret = 0u64; - let mut ret_bits = 0; - - for d in v.data.iter().rev() { - let digit_bits = (bits - 1) % u64::from(big_digit::BITS) + 1; - let bits_want = cmp::min(64 - ret_bits, digit_bits); - - if bits_want != 64 { - ret <<= bits_want; - } - ret |= u64::from(*d) >> (digit_bits - bits_want); - ret_bits += bits_want; - bits -= bits_want; - - if ret_bits == 64 { - break; - } - } - - ret - } - } -} - -impl ToPrimitive for BigUint { - #[inline] - fn to_i64(&self) -> Option { - self.to_u64().as_ref().and_then(u64::to_i64) - } - - #[inline] - fn to_i128(&self) -> Option { - self.to_u128().as_ref().and_then(u128::to_i128) - } - - #[inline] - fn to_u64(&self) -> Option { - let mut ret: u64 = 0; - let mut bits = 0; - - for i in self.data.iter() { - if bits >= 64 { - return None; - } - - ret += u64::from(*i) << bits; - bits += big_digit::BITS; - } - - Some(ret) - } - - #[inline] - fn to_u128(&self) -> Option { - let mut ret: u128 = 0; - let mut bits = 0; - - for i in self.data.iter() { - if bits >= 128 { - return None; - } - - ret |= u128::from(*i) << bits; - bits += big_digit::BITS; - } - - Some(ret) - } - - #[inline] - fn to_f32(&self) -> Option { - let mantissa = high_bits_to_u64(self); - let exponent = self.bits() - u64::from(fls(mantissa)); - - if exponent > f32::MAX_EXP as u64 { - Some(f32::INFINITY) - } else { - Some((mantissa as f32) * 2.0f32.powi(exponent as i32)) - } - } - - #[inline] - fn to_f64(&self) -> Option { - let mantissa = high_bits_to_u64(self); - let exponent = self.bits() - u64::from(fls(mantissa)); - - if exponent > f64::MAX_EXP as u64 { - Some(f64::INFINITY) - } else { - Some((mantissa as f64) * 2.0f64.powi(exponent as i32)) - } - } -} - -macro_rules! 
impl_try_from_biguint { - ($T:ty, $to_ty:path) => { - #[cfg(has_try_from)] - impl TryFrom<&BigUint> for $T { - type Error = TryFromBigIntError<()>; - - #[inline] - fn try_from(value: &BigUint) -> Result<$T, TryFromBigIntError<()>> { - $to_ty(value).ok_or(TryFromBigIntError::new(())) - } - } - - #[cfg(has_try_from)] - impl TryFrom for $T { - type Error = TryFromBigIntError; - - #[inline] - fn try_from(value: BigUint) -> Result<$T, TryFromBigIntError> { - <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) - } - } - }; -} - -impl_try_from_biguint!(u8, ToPrimitive::to_u8); -impl_try_from_biguint!(u16, ToPrimitive::to_u16); -impl_try_from_biguint!(u32, ToPrimitive::to_u32); -impl_try_from_biguint!(u64, ToPrimitive::to_u64); -impl_try_from_biguint!(usize, ToPrimitive::to_usize); -impl_try_from_biguint!(u128, ToPrimitive::to_u128); - -impl_try_from_biguint!(i8, ToPrimitive::to_i8); -impl_try_from_biguint!(i16, ToPrimitive::to_i16); -impl_try_from_biguint!(i32, ToPrimitive::to_i32); -impl_try_from_biguint!(i64, ToPrimitive::to_i64); -impl_try_from_biguint!(isize, ToPrimitive::to_isize); -impl_try_from_biguint!(i128, ToPrimitive::to_i128); - -impl FromPrimitive for BigUint { - #[inline] - fn from_i64(n: i64) -> Option { - if n >= 0 { - Some(BigUint::from(n as u64)) - } else { - None - } - } - - #[inline] - fn from_i128(n: i128) -> Option { - if n >= 0 { - Some(BigUint::from(n as u128)) - } else { - None - } - } - - #[inline] - fn from_u64(n: u64) -> Option { - Some(BigUint::from(n)) - } - - #[inline] - fn from_u128(n: u128) -> Option { - Some(BigUint::from(n)) - } - - #[inline] - fn from_f64(mut n: f64) -> Option { - // handle NAN, INFINITY, NEG_INFINITY - if !n.is_finite() { - return None; - } - - // match the rounding of casting from float to int - n = n.trunc(); - - // handle 0.x, -0.x - if n.is_zero() { - return Some(BigUint::zero()); - } - - let (mantissa, exponent, sign) = FloatCore::integer_decode(n); - - if sign == -1 { - return None; - } - - let mut ret = BigUint::from(mantissa); - match exponent.cmp(&0) { - Greater => ret <<= exponent as usize, - Equal => {} - Less => ret >>= (-exponent) as usize, - } - Some(ret) - } -} - -impl From for BigUint { - #[inline] - fn from(mut n: u64) -> Self { - let mut ret: BigUint = Zero::zero(); - - while n != 0 { - ret.data.push(n as BigDigit); - // don't overflow if BITS is 64: - n = (n >> 1) >> (big_digit::BITS - 1); - } - - ret - } -} - -impl From for BigUint { - #[inline] - fn from(mut n: u128) -> Self { - let mut ret: BigUint = Zero::zero(); - - while n != 0 { - ret.data.push(n as BigDigit); - n >>= big_digit::BITS; - } - - ret - } -} - -macro_rules! impl_biguint_from_uint { - ($T:ty) => { - impl From<$T> for BigUint { - #[inline] - fn from(n: $T) -> Self { - BigUint::from(n as u64) - } - } - }; -} - -impl_biguint_from_uint!(u8); -impl_biguint_from_uint!(u16); -impl_biguint_from_uint!(u32); -impl_biguint_from_uint!(usize); - -macro_rules! 
impl_biguint_try_from_int { - ($T:ty, $from_ty:path) => { - #[cfg(has_try_from)] - impl TryFrom<$T> for BigUint { - type Error = TryFromBigIntError<()>; - - #[inline] - fn try_from(value: $T) -> Result> { - $from_ty(value).ok_or(TryFromBigIntError::new(())) - } - } - }; -} - -impl_biguint_try_from_int!(i8, FromPrimitive::from_i8); -impl_biguint_try_from_int!(i16, FromPrimitive::from_i16); -impl_biguint_try_from_int!(i32, FromPrimitive::from_i32); -impl_biguint_try_from_int!(i64, FromPrimitive::from_i64); -impl_biguint_try_from_int!(isize, FromPrimitive::from_isize); -impl_biguint_try_from_int!(i128, FromPrimitive::from_i128); - -/// A generic trait for converting a value to a `BigUint`. -pub trait ToBigUint { - /// Converts the value of `self` to a `BigUint`. - fn to_biguint(&self) -> Option; -} - -impl ToBigUint for BigUint { - #[inline] - fn to_biguint(&self) -> Option { - Some(self.clone()) - } -} - -macro_rules! impl_to_biguint { - ($T:ty, $from_ty:path) => { - impl ToBigUint for $T { - #[inline] - fn to_biguint(&self) -> Option { - $from_ty(*self) - } - } - }; -} - -impl_to_biguint!(isize, FromPrimitive::from_isize); -impl_to_biguint!(i8, FromPrimitive::from_i8); -impl_to_biguint!(i16, FromPrimitive::from_i16); -impl_to_biguint!(i32, FromPrimitive::from_i32); -impl_to_biguint!(i64, FromPrimitive::from_i64); -impl_to_biguint!(i128, FromPrimitive::from_i128); - -impl_to_biguint!(usize, FromPrimitive::from_usize); -impl_to_biguint!(u8, FromPrimitive::from_u8); -impl_to_biguint!(u16, FromPrimitive::from_u16); -impl_to_biguint!(u32, FromPrimitive::from_u32); -impl_to_biguint!(u64, FromPrimitive::from_u64); -impl_to_biguint!(u128, FromPrimitive::from_u128); - -impl_to_biguint!(f32, FromPrimitive::from_f32); -impl_to_biguint!(f64, FromPrimitive::from_f64); - -// Extract bitwise digits that evenly divide BigDigit -fn to_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { - debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0); - - let last_i = u.data.len() - 1; - let mask: BigDigit = (1 << bits) - 1; - let digits_per_big_digit = big_digit::BITS / bits; - let digits = u - .bits() - .div_ceil(&u64::from(bits)) - .to_usize() - .unwrap_or(core::usize::MAX); - let mut res = Vec::with_capacity(digits); - - for mut r in u.data[..last_i].iter().cloned() { - for _ in 0..digits_per_big_digit { - res.push((r & mask) as u8); - r >>= bits; - } - } - - let mut r = u.data[last_i]; - while r != 0 { - res.push((r & mask) as u8); - r >>= bits; - } - - res -} - -// Extract bitwise digits that don't evenly divide BigDigit -fn to_inexact_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { - debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0); - - let mask: BigDigit = (1 << bits) - 1; - let digits = u - .bits() - .div_ceil(&u64::from(bits)) - .to_usize() - .unwrap_or(core::usize::MAX); - let mut res = Vec::with_capacity(digits); + // The root of non-zero values less than 2ⁿ can only be 1. + let bits = self.bits(); + let n64 = u64::from(n); + if bits <= n64 { + return BigUint::one(); + } - let mut r = 0; - let mut rbits = 0; + // If we fit in `u64`, compute the root that way. 
+ if let Some(x) = self.to_u64() { + return x.nth_root(n).into(); + } - for c in &u.data { - r |= *c << rbits; - rbits += big_digit::BITS; + let max_bits = bits / n64 + 1; - while rbits >= bits { - res.push((r & mask) as u8); - r >>= bits; + #[cfg(feature = "std")] + let guess = match self.to_f64() { + Some(f) if f.is_finite() => { + use num_traits::FromPrimitive; - // r had more bits than it could fit - grab the bits we lost - if rbits > big_digit::BITS { - r = *c >> (big_digit::BITS - (rbits - bits)); + // We fit in `f64` (lossy), so get a better initial guess from that. + BigUint::from_f64((f.ln() / f64::from(n)).exp()).unwrap() } + _ => { + // Try to guess by scaling down such that it does fit in `f64`. + // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ) + let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1); + let root_scale = Integer::div_ceil(&extra_bits, &n64); + let scale = root_scale * n64; + if scale < bits && bits - scale > n64 { + (self >> scale).nth_root(n) << root_scale + } else { + BigUint::one() << max_bits + } + } + }; - rbits -= bits; - } - } - - if rbits != 0 { - res.push(r as u8); - } + #[cfg(not(feature = "std"))] + let guess = BigUint::one() << max_bits; - while let Some(&0) = res.last() { - res.pop(); + let n_min_1 = n - 1; + fixpoint(guess, max_bits, move |s| { + let q = self / s.pow(n_min_1); + let t = n_min_1 * s + q; + t / n + }) } - res -} - -// Extract little-endian radix digits -#[inline(always)] // forced inline to get const-prop for radix=10 -fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec { - debug_assert!(!u.is_zero() && !radix.is_power_of_two()); + // Reference: + // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13 + fn sqrt(&self) -> Self { + if self.is_zero() || self.is_one() { + return self.clone(); + } - #[cfg(feature = "std")] - let radix_log2 = f64::from(radix).log2(); - #[cfg(not(feature = "std"))] - let radix_log2 = ilog2(radix) as f64; + // If we fit in `u64`, compute the root that way. + if let Some(x) = self.to_u64() { + return x.sqrt().into(); + } - // Estimate how big the result will be, so we can pre-allocate it. - let radix_digits = ((u.bits() as f64) / radix_log2).ceil(); - let mut res = Vec::with_capacity(radix_digits.to_usize().unwrap_or(0)); + let bits = self.bits(); + let max_bits = bits / 2 + 1; - let mut digits = u.clone(); + #[cfg(feature = "std")] + let guess = match self.to_f64() { + Some(f) if f.is_finite() => { + use num_traits::FromPrimitive; - let (base, power) = get_radix_base(radix, big_digit::HALF_BITS); - let radix = radix as BigDigit; + // We fit in `f64` (lossy), so get a better initial guess from that. + BigUint::from_f64(f.sqrt()).unwrap() + } + _ => { + // Try to guess by scaling down such that it does fit in `f64`. 
+ // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ) + let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1); + let root_scale = (extra_bits + 1) / 2; + let scale = root_scale * 2; + (self >> scale).sqrt() << root_scale + } + }; - while digits.data.len() > 1 { - let (q, mut r) = div_rem_digit(digits, base); - for _ in 0..power { - res.push((r % radix) as u8); - r /= radix; - } - digits = q; - } + #[cfg(not(feature = "std"))] + let guess = BigUint::one() << max_bits; - let mut r = digits.data[0]; - while r != 0 { - res.push((r % radix) as u8); - r /= radix; + fixpoint(guess, max_bits, move |s| { + let q = self / s; + let t = s + q; + t >> 1 + }) } - res -} + fn cbrt(&self) -> Self { + if self.is_zero() || self.is_one() { + return self.clone(); + } -pub(crate) fn to_radix_le(u: &BigUint, radix: u32) -> Vec { - if u.is_zero() { - vec![0] - } else if radix.is_power_of_two() { - // Powers of two can use bitwise masks and shifting instead of division - let bits = ilog2(radix); - if big_digit::BITS % bits == 0 { - to_bitwise_digits_le(u, bits) - } else { - to_inexact_bitwise_digits_le(u, bits) + // If we fit in `u64`, compute the root that way. + if let Some(x) = self.to_u64() { + return x.cbrt().into(); } - } else if radix == 10 { - // 10 is so common that it's worth separating out for const-propagation. - // Optimizers can often turn constant division into a faster multiplication. - to_radix_digits_le(u, 10) - } else { - to_radix_digits_le(u, radix) - } -} -pub(crate) fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec { - assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); + let bits = self.bits(); + let max_bits = bits / 3 + 1; + + #[cfg(feature = "std")] + let guess = match self.to_f64() { + Some(f) if f.is_finite() => { + use num_traits::FromPrimitive; - if u.is_zero() { - return vec![b'0']; - } + // We fit in `f64` (lossy), so get a better initial guess from that. + BigUint::from_f64(f.cbrt()).unwrap() + } + _ => { + // Try to guess by scaling down such that it does fit in `f64`. + // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ) + let extra_bits = bits - (core::f64::MAX_EXP as u64 - 1); + let root_scale = (extra_bits + 2) / 3; + let scale = root_scale * 3; + (self >> scale).cbrt() << root_scale + } + }; - let mut res = to_radix_le(u, radix); + #[cfg(not(feature = "std"))] + let guess = BigUint::one() << max_bits; - // Now convert everything to ASCII digits. - for r in &mut res { - debug_assert!(u32::from(*r) < radix); - if *r < 10 { - *r += b'0'; - } else { - *r += b'a' - 10; - } + fixpoint(guess, max_bits, move |s| { + let q = self / (s * s); + let t = (s << 1) + q; + t / 3u32 + }) } - res +} + +/// A generic trait for converting a value to a `BigUint`. +pub trait ToBigUint { + /// Converts the value of `self` to a `BigUint`. + fn to_biguint(&self) -> Option; } /// Creates and initializes a `BigUint`. 
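The reworked Roots implementation above (nth_root/sqrt/cbrt, seeded from an f64 estimate under std and refined by the fixpoint Newton iteration) is exposed through num-integer's Roots trait, with all roots truncated toward zero. A short standalone sketch, illustrative only and not part of the vendored file:

use num_bigint::BigUint;
use num_integer::Roots;

fn main() {
    let n = BigUint::from(1000u32);
    assert_eq!(n.sqrt(), BigUint::from(31u32)); // floor: 31^2 = 961 <= 1000 < 32^2 = 1024
    assert_eq!(n.cbrt(), BigUint::from(10u32)); // exact: 10^3 = 1000
    assert_eq!(n.nth_root(5), BigUint::from(3u32)); // 3^5 = 243 <= 1000 < 4^5 = 1024
}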
@@ -2316,14 +556,7 @@ impl BigUint { self.data.extend_from_slice(slice); #[cfg(u64_digit)] - self.data.extend(slice.chunks(2).map(|chunk| { - // raw could have odd length - let mut digit = BigDigit::from(chunk[0]); - if let Some(&hi) = chunk.get(1) { - digit |= BigDigit::from(hi) << 32; - } - digit - })); + self.data.extend(slice.chunks(2).map(u32_chunk_to_u64)); self.normalize(); } @@ -2365,7 +598,7 @@ impl BigUint { if bytes.is_empty() { Zero::zero() } else { - from_bitwise_digits_le(bytes, 8) + convert::from_bitwise_digits_le(bytes, 8) } } @@ -2408,30 +641,7 @@ impl BigUint { /// assert_eq!(a.to_radix_be(190), inbase190); /// ``` pub fn from_radix_be(buf: &[u8], radix: u32) -> Option { - assert!( - 2 <= radix && radix <= 256, - "The radix must be within 2...256" - ); - - if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { - return None; - } - - let res = if radix.is_power_of_two() { - // Powers of two can use bitwise masks and shifting instead of multiplication - let bits = ilog2(radix); - let mut v = Vec::from(buf); - v.reverse(); - if big_digit::BITS % bits == 0 { - from_bitwise_digits_le(&v, bits) - } else { - from_inexact_bitwise_digits_le(&v, bits) - } - } else { - from_radix_digits_be(buf, radix) - }; - - Some(res) + convert::from_radix_be(buf, radix) } /// Creates and initializes a `BigUint`. Each u8 of the input slice is @@ -2451,30 +661,7 @@ impl BigUint { /// assert_eq!(a.to_radix_be(190), inbase190); /// ``` pub fn from_radix_le(buf: &[u8], radix: u32) -> Option { - assert!( - 2 <= radix && radix <= 256, - "The radix must be within 2...256" - ); - - if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { - return None; - } - - let res = if radix.is_power_of_two() { - // Powers of two can use bitwise masks and shifting instead of multiplication - let bits = ilog2(radix); - if big_digit::BITS % bits == 0 { - from_bitwise_digits_le(buf, bits) - } else { - from_inexact_bitwise_digits_le(buf, bits) - } - } else { - let mut v = Vec::from(buf); - v.reverse(); - from_radix_digits_be(&v, radix) - }; - - Some(res) + convert::from_radix_le(buf, radix) } /// Returns the byte representation of the `BigUint` in big-endian byte order. @@ -2509,7 +696,7 @@ impl BigUint { if self.is_zero() { vec![0] } else { - to_bitwise_digits_le(self, 8) + convert::to_bitwise_digits_le(self, 8) } } @@ -2528,30 +715,63 @@ impl BigUint { /// ``` #[inline] pub fn to_u32_digits(&self) -> Vec { - let mut digits = Vec::new(); + self.iter_u32_digits().collect() + } - #[cfg(not(u64_digit))] - digits.clone_from(&self.data); + /// Returns the `u64` digits representation of the `BigUint` ordered least significant digit + /// first. 
+ /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from(1125u32).to_u64_digits(), vec![1125]); + /// assert_eq!(BigUint::from(4294967295u32).to_u64_digits(), vec![4294967295]); + /// assert_eq!(BigUint::from(4294967296u64).to_u64_digits(), vec![4294967296]); + /// assert_eq!(BigUint::from(112500000000u64).to_u64_digits(), vec![112500000000]); + /// assert_eq!(BigUint::from(1u128 << 64).to_u64_digits(), vec![0, 1]); + /// ``` + #[inline] + pub fn to_u64_digits(&self) -> Vec { + self.iter_u64_digits().collect() + } - #[cfg(u64_digit)] - { - if let Some((&last, data)) = self.data.split_last() { - let last_lo = last as u32; - let last_hi = (last >> 32) as u32; - let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize; - digits.reserve_exact(u32_len); - for &x in data { - digits.push(x as u32); - digits.push((x >> 32) as u32); - } - digits.push(last_lo); - if last_hi != 0 { - digits.push(last_hi); - } - } - } + /// Returns an iterator of `u32` digits representation of the `BigUint` ordered least + /// significant digit first. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from(1125u32).iter_u32_digits().collect::>(), vec![1125]); + /// assert_eq!(BigUint::from(4294967295u32).iter_u32_digits().collect::>(), vec![4294967295]); + /// assert_eq!(BigUint::from(4294967296u64).iter_u32_digits().collect::>(), vec![0, 1]); + /// assert_eq!(BigUint::from(112500000000u64).iter_u32_digits().collect::>(), vec![830850304, 26]); + /// ``` + #[inline] + pub fn iter_u32_digits(&self) -> U32Digits<'_> { + U32Digits::new(self.data.as_slice()) + } - digits + /// Returns an iterator of `u64` digits representation of the `BigUint` ordered least + /// significant digit first. + /// + /// # Examples + /// + /// ``` + /// use num_bigint::BigUint; + /// + /// assert_eq!(BigUint::from(1125u32).iter_u64_digits().collect::>(), vec![1125]); + /// assert_eq!(BigUint::from(4294967295u32).iter_u64_digits().collect::>(), vec![4294967295]); + /// assert_eq!(BigUint::from(4294967296u64).iter_u64_digits().collect::>(), vec![4294967296]); + /// assert_eq!(BigUint::from(112500000000u64).iter_u64_digits().collect::>(), vec![112500000000]); + /// assert_eq!(BigUint::from(1u128 << 64).iter_u64_digits().collect::>(), vec![0, 1]); + /// ``` + #[inline] + pub fn iter_u64_digits(&self) -> U64Digits<'_> { + U64Digits::new(self.data.as_slice()) } /// Returns the integer formatted as a string in the given radix. @@ -2588,7 +808,7 @@ impl BigUint { /// ``` #[inline] pub fn to_radix_be(&self, radix: u32) -> Vec { - let mut v = to_radix_le(self, radix); + let mut v = convert::to_radix_le(self, radix); v.reverse(); v } @@ -2609,7 +829,7 @@ impl BigUint { /// ``` #[inline] pub fn to_radix_le(&self, radix: u32) -> Vec { - to_radix_le(self, radix) + convert::to_radix_le(self, radix) } /// Determines the fewest bits necessary to express the `BigUint`. @@ -2626,8 +846,9 @@ impl BigUint { /// be nonzero. #[inline] fn normalize(&mut self) { - while let Some(&0) = self.data.last() { - self.data.pop(); + if let Some(&0) = self.data.last() { + let len = self.data.iter().rposition(|&d| d != 0).map_or(0, |i| i + 1); + self.data.truncate(len); } if self.data.len() < self.data.capacity() / 4 { self.data.shrink_to_fit(); @@ -2650,18 +871,7 @@ impl BigUint { /// /// Panics if the modulus is zero. pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self { - assert!( - !modulus.is_zero(), - "attempt to calculate with zero modulus!" 
- ); - - if modulus.is_odd() { - // For an odd modulus, we can use Montgomery multiplication in base 2^32. - monty_modpow(self, exponent, modulus) - } else { - // Otherwise do basically the same as `num::pow`, but with a modulus. - plain_modpow(self, &exponent.data, modulus) - } + power::modpow(self, exponent, modulus) } /// Returns the truncated principal square root of `self` -- @@ -2689,113 +899,63 @@ impl BigUint { let zeros: u64 = self.data[i].trailing_zeros().into(); Some(i as u64 * u64::from(big_digit::BITS) + zeros) } -} - -fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint { - assert!( - !modulus.is_zero(), - "attempt to calculate with zero modulus!" - ); - - let i = match exp_data.iter().position(|&r| r != 0) { - None => return BigUint::one(), - Some(i) => i, - }; - let mut base = base % modulus; - for _ in 0..i { - for _ in 0..big_digit::BITS { - base = &base * &base % modulus; + /// Returns the number of least-significant bits that are ones. + pub fn trailing_ones(&self) -> u64 { + if let Some(i) = self.data.iter().position(|&digit| !digit != 0) { + // XXX u64::trailing_ones() introduced in Rust 1.46, + // but we need to be compatible further back. + // Thanks to cuviper for this workaround. + let ones: u64 = (!self.data[i]).trailing_zeros().into(); + i as u64 * u64::from(big_digit::BITS) + ones + } else { + self.data.len() as u64 * u64::from(big_digit::BITS) } } - let mut r = exp_data[i]; - let mut b = 0u8; - while r.is_even() { - base = &base * &base % modulus; - r >>= 1; - b += 1; - } - - let mut exp_iter = exp_data[i + 1..].iter(); - if exp_iter.len() == 0 && r.is_one() { - return base; + /// Returns the number of one bits. + pub fn count_ones(&self) -> u64 { + self.data.iter().map(|&d| u64::from(d.count_ones())).sum() } - let mut acc = base.clone(); - r >>= 1; - b += 1; - - { - let mut unit = |exp_is_odd| { - base = &base * &base % modulus; - if exp_is_odd { - acc = &acc * &base % modulus; - } - }; - - if let Some(&last) = exp_iter.next_back() { - // consume exp_data[i] - for _ in b..big_digit::BITS { - unit(r.is_odd()); - r >>= 1; - } - - // consume all other digits before the last - for &r in exp_iter { - let mut r = r; - for _ in 0..big_digit::BITS { - unit(r.is_odd()); - r >>= 1; - } + /// Returns whether the bit in the given position is set + pub fn bit(&self, bit: u64) -> bool { + let bits_per_digit = u64::from(big_digit::BITS); + if let Some(digit_index) = (bit / bits_per_digit).to_usize() { + if let Some(digit) = self.data.get(digit_index) { + let bit_mask = (1 as BigDigit) << (bit % bits_per_digit); + return (digit & bit_mask) != 0; } - r = last; } + false + } - debug_assert_ne!(r, 0); - while !r.is_zero() { - unit(r.is_odd()); - r >>= 1; + /// Sets or clears the bit in the given position + /// + /// Note that setting a bit greater than the current bit length, a reallocation may be needed + /// to store the new digits + pub fn set_bit(&mut self, bit: u64, value: bool) { + // Note: we're saturating `digit_index` and `new_len` -- any such case is guaranteed to + // fail allocation, and that's more consistent than adding our own overflow panics. 
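+        // (Editor's note, not part of the upstream patch: a minimal sketch of the
+        // `bit`/`set_bit` pair defined here, assuming a `BigUint` built from a small
+        // integer:
+        //     let mut n = BigUint::from(0b1010u32);
+        //     assert!(n.bit(1) && !n.bit(0));
+        //     n.set_bit(0, true);  // n == 0b1011
+        //     n.set_bit(1, false); // n == 0b1001
+        //     assert_eq!(n, BigUint::from(0b1001u32));
+        // Clearing a bit may zero the top digit, hence the `normalize()` call below.)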
+ let bits_per_digit = u64::from(big_digit::BITS); + let digit_index = (bit / bits_per_digit) + .to_usize() + .unwrap_or(core::usize::MAX); + let bit_mask = (1 as BigDigit) << (bit % bits_per_digit); + if value { + if digit_index >= self.data.len() { + let new_len = digit_index.saturating_add(1); + self.data.resize(new_len, 0); + } + self.data[digit_index] |= bit_mask; + } else if digit_index < self.data.len() { + self.data[digit_index] &= !bit_mask; + // the top bit may have been cleared, so normalize + self.normalize(); } } - acc -} - -#[test] -fn test_plain_modpow() { - let two = &BigUint::from(2u32); - let modulus = BigUint::from(0x1100u32); - - let exp = vec![0, 0b1]; - assert_eq!( - two.pow(0b1_00000000_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); - let exp = vec![0, 0b10]; - assert_eq!( - two.pow(0b10_00000000_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); - let exp = vec![0, 0b110010]; - assert_eq!( - two.pow(0b110010_00000000_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); - let exp = vec![0b1, 0b1]; - assert_eq!( - two.pow(0b1_00000001_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); - let exp = vec![0b1100, 0, 0b1]; - assert_eq!( - two.pow(0b1_00000000_00001100_u32) % &modulus, - plain_modpow(&two, &exp, &modulus) - ); } -impl_sum_iter_type!(BigUint); -impl_product_iter_type!(BigUint); - pub(crate) trait IntDigits { fn digits(&self) -> &[BigDigit]; fn digits_mut(&mut self) -> &mut Vec; @@ -2827,6 +987,17 @@ impl IntDigits for BigUint { } } +/// Convert a u32 chunk (len is either 1 or 2) to a single u64 digit +#[inline] +fn u32_chunk_to_u64(chunk: &[u32]) -> u64 { + // raw could have odd length + let mut digit = chunk[0] as u64; + if let Some(&hi) = chunk.get(1) { + digit |= (hi as u64) << 32; + } + digit +} + /// Combine four `u32`s into a single `u128`. #[cfg(any(test, not(u64_digit)))] #[inline] @@ -2846,138 +1017,6 @@ fn u32_from_u128(n: u128) -> (u32, u32, u32, u32) { ) } -#[cfg(feature = "serde")] -impl serde::Serialize for BigUint { - #[cfg(not(u64_digit))] - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - // Note: do not change the serialization format, or it may break forward - // and backward compatibility of serialized data! If we ever change the - // internal representation, we should still serialize in base-`u32`. 
- let data: &[u32] = &self.data; - data.serialize(serializer) - } - - #[cfg(u64_digit)] - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - use serde::ser::SerializeSeq; - if let Some((&last, data)) = self.data.split_last() { - let last_lo = last as u32; - let last_hi = (last >> 32) as u32; - let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize; - let mut seq = serializer.serialize_seq(Some(u32_len))?; - for &x in data { - seq.serialize_element(&(x as u32))?; - seq.serialize_element(&((x >> 32) as u32))?; - } - seq.serialize_element(&last_lo)?; - if last_hi != 0 { - seq.serialize_element(&last_hi)?; - } - seq.end() - } else { - let data: &[u32] = &[]; - data.serialize(serializer) - } - } -} - -#[cfg(feature = "serde")] -impl<'de> serde::Deserialize<'de> for BigUint { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - use serde::de::{SeqAccess, Visitor}; - - struct U32Visitor; - - impl<'de> Visitor<'de> for U32Visitor { - type Value = BigUint; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a sequence of unsigned 32-bit numbers") - } - - #[cfg(not(u64_digit))] - fn visit_seq(self, mut seq: S) -> Result - where - S: SeqAccess<'de>, - { - let len = seq.size_hint().unwrap_or(0); - let mut data = Vec::with_capacity(len); - - while let Some(value) = seq.next_element::()? { - data.push(value); - } - - Ok(biguint_from_vec(data)) - } - - #[cfg(u64_digit)] - fn visit_seq(self, mut seq: S) -> Result - where - S: SeqAccess<'de>, - { - let u32_len = seq.size_hint().unwrap_or(0); - let len = u32_len.div_ceil(&2); - let mut data = Vec::with_capacity(len); - - while let Some(lo) = seq.next_element::()? { - let mut value = BigDigit::from(lo); - if let Some(hi) = seq.next_element::()? { - value |= BigDigit::from(hi) << 32; - data.push(value); - } else { - data.push(value); - break; - } - } - - Ok(biguint_from_vec(data)) - } - } - - deserializer.deserialize_seq(U32Visitor) - } -} - -/// Returns the greatest power of the radix for the given bit size -#[inline] -fn get_radix_base(radix: u32, bits: u8) -> (BigDigit, usize) { - mod gen { - include! 
{ concat!(env!("OUT_DIR"), "/radix_bases.rs") } - } - - debug_assert!( - 2 <= radix && radix <= 256, - "The radix must be within 2...256" - ); - debug_assert!(!radix.is_power_of_two()); - debug_assert!(bits <= big_digit::BITS); - - match bits { - 16 => { - let (base, power) = gen::BASES_16[radix as usize]; - (base as BigDigit, power) - } - 32 => { - let (base, power) = gen::BASES_32[radix as usize]; - (base as BigDigit, power) - } - 64 => { - let (base, power) = gen::BASES_64[radix as usize]; - (base as BigDigit, power) - } - _ => panic!("Invalid bigdigit size"), - } -} - #[cfg(not(u64_digit))] #[test] fn test_from_slice() { @@ -3062,11 +1101,3 @@ fn test_u128_u32_roundtrip() { assert_eq!(u32_to_u128(a, b, c, d), *val); } } - -#[test] -fn test_pow_biguint() { - let base = BigUint::from(5u8); - let exponent = BigUint::from(3u8); - - assert_eq!(BigUint::from(125u8), base.pow(exponent)); -} diff --git a/src/rust/vendor/num-bigint/src/biguint/addition.rs b/src/rust/vendor/num-bigint/src/biguint/addition.rs new file mode 100644 index 000000000..e54f8cb14 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/addition.rs @@ -0,0 +1,254 @@ +#[cfg(not(u64_digit))] +use super::u32_from_u128; +use super::{BigUint, IntDigits}; + +use crate::big_digit::{self, BigDigit}; +use crate::UsizePromotion; + +use core::iter::Sum; +use core::ops::{Add, AddAssign}; +use num_traits::{CheckedAdd, Zero}; + +#[cfg(all(use_addcarry, target_arch = "x86_64"))] +use core::arch::x86_64 as arch; + +#[cfg(all(use_addcarry, target_arch = "x86"))] +use core::arch::x86 as arch; + +// Add with carry: +#[cfg(all(use_addcarry, u64_digit))] +#[inline] +fn adc(carry: u8, a: u64, b: u64, out: &mut u64) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_addcarry_u64`. + // It's just unsafe for API consistency with other intrinsics. + unsafe { arch::_addcarry_u64(carry, a, b, out) } +} + +#[cfg(all(use_addcarry, not(u64_digit)))] +#[inline] +fn adc(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_addcarry_u32`. + // It's just unsafe for API consistency with other intrinsics. + unsafe { arch::_addcarry_u32(carry, a, b, out) } +} + +// fallback for environments where we don't have an addcarry intrinsic +#[cfg(not(use_addcarry))] +#[inline] +fn adc(carry: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 { + use crate::big_digit::DoubleBigDigit; + + let sum = DoubleBigDigit::from(a) + DoubleBigDigit::from(b) + DoubleBigDigit::from(carry); + *out = sum as BigDigit; + (sum >> big_digit::BITS) as u8 +} + +/// Two argument addition of raw slices, `a += b`, returning the carry. +/// +/// This is used when the data `Vec` might need to resize to push a non-zero carry, so we perform +/// the addition first hoping that it will fit. +/// +/// The caller _must_ ensure that `a` is at least as long as `b`. +#[inline] +pub(super) fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit { + debug_assert!(a.len() >= b.len()); + + let mut carry = 0; + let (a_lo, a_hi) = a.split_at_mut(b.len()); + + for (a, b) in a_lo.iter_mut().zip(b) { + carry = adc(carry, *a, *b, a); + } + + if carry != 0 { + for a in a_hi { + carry = adc(carry, *a, 0, a); + if carry == 0 { + break; + } + } + } + + carry as BigDigit +} + +/// Two argument addition of raw slices: +/// a += b +/// +/// The caller _must_ ensure that a is big enough to store the result - typically this means +/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry. 
+pub(super) fn add2(a: &mut [BigDigit], b: &[BigDigit]) { + let carry = __add2(a, b); + + debug_assert!(carry == 0); +} + +forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add); +forward_val_assign!(impl AddAssign for BigUint, add_assign); + +impl<'a> Add<&'a BigUint> for BigUint { + type Output = BigUint; + + fn add(mut self, other: &BigUint) -> BigUint { + self += other; + self + } +} +impl<'a> AddAssign<&'a BigUint> for BigUint { + #[inline] + fn add_assign(&mut self, other: &BigUint) { + let self_len = self.data.len(); + let carry = if self_len < other.data.len() { + let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]); + self.data.extend_from_slice(&other.data[self_len..]); + __add2(&mut self.data[self_len..], &[lo_carry]) + } else { + __add2(&mut self.data[..], &other.data[..]) + }; + if carry != 0 { + self.data.push(carry); + } + } +} + +promote_unsigned_scalars!(impl Add for BigUint, add); +promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); +forward_all_scalar_binop_to_val_val_commutative!(impl Add for BigUint, add); + +impl Add for BigUint { + type Output = BigUint; + + #[inline] + fn add(mut self, other: u32) -> BigUint { + self += other; + self + } +} + +impl AddAssign for BigUint { + #[inline] + fn add_assign(&mut self, other: u32) { + if other != 0 { + if self.data.is_empty() { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[other as BigDigit]); + if carry != 0 { + self.data.push(carry); + } + } + } +} + +impl Add for BigUint { + type Output = BigUint; + + #[inline] + fn add(mut self, other: u64) -> BigUint { + self += other; + self + } +} + +impl AddAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn add_assign(&mut self, other: u64) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + if hi == 0 { + *self += lo; + } else { + while self.data.len() < 2 { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[lo, hi]); + if carry != 0 { + self.data.push(carry); + } + } + } + + #[cfg(u64_digit)] + #[inline] + fn add_assign(&mut self, other: u64) { + if other != 0 { + if self.data.is_empty() { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[other as BigDigit]); + if carry != 0 { + self.data.push(carry); + } + } + } +} + +impl Add for BigUint { + type Output = BigUint; + + #[inline] + fn add(mut self, other: u128) -> BigUint { + self += other; + self + } +} + +impl AddAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn add_assign(&mut self, other: u128) { + if other <= u128::from(u64::max_value()) { + *self += other as u64 + } else { + let (a, b, c, d) = u32_from_u128(other); + let carry = if a > 0 { + while self.data.len() < 4 { + self.data.push(0); + } + __add2(&mut self.data, &[d, c, b, a]) + } else { + debug_assert!(b > 0); + while self.data.len() < 3 { + self.data.push(0); + } + __add2(&mut self.data, &[d, c, b]) + }; + + if carry != 0 { + self.data.push(carry); + } + } + } + + #[cfg(u64_digit)] + #[inline] + fn add_assign(&mut self, other: u128) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + if hi == 0 { + *self += lo; + } else { + while self.data.len() < 2 { + self.data.push(0); + } + + let carry = __add2(&mut self.data, &[lo, hi]); + if carry != 0 { + self.data.push(carry); + } + } + } +} + +impl CheckedAdd for BigUint { + #[inline] + fn checked_add(&self, v: &BigUint) -> Option { 
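+        // (Editor's note, not part of the upstream patch: unsigned big-integer addition
+        // cannot overflow, so `checked_add` is always `Some`; a hedged sketch of its use
+        // through the `num_traits::CheckedAdd` trait:
+        //     use num_traits::CheckedAdd;
+        //     let a = BigUint::from(u64::MAX);
+        //     assert_eq!(a.checked_add(&BigUint::from(1u32)),
+        //                Some(BigUint::from(u64::MAX) + 1u32));
+        // The impl exists only for API parity with the fixed-width integer types.)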
+ Some(self.add(v)) + } +} + +impl_sum_iter_type!(BigUint); diff --git a/src/rust/vendor/num-bigint/src/biguint/arbitrary.rs b/src/rust/vendor/num-bigint/src/biguint/arbitrary.rs new file mode 100644 index 000000000..6fa91c0f8 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/arbitrary.rs @@ -0,0 +1,34 @@ +use super::{biguint_from_vec, BigUint}; + +use crate::big_digit::BigDigit; +#[cfg(feature = "quickcheck")] +use crate::std_alloc::Box; +use crate::std_alloc::Vec; + +#[cfg(feature = "quickcheck")] +impl quickcheck::Arbitrary for BigUint { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + // Use arbitrary from Vec + biguint_from_vec(Vec::::arbitrary(g)) + } + + fn shrink(&self) -> Box> { + // Use shrinker from Vec + Box::new(self.data.shrink().map(biguint_from_vec)) + } +} + +#[cfg(feature = "arbitrary")] +impl arbitrary::Arbitrary<'_> for BigUint { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(biguint_from_vec(Vec::::arbitrary(u)?)) + } + + fn arbitrary_take_rest(u: arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(biguint_from_vec(Vec::::arbitrary_take_rest(u)?)) + } + + fn size_hint(depth: usize) -> (usize, Option) { + Vec::::size_hint(depth) + } +} diff --git a/src/rust/vendor/num-bigint/src/biguint/bits.rs b/src/rust/vendor/num-bigint/src/biguint/bits.rs new file mode 100644 index 000000000..58c755a6d --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/bits.rs @@ -0,0 +1,93 @@ +use super::{BigUint, IntDigits}; + +use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign}; + +forward_val_val_binop!(impl BitAnd for BigUint, bitand); +forward_ref_val_binop!(impl BitAnd for BigUint, bitand); + +// do not use forward_ref_ref_binop_commutative! for bitand so that we can +// clone the smaller value rather than the larger, avoiding over-allocation +impl<'a, 'b> BitAnd<&'b BigUint> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn bitand(self, other: &BigUint) -> BigUint { + // forward to val-ref, choosing the smaller to clone + if self.data.len() <= other.data.len() { + self.clone() & other + } else { + other.clone() & self + } + } +} + +forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign); + +impl<'a> BitAnd<&'a BigUint> for BigUint { + type Output = BigUint; + + #[inline] + fn bitand(mut self, other: &BigUint) -> BigUint { + self &= other; + self + } +} +impl<'a> BitAndAssign<&'a BigUint> for BigUint { + #[inline] + fn bitand_assign(&mut self, other: &BigUint) { + for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { + *ai &= bi; + } + self.data.truncate(other.data.len()); + self.normalize(); + } +} + +forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor); +forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign); + +impl<'a> BitOr<&'a BigUint> for BigUint { + type Output = BigUint; + + fn bitor(mut self, other: &BigUint) -> BigUint { + self |= other; + self + } +} +impl<'a> BitOrAssign<&'a BigUint> for BigUint { + #[inline] + fn bitor_assign(&mut self, other: &BigUint) { + for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { + *ai |= bi; + } + if other.data.len() > self.data.len() { + let extra = &other.data[self.data.len()..]; + self.data.extend(extra.iter().cloned()); + } + } +} + +forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor); +forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign); + +impl<'a> BitXor<&'a BigUint> for BigUint { + type Output = BigUint; + + fn bitxor(mut self, other: &BigUint) -> 
BigUint { + self ^= other; + self + } +} +impl<'a> BitXorAssign<&'a BigUint> for BigUint { + #[inline] + fn bitxor_assign(&mut self, other: &BigUint) { + for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) { + *ai ^= bi; + } + if other.data.len() > self.data.len() { + let extra = &other.data[self.data.len()..]; + self.data.extend(extra.iter().cloned()); + } + self.normalize(); + } +} diff --git a/src/rust/vendor/num-bigint/src/biguint/convert.rs b/src/rust/vendor/num-bigint/src/biguint/convert.rs new file mode 100644 index 000000000..5cf05cb6b --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/convert.rs @@ -0,0 +1,784 @@ +use super::{biguint_from_vec, BigUint, ToBigUint}; + +use super::addition::add2; +use super::division::div_rem_digit; +use super::multiplication::mac_with_carry; + +use crate::big_digit::{self, BigDigit}; +use crate::std_alloc::Vec; +use crate::ParseBigIntError; +#[cfg(has_try_from)] +use crate::TryFromBigIntError; + +use core::cmp::Ordering::{Equal, Greater, Less}; +#[cfg(has_try_from)] +use core::convert::TryFrom; +use core::mem; +use core::str::FromStr; +use num_integer::{Integer, Roots}; +use num_traits::float::FloatCore; +use num_traits::{FromPrimitive, Num, PrimInt, ToPrimitive, Zero}; + +/// Find last set bit +/// fls(0) == 0, fls(u32::MAX) == 32 +fn fls(v: T) -> u8 { + mem::size_of::() as u8 * 8 - v.leading_zeros() as u8 +} + +fn ilog2(v: T) -> u8 { + fls(v) - 1 +} + +impl FromStr for BigUint { + type Err = ParseBigIntError; + + #[inline] + fn from_str(s: &str) -> Result { + BigUint::from_str_radix(s, 10) + } +} + +// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides +// BigDigit::BITS +pub(super) fn from_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { + debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0); + debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); + + let digits_per_big_digit = big_digit::BITS / bits; + + let data = v + .chunks(digits_per_big_digit.into()) + .map(|chunk| { + chunk + .iter() + .rev() + .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c)) + }) + .collect(); + + biguint_from_vec(data) +} + +// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide +// BigDigit::BITS +fn from_inexact_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint { + debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0); + debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits))); + + let total_bits = (v.len() as u64).saturating_mul(bits.into()); + let big_digits = Integer::div_ceil(&total_bits, &big_digit::BITS.into()) + .to_usize() + .unwrap_or(core::usize::MAX); + let mut data = Vec::with_capacity(big_digits); + + let mut d = 0; + let mut dbits = 0; // number of bits we currently have in d + + // walk v accumululating bits in d; whenever we accumulate big_digit::BITS in d, spit out a + // big_digit: + for &c in v { + d |= BigDigit::from(c) << dbits; + dbits += bits; + + if dbits >= big_digit::BITS { + data.push(d); + dbits -= big_digit::BITS; + // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit + // in d) - grab the bits we lost here: + d = BigDigit::from(c) >> (bits - dbits); + } + } + + if dbits > 0 { + debug_assert!(dbits < big_digit::BITS); + data.push(d as BigDigit); + } + + biguint_from_vec(data) +} + +// Read little-endian radix digits +fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint { + debug_assert!(!v.is_empty() && !radix.is_power_of_two()); + 
debug_assert!(v.iter().all(|&c| u32::from(c) < radix)); + + #[cfg(feature = "std")] + let radix_log2 = f64::from(radix).log2(); + #[cfg(not(feature = "std"))] + let radix_log2 = ilog2(radix.next_power_of_two()) as f64; + + // Estimate how big the result will be, so we can pre-allocate it. + let bits = radix_log2 * v.len() as f64; + let big_digits = (bits / big_digit::BITS as f64).ceil(); + let mut data = Vec::with_capacity(big_digits.to_usize().unwrap_or(0)); + + let (base, power) = get_radix_base(radix, big_digit::BITS); + let radix = radix as BigDigit; + + let r = v.len() % power; + let i = if r == 0 { power } else { r }; + let (head, tail) = v.split_at(i); + + let first = head + .iter() + .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); + data.push(first); + + debug_assert!(tail.len() % power == 0); + for chunk in tail.chunks(power) { + if data.last() != Some(&0) { + data.push(0); + } + + let mut carry = 0; + for d in data.iter_mut() { + *d = mac_with_carry(0, *d, base, &mut carry); + } + debug_assert!(carry == 0); + + let n = chunk + .iter() + .fold(0, |acc, &d| acc * radix + BigDigit::from(d)); + add2(&mut data, &[n]); + } + + biguint_from_vec(data) +} + +pub(super) fn from_radix_be(buf: &[u8], radix: u32) -> Option { + assert!( + 2 <= radix && radix <= 256, + "The radix must be within 2...256" + ); + + if buf.is_empty() { + return Some(Zero::zero()); + } + + if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { + return None; + } + + let res = if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of multiplication + let bits = ilog2(radix); + let mut v = Vec::from(buf); + v.reverse(); + if big_digit::BITS % bits == 0 { + from_bitwise_digits_le(&v, bits) + } else { + from_inexact_bitwise_digits_le(&v, bits) + } + } else { + from_radix_digits_be(buf, radix) + }; + + Some(res) +} + +pub(super) fn from_radix_le(buf: &[u8], radix: u32) -> Option { + assert!( + 2 <= radix && radix <= 256, + "The radix must be within 2...256" + ); + + if buf.is_empty() { + return Some(Zero::zero()); + } + + if radix != 256 && buf.iter().any(|&b| b >= radix as u8) { + return None; + } + + let res = if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of multiplication + let bits = ilog2(radix); + if big_digit::BITS % bits == 0 { + from_bitwise_digits_le(buf, bits) + } else { + from_inexact_bitwise_digits_le(buf, bits) + } + } else { + let mut v = Vec::from(buf); + v.reverse(); + from_radix_digits_be(&v, radix) + }; + + Some(res) +} + +impl Num for BigUint { + type FromStrRadixErr = ParseBigIntError; + + /// Creates and initializes a `BigUint`. + fn from_str_radix(s: &str, radix: u32) -> Result { + assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); + let mut s = s; + if s.starts_with('+') { + let tail = &s[1..]; + if !tail.starts_with('+') { + s = tail + } + } + + if s.is_empty() { + return Err(ParseBigIntError::empty()); + } + + if s.starts_with('_') { + // Must lead with a real digit! 
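+            // (Editor's note, not part of the upstream patch: underscores are accepted
+            // as visual separators anywhere except the first character, e.g.
+            //     use num_traits::Num;
+            //     assert!(BigUint::from_str_radix("1_000_000", 10).is_ok());
+            //     assert!(BigUint::from_str_radix("_1000", 10).is_err());
+            // which is why a leading '_' is rejected here.)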
+ return Err(ParseBigIntError::invalid()); + } + + // First normalize all characters to plain digit values + let mut v = Vec::with_capacity(s.len()); + for b in s.bytes() { + let d = match b { + b'0'..=b'9' => b - b'0', + b'a'..=b'z' => b - b'a' + 10, + b'A'..=b'Z' => b - b'A' + 10, + b'_' => continue, + _ => core::u8::MAX, + }; + if d < radix as u8 { + v.push(d); + } else { + return Err(ParseBigIntError::invalid()); + } + } + + let res = if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of multiplication + let bits = ilog2(radix); + v.reverse(); + if big_digit::BITS % bits == 0 { + from_bitwise_digits_le(&v, bits) + } else { + from_inexact_bitwise_digits_le(&v, bits) + } + } else { + from_radix_digits_be(&v, radix) + }; + Ok(res) + } +} + +fn high_bits_to_u64(v: &BigUint) -> u64 { + match v.data.len() { + 0 => 0, + 1 => { + // XXX Conversion is useless if already 64-bit. + #[allow(clippy::useless_conversion)] + let v0 = u64::from(v.data[0]); + v0 + } + _ => { + let mut bits = v.bits(); + let mut ret = 0u64; + let mut ret_bits = 0; + + for d in v.data.iter().rev() { + let digit_bits = (bits - 1) % u64::from(big_digit::BITS) + 1; + let bits_want = Ord::min(64 - ret_bits, digit_bits); + + if bits_want != 64 { + ret <<= bits_want; + } + // XXX Conversion is useless if already 64-bit. + #[allow(clippy::useless_conversion)] + let d0 = u64::from(*d) >> (digit_bits - bits_want); + ret |= d0; + ret_bits += bits_want; + bits -= bits_want; + + if ret_bits == 64 { + break; + } + } + + ret + } + } +} + +impl ToPrimitive for BigUint { + #[inline] + fn to_i64(&self) -> Option { + self.to_u64().as_ref().and_then(u64::to_i64) + } + + #[inline] + fn to_i128(&self) -> Option { + self.to_u128().as_ref().and_then(u128::to_i128) + } + + #[allow(clippy::useless_conversion)] + #[inline] + fn to_u64(&self) -> Option { + let mut ret: u64 = 0; + let mut bits = 0; + + for i in self.data.iter() { + if bits >= 64 { + return None; + } + + // XXX Conversion is useless if already 64-bit. + ret += u64::from(*i) << bits; + bits += big_digit::BITS; + } + + Some(ret) + } + + #[inline] + fn to_u128(&self) -> Option { + let mut ret: u128 = 0; + let mut bits = 0; + + for i in self.data.iter() { + if bits >= 128 { + return None; + } + + ret |= u128::from(*i) << bits; + bits += big_digit::BITS; + } + + Some(ret) + } + + #[inline] + fn to_f32(&self) -> Option { + let mantissa = high_bits_to_u64(self); + let exponent = self.bits() - u64::from(fls(mantissa)); + + if exponent > core::f32::MAX_EXP as u64 { + Some(core::f32::INFINITY) + } else { + Some((mantissa as f32) * 2.0f32.powi(exponent as i32)) + } + } + + #[inline] + fn to_f64(&self) -> Option { + let mantissa = high_bits_to_u64(self); + let exponent = self.bits() - u64::from(fls(mantissa)); + + if exponent > core::f64::MAX_EXP as u64 { + Some(core::f64::INFINITY) + } else { + Some((mantissa as f64) * 2.0f64.powi(exponent as i32)) + } + } +} + +macro_rules! 
impl_try_from_biguint { + ($T:ty, $to_ty:path) => { + #[cfg(has_try_from)] + impl TryFrom<&BigUint> for $T { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: &BigUint) -> Result<$T, TryFromBigIntError<()>> { + $to_ty(value).ok_or(TryFromBigIntError::new(())) + } + } + + #[cfg(has_try_from)] + impl TryFrom for $T { + type Error = TryFromBigIntError; + + #[inline] + fn try_from(value: BigUint) -> Result<$T, TryFromBigIntError> { + <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value)) + } + } + }; +} + +impl_try_from_biguint!(u8, ToPrimitive::to_u8); +impl_try_from_biguint!(u16, ToPrimitive::to_u16); +impl_try_from_biguint!(u32, ToPrimitive::to_u32); +impl_try_from_biguint!(u64, ToPrimitive::to_u64); +impl_try_from_biguint!(usize, ToPrimitive::to_usize); +impl_try_from_biguint!(u128, ToPrimitive::to_u128); + +impl_try_from_biguint!(i8, ToPrimitive::to_i8); +impl_try_from_biguint!(i16, ToPrimitive::to_i16); +impl_try_from_biguint!(i32, ToPrimitive::to_i32); +impl_try_from_biguint!(i64, ToPrimitive::to_i64); +impl_try_from_biguint!(isize, ToPrimitive::to_isize); +impl_try_from_biguint!(i128, ToPrimitive::to_i128); + +impl FromPrimitive for BigUint { + #[inline] + fn from_i64(n: i64) -> Option { + if n >= 0 { + Some(BigUint::from(n as u64)) + } else { + None + } + } + + #[inline] + fn from_i128(n: i128) -> Option { + if n >= 0 { + Some(BigUint::from(n as u128)) + } else { + None + } + } + + #[inline] + fn from_u64(n: u64) -> Option { + Some(BigUint::from(n)) + } + + #[inline] + fn from_u128(n: u128) -> Option { + Some(BigUint::from(n)) + } + + #[inline] + fn from_f64(mut n: f64) -> Option { + // handle NAN, INFINITY, NEG_INFINITY + if !n.is_finite() { + return None; + } + + // match the rounding of casting from float to int + n = n.trunc(); + + // handle 0.x, -0.x + if n.is_zero() { + return Some(BigUint::zero()); + } + + let (mantissa, exponent, sign) = FloatCore::integer_decode(n); + + if sign == -1 { + return None; + } + + let mut ret = BigUint::from(mantissa); + match exponent.cmp(&0) { + Greater => ret <<= exponent as usize, + Equal => {} + Less => ret >>= (-exponent) as usize, + } + Some(ret) + } +} + +impl From for BigUint { + #[inline] + fn from(mut n: u64) -> Self { + let mut ret: BigUint = Zero::zero(); + + while n != 0 { + ret.data.push(n as BigDigit); + // don't overflow if BITS is 64: + n = (n >> 1) >> (big_digit::BITS - 1); + } + + ret + } +} + +impl From for BigUint { + #[inline] + fn from(mut n: u128) -> Self { + let mut ret: BigUint = Zero::zero(); + + while n != 0 { + ret.data.push(n as BigDigit); + n >>= big_digit::BITS; + } + + ret + } +} + +macro_rules! impl_biguint_from_uint { + ($T:ty) => { + impl From<$T> for BigUint { + #[inline] + fn from(n: $T) -> Self { + BigUint::from(n as u64) + } + } + }; +} + +impl_biguint_from_uint!(u8); +impl_biguint_from_uint!(u16); +impl_biguint_from_uint!(u32); +impl_biguint_from_uint!(usize); + +macro_rules! 
impl_biguint_try_from_int { + ($T:ty, $from_ty:path) => { + #[cfg(has_try_from)] + impl TryFrom<$T> for BigUint { + type Error = TryFromBigIntError<()>; + + #[inline] + fn try_from(value: $T) -> Result> { + $from_ty(value).ok_or(TryFromBigIntError::new(())) + } + } + }; +} + +impl_biguint_try_from_int!(i8, FromPrimitive::from_i8); +impl_biguint_try_from_int!(i16, FromPrimitive::from_i16); +impl_biguint_try_from_int!(i32, FromPrimitive::from_i32); +impl_biguint_try_from_int!(i64, FromPrimitive::from_i64); +impl_biguint_try_from_int!(isize, FromPrimitive::from_isize); +impl_biguint_try_from_int!(i128, FromPrimitive::from_i128); + +impl ToBigUint for BigUint { + #[inline] + fn to_biguint(&self) -> Option { + Some(self.clone()) + } +} + +macro_rules! impl_to_biguint { + ($T:ty, $from_ty:path) => { + impl ToBigUint for $T { + #[inline] + fn to_biguint(&self) -> Option { + $from_ty(*self) + } + } + }; +} + +impl_to_biguint!(isize, FromPrimitive::from_isize); +impl_to_biguint!(i8, FromPrimitive::from_i8); +impl_to_biguint!(i16, FromPrimitive::from_i16); +impl_to_biguint!(i32, FromPrimitive::from_i32); +impl_to_biguint!(i64, FromPrimitive::from_i64); +impl_to_biguint!(i128, FromPrimitive::from_i128); + +impl_to_biguint!(usize, FromPrimitive::from_usize); +impl_to_biguint!(u8, FromPrimitive::from_u8); +impl_to_biguint!(u16, FromPrimitive::from_u16); +impl_to_biguint!(u32, FromPrimitive::from_u32); +impl_to_biguint!(u64, FromPrimitive::from_u64); +impl_to_biguint!(u128, FromPrimitive::from_u128); + +impl_to_biguint!(f32, FromPrimitive::from_f32); +impl_to_biguint!(f64, FromPrimitive::from_f64); + +// Extract bitwise digits that evenly divide BigDigit +pub(super) fn to_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { + debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0); + + let last_i = u.data.len() - 1; + let mask: BigDigit = (1 << bits) - 1; + let digits_per_big_digit = big_digit::BITS / bits; + let digits = Integer::div_ceil(&u.bits(), &u64::from(bits)) + .to_usize() + .unwrap_or(core::usize::MAX); + let mut res = Vec::with_capacity(digits); + + for mut r in u.data[..last_i].iter().cloned() { + for _ in 0..digits_per_big_digit { + res.push((r & mask) as u8); + r >>= bits; + } + } + + let mut r = u.data[last_i]; + while r != 0 { + res.push((r & mask) as u8); + r >>= bits; + } + + res +} + +// Extract bitwise digits that don't evenly divide BigDigit +fn to_inexact_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec { + debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0); + + let mask: BigDigit = (1 << bits) - 1; + let digits = Integer::div_ceil(&u.bits(), &u64::from(bits)) + .to_usize() + .unwrap_or(core::usize::MAX); + let mut res = Vec::with_capacity(digits); + + let mut r = 0; + let mut rbits = 0; + + for c in &u.data { + r |= *c << rbits; + rbits += big_digit::BITS; + + while rbits >= bits { + res.push((r & mask) as u8); + r >>= bits; + + // r had more bits than it could fit - grab the bits we lost + if rbits > big_digit::BITS { + r = *c >> (big_digit::BITS - (rbits - bits)); + } + + rbits -= bits; + } + } + + if rbits != 0 { + res.push(r as u8); + } + + while let Some(&0) = res.last() { + res.pop(); + } + + res +} + +// Extract little-endian radix digits +#[inline(always)] // forced inline to get const-prop for radix=10 +pub(super) fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec { + debug_assert!(!u.is_zero() && !radix.is_power_of_two()); + + #[cfg(feature = "std")] + let radix_log2 = f64::from(radix).log2(); + #[cfg(not(feature = "std"))] + let 
radix_log2 = ilog2(radix) as f64; + + // Estimate how big the result will be, so we can pre-allocate it. + let radix_digits = ((u.bits() as f64) / radix_log2).ceil(); + let mut res = Vec::with_capacity(radix_digits.to_usize().unwrap_or(0)); + + let mut digits = u.clone(); + + let (base, power) = get_radix_base(radix, big_digit::HALF_BITS); + let radix = radix as BigDigit; + + // For very large numbers, the O(n²) loop of repeated `div_rem_digit` dominates the + // performance. We can mitigate this by dividing into chunks of a larger base first. + // The threshold for this was chosen by anecdotal performance measurements to + // approximate where this starts to make a noticeable difference. + if digits.data.len() >= 64 { + let mut big_base = BigUint::from(base * base); + let mut big_power = 2usize; + + // Choose a target base length near √n. + let target_len = digits.data.len().sqrt(); + while big_base.data.len() < target_len { + big_base = &big_base * &big_base; + big_power *= 2; + } + + // This outer loop will run approximately √n times. + while digits > big_base { + // This is still the dominating factor, with n digits divided by √n digits. + let (q, mut big_r) = digits.div_rem(&big_base); + digits = q; + + // This inner loop now has O(√n²)=O(n) behavior altogether. + for _ in 0..big_power { + let (q, mut r) = div_rem_digit(big_r, base); + big_r = q; + for _ in 0..power { + res.push((r % radix) as u8); + r /= radix; + } + } + } + } + + while digits.data.len() > 1 { + let (q, mut r) = div_rem_digit(digits, base); + for _ in 0..power { + res.push((r % radix) as u8); + r /= radix; + } + digits = q; + } + + let mut r = digits.data[0]; + while r != 0 { + res.push((r % radix) as u8); + r /= radix; + } + + res +} + +pub(super) fn to_radix_le(u: &BigUint, radix: u32) -> Vec { + if u.is_zero() { + vec![0] + } else if radix.is_power_of_two() { + // Powers of two can use bitwise masks and shifting instead of division + let bits = ilog2(radix); + if big_digit::BITS % bits == 0 { + to_bitwise_digits_le(u, bits) + } else { + to_inexact_bitwise_digits_le(u, bits) + } + } else if radix == 10 { + // 10 is so common that it's worth separating out for const-propagation. + // Optimizers can often turn constant division into a faster multiplication. + to_radix_digits_le(u, 10) + } else { + to_radix_digits_le(u, radix) + } +} + +pub(crate) fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec { + assert!(2 <= radix && radix <= 36, "The radix must be within 2...36"); + + if u.is_zero() { + return vec![b'0']; + } + + let mut res = to_radix_le(u, radix); + + // Now convert everything to ASCII digits. + for r in &mut res { + debug_assert!(u32::from(*r) < radix); + if *r < 10 { + *r += b'0'; + } else { + *r += b'a' - 10; + } + } + res +} + +/// Returns the greatest power of the radix for the given bit size +#[inline] +fn get_radix_base(radix: u32, bits: u8) -> (BigDigit, usize) { + mod gen { + include! 
{ concat!(env!("OUT_DIR"), "/radix_bases.rs") } + } + + debug_assert!( + 2 <= radix && radix <= 256, + "The radix must be within 2...256" + ); + debug_assert!(!radix.is_power_of_two()); + debug_assert!(bits <= big_digit::BITS); + + match bits { + 16 => { + let (base, power) = gen::BASES_16[radix as usize]; + (base as BigDigit, power) + } + 32 => { + let (base, power) = gen::BASES_32[radix as usize]; + (base as BigDigit, power) + } + 64 => { + let (base, power) = gen::BASES_64[radix as usize]; + (base as BigDigit, power) + } + _ => panic!("Invalid bigdigit size"), + } +} diff --git a/src/rust/vendor/num-bigint/src/biguint/division.rs b/src/rust/vendor/num-bigint/src/biguint/division.rs new file mode 100644 index 000000000..b5d4259cc --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/division.rs @@ -0,0 +1,620 @@ +use super::addition::__add2; +#[cfg(not(u64_digit))] +use super::u32_to_u128; +use super::{cmp_slice, BigUint}; + +use crate::big_digit::{self, BigDigit, DoubleBigDigit}; +use crate::UsizePromotion; + +use core::cmp::Ordering::{Equal, Greater, Less}; +use core::mem; +use core::ops::{Div, DivAssign, Rem, RemAssign}; +use num_integer::Integer; +use num_traits::{CheckedDiv, One, ToPrimitive, Zero}; + +/// Divide a two digit numerator by a one digit divisor, returns quotient and remainder: +/// +/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit. +/// This is _not_ true for an arbitrary numerator/denominator. +/// +/// (This function also matches what the x86 divide instruction does). +#[inline] +fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { + debug_assert!(hi < divisor); + + let lhs = big_digit::to_doublebigdigit(hi, lo); + let rhs = DoubleBigDigit::from(divisor); + ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit) +} + +/// For small divisors, we can divide without promoting to `DoubleBigDigit` by +/// using half-size pieces of digit, like long-division. +#[inline] +fn div_half(rem: BigDigit, digit: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) { + use crate::big_digit::{HALF, HALF_BITS}; + + debug_assert!(rem < divisor && divisor <= HALF); + let (hi, rem) = ((rem << HALF_BITS) | (digit >> HALF_BITS)).div_rem(&divisor); + let (lo, rem) = ((rem << HALF_BITS) | (digit & HALF)).div_rem(&divisor); + ((hi << HALF_BITS) | lo, rem) +} + +#[inline] +pub(super) fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) { + if b == 0 { + panic!("attempt to divide by zero") + } + + let mut rem = 0; + + if b <= big_digit::HALF { + for d in a.data.iter_mut().rev() { + let (q, r) = div_half(rem, *d, b); + *d = q; + rem = r; + } + } else { + for d in a.data.iter_mut().rev() { + let (q, r) = div_wide(rem, *d, b); + *d = q; + rem = r; + } + } + + (a.normalized(), rem) +} + +#[inline] +fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit { + if b == 0 { + panic!("attempt to divide by zero") + } + + let mut rem = 0; + + if b <= big_digit::HALF { + for &digit in a.data.iter().rev() { + let (_, r) = div_half(rem, digit, b); + rem = r; + } + } else { + for &digit in a.data.iter().rev() { + let (_, r) = div_wide(rem, digit, b); + rem = r; + } + } + + rem +} + +/// Subtract a multiple. +/// a -= b * c +/// Returns a borrow (if a < b then borrow > 0). 
+fn sub_mul_digit_same_len(a: &mut [BigDigit], b: &[BigDigit], c: BigDigit) -> BigDigit { + debug_assert!(a.len() == b.len()); + + // carry is between -big_digit::MAX and 0, so to avoid overflow we store + // offset_carry = carry + big_digit::MAX + let mut offset_carry = big_digit::MAX; + + for (x, y) in a.iter_mut().zip(b) { + // We want to calculate sum = x - y * c + carry. + // sum >= -(big_digit::MAX * big_digit::MAX) - big_digit::MAX + // sum <= big_digit::MAX + // Offsetting sum by (big_digit::MAX << big_digit::BITS) puts it in DoubleBigDigit range. + let offset_sum = big_digit::to_doublebigdigit(big_digit::MAX, *x) + - big_digit::MAX as DoubleBigDigit + + offset_carry as DoubleBigDigit + - *y as DoubleBigDigit * c as DoubleBigDigit; + + let (new_offset_carry, new_x) = big_digit::from_doublebigdigit(offset_sum); + offset_carry = new_offset_carry; + *x = new_x; + } + + // Return the borrow. + big_digit::MAX - offset_carry +} + +fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) { + if d.is_zero() { + panic!("attempt to divide by zero") + } + if u.is_zero() { + return (Zero::zero(), Zero::zero()); + } + + if d.data.len() == 1 { + if d.data == [1] { + return (u, Zero::zero()); + } + let (div, rem) = div_rem_digit(u, d.data[0]); + // reuse d + d.data.clear(); + d += rem; + return (div, d); + } + + // Required or the q_len calculation below can underflow: + match u.cmp(&d) { + Less => return (Zero::zero(), u), + Equal => { + u.set_one(); + return (u, Zero::zero()); + } + Greater => {} // Do nothing + } + + // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: + // + // First, normalize the arguments so the highest bit in the highest digit of the divisor is + // set: the main loop uses the highest digit of the divisor for generating guesses, so we + // want it to be the largest number we can efficiently divide by. + // + let shift = d.data.last().unwrap().leading_zeros() as usize; + + if shift == 0 { + // no need to clone d + div_rem_core(u, &d.data) + } else { + let (q, r) = div_rem_core(u << shift, &(d << shift).data); + // renormalize the remainder + (q, r >> shift) + } +} + +pub(super) fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) { + if d.is_zero() { + panic!("attempt to divide by zero") + } + if u.is_zero() { + return (Zero::zero(), Zero::zero()); + } + + if d.data.len() == 1 { + if d.data == [1] { + return (u.clone(), Zero::zero()); + } + + let (div, rem) = div_rem_digit(u.clone(), d.data[0]); + return (div, rem.into()); + } + + // Required or the q_len calculation below can underflow: + match u.cmp(d) { + Less => return (Zero::zero(), u.clone()), + Equal => return (One::one(), Zero::zero()), + Greater => {} // Do nothing + } + + // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D: + // + // First, normalize the arguments so the highest bit in the highest digit of the divisor is + // set: the main loop uses the highest digit of the divisor for generating guesses, so we + // want it to be the largest number we can efficiently divide by. + // + let shift = d.data.last().unwrap().leading_zeros() as usize; + + if shift == 0 { + // no need to clone d + div_rem_core(u.clone(), &d.data) + } else { + let (q, r) = div_rem_core(u << shift, &(d << shift).data); + // renormalize the remainder + (q, r >> shift) + } +} + +/// An implementation of the base division algorithm. +/// Knuth, TAOCP vol 2 section 4.3.1, algorithm D, with an improvement from exercises 19-21. 
+fn div_rem_core(mut a: BigUint, b: &[BigDigit]) -> (BigUint, BigUint) { + debug_assert!(a.data.len() >= b.len() && b.len() > 1); + debug_assert!(b.last().unwrap().leading_zeros() == 0); + + // The algorithm works by incrementally calculating "guesses", q0, for the next digit of the + // quotient. Once we have any number q0 such that (q0 << j) * b <= a, we can set + // + // q += q0 << j + // a -= (q0 << j) * b + // + // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder. + // + // q0, our guess, is calculated by dividing the last three digits of a by the last two digits of + // b - this will give us a guess that is close to the actual quotient, but is possibly greater. + // It can only be greater by 1 and only in rare cases, with probability at most + // 2^-(big_digit::BITS-1) for random a, see TAOCP 4.3.1 exercise 21. + // + // If the quotient turns out to be too large, we adjust it by 1: + // q -= 1 << j + // a += b << j + + // a0 stores an additional extra most significant digit of the dividend, not stored in a. + let mut a0 = 0; + + // [b1, b0] are the two most significant digits of the divisor. They never change. + let b0 = *b.last().unwrap(); + let b1 = b[b.len() - 2]; + + let q_len = a.data.len() - b.len() + 1; + let mut q = BigUint { + data: vec![0; q_len], + }; + + for j in (0..q_len).rev() { + debug_assert!(a.data.len() == b.len() + j); + + let a1 = *a.data.last().unwrap(); + let a2 = a.data[a.data.len() - 2]; + + // The first q0 estimate is [a1,a0] / b0. It will never be too small, it may be too large + // by at most 2. + let (mut q0, mut r) = if a0 < b0 { + let (q0, r) = div_wide(a0, a1, b0); + (q0, r as DoubleBigDigit) + } else { + debug_assert!(a0 == b0); + // Avoid overflowing q0, we know the quotient fits in BigDigit. + // [a1,a0] = b0 * (1< a0 { + // q0 is too large. We need to add back one multiple of b. + q0 -= 1; + borrow -= __add2(&mut a.data[j..], b); + } + // The top digit of a, stored in a0, has now been zeroed. + debug_assert!(borrow == a0); + + q.data[j] = q0; + + // Pop off the next top digit of a. 
+ a0 = a.data.pop().unwrap(); + } + + a.data.push(a0); + a.normalize(); + + debug_assert_eq!(cmp_slice(&a.data, b), Less); + + (q.normalized(), a) +} + +forward_val_ref_binop!(impl Div for BigUint, div); +forward_ref_val_binop!(impl Div for BigUint, div); +forward_val_assign!(impl DivAssign for BigUint, div_assign); + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: BigUint) -> BigUint { + let (q, _) = div_rem(self, other); + q + } +} + +impl<'a, 'b> Div<&'b BigUint> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: &BigUint) -> BigUint { + let (q, _) = self.div_rem(other); + q + } +} +impl<'a> DivAssign<&'a BigUint> for BigUint { + #[inline] + fn div_assign(&mut self, other: &'a BigUint) { + *self = &*self / other; + } +} + +promote_unsigned_scalars!(impl Div for BigUint, div); +promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign); +forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); +forward_all_scalar_binop_to_val_val!(impl Div for BigUint, div); + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: u32) -> BigUint { + let (q, _) = div_rem_digit(self, other as BigDigit); + q + } +} +impl DivAssign for BigUint { + #[inline] + fn div_assign(&mut self, other: u32) { + *self = &*self / other; + } +} + +impl Div for u32 { + type Output = BigUint; + + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self as BigDigit / other.data[0]), + _ => Zero::zero(), + } + } +} + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: u64) -> BigUint { + let (q, _) = div_rem(self, From::from(other)); + q + } +} +impl DivAssign for BigUint { + #[inline] + fn div_assign(&mut self, other: u64) { + // a vec of size 0 does not allocate, so this is fairly cheap + let temp = mem::replace(self, Zero::zero()); + *self = temp / other; + } +} + +impl Div for u64 { + type Output = BigUint; + + #[cfg(not(u64_digit))] + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / u64::from(other.data[0])), + 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), + _ => Zero::zero(), + } + } + + #[cfg(u64_digit)] + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / other.data[0]), + _ => Zero::zero(), + } + } +} + +impl Div for BigUint { + type Output = BigUint; + + #[inline] + fn div(self, other: u128) -> BigUint { + let (q, _) = div_rem(self, From::from(other)); + q + } +} + +impl DivAssign for BigUint { + #[inline] + fn div_assign(&mut self, other: u128) { + *self = &*self / other; + } +} + +impl Div for u128 { + type Output = BigUint; + + #[cfg(not(u64_digit))] + #[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / u128::from(other.data[0])), + 2 => From::from( + self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])), + ), + 3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])), + 4 => From::from( + self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]), + ), + _ => Zero::zero(), + } + } + + #[cfg(u64_digit)] + 
#[inline] + fn div(self, other: BigUint) -> BigUint { + match other.data.len() { + 0 => panic!("attempt to divide by zero"), + 1 => From::from(self / other.data[0] as u128), + 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])), + _ => Zero::zero(), + } + } +} + +forward_val_ref_binop!(impl Rem for BigUint, rem); +forward_ref_val_binop!(impl Rem for BigUint, rem); +forward_val_assign!(impl RemAssign for BigUint, rem_assign); + +impl Rem for BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: BigUint) -> BigUint { + if let Some(other) = other.to_u32() { + &self % other + } else { + let (_, r) = div_rem(self, other); + r + } + } +} + +impl<'a, 'b> Rem<&'b BigUint> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: &BigUint) -> BigUint { + if let Some(other) = other.to_u32() { + self % other + } else { + let (_, r) = self.div_rem(other); + r + } + } +} +impl<'a> RemAssign<&'a BigUint> for BigUint { + #[inline] + fn rem_assign(&mut self, other: &BigUint) { + *self = &*self % other; + } +} + +promote_unsigned_scalars!(impl Rem for BigUint, rem); +promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign); +forward_all_scalar_binop_to_ref_val!(impl Rem for BigUint, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); +forward_all_scalar_binop_to_val_val!(impl Rem for BigUint, rem); + +impl<'a> Rem for &'a BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: u32) -> BigUint { + rem_digit(self, other as BigDigit).into() + } +} +impl RemAssign for BigUint { + #[inline] + fn rem_assign(&mut self, other: u32) { + *self = &*self % other; + } +} + +impl<'a> Rem<&'a BigUint> for u32 { + type Output = BigUint; + + #[inline] + fn rem(mut self, other: &'a BigUint) -> BigUint { + self %= other; + From::from(self) + } +} + +macro_rules! 
impl_rem_assign_scalar { + ($scalar:ty, $to_scalar:ident) => { + forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign); + impl<'a> RemAssign<&'a BigUint> for $scalar { + #[inline] + fn rem_assign(&mut self, other: &BigUint) { + *self = match other.$to_scalar() { + None => *self, + Some(0) => panic!("attempt to divide by zero"), + Some(v) => *self % v + }; + } + } + } +} + +// we can scalar %= BigUint for any scalar, including signed types +impl_rem_assign_scalar!(u128, to_u128); +impl_rem_assign_scalar!(usize, to_usize); +impl_rem_assign_scalar!(u64, to_u64); +impl_rem_assign_scalar!(u32, to_u32); +impl_rem_assign_scalar!(u16, to_u16); +impl_rem_assign_scalar!(u8, to_u8); +impl_rem_assign_scalar!(i128, to_i128); +impl_rem_assign_scalar!(isize, to_isize); +impl_rem_assign_scalar!(i64, to_i64); +impl_rem_assign_scalar!(i32, to_i32); +impl_rem_assign_scalar!(i16, to_i16); +impl_rem_assign_scalar!(i8, to_i8); + +impl Rem for BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: u64) -> BigUint { + let (_, r) = div_rem(self, From::from(other)); + r + } +} +impl RemAssign for BigUint { + #[inline] + fn rem_assign(&mut self, other: u64) { + *self = &*self % other; + } +} + +impl Rem for u64 { + type Output = BigUint; + + #[inline] + fn rem(mut self, other: BigUint) -> BigUint { + self %= other; + From::from(self) + } +} + +impl Rem for BigUint { + type Output = BigUint; + + #[inline] + fn rem(self, other: u128) -> BigUint { + let (_, r) = div_rem(self, From::from(other)); + r + } +} + +impl RemAssign for BigUint { + #[inline] + fn rem_assign(&mut self, other: u128) { + *self = &*self % other; + } +} + +impl Rem for u128 { + type Output = BigUint; + + #[inline] + fn rem(mut self, other: BigUint) -> BigUint { + self %= other; + From::from(self) + } +} + +impl CheckedDiv for BigUint { + #[inline] + fn checked_div(&self, v: &BigUint) -> Option { + if v.is_zero() { + return None; + } + Some(self.div(v)) + } +} diff --git a/src/rust/vendor/num-bigint/src/biguint/iter.rs b/src/rust/vendor/num-bigint/src/biguint/iter.rs new file mode 100644 index 000000000..1e673e454 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/iter.rs @@ -0,0 +1,358 @@ +use core::iter::FusedIterator; + +#[cfg(not(u64_digit))] +use super::u32_chunk_to_u64; + +/// An iterator of `u32` digits representation of a `BigUint` or `BigInt`, +/// ordered least significant digit first. 
+pub struct U32Digits<'a> { + #[cfg(u64_digit)] + data: &'a [u64], + #[cfg(u64_digit)] + next_is_lo: bool, + #[cfg(u64_digit)] + last_hi_is_zero: bool, + + #[cfg(not(u64_digit))] + it: core::slice::Iter<'a, u32>, +} + +#[cfg(u64_digit)] +impl<'a> U32Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u64]) -> Self { + let last_hi_is_zero = data + .last() + .map(|&last| { + let last_hi = (last >> 32) as u32; + last_hi == 0 + }) + .unwrap_or(false); + U32Digits { + data, + next_is_lo: true, + last_hi_is_zero, + } + } +} + +#[cfg(u64_digit)] +impl Iterator for U32Digits<'_> { + type Item = u32; + #[inline] + fn next(&mut self) -> Option { + match self.data.split_first() { + Some((&first, data)) => { + let next_is_lo = self.next_is_lo; + self.next_is_lo = !next_is_lo; + if next_is_lo { + Some(first as u32) + } else { + self.data = data; + if data.is_empty() && self.last_hi_is_zero { + self.last_hi_is_zero = false; + None + } else { + Some((first >> 32) as u32) + } + } + } + None => None, + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } + + #[inline] + fn last(self) -> Option { + self.data.last().map(|&last| { + if self.last_hi_is_zero { + last as u32 + } else { + (last >> 32) as u32 + } + }) + } + + #[inline] + fn count(self) -> usize { + self.len() + } +} + +#[cfg(u64_digit)] +impl DoubleEndedIterator for U32Digits<'_> { + fn next_back(&mut self) -> Option { + match self.data.split_last() { + Some((&last, data)) => { + let last_is_lo = self.last_hi_is_zero; + self.last_hi_is_zero = !last_is_lo; + if last_is_lo { + self.data = data; + if data.is_empty() && !self.next_is_lo { + self.next_is_lo = true; + None + } else { + Some(last as u32) + } + } else { + Some((last >> 32) as u32) + } + } + None => None, + } + } +} + +#[cfg(u64_digit)] +impl ExactSizeIterator for U32Digits<'_> { + #[inline] + fn len(&self) -> usize { + self.data.len() * 2 - usize::from(self.last_hi_is_zero) - usize::from(!self.next_is_lo) + } +} + +#[cfg(not(u64_digit))] +impl<'a> U32Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u32]) -> Self { + Self { it: data.iter() } + } +} + +#[cfg(not(u64_digit))] +impl Iterator for U32Digits<'_> { + type Item = u32; + #[inline] + fn next(&mut self) -> Option { + self.it.next().cloned() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.it.nth(n).cloned() + } + + #[inline] + fn last(self) -> Option { + self.it.last().cloned() + } + + #[inline] + fn count(self) -> usize { + self.it.count() + } +} + +#[cfg(not(u64_digit))] +impl DoubleEndedIterator for U32Digits<'_> { + fn next_back(&mut self) -> Option { + self.it.next_back().copied() + } +} + +#[cfg(not(u64_digit))] +impl ExactSizeIterator for U32Digits<'_> { + #[inline] + fn len(&self) -> usize { + self.it.len() + } +} + +impl FusedIterator for U32Digits<'_> {} + +/// An iterator of `u64` digits representation of a `BigUint` or `BigInt`, +/// ordered least significant digit first. 
+pub struct U64Digits<'a> { + #[cfg(not(u64_digit))] + it: core::slice::Chunks<'a, u32>, + + #[cfg(u64_digit)] + it: core::slice::Iter<'a, u64>, +} + +#[cfg(not(u64_digit))] +impl<'a> U64Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u32]) -> Self { + U64Digits { it: data.chunks(2) } + } +} + +#[cfg(not(u64_digit))] +impl Iterator for U64Digits<'_> { + type Item = u64; + #[inline] + fn next(&mut self) -> Option { + self.it.next().map(u32_chunk_to_u64) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } + + #[inline] + fn last(self) -> Option { + self.it.last().map(u32_chunk_to_u64) + } + + #[inline] + fn count(self) -> usize { + self.len() + } +} + +#[cfg(not(u64_digit))] +impl DoubleEndedIterator for U64Digits<'_> { + fn next_back(&mut self) -> Option { + self.it.next_back().map(u32_chunk_to_u64) + } +} + +#[cfg(u64_digit)] +impl<'a> U64Digits<'a> { + #[inline] + pub(super) fn new(data: &'a [u64]) -> Self { + Self { it: data.iter() } + } +} + +#[cfg(u64_digit)] +impl Iterator for U64Digits<'_> { + type Item = u64; + #[inline] + fn next(&mut self) -> Option { + self.it.next().cloned() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.it.nth(n).cloned() + } + + #[inline] + fn last(self) -> Option { + self.it.last().cloned() + } + + #[inline] + fn count(self) -> usize { + self.it.count() + } +} + +#[cfg(u64_digit)] +impl DoubleEndedIterator for U64Digits<'_> { + fn next_back(&mut self) -> Option { + self.it.next_back().cloned() + } +} + +impl ExactSizeIterator for U64Digits<'_> { + #[inline] + fn len(&self) -> usize { + self.it.len() + } +} + +impl FusedIterator for U64Digits<'_> {} + +#[test] +fn test_iter_u32_digits() { + let n = super::BigUint::from(5u8); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n = super::BigUint::from(112500000000u64); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next(), Some(830850304)); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(26)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} + +#[test] +fn test_iter_u64_digits() { + let n = super::BigUint::from(5u8); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n = super::BigUint::from(18_446_744_073_709_551_616u128); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next(), Some(0)); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} + +#[test] +fn test_iter_u32_digits_be() { + let n = super::BigUint::from(5u8); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n = super::BigUint::from(112500000000u64); + let mut it = n.iter_u32_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next(), Some(830850304)); + assert_eq!(it.len(), 1); + assert_eq!(it.next(), Some(26)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} + +#[test] +fn test_iter_u64_digits_be() { + let n = 
super::BigUint::from(5u8); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 1); + assert_eq!(it.next_back(), Some(5)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); + + let n = super::BigUint::from(18_446_744_073_709_551_616u128); + let mut it = n.iter_u64_digits(); + assert_eq!(it.len(), 2); + assert_eq!(it.next_back(), Some(1)); + assert_eq!(it.len(), 1); + assert_eq!(it.next_back(), Some(0)); + assert_eq!(it.len(), 0); + assert_eq!(it.next(), None); +} diff --git a/src/rust/vendor/num-bigint/src/monty.rs b/src/rust/vendor/num-bigint/src/biguint/monty.rs similarity index 97% rename from src/rust/vendor/num-bigint/src/monty.rs rename to src/rust/vendor/num-bigint/src/biguint/monty.rs index b9a0bd06c..a5c79aa9e 100644 --- a/src/rust/vendor/num-bigint/src/monty.rs +++ b/src/rust/vendor/num-bigint/src/biguint/monty.rs @@ -41,6 +41,7 @@ impl MontyReducer { /// In the terminology of that paper, this is an "Almost Montgomery Multiplication": /// x and y are required to satisfy 0 <= z < 2**(n*_W) and then the result /// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m. +#[allow(clippy::many_single_char_names)] fn montgomery(x: &BigUint, y: &BigUint, m: &BigUint, k: BigDigit, n: usize) -> BigUint { // This code assumes x, y, m are all the same length, n. // (required by addMulVVW and the for loop). @@ -131,7 +132,8 @@ fn mul_add_www(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) { } /// Calculates x ** y mod m using a fixed, 4-bit window. -pub(crate) fn monty_modpow(x: &BigUint, y: &BigUint, m: &BigUint) -> BigUint { +#[allow(clippy::many_single_char_names)] +pub(super) fn monty_modpow(x: &BigUint, y: &BigUint, m: &BigUint) -> BigUint { assert!(m.data[0] & 1 == 1); let mr = MontyReducer::new(m); let num_words = m.data.len(); diff --git a/src/rust/vendor/num-bigint/src/biguint/multiplication.rs b/src/rust/vendor/num-bigint/src/biguint/multiplication.rs new file mode 100644 index 000000000..597a2029e --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/multiplication.rs @@ -0,0 +1,568 @@ +use super::addition::{__add2, add2}; +use super::subtraction::sub2; +#[cfg(not(u64_digit))] +use super::u32_from_u128; +use super::{biguint_from_vec, cmp_slice, BigUint, IntDigits}; + +use crate::big_digit::{self, BigDigit, DoubleBigDigit}; +use crate::Sign::{self, Minus, NoSign, Plus}; +use crate::{BigInt, UsizePromotion}; + +use core::cmp::Ordering; +use core::iter::Product; +use core::ops::{Mul, MulAssign}; +use num_traits::{CheckedMul, FromPrimitive, One, Zero}; + +#[inline] +pub(super) fn mac_with_carry( + a: BigDigit, + b: BigDigit, + c: BigDigit, + acc: &mut DoubleBigDigit, +) -> BigDigit { + *acc += DoubleBigDigit::from(a); + *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c); + let lo = *acc as BigDigit; + *acc >>= big_digit::BITS; + lo +} + +#[inline] +fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit { + *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b); + let lo = *acc as BigDigit; + *acc >>= big_digit::BITS; + lo +} + +/// Three argument multiply accumulate: +/// acc += b * c +fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) { + if c == 0 { + return; + } + + let mut carry = 0; + let (a_lo, a_hi) = acc.split_at_mut(b.len()); + + for (a, &b) in a_lo.iter_mut().zip(b) { + *a = mac_with_carry(*a, b, c, &mut carry); + } + + let (carry_hi, carry_lo) = big_digit::from_doublebigdigit(carry); + + let final_carry = if carry_hi == 
0 { + __add2(a_hi, &[carry_lo]) + } else { + __add2(a_hi, &[carry_hi, carry_lo]) + }; + assert_eq!(final_carry, 0, "carry overflow during multiplication!"); +} + +fn bigint_from_slice(slice: &[BigDigit]) -> BigInt { + BigInt::from(biguint_from_vec(slice.to_vec())) +} + +/// Three argument multiply accumulate: +/// acc += b * c +#[allow(clippy::many_single_char_names)] +fn mac3(mut acc: &mut [BigDigit], mut b: &[BigDigit], mut c: &[BigDigit]) { + // Least-significant zeros have no effect on the output. + if let Some(&0) = b.first() { + if let Some(nz) = b.iter().position(|&d| d != 0) { + b = &b[nz..]; + acc = &mut acc[nz..]; + } else { + return; + } + } + if let Some(&0) = c.first() { + if let Some(nz) = c.iter().position(|&d| d != 0) { + c = &c[nz..]; + acc = &mut acc[nz..]; + } else { + return; + } + } + + let acc = acc; + let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) }; + + // We use three algorithms for different input sizes. + // + // - For small inputs, long multiplication is fastest. + // - Next we use Karatsuba multiplication (Toom-2), which we have optimized + // to avoid unnecessary allocations for intermediate values. + // - For the largest inputs we use Toom-3, which better optimizes the + // number of operations, but uses more temporary allocations. + // + // The thresholds are somewhat arbitrary, chosen by evaluating the results + // of `cargo bench --bench bigint multiply`. + + if x.len() <= 32 { + // Long multiplication: + for (i, xi) in x.iter().enumerate() { + mac_digit(&mut acc[i..], y, *xi); + } + } else if x.len() <= 256 { + // Karatsuba multiplication: + // + // The idea is that we break x and y up into two smaller numbers that each have about half + // as many digits, like so (note that multiplying by b is just a shift): + // + // x = x0 + x1 * b + // y = y0 + y1 * b + // + // With some algebra, we can compute x * y with three smaller products, where the inputs to + // each of the smaller products have only about half as many digits as x and y: + // + // x * y = (x0 + x1 * b) * (y0 + y1 * b) + // + // x * y = x0 * y0 + // + x0 * y1 * b + // + x1 * y0 * b + // + x1 * y1 * b^2 + // + // Let p0 = x0 * y0 and p2 = x1 * y1: + // + // x * y = p0 + // + (x0 * y1 + x1 * y0) * b + // + p2 * b^2 + // + // The real trick is that middle term: + // + // x0 * y1 + x1 * y0 + // + // = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2 + // + // = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2 + // + // Now we complete the square: + // + // = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2 + // + // = -((x1 - x0) * (y1 - y0)) + p0 + p2 + // + // Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula: + // + // x * y = p0 + // + (p0 + p2 - p1) * b + // + p2 * b^2 + // + // Where the three intermediate products are: + // + // p0 = x0 * y0 + // p1 = (x1 - x0) * (y1 - y0) + // p2 = x1 * y1 + // + // In doing the computation, we take great care to avoid unnecessary temporary variables + // (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a + // bit so we can use the same temporary variable for all the intermediate products: + // + // x * y = p2 * b^2 + p2 * b + // + p0 * b + p0 + // - p1 * b + // + // The other trick we use is instead of doing explicit shifts, we slice acc at the + // appropriate offset when doing the add. 
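+        // Illustrative worked example (added for clarity, not part of the upstream
+        // comment): with decimal "digits" and b = 100, take x = 1234 and y = 5678,
+        // so x0 = 34, x1 = 12, y0 = 78, y1 = 56. Then
+        //   p0 = 34 * 78 = 2652
+        //   p2 = 12 * 56 = 672
+        //   p1 = (12 - 34) * (56 - 78) = 484
+        // and x * y = p0 + (p0 + p2 - p1) * b + p2 * b^2
+        //           = 2652 + 2840 * 100 + 672 * 10000 = 7_006_652 = 1234 * 5678.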
+ + // When x is smaller than y, it's significantly faster to pick b such that x is split in + // half, not y: + let b = x.len() / 2; + let (x0, x1) = x.split_at(b); + let (y0, y1) = y.split_at(b); + + // We reuse the same BigUint for all the intermediate multiplies and have to size p + // appropriately here: x1.len() >= x0.len and y1.len() >= y0.len(): + let len = x1.len() + y1.len() + 1; + let mut p = BigUint { data: vec![0; len] }; + + // p2 = x1 * y1 + mac3(&mut p.data, x1, y1); + + // Not required, but the adds go faster if we drop any unneeded 0s from the end: + p.normalize(); + + add2(&mut acc[b..], &p.data); + add2(&mut acc[b * 2..], &p.data); + + // Zero out p before the next multiply: + p.data.truncate(0); + p.data.resize(len, 0); + + // p0 = x0 * y0 + mac3(&mut p.data, x0, y0); + p.normalize(); + + add2(acc, &p.data); + add2(&mut acc[b..], &p.data); + + // p1 = (x1 - x0) * (y1 - y0) + // We do this one last, since it may be negative and acc can't ever be negative: + let (j0_sign, j0) = sub_sign(x1, x0); + let (j1_sign, j1) = sub_sign(y1, y0); + + match j0_sign * j1_sign { + Plus => { + p.data.truncate(0); + p.data.resize(len, 0); + + mac3(&mut p.data, &j0.data, &j1.data); + p.normalize(); + + sub2(&mut acc[b..], &p.data); + } + Minus => { + mac3(&mut acc[b..], &j0.data, &j1.data); + } + NoSign => (), + } + } else { + // Toom-3 multiplication: + // + // Toom-3 is like Karatsuba above, but dividing the inputs into three parts. + // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively. + // + // The general idea is to treat the large integers digits as + // polynomials of a certain degree and determine the coefficients/digits + // of the product of the two via interpolation of the polynomial product. + let i = y.len() / 3 + 1; + + let x0_len = Ord::min(x.len(), i); + let x1_len = Ord::min(x.len() - x0_len, i); + + let y0_len = i; + let y1_len = Ord::min(y.len() - y0_len, i); + + // Break x and y into three parts, representating an order two polynomial. + // t is chosen to be the size of a digit so we can use faster shifts + // in place of multiplications. + // + // x(t) = x2*t^2 + x1*t + x0 + let x0 = bigint_from_slice(&x[..x0_len]); + let x1 = bigint_from_slice(&x[x0_len..x0_len + x1_len]); + let x2 = bigint_from_slice(&x[x0_len + x1_len..]); + + // y(t) = y2*t^2 + y1*t + y0 + let y0 = bigint_from_slice(&y[..y0_len]); + let y1 = bigint_from_slice(&y[y0_len..y0_len + y1_len]); + let y2 = bigint_from_slice(&y[y0_len + y1_len..]); + + // Let w(t) = x(t) * y(t) + // + // This gives us the following order-4 polynomial. + // + // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0 + // + // We need to find the coefficients w4, w3, w2, w1 and w0. Instead + // of simply multiplying the x and y in total, we can evaluate w + // at 5 points. An n-degree polynomial is uniquely identified by (n + 1) + // points. + // + // It is arbitrary as to what points we evaluate w at but we use the + // following. 
+ // + // w(t) at t = 0, 1, -1, -2 and inf + // + // The values for w(t) in terms of x(t)*y(t) at these points are: + // + // let a = w(0) = x0 * y0 + // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0) + // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0) + // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0) + // let e = w(inf) = x2 * y2 as t -> inf + + // x0 + x2, avoiding temporaries + let p = &x0 + &x2; + + // y0 + y2, avoiding temporaries + let q = &y0 + &y2; + + // x2 - x1 + x0, avoiding temporaries + let p2 = &p - &x1; + + // y2 - y1 + y0, avoiding temporaries + let q2 = &q - &y1; + + // w(0) + let r0 = &x0 * &y0; + + // w(inf) + let r4 = &x2 * &y2; + + // w(1) + let r1 = (p + x1) * (q + y1); + + // w(-1) + let r2 = &p2 * &q2; + + // w(-2) + let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0); + + // Evaluating these points gives us the following system of linear equations. + // + // 0 0 0 0 1 | a + // 1 1 1 1 1 | b + // 1 -1 1 -1 1 | c + // 16 -8 4 -2 1 | d + // 1 0 0 0 0 | e + // + // The solved equation (after gaussian elimination or similar) + // in terms of its coefficients: + // + // w0 = w(0) + // w1 = w(0)/2 + w(1)/3 - w(-1) + w(2)/6 - 2*w(inf) + // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf) + // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(1)/6 + // w4 = w(inf) + // + // This particular sequence is given by Bodrato and is an interpolation + // of the above equations. + let mut comp3: BigInt = (r3 - &r1) / 3u32; + let mut comp1: BigInt = (r1 - &r2) >> 1; + let mut comp2: BigInt = r2 - &r0; + comp3 = ((&comp2 - comp3) >> 1) + (&r4 << 1); + comp2 += &comp1 - &r4; + comp1 -= &comp3; + + // Recomposition. The coefficients of the polynomial are now known. + // + // Evaluate at w(t) where t is our given base to get the result. + // + // let bits = u64::from(big_digit::BITS) * i as u64; + // let result = r0 + // + (comp1 << bits) + // + (comp2 << (2 * bits)) + // + (comp3 << (3 * bits)) + // + (r4 << (4 * bits)); + // let result_pos = result.to_biguint().unwrap(); + // add2(&mut acc[..], &result_pos.data); + // + // But with less intermediate copying: + for (j, result) in [&r0, &comp1, &comp2, &comp3, &r4].iter().enumerate().rev() { + match result.sign() { + Plus => add2(&mut acc[i * j..], result.digits()), + Minus => sub2(&mut acc[i * j..], result.digits()), + NoSign => {} + } + } + } +} + +fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint { + let len = x.len() + y.len() + 1; + let mut prod = BigUint { data: vec![0; len] }; + + mac3(&mut prod.data, x, y); + prod.normalized() +} + +fn scalar_mul(a: &mut BigUint, b: BigDigit) { + match b { + 0 => a.set_zero(), + 1 => {} + _ => { + if b.is_power_of_two() { + *a <<= b.trailing_zeros(); + } else { + let mut carry = 0; + for a in a.data.iter_mut() { + *a = mul_with_carry(*a, b, &mut carry); + } + if carry != 0 { + a.data.push(carry as BigDigit); + } + } + } + } +} + +fn sub_sign(mut a: &[BigDigit], mut b: &[BigDigit]) -> (Sign, BigUint) { + // Normalize: + if let Some(&0) = a.last() { + a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; + } + if let Some(&0) = b.last() { + b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)]; + } + + match cmp_slice(a, b) { + Ordering::Greater => { + let mut a = a.to_vec(); + sub2(&mut a, b); + (Plus, biguint_from_vec(a)) + } + Ordering::Less => { + let mut b = b.to_vec(); + sub2(&mut b, a); + (Minus, biguint_from_vec(b)) + } + Ordering::Equal => (NoSign, Zero::zero()), + } +} + +macro_rules! 
impl_mul { + ($(impl<$($a:lifetime),*> Mul<$Other:ty> for $Self:ty;)*) => {$( + impl<$($a),*> Mul<$Other> for $Self { + type Output = BigUint; + + #[inline] + fn mul(self, other: $Other) -> BigUint { + match (&*self.data, &*other.data) { + // multiply by zero + (&[], _) | (_, &[]) => BigUint::zero(), + // multiply by a scalar + (_, &[digit]) => self * digit, + (&[digit], _) => other * digit, + // full multiplication + (x, y) => mul3(x, y), + } + } + } + )*} +} +impl_mul! { + impl<> Mul for BigUint; + impl<'b> Mul<&'b BigUint> for BigUint; + impl<'a> Mul for &'a BigUint; + impl<'a, 'b> Mul<&'b BigUint> for &'a BigUint; +} + +macro_rules! impl_mul_assign { + ($(impl<$($a:lifetime),*> MulAssign<$Other:ty> for BigUint;)*) => {$( + impl<$($a),*> MulAssign<$Other> for BigUint { + #[inline] + fn mul_assign(&mut self, other: $Other) { + match (&*self.data, &*other.data) { + // multiply by zero + (&[], _) => {}, + (_, &[]) => self.set_zero(), + // multiply by a scalar + (_, &[digit]) => *self *= digit, + (&[digit], _) => *self = other * digit, + // full multiplication + (x, y) => *self = mul3(x, y), + } + } + } + )*} +} +impl_mul_assign! { + impl<> MulAssign for BigUint; + impl<'a> MulAssign<&'a BigUint> for BigUint; +} + +promote_unsigned_scalars!(impl Mul for BigUint, mul); +promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); +forward_all_scalar_binop_to_val_val_commutative!(impl Mul for BigUint, mul); + +impl Mul for BigUint { + type Output = BigUint; + + #[inline] + fn mul(mut self, other: u32) -> BigUint { + self *= other; + self + } +} +impl MulAssign for BigUint { + #[inline] + fn mul_assign(&mut self, other: u32) { + scalar_mul(self, other as BigDigit); + } +} + +impl Mul for BigUint { + type Output = BigUint; + + #[inline] + fn mul(mut self, other: u64) -> BigUint { + self *= other; + self + } +} +impl MulAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn mul_assign(&mut self, other: u64) { + if let Some(other) = BigDigit::from_u64(other) { + scalar_mul(self, other); + } else { + let (hi, lo) = big_digit::from_doublebigdigit(other); + *self = mul3(&self.data, &[lo, hi]); + } + } + + #[cfg(u64_digit)] + #[inline] + fn mul_assign(&mut self, other: u64) { + scalar_mul(self, other); + } +} + +impl Mul for BigUint { + type Output = BigUint; + + #[inline] + fn mul(mut self, other: u128) -> BigUint { + self *= other; + self + } +} + +impl MulAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn mul_assign(&mut self, other: u128) { + if let Some(other) = BigDigit::from_u128(other) { + scalar_mul(self, other); + } else { + *self = match u32_from_u128(other) { + (0, 0, c, d) => mul3(&self.data, &[d, c]), + (0, b, c, d) => mul3(&self.data, &[d, c, b]), + (a, b, c, d) => mul3(&self.data, &[d, c, b, a]), + }; + } + } + + #[cfg(u64_digit)] + #[inline] + fn mul_assign(&mut self, other: u128) { + if let Some(other) = BigDigit::from_u128(other) { + scalar_mul(self, other); + } else { + let (hi, lo) = big_digit::from_doublebigdigit(other); + *self = mul3(&self.data, &[lo, hi]); + } + } +} + +impl CheckedMul for BigUint { + #[inline] + fn checked_mul(&self, v: &BigUint) -> Option { + Some(self.mul(v)) + } +} + +impl_product_iter_type!(BigUint); + +#[test] +fn test_sub_sign() { + use crate::BigInt; + use num_traits::Num; + + fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt { + let (sign, val) = sub_sign(a, 
b); + BigInt::from_biguint(sign, val) + } + + let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap(); + let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap(); + let a_i = BigInt::from(a.clone()); + let b_i = BigInt::from(b.clone()); + + assert_eq!(sub_sign_i(&a.data, &b.data), &a_i - &b_i); + assert_eq!(sub_sign_i(&b.data, &a.data), &b_i - &a_i); +} diff --git a/src/rust/vendor/num-bigint/src/biguint/power.rs b/src/rust/vendor/num-bigint/src/biguint/power.rs new file mode 100644 index 000000000..d24651bf4 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/power.rs @@ -0,0 +1,258 @@ +use super::monty::monty_modpow; +use super::BigUint; + +use crate::big_digit::{self, BigDigit}; + +use num_integer::Integer; +use num_traits::{One, Pow, ToPrimitive, Zero}; + +impl<'b> Pow<&'b BigUint> for BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &BigUint) -> BigUint { + if self.is_one() || exp.is_zero() { + BigUint::one() + } else if self.is_zero() { + BigUint::zero() + } else if let Some(exp) = exp.to_u64() { + self.pow(exp) + } else if let Some(exp) = exp.to_u128() { + self.pow(exp) + } else { + // At this point, `self >= 2` and `exp >= 2¹²⁸`. The smallest possible result given + // `2.pow(2¹²⁸)` would require far more memory than 64-bit targets can address! + panic!("memory overflow") + } + } +} + +impl Pow for BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: BigUint) -> BigUint { + Pow::pow(self, &exp) + } +} + +impl<'a, 'b> Pow<&'b BigUint> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &BigUint) -> BigUint { + if self.is_one() || exp.is_zero() { + BigUint::one() + } else if self.is_zero() { + BigUint::zero() + } else { + self.clone().pow(exp) + } + } +} + +impl<'a> Pow for &'a BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: BigUint) -> BigUint { + Pow::pow(self, &exp) + } +} + +macro_rules! pow_impl { + ($T:ty) => { + impl Pow<$T> for BigUint { + type Output = BigUint; + + fn pow(self, mut exp: $T) -> BigUint { + if exp == 0 { + return BigUint::one(); + } + let mut base = self; + + while exp & 1 == 0 { + base = &base * &base; + exp >>= 1; + } + + if exp == 1 { + return base; + } + + let mut acc = base.clone(); + while exp > 1 { + exp >>= 1; + base = &base * &base; + if exp & 1 == 1 { + acc *= &base; + } + } + acc + } + } + + impl<'b> Pow<&'b $T> for BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &$T) -> BigUint { + Pow::pow(self, *exp) + } + } + + impl<'a> Pow<$T> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: $T) -> BigUint { + if exp == 0 { + return BigUint::one(); + } + Pow::pow(self.clone(), exp) + } + } + + impl<'a, 'b> Pow<&'b $T> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn pow(self, exp: &$T) -> BigUint { + Pow::pow(self, *exp) + } + } + }; +} + +pow_impl!(u8); +pow_impl!(u16); +pow_impl!(u32); +pow_impl!(u64); +pow_impl!(usize); +pow_impl!(u128); + +pub(super) fn modpow(x: &BigUint, exponent: &BigUint, modulus: &BigUint) -> BigUint { + assert!( + !modulus.is_zero(), + "attempt to calculate with zero modulus!" + ); + + if modulus.is_odd() { + // For an odd modulus, we can use Montgomery multiplication in base 2^32. + monty_modpow(x, exponent, modulus) + } else { + // Otherwise do basically the same as `num::pow`, but with a modulus. 
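+        // Illustrative example (added note, not in the upstream source): a call such
+        // as modpow(3, 5, 10) takes this branch because 10 is even, and reduces after
+        // each square-and-multiply step, ending at 243 % 10 = 3.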
+ plain_modpow(x, &exponent.data, modulus) + } +} + +fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint { + assert!( + !modulus.is_zero(), + "attempt to calculate with zero modulus!" + ); + + let i = match exp_data.iter().position(|&r| r != 0) { + None => return BigUint::one(), + Some(i) => i, + }; + + let mut base = base % modulus; + for _ in 0..i { + for _ in 0..big_digit::BITS { + base = &base * &base % modulus; + } + } + + let mut r = exp_data[i]; + let mut b = 0u8; + while r.is_even() { + base = &base * &base % modulus; + r >>= 1; + b += 1; + } + + let mut exp_iter = exp_data[i + 1..].iter(); + if exp_iter.len() == 0 && r.is_one() { + return base; + } + + let mut acc = base.clone(); + r >>= 1; + b += 1; + + { + let mut unit = |exp_is_odd| { + base = &base * &base % modulus; + if exp_is_odd { + acc *= &base; + acc %= modulus; + } + }; + + if let Some(&last) = exp_iter.next_back() { + // consume exp_data[i] + for _ in b..big_digit::BITS { + unit(r.is_odd()); + r >>= 1; + } + + // consume all other digits before the last + for &r in exp_iter { + let mut r = r; + for _ in 0..big_digit::BITS { + unit(r.is_odd()); + r >>= 1; + } + } + r = last; + } + + debug_assert_ne!(r, 0); + while !r.is_zero() { + unit(r.is_odd()); + r >>= 1; + } + } + acc +} + +#[test] +fn test_plain_modpow() { + let two = &BigUint::from(2u32); + let modulus = BigUint::from(0x1100u32); + + let exp = vec![0, 0b1]; + assert_eq!( + two.pow(0b1_00000000_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); + let exp = vec![0, 0b10]; + assert_eq!( + two.pow(0b10_00000000_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); + let exp = vec![0, 0b110010]; + assert_eq!( + two.pow(0b110010_00000000_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); + let exp = vec![0b1, 0b1]; + assert_eq!( + two.pow(0b1_00000001_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); + let exp = vec![0b1100, 0, 0b1]; + assert_eq!( + two.pow(0b1_00000000_00001100_u32) % &modulus, + plain_modpow(&two, &exp, &modulus) + ); +} + +#[test] +fn test_pow_biguint() { + let base = BigUint::from(5u8); + let exponent = BigUint::from(3u8); + + assert_eq!(BigUint::from(125u8), base.pow(exponent)); +} diff --git a/src/rust/vendor/num-bigint/src/biguint/serde.rs b/src/rust/vendor/num-bigint/src/biguint/serde.rs new file mode 100644 index 000000000..ed663c6d9 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/serde.rs @@ -0,0 +1,108 @@ +use super::{biguint_from_vec, BigUint}; + +use crate::std_alloc::Vec; + +use core::fmt; +use serde::de::{SeqAccess, Visitor}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +impl Serialize for BigUint { + #[cfg(not(u64_digit))] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // Note: do not change the serialization format, or it may break forward + // and backward compatibility of serialized data! If we ever change the + // internal representation, we should still serialize in base-`u32`. 
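+        // Example of the wire format (added note, not in the upstream source): the
+        // value 2^32 + 5 is serialized as the little-endian u32 sequence [5, 1],
+        // regardless of whether the internal digits are u32 or u64.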
+ let data: &[u32] = &self.data; + data.serialize(serializer) + } + + #[cfg(u64_digit)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use serde::ser::SerializeSeq; + + if let Some((&last, data)) = self.data.split_last() { + let last_lo = last as u32; + let last_hi = (last >> 32) as u32; + let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize; + let mut seq = serializer.serialize_seq(Some(u32_len))?; + for &x in data { + seq.serialize_element(&(x as u32))?; + seq.serialize_element(&((x >> 32) as u32))?; + } + seq.serialize_element(&last_lo)?; + if last_hi != 0 { + seq.serialize_element(&last_hi)?; + } + seq.end() + } else { + let data: &[u32] = &[]; + data.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for BigUint { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_seq(U32Visitor) + } +} + +struct U32Visitor; + +impl<'de> Visitor<'de> for U32Visitor { + type Value = BigUint; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a sequence of unsigned 32-bit numbers") + } + + #[cfg(not(u64_digit))] + fn visit_seq(self, mut seq: S) -> Result + where + S: SeqAccess<'de>, + { + let len = seq.size_hint().unwrap_or(0); + let mut data = Vec::with_capacity(len); + + while let Some(value) = seq.next_element::()? { + data.push(value); + } + + Ok(biguint_from_vec(data)) + } + + #[cfg(u64_digit)] + fn visit_seq(self, mut seq: S) -> Result + where + S: SeqAccess<'de>, + { + use crate::big_digit::BigDigit; + use num_integer::Integer; + + let u32_len = seq.size_hint().unwrap_or(0); + let len = Integer::div_ceil(&u32_len, &2); + let mut data = Vec::with_capacity(len); + + while let Some(lo) = seq.next_element::()? { + let mut value = BigDigit::from(lo); + if let Some(hi) = seq.next_element::()? 
{ + value |= BigDigit::from(hi) << 32; + data.push(value); + } else { + data.push(value); + break; + } + } + + Ok(biguint_from_vec(data)) + } +} diff --git a/src/rust/vendor/num-bigint/src/biguint/shift.rs b/src/rust/vendor/num-bigint/src/biguint/shift.rs new file mode 100644 index 000000000..05964d2a4 --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/shift.rs @@ -0,0 +1,172 @@ +use super::{biguint_from_vec, BigUint}; + +use crate::big_digit; +use crate::std_alloc::{Cow, Vec}; + +use core::mem; +use core::ops::{Shl, ShlAssign, Shr, ShrAssign}; +use num_traits::{PrimInt, Zero}; + +#[inline] +fn biguint_shl(n: Cow<'_, BigUint>, shift: T) -> BigUint { + if shift < T::zero() { + panic!("attempt to shift left with negative"); + } + if n.is_zero() { + return n.into_owned(); + } + let bits = T::from(big_digit::BITS).unwrap(); + let digits = (shift / bits).to_usize().expect("capacity overflow"); + let shift = (shift % bits).to_u8().unwrap(); + biguint_shl2(n, digits, shift) +} + +fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint { + let mut data = match digits { + 0 => n.into_owned().data, + _ => { + let len = digits.saturating_add(n.data.len() + 1); + let mut data = Vec::with_capacity(len); + data.resize(digits, 0); + data.extend(n.data.iter()); + data + } + }; + + if shift > 0 { + let mut carry = 0; + let carry_shift = big_digit::BITS as u8 - shift; + for elem in data[digits..].iter_mut() { + let new_carry = *elem >> carry_shift; + *elem = (*elem << shift) | carry; + carry = new_carry; + } + if carry != 0 { + data.push(carry); + } + } + + biguint_from_vec(data) +} + +#[inline] +fn biguint_shr(n: Cow<'_, BigUint>, shift: T) -> BigUint { + if shift < T::zero() { + panic!("attempt to shift right with negative"); + } + if n.is_zero() { + return n.into_owned(); + } + let bits = T::from(big_digit::BITS).unwrap(); + let digits = (shift / bits).to_usize().unwrap_or(core::usize::MAX); + let shift = (shift % bits).to_u8().unwrap(); + biguint_shr2(n, digits, shift) +} + +fn biguint_shr2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint { + if digits >= n.data.len() { + let mut n = n.into_owned(); + n.set_zero(); + return n; + } + let mut data = match n { + Cow::Borrowed(n) => n.data[digits..].to_vec(), + Cow::Owned(mut n) => { + n.data.drain(..digits); + n.data + } + }; + + if shift > 0 { + let mut borrow = 0; + let borrow_shift = big_digit::BITS as u8 - shift; + for elem in data.iter_mut().rev() { + let new_borrow = *elem << borrow_shift; + *elem = (*elem >> shift) | borrow; + borrow = new_borrow; + } + } + + biguint_from_vec(data) +} + +macro_rules! 
impl_shift { + (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => { + impl<'b> $Shx<&'b $rhs> for BigUint { + type Output = BigUint; + + #[inline] + fn $shx(self, rhs: &'b $rhs) -> BigUint { + $Shx::$shx(self, *rhs) + } + } + impl<'a, 'b> $Shx<&'b $rhs> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn $shx(self, rhs: &'b $rhs) -> BigUint { + $Shx::$shx(self, *rhs) + } + } + impl<'b> $ShxAssign<&'b $rhs> for BigUint { + #[inline] + fn $shx_assign(&mut self, rhs: &'b $rhs) { + $ShxAssign::$shx_assign(self, *rhs); + } + } + }; + ($($rhs:ty),+) => {$( + impl Shl<$rhs> for BigUint { + type Output = BigUint; + + #[inline] + fn shl(self, rhs: $rhs) -> BigUint { + biguint_shl(Cow::Owned(self), rhs) + } + } + impl<'a> Shl<$rhs> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn shl(self, rhs: $rhs) -> BigUint { + biguint_shl(Cow::Borrowed(self), rhs) + } + } + impl ShlAssign<$rhs> for BigUint { + #[inline] + fn shl_assign(&mut self, rhs: $rhs) { + let n = mem::replace(self, BigUint::zero()); + *self = n << rhs; + } + } + impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs } + + impl Shr<$rhs> for BigUint { + type Output = BigUint; + + #[inline] + fn shr(self, rhs: $rhs) -> BigUint { + biguint_shr(Cow::Owned(self), rhs) + } + } + impl<'a> Shr<$rhs> for &'a BigUint { + type Output = BigUint; + + #[inline] + fn shr(self, rhs: $rhs) -> BigUint { + biguint_shr(Cow::Borrowed(self), rhs) + } + } + impl ShrAssign<$rhs> for BigUint { + #[inline] + fn shr_assign(&mut self, rhs: $rhs) { + let n = mem::replace(self, BigUint::zero()); + *self = n >> rhs; + } + } + impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs } + )*}; +} + +impl_shift! { u8, u16, u32, u64, u128, usize } +impl_shift! { i8, i16, i32, i64, i128, isize } diff --git a/src/rust/vendor/num-bigint/src/biguint/subtraction.rs b/src/rust/vendor/num-bigint/src/biguint/subtraction.rs new file mode 100644 index 000000000..67005175e --- /dev/null +++ b/src/rust/vendor/num-bigint/src/biguint/subtraction.rs @@ -0,0 +1,312 @@ +#[cfg(not(u64_digit))] +use super::u32_from_u128; +use super::BigUint; + +use crate::big_digit::{self, BigDigit}; +use crate::UsizePromotion; + +use core::cmp::Ordering::{Equal, Greater, Less}; +use core::ops::{Sub, SubAssign}; +use num_traits::{CheckedSub, Zero}; + +#[cfg(all(use_addcarry, target_arch = "x86_64"))] +use core::arch::x86_64 as arch; + +#[cfg(all(use_addcarry, target_arch = "x86"))] +use core::arch::x86 as arch; + +// Subtract with borrow: +#[cfg(all(use_addcarry, u64_digit))] +#[inline] +fn sbb(borrow: u8, a: u64, b: u64, out: &mut u64) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_subborrow_u64`. + // It's just unsafe for API consistency with other intrinsics. + unsafe { arch::_subborrow_u64(borrow, a, b, out) } +} + +#[cfg(all(use_addcarry, not(u64_digit)))] +#[inline] +fn sbb(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 { + // Safety: There are absolutely no safety concerns with calling `_subborrow_u32`. + // It's just unsafe for API consistency with other intrinsics. 
+ unsafe { arch::_subborrow_u32(borrow, a, b, out) } +} + +// fallback for environments where we don't have a subborrow intrinsic +#[cfg(not(use_addcarry))] +#[inline] +fn sbb(borrow: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 { + use crate::big_digit::SignedDoubleBigDigit; + + let difference = SignedDoubleBigDigit::from(a) + - SignedDoubleBigDigit::from(b) + - SignedDoubleBigDigit::from(borrow); + *out = difference as BigDigit; + u8::from(difference < 0) +} + +pub(super) fn sub2(a: &mut [BigDigit], b: &[BigDigit]) { + let mut borrow = 0; + + let len = Ord::min(a.len(), b.len()); + let (a_lo, a_hi) = a.split_at_mut(len); + let (b_lo, b_hi) = b.split_at(len); + + for (a, b) in a_lo.iter_mut().zip(b_lo) { + borrow = sbb(borrow, *a, *b, a); + } + + if borrow != 0 { + for a in a_hi { + borrow = sbb(borrow, *a, 0, a); + if borrow == 0 { + break; + } + } + } + + // note: we're _required_ to fail on underflow + assert!( + borrow == 0 && b_hi.iter().all(|x| *x == 0), + "Cannot subtract b from a because b is larger than a." + ); +} + +// Only for the Sub impl. `a` and `b` must have same length. +#[inline] +fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> u8 { + debug_assert!(b.len() == a.len()); + + let mut borrow = 0; + + for (ai, bi) in a.iter().zip(b) { + borrow = sbb(borrow, *ai, *bi, bi); + } + + borrow +} + +fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) { + debug_assert!(b.len() >= a.len()); + + let len = Ord::min(a.len(), b.len()); + let (a_lo, a_hi) = a.split_at(len); + let (b_lo, b_hi) = b.split_at_mut(len); + + let borrow = __sub2rev(a_lo, b_lo); + + assert!(a_hi.is_empty()); + + // note: we're _required_ to fail on underflow + assert!( + borrow == 0 && b_hi.iter().all(|x| *x == 0), + "Cannot subtract b from a because b is larger than a." 
+ ); +} + +forward_val_val_binop!(impl Sub for BigUint, sub); +forward_ref_ref_binop!(impl Sub for BigUint, sub); +forward_val_assign!(impl SubAssign for BigUint, sub_assign); + +impl<'a> Sub<&'a BigUint> for BigUint { + type Output = BigUint; + + fn sub(mut self, other: &BigUint) -> BigUint { + self -= other; + self + } +} +impl<'a> SubAssign<&'a BigUint> for BigUint { + fn sub_assign(&mut self, other: &'a BigUint) { + sub2(&mut self.data[..], &other.data[..]); + self.normalize(); + } +} + +impl<'a> Sub for &'a BigUint { + type Output = BigUint; + + fn sub(self, mut other: BigUint) -> BigUint { + let other_len = other.data.len(); + if other_len < self.data.len() { + let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data); + other.data.extend_from_slice(&self.data[other_len..]); + if lo_borrow != 0 { + sub2(&mut other.data[other_len..], &[1]) + } + } else { + sub2rev(&self.data[..], &mut other.data[..]); + } + other.normalized() + } +} + +promote_unsigned_scalars!(impl Sub for BigUint, sub); +promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign); +forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); +forward_all_scalar_binop_to_val_val!(impl Sub for BigUint, sub); + +impl Sub for BigUint { + type Output = BigUint; + + #[inline] + fn sub(mut self, other: u32) -> BigUint { + self -= other; + self + } +} + +impl SubAssign for BigUint { + fn sub_assign(&mut self, other: u32) { + sub2(&mut self.data[..], &[other as BigDigit]); + self.normalize(); + } +} + +impl Sub for u32 { + type Output = BigUint; + + #[cfg(not(u64_digit))] + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + if other.data.len() == 0 { + other.data.push(self); + } else { + sub2rev(&[self], &mut other.data[..]); + } + other.normalized() + } + + #[cfg(u64_digit)] + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + if other.data.is_empty() { + other.data.push(self as BigDigit); + } else { + sub2rev(&[self as BigDigit], &mut other.data[..]); + } + other.normalized() + } +} + +impl Sub for BigUint { + type Output = BigUint; + + #[inline] + fn sub(mut self, other: u64) -> BigUint { + self -= other; + self + } +} + +impl SubAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn sub_assign(&mut self, other: u64) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + sub2(&mut self.data[..], &[lo, hi]); + self.normalize(); + } + + #[cfg(u64_digit)] + #[inline] + fn sub_assign(&mut self, other: u64) { + sub2(&mut self.data[..], &[other as BigDigit]); + self.normalize(); + } +} + +impl Sub for u64 { + type Output = BigUint; + + #[cfg(not(u64_digit))] + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + while other.data.len() < 2 { + other.data.push(0); + } + + let (hi, lo) = big_digit::from_doublebigdigit(self); + sub2rev(&[lo, hi], &mut other.data[..]); + other.normalized() + } + + #[cfg(u64_digit)] + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + if other.data.is_empty() { + other.data.push(self); + } else { + sub2rev(&[self], &mut other.data[..]); + } + other.normalized() + } +} + +impl Sub for BigUint { + type Output = BigUint; + + #[inline] + fn sub(mut self, other: u128) -> BigUint { + self -= other; + self + } +} + +impl SubAssign for BigUint { + #[cfg(not(u64_digit))] + #[inline] + fn sub_assign(&mut self, other: u128) { + let (a, b, c, d) = u32_from_u128(other); + sub2(&mut self.data[..], &[d, c, b, a]); + self.normalize(); + } + + #[cfg(u64_digit)] + #[inline] + fn 
sub_assign(&mut self, other: u128) { + let (hi, lo) = big_digit::from_doublebigdigit(other); + sub2(&mut self.data[..], &[lo, hi]); + self.normalize(); + } +} + +impl Sub for u128 { + type Output = BigUint; + + #[cfg(not(u64_digit))] + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + while other.data.len() < 4 { + other.data.push(0); + } + + let (a, b, c, d) = u32_from_u128(self); + sub2rev(&[d, c, b, a], &mut other.data[..]); + other.normalized() + } + + #[cfg(u64_digit)] + #[inline] + fn sub(self, mut other: BigUint) -> BigUint { + while other.data.len() < 2 { + other.data.push(0); + } + + let (hi, lo) = big_digit::from_doublebigdigit(self); + sub2rev(&[lo, hi], &mut other.data[..]); + other.normalized() + } +} + +impl CheckedSub for BigUint { + #[inline] + fn checked_sub(&self, v: &BigUint) -> Option { + match self.cmp(v) { + Less => None, + Equal => Some(Zero::zero()), + Greater => Some(self.sub(v)), + } + } +} diff --git a/src/rust/vendor/num-bigint/src/lib.rs b/src/rust/vendor/num-bigint/src/lib.rs index 837a7d050..b88c5df26 100644 --- a/src/rust/vendor/num-bigint/src/lib.rs +++ b/src/rust/vendor/num-bigint/src/lib.rs @@ -72,19 +72,20 @@ //! feature is enabled. To enable it include rand as //! //! ```toml -//! rand = "0.7" -//! num-bigint = { version = "0.3", features = ["rand"] } +//! rand = "0.8" +//! num-bigint = { version = "0.4", features = ["rand"] } //! ``` //! //! Note that you must use the version of `rand` that `num-bigint` is compatible -//! with: `0.7`. +//! with: `0.8`. //! //! //! ## Compatibility //! //! The `num-bigint` crate is tested for rustc 1.31 and greater. -#![doc(html_root_url = "https://docs.rs/num-bigint/0.3")] +#![doc(html_root_url = "https://docs.rs/num-bigint/0.4")] +#![warn(rust_2018_idioms)] #![no_std] #[cfg(feature = "std")] @@ -94,7 +95,7 @@ extern crate std; #[cfg(feature = "std")] mod std_alloc { pub(crate) use std::borrow::Cow; - #[cfg(any(feature = "quickcheck", feature = "arbitrary"))] + #[cfg(any(feature = "quickcheck"))] pub(crate) use std::boxed::Box; pub(crate) use std::string::String; pub(crate) use std::vec::Vec; @@ -107,7 +108,7 @@ extern crate alloc; #[cfg(not(feature = "std"))] mod std_alloc { pub(crate) use alloc::borrow::Cow; - #[cfg(any(feature = "quickcheck", feature = "arbitrary"))] + #[cfg(any(feature = "quickcheck"))] pub(crate) use alloc::boxed::Box; pub(crate) use alloc::string::String; pub(crate) use alloc::vec::Vec; @@ -221,13 +222,15 @@ where #[cfg(has_try_from)] impl fmt::Display for TryFromBigIntError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.__description().fmt(f) } } pub use crate::biguint::BigUint; pub use crate::biguint::ToBigUint; +pub use crate::biguint::U32Digits; +pub use crate::biguint::U64Digits; pub use crate::bigint::BigInt; pub use crate::bigint::Sign; diff --git a/src/rust/vendor/num-bigint/tests/bigint.rs b/src/rust/vendor/num-bigint/tests/bigint.rs index 292796efa..f244bc4bd 100644 --- a/src/rust/vendor/num-bigint/tests/bigint.rs +++ b/src/rust/vendor/num-bigint/tests/bigint.rs @@ -1306,4 +1306,101 @@ fn test_pow() { check!(u32); check!(u64); check!(usize); + + let pow_1e10000 = BigInt::from(10u32).pow(10_000_u32); + let manual_1e10000 = repeat(10u32).take(10_000).product::(); + assert!(manual_1e10000 == pow_1e10000); +} + +#[test] +fn test_bit() { + // 12 = (1100)_2 + assert!(!BigInt::from(0b1100u8).bit(0)); + assert!(!BigInt::from(0b1100u8).bit(1)); + assert!(BigInt::from(0b1100u8).bit(2)); + 
assert!(BigInt::from(0b1100u8).bit(3)); + assert!(!BigInt::from(0b1100u8).bit(4)); + assert!(!BigInt::from(0b1100u8).bit(200)); + assert!(!BigInt::from(0b1100u8).bit(u64::MAX)); + // -12 = (...110100)_2 + assert!(!BigInt::from(-12i8).bit(0)); + assert!(!BigInt::from(-12i8).bit(1)); + assert!(BigInt::from(-12i8).bit(2)); + assert!(!BigInt::from(-12i8).bit(3)); + assert!(BigInt::from(-12i8).bit(4)); + assert!(BigInt::from(-12i8).bit(200)); + assert!(BigInt::from(-12i8).bit(u64::MAX)); +} + +#[test] +fn test_set_bit() { + let mut x: BigInt; + + // zero + x = BigInt::zero(); + x.set_bit(200, true); + assert_eq!(x, BigInt::one() << 200); + x = BigInt::zero(); + x.set_bit(200, false); + assert_eq!(x, BigInt::zero()); + + // positive numbers + x = BigInt::from_biguint(Plus, BigUint::one() << 200); + x.set_bit(10, true); + x.set_bit(200, false); + assert_eq!(x, BigInt::one() << 10); + x.set_bit(10, false); + x.set_bit(5, false); + assert_eq!(x, BigInt::zero()); + + // negative numbers + x = BigInt::from(-12i8); + x.set_bit(200, true); + assert_eq!(x, BigInt::from(-12i8)); + x.set_bit(200, false); + assert_eq!( + x, + BigInt::from_biguint(Minus, BigUint::from(12u8) | (BigUint::one() << 200)) + ); + x.set_bit(6, false); + assert_eq!( + x, + BigInt::from_biguint(Minus, BigUint::from(76u8) | (BigUint::one() << 200)) + ); + x.set_bit(6, true); + assert_eq!( + x, + BigInt::from_biguint(Minus, BigUint::from(12u8) | (BigUint::one() << 200)) + ); + x.set_bit(200, true); + assert_eq!(x, BigInt::from(-12i8)); + + x = BigInt::from_biguint(Minus, BigUint::one() << 30); + x.set_bit(10, true); + assert_eq!( + x, + BigInt::from_biguint(Minus, (BigUint::one() << 30) - (BigUint::one() << 10)) + ); + + x = BigInt::from_biguint(Minus, BigUint::one() << 200); + x.set_bit(40, true); + assert_eq!( + x, + BigInt::from_biguint(Minus, (BigUint::one() << 200) - (BigUint::one() << 40)) + ); + + x = BigInt::from_biguint(Minus, (BigUint::one() << 200) | (BigUint::one() << 100)); + x.set_bit(100, false); + assert_eq!( + x, + BigInt::from_biguint(Minus, (BigUint::one() << 200) | (BigUint::one() << 101)) + ); + + x = BigInt::from_biguint(Minus, (BigUint::one() << 63) | (BigUint::one() << 62)); + x.set_bit(62, false); + assert_eq!(x, BigInt::from_biguint(Minus, BigUint::one() << 64)); + + x = BigInt::from_biguint(Minus, (BigUint::one() << 200) - BigUint::one()); + x.set_bit(0, false); + assert_eq!(x, BigInt::from_biguint(Minus, BigUint::one() << 200)); } diff --git a/src/rust/vendor/num-bigint/tests/bigint_scalar.rs b/src/rust/vendor/num-bigint/tests/bigint_scalar.rs index 485f2c5b1..2a19fafb3 100644 --- a/src/rust/vendor/num-bigint/tests/bigint_scalar.rs +++ b/src/rust/vendor/num-bigint/tests/bigint_scalar.rs @@ -1,8 +1,9 @@ use num_bigint::BigInt; use num_bigint::Sign::Plus; -use num_traits::{Signed, ToPrimitive, Zero}; +use num_traits::{One, Signed, ToPrimitive, Zero}; use std::ops::Neg; +use std::panic::catch_unwind; mod consts; use crate::consts::*; @@ -146,3 +147,11 @@ fn test_scalar_div_rem() { } } } + +#[test] +fn test_scalar_div_rem_zero() { + catch_unwind(|| BigInt::zero() / 0u32).unwrap_err(); + catch_unwind(|| BigInt::zero() % 0u32).unwrap_err(); + catch_unwind(|| BigInt::one() / 0u32).unwrap_err(); + catch_unwind(|| BigInt::one() % 0u32).unwrap_err(); +} diff --git a/src/rust/vendor/num-bigint/tests/biguint.rs b/src/rust/vendor/num-bigint/tests/biguint.rs index 14c33daac..821b754a4 100644 --- a/src/rust/vendor/num-bigint/tests/biguint.rs +++ b/src/rust/vendor/num-bigint/tests/biguint.rs @@ -1008,12 +1008,26 @@ fn 
test_checked_mul() { #[test] fn test_mul_overflow() { // Test for issue #187 - overflow due to mac3 incorrectly sizing temporary - let s = "531137992816767098689588206552468627329593117727031923199444138200403559860852242739162502232636710047537552105951370000796528760829212940754539968588340162273730474622005920097370111"; + let s = "5311379928167670986895882065524686273295931177270319231994441382\ + 0040355986085224273916250223263671004753755210595137000079652876\ + 0829212940754539968588340162273730474622005920097370111"; let a: BigUint = s.parse().unwrap(); let b = a.clone(); let _ = a.checked_mul(&b); } +#[test] +fn test_mul_overflow_2() { + // Try a bunch of sizes that are right on the edge of multiplication length + // overflow, where (x * x).data.len() == 2 * x.data.len() + 1. + for i in 1u8..20 { + let bits = 1u32 << i; + let x = (BigUint::one() << bits) - 1u32; + let x2 = (BigUint::one() << (2 * bits)) - &x - &x - 1u32; + assert_eq!(&x * &x, x2); + } +} + #[test] fn test_checked_div() { for elm in MUL_TRIPLES.iter() { @@ -1551,6 +1565,8 @@ fn test_from_and_to_radix() { } assert!(BigUint::from_radix_le(&[10, 100, 10], 50).is_none()); + assert_eq!(BigUint::from_radix_le(&[], 2), Some(BigUint::zero())); + assert_eq!(BigUint::from_radix_be(&[], 2), Some(BigUint::zero())); } #[test] @@ -1598,6 +1614,16 @@ fn test_all_str_radix() { } } +#[test] +fn test_big_str() { + for n in 2..=20_u32 { + let x: BigUint = BigUint::from(n).pow(10_000_u32); + let s = x.to_string(); + let y: BigUint = s.parse().unwrap(); + assert_eq!(x, y); + } +} + #[test] fn test_lower_hex() { let a = BigUint::parse_bytes(b"A", 16).unwrap(); @@ -1774,4 +1800,65 @@ fn test_pow() { check!(u64); check!(u128); check!(usize); + + let pow_1e10000 = BigUint::from(10u32).pow(10_000_u32); + let manual_1e10000 = repeat(10u32).take(10_000).product::(); + assert!(manual_1e10000 == pow_1e10000); +} + +#[test] +fn test_trailing_zeros() { + assert!(BigUint::from(0u8).trailing_zeros().is_none()); + assert_eq!(BigUint::from(1u8).trailing_zeros().unwrap(), 0); + assert_eq!(BigUint::from(2u8).trailing_zeros().unwrap(), 1); + let x: BigUint = BigUint::one() << 128; + assert_eq!(x.trailing_zeros().unwrap(), 128); +} + +#[test] +fn test_trailing_ones() { + assert_eq!(BigUint::from(0u8).trailing_ones(), 0); + assert_eq!(BigUint::from(1u8).trailing_ones(), 1); + assert_eq!(BigUint::from(2u8).trailing_ones(), 0); + assert_eq!(BigUint::from(3u8).trailing_ones(), 2); + let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8); + assert_eq!(x.trailing_ones(), 2); + let x: BigUint = (BigUint::one() << 128) - BigUint::one(); + assert_eq!(x.trailing_ones(), 128); +} + +#[test] +fn test_count_ones() { + assert_eq!(BigUint::from(0u8).count_ones(), 0); + assert_eq!(BigUint::from(1u8).count_ones(), 1); + assert_eq!(BigUint::from(2u8).count_ones(), 1); + assert_eq!(BigUint::from(3u8).count_ones(), 2); + let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8); + assert_eq!(x.count_ones(), 4); +} + +#[test] +fn test_bit() { + assert!(!BigUint::from(0u8).bit(0)); + assert!(!BigUint::from(0u8).bit(100)); + assert!(!BigUint::from(42u8).bit(4)); + assert!(BigUint::from(42u8).bit(5)); + let x: BigUint = (BigUint::from(3u8) << 128) | BigUint::from(3u8); + assert!(x.bit(129)); + assert!(!x.bit(130)); +} + +#[test] +fn test_set_bit() { + let mut x = BigUint::from(3u8); + x.set_bit(128, true); + x.set_bit(129, true); + assert_eq!(x, (BigUint::from(3u8) << 128) | BigUint::from(3u8)); + x.set_bit(0, false); + x.set_bit(128, false); + x.set_bit(130, 
false); + assert_eq!(x, (BigUint::from(2u8) << 128) | BigUint::from(2u8)); + x.set_bit(129, false); + x.set_bit(1, false); + assert_eq!(x, BigUint::zero()); } diff --git a/src/rust/vendor/num-bigint/tests/biguint_scalar.rs b/src/rust/vendor/num-bigint/tests/biguint_scalar.rs index b6eadd9e1..7c34f7ef7 100644 --- a/src/rust/vendor/num-bigint/tests/biguint_scalar.rs +++ b/src/rust/vendor/num-bigint/tests/biguint_scalar.rs @@ -1,5 +1,7 @@ use num_bigint::BigUint; -use num_traits::{ToPrimitive, Zero}; +use num_traits::{One, ToPrimitive, Zero}; + +use std::panic::catch_unwind; mod consts; use crate::consts::*; @@ -111,3 +113,11 @@ fn test_scalar_div_rem() { } } } + +#[test] +fn test_scalar_div_rem_zero() { + catch_unwind(|| BigUint::zero() / 0u32).unwrap_err(); + catch_unwind(|| BigUint::zero() % 0u32).unwrap_err(); + catch_unwind(|| BigUint::one() / 0u32).unwrap_err(); + catch_unwind(|| BigUint::one() % 0u32).unwrap_err(); +} diff --git a/src/rust/vendor/num-bigint/tests/fuzzed.rs b/src/rust/vendor/num-bigint/tests/fuzzed.rs new file mode 100644 index 000000000..7ff564191 --- /dev/null +++ b/src/rust/vendor/num-bigint/tests/fuzzed.rs @@ -0,0 +1,185 @@ +//! Check buggy inputs that were found by fuzzing + +use num_bigint::BigUint; +use num_traits::Num; + +#[test] +fn fuzzed_mul_1() { + let hex1 = "\ + cd6839ee857cf791a40494c2e522846eefbca9eca9912fdc1feed4561dbde75c75f1ddca2325ebb1\ + b9cd6eae07308578e58e57f4ddd7dc239b4fd347b883e37d87232a8e5d5a8690c8dba69c97fe8ac4\ + 58add18be7e460e03c9d1ae8223db53d20681a4027ffc17d1e43b764791c4db5ff7add849da7e378\ + ac8d9be0e8b517c490da3c0f944b6a52a0c5dc5217c71da8eec35d2c3110d8b041d2b52f3e2a8904\ + abcaaca517a8f2ef6cd26ceadd39a1cf9f770bc08f55f5a230cd81961348bb18534245430699de77\ + d93b805153cffd05dfd0f2cfc2332888cec9c5abf3ece9b4d7886ad94c784bf74fce12853b2a9a75\ + b62a845151a703446cc20300eafe7332330e992ae88817cd6ccef8877b66a7252300a4664d7074da\ + 181cd9fd502ea1cd71c0b02db3c009fe970a7d226382cdba5b5576c5c0341694681c7adc4ca2d059\ + d9a6b300957a2235a4eb6689b71d34dcc4037b520eabd2c8b66604bb662fe2bcf533ba8d242dbc91\ + f04c1795b9f0fee800d197d8c6e998248b15855a9602b76cb3f94b148d8f71f7d6225b79d63a8e20\ + 8ec8f0fa56a1c381b6c09bad9886056aec17fc92b9bb0f8625fd3444e40cccc2ede768ddb23c66ad\ + 59a680a26a26d519d02e4d46ce93cce9e9dd86702bdd376abae0959a0e8e418aa507a63fafb8f422\ + 83b03dc26f371c5e261a8f90f3ac9e2a6bcc7f0a39c3f73043b5aa5a950d4e945e9f68b2c2e593e3\ + b995be174714c1967b71f579043f89bfce37437af9388828a3ba0465c88954110cae6d38b638e094\ + 13c15c9faddd6fb63623fd50e06d00c4d5954e787158b3e4eea7e9fae8b189fa8a204b23ac2f7bbc\ + b601189c0df2075977c2424336024ba3594172bea87f0f92beb20276ce8510c8ef2a4cd5ede87e7e\ + 38b3fa49d66fbcd322be686a349c24919f4000000000000000000000000000000000000000000000\ + 000000000000000000000000000000000"; + let hex2 = "\ + 40000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 
00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000007"; + + // Result produced independently in Python + let hex_result = "\ + 335a0e7ba15f3de469012530b948a11bbbef2a7b2a644bf707fbb515876f79d71d7c777288c97aec\ + 6e735bab81cc215e396395fd3775f708e6d3f4d1ee20f8df61c8caa39756a1a43236e9a725ffa2b1\ + 162b7462f9f918380f2746ba088f6d4f481a069009fff05f4790edd91e47136d7fdeb7612769f8de\ + 2b2366f83a2d45f124368f03e512da94a831771485f1c76a3bb0d74b0c44362c1074ad4bcf8aa241\ + 2af2ab2945ea3cbbdb349b3ab74e6873e7ddc2f023d57d688c33606584d22ec614d09150c1a6779d\ + f64ee01454f3ff4177f43cb3f08cca2233b2716afcfb3a6d35e21ab6531e12fdd3f384a14ecaa69d\ + 6d8aa1145469c0d11b3080c03abf9ccc8cc3a64aba2205f35b33be21ded9a9c948c02919935c1d36\ + 8607367f540ba8735c702c0b6cf0027fa5c29f4898e0b36e96d55db1700d05a51a071eb71328b416\ + 7669acc0255e888d693ad9a26dc74d373100ded483aaf4b22d99812ed98bf8af3d4ceea3490b6f24\ + 7c1305e56e7c3fba003465f631ba660922c56156a580addb2cfe52c52363dc7df58896de758ea388\ + 23b23c3e95a870e06db026eb6621815abb05ff24ae6ec3e1897f4d1139033330bb79da376c8f19ab\ + 5669a0289a89b546740b9351b3a4f33a7a77619c0af74ddaaeb8256683a39062a941e98febee3d08\ + a0ec0f709bcdc7178986a3e43ceb278a9af31fc28e70fdcc10ed6a96a54353a517a7da2cb0b964f8\ + ee656f85d1c530659edc7d5e410fe26ff38dd0debe4e220a28ee811972225504432b9b4e2d8e3825\ + 04f05727eb775bed8d88ff54381b40313565539e1c562cf93ba9fa7eba2c627ea28812c8eb0bdeef\ + 2d804627037c81d65df09090cd8092e8d6505cafaa1fc3e4afac809db3a144323bca93358117f935\ + 13d3695771180f461cf38bb995b531c9e072f84f04df87ce5ad0315387399d1086f60971dc149e06\ + c23253a64e46e467b210e704f93f2ec6f60b9b386eb1f629e48d79adf57e018e4827f5cb5e6cc0ba\ + d3573ea621a84bbc58efaff4abe2d8b7c117fe4a6bd3da03bf4fc61ff9fc5c0ea04f97384cb7df43\ + 265cf3a65ff5f7a46d0e0fe8426569063ea671cf9e87578c355775ecd1ccc2f44ab329bf20b28ab8\ + 83a59ea48bf9c0fa6c0c936cad5c415243eb59b76f559e8b1a86fd1daa46cfe4d52e351546f0a082\ + 394aafeb291eb6a3ae4f661bbda78467b3ab7a63f1e4baebf1174a13c32ea281a49e2a3937fb299e\ + 393b9116def94e15066cf5265f6566302c5bb8a69df9a8cbb45fce9203f5047ecc1e1331f6a8c9f5\ + ed31466c9e1c44d13fea4045f621496bf0b893a0187f563f68416c9e0ed8c75c061873b274f38ee5\ + 041656ef77826fcdc401cc72095c185f3e66b2c37cfcca211fcb4f332ab46a19dbfd4027fd9214a5\ + 181596f85805bb26ed706328ffcd96a57a1a1303f8ebd10d8fdeec1dc6daf08054db99e2e3e77e96\ + d85e6c588bff4441bf2baa25ec74a7e803141d6cab09ec6de23c5999548153de0fdfa6cebd738d84\ + 70e70fd3b4b1441cefa60a9a65650ead11330c83eb1c24173665e3caca83358bbdce0eacf199d1b0\ + 510a81c6930ab9ecf6a9b85328f2977947945bc251d9f7a87a135d260e965bdce354470b3a131832\ + a2f1914b1d601db64f1dbcc43ea382d85cd08bb91c7a161ec87bc14c7758c4fc8cfb8e240c8a4988\ + 5dc10e0dfb7afbed3622fb0561d715254b196ceb42869765dc5cdac5d9c6e20df9b54c6228fa07ac\ + 44619e3372464fcfd67a10117770ca23369b796d0336de113fa5a3757e8a2819d9815b75738cebd8\ + 04dd0e29c5f334dae77044fffb5ac000000000000000000000000000000000000000000000000000\ + 000000000000000000000000000"; + + let bn1 = &BigUint::from_str_radix(hex1, 16).unwrap(); + let bn2 = 
&BigUint::from_str_radix(hex2, 16).unwrap(); + let result = BigUint::from_str_radix(hex_result, 16).unwrap(); + + assert_eq!(bn1 * bn2, result); + assert_eq!(bn2 * bn1, result); +} + +#[test] +fn fuzzed_mul_2() { + let hex_a = "\ + 812cff04ff812cff04ff8180ff80ffff11ff80ff2cff04ff812cff04ff812cff04ff81232cff047d\ + ff04ff812cff04ff812cff04ff812cff047f812cff04ff8180ff2cff04ff04ff8180ff2cff04ff04\ + ff812cbf04ff8180ff2cff04ff812cff0401010000000000000000ffff1a80ffc006c70084ffff80\ + ffc0064006000084ffff72ffc020ffffffffffff06d709000000dbffffffc799999999b999999999\ + 99999999000084ffff72ffc02006e1ffffffc70900ffffff00f312ff80ebffffff6f505f6c2e6712\ + 108970ffff5f6c6f6727020000000000007400000000000000000000a50000000000000000000000\ + 000000000000000000000000ff812cff04ff812cff2c04ff812cff8180ff2cff04ff04ff818b8b8b\ + 8b8b8b8b8b8b8b8b8b8b8b8b8b06c70084ffff80ffc006c700847fff80ffc006c700ffff12c70084\ + ffff80ffc0060000000000000056ff00c789bfff80ffc006c70084ffff80ffc006c700ffff840100\ + 00000000001289ffc08b8b8b8b8b8b8b2c"; + let hex_b = "\ + 7ed300fb007ed300fb007e7f00db00fb007ed3007ed300fb007edcd300fb8200fb007ed300fb007e\ + d300fb007ed300fb007ed300fbfeffffffffffffa8fb007e7f00d300fb00fb007ed340fb007e7f00\ + 00fb007ed300fb007ed300fb007e7f00d300fb00fb007e7f00d300fb007efb007e7f00d300fb007e\ + d300fb007e7f0097d300fb00bf007ed300fb007ed300fb00fb00fb00fbffffffffffffffffffff00\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 000000000000df9b3900ff908fa08d9e968c9a0000e7fffb7fff0000003fd9004c90d8f600de7f00\ + 3fdf9b3900ff908fa08d9e968cf9b9ff0000ed38ff7b00007f003ff9ffffffffffffffa900ff3876\ + 000078003ff938ff7b00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d300\ + fb00fb007e7f00d300fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb017e\ + d300fb007ed300fb007edcd300fb8200fb007e0000e580"; + let hex_c = "\ + 7b00387ffff938ff7b80007f003ff9b9ff00fdec38ff7b00007f003ff9ffffffffffffffa900ff38\ + 76000078003ff938ff7b00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d3\ + 00fb00fb007e7f00d300fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb01\ + 7ed300fb007ed300fb007edcd300fb8200fb007e000000ee7f003f0000007b00387ffff938ff7b80\ + 007f003ff9b9ff00fdec38ff7b00007f003ff9ffffffffffffffa900ff3876000078003ff938ff7b\ + 00007f003ff938ff00007bfeffffffffffffed76003f74747474747474d300fb00fb007e7f00d300\ + fb007efb007e7f00d3003e7f007ed300fb007ed300fb007e7f00d300fb017ed300fb007ed300fb00\ + 7edcd300fb8200fb007e000000ee7f003f000000000000000000000000000000002a000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000df9b3900ff908fa08d9e968c9a0000e7fffb7fff0000003fd9004c90d8\ + f600de7f003fdf9b3900ff908fa08d9e968c9a0000e7fffa7fff0000004005004c90d8f600de908f\ + dcd300fb8200fb007e0000e57f003ff938ff7b00007f003d7ed300fb007ed300fb007ed300fb007e\ + fa00fb007ed300fbf9ffffffffffffffa900ff387600007f003ff938ff7b00007f003ff938fd0000\ + 7bfeffffffffffffed76003f74747474747474d300fc"; + + // Result produced independently in Python + let hex_result = "\ + 1ebf6415da7ac71a689cd450727b7a361402a1626e0b6cd057e0e2a77d4cb722c1b7d0cbd73a7c07\ + 
d756813fe97d73d5905c4a26404c7162769ba2dbc1e2742855a1db803e2d2c2fddc77c0598cc70fe\ + 066fd4b81cae3e23c55b4795de63acacd1343cf5ad5e715e6919d140c01bab1af1a737ebbf8a7775\ + 7602acd611f555ee2d5be56cc14b97c248009cd77490a3dfd6762bae25459a544e369eb4b0cc952a\ + 8e6a551ff35a4a7a6e5f5b0b72495c4baadf3a26b9d5d97402ad60fa2324e93adc96ca159b62d147\ + 5695f26ff27da100a76e2d273420572e61b4dfbd97e826d9d946f85b87434523f6aa7ce43c443285\ + 33f5b5adf32574167b1e9ea3bf6254d6afacf865894907de196285169cfcc1c0fcf438873d13f7e8\ + 654acc27c1abb00bec2729e34c994ff2152f60406f75db3ab616541795d9db8ca0b381148de7875f\ + e7a8191407abc390718003698ca28498948caf1dbc3f02593dd85fa929ebae86cfe783d7be473e98\ + 0060d9ec60843661cb4cb9b8ddb24bb710f93700b22530501b5ea26c5c94c7370fe0ccbafe0ce7e4\ + cd4f071d0cf0ac151c85a5b132ecaa75793abfb4a6ee33fddd2aa2f5cf2a8eb19c75322792c0d8dc\ + 1efb2dcd8ae2b49dd57b84898f531c7f745464f637716151831db56b3e293f587dc95a5e12edfe6b\ + 8458033dddf3556da55bef55ba3c3769def0c0f0c86786aca8313dc0ce09118760721eb545d69b46\ + cdb89d377f2c80e67b572da0f75760c2849288a8457c18c6f0b58244b7f95a7567ce23756f1fe359\ + 64f7e84fbe28b188157519dd99b8798b076e21984d15c37f41da1309e0fbc539e8b9b09fed36a908\ + 28c94f72e7b755c187e58db6bfef0c02309086626ad0fe2efd2ff1467b3de11e057687865f4f85e7\ + 0a39bcbc4674dcaded9b04562afe08eb92fbd96ea4a99aa4f9347a075d4421f070ce3a33225f5af1\ + 9c27ec5d1720e659ca7fff9686f46b01d76d7de64c738671aaec57ee5582ef7956206fb37c6a36f8\ + 8f226ce2124a7f9894a0e9a7aa02001746e6def35699d7adc84a7dcf513ff3da20fd849950f41a5d\ + bb02c91666697156d69ebbe2ef26732b6595d1b6d014a60006d2d3c7055ff9b531779195b8dcd7d9\ + 426e776cbc9041735384568ba4adbf7eeea7e0e6cbb47b70335a7ed12a68904eecd334921e4ae6d9\ + c983af20d73215c39573963f03bc87082450cc1c70250e1e8eaa318acaf044a072891fc60324d134\ + 6c0a1d02cceb4d4806e536d6017bf6bc125c41694ded38766fea51bfbf7a008ca0b3eb1168766486\ + 8aa8469b3e6787a5d5bad6cd67c24005a5cbaa10b63d1b4d05ac42a8b31263052a1260b5900be628\ + 4dcab4eb0cf5cda815412ced7bd78f87c00ac3581f41a04352a4a186805a5c9e37b14561a5fc97d2\ + 52ca4654fe3d82f42080c21483789cc4b4cbb568f79844f7a317aa2a6555774da26c6f027d3cb0ee\ + 9276c6dc4f285fc3b4b9a3cd51c8815cebf110e73c80a9b842cc3b7c80af13f702662b10e868eb61\ + 947000b390cd2f3a0899f6f1bab86acf767062f5526507790645ae13b9701ba96b3f873047c9d3b8\ + 5e8a5d904a01fbfe10e63495b6021e7cc082aa66679e4d92b3e4e2d62490b44f7e250584cedff0e7\ + 072a870ddaa9687a1eae11afc874d83065fb98dbc3cfd90f39517ff3015c71a8c0ab36a6483c7b87\ + f41b2c832fa9428fe95ffba4e49cc553d9e2d33a540958da51588e5120fef6497bfaa96a4dcfc024\ + 8170c57f78e9ab9546efbbaf8e9ad6a993493577edd3d29ce8fd9a2e9eb4363b5b472a4ecb2065eb\ + 38f876a841af1f227a703248955c8978329dffcd8e065d8da4d42504796ff7abc62832ed86c4f8d0\ + 0f55cd567fb9d42524be57ebdacef730c3f94c0372f86fa1b0114f8620f553e4329b2a586fcfeedc\ + af47934909090e14a1f1204e6f1681fb2df05356381e6340f4feaf0787e06218b0b0d8df51acb0bc\ + f98546f33273adf260da959d6fc4a04872122af6508d124abb963c14c30e7c07fee368324921fe33\ + 9ae89490c5d6cdae0c356bb6921de95ea13b54e23800"; + + let a = &BigUint::from_str_radix(hex_a, 16).unwrap(); + let b = &BigUint::from_str_radix(hex_b, 16).unwrap(); + let c = &BigUint::from_str_radix(hex_c, 16).unwrap(); + let result = BigUint::from_str_radix(hex_result, 16).unwrap(); + + assert_eq!(a * b * c, result); + assert_eq!(a * c * b, result); + assert_eq!(b * a * c, result); + assert_eq!(b * c * a, result); + assert_eq!(c * a * b, result); + assert_eq!(c * b * a, result); +}