From 10c4bd0c8aa0e04bd6e6a75147a62084dbb1e43f Mon Sep 17 00:00:00 2001 From: beerosagos Date: Wed, 11 Dec 2024 11:27:01 +0100 Subject: [PATCH] wip: shamir secret sharing - communication works, shamir seems to work too. to be tested --- messages/hww.proto | 4 + messages/shamir.proto | 21 + py/bitbox02/Makefile | 2 +- py/bitbox02/bitbox02/bitbox02/bitbox02.py | 12 + .../communication/generated/hww_pb2.py | 19 +- .../communication/generated/hww_pb2.pyi | 15 +- .../communication/generated/shamir_pb2.py | 27 + .../communication/generated/shamir_pb2.pyi | 20 + py/send_message.py | 8 + src/keystore.c | 21 + src/keystore.h | 11 + src/rust/Cargo.lock | 82 + src/rust/bitbox02-rust/Cargo.toml | 4 + src/rust/bitbox02-rust/src/hww/api.rs | 3 + .../bitbox02-rust/src/hww/api/show_shamir.rs | 90 + .../bitbox02-rust/src/shiftcrypto.bitbox02.rs | 177 +- src/rust/bitbox02-sys/build.rs | 1 + src/rust/bitbox02/src/keystore.rs | 22 + src/rust/vendor/ahash/.cargo-checksum.json | 1 + src/rust/vendor/ahash/Cargo.toml | 109 + src/rust/vendor/ahash/LICENSE-APACHE | 201 + src/rust/vendor/ahash/LICENSE-MIT | 25 + src/rust/vendor/ahash/README.md | 245 + src/rust/vendor/ahash/rustfmt.toml | 1 + .../smhasher/0001-Add-support-for-aHash.patch | 135 + .../smhasher/0002-Add-support-for-aHash.patch | 269 + .../vendor/ahash/smhasher/ahashOutput.txt | 1516 +++ .../vendor/ahash/smhasher/clone_smhasher.sh | 1 + .../vendor/ahash/smhasher/fallbackOutput.txt | 1467 +++ src/rust/vendor/ahash/src/aes_hash.rs | 292 + src/rust/vendor/ahash/src/convert.rs | 172 + src/rust/vendor/ahash/src/fallback_hash.rs | 223 + src/rust/vendor/ahash/src/hash_map.rs | 177 + .../vendor/ahash/src/hash_quality_test.rs | 451 + src/rust/vendor/ahash/src/hash_set.rs | 267 + src/rust/vendor/ahash/src/lib.rs | 203 + src/rust/vendor/ahash/src/operations.rs | 277 + src/rust/vendor/ahash/src/random_state.rs | 153 + src/rust/vendor/ahash/src/specialize.rs | 162 + src/rust/vendor/ahash/tests/bench.rs | 224 + 
src/rust/vendor/ahash/tests/map_tests.rs | 204 + src/rust/vendor/ahash/tests/nopanic.rs | 54 + .../vendor/byteorder/.cargo-checksum.json | 1 + src/rust/vendor/byteorder/CHANGELOG.md | 143 + src/rust/vendor/byteorder/COPYING | 3 + src/rust/vendor/byteorder/Cargo.toml | 54 + src/rust/vendor/byteorder/LICENSE-MIT | 21 + src/rust/vendor/byteorder/README.md | 77 + src/rust/vendor/byteorder/UNLICENSE | 24 + src/rust/vendor/byteorder/benches/bench.rs | 326 + src/rust/vendor/byteorder/rustfmt.toml | 2 + src/rust/vendor/byteorder/src/io.rs | 1592 ++++ src/rust/vendor/byteorder/src/lib.rs | 3975 ++++++++ .../hashbrown-0.9.1/.cargo-checksum.json | 1 + src/rust/vendor/hashbrown-0.9.1/CHANGELOG.md | 294 + src/rust/vendor/hashbrown-0.9.1/Cargo.toml | 80 + .../vendor/hashbrown-0.9.1/LICENSE-APACHE | 201 + src/rust/vendor/hashbrown-0.9.1/LICENSE-MIT | 25 + src/rust/vendor/hashbrown-0.9.1/README.md | 126 + .../vendor/hashbrown-0.9.1/benches/bench.rs | 260 + src/rust/vendor/hashbrown-0.9.1/clippy.toml | 1 + .../src/external_trait_impls/mod.rs | 4 + .../src/external_trait_impls/rayon/helpers.rs | 26 + .../src/external_trait_impls/rayon/map.rs | 666 ++ .../src/external_trait_impls/rayon/mod.rs | 4 + .../src/external_trait_impls/rayon/raw.rs | 199 + .../src/external_trait_impls/rayon/set.rs | 646 ++ .../src/external_trait_impls/serde.rs | 200 + src/rust/vendor/hashbrown-0.9.1/src/lib.rs | 112 + src/rust/vendor/hashbrown-0.9.1/src/macros.rs | 69 + src/rust/vendor/hashbrown-0.9.1/src/map.rs | 4524 +++++++++ .../vendor/hashbrown-0.9.1/src/raw/bitmask.rs | 122 + .../vendor/hashbrown-0.9.1/src/raw/generic.rs | 151 + .../vendor/hashbrown-0.9.1/src/raw/mod.rs | 1924 ++++ .../vendor/hashbrown-0.9.1/src/raw/sse2.rs | 144 + .../vendor/hashbrown-0.9.1/src/rustc_entry.rs | 618 ++ .../vendor/hashbrown-0.9.1/src/scopeguard.rs | 49 + src/rust/vendor/hashbrown-0.9.1/src/set.rs | 2119 +++++ .../vendor/hashbrown-0.9.1/tests/hasher.rs | 65 + .../vendor/hashbrown-0.9.1/tests/rayon.rs | 533 ++ 
.../vendor/hashbrown-0.9.1/tests/serde.rs | 65 + src/rust/vendor/hashbrown-0.9.1/tests/set.rs | 30 + .../vendor/ppv-lite86/.cargo-checksum.json | 1 + src/rust/vendor/ppv-lite86/CHANGELOG.md | 10 + src/rust/vendor/ppv-lite86/Cargo.toml | 55 + src/rust/vendor/ppv-lite86/LICENSE-APACHE | 201 + src/rust/vendor/ppv-lite86/LICENSE-MIT | 25 + src/rust/vendor/ppv-lite86/src/generic.rs | 861 ++ src/rust/vendor/ppv-lite86/src/lib.rs | 42 + src/rust/vendor/ppv-lite86/src/soft.rs | 475 + src/rust/vendor/ppv-lite86/src/types.rs | 298 + src/rust/vendor/ppv-lite86/src/x86_64/mod.rs | 439 + src/rust/vendor/ppv-lite86/src/x86_64/sse2.rs | 1707 ++++ src/rust/vendor/rand/.cargo-checksum.json | 1 + src/rust/vendor/rand/CHANGELOG.md | 699 ++ src/rust/vendor/rand/COPYRIGHT | 12 + src/rust/vendor/rand/Cargo.toml | 75 + src/rust/vendor/rand/LICENSE-APACHE | 176 + src/rust/vendor/rand/LICENSE-MIT | 26 + src/rust/vendor/rand/README.md | 158 + .../rand/src/distributions/bernoulli.rs | 219 + .../rand/src/distributions/distribution.rs | 272 + .../vendor/rand/src/distributions/float.rs | 312 + .../vendor/rand/src/distributions/integer.rs | 274 + src/rust/vendor/rand/src/distributions/mod.rs | 218 + .../vendor/rand/src/distributions/other.rs | 365 + .../vendor/rand/src/distributions/slice.rs | 117 + .../vendor/rand/src/distributions/uniform.rs | 1658 ++++ .../vendor/rand/src/distributions/utils.rs | 429 + .../vendor/rand/src/distributions/weighted.rs | 47 + .../rand/src/distributions/weighted_index.rs | 458 + src/rust/vendor/rand/src/lib.rs | 214 + src/rust/vendor/rand/src/prelude.rs | 34 + src/rust/vendor/rand/src/rng.rs | 600 ++ src/rust/vendor/rand/src/rngs/adapter/mod.rs | 16 + src/rust/vendor/rand/src/rngs/adapter/read.rs | 150 + .../vendor/rand/src/rngs/adapter/reseeding.rs | 386 + src/rust/vendor/rand/src/rngs/mock.rs | 87 + src/rust/vendor/rand/src/rngs/mod.rs | 119 + src/rust/vendor/rand/src/rngs/small.rs | 117 + src/rust/vendor/rand/src/rngs/std.rs | 98 + 
src/rust/vendor/rand/src/rngs/thread.rs | 143 + .../rand/src/rngs/xoshiro128plusplus.rs | 118 + .../rand/src/rngs/xoshiro256plusplus.rs | 122 + src/rust/vendor/rand/src/seq/index.rs | 678 ++ src/rust/vendor/rand/src/seq/mod.rs | 1356 +++ .../vendor/rand_chacha/.cargo-checksum.json | 1 + src/rust/vendor/rand_chacha/CHANGELOG.md | 35 + src/rust/vendor/rand_chacha/COPYRIGHT | 12 + src/rust/vendor/rand_chacha/Cargo.toml | 45 + src/rust/vendor/rand_chacha/LICENSE-APACHE | 201 + src/rust/vendor/rand_chacha/LICENSE-MIT | 26 + src/rust/vendor/rand_chacha/README.md | 48 + src/rust/vendor/rand_chacha/src/chacha.rs | 632 ++ src/rust/vendor/rand_chacha/src/guts.rs | 273 + src/rust/vendor/rand_chacha/src/lib.rs | 33 + src/rust/vendor/sharks/.cargo-checksum.json | 1 + src/rust/vendor/sharks/CHANGELOG.md | 71 + src/rust/vendor/sharks/COPYRIGHT | 12 + src/rust/vendor/sharks/Cargo.toml | 60 + src/rust/vendor/sharks/LICENSE-APACHE | 201 + src/rust/vendor/sharks/LICENSE-MIT | 21 + src/rust/vendor/sharks/README.md | 77 + src/rust/vendor/sharks/benches/benchmarks.rs | 40 + src/rust/vendor/sharks/codecov.yml | 20 + src/rust/vendor/sharks/src/field.rs | 228 + src/rust/vendor/sharks/src/lib.rs | 218 + src/rust/vendor/sharks/src/math.rs | 98 + src/rust/vendor/sharks/src/share.rs | 92 + .../zerocopy-derive/.cargo-checksum.json | 1 + src/rust/vendor/zerocopy-derive/Cargo.toml | 43 + .../vendor/zerocopy-derive/LICENSE-APACHE | 202 + src/rust/vendor/zerocopy-derive/LICENSE-BSD | 24 + src/rust/vendor/zerocopy-derive/LICENSE-MIT | 26 + src/rust/vendor/zerocopy-derive/src/ext.rs | 53 + src/rust/vendor/zerocopy-derive/src/lib.rs | 887 ++ src/rust/vendor/zerocopy-derive/src/repr.rs | 311 + .../zerocopy-derive/tests/enum_as_bytes.rs | 101 + .../zerocopy-derive/tests/enum_from_zeroes.rs | 35 + .../tests/enum_known_layout.rs | 46 + .../zerocopy-derive/tests/enum_unaligned.rs | 47 + .../vendor/zerocopy-derive/tests/hygiene.rs | 43 + .../tests/paths_and_modules.rs | 38 + 
.../zerocopy-derive/tests/priv_in_pub.rs | 24 + .../zerocopy-derive/tests/struct_as_bytes.rs | 161 + .../tests/struct_from_bytes.rs | 79 + .../tests/struct_from_zeroes.rs | 77 + .../tests/struct_known_layout.rs | 65 + .../zerocopy-derive/tests/struct_unaligned.rs | 100 + .../vendor/zerocopy-derive/tests/trybuild.rs | 19 + .../tests/ui-msrv/derive_transparent.rs | 40 + .../tests/ui-msrv/derive_transparent.stderr | 71 + .../zerocopy-derive/tests/ui-msrv/enum.rs | 194 + .../zerocopy-derive/tests/ui-msrv/enum.stderr | 199 + .../ui-msrv/enum_from_bytes_u8_too_few.rs | 272 + .../ui-msrv/enum_from_bytes_u8_too_few.stderr | 11 + .../tests/ui-msrv/late_compile_pass.rs | 75 + .../tests/ui-msrv/late_compile_pass.stderr | 74 + .../tests/ui-msrv/mid_compile_pass.rs | 61 + .../tests/ui-msrv/mid_compile_pass.stderr | 104 + .../zerocopy-derive/tests/ui-msrv/struct.rs | 99 + .../tests/ui-msrv/struct.stderr | 113 + .../zerocopy-derive/tests/ui-msrv/union.rs | 73 + .../tests/ui-msrv/union.stderr | 42 + .../tests/ui-nightly/derive_transparent.rs | 40 + .../ui-nightly/derive_transparent.stderr | 111 + .../zerocopy-derive/tests/ui-nightly/enum.rs | 194 + .../tests/ui-nightly/enum.stderr | 201 + .../ui-nightly/enum_from_bytes_u8_too_few.rs | 272 + .../enum_from_bytes_u8_too_few.stderr | 11 + .../tests/ui-nightly/late_compile_pass.rs | 75 + .../tests/ui-nightly/late_compile_pass.stderr | 168 + .../tests/ui-nightly/mid_compile_pass.rs | 61 + .../tests/ui-nightly/mid_compile_pass.stderr | 104 + .../tests/ui-nightly/struct.rs | 99 + .../tests/ui-nightly/struct.stderr | 161 + .../zerocopy-derive/tests/ui-nightly/union.rs | 73 + .../tests/ui-nightly/union.stderr | 51 + .../tests/ui-stable/derive_transparent.rs | 40 + .../tests/ui-stable/derive_transparent.stderr | 111 + .../zerocopy-derive/tests/ui-stable/enum.rs | 194 + .../tests/ui-stable/enum.stderr | 201 + .../ui-stable/enum_from_bytes_u8_too_few.rs | 272 + .../enum_from_bytes_u8_too_few.stderr | 11 + 
.../tests/ui-stable/late_compile_pass.rs | 75 + .../tests/ui-stable/late_compile_pass.stderr | 144 + .../tests/ui-stable/mid_compile_pass.rs | 61 + .../tests/ui-stable/mid_compile_pass.stderr | 104 + .../zerocopy-derive/tests/ui-stable/struct.rs | 99 + .../tests/ui-stable/struct.stderr | 137 + .../zerocopy-derive/tests/ui-stable/union.rs | 73 + .../tests/ui-stable/union.stderr | 47 + .../zerocopy-derive/tests/union_as_bytes.rs | 75 + .../zerocopy-derive/tests/union_from_bytes.rs | 72 + .../tests/union_from_zeroes.rs | 72 + .../tests/union_known_layout.rs | 65 + .../zerocopy-derive/tests/union_unaligned.rs | 77 + src/rust/vendor/zerocopy-derive/tests/util.rs | 20 + src/rust/vendor/zerocopy/.cargo-checksum.json | 1 + src/rust/vendor/zerocopy/CONTRIBUTING.md | 215 + src/rust/vendor/zerocopy/Cargo.toml | 101 + src/rust/vendor/zerocopy/INTERNAL.md | 44 + src/rust/vendor/zerocopy/LICENSE-APACHE | 202 + src/rust/vendor/zerocopy/LICENSE-BSD | 24 + src/rust/vendor/zerocopy/LICENSE-MIT | 26 + src/rust/vendor/zerocopy/POLICIES.md | 114 + src/rust/vendor/zerocopy/README.md | 154 + src/rust/vendor/zerocopy/cargo.sh | 120 + src/rust/vendor/zerocopy/clippy.toml | 10 + src/rust/vendor/zerocopy/generate-readme.sh | 50 + src/rust/vendor/zerocopy/rustfmt.toml | 19 + src/rust/vendor/zerocopy/src/byteorder.rs | 1071 +++ src/rust/vendor/zerocopy/src/lib.rs | 8284 +++++++++++++++++ src/rust/vendor/zerocopy/src/macro_util.rs | 673 ++ src/rust/vendor/zerocopy/src/macros.rs | 416 + ...ost_monomorphization_compile_fail_tests.rs | 118 + .../src/third_party/rust/LICENSE-APACHE | 176 + .../zerocopy/src/third_party/rust/LICENSE-MIT | 23 + .../src/third_party/rust/README.fuchsia | 7 + .../zerocopy/src/third_party/rust/layout.rs | 45 + src/rust/vendor/zerocopy/src/util.rs | 810 ++ src/rust/vendor/zerocopy/src/wrappers.rs | 503 + .../zerocopy/testdata/include_value/data | 1 + src/rust/vendor/zerocopy/tests/trybuild.rs | 41 + .../ui-msrv/include_value_not_from_bytes.rs | 12 + 
.../include_value_not_from_bytes.stderr | 12 + .../tests/ui-msrv/include_value_wrong_size.rs | 11 + .../ui-msrv/include_value_wrong_size.stderr | 9 + .../ui-msrv/invalid-impls/invalid-impls.rs | 29 + .../invalid-impls/invalid-impls.stderr | 127 + .../zerocopy/tests/ui-msrv/max-align.rs | 99 + .../zerocopy/tests/ui-msrv/max-align.stderr | 5 + .../ui-msrv/transmute-dst-not-frombytes.rs | 18 + .../transmute-dst-not-frombytes.stderr | 12 + .../transmute-mut-alignment-increase.rs | 19 + .../transmute-mut-alignment-increase.stderr | 36 + .../tests/ui-msrv/transmute-mut-const.rs | 20 + .../tests/ui-msrv/transmute-mut-const.stderr | 41 + .../ui-msrv/transmute-mut-dst-generic.rs | 18 + .../ui-msrv/transmute-mut-dst-generic.stderr | 19 + .../transmute-mut-dst-not-a-reference.rs | 17 + .../transmute-mut-dst-not-a-reference.stderr | 39 + .../ui-msrv/transmute-mut-dst-not-asbytes.rs | 24 + .../transmute-mut-dst-not-asbytes.stderr | 12 + .../transmute-mut-dst-not-frombytes.rs | 24 + .../transmute-mut-dst-not-frombytes.stderr | 12 + .../ui-msrv/transmute-mut-dst-unsized.rs | 17 + .../ui-msrv/transmute-mut-dst-unsized.stderr | 108 + .../ui-msrv/transmute-mut-illegal-lifetime.rs | 15 + .../transmute-mut-illegal-lifetime.stderr | 9 + .../ui-msrv/transmute-mut-size-decrease.rs | 17 + .../transmute-mut-size-decrease.stderr | 36 + .../ui-msrv/transmute-mut-size-increase.rs | 17 + .../transmute-mut-size-increase.stderr | 36 + .../ui-msrv/transmute-mut-src-dst-generic.rs | 19 + .../transmute-mut-src-dst-generic.stderr | 19 + .../transmute-mut-src-dst-not-references.rs | 17 + ...ransmute-mut-src-dst-not-references.stderr | 12 + .../ui-msrv/transmute-mut-src-dst-unsized.rs | 17 + .../transmute-mut-src-dst-unsized.stderr | 237 + .../ui-msrv/transmute-mut-src-generic.rs | 18 + .../ui-msrv/transmute-mut-src-generic.stderr | 10 + .../ui-msrv/transmute-mut-src-immutable.rs | 18 + .../transmute-mut-src-immutable.stderr | 11 + .../transmute-mut-src-not-a-reference.rs | 17 + 
.../transmute-mut-src-not-a-reference.stderr | 12 + .../ui-msrv/transmute-mut-src-not-asbytes.rs | 24 + .../transmute-mut-src-not-asbytes.stderr | 25 + .../transmute-mut-src-not-frombytes.rs | 24 + .../transmute-mut-src-not-frombytes.stderr | 25 + .../ui-msrv/transmute-mut-src-unsized.rs | 16 + .../ui-msrv/transmute-mut-src-unsized.stderr | 198 + .../tests/ui-msrv/transmute-ptr-to-usize.rs | 20 + .../ui-msrv/transmute-ptr-to-usize.stderr | 37 + .../transmute-ref-alignment-increase.rs | 19 + .../transmute-ref-alignment-increase.stderr | 9 + .../ui-msrv/transmute-ref-dst-generic.rs | 18 + .../ui-msrv/transmute-ref-dst-generic.stderr | 19 + .../ui-msrv/transmute-ref-dst-mutable.rs | 19 + .../ui-msrv/transmute-ref-dst-mutable.stderr | 29 + .../transmute-ref-dst-not-a-reference.rs | 17 + .../transmute-ref-dst-not-a-reference.stderr | 29 + .../transmute-ref-dst-not-frombytes.rs | 18 + .../transmute-ref-dst-not-frombytes.stderr | 12 + .../ui-msrv/transmute-ref-dst-unsized.rs | 17 + .../ui-msrv/transmute-ref-dst-unsized.stderr | 94 + .../ui-msrv/transmute-ref-illegal-lifetime.rs | 15 + .../transmute-ref-illegal-lifetime.stderr | 9 + .../ui-msrv/transmute-ref-size-decrease.rs | 17 + .../transmute-ref-size-decrease.stderr | 9 + .../ui-msrv/transmute-ref-size-increase.rs | 17 + .../transmute-ref-size-increase.stderr | 9 + .../ui-msrv/transmute-ref-src-dst-generic.rs | 19 + .../transmute-ref-src-dst-generic.stderr | 19 + .../transmute-ref-src-dst-not-references.rs | 17 + ...ransmute-ref-src-dst-not-references.stderr | 42 + .../ui-msrv/transmute-ref-src-dst-unsized.rs | 17 + .../transmute-ref-src-dst-unsized.stderr | 195 + .../ui-msrv/transmute-ref-src-generic.rs | 18 + .../ui-msrv/transmute-ref-src-generic.stderr | 19 + .../transmute-ref-src-not-a-reference.rs | 17 + .../transmute-ref-src-not-a-reference.stderr | 12 + .../ui-msrv/transmute-ref-src-not-asbytes.rs | 18 + .../transmute-ref-src-not-asbytes.stderr | 25 + .../ui-msrv/transmute-ref-src-unsized.rs | 16 + 
.../ui-msrv/transmute-ref-src-unsized.stderr | 170 + .../tests/ui-msrv/transmute-size-decrease.rs | 19 + .../ui-msrv/transmute-size-decrease.stderr | 9 + .../tests/ui-msrv/transmute-size-increase.rs | 19 + .../ui-msrv/transmute-size-increase.stderr | 9 + .../ui-msrv/transmute-src-not-asbytes.rs | 18 + .../ui-msrv/transmute-src-not-asbytes.stderr | 25 + .../include_value_not_from_bytes.rs | 12 + .../include_value_not_from_bytes.stderr | 25 + .../ui-nightly/include_value_wrong_size.rs | 11 + .../include_value_wrong_size.stderr | 9 + .../ui-nightly/invalid-impls/invalid-impls.rs | 29 + .../invalid-impls/invalid-impls.stderr | 107 + .../zerocopy/tests/ui-nightly/max-align.rs | 99 + .../tests/ui-nightly/max-align.stderr | 5 + .../ui-nightly/transmute-dst-not-frombytes.rs | 18 + .../transmute-dst-not-frombytes.stderr | 25 + .../transmute-mut-alignment-increase.rs | 19 + .../transmute-mut-alignment-increase.stderr | 9 + .../tests/ui-nightly/transmute-mut-const.rs | 20 + .../ui-nightly/transmute-mut-const.stderr | 33 + .../ui-nightly/transmute-mut-dst-generic.rs | 18 + .../transmute-mut-dst-generic.stderr | 19 + .../transmute-mut-dst-not-a-reference.rs | 17 + .../transmute-mut-dst-not-a-reference.stderr | 39 + .../transmute-mut-dst-not-asbytes.rs | 24 + .../transmute-mut-dst-not-asbytes.stderr | 25 + .../transmute-mut-dst-not-frombytes.rs | 24 + .../transmute-mut-dst-not-frombytes.stderr | 25 + .../ui-nightly/transmute-mut-dst-unsized.rs | 17 + .../transmute-mut-dst-unsized.stderr | 86 + .../transmute-mut-illegal-lifetime.rs | 15 + .../transmute-mut-illegal-lifetime.stderr | 12 + .../ui-nightly/transmute-mut-size-decrease.rs | 17 + .../transmute-mut-size-decrease.stderr | 9 + .../ui-nightly/transmute-mut-size-increase.rs | 17 + .../transmute-mut-size-increase.stderr | 9 + .../transmute-mut-src-dst-generic.rs | 19 + .../transmute-mut-src-dst-generic.stderr | 19 + .../transmute-mut-src-dst-not-references.rs | 17 + ...ransmute-mut-src-dst-not-references.stderr | 44 + 
.../transmute-mut-src-dst-unsized.rs | 17 + .../transmute-mut-src-dst-unsized.stderr | 231 + .../ui-nightly/transmute-mut-src-generic.rs | 18 + .../transmute-mut-src-generic.stderr | 10 + .../ui-nightly/transmute-mut-src-immutable.rs | 18 + .../transmute-mut-src-immutable.stderr | 40 + .../transmute-mut-src-not-a-reference.rs | 17 + .../transmute-mut-src-not-a-reference.stderr | 44 + .../transmute-mut-src-not-asbytes.rs | 24 + .../transmute-mut-src-not-asbytes.stderr | 48 + .../transmute-mut-src-not-frombytes.rs | 24 + .../transmute-mut-src-not-frombytes.stderr | 48 + .../ui-nightly/transmute-mut-src-unsized.rs | 16 + .../transmute-mut-src-unsized.stderr | 158 + .../ui-nightly/transmute-ptr-to-usize.rs | 20 + .../ui-nightly/transmute-ptr-to-usize.stderr | 30 + .../transmute-ref-alignment-increase.rs | 19 + .../transmute-ref-alignment-increase.stderr | 9 + .../ui-nightly/transmute-ref-dst-generic.rs | 18 + .../transmute-ref-dst-generic.stderr | 19 + .../ui-nightly/transmute-ref-dst-mutable.rs | 19 + .../transmute-ref-dst-mutable.stderr | 29 + .../transmute-ref-dst-not-a-reference.rs | 17 + .../transmute-ref-dst-not-a-reference.stderr | 29 + .../transmute-ref-dst-not-frombytes.rs | 18 + .../transmute-ref-dst-not-frombytes.stderr | 25 + .../ui-nightly/transmute-ref-dst-unsized.rs | 17 + .../transmute-ref-dst-unsized.stderr | 69 + .../transmute-ref-illegal-lifetime.rs | 15 + .../transmute-ref-illegal-lifetime.stderr | 12 + .../ui-nightly/transmute-ref-size-decrease.rs | 17 + .../transmute-ref-size-decrease.stderr | 9 + .../ui-nightly/transmute-ref-size-increase.rs | 17 + .../transmute-ref-size-increase.stderr | 9 + .../transmute-ref-src-dst-generic.rs | 19 + .../transmute-ref-src-dst-generic.stderr | 19 + .../transmute-ref-src-dst-not-references.rs | 17 + ...ransmute-ref-src-dst-not-references.stderr | 85 + .../transmute-ref-src-dst-unsized.rs | 17 + .../transmute-ref-src-dst-unsized.stderr | 183 + .../ui-nightly/transmute-ref-src-generic.rs | 18 + 
.../transmute-ref-src-generic.stderr | 19 + .../transmute-ref-src-not-a-reference.rs | 17 + .../transmute-ref-src-not-a-reference.stderr | 55 + .../transmute-ref-src-not-asbytes.rs | 18 + .../transmute-ref-src-not-asbytes.stderr | 48 + .../ui-nightly/transmute-ref-src-unsized.rs | 16 + .../transmute-ref-src-unsized.stderr | 127 + .../ui-nightly/transmute-size-decrease.rs | 19 + .../ui-nightly/transmute-size-decrease.stderr | 9 + .../ui-nightly/transmute-size-increase.rs | 19 + .../ui-nightly/transmute-size-increase.stderr | 9 + .../ui-nightly/transmute-src-not-asbytes.rs | 18 + .../transmute-src-not-asbytes.stderr | 48 + .../ui-stable/include_value_not_from_bytes.rs | 12 + .../include_value_not_from_bytes.stderr | 25 + .../ui-stable/include_value_wrong_size.rs | 11 + .../ui-stable/include_value_wrong_size.stderr | 9 + .../ui-stable/invalid-impls/invalid-impls.rs | 29 + .../invalid-impls/invalid-impls.stderr | 107 + .../zerocopy/tests/ui-stable/max-align.rs | 99 + .../zerocopy/tests/ui-stable/max-align.stderr | 5 + .../ui-stable/transmute-dst-not-frombytes.rs | 18 + .../transmute-dst-not-frombytes.stderr | 25 + .../transmute-mut-alignment-increase.rs | 19 + .../transmute-mut-alignment-increase.stderr | 9 + .../tests/ui-stable/transmute-mut-const.rs | 20 + .../ui-stable/transmute-mut-const.stderr | 31 + .../ui-stable/transmute-mut-dst-generic.rs | 18 + .../transmute-mut-dst-generic.stderr | 19 + .../transmute-mut-dst-not-a-reference.rs | 17 + .../transmute-mut-dst-not-a-reference.stderr | 39 + .../transmute-mut-dst-not-asbytes.rs | 24 + .../transmute-mut-dst-not-asbytes.stderr | 25 + .../transmute-mut-dst-not-frombytes.rs | 24 + .../transmute-mut-dst-not-frombytes.stderr | 25 + .../ui-stable/transmute-mut-dst-unsized.rs | 17 + .../transmute-mut-dst-unsized.stderr | 86 + .../transmute-mut-illegal-lifetime.rs | 15 + .../transmute-mut-illegal-lifetime.stderr | 12 + .../ui-stable/transmute-mut-size-decrease.rs | 17 + .../transmute-mut-size-decrease.stderr | 9 + 
.../ui-stable/transmute-mut-size-increase.rs | 17 + .../transmute-mut-size-increase.stderr | 9 + .../transmute-mut-src-dst-generic.rs | 19 + .../transmute-mut-src-dst-generic.stderr | 19 + .../transmute-mut-src-dst-not-references.rs | 17 + ...ransmute-mut-src-dst-not-references.stderr | 15 + .../transmute-mut-src-dst-unsized.rs | 17 + .../transmute-mut-src-dst-unsized.stderr | 231 + .../ui-stable/transmute-mut-src-generic.rs | 18 + .../transmute-mut-src-generic.stderr | 10 + .../ui-stable/transmute-mut-src-immutable.rs | 18 + .../transmute-mut-src-immutable.stderr | 11 + .../transmute-mut-src-not-a-reference.rs | 17 + .../transmute-mut-src-not-a-reference.stderr | 15 + .../transmute-mut-src-not-asbytes.rs | 24 + .../transmute-mut-src-not-asbytes.stderr | 48 + .../transmute-mut-src-not-frombytes.rs | 24 + .../transmute-mut-src-not-frombytes.stderr | 48 + .../ui-stable/transmute-mut-src-unsized.rs | 16 + .../transmute-mut-src-unsized.stderr | 158 + .../tests/ui-stable/transmute-ptr-to-usize.rs | 20 + .../ui-stable/transmute-ptr-to-usize.stderr | 30 + .../transmute-ref-alignment-increase.rs | 19 + .../transmute-ref-alignment-increase.stderr | 9 + .../ui-stable/transmute-ref-dst-generic.rs | 18 + .../transmute-ref-dst-generic.stderr | 19 + .../ui-stable/transmute-ref-dst-mutable.rs | 19 + .../transmute-ref-dst-mutable.stderr | 29 + .../transmute-ref-dst-not-a-reference.rs | 17 + .../transmute-ref-dst-not-a-reference.stderr | 29 + .../transmute-ref-dst-not-frombytes.rs | 18 + .../transmute-ref-dst-not-frombytes.stderr | 25 + .../ui-stable/transmute-ref-dst-unsized.rs | 17 + .../transmute-ref-dst-unsized.stderr | 69 + .../transmute-ref-illegal-lifetime.rs | 15 + .../transmute-ref-illegal-lifetime.stderr | 12 + .../ui-stable/transmute-ref-size-decrease.rs | 17 + .../transmute-ref-size-decrease.stderr | 9 + .../ui-stable/transmute-ref-size-increase.rs | 17 + .../transmute-ref-size-increase.stderr | 9 + .../transmute-ref-src-dst-generic.rs | 19 + 
.../transmute-ref-src-dst-generic.stderr | 19 + .../transmute-ref-src-dst-not-references.rs | 17 + ...ransmute-ref-src-dst-not-references.stderr | 45 + .../transmute-ref-src-dst-unsized.rs | 17 + .../transmute-ref-src-dst-unsized.stderr | 183 + .../ui-stable/transmute-ref-src-generic.rs | 18 + .../transmute-ref-src-generic.stderr | 19 + .../transmute-ref-src-not-a-reference.rs | 17 + .../transmute-ref-src-not-a-reference.stderr | 15 + .../transmute-ref-src-not-asbytes.rs | 18 + .../transmute-ref-src-not-asbytes.stderr | 48 + .../ui-stable/transmute-ref-src-unsized.rs | 16 + .../transmute-ref-src-unsized.stderr | 127 + .../ui-stable/transmute-size-decrease.rs | 19 + .../ui-stable/transmute-size-decrease.stderr | 9 + .../ui-stable/transmute-size-increase.rs | 19 + .../ui-stable/transmute-size-increase.stderr | 9 + .../ui-stable/transmute-src-not-asbytes.rs | 18 + .../transmute-src-not-asbytes.stderr | 48 + 508 files changed, 71792 insertions(+), 156 deletions(-) create mode 100644 messages/shamir.proto create mode 100644 py/bitbox02/bitbox02/communication/generated/shamir_pb2.py create mode 100644 py/bitbox02/bitbox02/communication/generated/shamir_pb2.pyi create mode 100644 src/rust/bitbox02-rust/src/hww/api/show_shamir.rs create mode 100644 src/rust/vendor/ahash/.cargo-checksum.json create mode 100644 src/rust/vendor/ahash/Cargo.toml create mode 100644 src/rust/vendor/ahash/LICENSE-APACHE create mode 100644 src/rust/vendor/ahash/LICENSE-MIT create mode 100644 src/rust/vendor/ahash/README.md create mode 100644 src/rust/vendor/ahash/rustfmt.toml create mode 100644 src/rust/vendor/ahash/smhasher/0001-Add-support-for-aHash.patch create mode 100644 src/rust/vendor/ahash/smhasher/0002-Add-support-for-aHash.patch create mode 100644 src/rust/vendor/ahash/smhasher/ahashOutput.txt create mode 100755 src/rust/vendor/ahash/smhasher/clone_smhasher.sh create mode 100644 src/rust/vendor/ahash/smhasher/fallbackOutput.txt create mode 100644 src/rust/vendor/ahash/src/aes_hash.rs 
create mode 100644 src/rust/vendor/ahash/src/convert.rs create mode 100644 src/rust/vendor/ahash/src/fallback_hash.rs create mode 100644 src/rust/vendor/ahash/src/hash_map.rs create mode 100644 src/rust/vendor/ahash/src/hash_quality_test.rs create mode 100644 src/rust/vendor/ahash/src/hash_set.rs create mode 100644 src/rust/vendor/ahash/src/lib.rs create mode 100644 src/rust/vendor/ahash/src/operations.rs create mode 100644 src/rust/vendor/ahash/src/random_state.rs create mode 100644 src/rust/vendor/ahash/src/specialize.rs create mode 100644 src/rust/vendor/ahash/tests/bench.rs create mode 100644 src/rust/vendor/ahash/tests/map_tests.rs create mode 100644 src/rust/vendor/ahash/tests/nopanic.rs create mode 100644 src/rust/vendor/byteorder/.cargo-checksum.json create mode 100644 src/rust/vendor/byteorder/CHANGELOG.md create mode 100644 src/rust/vendor/byteorder/COPYING create mode 100644 src/rust/vendor/byteorder/Cargo.toml create mode 100644 src/rust/vendor/byteorder/LICENSE-MIT create mode 100644 src/rust/vendor/byteorder/README.md create mode 100644 src/rust/vendor/byteorder/UNLICENSE create mode 100644 src/rust/vendor/byteorder/benches/bench.rs create mode 100644 src/rust/vendor/byteorder/rustfmt.toml create mode 100644 src/rust/vendor/byteorder/src/io.rs create mode 100644 src/rust/vendor/byteorder/src/lib.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/.cargo-checksum.json create mode 100644 src/rust/vendor/hashbrown-0.9.1/CHANGELOG.md create mode 100644 src/rust/vendor/hashbrown-0.9.1/Cargo.toml create mode 100644 src/rust/vendor/hashbrown-0.9.1/LICENSE-APACHE create mode 100644 src/rust/vendor/hashbrown-0.9.1/LICENSE-MIT create mode 100644 src/rust/vendor/hashbrown-0.9.1/README.md create mode 100644 src/rust/vendor/hashbrown-0.9.1/benches/bench.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/clippy.toml create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/mod.rs create mode 100644 
src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/helpers.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/map.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/mod.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/raw.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/set.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/serde.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/lib.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/macros.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/map.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/raw/bitmask.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/raw/generic.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/raw/mod.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/raw/sse2.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/rustc_entry.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/scopeguard.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/src/set.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/tests/hasher.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/tests/rayon.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/tests/serde.rs create mode 100644 src/rust/vendor/hashbrown-0.9.1/tests/set.rs create mode 100644 src/rust/vendor/ppv-lite86/.cargo-checksum.json create mode 100644 src/rust/vendor/ppv-lite86/CHANGELOG.md create mode 100644 src/rust/vendor/ppv-lite86/Cargo.toml create mode 100644 src/rust/vendor/ppv-lite86/LICENSE-APACHE create mode 100644 src/rust/vendor/ppv-lite86/LICENSE-MIT create mode 100644 src/rust/vendor/ppv-lite86/src/generic.rs create mode 100644 src/rust/vendor/ppv-lite86/src/lib.rs create mode 100644 src/rust/vendor/ppv-lite86/src/soft.rs create mode 100644 src/rust/vendor/ppv-lite86/src/types.rs create mode 100644 
src/rust/vendor/ppv-lite86/src/x86_64/mod.rs create mode 100644 src/rust/vendor/ppv-lite86/src/x86_64/sse2.rs create mode 100644 src/rust/vendor/rand/.cargo-checksum.json create mode 100644 src/rust/vendor/rand/CHANGELOG.md create mode 100644 src/rust/vendor/rand/COPYRIGHT create mode 100644 src/rust/vendor/rand/Cargo.toml create mode 100644 src/rust/vendor/rand/LICENSE-APACHE create mode 100644 src/rust/vendor/rand/LICENSE-MIT create mode 100644 src/rust/vendor/rand/README.md create mode 100644 src/rust/vendor/rand/src/distributions/bernoulli.rs create mode 100644 src/rust/vendor/rand/src/distributions/distribution.rs create mode 100644 src/rust/vendor/rand/src/distributions/float.rs create mode 100644 src/rust/vendor/rand/src/distributions/integer.rs create mode 100644 src/rust/vendor/rand/src/distributions/mod.rs create mode 100644 src/rust/vendor/rand/src/distributions/other.rs create mode 100644 src/rust/vendor/rand/src/distributions/slice.rs create mode 100644 src/rust/vendor/rand/src/distributions/uniform.rs create mode 100644 src/rust/vendor/rand/src/distributions/utils.rs create mode 100644 src/rust/vendor/rand/src/distributions/weighted.rs create mode 100644 src/rust/vendor/rand/src/distributions/weighted_index.rs create mode 100644 src/rust/vendor/rand/src/lib.rs create mode 100644 src/rust/vendor/rand/src/prelude.rs create mode 100644 src/rust/vendor/rand/src/rng.rs create mode 100644 src/rust/vendor/rand/src/rngs/adapter/mod.rs create mode 100644 src/rust/vendor/rand/src/rngs/adapter/read.rs create mode 100644 src/rust/vendor/rand/src/rngs/adapter/reseeding.rs create mode 100644 src/rust/vendor/rand/src/rngs/mock.rs create mode 100644 src/rust/vendor/rand/src/rngs/mod.rs create mode 100644 src/rust/vendor/rand/src/rngs/small.rs create mode 100644 src/rust/vendor/rand/src/rngs/std.rs create mode 100644 src/rust/vendor/rand/src/rngs/thread.rs create mode 100644 src/rust/vendor/rand/src/rngs/xoshiro128plusplus.rs create mode 100644 
src/rust/vendor/rand/src/rngs/xoshiro256plusplus.rs create mode 100644 src/rust/vendor/rand/src/seq/index.rs create mode 100644 src/rust/vendor/rand/src/seq/mod.rs create mode 100644 src/rust/vendor/rand_chacha/.cargo-checksum.json create mode 100644 src/rust/vendor/rand_chacha/CHANGELOG.md create mode 100644 src/rust/vendor/rand_chacha/COPYRIGHT create mode 100644 src/rust/vendor/rand_chacha/Cargo.toml create mode 100644 src/rust/vendor/rand_chacha/LICENSE-APACHE create mode 100644 src/rust/vendor/rand_chacha/LICENSE-MIT create mode 100644 src/rust/vendor/rand_chacha/README.md create mode 100644 src/rust/vendor/rand_chacha/src/chacha.rs create mode 100644 src/rust/vendor/rand_chacha/src/guts.rs create mode 100644 src/rust/vendor/rand_chacha/src/lib.rs create mode 100644 src/rust/vendor/sharks/.cargo-checksum.json create mode 100644 src/rust/vendor/sharks/CHANGELOG.md create mode 100644 src/rust/vendor/sharks/COPYRIGHT create mode 100644 src/rust/vendor/sharks/Cargo.toml create mode 100644 src/rust/vendor/sharks/LICENSE-APACHE create mode 100644 src/rust/vendor/sharks/LICENSE-MIT create mode 100644 src/rust/vendor/sharks/README.md create mode 100644 src/rust/vendor/sharks/benches/benchmarks.rs create mode 100644 src/rust/vendor/sharks/codecov.yml create mode 100644 src/rust/vendor/sharks/src/field.rs create mode 100644 src/rust/vendor/sharks/src/lib.rs create mode 100644 src/rust/vendor/sharks/src/math.rs create mode 100644 src/rust/vendor/sharks/src/share.rs create mode 100644 src/rust/vendor/zerocopy-derive/.cargo-checksum.json create mode 100644 src/rust/vendor/zerocopy-derive/Cargo.toml create mode 100644 src/rust/vendor/zerocopy-derive/LICENSE-APACHE create mode 100644 src/rust/vendor/zerocopy-derive/LICENSE-BSD create mode 100644 src/rust/vendor/zerocopy-derive/LICENSE-MIT create mode 100644 src/rust/vendor/zerocopy-derive/src/ext.rs create mode 100644 src/rust/vendor/zerocopy-derive/src/lib.rs create mode 100644 src/rust/vendor/zerocopy-derive/src/repr.rs 
create mode 100644 src/rust/vendor/zerocopy-derive/tests/enum_as_bytes.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/enum_from_zeroes.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/enum_known_layout.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/enum_unaligned.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/hygiene.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/paths_and_modules.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/priv_in_pub.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/struct_as_bytes.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/struct_from_bytes.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/struct_from_zeroes.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/struct_known_layout.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/struct_unaligned.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/trybuild.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/derive_transparent.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/derive_transparent.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum_from_bytes_u8_too_few.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum_from_bytes_u8_too_few.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/late_compile_pass.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/late_compile_pass.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/mid_compile_pass.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/mid_compile_pass.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/struct.rs create mode 100644 
src/rust/vendor/zerocopy-derive/tests/ui-msrv/struct.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/union.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-msrv/union.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/derive_transparent.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/derive_transparent.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum_from_bytes_u8_too_few.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum_from_bytes_u8_too_few.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/late_compile_pass.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/late_compile_pass.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/mid_compile_pass.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/mid_compile_pass.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/struct.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/struct.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/union.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-nightly/union.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/derive_transparent.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/derive_transparent.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/enum.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/enum.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/enum_from_bytes_u8_too_few.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/enum_from_bytes_u8_too_few.stderr create mode 100644 
src/rust/vendor/zerocopy-derive/tests/ui-stable/late_compile_pass.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/late_compile_pass.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/mid_compile_pass.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/mid_compile_pass.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/struct.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/struct.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/union.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/ui-stable/union.stderr create mode 100644 src/rust/vendor/zerocopy-derive/tests/union_as_bytes.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/union_from_bytes.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/union_from_zeroes.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/union_known_layout.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/union_unaligned.rs create mode 100644 src/rust/vendor/zerocopy-derive/tests/util.rs create mode 100644 src/rust/vendor/zerocopy/.cargo-checksum.json create mode 100644 src/rust/vendor/zerocopy/CONTRIBUTING.md create mode 100644 src/rust/vendor/zerocopy/Cargo.toml create mode 100644 src/rust/vendor/zerocopy/INTERNAL.md create mode 100644 src/rust/vendor/zerocopy/LICENSE-APACHE create mode 100644 src/rust/vendor/zerocopy/LICENSE-BSD create mode 100644 src/rust/vendor/zerocopy/LICENSE-MIT create mode 100644 src/rust/vendor/zerocopy/POLICIES.md create mode 100644 src/rust/vendor/zerocopy/README.md create mode 100755 src/rust/vendor/zerocopy/cargo.sh create mode 100644 src/rust/vendor/zerocopy/clippy.toml create mode 100755 src/rust/vendor/zerocopy/generate-readme.sh create mode 100644 src/rust/vendor/zerocopy/rustfmt.toml create mode 100644 src/rust/vendor/zerocopy/src/byteorder.rs create mode 100644 src/rust/vendor/zerocopy/src/lib.rs create mode 100644 
src/rust/vendor/zerocopy/src/macro_util.rs create mode 100644 src/rust/vendor/zerocopy/src/macros.rs create mode 100644 src/rust/vendor/zerocopy/src/post_monomorphization_compile_fail_tests.rs create mode 100644 src/rust/vendor/zerocopy/src/third_party/rust/LICENSE-APACHE create mode 100644 src/rust/vendor/zerocopy/src/third_party/rust/LICENSE-MIT create mode 100644 src/rust/vendor/zerocopy/src/third_party/rust/README.fuchsia create mode 100644 src/rust/vendor/zerocopy/src/third_party/rust/layout.rs create mode 100644 src/rust/vendor/zerocopy/src/util.rs create mode 100644 src/rust/vendor/zerocopy/src/wrappers.rs create mode 100644 src/rust/vendor/zerocopy/testdata/include_value/data create mode 100644 src/rust/vendor/zerocopy/tests/trybuild.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/include_value_not_from_bytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/include_value_not_from_bytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/include_value_wrong_size.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/include_value_wrong_size.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/invalid-impls/invalid-impls.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/invalid-impls/invalid-impls.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/max-align.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/max-align.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-dst-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-dst-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-alignment-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-alignment-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-const.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-const.stderr create mode 
100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-illegal-lifetime.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-illegal-lifetime.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-decrease.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-decrease.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-not-references.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-not-references.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-unsized.rs create mode 100644 
src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-immutable.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-immutable.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ptr-to-usize.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ptr-to-usize.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-alignment-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-alignment-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-mutable.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-mutable.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-a-reference.rs 
create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-illegal-lifetime.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-illegal-lifetime.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-decrease.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-decrease.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-not-references.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-not-references.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-a-reference.stderr create mode 100644 
src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-decrease.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-decrease.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-src-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-msrv/transmute-src-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/include_value_not_from_bytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/include_value_not_from_bytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/include_value_wrong_size.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/include_value_wrong_size.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/invalid-impls/invalid-impls.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/invalid-impls/invalid-impls.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/max-align.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/max-align.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-dst-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-dst-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-alignment-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-alignment-increase.stderr create mode 100644 
src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-const.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-const.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-illegal-lifetime.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-illegal-lifetime.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-decrease.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-decrease.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-not-references.rs create mode 100644 
src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-not-references.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-immutable.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-immutable.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ptr-to-usize.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ptr-to-usize.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-alignment-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-alignment-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-generic.stderr create mode 
100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-mutable.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-mutable.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-illegal-lifetime.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-illegal-lifetime.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-decrease.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-decrease.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-not-references.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-not-references.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-unsized.stderr create mode 100644 
src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-decrease.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-decrease.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-src-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-nightly/transmute-src-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/include_value_not_from_bytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/include_value_not_from_bytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/include_value_wrong_size.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/include_value_wrong_size.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/invalid-impls/invalid-impls.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/invalid-impls/invalid-impls.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/max-align.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/max-align.stderr create 
mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-dst-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-dst-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-alignment-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-alignment-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-const.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-const.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-illegal-lifetime.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-illegal-lifetime.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-decrease.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-decrease.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-increase.rs create mode 100644 
src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-not-references.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-not-references.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-immutable.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-immutable.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ptr-to-usize.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ptr-to-usize.stderr create mode 100644 
src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-alignment-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-alignment-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-mutable.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-mutable.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-frombytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-frombytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-illegal-lifetime.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-illegal-lifetime.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-decrease.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-decrease.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-not-references.rs create mode 100644 
src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-not-references.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-generic.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-generic.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-a-reference.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-a-reference.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-asbytes.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-unsized.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-unsized.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-decrease.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-decrease.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-increase.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-increase.stderr create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-src-not-asbytes.rs create mode 100644 src/rust/vendor/zerocopy/tests/ui-stable/transmute-src-not-asbytes.stderr diff --git a/messages/hww.proto b/messages/hww.proto index 54c34f6d4..3f6e02f43 100644 --- a/messages/hww.proto +++ b/messages/hww.proto @@ -25,6 +25,7 @@ import "eth.proto"; import "keystore.proto"; import "mnemonic.proto"; import "system.proto"; +import "shamir.proto"; import "perform_attestation.proto"; @@ -67,6 +68,9 @@ message Request { ElectrumEncryptionKeyRequest electrum_encryption_key = 26; CardanoRequest cardano = 27; 
BIP85Request bip85 = 28; + ShowShamirRequest show_shamir = 29; + RestoreFromShamirRequest restore_from_shamir = 30; + } } diff --git a/messages/shamir.proto b/messages/shamir.proto new file mode 100644 index 000000000..5dcc33ea8 --- /dev/null +++ b/messages/shamir.proto @@ -0,0 +1,21 @@ +// Copyright 2019 Shift Cryptosecurity AG +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package shiftcrypto.bitbox02; + +message ShowShamirRequest { +} +message RestoreFromShamirRequest { +} diff --git a/py/bitbox02/Makefile b/py/bitbox02/Makefile index 8823f1cf0..314fc7131 100644 --- a/py/bitbox02/Makefile +++ b/py/bitbox02/Makefile @@ -15,7 +15,7 @@ OUT_DIR=bitbox02/communication/generated -PROTO_FILES=hww.proto common.proto backup_commands.proto bitbox02_system.proto btc.proto cardano.proto eth.proto mnemonic.proto system.proto perform_attestation.proto keystore.proto antiklepto.proto +PROTO_FILES=hww.proto common.proto backup_commands.proto bitbox02_system.proto btc.proto cardano.proto eth.proto mnemonic.proto system.proto perform_attestation.proto keystore.proto antiklepto.proto shamir.proto TARGETS=$(addprefix ${OUT_DIR}/, $(PROTO_FILES:.proto=_pb2.py)) diff --git a/py/bitbox02/bitbox02/bitbox02/bitbox02.py b/py/bitbox02/bitbox02/bitbox02/bitbox02.py index 36b831c57..97094a5b7 100644 --- a/py/bitbox02/bitbox02/bitbox02/bitbox02.py +++ b/py/bitbox02/bitbox02/bitbox02/bitbox02.py @@ -38,6 +38,7 @@ from bitbox02.communication.generated import 
btc_pb2 as btc from bitbox02.communication.generated import cardano_pb2 as cardano from bitbox02.communication.generated import mnemonic_pb2 as mnemonic + from bitbox02.communication.generated import shamir_pb2 as shamir from bitbox02.communication.generated import bitbox02_system_pb2 as bitbox02_system from bitbox02.communication.generated import backup_commands_pb2 as backup from bitbox02.communication.generated import common_pb2 as common @@ -259,6 +260,17 @@ def show_mnemonic(self) -> None: request.show_mnemonic.CopyFrom(mnemonic.ShowMnemonicRequest()) self._msg_query(request, expected_response="success") + def show_shamir(self) -> None: + """ + Returns True if shamir mnemonics were successfully shown and confirmed. + Raises a Bitbox02Exception on failure. + """ + # self._require_atleast(semver.VersionInfo(9, 16, 0)) TODO + # pylint: disable=no-member + request = hww.Request() + request.show_shamir.CopyFrom(shamir.ShowShamirRequest()) + self._msg_query(request, expected_response="success") + def _btc_msg_query( self, btc_request: btc.BTCRequest, expected_response: Optional[str] = None ) -> btc.BTCResponse: diff --git a/py/bitbox02/bitbox02/communication/generated/hww_pb2.py b/py/bitbox02/bitbox02/communication/generated/hww_pb2.py index 4606d1d94..72d6acb15 100644 --- a/py/bitbox02/bitbox02/communication/generated/hww_pb2.py +++ b/py/bitbox02/bitbox02/communication/generated/hww_pb2.py @@ -20,22 +20,23 @@ from . import keystore_pb2 as keystore__pb2 from . import mnemonic_pb2 as mnemonic__pb2 from . import system_pb2 as system__pb2 +from . import shamir_pb2 as shamir__pb2 from . 
import perform_attestation_pb2 as perform__attestation__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\thww.proto\x12\x14shiftcrypto.bitbox02\x1a\x0c\x63ommon.proto\x1a\x15\x62\x61\x63kup_commands.proto\x1a\x15\x62itbox02_system.proto\x1a\tbtc.proto\x1a\rcardano.proto\x1a\teth.proto\x1a\x0ekeystore.proto\x1a\x0emnemonic.proto\x1a\x0csystem.proto\x1a\x19perform_attestation.proto\"&\n\x05\x45rror\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"\t\n\x07Success\"\xfd\r\n\x07Request\x12\x41\n\x0b\x64\x65vice_name\x18\x02 \x01(\x0b\x32*.shiftcrypto.bitbox02.SetDeviceNameRequestH\x00\x12I\n\x0f\x64\x65vice_language\x18\x03 \x01(\x0b\x32..shiftcrypto.bitbox02.SetDeviceLanguageRequestH\x00\x12>\n\x0b\x64\x65vice_info\x18\x04 \x01(\x0b\x32\'.shiftcrypto.bitbox02.DeviceInfoRequestH\x00\x12@\n\x0cset_password\x18\x05 \x01(\x0b\x32(.shiftcrypto.bitbox02.SetPasswordRequestH\x00\x12\x42\n\rcreate_backup\x18\x06 \x01(\x0b\x32).shiftcrypto.bitbox02.CreateBackupRequestH\x00\x12\x42\n\rshow_mnemonic\x18\x07 \x01(\x0b\x32).shiftcrypto.bitbox02.ShowMnemonicRequestH\x00\x12\x36\n\x07\x62tc_pub\x18\x08 \x01(\x0b\x32#.shiftcrypto.bitbox02.BTCPubRequestH\x00\x12\x41\n\rbtc_sign_init\x18\t \x01(\x0b\x32(.shiftcrypto.bitbox02.BTCSignInitRequestH\x00\x12\x43\n\x0e\x62tc_sign_input\x18\n \x01(\x0b\x32).shiftcrypto.bitbox02.BTCSignInputRequestH\x00\x12\x45\n\x0f\x62tc_sign_output\x18\x0b \x01(\x0b\x32*.shiftcrypto.bitbox02.BTCSignOutputRequestH\x00\x12O\n\x14insert_remove_sdcard\x18\x0c \x01(\x0b\x32/.shiftcrypto.bitbox02.InsertRemoveSDCardRequestH\x00\x12@\n\x0c\x63heck_sdcard\x18\r \x01(\x0b\x32(.shiftcrypto.bitbox02.CheckSDCardRequestH\x00\x12\x64\n\x1fset_mnemonic_passphrase_enabled\x18\x0e \x01(\x0b\x32\x39.shiftcrypto.bitbox02.SetMnemonicPassphraseEnabledRequestH\x00\x12@\n\x0clist_backups\x18\x0f \x01(\x0b\x32(.shiftcrypto.bitbox02.ListBackupsRequestH\x00\x12\x44\n\x0erestore_backup\x18\x10 
\x01(\x0b\x32*.shiftcrypto.bitbox02.RestoreBackupRequestH\x00\x12N\n\x13perform_attestation\x18\x11 \x01(\x0b\x32/.shiftcrypto.bitbox02.PerformAttestationRequestH\x00\x12\x35\n\x06reboot\x18\x12 \x01(\x0b\x32#.shiftcrypto.bitbox02.RebootRequestH\x00\x12@\n\x0c\x63heck_backup\x18\x13 \x01(\x0b\x32(.shiftcrypto.bitbox02.CheckBackupRequestH\x00\x12/\n\x03\x65th\x18\x14 \x01(\x0b\x32 .shiftcrypto.bitbox02.ETHRequestH\x00\x12\x33\n\x05reset\x18\x15 \x01(\x0b\x32\".shiftcrypto.bitbox02.ResetRequestH\x00\x12Q\n\x15restore_from_mnemonic\x18\x16 \x01(\x0b\x32\x30.shiftcrypto.bitbox02.RestoreFromMnemonicRequestH\x00\x12\x43\n\x0b\x66ingerprint\x18\x18 \x01(\x0b\x32,.shiftcrypto.bitbox02.RootFingerprintRequestH\x00\x12/\n\x03\x62tc\x18\x19 \x01(\x0b\x32 .shiftcrypto.bitbox02.BTCRequestH\x00\x12U\n\x17\x65lectrum_encryption_key\x18\x1a \x01(\x0b\x32\x32.shiftcrypto.bitbox02.ElectrumEncryptionKeyRequestH\x00\x12\x37\n\x07\x63\x61rdano\x18\x1b \x01(\x0b\x32$.shiftcrypto.bitbox02.CardanoRequestH\x00\x12\x33\n\x05\x62ip85\x18\x1c \x01(\x0b\x32\".shiftcrypto.bitbox02.BIP85RequestH\x00\x42\t\n\x07requestJ\x04\x08\x01\x10\x02J\x04\x08\x17\x10\x18\"\xbf\x07\n\x08Response\x12\x30\n\x07success\x18\x01 \x01(\x0b\x32\x1d.shiftcrypto.bitbox02.SuccessH\x00\x12,\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x1b.shiftcrypto.bitbox02.ErrorH\x00\x12?\n\x0b\x64\x65vice_info\x18\x04 \x01(\x0b\x32(.shiftcrypto.bitbox02.DeviceInfoResponseH\x00\x12\x30\n\x03pub\x18\x05 \x01(\x0b\x32!.shiftcrypto.bitbox02.PubResponseH\x00\x12\x42\n\rbtc_sign_next\x18\x06 \x01(\x0b\x32).shiftcrypto.bitbox02.BTCSignNextResponseH\x00\x12\x41\n\x0clist_backups\x18\x07 \x01(\x0b\x32).shiftcrypto.bitbox02.ListBackupsResponseH\x00\x12\x41\n\x0c\x63heck_backup\x18\x08 \x01(\x0b\x32).shiftcrypto.bitbox02.CheckBackupResponseH\x00\x12O\n\x13perform_attestation\x18\t \x01(\x0b\x32\x30.shiftcrypto.bitbox02.PerformAttestationResponseH\x00\x12\x41\n\x0c\x63heck_sdcard\x18\n 
\x01(\x0b\x32).shiftcrypto.bitbox02.CheckSDCardResponseH\x00\x12\x30\n\x03\x65th\x18\x0b \x01(\x0b\x32!.shiftcrypto.bitbox02.ETHResponseH\x00\x12\x44\n\x0b\x66ingerprint\x18\x0c \x01(\x0b\x32-.shiftcrypto.bitbox02.RootFingerprintResponseH\x00\x12\x30\n\x03\x62tc\x18\r \x01(\x0b\x32!.shiftcrypto.bitbox02.BTCResponseH\x00\x12V\n\x17\x65lectrum_encryption_key\x18\x0e \x01(\x0b\x32\x33.shiftcrypto.bitbox02.ElectrumEncryptionKeyResponseH\x00\x12\x38\n\x07\x63\x61rdano\x18\x0f \x01(\x0b\x32%.shiftcrypto.bitbox02.CardanoResponseH\x00\x12\x34\n\x05\x62ip85\x18\x10 \x01(\x0b\x32#.shiftcrypto.bitbox02.BIP85ResponseH\x00\x42\n\n\x08responseJ\x04\x08\x03\x10\x04\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\thww.proto\x12\x14shiftcrypto.bitbox02\x1a\x0c\x63ommon.proto\x1a\x15\x62\x61\x63kup_commands.proto\x1a\x15\x62itbox02_system.proto\x1a\tbtc.proto\x1a\rcardano.proto\x1a\teth.proto\x1a\x0ekeystore.proto\x1a\x0emnemonic.proto\x1a\x0csystem.proto\x1a\x0cshamir.proto\x1a\x19perform_attestation.proto\"&\n\x05\x45rror\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\"\t\n\x07Success\"\x8c\x0f\n\x07Request\x12\x41\n\x0b\x64\x65vice_name\x18\x02 \x01(\x0b\x32*.shiftcrypto.bitbox02.SetDeviceNameRequestH\x00\x12I\n\x0f\x64\x65vice_language\x18\x03 \x01(\x0b\x32..shiftcrypto.bitbox02.SetDeviceLanguageRequestH\x00\x12>\n\x0b\x64\x65vice_info\x18\x04 \x01(\x0b\x32\'.shiftcrypto.bitbox02.DeviceInfoRequestH\x00\x12@\n\x0cset_password\x18\x05 \x01(\x0b\x32(.shiftcrypto.bitbox02.SetPasswordRequestH\x00\x12\x42\n\rcreate_backup\x18\x06 \x01(\x0b\x32).shiftcrypto.bitbox02.CreateBackupRequestH\x00\x12\x42\n\rshow_mnemonic\x18\x07 \x01(\x0b\x32).shiftcrypto.bitbox02.ShowMnemonicRequestH\x00\x12\x36\n\x07\x62tc_pub\x18\x08 \x01(\x0b\x32#.shiftcrypto.bitbox02.BTCPubRequestH\x00\x12\x41\n\rbtc_sign_init\x18\t \x01(\x0b\x32(.shiftcrypto.bitbox02.BTCSignInitRequestH\x00\x12\x43\n\x0e\x62tc_sign_input\x18\n 
\x01(\x0b\x32).shiftcrypto.bitbox02.BTCSignInputRequestH\x00\x12\x45\n\x0f\x62tc_sign_output\x18\x0b \x01(\x0b\x32*.shiftcrypto.bitbox02.BTCSignOutputRequestH\x00\x12O\n\x14insert_remove_sdcard\x18\x0c \x01(\x0b\x32/.shiftcrypto.bitbox02.InsertRemoveSDCardRequestH\x00\x12@\n\x0c\x63heck_sdcard\x18\r \x01(\x0b\x32(.shiftcrypto.bitbox02.CheckSDCardRequestH\x00\x12\x64\n\x1fset_mnemonic_passphrase_enabled\x18\x0e \x01(\x0b\x32\x39.shiftcrypto.bitbox02.SetMnemonicPassphraseEnabledRequestH\x00\x12@\n\x0clist_backups\x18\x0f \x01(\x0b\x32(.shiftcrypto.bitbox02.ListBackupsRequestH\x00\x12\x44\n\x0erestore_backup\x18\x10 \x01(\x0b\x32*.shiftcrypto.bitbox02.RestoreBackupRequestH\x00\x12N\n\x13perform_attestation\x18\x11 \x01(\x0b\x32/.shiftcrypto.bitbox02.PerformAttestationRequestH\x00\x12\x35\n\x06reboot\x18\x12 \x01(\x0b\x32#.shiftcrypto.bitbox02.RebootRequestH\x00\x12@\n\x0c\x63heck_backup\x18\x13 \x01(\x0b\x32(.shiftcrypto.bitbox02.CheckBackupRequestH\x00\x12/\n\x03\x65th\x18\x14 \x01(\x0b\x32 .shiftcrypto.bitbox02.ETHRequestH\x00\x12\x33\n\x05reset\x18\x15 \x01(\x0b\x32\".shiftcrypto.bitbox02.ResetRequestH\x00\x12Q\n\x15restore_from_mnemonic\x18\x16 \x01(\x0b\x32\x30.shiftcrypto.bitbox02.RestoreFromMnemonicRequestH\x00\x12\x43\n\x0b\x66ingerprint\x18\x18 \x01(\x0b\x32,.shiftcrypto.bitbox02.RootFingerprintRequestH\x00\x12/\n\x03\x62tc\x18\x19 \x01(\x0b\x32 .shiftcrypto.bitbox02.BTCRequestH\x00\x12U\n\x17\x65lectrum_encryption_key\x18\x1a \x01(\x0b\x32\x32.shiftcrypto.bitbox02.ElectrumEncryptionKeyRequestH\x00\x12\x37\n\x07\x63\x61rdano\x18\x1b \x01(\x0b\x32$.shiftcrypto.bitbox02.CardanoRequestH\x00\x12\x33\n\x05\x62ip85\x18\x1c \x01(\x0b\x32\".shiftcrypto.bitbox02.BIP85RequestH\x00\x12>\n\x0bshow_shamir\x18\x1d \x01(\x0b\x32\'.shiftcrypto.bitbox02.ShowShamirRequestH\x00\x12M\n\x13restore_from_shamir\x18\x1e 
\x01(\x0b\x32..shiftcrypto.bitbox02.RestoreFromShamirRequestH\x00\x42\t\n\x07requestJ\x04\x08\x01\x10\x02J\x04\x08\x17\x10\x18\"\xbf\x07\n\x08Response\x12\x30\n\x07success\x18\x01 \x01(\x0b\x32\x1d.shiftcrypto.bitbox02.SuccessH\x00\x12,\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x1b.shiftcrypto.bitbox02.ErrorH\x00\x12?\n\x0b\x64\x65vice_info\x18\x04 \x01(\x0b\x32(.shiftcrypto.bitbox02.DeviceInfoResponseH\x00\x12\x30\n\x03pub\x18\x05 \x01(\x0b\x32!.shiftcrypto.bitbox02.PubResponseH\x00\x12\x42\n\rbtc_sign_next\x18\x06 \x01(\x0b\x32).shiftcrypto.bitbox02.BTCSignNextResponseH\x00\x12\x41\n\x0clist_backups\x18\x07 \x01(\x0b\x32).shiftcrypto.bitbox02.ListBackupsResponseH\x00\x12\x41\n\x0c\x63heck_backup\x18\x08 \x01(\x0b\x32).shiftcrypto.bitbox02.CheckBackupResponseH\x00\x12O\n\x13perform_attestation\x18\t \x01(\x0b\x32\x30.shiftcrypto.bitbox02.PerformAttestationResponseH\x00\x12\x41\n\x0c\x63heck_sdcard\x18\n \x01(\x0b\x32).shiftcrypto.bitbox02.CheckSDCardResponseH\x00\x12\x30\n\x03\x65th\x18\x0b \x01(\x0b\x32!.shiftcrypto.bitbox02.ETHResponseH\x00\x12\x44\n\x0b\x66ingerprint\x18\x0c \x01(\x0b\x32-.shiftcrypto.bitbox02.RootFingerprintResponseH\x00\x12\x30\n\x03\x62tc\x18\r \x01(\x0b\x32!.shiftcrypto.bitbox02.BTCResponseH\x00\x12V\n\x17\x65lectrum_encryption_key\x18\x0e \x01(\x0b\x32\x33.shiftcrypto.bitbox02.ElectrumEncryptionKeyResponseH\x00\x12\x38\n\x07\x63\x61rdano\x18\x0f \x01(\x0b\x32%.shiftcrypto.bitbox02.CardanoResponseH\x00\x12\x34\n\x05\x62ip85\x18\x10 \x01(\x0b\x32#.shiftcrypto.bitbox02.BIP85ResponseH\x00\x42\n\n\x08responseJ\x04\x08\x03\x10\x04\x62\x06proto3') _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'hww_pb2', globals()) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _ERROR._serialized_start=205 - _ERROR._serialized_end=243 - _SUCCESS._serialized_start=245 - _SUCCESS._serialized_end=254 - _REQUEST._serialized_start=257 - _REQUEST._serialized_end=2046 - 
_RESPONSE._serialized_start=2049 - _RESPONSE._serialized_end=3008 + _ERROR._serialized_start=219 + _ERROR._serialized_end=257 + _SUCCESS._serialized_start=259 + _SUCCESS._serialized_end=268 + _REQUEST._serialized_start=271 + _REQUEST._serialized_end=2203 + _RESPONSE._serialized_start=2206 + _RESPONSE._serialized_end=3165 # @@protoc_insertion_point(module_scope) diff --git a/py/bitbox02/bitbox02/communication/generated/hww_pb2.pyi b/py/bitbox02/bitbox02/communication/generated/hww_pb2.pyi index df9aeacf1..5e51b7cd0 100644 --- a/py/bitbox02/bitbox02/communication/generated/hww_pb2.pyi +++ b/py/bitbox02/bitbox02/communication/generated/hww_pb2.pyi @@ -14,6 +14,7 @@ import google.protobuf.message from . import keystore_pb2 from . import mnemonic_pb2 from . import perform_attestation_pb2 +from . import shamir_pb2 from . import system_pb2 import typing import typing_extensions @@ -68,6 +69,8 @@ class Request(google.protobuf.message.Message): ELECTRUM_ENCRYPTION_KEY_FIELD_NUMBER: builtins.int CARDANO_FIELD_NUMBER: builtins.int BIP85_FIELD_NUMBER: builtins.int + SHOW_SHAMIR_FIELD_NUMBER: builtins.int + RESTORE_FROM_SHAMIR_FIELD_NUMBER: builtins.int @property def device_name(self) -> bitbox02_system_pb2.SetDeviceNameRequest: """removed: RandomNumberRequest random_number = 1;""" @@ -124,6 +127,10 @@ class Request(google.protobuf.message.Message): def cardano(self) -> cardano_pb2.CardanoRequest: ... @property def bip85(self) -> keystore_pb2.BIP85Request: ... + @property + def show_shamir(self) -> shamir_pb2.ShowShamirRequest: ... + @property + def restore_from_shamir(self) -> shamir_pb2.RestoreFromShamirRequest: ... 
def __init__(self, *, device_name: typing.Optional[bitbox02_system_pb2.SetDeviceNameRequest] = ..., @@ -152,10 +159,12 @@ class Request(google.protobuf.message.Message): electrum_encryption_key: typing.Optional[keystore_pb2.ElectrumEncryptionKeyRequest] = ..., cardano: typing.Optional[cardano_pb2.CardanoRequest] = ..., bip85: typing.Optional[keystore_pb2.BIP85Request] = ..., + show_shamir: typing.Optional[shamir_pb2.ShowShamirRequest] = ..., + restore_from_shamir: typing.Optional[shamir_pb2.RestoreFromShamirRequest] = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["bip85",b"bip85","btc",b"btc","btc_pub",b"btc_pub","btc_sign_init",b"btc_sign_init","btc_sign_input",b"btc_sign_input","btc_sign_output",b"btc_sign_output","cardano",b"cardano","check_backup",b"check_backup","check_sdcard",b"check_sdcard","create_backup",b"create_backup","device_info",b"device_info","device_language",b"device_language","device_name",b"device_name","electrum_encryption_key",b"electrum_encryption_key","eth",b"eth","fingerprint",b"fingerprint","insert_remove_sdcard",b"insert_remove_sdcard","list_backups",b"list_backups","perform_attestation",b"perform_attestation","reboot",b"reboot","request",b"request","reset",b"reset","restore_backup",b"restore_backup","restore_from_mnemonic",b"restore_from_mnemonic","set_mnemonic_passphrase_enabled",b"set_mnemonic_passphrase_enabled","set_password",b"set_password","show_mnemonic",b"show_mnemonic"]) -> builtins.bool: ... 
- def ClearField(self, field_name: typing_extensions.Literal["bip85",b"bip85","btc",b"btc","btc_pub",b"btc_pub","btc_sign_init",b"btc_sign_init","btc_sign_input",b"btc_sign_input","btc_sign_output",b"btc_sign_output","cardano",b"cardano","check_backup",b"check_backup","check_sdcard",b"check_sdcard","create_backup",b"create_backup","device_info",b"device_info","device_language",b"device_language","device_name",b"device_name","electrum_encryption_key",b"electrum_encryption_key","eth",b"eth","fingerprint",b"fingerprint","insert_remove_sdcard",b"insert_remove_sdcard","list_backups",b"list_backups","perform_attestation",b"perform_attestation","reboot",b"reboot","request",b"request","reset",b"reset","restore_backup",b"restore_backup","restore_from_mnemonic",b"restore_from_mnemonic","set_mnemonic_passphrase_enabled",b"set_mnemonic_passphrase_enabled","set_password",b"set_password","show_mnemonic",b"show_mnemonic"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["request",b"request"]) -> typing.Optional[typing_extensions.Literal["device_name","device_language","device_info","set_password","create_backup","show_mnemonic","btc_pub","btc_sign_init","btc_sign_input","btc_sign_output","insert_remove_sdcard","check_sdcard","set_mnemonic_passphrase_enabled","list_backups","restore_backup","perform_attestation","reboot","check_backup","eth","reset","restore_from_mnemonic","fingerprint","btc","electrum_encryption_key","cardano","bip85"]]: ... 
+ def HasField(self, field_name: typing_extensions.Literal["bip85",b"bip85","btc",b"btc","btc_pub",b"btc_pub","btc_sign_init",b"btc_sign_init","btc_sign_input",b"btc_sign_input","btc_sign_output",b"btc_sign_output","cardano",b"cardano","check_backup",b"check_backup","check_sdcard",b"check_sdcard","create_backup",b"create_backup","device_info",b"device_info","device_language",b"device_language","device_name",b"device_name","electrum_encryption_key",b"electrum_encryption_key","eth",b"eth","fingerprint",b"fingerprint","insert_remove_sdcard",b"insert_remove_sdcard","list_backups",b"list_backups","perform_attestation",b"perform_attestation","reboot",b"reboot","request",b"request","reset",b"reset","restore_backup",b"restore_backup","restore_from_mnemonic",b"restore_from_mnemonic","restore_from_shamir",b"restore_from_shamir","set_mnemonic_passphrase_enabled",b"set_mnemonic_passphrase_enabled","set_password",b"set_password","show_mnemonic",b"show_mnemonic","show_shamir",b"show_shamir"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["bip85",b"bip85","btc",b"btc","btc_pub",b"btc_pub","btc_sign_init",b"btc_sign_init","btc_sign_input",b"btc_sign_input","btc_sign_output",b"btc_sign_output","cardano",b"cardano","check_backup",b"check_backup","check_sdcard",b"check_sdcard","create_backup",b"create_backup","device_info",b"device_info","device_language",b"device_language","device_name",b"device_name","electrum_encryption_key",b"electrum_encryption_key","eth",b"eth","fingerprint",b"fingerprint","insert_remove_sdcard",b"insert_remove_sdcard","list_backups",b"list_backups","perform_attestation",b"perform_attestation","reboot",b"reboot","request",b"request","reset",b"reset","restore_backup",b"restore_backup","restore_from_mnemonic",b"restore_from_mnemonic","restore_from_shamir",b"restore_from_shamir","set_mnemonic_passphrase_enabled",b"set_mnemonic_passphrase_enabled","set_password",b"set_password","show_mnemonic",b"show_mnemonic","show_shamir",b"show_shamir"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["request",b"request"]) -> typing.Optional[typing_extensions.Literal["device_name","device_language","device_info","set_password","create_backup","show_mnemonic","btc_pub","btc_sign_init","btc_sign_input","btc_sign_output","insert_remove_sdcard","check_sdcard","set_mnemonic_passphrase_enabled","list_backups","restore_backup","perform_attestation","reboot","check_backup","eth","reset","restore_from_mnemonic","fingerprint","btc","electrum_encryption_key","cardano","bip85","show_shamir","restore_from_shamir"]]: ... 
global___Request = Request class Response(google.protobuf.message.Message): diff --git a/py/bitbox02/bitbox02/communication/generated/shamir_pb2.py b/py/bitbox02/bitbox02/communication/generated/shamir_pb2.py new file mode 100644 index 000000000..be4e9da95 --- /dev/null +++ b/py/bitbox02/bitbox02/communication/generated/shamir_pb2.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: shamir.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cshamir.proto\x12\x14shiftcrypto.bitbox02\"\x13\n\x11ShowShamirRequest\"\x1a\n\x18RestoreFromShamirRequestb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'shamir_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _SHOWSHAMIRREQUEST._serialized_start=38 + _SHOWSHAMIRREQUEST._serialized_end=57 + _RESTOREFROMSHAMIRREQUEST._serialized_start=59 + _RESTOREFROMSHAMIRREQUEST._serialized_end=85 +# @@protoc_insertion_point(module_scope) diff --git a/py/bitbox02/bitbox02/communication/generated/shamir_pb2.pyi b/py/bitbox02/bitbox02/communication/generated/shamir_pb2.pyi new file mode 100644 index 000000000..b7719da6d --- /dev/null +++ b/py/bitbox02/bitbox02/communication/generated/shamir_pb2.pyi @@ -0,0 +1,20 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import google.protobuf.descriptor +import google.protobuf.message + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +class ShowShamirRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... +global___ShowShamirRequest = ShowShamirRequest + +class RestoreFromShamirRequest(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... +global___RestoreFromShamirRequest = RestoreFromShamirRequest diff --git a/py/send_message.py b/py/send_message.py index 617408d8d..e488aaa4b 100755 --- a/py/send_message.py +++ b/py/send_message.py @@ -898,6 +898,13 @@ def _show_mnemnoic_seed(self) -> None: print("Success") except UserAbortException: print("Aborted by user") + def _show_shamir_seed(self) -> None: + print("Your BitBox02 will now show the Shamir seed mnemonics") + try: + self._device.show_shamir() + print("Success") + except UserAbortException: + print("Aborted by user") def _create_backup(self) -> None: if self._device.check_backup(silent=True) is not None: @@ -1417,6 +1424,7 @@ def _menu_init(self) -> None: ("List backups", self._print_backups), ("Check backup", self._check_backup), ("Show mnemonic", self._show_mnemnoic_seed), + ("Show shamir", self._show_shamir_seed), ("Create backup", self._create_backup), ("Reboot into bootloader", self._reboot), ("Check if SD card inserted", self._check_sd_presence), diff --git a/src/keystore.c b/src/keystore.c index ef17bd488..2b87d304a 100644 --- a/src/keystore.c +++ b/src/keystore.c @@ -564,6 +564,27 @@ bool keystore_get_bip39_mnemonic(char* mnemonic_out, size_t mnemonic_out_size) return snprintf_result >= 0 && snprintf_result < (int)mnemonic_out_size; } +bool keystore_get_bip39_mnemonic_from_bytes(const uint8_t* bytes, size_t len, char* mnemonic_out, size_t mnemonic_out_size) +{ + if (keystore_is_locked()) { + return false; + } + + if (len > 
KEYSTORE_MAX_SEED_LENGTH) { + return false; + } + char* mnemonic = NULL; + if (bip39_mnemonic_from_bytes(NULL, bytes, len, &mnemonic) != WALLY_OK) { + return false; + } + int snprintf_result = snprintf(mnemonic_out, mnemonic_out_size, "%s", mnemonic); + util_cleanup_str(&mnemonic); + free(mnemonic); + return snprintf_result >= 0 && snprintf_result < (int)mnemonic_out_size; +} + + + bool keystore_bip39_mnemonic_to_seed(const char* mnemonic, uint8_t* seed_out, size_t* seed_len_out) { return bip39_mnemonic_to_bytes(NULL, mnemonic, seed_out, 32, seed_len_out) == WALLY_OK; diff --git a/src/keystore.h b/src/keystore.h index ba2f37f9b..e7d5d1bb8 100644 --- a/src/keystore.h +++ b/src/keystore.h @@ -124,6 +124,17 @@ USE_RESULT bool keystore_is_locked(void); */ USE_RESULT bool keystore_get_bip39_mnemonic(char* mnemonic_out, size_t mnemonic_out_size); +/** + * @param[in] bytes array containing the input seed + * @param[in] len length of the input seed + * @param[out] mnemonic_out resulting mnemonic + * @param[in] mnemonic_out_size size of mnemonic_out. Should be at least 216 bytes (longest possible + * 24 word phrase plus null terminator). + * @return returns false if the keystore is not unlocked or the mnemonic does not fit. + * The resulting string should be safely zeroed after use. + */ +USE_RESULT bool keystore_get_bip39_mnemonic_from_bytes(const uint8_t* bytes, size_t len, char* mnemonic_out, size_t mnemonic_out_size); + /** * Turn a bip39 mnemonic into a seed. Make sure to use UTIL_CLEANUP_32 to destroy it. * Output can be fed into `keystore_encrypt_and_store_seed` to create a keystore from the mnemonic. 
diff --git a/src/rust/Cargo.lock b/src/rust/Cargo.lock index 43f841733..21a6478fc 100644 --- a/src/rust/Cargo.lock +++ b/src/rust/Cargo.lock @@ -12,6 +12,12 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ahash" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0453232ace82dee0dd0b4c87a59bd90f7b53b314f3e0f61fe2ee7c8a16482289" + [[package]] name = "anyhow" version = "1.0.31" @@ -121,8 +127,10 @@ dependencies = [ "num-bigint", "num-traits", "prost", + "rand_chacha", "sha2", "sha3", + "sharks", "streaming-silent-payments", "util", "zeroize", @@ -222,6 +230,12 @@ dependencies = [ "generic-array", ] +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "bytes" version = "1.0.0" @@ -508,6 +522,15 @@ dependencies = [ "subtle", ] +[[package]] +name = "hashbrown" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +dependencies = [ + "ahash", +] + [[package]] name = "hex" version = "0.4.3" @@ -702,6 +725,15 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "ppv-lite86" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + [[package]] name = "primeorder" version = "0.13.6" @@ -752,6 +784,25 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + [[package]] name = "rand_core" version = "0.6.4" @@ -907,6 +958,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sharks" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "902b1e955f8a2e429fb1bad49f83fb952e6195d3c360ac547ff00fb826388753" +dependencies = [ + "hashbrown", + "rand", +] + [[package]] name = "signature" version = "2.2.0" @@ -1020,6 +1081,27 @@ dependencies = [ "rand_core", ] +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "zeroize" version = "1.7.0" diff --git a/src/rust/bitbox02-rust/Cargo.toml b/src/rust/bitbox02-rust/Cargo.toml index d3ab9a60f..5f344e0b7 100644 --- a/src/rust/bitbox02-rust/Cargo.toml +++ b/src/rust/bitbox02-rust/Cargo.toml @@ -50,6 +50,8 @@ ed25519-dalek = { version = "2.1.1", default-features = false, features = ["hazm hmac = { version = "0.12.1", default-features = false, features = ["reset"] } miniscript = { version = "12.2.0", default-features = false, features = ["no-std"], optional = true } +sharks = { version = "0.5.0", default-features = false, optional = true } +rand_chacha = { version = "0.3.1", default-features = false, optional = true } bitcoin = { workspace = true } # We don't rely on this dep directly, the miniscript/bitcoin deps do. 
We list it here to enable the # small-hash feature to reduce the binary size, saving around 2784 bytes (as measured at time of @@ -79,6 +81,8 @@ app-bitcoin = [ "dep:bech32", "dep:miniscript", "dep:streaming-silent-payments", + "dep:sharks", + "dep:rand_chacha", "bitbox02/app-bitcoin", ] app-litecoin = [ diff --git a/src/rust/bitbox02-rust/src/hww/api.rs b/src/rust/bitbox02-rust/src/hww/api.rs index ba528f22a..00554f1ec 100644 --- a/src/rust/bitbox02-rust/src/hww/api.rs +++ b/src/rust/bitbox02-rust/src/hww/api.rs @@ -38,6 +38,7 @@ mod set_mnemonic_passphrase_enabled; mod set_password; mod show_mnemonic; mod system; +mod show_shamir; use alloc::vec::Vec; @@ -120,6 +121,7 @@ fn can_call(request: &Request) -> bool { Request::RestoreFromMnemonic(_) => matches!(state, State::Uninitialized | State::Seeded), Request::CreateBackup(_) => matches!(state, State::Seeded | State::Initialized), Request::ShowMnemonic(_) => matches!(state, State::Seeded | State::Initialized), + Request::ShowShamir(_) => matches!(state, State::Seeded | State::Initialized), Request::Fingerprint(_) => matches!(state, State::Initialized), Request::ElectrumEncryptionKey(_) => matches!(state, State::Initialized), Request::BtcPub(_) | Request::Btc(_) | Request::BtcSignInit(_) => { @@ -160,6 +162,7 @@ async fn process_api(request: &Request) -> Result { Request::ShowMnemonic(_) => show_mnemonic::process().await, Request::RestoreFromMnemonic(ref request) => restore::from_mnemonic(request).await, Request::ElectrumEncryptionKey(ref request) => electrum::process(request).await, + Request::ShowShamir(_) => show_shamir::process().await, #[cfg(feature = "app-ethereum")] Request::Eth(pb::EthRequest { diff --git a/src/rust/bitbox02-rust/src/hww/api/show_shamir.rs b/src/rust/bitbox02-rust/src/hww/api/show_shamir.rs new file mode 100644 index 000000000..3b1162304 --- /dev/null +++ b/src/rust/bitbox02-rust/src/hww/api/show_shamir.rs @@ -0,0 +1,90 @@ +// Copyright 2020 Shift Crypto AG +// +// Licensed under the Apache 
License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use alloc::vec::Vec; + +use super::Error; +use crate::pb; +use crate::workflow::confirm; + +use pb::response::Response; + +use crate::workflow::{mnemonic, status, unlock}; +use bitbox02::keystore; +use sharks::{ Sharks, Share }; +use rand_chacha::rand_core::SeedableRng; + +/// Handle the ShowShamir API call. This shows the seed shards encoded as +/// 12/18/24 BIP39 English words. Afterwards, for each word, the user +/// is asked to pick the right word among 5 words, to check if they +/// wrote it down correctly. +pub async fn process() -> Result { + if bitbox02::memory::is_initialized() { + unlock::unlock_keystore("Unlock device", unlock::CanCancel::Yes).await?; + } + // Shamir threshold: any 3 of the generated shares recover the seed. + let sharks = Sharks(3); + + // // Obtain an iterator over the shares for secret [1, 2, 3, 4] + // // TODO: use RNG from SE? NOTE(review): the fixed [0x90; 32] seed makes the shares deterministic — insecure, must be replaced before release. + let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]); + + let seed = bitbox02::keystore::copy_seed()?; + let dealer = sharks.dealer_rng(&seed, &mut rng); + // let dealer = sharks.dealer_rng(&[1,2,3,4], &mut rng); + // Get 3 shares + let mut shares: Vec = dealer.take(3).collect(); + for s in shares { + + // shares.remove(1); + // shares.remove(0); + // Recover the original secret!
+ // bitbox02::print_stdout("Recovering...\n"); + // let secret = sharks.recover(shares.as_slice()); + // match secret { + // Err(e) => bitbox02::print_stdout(&format!("Error {}\n", e)), + // Ok(_) => bitbox02::print_stdout("***test ok\n"), + // } + // assert_eq!(*secret.unwrap(), *seed); + let mnemonic_sentence = keystore::get_bip39_mnemonic_from_bytes(Vec::from(&s).as_ptr(), seed.len())?; + + // NOTE(review): seed.len() above looks wrong — a serialized Share is seed.len() + 1 bytes (x coordinate + y bytes), so the last byte is silently dropped; pass the share's own length. + + confirm::confirm(&confirm::Params { + title: "Warning", + body: "DO NOT share your\nrecovery words with\nanyone!", + accept_is_nextarrow: true, + ..Default::default() + }) + .await?; + + confirm::confirm(&confirm::Params { + title: "Recovery\nwords", + body: "Please write down\nthe following words", + accept_is_nextarrow: true, + ..Default::default() + }) + .await?; + + let words: Vec<&str> = mnemonic_sentence.split(' ').collect(); + + mnemonic::show_and_confirm_mnemonic(&words).await?; + + } + bitbox02::memory::set_initialized().or(Err(Error::Memory))?; + + status::status("Backup created", true).await; + + Ok(Response::Success(pb::Success {})) +} diff --git a/src/rust/bitbox02-rust/src/shiftcrypto.bitbox02.rs b/src/rust/bitbox02-rust/src/shiftcrypto.bitbox02.rs index c14644e3f..177d4d20a 100644 --- a/src/rust/bitbox02-rust/src/shiftcrypto.bitbox02.rs +++ b/src/rust/bitbox02-rust/src/shiftcrypto.bitbox02.rs @@ -135,17 +135,7 @@ pub struct InsertRemoveSdCardRequest { } /// Nested message and enum types in `InsertRemoveSDCardRequest`. pub mod insert_remove_sd_card_request { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SdCardAction { RemoveCard = 0, @@ -238,15 +228,7 @@ pub mod btc_script_config { /// Nested message and enum types in `Multisig`. 
pub mod multisig { #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration + Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, )] #[repr(i32)] pub enum ScriptType { @@ -287,17 +269,7 @@ pub mod btc_script_config { pub keys: ::prost::alloc::vec::Vec, } /// SimpleType is a "simple" script: one public key, no additional inputs. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SimpleType { P2wpkhP2sh = 0, @@ -351,17 +323,7 @@ pub struct BtcPubRequest { } /// Nested message and enum types in `BTCPubRequest`. pub mod btc_pub_request { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum XPubType { Tpub = 0, @@ -459,17 +421,7 @@ pub struct BtcSignInitRequest { } /// Nested message and enum types in `BTCSignInitRequest`. pub mod btc_sign_init_request { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum FormatUnit { /// According to `coin` (BTC, LTC, etc.). @@ -516,9 +468,7 @@ pub struct BtcSignNextResponse { #[prost(uint32, tag = "5")] pub prev_index: u32, #[prost(message, optional, tag = "6")] - pub anti_klepto_signer_commitment: ::core::option::Option< - AntiKleptoSignerCommitment, - >, + pub anti_klepto_signer_commitment: ::core::option::Option, /// Generated output. The host *must* verify its correctness using `silent_payment_dleq_proof`. 
#[prost(bytes = "vec", tag = "7")] pub generated_output_pkscript: ::prost::alloc::vec::Vec, @@ -527,17 +477,7 @@ pub struct BtcSignNextResponse { } /// Nested message and enum types in `BTCSignNextResponse`. pub mod btc_sign_next_response { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Type { Input = 0, @@ -675,22 +615,15 @@ pub struct BtcRegisterScriptConfigRequest { /// If empty, the name is entered on the device instead. #[prost(string, tag = "2")] pub name: ::prost::alloc::string::String, - #[prost(enumeration = "btc_register_script_config_request::XPubType", tag = "3")] + #[prost( + enumeration = "btc_register_script_config_request::XPubType", + tag = "3" + )] pub xpub_type: i32, } /// Nested message and enum types in `BTCRegisterScriptConfigRequest`. pub mod btc_register_script_config_request { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum XPubType { /// Automatically choose to match Electrum's xpub format (e.g. Zpub/Vpub for p2wsh multisig mainnet/testnet). 
@@ -1007,13 +940,9 @@ pub struct CardanoSignTransactionRequest { #[prost(uint64, tag = "5")] pub ttl: u64, #[prost(message, repeated, tag = "6")] - pub certificates: ::prost::alloc::vec::Vec< - cardano_sign_transaction_request::Certificate, - >, + pub certificates: ::prost::alloc::vec::Vec, #[prost(message, repeated, tag = "7")] - pub withdrawals: ::prost::alloc::vec::Vec< - cardano_sign_transaction_request::Withdrawal, - >, + pub withdrawals: ::prost::alloc::vec::Vec, #[prost(uint64, tag = "8")] pub validity_interval_start: u64, /// include ttl even if it is zero @@ -1097,15 +1026,7 @@ pub mod cardano_sign_transaction_request { /// Nested message and enum types in `VoteDelegation`. pub mod vote_delegation { #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration + Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration, )] #[repr(i32)] pub enum CardanoDRepType { @@ -1165,9 +1086,8 @@ pub mod cardano_sign_transaction_request { #[derive(Clone, PartialEq, ::prost::Message)] pub struct CardanoSignTransactionResponse { #[prost(message, repeated, tag = "1")] - pub shelley_witnesses: ::prost::alloc::vec::Vec< - cardano_sign_transaction_response::ShelleyWitness, - >, + pub shelley_witnesses: + ::prost::alloc::vec::Vec, } /// Nested message and enum types in `CardanoSignTransactionResponse`. pub mod cardano_sign_transaction_response { @@ -1264,17 +1184,7 @@ pub struct EthPubRequest { } /// Nested message and enum types in `ETHPubRequest`. 
pub mod eth_pub_request { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum OutputType { Address = 0, @@ -1437,17 +1347,7 @@ pub mod eth_sign_typed_message_request { #[prost(message, repeated, tag = "2")] pub members: ::prost::alloc::vec::Vec, } - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum DataType { Unknown = 0, @@ -1498,24 +1398,17 @@ pub mod eth_sign_typed_message_request { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct EthTypedMessageValueResponse { - #[prost(enumeration = "eth_typed_message_value_response::RootObject", tag = "1")] + #[prost( + enumeration = "eth_typed_message_value_response::RootObject", + tag = "1" + )] pub root_object: i32, #[prost(uint32, repeated, tag = "2")] pub path: ::prost::alloc::vec::Vec, } /// Nested message and enum types in `ETHTypedMessageValueResponse`. 
pub mod eth_typed_message_value_response { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum RootObject { Unknown = 0, @@ -1730,6 +1623,12 @@ pub struct SetMnemonicPassphraseEnabledRequest { pub enabled: bool, } #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ShowShamirRequest {} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RestoreFromShamirRequest {} +#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RebootRequest { #[prost(enumeration = "reboot_request::Purpose", tag = "1")] @@ -1737,17 +1636,7 @@ pub struct RebootRequest { } /// Nested message and enum types in `RebootRequest`. pub mod reboot_request { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Purpose { Upgrade = 0, @@ -1813,7 +1702,7 @@ pub struct Success {} pub struct Request { #[prost( oneof = "request::Request", - tags = "2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 27, 28" + tags = "2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29" )] pub request: ::core::option::Option, } @@ -1876,6 +1765,8 @@ pub mod request { Cardano(super::CardanoRequest), #[prost(message, tag = "28")] Bip85(super::Bip85Request), + #[prost(message, tag = "29")] + ShowShamir(super::ShowShamirRequest), } } #[allow(clippy::derive_partial_eq_without_eq)] diff --git a/src/rust/bitbox02-sys/build.rs b/src/rust/bitbox02-sys/build.rs index 7bc80b41e..a01eb02ee 100644 --- a/src/rust/bitbox02-sys/build.rs +++ 
b/src/rust/bitbox02-sys/build.rs @@ -68,6 +68,7 @@ const ALLOWLIST_FNS: &[&str] = &[ "keystore_encode_xpub_at_keypath", "keystore_encrypt_and_store_seed", "keystore_get_bip39_mnemonic", + "keystore_get_bip39_mnemonic_from_bytes", "keystore_get_bip39_word", "keystore_get_ed25519_seed", "keystore_is_locked", diff --git a/src/rust/bitbox02/src/keystore.rs b/src/rust/bitbox02/src/keystore.rs index 53076f686..6fcfa1cfa 100644 --- a/src/rust/bitbox02/src/keystore.rs +++ b/src/rust/bitbox02/src/keystore.rs @@ -132,6 +132,28 @@ pub fn get_bip39_mnemonic() -> Result, ()> { } } +pub fn get_bip39_mnemonic_from_bytes( + bytes: *const u8, + len: usize, +) -> Result, ()> { + let mut mnemonic = zeroize::Zeroizing::new([0u8; 256]); + match unsafe { + bitbox02_sys::keystore_get_bip39_mnemonic_from_bytes( + bytes, + len, + mnemonic.as_mut_ptr(), + mnemonic.len() as _, + ) + } { + false => Err(()), + true => Ok(zeroize::Zeroizing::new( + crate::util::str_from_null_terminated(&mnemonic[..]) + .unwrap() + .into(), + )), + } +} + /// `idx` must be smaller than BIP39_WORDLIST_LEN. 
pub fn get_bip39_word(idx: u16) -> Result, ()> { let mut word_ptr: *mut u8 = core::ptr::null_mut(); diff --git a/src/rust/vendor/ahash/.cargo-checksum.json b/src/rust/vendor/ahash/.cargo-checksum.json new file mode 100644 index 000000000..61614ac5b --- /dev/null +++ b/src/rust/vendor/ahash/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"385a1fffe8fe8eaeefb731f7e8a2301a69448c69cb44142ea94f870a49dcda5b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"96e6af5def25a27317b764c060c944dbaee267f32096708e47eb159a8159379c","rustfmt.toml":"e090969e99df9360705680cc0097cfaddae10c22dc2e01470592cf3b9787fd36","smhasher/0001-Add-support-for-aHash.patch":"df01a714dd66554cab415afdec1bd634d19a53e564029ba4b84d2e1baffb6104","smhasher/0002-Add-support-for-aHash.patch":"47fe89f174b921dd134ce95d7147468680b87ad12180d3a00aa8aebdace1c104","smhasher/ahashOutput.txt":"25eaaf3615286ababb15d560ce16a741ed6447a8679b6b6633e0f39c0cc9c5c2","smhasher/clone_smhasher.sh":"510e12ec67052486853f186ce3e18f342fc80da88a47bf9b1aca81c827cb72b7","smhasher/fallbackOutput.txt":"247a408cb8426ed6c0119861d1b06809983cbbff73eafc181fb29910139c1f01","src/aes_hash.rs":"9aa49ede2f9a393a797d55a98d104ec959f1c7387eb262ec306dcf8cf0284297","src/convert.rs":"59e9c20bfd37e8604c80b97339b77e6f6a600c8b177b672cad9ac3600f7f88e0","src/fallback_hash.rs":"5636a30aac53aff84dad46b9228db8957a9b2c2358ab51b1bcb786d751e55954","src/hash_map.rs":"36bf0b13e334d7cedbf83e4822438098b227e7c7e381abe5e1eeac1ff6caa209","src/hash_quality_test.rs":"bc642c29539a4acc75aa460d409008b7939c0eb8687e07e1a0d47f29fbbc9abb","src/hash_set.rs":"4289672c142e314a0bfc6535b5e8f5e07cc78b60c0c7b308a43fa361eca6ddea","src/lib.rs":"4a0b56b99ba7b941fc3a9412e825af4a9525e003192a894a055b8df4fda1bfbc","src/operations.rs":"82663e2e9d2dac714e84eb0c1daac979d20f7e80a7391027f8d03b0987186804","src/random_state.rs":"ac2aae2cf85d05be97376819291fe094109
b85ce6bdcdcbeda26d85e10d2f6b3","src/specialize.rs":"3136ba21f0e9b991f6df589744f724d8e71586e0b48c969183a8b33ce7966c8c","tests/bench.rs":"90cc49473a25184c4d8d21d623a52b103e2d7c7c30e487da000b510c4ba5d944","tests/map_tests.rs":"c1e14ec39c2009d8a081349245e70af317c2d8424d40dc239a6c250c0adfae77","tests/nopanic.rs":"5edbaa43a16f8fc96180c2f3d576436640e6fc3221856df329b0540024625cab"},"package":"0453232ace82dee0dd0b4c87a59bd90f7b53b314f3e0f61fe2ee7c8a16482289"} \ No newline at end of file diff --git a/src/rust/vendor/ahash/Cargo.toml b/src/rust/vendor/ahash/Cargo.toml new file mode 100644 index 000000000..3b93b7881 --- /dev/null +++ b/src/rust/vendor/ahash/Cargo.toml @@ -0,0 +1,109 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +name = "ahash" +version = "0.4.8" +authors = ["Tom Kaitchuck "] +description = "A non-cryptographic hash function using AES-NI for high performance" +documentation = "https://docs.rs/ahash" +readme = "README.md" +keywords = [ + "hash", + "hashmap", + "aes", + "aes-ni", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/tkaitchuck/ahash" + +[package.metadata.docs.rs] +features = ["std"] +rustc-args = [ + "-C", + "target-feature=+aes", +] +rustdoc-args = [ + "-C", + "target-feature=+aes", +] + +[profile.bench] +opt-level = 3 +lto = "fat" +codegen-units = 1 +debug = 0 +debug-assertions = false + +[profile.release] +opt-level = 3 +lto = "fat" +codegen-units = 1 +debug = 0 +debug-assertions = false + +[profile.test] +opt-level = 2 +lto = "fat" + +[lib] +name = "ahash" +path = "src/lib.rs" +test = true +doctest = true +bench = true +doc = true + +[[bench]] +name = "ahash" +path = "tests/bench.rs" +harness = false + +[[bench]] +name = "map" +path = "tests/map_tests.rs" +harness = false + +[dependencies.const-random] +version = "0.1.6" +optional = true + +[dev-dependencies.criterion] +version = "0.3.2" + +[dev-dependencies.fnv] +version = "1.0.5" + +[dev-dependencies.fxhash] +version = "0.2.1" + +[dev-dependencies.hex] +version = "0.3.2" + +[dev-dependencies.no-panic] +version = "0.1.10" + +[dev-dependencies.rand] +version = "0.6.5" + +[dev-dependencies.seahash] +version = "3.0.5" + +[features] +compile-time-rng = ["const-random"] +default = [ + "compile-time-rng", + "std", +] +specialize = [] +std = [] diff --git a/src/rust/vendor/ahash/LICENSE-APACHE b/src/rust/vendor/ahash/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/src/rust/vendor/ahash/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/rust/vendor/ahash/LICENSE-MIT b/src/rust/vendor/ahash/LICENSE-MIT new file mode 100644 index 000000000..5afc2a7b0 --- /dev/null +++ b/src/rust/vendor/ahash/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Amanieu d'Antras + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/src/rust/vendor/ahash/README.md b/src/rust/vendor/ahash/README.md new file mode 100644 index 000000000..b29d7080e --- /dev/null +++ b/src/rust/vendor/ahash/README.md @@ -0,0 +1,245 @@ +# aHash ![Build Status](https://img.shields.io/github/workflow/status/tkaitchuck/ahash/Rust) ![Licence](https://img.shields.io/crates/l/ahash) ![Downloads](https://img.shields.io/crates/d/ahash) + +AHash is a high speed keyed hashing algorithm intended for use in in-memory hashmaps. It provides a high quality +64bit hash. AHash is designed for performance and is not cryptographically secure. + +## Goals + +AHash is the fastest DOS resistant hash for use in HashMaps available in the Rust language. +Failing in any of these criteria will be treated as a bug. + +## Design + +AHash is a keyed hash, so two instances initialized with different keys will produce completely different hashes, and the +resulting hashes cannot be predicted without knowing the keys. [This prevents DOS attacks where an attacker sends a large +number of items whose hashes collide that get used as keys in a hashmap.](https://github.com/tkaitchuck/aHash/wiki/How-aHash-is-resists-DOS-attacks) + +AHash takes advantage of specialized hardware instructions whenever possible including the [hardware AES instruction](https://en.wikipedia.org/wiki/AES_instruction_set) +on X86 processors when it is available. 
If it is not available it falls back on a somewhat slower (but still DOS resistant) +[algorithm based on multiplication](https://github.com/tkaitchuck/aHash/wiki/AHash-fallback-algorithm). + +As such aHash does not have a fixed standard for its output. This is not a problem for Hashmaps, and allows aHash to achieve high performance and improve over time. + +## Non-Goals + +Because different computers or computers on versions of the code will observe different outputs Hash is not recommended +for use other than in-memory maps. Specifically aHash does not intend to be: + +* Used as a MACs or other application requiring a cryptographically secure hash +* Used for distributed applications or ones requiring persisting hashed values + +## Hash quality + +**Both aHash's aes variant and the fallback pass the full [SMHasher test suite](https://github.com/rurban/smhasher)** (the output of the tests is checked into the smhasher subdirectory.) + +At **over 50GB/s** aHash is the fastest algorithm to pass the full test suite by more than a factor of 2. Even the fallback algorithm is in the top 5 in terms of throughput. + +## Speed + +When it is available aHash uses AES rounds using the AES-NI instruction. AES-NI is very fast (on an intel i7-6700 it +is as fast as a 64 bit multiplication.) and handles 16 bytes of input at a time, while being a very strong permutation. + +This is obviously much faster than most standard approaches to hashing, and does a better job of scrambling data than most non-secure hashes. 
+ +On an intel i7-6700 compiled on nightly Rust with flags `-C opt-level=3 -C target-cpu=native -C codegen-units=1`: + +| Input | SipHash 1-3 time | FnvHash time|FxHash time| aHash time| aHash Fallback* | +|----------------|-----------|-----------|-----------|-----------|---------------| +| u8 | 9.3271 ns | 0.808 ns | **0.594 ns** | 0.7704 ns | 0.7664 ns | +| u16 | 9.5139 ns | 0.803 ns | **0.594 ns** | 0.7653 ns | 0.7704 ns | +| u32 | 9.1196 ns | 1.4424 ns | **0.594 ns** | 0.7637 ns | 0.7712 ns | +| u64 | 10.854 ns | 3.0484 ns | **0.628 ns** | 0.7788 ns | 0.7888 ns | +| u128 | 12.465 ns | 7.0728 ns | 0.799 ns | **0.6174 ns** | 0.6250 ns | +| 1 byte string | 11.745 ns | 2.4743 ns | 2.4000 ns | **1.4921 ns** | 1.5861 ns | +| 3 byte string | 12.066 ns | 3.5221 ns | 2.9253 ns | **1.4745 ns** | 1.8518 ns | +| 4 byte string | 11.634 ns | 4.0770 ns | 1.8818 ns | **1.5206 ns** | 1.8924 ns | +| 7 byte string | 14.762 ns | 5.9780 ns | 3.2282 ns | **1.5207 ns** | 1.8933 ns | +| 8 byte string | 13.442 ns | 4.0535 ns | 2.9422 ns | **1.6262 ns** | 1.8929 ns | +| 15 byte string | 16.880 ns | 8.3434 ns | 4.6070 ns | **1.6265 ns** | 1.7965 ns | +| 16 byte string | 15.155 ns | 7.5796 ns | 3.2619 ns | **1.6262 ns** | 1.8011 ns | +| 24 byte string | 16.521 ns | 12.492 ns | 3.5424 ns | **1.6266 ns** | 2.8311 ns | +| 68 byte string | 24.598 ns | 50.715 ns | 5.8312 ns | **4.8282 ns** | 5.4824 ns | +| 132 byte string| 39.224 ns | 119.96 ns | 11.777 ns | **6.5087 ns** | 9.1459 ns | +|1024 byte string| 254.00 ns | 1087.3 ns | 156.41 ns | **25.402 ns** | 54.566 ns | + +* Fallback refers to the algorithm aHash would use if AES instructions are unavailable. +For reference a hash that does nothing (not even reads the input data takes) **0.520 ns**. So that represents the fastest +possible time. + +As you can see above aHash like `FxHash` provides a large speedup over `SipHash-1-3` which is already nearly twice as fast as `SipHash-2-4`. 
+ +Rust's HashMap by default uses `SipHash-1-3` because faster hash functions such as `FxHash` are predictable and vulnerable to denial of +service attacks. While `aHash` has both very strong scrambling and very high performance. + +AHash performs well when dealing with large inputs because aHash reads 8 or 16 bytes at a time. (depending on availability of AES-NI) + +Because of this, and its optimized logic, `aHash` is able to outperform `FxHash` with strings. +It also provides especially good performance dealing with unaligned input. +(Notice the big performance gaps between 3 vs 4, 7 vs 8 and 15 vs 16 in `FxHash` above) + +For more a more representative performance comparison which includes the overhead of using a HashMap, see [HashBrown's benchmarks](https://github.com/rust-lang/hashbrown#performance) +as HashBrown now uses aHash as its hasher by default. + +## Security + +AHash is designed to [prevent an adversary that does not know the key from being able to create hash collisions or partial collisions.](https://github.com/tkaitchuck/aHash/wiki/How-aHash-is-resists-DOS-attacks) + +This achieved by ensuring that: + +* aHash is designed to [resist differential crypto analysis](https://github.com/tkaitchuck/aHash/wiki/How-aHash-is-resists-DOS-attacks#differential-analysis). Meaning it should not be possible to devise a scheme to "cancel" out a modification of the internal state from a block of input via some corresponding change in a subsequent block of input. + * This is achieved by not performing any "premixing" - This reversible mixing gave previous hashes such as murmurhash confidence in their quality, but could be undone by a deliberate attack. + * Before it is used each chunk of input is "masked" such as by xoring it with an unpredictable value. +* aHash obeys the '[strict avalanche criterion](https://en.wikipedia.org/wiki/Avalanche_effect#Strict_avalanche_criterion)': +Each bit of input has the potential to flip every bit of the output. 
+* Similarly, each bit in the key can affect every bit in the output. +* Input bits never affect just one, or a very few, bits in intermediate state. This is specifically designed to prevent the sort of +[differential attacks launched by the sipHash authors](https://emboss.github.io/blog/2012/12/14/breaking-murmur-hash-flooding-dos-reloaded/) which cancel previous inputs. +* The `finish` call at the end of the hash is designed to not expose individual bits of the internal state. + * For example in the main algorithm 256bits of state and 256bits of keys are reduced to 64 total bits using 3 rounds of AES encryption. +Reversing this is more than non-trivial. Most of the information is by definition gone, and any given bit of the internal state is fully diffused across the output. +* In both aHash and its fallback the internal state is divided into two halves which are updated by two unrelated techniques using the same input. - This means that if there is a way to attack one of them it likely won't be able to attack both of them at the same time. +* It is deliberately difficult to 'chain' collisions. + * To attack Previous attacks on hash functions have relied on the ability + +More details are available on [the wiki](https://github.com/tkaitchuck/aHash/wiki/How-aHash-is-resists-DOS-attacks). + +### aHash is not cryptographically secure + +AHash should not be used for situations where cryptographic security is needed. +It is not intended for this and will likely fail to hold up for several reasons. + +1. aHash relies on random keys which are assumed to not be observable by an attacker. For a cryptographic hash all inputs can be seen and controlled by the attacker. +2. aHash has not yet gone through peer review. +3. Because aHash uses reduced rounds of AES as opposed to the standard of 10. Things like the SQUARE attack apply to part of the internal state. +(These are mitigated by other means to prevent producing collections, but would be a problem in other contexts). 
+4. Like any cypher based hash, it will show certain statistical deviations from truly random output when comparing a (VERY) large number of hashes. +(By definition cyphers have fewer collisions than truly random data.) + +There are several efforts to build a secure hash function that uses AES-NI for acceleration, but aHash is not one of them. + +## Accelerated CPUs + +Hardware AES instructions are built into Intel processors built after 2010 and AMD processors after 2012. +It is also available on [many other CPUs](https://en.wikipedia.org/wiki/AES_instruction_set) should in eventually +be able to get aHash to work. However, only X86 and X86-64 are the only supported architectures at the moment, as currently +they are the only architectures for which Rust provides an intrinsic. + +aHash also uses `sse2` and `sse3` instructions. X86 processors that have `aesni` also have these instruction sets. + +## Why not use a cryptographic hash in a hashmap. + +Cryptographic hashes are designed to make is nearly impossible to find two items that collide when the attacker has full control +over the input. This has several implications: + +* They are very difficult to construct, and have to go to a lot of effort to ensure that collisions are not possible. +* They have no notion of a 'key'. Rather, they are fully deterministic and provide exactly one hash for a given input. + +For a HashMap the requirements are different. + +* Speed is very important, especially for short inputs. Often the key for a HashMap is a single `u32` or similar, and to be effective +the bucket that it should be hashed to needs to be computed in just a few CPU cycles. +* A hashmap does not need to provide a hard and fast guarantee that no two inputs will ever collide. Hence, hashCodes are not 256bits +but are just 64 or 32 bits in length. Often the first thing done with the hashcode is to truncate it further to compute which among a few buckets should be used for a key. 
+ * Here collisions are expected, and a cheap to deal with provided there is no systematic way to generated huge numbers of values that all +go to the same bucket. + * This also means that unlike a cryptographic hash partial collisions matter. It doesn't do a hashmap any good to produce a unique 256bit hash if +the lower 12 bits are all the same. This means that even a provably irreversible hash would not offer protection from a DOS attack in a hashmap +because an attacker can easily just brute force the bottom N bits. + +From a cryptography point of view, a hashmap needs something closer to a block cypher. +Where the input can be quickly mixed in a way that cannot be reversed without knowing a key. + +# Why use aHash over X + +## SipHash + +For a hashmap: Because aHash nearly **10x** faster. + +SipHash is however useful in other contexts, such as for a HMAC, where aHash would be completely inappropriate. + +*SipHash-2-4* is designed to provide DOS attack resistance, and has no presently known attacks +against this claim that doesn't involve learning bits of the key. + +SipHash is also available in the "1-3" variant which is about twice as fast as the standard version. +The SipHash authors don't recommend using this variation when DOS attacks are a concern, but there are still no known +practical DOS attacks against the algorithm. Rust has opted for the "1-3" version as the default in `std::collections::HashMap`, +because the speed trade off of "2-4" was not worth it. + +As you can see in the table above, aHash is **much** faster than even *SipHash-1-3*, but it also provides DOS resistance, +and any attack against the accelerated form would likely involve a weakness in AES. + +## FxHash + +In terms of performance, aHash is faster than the FXhash for strings and byte arrays but not primitives. +So it might seem like using Fxhash for hashmaps when the key is a primitive is a good idea. This is *not* the case. 
+ +When FX hash is operating on a 4 or 8 byte input such as a u32 or a u64, it reduces to multiplying the input by a fixed +constant. This is a bad hashing algorithm because it means that lower bits can never be influenced by any higher bit. In +the context of a hashmap where the low order bits are used to determine which bucket to put an item in, this isn't +any better than the identity function. Any keys that happen to end in the same bit pattern will all collide. +Some examples of where this is likely to occur are: + +* Strings encoded in base64 +* Null terminated strings (when working with C code) +* Integers that have the lower bits as zeros. (IE any multiple of small power of 2, which isn't a rare pattern in computer programs.) + * For example when taking lengths of data or locations in data it is common for values to +have a multiple of 1024, if these were used as keys in a map they will collide and end up in the same bucket. + +Like any non-keyed hash FxHash can be attacked. But FxHash is so prone to this that you may find yourself doing it accidentally. + +For example, it is possible to [accidentally introduce quadratic behavior by reading from one map in iteration order and writing to another.](https://accidentallyquadratic.tumblr.com/post/153545455987/rust-hash-iteration-reinsertion) + +Fxhash flaws make sense when you understand it for what it is. It is a quick and dirty hash, nothing more. +it was not published and promoted by its creator, it was **found**! + +Because it is error-prone, FxHash should never be used as a default. In specialized instances where the keys are understood +it makes sense, but given that aHash is faster on almost any object, it's probably not worth it. + +## FnvHash + +FnvHash is also a poor default. It only handles one byte at a time, so its performance really suffers with large inputs. 
+It is also non-keyed so it is still subject to DOS attacks and [accidentally quadratic behavior.](https://accidentallyquadratic.tumblr.com/post/153545455987/rust-hash-iteration-reinsertion) + +## MurmurHash, CityHash, MetroHash, FarmHash, HighwayHash, XXHash, SeaHash + +Murmur, City, Metro, Farm and Highway are all related, and appear to directly replace one another. Sea and XX are independent +and compete. + +They are all fine hashing algorithms, they do a good job of scrambling data, but they are all targeted at a different +usecase. They are intended to work in distributed systems where the hash is expected to be the same over time and from one +computer to the next, efficiently hashing large volumes of data. + +This is quite different from the needs of a Hasher used in a hashmap. In a map the typical value is under 10 bytes. None +of these algorithms scale down to handle that small of data at a competitive time. What's more the restriction that they +provide consistent output prevents them from taking advantage of different hardware capabilities on different CPUs. It makes +sense for a hashmap to work differently on a phone than on a server, or in wasm. + +If you need to persist or transmit a hash of a file, then using one of these is probably a good idea. HighwayHash seems to be the preferred solution du jour. But inside a simple Hashmap, stick with aHash. + +## AquaHash + +AquaHash is structured similarly to aHash. (Though the two were designed completely independently). AquaHash does not scale down nearly as well and +does poorly with for example a single `i32` as input. Its only implementation at this point is in C++. + +## t1ha + +T1ha is fairly fast at large sizes, and the output is of fairly high quality, but it is not clear what usecase it aims for. +It has many different versions and is very complex, and uses hardware tricks, so one might infer it is meant for +hashmaps like aHash. 
But any hash using it take at least **20ns**, and it doesn't outperform even SipHash until the +input sizes are larger than 128 bytes and is not designed to be DOS resistant. So uses are likely niche. + +# License + +Licensed under either of: + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +## Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any +additional terms or conditions. + diff --git a/src/rust/vendor/ahash/rustfmt.toml b/src/rust/vendor/ahash/rustfmt.toml new file mode 100644 index 000000000..753065179 --- /dev/null +++ b/src/rust/vendor/ahash/rustfmt.toml @@ -0,0 +1 @@ +max_width = 120 diff --git a/src/rust/vendor/ahash/smhasher/0001-Add-support-for-aHash.patch b/src/rust/vendor/ahash/smhasher/0001-Add-support-for-aHash.patch new file mode 100644 index 000000000..99a98d380 --- /dev/null +++ b/src/rust/vendor/ahash/smhasher/0001-Add-support-for-aHash.patch @@ -0,0 +1,135 @@ +From 426384ce34cf410d892eeeeeb7f6046d52bff8e7 Mon Sep 17 00:00:00 2001 +From: Tom Kaitchuck +Date: Sat, 11 Jul 2020 17:15:56 -0700 +Subject: [PATCH] Add support for ahash + +--- + CMakeLists.txt | 1 + + Hashes.h | 5 +++++ + ahash.h | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ + main.cpp | 2 +- + 4 files changed, 55 insertions(+), 1 deletion(-) + create mode 100644 ahash.h + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 6ebab1a..9d79e98 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -470,10 +470,11 @@ add_executable( + target_link_libraries( + SMHasher + SMHasherSupport + ${HIGHWAY_LIB} + ${BLAKE3_LIB} ++ libahash_c.a + ${CMAKE_THREAD_LIBS_INIT} + ) + + #add_executable( + # bittest +diff --git a/Hashes.h b/Hashes.h +index 
4e111c1..fcd3e38 100644 +--- a/Hashes.h ++++ b/Hashes.h +@@ -19,10 +19,11 @@ + #if defined(__SSE4_2__) && defined(__x86_64__) + #include "metrohash/metrohash64crc.h" + #include "metrohash/metrohash128crc.h" + #endif + ++#include "ahash.h" + #include "fasthash.h" + #include "jody_hash32.h" + #include "jody_hash64.h" + + // objsize: 0-0x113 = 276 +@@ -356,10 +357,14 @@ inline void fasthash32_test ( const void * key, int len, uint32_t seed, void * o + } + #ifdef HAVE_INT64 + inline void fasthash64_test ( const void * key, int len, uint32_t seed, void * out ) { + *(uint64_t*)out = fasthash64(key, (size_t) len, (uint64_t)seed); + } ++inline void ahash64_test ( const void * key, int len, uint32_t seed, void * out ) { ++ *(uint64_t*)out = ahash64(key, (size_t) len, (uint64_t)seed); ++} ++ + #endif + + // objsize 0-778: 1912 + void mum_hash_test(const void * key, int len, uint32_t seed, void * out); + +diff --git a/ahash.h b/ahash.h +new file mode 100644 +index 0000000..6c59caf +--- /dev/null ++++ b/ahash.h +@@ -0,0 +1,48 @@ ++/* The MIT License ++ ++ Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com) ++ ++ Permission is hereby granted, free of charge, to any person ++ obtaining a copy of this software and associated documentation ++ files (the "Software"), to deal in the Software without ++ restriction, including without limitation the rights to use, copy, ++ modify, merge, publish, distribute, sublicense, and/or sell copies ++ of the Software, and to permit persons to whom the Software is ++ furnished to do so, subject to the following conditions: ++ ++ The above copyright notice and this permission notice shall be ++ included in all copies or substantial portions of the Software. ++ ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS ++ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ++ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ SOFTWARE. ++*/ ++ ++#ifndef _AHASH_H ++#define _AHASH_H ++ ++#include ++#include ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/** ++ * Ahash - 64-bit implementation of aHash ++ * @buf: data buffer ++ * @len: data size ++ * @seed: the seed ++ */ ++ uint64_t ahash64(const void *buf, size_t len, uint64_t seed); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif +\ No newline at end of file +diff --git a/main.cpp b/main.cpp +index 04060f2..7489aaf 100644 +--- a/main.cpp ++++ b/main.cpp +@@ -263,11 +263,11 @@ HashInfo g_hashes[] = + + { xxh3_test, 64, 0x39CD9E4A, "xxh3", "xxHash v3, 64-bit", GOOD }, + { xxh3low_test, 32, 0xFAE8467B, "xxh3low", "xxHash v3, 64-bit, low 32-bits part", GOOD }, + { xxh128_test, 128, 0xEB61B3A0, "xxh128", "xxHash v3, 128-bit", GOOD }, + { xxh128low_test, 64, 0x54D1CC70, "xxh128low", "xxHash v3, 128-bit, low 64-bits part", GOOD }, +- ++ { ahash64_test, 64, 0x00000000, "ahash64", "ahash 64bit", GOOD }, //Expected value set to zero because aHash does not adhere to a fixed output. 
+ #if __WORDSIZE >= 64 + # define TIFU_VERIF 0x644236D4 + #else + // broken on certain travis + # define TIFU_VERIF 0x0 +-- +2.25.1 + diff --git a/src/rust/vendor/ahash/smhasher/0002-Add-support-for-aHash.patch b/src/rust/vendor/ahash/smhasher/0002-Add-support-for-aHash.patch new file mode 100644 index 000000000..93ccbff47 --- /dev/null +++ b/src/rust/vendor/ahash/smhasher/0002-Add-support-for-aHash.patch @@ -0,0 +1,269 @@ +From 426384ce34cf410d892eeeeeb7f6046d52bff8e7 Mon Sep 17 00:00:00 2001 +From: Tom Kaitchuck +Date: Sat, 11 Jul 2020 17:15:56 -0700 +Subject: [PATCH] Add support for ahash + +--- + CMakeLists.txt | 1 + + Hashes.h | 5 +++++ + ahash.h | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ + main.cpp | 2 +- + 4 files changed, 55 insertions(+), 1 deletion(-) + create mode 100644 ahash.h + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 6ebab1a..9d79e98 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -470,10 +470,11 @@ add_executable( + target_link_libraries( + SMHasher + SMHasherSupport + ${HIGHWAY_LIB} + ${BLAKE3_LIB} ++ libahash_c.a + ${CMAKE_THREAD_LIBS_INIT} + ) + + #add_executable( + # bittest +diff --git a/Hashes.h b/Hashes.h +index 4e111c1..fcd3e38 100644 +--- a/Hashes.h ++++ b/Hashes.h +@@ -19,10 +19,11 @@ + #if defined(__SSE4_2__) && defined(__x86_64__) + #include "metrohash/metrohash64crc.h" + #include "metrohash/metrohash128crc.h" + #endif + ++#include "ahash.h" + #include "fasthash.h" + #include "jody_hash32.h" + #include "jody_hash64.h" + + // objsize: 0-0x113 = 276 +@@ -356,10 +357,14 @@ inline void fasthash32_test ( const void * key, int len, uint32_t seed, void * o + } + #ifdef HAVE_INT64 + inline void fasthash64_test ( const void * key, int len, uint32_t seed, void * out ) { + *(uint64_t*)out = fasthash64(key, (size_t) len, (uint64_t)seed); + } ++inline void ahash64_test ( const void * key, int len, uint32_t seed, void * out ) { ++ *(uint64_t*)out = ahash64(key, (size_t) len, (uint64_t)seed); ++} ++ + #endif + + // objsize 
0-778: 1912 + void mum_hash_test(const void * key, int len, uint32_t seed, void * out); + +diff --git a/ahash.h b/ahash.h +new file mode 100644 +index 0000000..6c59caf +--- /dev/null ++++ b/ahash.h +@@ -0,0 +1,48 @@ ++/* The MIT License ++ ++ Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com) ++ ++ Permission is hereby granted, free of charge, to any person ++ obtaining a copy of this software and associated documentation ++ files (the "Software"), to deal in the Software without ++ restriction, including without limitation the rights to use, copy, ++ modify, merge, publish, distribute, sublicense, and/or sell copies ++ of the Software, and to permit persons to whom the Software is ++ furnished to do so, subject to the following conditions: ++ ++ The above copyright notice and this permission notice shall be ++ included in all copies or substantial portions of the Software. ++ ++ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS ++ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ++ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++ SOFTWARE. 
++*/ ++ ++#ifndef _AHASH_H ++#define _AHASH_H ++ ++#include ++#include ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/** ++ * Ahash - 64-bit implementation of aHash ++ * @buf: data buffer ++ * @len: data size ++ * @seed: the seed ++ */ ++ uint64_t ahash64(const void *buf, size_t len, uint64_t seed); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif +\ No newline at end of file +diff --git a/main.cpp b/main.cpp +index 04060f2..7489aaf 100644 +--- a/main.cpp ++++ b/main.cpp +@@ -263,11 +263,11 @@ HashInfo g_hashes[] = + + { xxh3_test, 64, 0x39CD9E4A, "xxh3", "xxHash v3, 64-bit", GOOD }, + { xxh3low_test, 32, 0xFAE8467B, "xxh3low", "xxHash v3, 64-bit, low 32-bits part", GOOD }, + { xxh128_test, 128, 0xEB61B3A0, "xxh128", "xxHash v3, 128-bit", GOOD }, + { xxh128low_test, 64, 0x54D1CC70, "xxh128low", "xxHash v3, 128-bit, low 64-bits part", GOOD }, +- ++ { ahash64_test, 64, 0x00000000, "ahash64", "ahash 64bit", GOOD }, //Expected value set to zero because aHash does not adhere to a fixed output. 
+ #if __WORDSIZE >= 64 + # define TIFU_VERIF 0x644236D4 + #else + // broken on certain travis + # define TIFU_VERIF 0x0 +-- +2.25.1 + +diff --git a/CMakeLists.txt b/CMakeLists.txt +index e4658a7..efef724 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -630,20 +630,21 @@ if(ipo_supported) + set_property(TARGET SMHasherSupport PROPERTY INTERPROCEDURAL_OPTIMIZATION + True) + set_property(TARGET SMHasher PROPERTY INTERPROCEDURAL_OPTIMIZATION True) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLTO") + # set_source_files_properties(main.cpp PROPERTIES COMPILE_FLAGS "-DLTO") + else() + message(STATUS "IPO / LTO not supported: <${error}>") + endif() + + target_link_libraries(SMHasher SMHasherSupport ${HIGHWAY_LIB} ${BLAKE3_LIB} ++ libahash_c.a + ${CMAKE_THREAD_LIBS_INIT}) + + # add_executable( bittest bittest.cpp ) + # + # target_link_libraries( bittest SMHasherSupport ${CMAKE_THREAD_LIBS_INIT} ) + + if(NOT (CMAKE_CROSSCOMPILING)) + enable_testing() + add_test(VerifyAll SMHasher --test=VerifyAll) + add_test(Sanity SMHasher --test=Sanity) +diff --git a/Hashes.h b/Hashes.h +index f795403..036b49b 100644 +--- a/Hashes.h ++++ b/Hashes.h +@@ -14,20 +14,21 @@ + #include "metrohash/metrohash64.h" + #include "metrohash/metrohash128.h" + #include "cmetrohash.h" + #include "opt_cmetrohash.h" + + #if defined(__SSE4_2__) && defined(__x86_64__) + #include "metrohash/metrohash64crc.h" + #include "metrohash/metrohash128crc.h" + #endif + ++#include "ahash.h" + #include "fasthash.h" + #include "jody_hash32.h" + #include "jody_hash64.h" + + // objsize: 0-0x113 = 276 + #include "tifuhash.h" + // objsize: 5f0-85f = 623 + #include "floppsyhash.h" + + #include "vmac.h" +@@ -353,20 +354,24 @@ inline void cmetrohash64_2_test ( const void * key, int len, uint32_t seed, void + } + #endif + + inline void fasthash32_test ( const void * key, int len, uint32_t seed, void * out ) { + *(uint32_t*)out = fasthash32(key, (size_t) len, seed); + } + #ifdef HAVE_INT64 + inline void fasthash64_test ( const 
void * key, int len, uint32_t seed, void * out ) { + *(uint64_t*)out = fasthash64(key, (size_t) len, (uint64_t)seed); + } ++ ++inline void ahash64_test ( const void * key, int len, uint32_t seed, void * out ) { ++ *(uint64_t*)out = ahash64(key, (size_t) len, (uint64_t)seed); ++} + #endif + + // objsize 0-778: 1912 + void mum_hash_test(const void * key, int len, uint32_t seed, void * out); + + inline void mum_low_test ( const void * key, int len, uint32_t seed, void * out ) { + uint64_t result; + mum_hash_test(key, len, seed, &result); + *(uint32_t*)out = (uint32_t)result; + } +diff --git a/ahash.h b/ahash.h +new file mode 100644 +index 0000000..2ed416d +--- /dev/null ++++ b/ahash.h +@@ -0,0 +1,24 @@ ++ ++#ifndef _AHASH_H ++#define _AHASH_H ++ ++#include ++#include ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++/** ++ * Ahash - 64-bit implementation of aHash ++ * @buf: data buffer ++ * @len: data size ++ * @seed: the seed ++ */ ++ uint64_t ahash64(const void *buf, size_t len, uint64_t seed); ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif +\ No newline at end of file +diff --git a/main.cpp b/main.cpp +index f742fbf..c221f7d 100644 +--- a/main.cpp ++++ b/main.cpp +@@ -434,20 +434,21 @@ HashInfo g_hashes[] = + { t1ha0_ia32aes_avx1_test, 64, 0xF07C4DA5, "t1ha0_aes_avx1", "Fast Positive Hash (machine-specific, requires AES-NI & AVX)", GOOD }, + # endif /* __AVX__ */ + # if defined(__AVX2__) + { t1ha0_ia32aes_avx2_test, 64, 0x8B38C599, "t1ha0_aes_avx2", "Fast Positive Hash (machine-specific, requires AES-NI & AVX2)", GOOD }, + # endif /* __AVX2__ */ + #endif /* T1HA0_AESNI_AVAILABLE */ + { xxh3_test, 64, 0x39CD9E4A, "xxh3", "xxHash v3, 64-bit", GOOD }, + { xxh3low_test, 32, 0xFAE8467B, "xxh3low", "xxHash v3, 64-bit, low 32-bits part", GOOD }, + { xxh128_test, 128, 0xEB61B3A0, "xxh128", "xxHash v3, 128-bit", GOOD }, + { xxh128low_test, 64, 0x54D1CC70, "xxh128low", "xxHash v3, 128-bit, low 64-bits part", GOOD }, ++ { ahash64_test, 64, 0x00000000, "ahash64", "ahash 
64bit", GOOD }, //Expected value set to zero because aHash does not adhere to a fixed output. + #ifdef HAVE_BIT32 + { wyhash32_test, 32, 0x09DE8066, "wyhash32", "wyhash (32-bit)", GOOD }, + #else + { wyhash32low, 32, 0x9241B8A3, "wyhash32low", "wyhash lower 32bit", GOOD }, + #endif + #ifdef HAVE_INT64 + { wyhash_test, 64, 0x7C62138D, "wyhash", "wyhash (64-bit)", GOOD }, + #endif + + }; diff --git a/src/rust/vendor/ahash/smhasher/ahashOutput.txt b/src/rust/vendor/ahash/smhasher/ahashOutput.txt new file mode 100644 index 000000000..3bc122bfe --- /dev/null +++ b/src/rust/vendor/ahash/smhasher/ahashOutput.txt @@ -0,0 +1,1516 @@ +------------------------------------------------------------------------------- +--- Testing ahash64 "ahash 64bit" GOOD + +[[[ Sanity Tests ]]] + +Verification value 0x84A46E17 ....... SKIP (self- or unseeded) +Running sanity check 1 .......... PASS +Running AppendedZeroesTest .......... PASS + +[[[ Speed Tests ]]] + +Bulk speed test - 262144-byte keys +Alignment 7 - 8.351 bytes/cycle - 23891.85 MiB/sec @ 3 ghz +Alignment 6 - 8.327 bytes/cycle - 23823.64 MiB/sec @ 3 ghz +Alignment 5 - 8.312 bytes/cycle - 23780.76 MiB/sec @ 3 ghz +Alignment 4 - 8.309 bytes/cycle - 23772.79 MiB/sec @ 3 ghz +Alignment 3 - 8.315 bytes/cycle - 23790.37 MiB/sec @ 3 ghz +Alignment 2 - 8.339 bytes/cycle - 23858.92 MiB/sec @ 3 ghz +Alignment 1 - 8.320 bytes/cycle - 23804.48 MiB/sec @ 3 ghz +Alignment 0 - 8.364 bytes/cycle - 23930.33 MiB/sec @ 3 ghz +Average - 8.330 bytes/cycle - 23831.64 MiB/sec @ 3 ghz + +Small key speed test - 1-byte keys - 11.98 cycles/hash +Small key speed test - 2-byte keys - 12.95 cycles/hash +Small key speed test - 3-byte keys - 13.00 cycles/hash +Small key speed test - 4-byte keys - 14.00 cycles/hash +Small key speed test - 5-byte keys - 14.28 cycles/hash +Small key speed test - 6-byte keys - 13.98 cycles/hash +Small key speed test - 7-byte keys - 14.24 cycles/hash +Small key speed test - 8-byte keys - 14.28 cycles/hash +Small key speed test - 
9-byte keys - 14.18 cycles/hash +Small key speed test - 10-byte keys - 14.29 cycles/hash +Small key speed test - 11-byte keys - 14.12 cycles/hash +Small key speed test - 12-byte keys - 14.00 cycles/hash +Small key speed test - 13-byte keys - 14.00 cycles/hash +Small key speed test - 14-byte keys - 14.00 cycles/hash +Small key speed test - 15-byte keys - 14.23 cycles/hash +Small key speed test - 16-byte keys - 14.00 cycles/hash +Small key speed test - 17-byte keys - 16.05 cycles/hash +Small key speed test - 18-byte keys - 16.14 cycles/hash +Small key speed test - 19-byte keys - 16.00 cycles/hash +Small key speed test - 20-byte keys - 16.17 cycles/hash +Small key speed test - 21-byte keys - 16.00 cycles/hash +Small key speed test - 22-byte keys - 16.07 cycles/hash +Small key speed test - 23-byte keys - 16.13 cycles/hash +Small key speed test - 24-byte keys - 15.99 cycles/hash +Small key speed test - 25-byte keys - 16.12 cycles/hash +Small key speed test - 26-byte keys - 15.99 cycles/hash +Small key speed test - 27-byte keys - 16.00 cycles/hash +Small key speed test - 28-byte keys - 16.30 cycles/hash +Small key speed test - 29-byte keys - 18.11 cycles/hash +Small key speed test - 30-byte keys - 18.52 cycles/hash +Small key speed test - 31-byte keys - 17.98 cycles/hash +Average 15.132 cycles/hash + +[[[ 'Hashmap' Speed Tests ]]] + +std::unordered_map +Init std HashMapTest: 270.009 cycles/op (102401 inserts, 1% deletions) +Running std HashMapTest: 120.593 cycles/op (3.6 stdv) + +greg7mdp/parallel-hashmap +Init fast HashMapTest: 110.896 cycles/op (102401 inserts, 1% deletions) +Running fast HashMapTest: 81.841 cycles/op (0.1 stdv) ....... 
PASS + +[[[ Avalanche Tests ]]] + +Testing 24-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.702667% +Testing 32-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.670667% +Testing 40-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.608667% +Testing 48-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.670667% +Testing 56-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.670000% +Testing 64-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.663333% +Testing 72-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.780667% +Testing 80-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.716000% +Testing 96-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.621333% +Testing 112-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.642667% +Testing 128-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.629333% +Testing 160-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.747333% +Testing 512-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.757333% +Testing 1024-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.820000% + +[[[ Keyset 'Sparse' Tests ]]] + +Keyset 'Sparse' - 16-bit keys with up to 9 bits set - 50643 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 0.3, actual 1 (3.35x) (1) ! 
+Testing collisions (high 19-25 bits) - Worst is 24 bits: 90/76 (1.18x) +Testing collisions (high 12-bit) - Expected 46547.0, actual 46547 (1.00x) +Testing collisions (high 8-bit) - Expected 50387.0, actual 50387 (1.00x) +Testing collisions (low 32-bit) - Expected 0.3, actual 0 (0.00x) +Testing collisions (low 19-25 bits) - Worst is 25 bits: 39/38 (1.02x) +Testing collisions (low 12-bit) - Expected 46547.0, actual 46547 (1.00x) +Testing collisions (low 8-bit) - Expected 50387.0, actual 50387 (1.00x) +Testing distribution - Worst bias is the 13-bit window at bit 55 - 0.572% + +Keyset 'Sparse' - 24-bit keys with up to 8 bits set - 1271626 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 188.2, actual 174 (0.92x) +Testing collisions (high 24-35 bits) - Worst is 26 bits: 12182/12047 (1.01x) +Testing collisions (high 12-bit) - Expected 1267530.0, actual 1267530 (1.00x) +Testing collisions (high 8-bit) - Expected 1271370.0, actual 1271370 (1.00x) +Testing collisions (low 32-bit) - Expected 188.2, actual 201 (1.07x) (13) +Testing collisions (low 24-35 bits) - Worst is 32 bits: 201/188 (1.07x) +Testing collisions (low 12-bit) - Expected 1267530.0, actual 1267530 (1.00x) +Testing collisions (low 8-bit) - Expected 1271370.0, actual 1271370 (1.00x) +Testing distribution - Worst bias is the 17-bit window at bit 53 - 0.082% + +Keyset 'Sparse' - 32-bit keys with up to 7 bits set - 4514873 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 2373.0, actual 2366 (1.00x) (-7) +Testing collisions (high 25-38 bits) - Worst is 30 bits: 9627/9492 (1.01x) +Testing collisions (high 12-bit) - Expected 4510777.0, actual 4510777 (1.00x) +Testing collisions (high 8-bit) - Expected 4514617.0, actual 4514617 (1.00x) +Testing collisions (low 32-bit) - Expected 2373.0, actual 2295 (0.97x) +Testing collisions (low 25-38 bits) - Worst is 30 bits: 9493/9492 (1.00x) +Testing 
collisions (low 12-bit) - Expected 4510777.0, actual 4510777 (1.00x) +Testing collisions (low 8-bit) - Expected 4514617.0, actual 4514617 (1.00x) +Testing distribution - Worst bias is the 19-bit window at bit 12 - 0.048% + +Keyset 'Sparse' - 40-bit keys with up to 6 bits set - 4598479 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 2461.7, actual 2561 (1.04x) (100) +Testing collisions (high 25-38 bits) - Worst is 32 bits: 2561/2461 (1.04x) +Testing collisions (high 12-bit) - Expected 4594383.0, actual 4594383 (1.00x) +Testing collisions (high 8-bit) - Expected 4598223.0, actual 4598223 (1.00x) +Testing collisions (low 32-bit) - Expected 2461.7, actual 2444 (0.99x) (-17) +Testing collisions (low 25-38 bits) - Worst is 35 bits: 323/307 (1.05x) +Testing collisions (low 12-bit) - Expected 4594383.0, actual 4594383 (1.00x) +Testing collisions (low 8-bit) - Expected 4598223.0, actual 4598223 (1.00x) +Testing distribution - Worst bias is the 19-bit window at bit 2 - 0.056% + +Keyset 'Sparse' - 48-bit keys with up to 6 bits set - 14196869 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 23463.6, actual 23431 (1.00x) (-32) +Testing collisions (high 27-42 bits) - Worst is 37 bits: 747/733 (1.02x) +Testing collisions (high 12-bit) - Expected 14192773.0, actual 14192773 (1.00x) +Testing collisions (high 8-bit) - Expected 14196613.0, actual 14196613 (1.00x) +Testing collisions (low 32-bit) - Expected 23463.6, actual 23284 (0.99x) (-179) +Testing collisions (low 27-42 bits) - Worst is 41 bits: 51/45 (1.11x) +Testing collisions (low 12-bit) - Expected 14192773.0, actual 14192773 (1.00x) +Testing collisions (low 8-bit) - Expected 14196613.0, actual 14196613 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 44 - 0.029% + +Keyset 'Sparse' - 56-bit keys with up to 5 bits set - 4216423 keys +Testing collisions ( 64-bit) - Expected 0.0, 
actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 2069.7, actual 2053 (0.99x) (-16) +Testing collisions (high 25-38 bits) - Worst is 31 bits: 4136/4139 (1.00x) +Testing collisions (high 12-bit) - Expected 4212327.0, actual 4212327 (1.00x) +Testing collisions (high 8-bit) - Expected 4216167.0, actual 4216167 (1.00x) +Testing collisions (low 32-bit) - Expected 2069.7, actual 2058 (0.99x) (-11) +Testing collisions (low 25-38 bits) - Worst is 30 bits: 8320/8278 (1.00x) +Testing collisions (low 12-bit) - Expected 4212327.0, actual 4212327 (1.00x) +Testing collisions (low 8-bit) - Expected 4216167.0, actual 4216167 (1.00x) +Testing distribution - Worst bias is the 18-bit window at bit 26 - 0.051% + +Keyset 'Sparse' - 64-bit keys with up to 5 bits set - 8303633 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8026.9, actual 8024 (1.00x) (-2) +Testing collisions (high 26-40 bits) - Worst is 37 bits: 267/250 (1.06x) +Testing collisions (high 12-bit) - Expected 8299537.0, actual 8299537 (1.00x) +Testing collisions (high 8-bit) - Expected 8303377.0, actual 8303377 (1.00x) +Testing collisions (low 32-bit) - Expected 8026.9, actual 8006 (1.00x) (-20) +Testing collisions (low 26-40 bits) - Worst is 40 bits: 34/31 (1.08x) +Testing collisions (low 12-bit) - Expected 8299537.0, actual 8299537 (1.00x) +Testing collisions (low 8-bit) - Expected 8303377.0, actual 8303377 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 25 - 0.041% + +Keyset 'Sparse' - 72-bit keys with up to 5 bits set - 15082603 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 26482.7, actual 26443 (1.00x) (-39) +Testing collisions (high 27-42 bits) - Worst is 42 bits: 33/25 (1.28x) +Testing collisions (high 12-bit) - Expected 15078507.0, actual 15078507 (1.00x) +Testing collisions (high 8-bit) - Expected 15082347.0, actual 15082347 (1.00x) +Testing 
collisions (low 32-bit) - Expected 26482.7, actual 26586 (1.00x) (104) +Testing collisions (low 27-42 bits) - Worst is 42 bits: 29/25 (1.12x) +Testing collisions (low 12-bit) - Expected 15078507.0, actual 15078507 (1.00x) +Testing collisions (low 8-bit) - Expected 15082347.0, actual 15082347 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 31 - 0.033% + +Keyset 'Sparse' - 96-bit keys with up to 4 bits set - 3469497 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1401.3, actual 1457 (1.04x) (56) +Testing collisions (high 25-38 bits) - Worst is 32 bits: 1457/1401 (1.04x) +Testing collisions (high 12-bit) - Expected 3465401.0, actual 3465401 (1.00x) +Testing collisions (high 8-bit) - Expected 3469241.0, actual 3469241 (1.00x) +Testing collisions (low 32-bit) - Expected 1401.3, actual 1390 (0.99x) (-11) +Testing collisions (low 25-38 bits) - Worst is 38 bits: 26/21 (1.19x) +Testing collisions (low 12-bit) - Expected 3465401.0, actual 3465401 (1.00x) +Testing collisions (low 8-bit) - Expected 3469241.0, actual 3469241 (1.00x) +Testing distribution - Worst bias is the 19-bit window at bit 52 - 0.070% + +Keyset 'Sparse' - 160-bit keys with up to 4 bits set - 26977161 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 84723.3, actual 84650 (1.00x) (-73) +Testing collisions (high 28-44 bits) - Worst is 40 bits: 336/330 (1.02x) +Testing collisions (high 12-bit) - Expected 26973065.0, actual 26973065 (1.00x) +Testing collisions (high 8-bit) - Expected 26976905.0, actual 26976905 (1.00x) +Testing collisions (low 32-bit) - Expected 84723.3, actual 84029 (0.99x) (-694) +Testing collisions (low 28-44 bits) - Worst is 37 bits: 2744/2647 (1.04x) +Testing collisions (low 12-bit) - Expected 26973065.0, actual 26973065 (1.00x) +Testing collisions (low 8-bit) - Expected 26976905.0, actual 26976905 (1.00x) +Testing distribution - Worst bias 
is the 20-bit window at bit 57 - 0.013% + +Keyset 'Sparse' - 256-bit keys with up to 3 bits set - 2796417 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 910.4, actual 903 (0.99x) (-7) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 29/28 (1.02x) +Testing collisions (high 12-bit) - Expected 2792321.0, actual 2792321 (1.00x) +Testing collisions (high 8-bit) - Expected 2796161.0, actual 2796161 (1.00x) +Testing collisions (low 32-bit) - Expected 910.4, actual 855 (0.94x) +Testing collisions (low 25-37 bits) - Worst is 28 bits: 14588/14565 (1.00x) +Testing collisions (low 12-bit) - Expected 2792321.0, actual 2792321 (1.00x) +Testing collisions (low 8-bit) - Expected 2796161.0, actual 2796161 (1.00x) +Testing distribution - Worst bias is the 19-bit window at bit 7 - 0.112% + +Keyset 'Sparse' - 512-bit keys with up to 3 bits set - 22370049 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 58256.4, actual 58578 (1.01x) (322) +Testing collisions (high 28-43 bits) - Worst is 43 bits: 32/28 (1.12x) +Testing collisions (high 12-bit) - Expected 22365953.0, actual 22365953 (1.00x) +Testing collisions (high 8-bit) - Expected 22369793.0, actual 22369793 (1.00x) +Testing collisions (low 32-bit) - Expected 58256.4, actual 58543 (1.00x) (287) +Testing collisions (low 28-43 bits) - Worst is 36 bits: 3712/3641 (1.02x) +Testing collisions (low 12-bit) - Expected 22365953.0, actual 22365953 (1.00x) +Testing collisions (low 8-bit) - Expected 22369793.0, actual 22369793 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 43 - 0.015% + +Keyset 'Sparse' - 1024-bit keys with up to 2 bits set - 524801 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 32.1, actual 35 (1.09x) (3) +Testing collisions (high 22-32 bits) - Worst is 32 bits: 35/32 (1.09x) +Testing collisions (high 
12-bit) - Expected 520705.0, actual 520705 (1.00x) +Testing collisions (high 8-bit) - Expected 524545.0, actual 524545 (1.00x) +Testing collisions (low 32-bit) - Expected 32.1, actual 38 (1.19x) (6) +Testing collisions (low 22-32 bits) - Worst is 32 bits: 38/32 (1.19x) +Testing collisions (low 12-bit) - Expected 520705.0, actual 520705 (1.00x) +Testing collisions (low 8-bit) - Expected 524545.0, actual 524545 (1.00x) +Testing distribution - Worst bias is the 16-bit window at bit 17 - 0.142% + +Keyset 'Sparse' - 2048-bit keys with up to 2 bits set - 2098177 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.5, actual 488 (0.95x) +Testing collisions (high 24-36 bits) - Worst is 30 bits: 2050/2050 (1.00x) +Testing collisions (high 12-bit) - Expected 2094081.0, actual 2094081 (1.00x) +Testing collisions (high 8-bit) - Expected 2097921.0, actual 2097921 (1.00x) +Testing collisions (low 32-bit) - Expected 512.5, actual 523 (1.02x) (11) +Testing collisions (low 24-36 bits) - Worst is 34 bits: 147/128 (1.15x) +Testing collisions (low 12-bit) - Expected 2094081.0, actual 2094081 (1.00x) +Testing collisions (low 8-bit) - Expected 2097921.0, actual 2097921 (1.00x) +Testing distribution - Worst bias is the 18-bit window at bit 57 - 0.080% + + +[[[ Keyset 'Permutation' Tests ]]] + +Combination Lowbits Tests: +Keyset 'Combination' - up to 7 blocks from a set of 8 - 2396744 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 668.7, actual 645 (0.96x) +Testing collisions (high 25-37 bits) - Worst is 35 bits: 94/83 (1.12x) +Testing collisions (high 12-bit) - Expected 2392648.0, actual 2392648 (1.00x) +Testing collisions (high 8-bit) - Expected 2396488.0, actual 2396488 (1.00x) +Testing collisions (low 32-bit) - Expected 668.7, actual 693 (1.04x) (25) +Testing collisions (low 25-37 bits) - Worst is 35 bits: 98/83 (1.17x) +Testing collisions (low 12-bit) - 
Expected 2392648.0, actual 2392648 (1.00x) +Testing collisions (low 8-bit) - Expected 2396488.0, actual 2396488 (1.00x) +Testing distribution - Worst bias is the 17-bit window at bit 45 - 0.076% + + +Combination Highbits Tests +Keyset 'Combination' - up to 7 blocks from a set of 8 - 2396744 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 668.7, actual 682 (1.02x) (14) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 26/20 (1.24x) +Testing collisions (high 12-bit) - Expected 2392648.0, actual 2392648 (1.00x) +Testing collisions (high 8-bit) - Expected 2396488.0, actual 2396488 (1.00x) +Testing collisions (low 32-bit) - Expected 668.7, actual 690 (1.03x) (22) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 47/41 (1.12x) +Testing collisions (low 12-bit) - Expected 2392648.0, actual 2392648 (1.00x) +Testing collisions (low 8-bit) - Expected 2396488.0, actual 2396488 (1.00x) +Testing distribution - Worst bias is the 18-bit window at bit 13 - 0.059% + + +Combination Hi-Lo Tests: +Keyset 'Combination' - up to 6 blocks from a set of 15 - 12204240 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 17339.3, actual 17195 (0.99x) (-144) +Testing collisions (high 27-41 bits) - Worst is 40 bits: 72/67 (1.06x) +Testing collisions (high 12-bit) - Expected 12200144.0, actual 12200144 (1.00x) +Testing collisions (high 8-bit) - Expected 12203984.0, actual 12203984 (1.00x) +Testing collisions (low 32-bit) - Expected 17339.3, actual 17096 (0.99x) (-243) +Testing collisions (low 27-41 bits) - Worst is 41 bits: 36/33 (1.06x) +Testing collisions (low 12-bit) - Expected 12200144.0, actual 12200144 (1.00x) +Testing collisions (low 8-bit) - Expected 12203984.0, actual 12203984 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 48 - 0.026% + + +Combination 0x8000000 Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 
8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8190 (1.00x) (-1) +Testing collisions (high 26-40 bits) - Worst is 40 bits: 43/31 (1.34x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8149 (0.99x) (-42) +Testing collisions (low 26-40 bits) - Worst is 37 bits: 272/255 (1.06x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 54 - 0.052% + + +Combination 0x0000001 Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8262 (1.01x) (71) +Testing collisions (high 26-40 bits) - Worst is 39 bits: 71/63 (1.11x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8167 (1.00x) (-24) +Testing collisions (low 26-40 bits) - Worst is 34 bits: 2066/2047 (1.01x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 2 - 0.049% + + +Combination 0x800000000000000 Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8224 (1.00x) (33) +Testing collisions (high 26-40 bits) - Worst is 40 bits: 38/31 (1.19x) +Testing collisions (high 12-bit) - Expected 
8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8182 (1.00x) (-9) +Testing collisions (low 26-40 bits) - Worst is 30 bits: 32790/32767 (1.00x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 37 - 0.045% + + +Combination 0x000000000000001 Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8173 (1.00x) (-18) +Testing collisions (high 26-40 bits) - Worst is 34 bits: 2054/2047 (1.00x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8313 (1.01x) (122) +Testing collisions (low 26-40 bits) - Worst is 40 bits: 37/31 (1.16x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 20 - 0.037% + + +Combination 16-bytes [0-1] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8316 (1.02x) (125) +Testing collisions (high 26-40 bits) - Worst is 36 bits: 545/511 (1.06x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8278 (1.01x) (87) +Testing collisions (low 26-40 bits) - Worst is 40 bits: 36/31 (1.13x) 
+Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 22 - 0.041% + + +Combination 16-bytes [0-last] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8106 (0.99x) (-85) +Testing collisions (high 26-40 bits) - Worst is 38 bits: 142/127 (1.11x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8250 (1.01x) (59) +Testing collisions (low 26-40 bits) - Worst is 39 bits: 75/63 (1.17x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 28 - 0.039% + + +Combination 32-bytes [0-1] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8337 (1.02x) (146) +Testing collisions (high 26-40 bits) - Worst is 40 bits: 35/31 (1.09x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8248 (1.01x) (57) +Testing collisions (low 26-40 bits) - Worst is 40 bits: 37/31 (1.16x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 25 - 0.038% + + +Combination 32-bytes [0-last] Tests: 
+Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8227 (1.00x) (36) +Testing collisions (high 26-40 bits) - Worst is 35 bits: 1047/1023 (1.02x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8191 (1.00x) +Testing collisions (low 26-40 bits) - Worst is 39 bits: 73/63 (1.14x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 35 - 0.043% + + +Combination 64-bytes [0-1] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8221 (1.00x) (30) +Testing collisions (high 26-40 bits) - Worst is 39 bits: 73/63 (1.14x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8060 (0.98x) (-131) +Testing collisions (low 26-40 bits) - Worst is 29 bits: 65173/65535 (0.99x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 8 - 0.060% + + +Combination 64-bytes [0-last] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8254 (1.01x) (63) +Testing collisions (high 26-40 bits) - Worst is 38 bits: 
137/127 (1.07x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8101 (0.99x) (-90) +Testing collisions (low 26-40 bits) - Worst is 37 bits: 260/255 (1.02x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 41 - 0.040% + + +Combination 128-bytes [0-1] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8141 (0.99x) (-50) +Testing collisions (high 26-40 bits) - Worst is 40 bits: 33/31 (1.03x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8340 (1.02x) (149) +Testing collisions (low 26-40 bits) - Worst is 40 bits: 38/31 (1.19x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 39 - 0.062% + + +Combination 128-bytes [0-last] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 8192.0, actual 8309 (1.01x) (118) +Testing collisions (high 26-40 bits) - Worst is 37 bits: 275/255 (1.07x) +Testing collisions (high 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (high 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing collisions (low 32-bit) - Expected 8192.0, actual 8176 (1.00x) (-15) +Testing collisions 
(low 26-40 bits) - Worst is 36 bits: 561/511 (1.10x) +Testing collisions (low 12-bit) - Expected 8384510.0, actual 8384510 (1.00x) +Testing collisions (low 8-bit) - Expected 8388350.0, actual 8388350 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 17 - 0.032% + + +[[[ Keyset 'Window' Tests ]]] + +Keyset 'Window' - 32-bit key, 25-bit window - 32 tests, 33554432 keys per test +Window at 0 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 1 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 2 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 3 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 4 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 5 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 6 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 7 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 8 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 9 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 10 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 11 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 12 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 13 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 14 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 15 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 16 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 17 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 18 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 19 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 20 - Testing collisions ( 64-bit) - 
Expected 0.0, actual 0 (0.00x) +Window at 21 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 22 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 23 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 24 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 25 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 26 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 27 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 28 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 29 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 30 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 31 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 32 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) + +[[[ Keyset 'Cyclic' Tests ]]] + +Keyset 'Cyclic' - 8 cycles of 8 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 116.4, actual 119 (1.02x) (3) +Testing collisions (high 23-34 bits) - Worst is 32 bits: 119/116 (1.02x) +Testing collisions (high 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (high 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing collisions (low 32-bit) - Expected 116.4, actual 118 (1.01x) (2) +Testing collisions (low 23-34 bits) - Worst is 30 bits: 476/465 (1.02x) +Testing collisions (low 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (low 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing distribution - Worst bias is the 17-bit window at bit 44 - 0.134% + +Keyset 'Cyclic' - 8 cycles of 9 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 116.4, actual 127 (1.09x) (11) +Testing 
collisions (high 23-34 bits) - Worst is 31 bits: 262/232 (1.13x) +Testing collisions (high 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (high 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing collisions (low 32-bit) - Expected 116.4, actual 112 (0.96x) +Testing collisions (low 23-34 bits) - Worst is 34 bits: 33/29 (1.13x) +Testing collisions (low 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (low 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing distribution - Worst bias is the 17-bit window at bit 32 - 0.141% + +Keyset 'Cyclic' - 8 cycles of 10 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 116.4, actual 131 (1.13x) (15) +Testing collisions (high 23-34 bits) - Worst is 34 bits: 33/29 (1.13x) +Testing collisions (high 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (high 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing collisions (low 32-bit) - Expected 116.4, actual 109 (0.94x) +Testing collisions (low 23-34 bits) - Worst is 34 bits: 37/29 (1.27x) +Testing collisions (low 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (low 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing distribution - Worst bias is the 17-bit window at bit 16 - 0.122% + +Keyset 'Cyclic' - 8 cycles of 11 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 116.4, actual 104 (0.89x) +Testing collisions (high 23-34 bits) - Worst is 34 bits: 31/29 (1.07x) +Testing collisions (high 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (high 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing collisions (low 32-bit) - Expected 116.4, actual 124 (1.07x) (8) +Testing collisions (low 23-34 bits) - Worst is 34 bits: 34/29 (1.17x) +Testing collisions (low 12-bit) - Expected 995904.0, actual 
995904 (1.00x) +Testing collisions (low 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing distribution - Worst bias is the 17-bit window at bit 47 - 0.137% + +Keyset 'Cyclic' - 8 cycles of 12 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 116.4, actual 113 (0.97x) +Testing collisions (high 23-34 bits) - Worst is 28 bits: 1877/1862 (1.01x) +Testing collisions (high 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (high 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing collisions (low 32-bit) - Expected 116.4, actual 113 (0.97x) +Testing collisions (low 23-34 bits) - Worst is 34 bits: 38/29 (1.31x) +Testing collisions (low 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (low 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing distribution - Worst bias is the 17-bit window at bit 53 - 0.109% + +Keyset 'Cyclic' - 8 cycles of 16 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 116.4, actual 122 (1.05x) (6) +Testing collisions (high 23-34 bits) - Worst is 34 bits: 31/29 (1.07x) +Testing collisions (high 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (high 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing collisions (low 32-bit) - Expected 116.4, actual 112 (0.96x) +Testing collisions (low 23-34 bits) - Worst is 33 bits: 62/58 (1.07x) +Testing collisions (low 12-bit) - Expected 995904.0, actual 995904 (1.00x) +Testing collisions (low 8-bit) - Expected 999744.0, actual 999744 (1.00x) +Testing distribution - Worst bias is the 17-bit window at bit 37 - 0.086% + + +[[[ Keyset 'TwoBytes' Tests ]]] + +Keyset 'TwoBytes' - up-to-4-byte keys, 652545 total keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 49.6, actual 61 (1.23x) (12) +Testing collisions (high 
23-33 bits) - Worst is 33 bits: 31/24 (1.25x) +Testing collisions (high 12-bit) - Expected 648449.0, actual 648449 (1.00x) +Testing collisions (high 8-bit) - Expected 652289.0, actual 652289 (1.00x) +Testing collisions (low 32-bit) - Expected 49.6, actual 53 (1.07x) (4) +Testing collisions (low 23-33 bits) - Worst is 32 bits: 53/49 (1.07x) +Testing collisions (low 12-bit) - Expected 648449.0, actual 648449 (1.00x) +Testing collisions (low 8-bit) - Expected 652289.0, actual 652289 (1.00x) +Testing distribution - Worst bias is the 15-bit window at bit 54 - 0.125% + +Keyset 'TwoBytes' - up-to-8-byte keys, 5471025 total keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 3484.6, actual 3456 (0.99x) (-28) +Testing collisions (high 26-39 bits) - Worst is 34 bits: 878/871 (1.01x) +Testing collisions (high 12-bit) - Expected 5466929.0, actual 5466929 (1.00x) +Testing collisions (high 8-bit) - Expected 5470769.0, actual 5470769 (1.00x) +Testing collisions (low 32-bit) - Expected 3484.6, actual 3430 (0.98x) (-54) +Testing collisions (low 26-39 bits) - Worst is 35 bits: 441/435 (1.01x) +Testing collisions (low 12-bit) - Expected 5466929.0, actual 5466929 (1.00x) +Testing collisions (low 8-bit) - Expected 5470769.0, actual 5470769 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 30 - 0.072% + +Keyset 'TwoBytes' - up-to-12-byte keys, 18616785 total keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 40347.8, actual 40130 (0.99x) (-217) +Testing collisions (high 27-42 bits) - Worst is 32 bits: 40130/40347 (0.99x) +Testing collisions (high 12-bit) - Expected 18612689.0, actual 18612689 (1.00x) +Testing collisions (high 8-bit) - Expected 18616529.0, actual 18616529 (1.00x) +Testing collisions (low 32-bit) - Expected 40347.8, actual 40032 (0.99x) (-315) +Testing collisions (low 27-42 bits) - Worst is 42 bits: 46/39 (1.17x) +Testing 
collisions (low 12-bit) - Expected 18612689.0, actual 18612689 (1.00x) +Testing collisions (low 8-bit) - Expected 18616529.0, actual 18616529 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 40 - 0.023% + +Keyset 'TwoBytes' - up-to-16-byte keys, 44251425 total keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 227963.2, actual 226681 (0.99x) (-1282) +Testing collisions (high 29-45 bits) - Worst is 45 bits: 31/27 (1.11x) +Testing collisions (high 12-bit) - Expected 44247329.0, actual 44247329 (1.00x) +Testing collisions (high 8-bit) - Expected 44251169.0, actual 44251169 (1.00x) +Testing collisions (low 32-bit) - Expected 227963.2, actual 227005 (1.00x) (-958) +Testing collisions (low 29-45 bits) - Worst is 43 bits: 130/111 (1.17x) +Testing collisions (low 12-bit) - Expected 44247329.0, actual 44247329 (1.00x) +Testing collisions (low 8-bit) - Expected 44251169.0, actual 44251169 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 44 - 0.009% + +Keyset 'TwoBytes' - up-to-20-byte keys, 86536545 total keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 871784.7, actual 865470 (0.99x) (-6314) +Testing collisions (high 30-47 bits) - Worst is 41 bits: 1794/1702 (1.05x) +Testing collisions (high 12-bit) - Expected 86532449.0, actual 86532449 (1.00x) +Testing collisions (high 8-bit) - Expected 86536289.0, actual 86536289 (1.00x) +Testing collisions (low 32-bit) - Expected 871784.7, actual 867660 (1.00x) (-4124) +Testing collisions (low 30-47 bits) - Worst is 43 bits: 452/425 (1.06x) +Testing collisions (low 12-bit) - Expected 86532449.0, actual 86532449 (1.00x) +Testing collisions (low 8-bit) - Expected 86536289.0, actual 86536289 (1.00x) +Testing distribution - Worst bias is the 19-bit window at bit 46 - 0.004% + + +[[[ Keyset 'Text' Tests ]]] + +Keyset 'Text' - keys of form "FooXXXXBar" - 14776336 keys 
+Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 25418.1, actual 25256 (0.99x) (-162) +Testing collisions (high 27-42 bits) - Worst is 42 bits: 30/24 (1.21x) +Testing collisions (high 12-bit) - Expected 14772240.0, actual 14772240 (1.00x) +Testing collisions (high 8-bit) - Expected 14776080.0, actual 14776080 (1.00x) +Testing collisions (low 32-bit) - Expected 25418.1, actual 25375 (1.00x) (-43) +Testing collisions (low 27-42 bits) - Worst is 40 bits: 112/99 (1.13x) +Testing collisions (low 12-bit) - Expected 14772240.0, actual 14772240 (1.00x) +Testing collisions (low 8-bit) - Expected 14776080.0, actual 14776080 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 61 - 0.024% + +Keyset 'Text' - keys of form "FooBarXXXX" - 14776336 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 25418.1, actual 25878 (1.02x) (460) +Testing collisions (high 27-42 bits) - Worst is 33 bits: 12976/12709 (1.02x) +Testing collisions (high 12-bit) - Expected 14772240.0, actual 14772240 (1.00x) +Testing collisions (high 8-bit) - Expected 14776080.0, actual 14776080 (1.00x) +Testing collisions (low 32-bit) - Expected 25418.1, actual 25540 (1.00x) (122) +Testing collisions (low 27-42 bits) - Worst is 38 bits: 416/397 (1.05x) +Testing collisions (low 12-bit) - Expected 14772240.0, actual 14772240 (1.00x) +Testing collisions (low 8-bit) - Expected 14776080.0, actual 14776080 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 27 - 0.023% + +Keyset 'Text' - keys of form "XXXXFooBar" - 14776336 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 25418.1, actual 25266 (0.99x) (-152) +Testing collisions (high 27-42 bits) - Worst is 34 bits: 6369/6354 (1.00x) +Testing collisions (high 12-bit) - Expected 14772240.0, actual 14772240 (1.00x) +Testing collisions (high 8-bit) - 
Expected 14776080.0, actual 14776080 (1.00x) +Testing collisions (low 32-bit) - Expected 25418.1, actual 25162 (0.99x) (-256) +Testing collisions (low 27-42 bits) - Worst is 40 bits: 101/99 (1.02x) +Testing collisions (low 12-bit) - Expected 14772240.0, actual 14772240 (1.00x) +Testing collisions (low 8-bit) - Expected 14776080.0, actual 14776080 (1.00x) +Testing distribution - Worst bias is the 20-bit window at bit 10 - 0.016% + +Keyset 'Words' - 4000000 random keys of len 6-16 from alnum charset +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1862.6, actual 1923 (1.03x) (61) +Testing collisions (high 25-38 bits) - Worst is 37 bits: 65/58 (1.12x) +Testing collisions (high 12-bit) - Expected 3995904.0, actual 3995904 (1.00x) +Testing collisions (high 8-bit) - Expected 3999744.0, actual 3999744 (1.00x) +Testing collisions (low 32-bit) - Expected 1862.6, actual 1876 (1.01x) (14) +Testing collisions (low 25-38 bits) - Worst is 32 bits: 1876/1862 (1.01x) +Testing collisions (low 12-bit) - Expected 3995904.0, actual 3995904 (1.00x) +Testing collisions (low 8-bit) - Expected 3999744.0, actual 3999744 (1.00x) +Testing distribution - Worst bias is the 19-bit window at bit 1 - 0.075% + +Keyset 'Words' - 4000000 random keys of len 6-16 from password charset +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1862.6, actual 1884 (1.01x) (22) +Testing collisions (high 25-38 bits) - Worst is 35 bits: 253/232 (1.09x) +Testing collisions (high 12-bit) - Expected 3995904.0, actual 3995904 (1.00x) +Testing collisions (high 8-bit) - Expected 3999744.0, actual 3999744 (1.00x) +Testing collisions (low 32-bit) - Expected 1862.6, actual 1858 (1.00x) (-4) +Testing collisions (low 25-38 bits) - Worst is 36 bits: 140/116 (1.20x) +Testing collisions (low 12-bit) - Expected 3995904.0, actual 3995904 (1.00x) +Testing collisions (low 8-bit) - Expected 3999744.0, actual 3999744 
(1.00x) +Testing distribution - Worst bias is the 18-bit window at bit 47 - 0.053% + +Keyset 'Words' - 102401 dict words +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1.2, actual 1 (0.82x) +Testing collisions (high 20-27 bits) - Worst is 24 bits: 333/312 (1.07x) +Testing collisions (high 12-bit) - Expected 98305.0, actual 98305 (1.00x) +Testing collisions (high 8-bit) - Expected 102145.0, actual 102145 (1.00x) +Testing collisions (low 32-bit) - Expected 1.2, actual 1 (0.82x) +Testing collisions (low 20-27 bits) - Worst is 22 bits: 1284/1250 (1.03x) +Testing collisions (low 12-bit) - Expected 98305.0, actual 98305 (1.00x) +Testing collisions (low 8-bit) - Expected 102145.0, actual 102145 (1.00x) +Testing distribution - Worst bias is the 14-bit window at bit 28 - 0.409% + + +[[[ Keyset 'Zeroes' Tests ]]] + +Keyset 'Zeroes' - 204800 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 4.9, actual 6 (1.23x) (2) +Testing collisions (high 21-29 bits) - Worst is 28 bits: 82/78 (1.05x) +Testing collisions (high 12-bit) - Expected 200704.0, actual 200704 (1.00x) +Testing collisions (high 8-bit) - Expected 204544.0, actual 204544 (1.00x) +Testing collisions (low 32-bit) - Expected 4.9, actual 5 (1.02x) (1) +Testing collisions (low 21-29 bits) - Worst is 29 bits: 40/39 (1.02x) +Testing collisions (low 12-bit) - Expected 200704.0, actual 200704 (1.00x) +Testing collisions (low 8-bit) - Expected 204544.0, actual 204544 (1.00x) +Testing distribution - Worst bias is the 14-bit window at bit 50 - 0.267% + + +[[[ Keyset 'Seed' Tests ]]] + +Keyset 'Seed' - 5000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 2910.4, actual 2988 (1.03x) (78) +Testing collisions (high 26-39 bits) - Worst is 39 bits: 27/22 (1.19x) +Testing collisions (high 12-bit) - Expected 4995904.0, actual 4995904 (1.00x) 
+Testing collisions (high 8-bit) - Expected 4999744.0, actual 4999744 (1.00x) +Testing collisions (low 32-bit) - Expected 2910.4, actual 3040 (1.04x) (130) +Testing collisions (low 26-39 bits) - Worst is 33 bits: 1528/1455 (1.05x) +Testing collisions (low 12-bit) - Expected 4995904.0, actual 4995904 (1.00x) +Testing collisions (low 8-bit) - Expected 4999744.0, actual 4999744 (1.00x) +Testing distribution - Worst bias is the 19-bit window at bit 8 - 0.045% + + +[[[ Keyset 'PerlinNoise' Tests ]]] + +Testing 16777216 coordinates (L2) : +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 32768.0, actual 32864 (1.00x) (97) +Testing collisions (high 27-42 bits) - Worst is 38 bits: 548/511 (1.07x) +Testing collisions (high 12-bit) - Expected 16773120.0, actual 16773120 (1.00x) +Testing collisions (high 8-bit) - Expected 16776960.0, actual 16776960 (1.00x) +Testing collisions (low 32-bit) - Expected 32768.0, actual 32219 (0.98x) (-548) +Testing collisions (low 27-42 bits) - Worst is 42 bits: 44/31 (1.38x) +Testing collisions (low 12-bit) - Expected 16773120.0, actual 16773120 (1.00x) +Testing collisions (low 8-bit) - Expected 16776960.0, actual 16776960 (1.00x) + + +[[[ Diff 'Differential' Tests ]]] + +Testing 8303632 up-to-5-bit differentials in 64-bit keys -> 64 bit hashes. +1000 reps, 8303632000 total tests, expecting 0.00 random collisions.......... +0 total collisions, of which 0 single collisions were ignored + +Testing 11017632 up-to-4-bit differentials in 128-bit keys -> 64 bit hashes. +1000 reps, 11017632000 total tests, expecting 0.00 random collisions.......... +0 total collisions, of which 0 single collisions were ignored + +Testing 2796416 up-to-3-bit differentials in 256-bit keys -> 64 bit hashes. +1000 reps, 2796416000 total tests, expecting 0.00 random collisions.......... 
+0 total collisions, of which 0 single collisions were ignored + + +[[[ DiffDist 'Differential Distribution' Tests ]]] + +Testing bit 0 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 503 (0.98x) (-8) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 37/31 (1.16x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 499 (0.97x) +Testing collisions (low 24-36 bits) - Worst is 31 bits: 1028/1023 (1.00x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 1 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 550 (1.07x) (39) +Testing collisions (high 24-36 bits) - Worst is 34 bits: 147/127 (1.15x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 504 (0.98x) (-7) +Testing collisions (low 24-36 bits) - Worst is 26 bits: 32606/32767 (1.00x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 2 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 490 (0.96x) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 67/63 (1.05x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 522 (1.02x) (11) +Testing collisions (low 24-36 bits) - Worst 
is 36 bits: 37/31 (1.16x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 3 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 483 (0.94x) +Testing collisions (high 24-36 bits) - Worst is 28 bits: 8201/8191 (1.00x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 519 (1.01x) (8) +Testing collisions (low 24-36 bits) - Worst is 35 bits: 86/63 (1.34x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 4 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 476 (0.93x) +Testing collisions (high 24-36 bits) - Worst is 30 bits: 2026/2047 (0.99x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 511 (1.00x) +Testing collisions (low 24-36 bits) - Worst is 34 bits: 145/127 (1.13x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 5 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 497 (0.97x) +Testing collisions (high 24-36 bits) - Worst is 29 bits: 4206/4095 (1.03x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 488 
(0.95x) +Testing collisions (low 24-36 bits) - Worst is 34 bits: 141/127 (1.10x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 6 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 506 (0.99x) (-5) +Testing collisions (high 24-36 bits) - Worst is 34 bits: 137/127 (1.07x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 512 (1.00x) (1) +Testing collisions (low 24-36 bits) - Worst is 28 bits: 8292/8191 (1.01x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 7 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 481 (0.94x) +Testing collisions (high 24-36 bits) - Worst is 30 bits: 2011/2047 (0.98x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 551 (1.08x) (40) +Testing collisions (low 24-36 bits) - Worst is 32 bits: 551/511 (1.08x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 8 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 550 (1.07x) (39) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 47/31 (1.47x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) 
+Testing collisions (low 32-bit) - Expected 512.0, actual 499 (0.97x) +Testing collisions (low 24-36 bits) - Worst is 34 bits: 139/127 (1.09x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 9 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 493 (0.96x) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 66/63 (1.03x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 537 (1.05x) (26) +Testing collisions (low 24-36 bits) - Worst is 33 bits: 299/255 (1.17x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 10 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 555 (1.08x) (44) +Testing collisions (high 24-36 bits) - Worst is 33 bits: 281/255 (1.10x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 495 (0.97x) +Testing collisions (low 24-36 bits) - Worst is 31 bits: 1048/1023 (1.02x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 11 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 459 (0.90x) +Testing collisions (high 24-36 bits) - Worst is 29 bits: 4100/4095 (1.00x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions 
(high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 488 (0.95x) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 42/31 (1.31x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 12 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 491 (0.96x) +Testing collisions (high 24-36 bits) - Worst is 29 bits: 4167/4095 (1.02x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 480 (0.94x) +Testing collisions (low 24-36 bits) - Worst is 30 bits: 2089/2047 (1.02x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 13 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 497 (0.97x) +Testing collisions (high 24-36 bits) - Worst is 34 bits: 132/127 (1.03x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 553 (1.08x) (42) +Testing collisions (low 24-36 bits) - Worst is 32 bits: 553/511 (1.08x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 14 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 538 (1.05x) (27) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 34/31 (1.06x) +Testing collisions (high 12-bit) - Expected 
2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 508 (0.99x) (-3) +Testing collisions (low 24-36 bits) - Worst is 33 bits: 271/255 (1.06x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 15 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 534 (1.04x) (23) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 36/31 (1.13x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 530 (1.04x) (19) +Testing collisions (low 24-36 bits) - Worst is 33 bits: 267/255 (1.04x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 16 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 481 (0.94x) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 33/31 (1.03x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 498 (0.97x) +Testing collisions (low 24-36 bits) - Worst is 29 bits: 4135/4095 (1.01x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 17 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 526 (1.03x) (15) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 
80/63 (1.25x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 477 (0.93x) +Testing collisions (low 24-36 bits) - Worst is 35 bits: 64/63 (1.00x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 18 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 483 (0.94x) +Testing collisions (high 24-36 bits) - Worst is 28 bits: 8326/8191 (1.02x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 490 (0.96x) +Testing collisions (low 24-36 bits) - Worst is 29 bits: 4095/4095 (1.00x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 19 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 492 (0.96x) +Testing collisions (high 24-36 bits) - Worst is 34 bits: 136/127 (1.06x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 529 (1.03x) (18) +Testing collisions (low 24-36 bits) - Worst is 35 bits: 79/63 (1.23x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 20 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 494 (0.96x) +Testing 
collisions (high 24-36 bits) - Worst is 35 bits: 67/63 (1.05x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 511 (1.00x) +Testing collisions (low 24-36 bits) - Worst is 34 bits: 137/127 (1.07x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 21 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 494 (0.96x) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 45/31 (1.41x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 523 (1.02x) (12) +Testing collisions (low 24-36 bits) - Worst is 30 bits: 2124/2047 (1.04x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 22 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 497 (0.97x) +Testing collisions (high 24-36 bits) - Worst is 34 bits: 129/127 (1.01x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 499 (0.97x) +Testing collisions (low 24-36 bits) - Worst is 33 bits: 258/255 (1.01x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 23 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) 
- Expected 512.0, actual 520 (1.02x) (9) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 37/31 (1.16x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 515 (1.01x) (4) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 39/31 (1.22x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 24 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 496 (0.97x) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 37/31 (1.16x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 486 (0.95x) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 36/31 (1.13x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 25 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 515 (1.01x) (4) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 35/31 (1.09x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 511 (1.00x) +Testing collisions (low 24-36 bits) - Worst is 28 bits: 8241/8191 (1.01x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 26 +Testing collisions ( 64-bit) - Expected 0.0, 
actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 525 (1.03x) (14) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 88/63 (1.38x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 500 (0.98x) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 43/31 (1.34x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 27 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 496 (0.97x) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 70/63 (1.09x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 516 (1.01x) (5) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 38/31 (1.19x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 28 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 523 (1.02x) (12) +Testing collisions (high 24-36 bits) - Worst is 32 bits: 523/511 (1.02x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 512 (1.00x) (1) +Testing collisions (low 24-36 bits) - Worst is 30 bits: 2100/2047 (1.03x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + 
+Testing bit 29 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 534 (1.04x) (23) +Testing collisions (high 24-36 bits) - Worst is 32 bits: 534/511 (1.04x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 494 (0.96x) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 34/31 (1.06x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 30 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 505 (0.99x) (-6) +Testing collisions (high 24-36 bits) - Worst is 29 bits: 4057/4095 (0.99x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 499 (0.97x) +Testing collisions (low 24-36 bits) - Worst is 35 bits: 68/63 (1.06x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 31 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 526 (1.03x) (15) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 36/31 (1.13x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 517 (1.01x) (6) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 40/31 (1.25x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions 
(low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 32 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 467 (0.91x) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 41/31 (1.28x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 512 (1.00x) (1) +Testing collisions (low 24-36 bits) - Worst is 34 bits: 142/127 (1.11x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 33 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 518 (1.01x) (7) +Testing collisions (high 24-36 bits) - Worst is 33 bits: 267/255 (1.04x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 518 (1.01x) (7) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 40/31 (1.25x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 34 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 540 (1.05x) (29) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 49/31 (1.53x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 556 (1.09x) (45) +Testing collisions (low 24-36 bits) - Worst is 32 bits: 556/511 (1.09x) +Testing collisions (low 12-bit) - 
Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 35 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 486 (0.95x) +Testing collisions (high 24-36 bits) - Worst is 30 bits: 2057/2047 (1.00x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 504 (0.98x) (-7) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 33/31 (1.03x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 36 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 514 (1.00x) (3) +Testing collisions (high 24-36 bits) - Worst is 31 bits: 1035/1023 (1.01x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 462 (0.90x) +Testing collisions (low 24-36 bits) - Worst is 35 bits: 64/63 (1.00x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 37 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 496 (0.97x) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 65/63 (1.02x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 506 (0.99x) (-5) +Testing collisions (low 24-36 bits) - Worst is 35 
bits: 73/63 (1.14x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 38 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 489 (0.96x) +Testing collisions (high 24-36 bits) - Worst is 30 bits: 2065/2047 (1.01x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 490 (0.96x) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 39/31 (1.22x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 39 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 539 (1.05x) (28) +Testing collisions (high 24-36 bits) - Worst is 33 bits: 271/255 (1.06x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 514 (1.00x) (3) +Testing collisions (low 24-36 bits) - Worst is 32 bits: 514/511 (1.00x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 40 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 484 (0.95x) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 36/31 (1.13x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 515 (1.01x) 
(4) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 37/31 (1.16x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 41 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 505 (0.99x) (-6) +Testing collisions (high 24-36 bits) - Worst is 34 bits: 137/127 (1.07x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 520 (1.02x) (9) +Testing collisions (low 24-36 bits) - Worst is 35 bits: 67/63 (1.05x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 42 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 506 (0.99x) (-5) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 90/63 (1.41x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 566 (1.11x) (55) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 39/31 (1.22x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 43 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 541 (1.06x) (30) +Testing collisions (high 24-36 bits) - Worst is 32 bits: 541/511 (1.06x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) 
+Testing collisions (low 32-bit) - Expected 512.0, actual 480 (0.94x) +Testing collisions (low 24-36 bits) - Worst is 27 bits: 16264/16383 (0.99x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 44 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 474 (0.93x) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 38/31 (1.19x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 496 (0.97x) +Testing collisions (low 24-36 bits) - Worst is 30 bits: 2097/2047 (1.02x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 45 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 497 (0.97x) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 65/63 (1.02x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 557 (1.09x) (46) +Testing collisions (low 24-36 bits) - Worst is 32 bits: 557/511 (1.09x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 46 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 514 (1.00x) (3) +Testing collisions (high 24-36 bits) - Worst is 32 bits: 514/511 (1.00x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions 
(high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 540 (1.05x) (29) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 43/31 (1.34x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 47 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 520 (1.02x) (9) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 39/31 (1.22x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 540 (1.05x) (29) +Testing collisions (low 24-36 bits) - Worst is 35 bits: 81/63 (1.27x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 48 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 533 (1.04x) (22) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 75/63 (1.17x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 513 (1.00x) (2) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 40/31 (1.25x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 49 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 522 (1.02x) (11) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 37/31 (1.16x) +Testing collisions (high 12-bit) - 
Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 491 (0.96x) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 40/31 (1.25x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 50 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 527 (1.03x) (16) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 35/31 (1.09x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 545 (1.06x) (34) +Testing collisions (low 24-36 bits) - Worst is 34 bits: 168/127 (1.31x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 51 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 505 (0.99x) (-6) +Testing collisions (high 24-36 bits) - Worst is 33 bits: 280/255 (1.09x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 492 (0.96x) +Testing collisions (low 24-36 bits) - Worst is 35 bits: 74/63 (1.16x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 52 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 468 (0.91x) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 
36/31 (1.13x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 509 (0.99x) (-2) +Testing collisions (low 24-36 bits) - Worst is 31 bits: 1028/1023 (1.00x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 53 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 495 (0.97x) +Testing collisions (high 24-36 bits) - Worst is 28 bits: 8270/8191 (1.01x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 486 (0.95x) +Testing collisions (low 24-36 bits) - Worst is 29 bits: 4190/4095 (1.02x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 54 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 513 (1.00x) (2) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 77/63 (1.20x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 477 (0.93x) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 39/31 (1.22x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 55 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 509 (0.99x) (-2) 
+Testing collisions (high 24-36 bits) - Worst is 35 bits: 71/63 (1.11x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 527 (1.03x) (16) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 34/31 (1.06x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 56 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 487 (0.95x) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 39/31 (1.22x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 485 (0.95x) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 41/31 (1.28x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 57 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 485 (0.95x) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 41/31 (1.28x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 545 (1.06x) (34) +Testing collisions (low 24-36 bits) - Worst is 35 bits: 73/63 (1.14x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 58 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 
32-bit) - Expected 512.0, actual 509 (0.99x) (-2) +Testing collisions (high 24-36 bits) - Worst is 34 bits: 135/127 (1.05x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 511 (1.00x) +Testing collisions (low 24-36 bits) - Worst is 30 bits: 2106/2047 (1.03x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 59 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 509 (0.99x) (-2) +Testing collisions (high 24-36 bits) - Worst is 31 bits: 1032/1023 (1.01x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 505 (0.99x) (-6) +Testing collisions (low 24-36 bits) - Worst is 29 bits: 4237/4095 (1.03x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 60 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 499 (0.97x) +Testing collisions (high 24-36 bits) - Worst is 28 bits: 8290/8191 (1.01x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 526 (1.03x) (15) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 41/31 (1.28x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 61 +Testing collisions 
( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 534 (1.04x) (23) +Testing collisions (high 24-36 bits) - Worst is 34 bits: 134/127 (1.05x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 508 (0.99x) (-3) +Testing collisions (low 24-36 bits) - Worst is 31 bits: 1035/1023 (1.01x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 62 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 530 (1.04x) (19) +Testing collisions (high 24-36 bits) - Worst is 32 bits: 530/511 (1.04x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 517 (1.01x) (6) +Testing collisions (low 24-36 bits) - Worst is 36 bits: 42/31 (1.31x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) + +Testing bit 63 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 512.0, actual 521 (1.02x) (10) +Testing collisions (high 24-36 bits) - Worst is 36 bits: 43/31 (1.34x) +Testing collisions (high 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (high 8-bit) - Expected 2096896.0, actual 2096896 (1.00x) +Testing collisions (low 32-bit) - Expected 512.0, actual 472 (0.92x) +Testing collisions (low 24-36 bits) - Worst is 28 bits: 8141/8191 (0.99x) +Testing collisions (low 12-bit) - Expected 2093056.0, actual 2093056 (1.00x) +Testing collisions (low 8-bit) - Expected 
2096896.0, actual 2096896 (1.00x) + + +[[[ MomentChi2 Tests ]]] + +Analyze hashes produced from a serie of linearly increasing numbers of 32-bit, using a step of 3 ... +Target values to approximate : 38918200.000000 - 410450.000000 +Popcount 1 stats : 38919121.989370 - 410434.062934 +Popcount 0 stats : 38918028.259881 - 410423.768513 +MomentChi2 for bits 1 : 1.03555 +MomentChi2 for bits 0 : 0.0359308 + +Derivative stats (transition from 2 consecutive values) : +Popcount 1 stats : 38919523.956012 - 410475.202934 +Popcount 0 stats : 38918557.696824 - 410478.877341 +MomentChi2 for deriv b1 : 2.13522 +MomentChi2 for deriv b0 : 0.155856 + + Great !! + + +[[[ Prng Tests ]]] + +Generating 33554432 random numbers : +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 131072.0, actual 130471 (1.00x) (-600) +Testing collisions (high 28-44 bits) - Worst is 42 bits: 130/127 (1.02x) +Testing collisions (high 12-bit) - Expected 33550336.0, actual 33550336 (1.00x) +Testing collisions (high 8-bit) - Expected 33554176.0, actual 33554176 (1.00x) +Testing collisions (low 32-bit) - Expected 131072.0, actual 130871 (1.00x) (-200) +Testing collisions (low 28-44 bits) - Worst is 43 bits: 70/63 (1.09x) +Testing collisions (low 12-bit) - Expected 33550336.0, actual 33550336 (1.00x) +Testing collisions (low 8-bit) - Expected 33554176.0, actual 33554176 (1.00x) + + +Input vcode 0x00000001, Output vcode 0x00000001, Result vcode 0x00000001 +Verification value is 0x00000001 - Testing took 584.720469 seconds +------------------------------------------------------------------------------- diff --git a/src/rust/vendor/ahash/smhasher/clone_smhasher.sh b/src/rust/vendor/ahash/smhasher/clone_smhasher.sh new file mode 100755 index 000000000..e3290d2df --- /dev/null +++ b/src/rust/vendor/ahash/smhasher/clone_smhasher.sh @@ -0,0 +1 @@ +git clone https://github.com/rurban/smhasher.git && cd smhasher && git apply ../0001-Add-support-for-aHash.patch 
diff --git a/src/rust/vendor/ahash/smhasher/fallbackOutput.txt b/src/rust/vendor/ahash/smhasher/fallbackOutput.txt new file mode 100644 index 000000000..cb324c5da --- /dev/null +++ b/src/rust/vendor/ahash/smhasher/fallbackOutput.txt @@ -0,0 +1,1467 @@ +------------------------------------------------------------------------------- +--- Testing ahash64 "ahash 64bit" GOOD + +[[[ Sanity Tests ]]] + +Verification value 0x52EC0BA4 ....... SKIP (self- or unseeded) +Running sanity check 1 .......... PASS +Running AppendedZeroesTest .......... PASS + +[[[ Speed Tests ]]] + +Bulk speed test - 262144-byte keys +Alignment 7 - 8.506 bytes/cycle - 24336.28 MiB/sec @ 3 ghz +Alignment 6 - 8.505 bytes/cycle - 24333.38 MiB/sec @ 3 ghz +Alignment 5 - 8.500 bytes/cycle - 24317.30 MiB/sec @ 3 ghz +Alignment 4 - 8.491 bytes/cycle - 24294.09 MiB/sec @ 3 ghz +Alignment 3 - 8.491 bytes/cycle - 24293.90 MiB/sec @ 3 ghz +Alignment 2 - 8.492 bytes/cycle - 24296.22 MiB/sec @ 3 ghz +Alignment 1 - 8.508 bytes/cycle - 24340.25 MiB/sec @ 3 ghz +Alignment 0 - 8.748 bytes/cycle - 25028.73 MiB/sec @ 3 ghz +Average - 8.530 bytes/cycle - 24405.02 MiB/sec @ 3 ghz + +Small key speed test - 1-byte keys - 14.97 cycles/hash +Small key speed test - 2-byte keys - 15.00 cycles/hash +Small key speed test - 3-byte keys - 15.00 cycles/hash +Small key speed test - 4-byte keys - 15.00 cycles/hash +Small key speed test - 5-byte keys - 16.00 cycles/hash +Small key speed test - 6-byte keys - 16.00 cycles/hash +Small key speed test - 7-byte keys - 16.11 cycles/hash +Small key speed test - 8-byte keys - 15.00 cycles/hash +Small key speed test - 9-byte keys - 19.04 cycles/hash +Small key speed test - 10-byte keys - 19.70 cycles/hash +Small key speed test - 11-byte keys - 19.43 cycles/hash +Small key speed test - 12-byte keys - 19.54 cycles/hash +Small key speed test - 13-byte keys - 19.65 cycles/hash +Small key speed test - 14-byte keys - 19.45 cycles/hash +Small key speed test - 15-byte keys - 19.00 cycles/hash +Small 
key speed test - 16-byte keys - 19.45 cycles/hash +Small key speed test - 17-byte keys - 19.84 cycles/hash +Small key speed test - 18-byte keys - 19.65 cycles/hash +Small key speed test - 19-byte keys - 19.36 cycles/hash +Small key speed test - 20-byte keys - 19.74 cycles/hash +Small key speed test - 21-byte keys - 19.56 cycles/hash +Small key speed test - 22-byte keys - 20.11 cycles/hash +Small key speed test - 23-byte keys - 20.08 cycles/hash +Small key speed test - 24-byte keys - 20.29 cycles/hash +Small key speed test - 25-byte keys - 20.55 cycles/hash +Small key speed test - 26-byte keys - 20.42 cycles/hash +Small key speed test - 27-byte keys - 20.43 cycles/hash +Small key speed test - 28-byte keys - 20.37 cycles/hash +Small key speed test - 29-byte keys - 20.42 cycles/hash +Small key speed test - 30-byte keys - 20.42 cycles/hash +Small key speed test - 31-byte keys - 20.37 cycles/hash +Average 18.708 cycles/hash + +[[[ 'Hashmap' Speed Tests ]]] + +std::unordered_map +Init std HashMapTest: 295.723 cycles/op (102401 inserts, 1% deletions) +Running std HashMapTest: 124.234 cycles/op (1.7 stdv) + +greg7mdp/parallel-hashmap +Init fast HashMapTest: 112.031 cycles/op (102401 inserts, 1% deletions) +Running fast HashMapTest: 85.002 cycles/op (2.1 stdv) ....... 
PASS + +[[[ Avalanche Tests ]]] + +Testing 24-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.700000% +Testing 32-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.628000% +Testing 40-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.628667% +Testing 48-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.662000% +Testing 56-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.699333% +Testing 64-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.665333% +Testing 72-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.630667% +Testing 80-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.692000% +Testing 96-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.774000% +Testing 112-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.730667% +Testing 128-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.794000% +Testing 160-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.702000% +Testing 512-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.763333% +Testing 1024-bit keys -> 64-bit hashes, 300000 reps worst bias is 0.816667% + +[[[ Keyset 'Sparse' Tests ]]] + +Keyset 'Sparse' - 16-bit keys with up to 9 bits set - 50643 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 0.6, actual 0 (0.00x) +Testing collisions (high 19-26 bits) - Worst is 22 bits: 320/611 (0.52x) +Testing collisions (high 12-bit) - Expected 50643.0, actual 46547 (0.92x) +Testing collisions (high 8-bit) - Expected 50643.0, actual 50387 (0.99x) (-256) +Testing collisions (low 32-bit) - Expected 0.6, actual 1 (1.67x) (1) +Testing collisions (low 19-26 bits) - Worst is 20 bits: 1168/2445 (0.48x) +Testing collisions (low 12-bit) - Expected 50643.0, actual 46547 (0.92x) +Testing collisions (low 8-bit) - Expected 50643.0, actual 50387 (0.99x) (-256) +Testing distribution - Worst bias is the 13-bit window at bit 4 - 0.462% + +Keyset 'Sparse' - 24-bit keys with up to 8 bits set - 1271626 keys 
+Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 376.5, actual 180 (0.48x) +Testing collisions (high 24-36 bits) - Worst is 35 bits: 26/47 (0.55x) +Testing collisions (high 12-bit) - Expected 1271626.0, actual 1267530 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 1271626.0, actual 1271370 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 376.5, actual 184 (0.49x) +Testing collisions (low 24-36 bits) - Worst is 34 bits: 52/94 (0.55x) +Testing collisions (low 12-bit) - Expected 1271626.0, actual 1267530 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 1271626.0, actual 1271370 (1.00x) (-256) +Testing distribution - Worst bias is the 17-bit window at bit 8 - 0.085% + +Keyset 'Sparse' - 32-bit keys with up to 7 bits set - 4514873 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 4746.0, actual 2412 (0.51x) +Testing collisions (high 26-39 bits) - Worst is 39 bits: 24/37 (0.65x) +Testing collisions (high 12-bit) - Expected 4514873.0, actual 4510777 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 4514873.0, actual 4514617 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 4746.0, actual 2445 (0.52x) +Testing collisions (low 26-39 bits) - Worst is 34 bits: 630/1186 (0.53x) +Testing collisions (low 12-bit) - Expected 4514873.0, actual 4510777 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 4514873.0, actual 4514617 (1.00x) (-256) +Testing distribution - Worst bias is the 19-bit window at bit 47 - 0.045% + +Keyset 'Sparse' - 40-bit keys with up to 6 bits set - 4598479 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 4923.4, actual 2402 (0.49x) +Testing collisions (high 26-39 bits) - Worst is 34 bits: 639/1230 (0.52x) +Testing collisions (high 12-bit) - Expected 4598479.0, actual 4594383 (1.00x) (-4096) +Testing collisions (high 
8-bit) - Expected 4598479.0, actual 4598223 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 4923.4, actual 2444 (0.50x) +Testing collisions (low 26-39 bits) - Worst is 39 bits: 22/38 (0.57x) +Testing collisions (low 12-bit) - Expected 4598479.0, actual 4594383 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 4598479.0, actual 4598223 (1.00x) (-256) +Testing distribution - Worst bias is the 19-bit window at bit 62 - 0.044% + +Keyset 'Sparse' - 48-bit keys with up to 6 bits set - 14196869 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 46927.3, actual 23533 (0.50x) +Testing collisions (high 28-43 bits) - Worst is 43 bits: 19/22 (0.83x) +Testing collisions (high 12-bit) - Expected 14196869.0, actual 14192773 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 14196869.0, actual 14196613 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 46927.3, actual 23338 (0.50x) +Testing collisions (low 28-43 bits) - Worst is 35 bits: 2947/5865 (0.50x) +Testing collisions (low 12-bit) - Expected 14196869.0, actual 14192773 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 14196869.0, actual 14196613 (1.00x) (-256) +Testing distribution - Worst bias is the 19-bit window at bit 45 - 0.021% + +Keyset 'Sparse' - 56-bit keys with up to 5 bits set - 4216423 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 4139.3, actual 2065 (0.50x) +Testing collisions (high 26-39 bits) - Worst is 39 bits: 22/32 (0.68x) +Testing collisions (high 12-bit) - Expected 4216423.0, actual 4212327 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 4216423.0, actual 4216167 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 4139.3, actual 1999 (0.48x) +Testing collisions (low 26-39 bits) - Worst is 31 bits: 4110/8278 (0.50x) +Testing collisions (low 12-bit) - Expected 4216423.0, actual 4212327 (1.00x) (-4096) +Testing 
collisions (low 8-bit) - Expected 4216423.0, actual 4216167 (1.00x) (-256) +Testing distribution - Worst bias is the 19-bit window at bit 26 - 0.049% + +Keyset 'Sparse' - 64-bit keys with up to 5 bits set - 8303633 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16053.7, actual 7972 (0.50x) +Testing collisions (high 27-41 bits) - Worst is 40 bits: 39/62 (0.62x) +Testing collisions (high 12-bit) - Expected 8303633.0, actual 8299537 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8303633.0, actual 8303377 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16053.7, actual 7866 (0.49x) +Testing collisions (low 27-41 bits) - Worst is 40 bits: 36/62 (0.57x) +Testing collisions (low 12-bit) - Expected 8303633.0, actual 8299537 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8303633.0, actual 8303377 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 61 - 0.047% + +Keyset 'Sparse' - 72-bit keys with up to 5 bits set - 15082603 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 52965.5, actual 26424 (0.50x) +Testing collisions (high 28-43 bits) - Worst is 42 bits: 32/51 (0.62x) +Testing collisions (high 12-bit) - Expected 15082603.0, actual 15078507 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 15082603.0, actual 15082347 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 52965.5, actual 26433 (0.50x) +Testing collisions (low 28-43 bits) - Worst is 42 bits: 34/51 (0.66x) +Testing collisions (low 12-bit) - Expected 15082603.0, actual 15078507 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 15082603.0, actual 15082347 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 59 - 0.022% + +Keyset 'Sparse' - 96-bit keys with up to 4 bits set - 3469497 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 
32-bit) - Expected 2802.7, actual 1406 (0.50x) +Testing collisions (high 26-39 bits) - Worst is 33 bits: 706/1401 (0.50x) +Testing collisions (high 12-bit) - Expected 3469497.0, actual 3465401 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 3469497.0, actual 3469241 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 2802.7, actual 1374 (0.49x) +Testing collisions (low 26-39 bits) - Worst is 37 bits: 44/87 (0.50x) +Testing collisions (low 12-bit) - Expected 3469497.0, actual 3465401 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 3469497.0, actual 3469241 (1.00x) (-256) +Testing distribution - Worst bias is the 19-bit window at bit 5 - 0.066% + +Keyset 'Sparse' - 160-bit keys with up to 4 bits set - 26977161 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 169446.5, actual 84730 (0.50x) +Testing collisions (high 29-45 bits) - Worst is 45 bits: 14/20 (0.68x) +Testing collisions (high 12-bit) - Expected 26977161.0, actual 26973065 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 26977161.0, actual 26976905 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 169446.5, actual 84408 (0.50x) +Testing collisions (low 29-45 bits) - Worst is 36 bits: 5329/10590 (0.50x) +Testing collisions (low 12-bit) - Expected 26977161.0, actual 26973065 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 26977161.0, actual 26976905 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 20 - 0.010% + +Keyset 'Sparse' - 256-bit keys with up to 3 bits set - 2796417 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1820.7, actual 908 (0.50x) +Testing collisions (high 25-38 bits) - Worst is 35 bits: 118/227 (0.52x) +Testing collisions (high 12-bit) - Expected 2796417.0, actual 2792321 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2796417.0, actual 2796161 (1.00x) (-256) 
+Testing collisions (low 32-bit) - Expected 1820.7, actual 921 (0.51x) +Testing collisions (low 25-38 bits) - Worst is 38 bits: 18/28 (0.63x) +Testing collisions (low 12-bit) - Expected 2796417.0, actual 2792321 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2796417.0, actual 2796161 (1.00x) (-256) +Testing distribution - Worst bias is the 19-bit window at bit 8 - 0.067% + +Keyset 'Sparse' - 512-bit keys with up to 3 bits set - 22370049 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 116512.9, actual 58015 (0.50x) +Testing collisions (high 28-44 bits) - Worst is 44 bits: 19/28 (0.67x) +Testing collisions (high 12-bit) - Expected 22370049.0, actual 22365953 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 22370049.0, actual 22369793 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 116512.9, actual 58134 (0.50x) +Testing collisions (low 28-44 bits) - Worst is 40 bits: 241/455 (0.53x) +Testing collisions (low 12-bit) - Expected 22370049.0, actual 22365953 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 22370049.0, actual 22369793 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 62 - 0.016% + +Keyset 'Sparse' - 1024-bit keys with up to 2 bits set - 524801 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 64.1, actual 30 (0.47x) +Testing collisions (high 23-33 bits) - Worst is 33 bits: 21/32 (0.65x) +Testing collisions (high 12-bit) - Expected 524801.0, actual 520705 (0.99x) (-4096) +Testing collisions (high 8-bit) - Expected 524801.0, actual 524545 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 64.1, actual 37 (0.58x) +Testing collisions (low 23-33 bits) - Worst is 33 bits: 23/32 (0.72x) +Testing collisions (low 12-bit) - Expected 524801.0, actual 520705 (0.99x) (-4096) +Testing collisions (low 8-bit) - Expected 524801.0, actual 524545 (1.00x) (-256) +Testing 
distribution - Worst bias is the 16-bit window at bit 54 - 0.182% + +Keyset 'Sparse' - 2048-bit keys with up to 2 bits set - 2098177 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1025.0, actual 529 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 22/32 (0.69x) +Testing collisions (high 12-bit) - Expected 2098177.0, actual 2094081 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2098177.0, actual 2097921 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1025.0, actual 525 (0.51x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 24/32 (0.75x) +Testing collisions (low 12-bit) - Expected 2098177.0, actual 2094081 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2098177.0, actual 2097921 (1.00x) (-256) +Testing distribution - Worst bias is the 18-bit window at bit 4 - 0.088% + + +[[[ Keyset 'Permutation' Tests ]]] + +Combination Lowbits Tests: +Keyset 'Combination' - up to 7 blocks from a set of 8 - 2396744 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1337.5, actual 659 (0.49x) +Testing collisions (high 25-38 bits) - Worst is 36 bits: 55/83 (0.66x) +Testing collisions (high 12-bit) - Expected 2396744.0, actual 2392648 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2396744.0, actual 2396488 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1337.5, actual 692 (0.52x) +Testing collisions (low 25-38 bits) - Worst is 38 bits: 13/20 (0.62x) +Testing collisions (low 12-bit) - Expected 2396744.0, actual 2392648 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2396744.0, actual 2396488 (1.00x) (-256) +Testing distribution - Worst bias is the 17-bit window at bit 8 - 0.049% + + +Combination Highbits Tests +Keyset 'Combination' - up to 7 blocks from a set of 8 - 2396744 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 
32-bit) - Expected 1337.5, actual 668 (0.50x) +Testing collisions (high 25-38 bits) - Worst is 34 bits: 175/334 (0.52x) +Testing collisions (high 12-bit) - Expected 2396744.0, actual 2392648 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2396744.0, actual 2396488 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1337.5, actual 675 (0.50x) +Testing collisions (low 25-38 bits) - Worst is 36 bits: 54/83 (0.65x) +Testing collisions (low 12-bit) - Expected 2396744.0, actual 2392648 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2396744.0, actual 2396488 (1.00x) (-256) +Testing distribution - Worst bias is the 18-bit window at bit 5 - 0.074% + + +Combination Hi-Lo Tests: +Keyset 'Combination' - up to 6 blocks from a set of 15 - 12204240 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 34678.6, actual 17094 (0.49x) +Testing collisions (high 27-42 bits) - Worst is 36 bits: 1095/2167 (0.51x) +Testing collisions (high 12-bit) - Expected 12204240.0, actual 12200144 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 12204240.0, actual 12203984 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 34678.6, actual 17320 (0.50x) +Testing collisions (low 27-42 bits) - Worst is 40 bits: 75/135 (0.55x) +Testing collisions (low 12-bit) - Expected 12204240.0, actual 12200144 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 12204240.0, actual 12203984 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 12 - 0.032% + + +Combination 0x8000000 Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8224 (0.50x) +Testing collisions (high 27-41 bits) - Worst is 33 bits: 4198/8191 (0.51x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 
8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8166 (0.50x) +Testing collisions (low 27-41 bits) - Worst is 36 bits: 529/1023 (0.52x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 26 - 0.040% + + +Combination 0x0000001 Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8221 (0.50x) +Testing collisions (high 27-41 bits) - Worst is 38 bits: 139/255 (0.54x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8070 (0.49x) +Testing collisions (low 27-41 bits) - Worst is 37 bits: 273/511 (0.53x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 33 - 0.045% + + +Combination 0x800000000000000 Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8143 (0.50x) +Testing collisions (high 27-41 bits) - Worst is 41 bits: 20/31 (0.63x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8230 (0.50x) +Testing collisions (low 27-41 bits) - Worst is 38 bits: 144/255 (0.56x) +Testing collisions (low 
12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 54 - 0.035% + + +Combination 0x000000000000001 Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8039 (0.49x) +Testing collisions (high 27-41 bits) - Worst is 41 bits: 17/31 (0.53x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8271 (0.50x) +Testing collisions (low 27-41 bits) - Worst is 41 bits: 20/31 (0.63x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 40 - 0.045% + + +Combination 16-bytes [0-1] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8194 (0.50x) +Testing collisions (high 27-41 bits) - Worst is 33 bits: 4138/8191 (0.51x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8163 (0.50x) +Testing collisions (low 27-41 bits) - Worst is 41 bits: 20/31 (0.63x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 9 - 0.037% + + 
+Combination 16-bytes [0-last] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8063 (0.49x) +Testing collisions (high 27-41 bits) - Worst is 41 bits: 18/31 (0.56x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8241 (0.50x) +Testing collisions (low 27-41 bits) - Worst is 39 bits: 91/127 (0.71x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 18 - 0.035% + + +Combination 32-bytes [0-1] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 7942 (0.48x) +Testing collisions (high 27-41 bits) - Worst is 41 bits: 17/31 (0.53x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8191 (0.50x) +Testing collisions (low 27-41 bits) - Worst is 41 bits: 17/31 (0.53x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 5 - 0.038% + + +Combination 32-bytes [0-last] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 
8218 (0.50x) +Testing collisions (high 27-41 bits) - Worst is 39 bits: 71/127 (0.55x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8144 (0.50x) +Testing collisions (low 27-41 bits) - Worst is 30 bits: 32683/65535 (0.50x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 42 - 0.039% + + +Combination 64-bytes [0-1] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8140 (0.50x) +Testing collisions (high 27-41 bits) - Worst is 40 bits: 39/63 (0.61x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8127 (0.50x) +Testing collisions (low 27-41 bits) - Worst is 40 bits: 34/63 (0.53x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 53 - 0.042% + + +Combination 64-bytes [0-last] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8178 (0.50x) +Testing collisions (high 27-41 bits) - Worst is 40 bits: 46/63 (0.72x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, 
actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8354 (0.51x) +Testing collisions (low 27-41 bits) - Worst is 38 bits: 136/255 (0.53x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 26 - 0.038% + + +Combination 128-bytes [0-1] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8384 (0.51x) +Testing collisions (high 27-41 bits) - Worst is 32 bits: 8384/16383 (0.51x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8287 (0.51x) +Testing collisions (low 27-41 bits) - Worst is 33 bits: 4188/8191 (0.51x) +Testing collisions (low 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 23 - 0.037% + + +Combination 128-bytes [0-last] Tests: +Keyset 'Combination' - up to 22 blocks from a set of 2 - 8388606 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 16384.0, actual 8104 (0.49x) +Testing collisions (high 27-41 bits) - Worst is 34 bits: 2045/4095 (0.50x) +Testing collisions (high 12-bit) - Expected 8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 16384.0, actual 8263 (0.50x) +Testing collisions (low 27-41 bits) - Worst is 41 bits: 19/31 (0.59x) +Testing collisions (low 12-bit) - Expected 
8388606.0, actual 8384510 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 8388606.0, actual 8388350 (1.00x) (-256) +Testing distribution - Worst bias is the 19-bit window at bit 16 - 0.040% + + +[[[ Keyset 'Window' Tests ]]] + +Keyset 'Window' - 32-bit key, 25-bit window - 32 tests, 33554432 keys per test +Window at 0 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 1 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 2 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 3 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 4 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 5 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 6 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 7 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 8 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 9 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 10 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 11 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 12 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 13 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 14 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 15 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 16 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 17 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 18 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 19 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 20 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 21 - Testing collisions ( 64-bit) - 
Expected 0.0, actual 0 (0.00x) +Window at 22 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 23 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 24 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 25 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 26 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 27 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 28 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 29 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 30 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 31 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Window at 32 - Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) + +[[[ Keyset 'Cyclic' Tests ]]] + +Keyset 'Cyclic' - 8 cycles of 8 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 232.8, actual 107 (0.46x) +Testing collisions (high 24-35 bits) - Worst is 34 bits: 38/58 (0.65x) +Testing collisions (high 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 232.8, actual 100 (0.43x) +Testing collisions (low 24-35 bits) - Worst is 27 bits: 3707/7450 (0.50x) +Testing collisions (low 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing distribution - Worst bias is the 17-bit window at bit 63 - 0.088% + +Keyset 'Cyclic' - 8 cycles of 9 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 232.8, actual 106 (0.46x) +Testing collisions (high 24-35 bits) - Worst is 26 bits: 
7405/14901 (0.50x) +Testing collisions (high 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 232.8, actual 126 (0.54x) +Testing collisions (low 24-35 bits) - Worst is 35 bits: 18/29 (0.62x) +Testing collisions (low 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing distribution - Worst bias is the 17-bit window at bit 55 - 0.099% + +Keyset 'Cyclic' - 8 cycles of 10 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 232.8, actual 127 (0.55x) +Testing collisions (high 24-35 bits) - Worst is 33 bits: 66/116 (0.57x) +Testing collisions (high 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 232.8, actual 104 (0.45x) +Testing collisions (low 24-35 bits) - Worst is 27 bits: 3807/7450 (0.51x) +Testing collisions (low 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing distribution - Worst bias is the 17-bit window at bit 7 - 0.136% + +Keyset 'Cyclic' - 8 cycles of 11 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 232.8, actual 118 (0.51x) +Testing collisions (high 24-35 bits) - Worst is 34 bits: 33/58 (0.57x) +Testing collisions (high 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 232.8, actual 128 (0.55x) +Testing collisions (low 24-35 bits) - Worst is 32 bits: 128/232 (0.55x) +Testing collisions (low 
12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing distribution - Worst bias is the 17-bit window at bit 38 - 0.115% + +Keyset 'Cyclic' - 8 cycles of 12 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 232.8, actual 91 (0.39x) +Testing collisions (high 24-35 bits) - Worst is 27 bits: 3813/7450 (0.51x) +Testing collisions (high 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 232.8, actual 102 (0.44x) +Testing collisions (low 24-35 bits) - Worst is 25 bits: 14959/29802 (0.50x) +Testing collisions (low 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing distribution - Worst bias is the 17-bit window at bit 63 - 0.130% + +Keyset 'Cyclic' - 8 cycles of 16 bytes - 1000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 232.8, actual 122 (0.52x) +Testing collisions (high 24-35 bits) - Worst is 35 bits: 17/29 (0.58x) +Testing collisions (high 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 232.8, actual 116 (0.50x) +Testing collisions (low 24-35 bits) - Worst is 33 bits: 61/116 (0.52x) +Testing collisions (low 12-bit) - Expected 1000000.0, actual 995904 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 1000000.0, actual 999744 (1.00x) (-256) +Testing distribution - Worst bias is the 17-bit window at bit 19 - 0.122% + + +[[[ Keyset 'TwoBytes' Tests ]]] + +Keyset 'TwoBytes' - up-to-4-byte keys, 652545 total keys +Testing collisions ( 64-bit) - Expected 
0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 99.1, actual 47 (0.47x) +Testing collisions (high 23-34 bits) - Worst is 34 bits: 16/24 (0.65x) +Testing collisions (high 12-bit) - Expected 652545.0, actual 648449 (0.99x) (-4096) +Testing collisions (high 8-bit) - Expected 652545.0, actual 652289 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 99.1, actual 46 (0.46x) +Testing collisions (low 23-34 bits) - Worst is 33 bits: 28/49 (0.56x) +Testing collisions (low 12-bit) - Expected 652545.0, actual 648449 (0.99x) (-4096) +Testing collisions (low 8-bit) - Expected 652545.0, actual 652289 (1.00x) (-256) +Testing distribution - Worst bias is the 16-bit window at bit 34 - 0.138% + +Keyset 'TwoBytes' - up-to-8-byte keys, 5471025 total keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 6969.1, actual 3548 (0.51x) +Testing collisions (high 26-40 bits) - Worst is 40 bits: 15/27 (0.55x) +Testing collisions (high 12-bit) - Expected 5471025.0, actual 5466929 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 5471025.0, actual 5470769 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 6969.1, actual 3378 (0.48x) +Testing collisions (low 26-40 bits) - Worst is 39 bits: 34/54 (0.62x) +Testing collisions (low 12-bit) - Expected 5471025.0, actual 5466929 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 5471025.0, actual 5470769 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 12 - 0.056% + +Keyset 'TwoBytes' - up-to-12-byte keys, 18616785 total keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 80695.5, actual 40607 (0.50x) +Testing collisions (high 28-43 bits) - Worst is 42 bits: 42/78 (0.53x) +Testing collisions (high 12-bit) - Expected 18616785.0, actual 18612689 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 18616785.0, actual 18616529 (1.00x) (-256) +Testing 
collisions (low 32-bit) - Expected 80695.5, actual 40085 (0.50x) +Testing collisions (low 28-43 bits) - Worst is 36 bits: 2521/5043 (0.50x) +Testing collisions (low 12-bit) - Expected 18616785.0, actual 18612689 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 18616785.0, actual 18616529 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 56 - 0.012% + +Keyset 'TwoBytes' - up-to-16-byte keys, 44251425 total keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 455926.3, actual 227080 (0.50x) +Testing collisions (high 29-46 bits) - Worst is 46 bits: 15/27 (0.54x) +Testing collisions (high 12-bit) - Expected 44251425.0, actual 44247329 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 44251425.0, actual 44251169 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 455926.3, actual 226684 (0.50x) +Testing collisions (low 29-46 bits) - Worst is 33 bits: 113923/227963 (0.50x) +Testing collisions (low 12-bit) - Expected 44251425.0, actual 44247329 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 44251425.0, actual 44251169 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 0 - 0.007% + +Keyset 'TwoBytes' - up-to-20-byte keys, 86536545 total keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1743569.4, actual 866241 (0.50x) +Testing collisions (high 30-48 bits) - Worst is 36 bits: 54556/108973 (0.50x) +Testing collisions (high 12-bit) - Expected 86536545.0, actual 86532449 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 86536545.0, actual 86536289 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1743569.4, actual 865870 (0.50x) +Testing collisions (low 30-48 bits) - Worst is 37 bits: 27421/54486 (0.50x) +Testing collisions (low 12-bit) - Expected 86536545.0, actual 86532449 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 86536545.0, 
actual 86536289 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 5 - 0.005% + + +[[[ Keyset 'Text' Tests ]]] + +Keyset 'Text' - keys of form "Foo[XXXX]Bar" - 14776336 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 50836.3, actual 25649 (0.50x) +Testing collisions (high 28-43 bits) - Worst is 34 bits: 6513/12709 (0.51x) +Testing collisions (high 12-bit) - Expected 14776336.0, actual 14772240 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 14776336.0, actual 14776080 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 50836.3, actual 25314 (0.50x) +Testing collisions (low 28-43 bits) - Worst is 40 bits: 108/198 (0.54x) +Testing collisions (low 12-bit) - Expected 14776336.0, actual 14772240 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 14776336.0, actual 14776080 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 35 - 0.020% + +Keyset 'Text' - keys of form "FooBar[XXXX]" - 14776336 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 50836.3, actual 25522 (0.50x) +Testing collisions (high 28-43 bits) - Worst is 43 bits: 15/24 (0.60x) +Testing collisions (high 12-bit) - Expected 14776336.0, actual 14772240 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 14776336.0, actual 14776080 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 50836.3, actual 25294 (0.50x) +Testing collisions (low 28-43 bits) - Worst is 41 bits: 61/99 (0.61x) +Testing collisions (low 12-bit) - Expected 14776336.0, actual 14772240 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 14776336.0, actual 14776080 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 37 - 0.017% + +Keyset 'Text' - keys of form "[XXXX]FooBar" - 14776336 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - 
Expected 50836.3, actual 25439 (0.50x) +Testing collisions (high 28-43 bits) - Worst is 38 bits: 416/794 (0.52x) +Testing collisions (high 12-bit) - Expected 14776336.0, actual 14772240 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 14776336.0, actual 14776080 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 50836.3, actual 25310 (0.50x) +Testing collisions (low 28-43 bits) - Worst is 42 bits: 32/49 (0.64x) +Testing collisions (low 12-bit) - Expected 14776336.0, actual 14772240 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 14776336.0, actual 14776080 (1.00x) (-256) +Testing distribution - Worst bias is the 20-bit window at bit 2 - 0.025% + + +[[[ Keyset 'Zeroes' Tests ]]] + +Keyset 'Zeroes' - 204800 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 9.8, actual 5 (0.51x) +Testing collisions (high 21-30 bits) - Worst is 29 bits: 41/78 (0.52x) +Testing collisions (high 12-bit) - Expected 204800.0, actual 200704 (0.98x) +Testing collisions (high 8-bit) - Expected 204800.0, actual 204544 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 9.8, actual 4 (0.41x) +Testing collisions (low 21-30 bits) - Worst is 25 bits: 643/1249 (0.51x) +Testing collisions (low 12-bit) - Expected 204800.0, actual 200704 (0.98x) +Testing collisions (low 8-bit) - Expected 204800.0, actual 204544 (1.00x) (-256) +Testing distribution - Worst bias is the 15-bit window at bit 14 - 0.281% + + +[[[ Keyset 'Seed' Tests ]]] + +Keyset 'Seed' - 5000000 keys +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 5820.8, actual 2880 (0.49x) +Testing collisions (high 26-40 bits) - Worst is 37 bits: 105/181 (0.58x) +Testing collisions (high 12-bit) - Expected 5000000.0, actual 4995904 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 5000000.0, actual 4999744 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 5820.8, actual 2957 
(0.51x) +Testing collisions (low 26-40 bits) - Worst is 33 bits: 1494/2910 (0.51x) +Testing collisions (low 12-bit) - Expected 5000000.0, actual 4995904 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 5000000.0, actual 4999744 (1.00x) (-256) +Testing distribution - Worst bias is the 19-bit window at bit 59 - 0.046% + + +[[[ Keyset 'PerlinNoise' Tests ]]] + +Testing 16777216 coordinates (L2) : +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 65536.0, actual 32715 (0.50x) +Testing collisions (high 28-43 bits) - Worst is 42 bits: 46/63 (0.72x) +Testing collisions (high 12-bit) - Expected 16777216.0, actual 16773120 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 16777216.0, actual 16776960 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 65536.0, actual 32752 (0.50x) +Testing collisions (low 28-43 bits) - Worst is 41 bits: 69/127 (0.54x) +Testing collisions (low 12-bit) - Expected 16777216.0, actual 16773120 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 16777216.0, actual 16776960 (1.00x) (-256) + + +[[[ Diff 'Differential' Tests ]]] + +Testing 8303632 up-to-5-bit differentials in 64-bit keys -> 64 bit hashes. +1000 reps, 8303632000 total tests, expecting 0.00 random collisions.......... +0 total collisions, of which 0 single collisions were ignored + +Testing 11017632 up-to-4-bit differentials in 128-bit keys -> 64 bit hashes. +1000 reps, 11017632000 total tests, expecting 0.00 random collisions.......... +0 total collisions, of which 0 single collisions were ignored + +Testing 2796416 up-to-3-bit differentials in 256-bit keys -> 64 bit hashes. +1000 reps, 2796416000 total tests, expecting 0.00 random collisions.......... 
+0 total collisions, of which 0 single collisions were ignored + + +[[[ DiffDist 'Differential Distribution' Tests ]]] + +Testing bit 0 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 516 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 19/31 (0.59x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 470 (0.46x) +Testing collisions (low 25-37 bits) - Worst is 28 bits: 8112/16383 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 1 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 514 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 24/31 (0.75x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 507 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 19/31 (0.59x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 2 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 536 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 34 bits: 135/255 (0.53x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 
479 (0.47x) +Testing collisions (low 25-37 bits) - Worst is 29 bits: 4068/8191 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 3 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 535 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 32 bits: 535/1023 (0.52x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 533 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 31 bits: 1106/2047 (0.54x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 4 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 519 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 17/31 (0.53x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 513 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 33 bits: 274/511 (0.54x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 5 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 520 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 32 bits: 520/1023 (0.51x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) 
+Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 527 (0.51x) +Testing collisions (low 25-37 bits) - Worst is 33 bits: 269/511 (0.53x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 6 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 519 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 36 bits: 41/63 (0.64x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 518 (0.51x) +Testing collisions (low 25-37 bits) - Worst is 29 bits: 4236/8191 (0.52x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 7 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 463 (0.45x) +Testing collisions (high 25-37 bits) - Worst is 28 bits: 8190/16383 (0.50x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 497 (0.49x) +Testing collisions (low 25-37 bits) - Worst is 34 bits: 134/255 (0.52x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 8 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 513 (0.50x) +Testing collisions 
(high 25-37 bits) - Worst is 37 bits: 21/31 (0.66x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 470 (0.46x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 32/63 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 9 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 527 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 36 bits: 37/63 (0.58x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 541 (0.53x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 19/31 (0.59x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 10 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 516 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 34 bits: 142/255 (0.55x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 470 (0.46x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 19/31 (0.59x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 11 +Testing collisions ( 
64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 500 (0.49x) +Testing collisions (high 25-37 bits) - Worst is 31 bits: 1038/2047 (0.51x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 526 (0.51x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 34/63 (0.53x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 12 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 503 (0.49x) +Testing collisions (high 25-37 bits) - Worst is 35 bits: 83/127 (0.65x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 479 (0.47x) +Testing collisions (low 25-37 bits) - Worst is 35 bits: 68/127 (0.53x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 13 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 515 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 36 bits: 40/63 (0.63x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 468 (0.46x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 33/63 (0.52x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 
(1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 14 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 465 (0.45x) +Testing collisions (high 25-37 bits) - Worst is 36 bits: 33/63 (0.52x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 549 (0.54x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 20/31 (0.63x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 15 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 523 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 36 bits: 35/63 (0.55x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 537 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 35 bits: 71/127 (0.55x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 16 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 517 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 19/31 (0.59x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 484 (0.47x) +Testing 
collisions (low 25-37 bits) - Worst is 37 bits: 25/31 (0.78x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 17 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 504 (0.49x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 18/31 (0.56x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 487 (0.48x) +Testing collisions (low 25-37 bits) - Worst is 35 bits: 68/127 (0.53x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 18 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 534 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 33 bits: 280/511 (0.55x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 519 (0.51x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 22/31 (0.69x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 19 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 571 (0.56x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 22/31 (0.69x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - 
Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 493 (0.48x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 33/63 (0.52x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 20 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 536 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 33 bits: 272/511 (0.53x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 536 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 23/31 (0.72x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 21 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 464 (0.45x) +Testing collisions (high 25-37 bits) - Worst is 35 bits: 67/127 (0.52x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 536 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 35 bits: 81/127 (0.63x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 22 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 508 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 34 bits: 
131/255 (0.51x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 482 (0.47x) +Testing collisions (low 25-37 bits) - Worst is 30 bits: 2054/4095 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 23 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 492 (0.48x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 20/31 (0.63x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 493 (0.48x) +Testing collisions (low 25-37 bits) - Worst is 28 bits: 8176/16383 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 24 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 518 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 30 bits: 2102/4095 (0.51x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 463 (0.45x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 33/63 (0.52x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 25 +Testing collisions ( 64-bit) - Expected 0.0, 
actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 532 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 32 bits: 532/1023 (0.52x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 514 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 31 bits: 1032/2047 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 26 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 479 (0.47x) +Testing collisions (high 25-37 bits) - Worst is 36 bits: 40/63 (0.63x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 532 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 33 bits: 269/511 (0.53x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 27 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 511 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 33 bits: 272/511 (0.53x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 450 (0.44x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 18/31 (0.56x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing 
collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 28 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 538 (0.53x) +Testing collisions (high 25-37 bits) - Worst is 34 bits: 138/255 (0.54x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 520 (0.51x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 19/31 (0.59x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 29 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 525 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 32 bits: 525/1023 (0.51x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 516 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 23/31 (0.72x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 30 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 483 (0.47x) +Testing collisions (high 25-37 bits) - Worst is 35 bits: 66/127 (0.52x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 512 (0.50x) +Testing collisions (low 25-37 
bits) - Worst is 30 bits: 2100/4095 (0.51x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 31 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 503 (0.49x) +Testing collisions (high 25-37 bits) - Worst is 27 bits: 16180/32767 (0.49x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 514 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 21/31 (0.66x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 32 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 557 (0.54x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 21/31 (0.66x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 502 (0.49x) +Testing collisions (low 25-37 bits) - Worst is 30 bits: 2087/4095 (0.51x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 33 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 494 (0.48x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 17/31 (0.53x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 
2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 481 (0.47x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 37/63 (0.58x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 34 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 520 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 22/31 (0.69x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 500 (0.49x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 21/31 (0.66x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 35 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 526 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 32 bits: 526/1023 (0.51x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 507 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 34 bits: 134/255 (0.52x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 36 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 530 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 20/31 
(0.63x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 503 (0.49x) +Testing collisions (low 25-37 bits) - Worst is 31 bits: 1034/2047 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 37 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 489 (0.48x) +Testing collisions (high 25-37 bits) - Worst is 35 bits: 67/127 (0.52x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 482 (0.47x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 36/63 (0.56x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 38 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 521 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 33 bits: 273/511 (0.53x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 498 (0.49x) +Testing collisions (low 25-37 bits) - Worst is 30 bits: 2041/4095 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 39 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 
(0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 483 (0.47x) +Testing collisions (high 25-37 bits) - Worst is 35 bits: 72/127 (0.56x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 529 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 20/31 (0.63x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 40 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 489 (0.48x) +Testing collisions (high 25-37 bits) - Worst is 36 bits: 33/63 (0.52x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 499 (0.49x) +Testing collisions (low 25-37 bits) - Worst is 29 bits: 4246/8191 (0.52x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 41 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 536 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 34 bits: 137/255 (0.54x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 543 (0.53x) +Testing collisions (low 25-37 bits) - Worst is 33 bits: 281/511 (0.55x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions 
(low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 42 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 513 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 31 bits: 1082/2047 (0.53x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 494 (0.48x) +Testing collisions (low 25-37 bits) - Worst is 34 bits: 131/255 (0.51x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 43 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 495 (0.48x) +Testing collisions (high 25-37 bits) - Worst is 29 bits: 4158/8191 (0.51x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 473 (0.46x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 16/31 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 44 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 479 (0.47x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 19/31 (0.59x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 541 (0.53x) +Testing collisions (low 25-37 bits) - 
Worst is 35 bits: 74/127 (0.58x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 45 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 531 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 32 bits: 531/1023 (0.52x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 513 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 18/31 (0.56x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 46 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 531 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 36 bits: 37/63 (0.58x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 510 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 17/31 (0.53x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 47 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 502 (0.49x) +Testing collisions (high 25-37 bits) - Worst is 28 bits: 8325/16383 (0.51x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, 
actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 529 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 39/63 (0.61x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 48 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 512 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 35 bits: 69/127 (0.54x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 495 (0.48x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 33/63 (0.52x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 49 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 551 (0.54x) +Testing collisions (high 25-37 bits) - Worst is 36 bits: 38/63 (0.59x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 470 (0.46x) +Testing collisions (low 25-37 bits) - Worst is 36 bits: 37/63 (0.58x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 50 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 483 (0.47x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 22/31 (0.69x) +Testing 
collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 512 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 31 bits: 1030/2047 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 51 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 520 (0.51x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 17/31 (0.53x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 510 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 31 bits: 1040/2047 (0.51x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 52 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 531 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 32 bits: 531/1023 (0.52x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 534 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 22/31 (0.69x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 53 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing 
collisions (high 32-bit) - Expected 1024.0, actual 546 (0.53x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 26/31 (0.81x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 483 (0.47x) +Testing collisions (low 25-37 bits) - Worst is 35 bits: 65/127 (0.51x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 54 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 488 (0.48x) +Testing collisions (high 25-37 bits) - Worst is 29 bits: 4102/8191 (0.50x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 501 (0.49x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 21/31 (0.66x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 55 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 509 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 34 bits: 130/255 (0.51x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 493 (0.48x) +Testing collisions (low 25-37 bits) - Worst is 34 bits: 136/255 (0.53x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - 
Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 56 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 528 (0.52x) +Testing collisions (high 25-37 bits) - Worst is 33 bits: 274/511 (0.54x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 551 (0.54x) +Testing collisions (low 25-37 bits) - Worst is 32 bits: 551/1023 (0.54x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 57 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 556 (0.54x) +Testing collisions (high 25-37 bits) - Worst is 34 bits: 157/255 (0.61x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 489 (0.48x) +Testing collisions (low 25-37 bits) - Worst is 30 bits: 2047/4095 (0.50x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 58 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 467 (0.46x) +Testing collisions (high 25-37 bits) - Worst is 37 bits: 18/31 (0.56x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 523 (0.51x) +Testing collisions (low 25-37 bits) - Worst is 36 
bits: 39/63 (0.61x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 59 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 497 (0.49x) +Testing collisions (high 25-37 bits) - Worst is 30 bits: 2031/4095 (0.50x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 530 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 33 bits: 278/511 (0.54x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 60 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 510 (0.50x) +Testing collisions (high 25-37 bits) - Worst is 28 bits: 8176/16383 (0.50x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 517 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 19/31 (0.59x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 61 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 496 (0.48x) +Testing collisions (high 25-37 bits) - Worst is 30 bits: 2041/4095 (0.50x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 
2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 529 (0.52x) +Testing collisions (low 25-37 bits) - Worst is 32 bits: 529/1023 (0.52x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 62 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 552 (0.54x) +Testing collisions (high 25-37 bits) - Worst is 32 bits: 552/1023 (0.54x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 507 (0.50x) +Testing collisions (low 25-37 bits) - Worst is 37 bits: 18/31 (0.56x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + +Testing bit 63 +Testing collisions ( 64-bit) - Expected 0.0, actual 0 (0.00x) +Testing collisions (high 32-bit) - Expected 1024.0, actual 484 (0.47x) +Testing collisions (high 25-37 bits) - Worst is 34 bits: 135/255 (0.53x) +Testing collisions (high 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (high 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) +Testing collisions (low 32-bit) - Expected 1024.0, actual 500 (0.49x) +Testing collisions (low 25-37 bits) - Worst is 33 bits: 277/511 (0.54x) +Testing collisions (low 12-bit) - Expected 2097152.0, actual 2093056 (1.00x) (-4096) +Testing collisions (low 8-bit) - Expected 2097152.0, actual 2096896 (1.00x) (-256) + + +[[[ MomentChi2 Tests ]]] + +Analyze hashes produced from a serie of linearly increasing numbers of 32-bit, using a step of 3 ... 
+Target values to approximate : 38918200.000000 - 410450.000000 +Popcount 1 stats : 38918484.206651 - 410464.360454 +Popcount 0 stats : 38919365.145760 - 410461.861348 +MomentChi2 for bits 1 : 0.0983945 +MomentChi2 for bits 0 : 1.65373 + +Derivative stats (transition from 2 consecutive values) : +Popcount 1 stats : 38917342.700616 - 410405.257542 +Popcount 0 stats : 38919729.298852 - 410467.929221 +MomentChi2 for deriv b1 : 0.895362 +MomentChi2 for deriv b0 : 2.84895 + + Great !! + + + +Input vcode 0x00000001, Output vcode 0x00000001, Result vcode 0x00000001 +Verification value is 0x00000001 - Testing took 934.304636 seconds +------------------------------------------------------------------------------- diff --git a/src/rust/vendor/ahash/src/aes_hash.rs b/src/rust/vendor/ahash/src/aes_hash.rs new file mode 100644 index 000000000..6e1b5c43d --- /dev/null +++ b/src/rust/vendor/ahash/src/aes_hash.rs @@ -0,0 +1,292 @@ +use crate::convert::*; +use crate::operations::*; +#[cfg(feature = "specialize")] +use crate::HasherExt; +use core::hash::Hasher; + +/// A `Hasher` for hashing an arbitrary stream of bytes. +/// +/// Instances of [`AHasher`] represent state that is updated while hashing data. +/// +/// Each method updates the internal state based on the new data provided. Once +/// all of the data has been provided, the resulting hash can be obtained by calling +/// `finish()` +/// +/// [Clone] is also provided in case you wish to calculate hashes for two different items that +/// start with the same data. +/// +#[derive(Debug, Clone)] +pub struct AHasher { + enc: u128, + sum: u128, + key: u128, +} + +impl AHasher { + /// Creates a new hasher keyed to the provided keys. + /// + /// Normally hashers are created via `AHasher::default()` for fixed keys or `RandomState::new()` for randomly + /// generated keys and `RandomState::with_seeds(a,b)` for seeds that are set and can be reused. 
All of these work at + /// map creation time (and hence don't have any overhead on a per-item bais). + /// + /// This method directly creates the hasher instance and performs no transformation on the provided seeds. This may + /// be useful where a HashBuilder is not desired, such as for testing purposes. + /// + /// # Example + /// + /// ``` + /// use std::hash::Hasher; + /// use ahash::AHasher; + /// + /// let mut hasher = AHasher::new_with_keys(1234, 5678); + /// + /// hasher.write_u32(1989); + /// hasher.write_u8(11); + /// hasher.write_u8(9); + /// hasher.write(b"Huh?"); + /// + /// println!("Hash is {:x}!", hasher.finish()); + /// ``` + #[inline] + pub fn new_with_keys(key1: u128, key2: u128) -> Self { + Self { + enc: key1, + sum: key2, + key: key1 ^ key2, + } + } + + #[cfg(test)] + pub(crate) fn test_with_keys(key1: u64, key2: u64) -> AHasher { + use crate::random_state::scramble_keys; + let (k1, k2, k3, k4) = scramble_keys(key1, key2); + AHasher { + enc: [k1, k2].convert(), + sum: [k3, k4].convert(), + key: add_by_64s([k1, k2], [k3, k4]).convert(), + } + } + + #[inline(always)] + fn add_in_length(&mut self, length: u64) { + //This will be scrambled by the next AES round. 
+ let mut enc: [u64; 2] = self.enc.convert(); + enc[0] = enc[0].wrapping_add(length); + self.enc = enc.convert(); + } + + #[inline(always)] + fn hash_in(&mut self, new_value: u128) { + self.enc = aesenc(self.enc, new_value); + self.sum = shuffle_and_add(self.sum, new_value); + } + + #[inline(always)] + fn hash_in_2(&mut self, v1: u128, v2: u128) { + self.enc = aesenc(self.enc, v1); + self.sum = shuffle_and_add(self.sum, v1); + self.enc = aesenc(self.enc, v2); + self.sum = shuffle_and_add(self.sum, v2); + } +} + +#[cfg(feature = "specialize")] +impl HasherExt for AHasher { + #[inline] + fn hash_u64(self, value: u64) -> u64 { + let mask = self.sum as u64; + let rot = (self.enc & 64) as u32; + folded_multiply(value ^ mask, crate::fallback_hash::MULTIPLE).rotate_left(rot) + } + + #[inline] + fn short_finish(&self) -> u64 { + let buffer: [u64; 2] = self.enc.convert(); + folded_multiply(buffer[0], buffer[1]) + } +} + +/// Provides methods to hash all of the primitive types. +impl Hasher for AHasher { + #[inline] + fn write_u8(&mut self, i: u8) { + self.write_u64(i as u64); + } + + #[inline] + fn write_u16(&mut self, i: u16) { + self.write_u64(i as u64); + } + + #[inline] + fn write_u32(&mut self, i: u32) { + self.write_u64(i as u64); + } + + #[inline] + fn write_u128(&mut self, i: u128) { + self.hash_in(i); + } + + #[inline] + fn write_usize(&mut self, i: usize) { + self.write_u64(i as u64); + } + + #[inline] + fn write_u64(&mut self, i: u64) { + self.write_u128(i as u128); + } + + #[inline] + #[allow(clippy::collapsible_if)] + fn write(&mut self, input: &[u8]) { + let mut data = input; + let length = data.len(); + self.add_in_length(length as u64); + //A 'binary search' on sizes reduces the number of comparisons. 
+ if data.len() < 8 { + let value: [u64; 2] = if data.len() >= 2 { + if data.len() >= 4 { + //len 4-8 + [data.read_u32().0 as u64, data.read_last_u32() as u64] + } else { + //len 2-3 + [data.read_u16().0 as u64, data[data.len() - 1] as u64] + } + } else { + if data.len() > 0 { + [data[0] as u64, 0] + } else { + [0, 0] + } + }; + self.hash_in(value.convert()); + } else { + if data.len() > 32 { + if data.len() > 64 { + let tail = data.read_last_u128x4(); + let mut current: [u128; 4] = [self.key; 4]; + current[0] = aesenc(current[0], tail[0]); + current[1] = aesdec(current[1], tail[1]); + current[2] = aesenc(current[2], tail[2]); + current[3] = aesdec(current[3], tail[3]); + let mut sum: [u128; 2] = [self.key, !self.key]; + sum[0] = add_by_64s(sum[0].convert(), tail[0].convert()).convert(); + sum[1] = add_by_64s(sum[1].convert(), tail[1].convert()).convert(); + sum[0] = shuffle_and_add(sum[0], tail[2]); + sum[1] = shuffle_and_add(sum[1], tail[3]); + while data.len() > 64 { + let (blocks, rest) = data.read_u128x4(); + current[0] = aesenc(current[0], blocks[0]); + current[1] = aesenc(current[1], blocks[1]); + current[2] = aesenc(current[2], blocks[2]); + current[3] = aesenc(current[3], blocks[3]); + sum[0] = shuffle_and_add(sum[0], blocks[0]); + sum[1] = shuffle_and_add(sum[1], blocks[1]); + sum[0] = shuffle_and_add(sum[0], blocks[2]); + sum[1] = shuffle_and_add(sum[1], blocks[3]); + data = rest; + } + self.hash_in_2(current[0], current[1]); + self.hash_in_2(current[2], current[3]); + self.hash_in_2(sum[0], sum[1]); + } else { + //len 33-64 + let (head, _) = data.read_u128x2(); + let tail = data.read_last_u128x2(); + self.hash_in_2(head[0], head[1]); + self.hash_in_2(tail[0], tail[1]); + } + } else { + if data.len() > 16 { + //len 17-32 + self.hash_in_2(data.read_u128().0, data.read_last_u128()); + } else { + //len 9-16 + let value: [u64; 2] = [data.read_u64().0, data.read_last_u64()]; + self.hash_in(value.convert()); + } + } + } + } + #[inline] + fn finish(&self) -> 
u64 { + let combined = aesdec(self.sum, self.enc); + let result: [u64; 2] = aesenc(aesenc(combined, self.key), combined).convert(); + result[1] + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::convert::Convert; + use crate::operations::aesenc; + use crate::RandomState; + use std::hash::{BuildHasher, Hasher}; + #[test] + fn test_sanity() { + let mut hasher = RandomState::with_seeds(192837465, 1234567890).build_hasher(); + hasher.write_u64(0); + let h1 = hasher.finish(); + hasher.write(&[1, 0, 0, 0, 0, 0, 0, 0]); + let h2 = hasher.finish(); + assert_ne!(h1, h2); + } + + #[cfg(feature = "compile-time-rng")] + #[test] + fn test_builder() { + use std::collections::HashMap; + use std::hash::BuildHasherDefault; + + let mut map = HashMap::>::default(); + map.insert(1, 3); + } + + #[cfg(feature = "compile-time-rng")] + #[test] + fn test_default() { + let hasher_a = AHasher::default(); + let a_enc: [u64; 2] = hasher_a.enc.convert(); + let a_sum: [u64; 2] = hasher_a.sum.convert(); + assert_ne!(0, a_enc[0]); + assert_ne!(0, a_enc[1]); + assert_ne!(0, a_sum[0]); + assert_ne!(0, a_sum[1]); + assert_ne!(a_enc[0], a_enc[1]); + assert_ne!(a_sum[0], a_sum[1]); + assert_ne!(a_enc[0], a_sum[0]); + assert_ne!(a_enc[1], a_sum[1]); + let hasher_b = AHasher::default(); + let b_enc: [u64; 2] = hasher_b.enc.convert(); + let b_sum: [u64; 2] = hasher_b.sum.convert(); + assert_eq!(a_enc[0], b_enc[0]); + assert_eq!(a_enc[1], b_enc[1]); + assert_eq!(a_sum[0], b_sum[0]); + assert_eq!(a_sum[1], b_sum[1]); + } + + #[test] + fn test_hash() { + let mut result: [u64; 2] = [0x6c62272e07bb0142, 0x62b821756295c58d]; + let value: [u64; 2] = [1 << 32, 0xFEDCBA9876543210]; + result = aesenc(value.convert(), result.convert()).convert(); + result = aesenc(result.convert(), result.convert()).convert(); + let mut result2: [u64; 2] = [0x6c62272e07bb0142, 0x62b821756295c58d]; + let value2: [u64; 2] = [1, 0xFEDCBA9876543210]; + result2 = aesenc(value2.convert(), result2.convert()).convert(); + 
result2 = aesenc(result2.convert(), result.convert()).convert(); + let result: [u8; 16] = result.convert(); + let result2: [u8; 16] = result2.convert(); + assert_ne!(hex::encode(result), hex::encode(result2)); + } + + #[test] + fn test_conversion() { + let input: &[u8] = "dddddddd".as_bytes(); + let bytes: u64 = as_array!(input, 8).convert(); + assert_eq!(bytes, 0x6464646464646464); + } +} diff --git a/src/rust/vendor/ahash/src/convert.rs b/src/rust/vendor/ahash/src/convert.rs new file mode 100644 index 000000000..435c03c49 --- /dev/null +++ b/src/rust/vendor/ahash/src/convert.rs @@ -0,0 +1,172 @@ +pub(crate) trait Convert { + fn convert(self) -> To; +} + +macro_rules! convert { + ($a:ty, $b:ty) => { + impl Convert<$b> for $a { + #[inline(always)] + fn convert(self) -> $b { + unsafe { + let mut result: $b = core::mem::zeroed(); + core::ptr::copy_nonoverlapping( + &self as *const $a as *const u8, + &mut result as *mut $b as *mut u8, + core::mem::size_of::<$b>(), + ); + return result; + } + } + } + impl Convert<$a> for $b { + #[inline(always)] + fn convert(self) -> $a { + unsafe { + let mut result: $a = core::mem::zeroed(); + core::ptr::copy_nonoverlapping( + &self as *const $b as *const u8, + &mut result as *mut $a as *mut u8, + core::mem::size_of::<$a>(), + ); + return result; + } + } + } + }; +} + +convert!([u128; 4], [u64; 8]); +convert!([u128; 4], [u32; 16]); +convert!([u128; 4], [u16; 32]); +convert!([u128; 4], [u8; 64]); +convert!([u128; 2], [u64; 4]); +convert!([u128; 2], [u32; 8]); +convert!([u128; 2], [u16; 16]); +convert!([u128; 2], [u8; 32]); +convert!(u128, [u64; 2]); +convert!(u128, [u32; 4]); +convert!(u128, [u16; 8]); +convert!(u128, [u8; 16]); +convert!([u64; 2], [u32; 4]); +convert!([u64; 2], [u16; 8]); +convert!([u64; 2], [u8; 16]); +convert!([u32; 4], [u16; 8]); +convert!([u32; 4], [u8; 16]); +convert!([u16; 8], [u8; 16]); +convert!(u64, [u32; 2]); +convert!(u64, [u16; 4]); +convert!(u64, [u8; 8]); +convert!([u32; 2], [u16; 4]); +convert!([u32; 
2], [u8; 8]); +convert!(u32, [u16; 2]); +convert!(u32, [u8; 4]); +convert!([u16; 2], [u8; 4]); +convert!(u16, [u8; 2]); + +convert!([f64; 2], [u8; 16]); +convert!([f32; 4], [u8; 16]); +convert!(f64, [u8; 8]); +convert!([f32; 2], [u8; 8]); +convert!(f32, [u8; 4]); + +macro_rules! as_array { + ($input:expr, $len:expr) => {{ + { + #[inline(always)] + fn as_array(slice: &[T]) -> &[T; $len] { + assert_eq!(slice.len(), $len); + unsafe { &*(slice.as_ptr() as *const [_; $len]) } + } + as_array($input) + } + }}; +} + +pub(crate) trait ReadFromSlice { + fn read_u16(&self) -> (u16, &[u8]); + fn read_u32(&self) -> (u32, &[u8]); + fn read_u64(&self) -> (u64, &[u8]); + fn read_u128(&self) -> (u128, &[u8]); + fn read_u128x2(&self) -> ([u128; 2], &[u8]); + fn read_u128x4(&self) -> ([u128; 4], &[u8]); + fn read_last_u16(&self) -> u16; + fn read_last_u32(&self) -> u32; + fn read_last_u64(&self) -> u64; + fn read_last_u128(&self) -> u128; + fn read_last_u128x2(&self) -> [u128; 2]; + fn read_last_u128x4(&self) -> [u128; 4]; +} + +impl ReadFromSlice for [u8] { + #[inline(always)] + fn read_u16(&self) -> (u16, &[u8]) { + let (value, rest) = self.split_at(2); + (as_array!(value, 2).convert(), rest) + } + + #[inline(always)] + fn read_u32(&self) -> (u32, &[u8]) { + let (value, rest) = self.split_at(4); + (as_array!(value, 4).convert(), rest) + } + + #[inline(always)] + fn read_u64(&self) -> (u64, &[u8]) { + let (value, rest) = self.split_at(8); + (as_array!(value, 8).convert(), rest) + } + + #[inline(always)] + fn read_u128(&self) -> (u128, &[u8]) { + let (value, rest) = self.split_at(16); + (as_array!(value, 16).convert(), rest) + } + + #[inline(always)] + fn read_u128x2(&self) -> ([u128; 2], &[u8]) { + let (value, rest) = self.split_at(32); + (as_array!(value, 32).convert(), rest) + } + + #[inline(always)] + fn read_u128x4(&self) -> ([u128; 4], &[u8]) { + let (value, rest) = self.split_at(64); + (as_array!(value, 64).convert(), rest) + } + + #[inline(always)] + fn read_last_u16(&self) 
-> u16 { + let (_, value) = self.split_at(self.len() - 2); + as_array!(value, 2).convert() + } + + #[inline(always)] + fn read_last_u32(&self) -> u32 { + let (_, value) = self.split_at(self.len() - 4); + as_array!(value, 4).convert() + } + + #[inline(always)] + fn read_last_u64(&self) -> u64 { + let (_, value) = self.split_at(self.len() - 8); + as_array!(value, 8).convert() + } + + #[inline(always)] + fn read_last_u128(&self) -> u128 { + let (_, value) = self.split_at(self.len() - 16); + as_array!(value, 16).convert() + } + + #[inline(always)] + fn read_last_u128x2(&self) -> [u128; 2] { + let (_, value) = self.split_at(self.len() - 32); + as_array!(value, 32).convert() + } + + #[inline(always)] + fn read_last_u128x4(&self) -> [u128; 4] { + let (_, value) = self.split_at(self.len() - 64); + as_array!(value, 64).convert() + } +} diff --git a/src/rust/vendor/ahash/src/fallback_hash.rs b/src/rust/vendor/ahash/src/fallback_hash.rs new file mode 100644 index 000000000..a43b1c1b1 --- /dev/null +++ b/src/rust/vendor/ahash/src/fallback_hash.rs @@ -0,0 +1,223 @@ +use crate::convert::*; +use crate::operations::folded_multiply; +#[cfg(feature = "specialize")] +use crate::HasherExt; +use core::hash::Hasher; + +///This constant come from Kunth's prng (Empirically it works better than those from splitmix32). +pub(crate) const MULTIPLE: u64 = 6364136223846793005; +const ROT: u32 = 23; //17 + +/// A `Hasher` for hashing an arbitrary stream of bytes. +/// +/// Instances of [`AHasher`] represent state that is updated while hashing data. +/// +/// Each method updates the internal state based on the new data provided. Once +/// all of the data has been provided, the resulting hash can be obtained by calling +/// `finish()` +/// +/// [Clone] is also provided in case you wish to calculate hashes for two different items that +/// start with the same data. 
+/// +#[derive(Debug, Clone)] +pub struct AHasher { + buffer: u64, + pad: u64, + extra_keys: [u64; 2], +} + +impl AHasher { + /// Creates a new hasher keyed to the provided key. + #[inline] + #[allow(dead_code)] // Is not called if non-fallback hash is used. + pub fn new_with_keys(key1: u128, key2: u128) -> AHasher { + AHasher { + buffer: key1 as u64, + pad: key2 as u64, + extra_keys: (key1 ^ key2).convert(), + } + } + + #[cfg(test)] + #[allow(dead_code)] // Is not called if non-fallback hash is used. + pub(crate) fn test_with_keys(key1: u64, key2: u64) -> AHasher { + use crate::random_state::scramble_keys; + let (k1, k2, k3, k4) = scramble_keys(key1, key2); + AHasher { + buffer: k1, + pad: k2, + extra_keys: [k3, k4], + } + } + + /// This update function has the goal of updating the buffer with a single multiply + /// FxHash does this but is vulnerable to attack. To avoid this input needs to be masked to with an + /// unpredictable value. Other hashes such as murmurhash have taken this approach but were found vulnerable + /// to attack. The attack was based on the idea of reversing the pre-mixing (Which is necessarily + /// reversible otherwise bits would be lost) then placing a difference in the highest bit before the + /// multiply used to mix the data. Because a multiply can never affect the bits to the right of it, a + /// subsequent update that also differed in this bit could result in a predictable collision. + /// + /// This version avoids this vulnerability while still only using a single multiply. It takes advantage + /// of the fact that when a 64 bit multiply is performed the upper 64 bits are usually computed and thrown + /// away. Instead it creates two 128 bit values where the upper 64 bits are zeros and multiplies them. + /// (The compiler is smart enough to turn this into a 64 bit multiplication in the assembly) + /// Then the upper bits are xored with the lower bits to produce a single 64 bit result. 
+ /// + /// To understand why this is a good scrambling function it helps to understand multiply-with-carry PRNGs: + /// https://en.wikipedia.org/wiki/Multiply-with-carry_pseudorandom_number_generator + /// If the multiple is chosen well, this creates a long period, decent quality PRNG. + /// Notice that this function is equivalent to this except the `buffer`/`state` is being xored with each + /// new block of data. In the event that data is all zeros, it is exactly equivalent to a MWC PRNG. + /// + /// This is impervious to attack because every bit buffer at the end is dependent on every bit in + /// `new_data ^ buffer`. For example suppose two inputs differed in only the 5th bit. Then when the + /// multiplication is performed the `result` will differ in bits 5-69. More specifically it will differ by + /// 2^5 * MULTIPLE. However in the next step bits 65-128 are turned into a separate 64 bit value. So the + /// differing bits will be in the lower 6 bits of this value. The two intermediate values that differ in + /// bits 5-63 and in bits 0-5 respectively get added together. Producing an output that differs in every + /// bit. The addition carries in the multiplication and at the end additionally mean that the even if an + /// attacker somehow knew part of (but not all) the contents of the buffer before hand, + /// they would not be able to predict any of the bits in the buffer at the end. + #[inline(always)] + fn update(&mut self, new_data: u64) { + self.buffer = folded_multiply(new_data ^ self.buffer, MULTIPLE); + } + + /// Similar to the above this function performs an update using a "folded multiply". + /// However it takes in 128 bits of data instead of 64. Both halves must be masked. + /// + /// This makes it impossible for an attacker to place a single bit difference between + /// two blocks so as to cancel each other. + /// + /// However this is not sufficient. 
to prevent (a,b) from hashing the same as (b,a) the buffer itself must + /// be updated between calls in a way that does not commute. To achieve this XOR and Rotate are used. + /// Add followed by xor is not the same as xor followed by add, and rotate ensures that the same out bits + /// can't be changed by the same set of input bits. To cancel this sequence with subsequent input would require + /// knowing the keys. + #[inline(always)] + fn large_update(&mut self, new_data: u128) { + let block: [u64; 2] = new_data.convert(); + let combined = folded_multiply(block[0] ^ self.extra_keys[0], block[1] ^ self.extra_keys[1]); + self.buffer = (combined.wrapping_add(self.buffer) ^ self.pad).rotate_left(ROT); + } +} + +#[cfg(feature = "specialize")] +impl HasherExt for AHasher { + #[inline] + fn hash_u64(self, value: u64) -> u64 { + let rot = (self.pad & 64) as u32; + folded_multiply(value ^ self.buffer, MULTIPLE).rotate_left(rot) + } + + #[inline] + fn short_finish(&self) -> u64 { + self.buffer.wrapping_add(self.pad) + } +} + +/// Provides methods to hash all of the primitive types. +impl Hasher for AHasher { + #[inline] + fn write_u8(&mut self, i: u8) { + self.update(i as u64); + } + + #[inline] + fn write_u16(&mut self, i: u16) { + self.update(i as u64); + } + + #[inline] + fn write_u32(&mut self, i: u32) { + self.update(i as u64); + } + + #[inline] + fn write_u64(&mut self, i: u64) { + self.update(i as u64); + } + + #[inline] + fn write_u128(&mut self, i: u128) { + let data: [u64; 2] = i.convert(); + self.update(data[0]); + self.update(data[1]); + } + + #[inline] + fn write_usize(&mut self, i: usize) { + self.write_u64(i as u64); + } + + #[inline] + #[allow(clippy::collapsible_if)] + fn write(&mut self, input: &[u8]) { + let mut data = input; + let length = data.len() as u64; + //Needs to be an add rather than an xor because otherwise it could be canceled with carefully formed input. 
+ self.buffer = self.buffer.wrapping_add(length).wrapping_mul(MULTIPLE); + //A 'binary search' on sizes reduces the number of comparisons. + if data.len() > 8 { + if data.len() > 16 { + let tail = data.read_last_u128(); + self.large_update(tail); + while data.len() > 16 { + let (block, rest) = data.read_u128(); + self.large_update(block); + data = rest; + } + } else { + self.large_update([data.read_u64().0, data.read_last_u64()].convert()); + } + } else { + if data.len() >= 2 { + if data.len() >= 4 { + let block = [data.read_u32().0 as u64, data.read_last_u32() as u64]; + self.large_update(block.convert()); + } else { + let value = [data.read_u16().0 as u32, data[data.len() - 1] as u32]; + self.update(value.convert()); + } + } else { + if data.len() > 0 { + self.update(data[0] as u64); + } + } + } + } + #[inline] + fn finish(&self) -> u64 { + let rot = (self.buffer & 63) as u32; + folded_multiply(self.buffer, self.pad).rotate_left(rot) + } +} + +#[cfg(test)] +mod tests { + use crate::convert::Convert; + use crate::fallback_hash::*; + + #[test] + fn test_hash() { + let mut hasher = AHasher::new_with_keys(0, 0); + let value: u64 = 1 << 32; + hasher.update(value); + let result = hasher.buffer; + let mut hasher = AHasher::new_with_keys(0, 0); + let value2: u64 = 1; + hasher.update(value2); + let result2 = hasher.buffer; + let result: [u8; 8] = result.convert(); + let result2: [u8; 8] = result2.convert(); + assert_ne!(hex::encode(result), hex::encode(result2)); + } + + #[test] + fn test_conversion() { + let input: &[u8] = "dddddddd".as_bytes(); + let bytes: u64 = as_array!(input, 8).convert(); + assert_eq!(bytes, 0x6464646464646464); + } +} diff --git a/src/rust/vendor/ahash/src/hash_map.rs b/src/rust/vendor/ahash/src/hash_map.rs new file mode 100644 index 000000000..362ac8551 --- /dev/null +++ b/src/rust/vendor/ahash/src/hash_map.rs @@ -0,0 +1,177 @@ +use std::borrow::Borrow; +use std::collections::{hash_map, HashMap}; +use std::fmt::{self, Debug}; +use 
std::hash::{BuildHasher, Hash}; +use std::iter::FromIterator; +use std::ops::{Deref, DerefMut, Index}; +use std::panic::UnwindSafe; + +/// A [`HashMap`](std::collections::HashMap) using [`RandomState`](crate::RandomState) to hash the items. +/// Requires the `std` feature to be enabled. +#[derive(Clone)] +pub struct AHashMap(HashMap); + +impl AHashMap +where + K: Hash + Eq, + S: BuildHasher + Default, +{ + pub fn new() -> Self { + AHashMap(HashMap::with_hasher(S::default())) + } + + pub fn with_capacity(capacity: usize) -> Self { + AHashMap(HashMap::with_capacity_and_hasher(capacity, S::default())) + } +} + +impl AHashMap +where + K: Hash + Eq, + S: BuildHasher, +{ + pub fn with_hasher(hash_builder: S) -> Self { + AHashMap(HashMap::with_hasher(hash_builder)) + } + + pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { + AHashMap(HashMap::with_capacity_and_hasher(capacity, hash_builder)) + } +} + +impl Deref for AHashMap { + type Target = HashMap; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for AHashMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl UnwindSafe for AHashMap +where + K: UnwindSafe, + V: UnwindSafe, +{ +} + +impl PartialEq for AHashMap +where + K: Eq + Hash, + V: PartialEq, + S: BuildHasher, +{ + fn eq(&self, other: &AHashMap) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for AHashMap +where + K: Eq + Hash, + V: Eq, + S: BuildHasher, +{ +} + +impl Index<&Q> for AHashMap +where + K: Eq + Hash + Borrow, + Q: Eq + Hash, + S: BuildHasher, +{ + type Output = V; + + /// Returns a reference to the value corresponding to the supplied key. + /// + /// # Panics + /// + /// Panics if the key is not present in the `HashMap`. 
+ #[inline] + fn index(&self, key: &Q) -> &V { + self.0.index(key) + } +} + +impl Debug for AHashMap +where + K: Eq + Hash + Debug, + V: Debug, + S: BuildHasher, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(fmt) + } +} + +impl FromIterator<(K, V)> for AHashMap +where + K: Eq + Hash, + S: BuildHasher + Default, +{ + fn from_iter>(iter: T) -> Self { + AHashMap(HashMap::from_iter(iter)) + } +} + +impl<'a, K, V, S> IntoIterator for &'a AHashMap { + type Item = (&'a K, &'a V); + type IntoIter = hash_map::Iter<'a, K, V>; + fn into_iter(self) -> Self::IntoIter { + (&self.0).iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut AHashMap { + type Item = (&'a K, &'a mut V); + type IntoIter = hash_map::IterMut<'a, K, V>; + fn into_iter(self) -> Self::IntoIter { + (&mut self.0).iter_mut() + } +} + +impl IntoIterator for AHashMap { + type Item = (K, V); + type IntoIter = hash_map::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl Extend<(K, V)> for AHashMap +where + K: Eq + Hash, + S: BuildHasher, +{ + #[inline] + fn extend>(&mut self, iter: T) { + self.0.extend(iter) + } +} + +impl<'a, K, V, S> Extend<(&'a K, &'a V)> for AHashMap +where + K: Eq + Hash + Copy + 'a, + V: Copy + 'a, + S: BuildHasher, +{ + #[inline] + fn extend>(&mut self, iter: T) { + self.0.extend(iter) + } +} + +impl Default for AHashMap +where + K: Eq + Hash, + S: BuildHasher + Default, +{ + #[inline] + fn default() -> AHashMap { + AHashMap::with_hasher(Default::default()) + } +} diff --git a/src/rust/vendor/ahash/src/hash_quality_test.rs b/src/rust/vendor/ahash/src/hash_quality_test.rs new file mode 100644 index 000000000..1a69562da --- /dev/null +++ b/src/rust/vendor/ahash/src/hash_quality_test.rs @@ -0,0 +1,451 @@ +use crate::{CallHasher, HasherExt}; +use core::hash::{Hash, Hasher}; +use std::collections::HashMap; + +fn assert_sufficiently_different(a: u64, b: u64, tolerance: i32) { + let (same_byte_count, same_nibble_count) = 
count_same_bytes_and_nibbles(a, b); + assert!(same_byte_count <= tolerance, "{:x} vs {:x}: {:}", a, b, same_byte_count); + assert!( + same_nibble_count <= tolerance * 3, + "{:x} vs {:x}: {:}", + a, + b, + same_nibble_count + ); + let flipped_bits = (a ^ b).count_ones(); + assert!( + flipped_bits > 12 && flipped_bits < 52, + "{:x} and {:x}: {:}", + a, + b, + flipped_bits + ); + for rotate in 0..64 { + let flipped_bits2 = (a ^ (b.rotate_left(rotate))).count_ones(); + assert!( + flipped_bits2 > 10 && flipped_bits2 < 54, + "{:x} and {:x}: {:}", + a, + b.rotate_left(rotate), + flipped_bits2 + ); + } +} + +fn count_same_bytes_and_nibbles(a: u64, b: u64) -> (i32, i32) { + let mut same_byte_count = 0; + let mut same_nibble_count = 0; + for byte in 0..8 { + let ba = (a >> (8 * byte)) as u8; + let bb = (b >> (8 * byte)) as u8; + if ba == bb { + same_byte_count += 1; + } + if ba & 0xF0u8 == bb & 0xF0u8 { + same_nibble_count += 1; + } + if ba & 0x0Fu8 == bb & 0x0Fu8 { + same_nibble_count += 1; + } + } + (same_byte_count, same_nibble_count) +} + +fn gen_combinations(options: &[u32; 8], depth: u32, so_far: Vec, combinations: &mut Vec>) { + if depth == 0 { + return; + } + for option in options { + let mut next = so_far.clone(); + next.push(*option); + combinations.push(next.clone()); + gen_combinations(options, depth - 1, next, combinations); + } +} + +fn test_no_full_collisions(gen_hash: impl Fn() -> T) { + let options: [u32; 8] = [ + 0x00000000, 0x20000000, 0x40000000, 0x60000000, 0x80000000, 0xA0000000, 0xC0000000, 0xE0000000, + ]; + let mut combinations = Vec::new(); + gen_combinations(&options, 7, Vec::new(), &mut combinations); + let mut map: HashMap> = HashMap::new(); + for combination in combinations { + let array = unsafe { + let (begin, middle, end) = combination.align_to::(); + assert_eq!(0, begin.len()); + assert_eq!(0, end.len()); + middle.to_vec() + }; + let mut hasher = gen_hash(); + hasher.write(&array); + let hash = hasher.finish(); + if let Some(value) = 
map.get(&hash) { + assert_eq!( + value, &array, + "Found a collision between {:x?} and {:x?}", + value, &array + ); + } else { + map.insert(hash, array); + } + } + assert_eq!(2396744, map.len()); +} + +fn test_keys_change_output(constructor: impl Fn(u64, u64) -> T) { + let mut a = constructor(1, 1); + let mut b = constructor(1, 2); + let mut c = constructor(2, 1); + let mut d = constructor(2, 2); + "test".hash(&mut a); + "test".hash(&mut b); + "test".hash(&mut c); + "test".hash(&mut d); + assert_sufficiently_different(a.finish(), b.finish(), 1); + assert_sufficiently_different(a.finish(), c.finish(), 1); + assert_sufficiently_different(a.finish(), d.finish(), 1); + assert_sufficiently_different(b.finish(), c.finish(), 1); + assert_sufficiently_different(b.finish(), d.finish(), 1); + assert_sufficiently_different(c.finish(), d.finish(), 1); +} + +fn test_input_affect_every_byte(constructor: impl Fn(u64, u64) -> T) { + let base = 0.get_hash(constructor(0, 0)); + for shift in 0..16 { + let mut alternitives = vec![]; + for v in 0..256 { + let input = (v as u128) << (shift * 8); + let hasher = constructor(0, 0); + alternitives.push(input.get_hash(hasher)); + } + assert_each_byte_differs(base, alternitives); + } +} + +///Ensures that for every bit in the output there is some value for each byte in the key that flips it. 
+fn test_keys_affect_every_byte(item: H, constructor: impl Fn(u64, u64) -> T) { + let base = item.get_hash(constructor(0, 0)); + for shift in 0..8 { + let mut alternitives1 = vec![]; + let mut alternitives2 = vec![]; + for v in 0..256 { + let input = (v as u64) << (shift * 8); + let hasher1 = constructor(input, 0); + let hasher2 = constructor(0, input); + let h1 = item.get_hash(hasher1); + let h2 = item.get_hash(hasher2); + alternitives1.push(h1); + alternitives2.push(h2); + } + assert_each_byte_differs(base, alternitives1); + assert_each_byte_differs(base, alternitives2); + } +} + +fn assert_each_byte_differs(base: u64, alternitives: Vec) { + let mut changed_bits = 0_u64; + for alternitive in alternitives { + changed_bits |= base ^ alternitive + } + assert_eq!(core::u64::MAX, changed_bits, "Bits changed: {:x}", changed_bits); +} + +fn test_finish_is_consistent(constructor: impl Fn(u64, u64) -> T) { + let mut hasher = constructor(1, 2); + "Foo".hash(&mut hasher); + let a = hasher.finish(); + let b = hasher.finish(); + assert_eq!(a, b); +} + +fn test_single_key_bit_flip(constructor: impl Fn(u64, u64) -> T) { + for bit in 0..64 { + let mut a = constructor(0, 0); + let mut b = constructor(0, 1 << bit); + let mut c = constructor(1 << bit, 0); + "1234".hash(&mut a); + "1234".hash(&mut b); + "1234".hash(&mut c); + assert_sufficiently_different(a.finish(), b.finish(), 2); + assert_sufficiently_different(a.finish(), c.finish(), 2); + assert_sufficiently_different(b.finish(), c.finish(), 2); + let mut a = constructor(0, 0); + let mut b = constructor(0, 1 << bit); + let mut c = constructor(1 << bit, 0); + "12345678".hash(&mut a); + "12345678".hash(&mut b); + "12345678".hash(&mut c); + assert_sufficiently_different(a.finish(), b.finish(), 2); + assert_sufficiently_different(a.finish(), c.finish(), 2); + assert_sufficiently_different(b.finish(), c.finish(), 2); + let mut a = constructor(0, 0); + let mut b = constructor(0, 1 << bit); + let mut c = constructor(1 << bit, 0); + 
"1234567812345678".hash(&mut a); + "1234567812345678".hash(&mut b); + "1234567812345678".hash(&mut c); + assert_sufficiently_different(a.finish(), b.finish(), 2); + assert_sufficiently_different(a.finish(), c.finish(), 2); + assert_sufficiently_different(b.finish(), c.finish(), 2); + } +} + +fn test_all_bytes_matter(hasher: impl Fn() -> T) { + let mut item = vec![0; 256]; + let base_hash = hash(&item, &hasher); + for pos in 0..256 { + item[pos] = 255; + let hash = hash(&item, &hasher); + assert_ne!(base_hash, hash, "Position {} did not affect output", pos); + item[pos] = 0; + } +} + +fn test_no_pair_collisions(hasher: impl Fn() -> T) { + let base = [0_u64, 0_u64]; + let base_hash = hash(&base, &hasher); + for bitpos1 in 0..64 { + let a = 1_u64 << bitpos1; + for bitpos2 in 0..bitpos1 { + let b = 1_u64 << bitpos2; + let aa = hash(&[a, a], &hasher); + let ab = hash(&[a, b], &hasher); + let ba = hash(&[b, a], &hasher); + let bb = hash(&[b, b], &hasher); + assert_sufficiently_different(base_hash, aa, 3); + assert_sufficiently_different(base_hash, ab, 3); + assert_sufficiently_different(base_hash, ba, 3); + assert_sufficiently_different(base_hash, bb, 3); + assert_sufficiently_different(aa, ab, 3); + assert_sufficiently_different(ab, ba, 3); + assert_sufficiently_different(ba, bb, 3); + assert_sufficiently_different(aa, ba, 3); + assert_sufficiently_different(ab, bb, 3); + assert_sufficiently_different(aa, bb, 3); + } + } +} + +fn hash(b: &H, hasher: &dyn Fn() -> T) -> u64 { + b.get_hash(hasher()) +} + +fn test_single_bit_flip(hasher: impl Fn() -> T) { + let size = 32; + let compare_value = hash(&0u32, &hasher); + for pos in 0..size { + let test_value = hash(&(1u32 << pos), &hasher); + assert_sufficiently_different(compare_value, test_value, 2); + } + let size = 64; + let compare_value = hash(&0u64, &hasher); + for pos in 0..size { + let test_value = hash(&(1u64 << pos), &hasher); + assert_sufficiently_different(compare_value, test_value, 2); + } + let size = 128; + let 
compare_value = hash(&0u128, &hasher); + for pos in 0..size { + let test_value = hash(&(1u128 << pos), &hasher); + assert_sufficiently_different(compare_value, test_value, 2); + } +} + +fn test_padding_doesnot_collide(hasher: impl Fn() -> T) { + for c in 0..128u8 { + for string in ["", "\0", "\x01", "1234", "12345678", "1234567812345678"].iter() { + let mut short = hasher(); + string.hash(&mut short); + let value = short.finish(); + let mut padded = string.to_string(); + for num in 1..=128 { + let mut long = hasher(); + padded.push(c as char); + padded.hash(&mut long); + let (same_bytes, same_nibbles) = count_same_bytes_and_nibbles(value, long.finish()); + assert!( + same_bytes <= 3, + format!("{} bytes of {} -> {:x} vs {:x}", num, c, value, long.finish()) + ); + assert!( + same_nibbles <= 8, + format!("{} bytes of {} -> {:x} vs {:x}", num, c, value, long.finish()) + ); + let flipped_bits = (value ^ long.finish()).count_ones(); + assert!(flipped_bits > 10); + } + if string.len() > 0 { + let mut padded = string[1..].to_string(); + padded.push(c as char); + for num in 2..=128 { + let mut long = hasher(); + padded.push(c as char); + padded.hash(&mut long); + let (same_bytes, same_nibbles) = count_same_bytes_and_nibbles(value, long.finish()); + assert!( + same_bytes <= 3, + format!( + "string {:?} + {} bytes of {} -> {:x} vs {:x}", + string, + num, + c, + value, + long.finish() + ) + ); + assert!( + same_nibbles <= 8, + format!( + "string {:?} + {} bytes of {} -> {:x} vs {:x}", + string, + num, + c, + value, + long.finish() + ) + ); + let flipped_bits = (value ^ long.finish()).count_ones(); + assert!(flipped_bits > 10); + } + } + } + } +} + +#[cfg(test)] +mod fallback_tests { + use crate::fallback_hash::*; + use crate::hash_quality_test::*; + + #[test] + fn fallback_single_bit_flip() { + test_single_bit_flip(|| AHasher::test_with_keys(0, 0)) + } + + #[test] + fn fallback_single_key_bit_flip() { + test_single_key_bit_flip(AHasher::test_with_keys) + } + + #[test] + fn 
fallback_all_bytes_matter() { + test_all_bytes_matter(|| AHasher::test_with_keys(0, 0)); + } + + #[test] + fn fallback_test_no_pair_collisions() { + test_no_pair_collisions(|| AHasher::test_with_keys(0, 0)); + } + + #[test] + fn fallback_test_no_full_collisions() { + test_no_full_collisions(|| AHasher::test_with_keys(12345, 67890)); + } + + #[test] + fn fallback_keys_change_output() { + test_keys_change_output(AHasher::test_with_keys); + } + + #[test] + fn fallback_input_affect_every_byte() { + test_input_affect_every_byte(AHasher::test_with_keys); + } + + #[test] + fn fallback_keys_affect_every_byte() { + test_keys_affect_every_byte(0, AHasher::test_with_keys); + test_keys_affect_every_byte("", AHasher::test_with_keys); + test_keys_affect_every_byte((0, 0), AHasher::test_with_keys); + } + + #[test] + fn fallback_finish_is_consistant() { + test_finish_is_consistent(AHasher::test_with_keys) + } + + #[test] + fn fallback_padding_doesnot_collide() { + test_padding_doesnot_collide(|| AHasher::test_with_keys(0, 0)); + test_padding_doesnot_collide(|| AHasher::test_with_keys(0, 1)); + test_padding_doesnot_collide(|| AHasher::test_with_keys(1, 0)); + test_padding_doesnot_collide(|| AHasher::test_with_keys(1, 1)); + } +} + +///Basic sanity tests of the cypto properties of aHash. +#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))] +#[cfg(test)] +mod aes_tests { + use crate::aes_hash::*; + use crate::hash_quality_test::*; + use std::hash::{Hash, Hasher}; + + const BAD_KEY: u64 = 0x5252_5252_5252_5252; //This encrypts to 0. + const BAD_KEY2: u64 = 0x6363_6363_6363_6363; //This decrypts to 0. 
+ + #[test] + fn test_single_bit_in_byte() { + let mut hasher1 = AHasher::new_with_keys(0, 0); + 8_u32.hash(&mut hasher1); + let mut hasher2 = AHasher::new_with_keys(0, 0); + 0_u32.hash(&mut hasher2); + assert_sufficiently_different(hasher1.finish(), hasher2.finish(), 1); + } + + #[test] + fn aes_single_bit_flip() { + test_single_bit_flip(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY)); + test_single_bit_flip(|| AHasher::test_with_keys(BAD_KEY2, BAD_KEY2)); + } + + #[test] + fn aes_single_key_bit_flip() { + test_single_key_bit_flip(|k1, k2| AHasher::test_with_keys(k1, k2)) + } + + #[test] + fn aes_all_bytes_matter() { + test_all_bytes_matter(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY)); + test_all_bytes_matter(|| AHasher::test_with_keys(BAD_KEY2, BAD_KEY2)); + } + + #[test] + fn aes_test_no_pair_collisions() { + test_no_pair_collisions(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY)); + test_no_pair_collisions(|| AHasher::test_with_keys(BAD_KEY2, BAD_KEY2)); + } + + #[test] + fn ase_test_no_full_collisions() { + test_no_full_collisions(|| AHasher::test_with_keys(12345, 67890)); + } + + #[test] + fn aes_keys_change_output() { + test_keys_change_output(AHasher::test_with_keys); + } + + #[test] + fn aes_input_affect_every_byte() { + test_input_affect_every_byte(AHasher::test_with_keys); + } + + #[test] + fn aes_keys_affect_every_byte() { + test_keys_affect_every_byte(0, AHasher::test_with_keys); + test_keys_affect_every_byte("", AHasher::test_with_keys); + test_keys_affect_every_byte((0, 0), AHasher::test_with_keys); + } + #[test] + fn aes_finish_is_consistant() { + test_finish_is_consistent(AHasher::test_with_keys) + } + + #[test] + fn aes_padding_doesnot_collide() { + test_padding_doesnot_collide(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY)); + test_padding_doesnot_collide(|| AHasher::test_with_keys(BAD_KEY2, BAD_KEY2)); + } +} diff --git a/src/rust/vendor/ahash/src/hash_set.rs b/src/rust/vendor/ahash/src/hash_set.rs new file mode 100644 index 000000000..2950ec928 --- 
/dev/null +++ b/src/rust/vendor/ahash/src/hash_set.rs @@ -0,0 +1,267 @@ +use std::collections::{hash_set, HashSet}; +use std::fmt::{self, Debug}; +use std::hash::{BuildHasher, Hash}; +use std::iter::FromIterator; +use std::ops::{BitAnd, BitOr, BitXor, Deref, DerefMut, Sub}; + +/// A [`HashSet`](std::collections::HashSet) using [`RandomState`](crate::RandomState) to hash the items. +/// Requires the `std` feature to be enabled. +#[derive(Clone)] +pub struct AHashSet(HashSet); + +impl AHashSet +where + T: Hash + Eq, + S: BuildHasher + Default, +{ + pub fn new() -> Self { + AHashSet(HashSet::with_hasher(S::default())) + } + + pub fn with_capacity(capacity: usize) -> Self { + AHashSet(HashSet::with_capacity_and_hasher(capacity, S::default())) + } +} + +impl AHashSet +where + T: Hash + Eq, + S: BuildHasher, +{ + pub fn with_hasher(hash_builder: S) -> Self { + AHashSet(HashSet::with_hasher(hash_builder)) + } + + pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { + AHashSet(HashSet::with_capacity_and_hasher(capacity, hash_builder)) + } +} + +impl Deref for AHashSet { + type Target = HashSet; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for AHashSet { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl PartialEq for AHashSet +where + T: Eq + Hash, + S: BuildHasher, +{ + fn eq(&self, other: &AHashSet) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for AHashSet +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl BitOr<&AHashSet> for &AHashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, +{ + type Output = AHashSet; + + /// Returns the union of `self` and `rhs` as a new `AHashSet`. 
+ /// + /// # Examples + /// + /// ``` + /// use ahash::AHashSet; + /// + /// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: AHashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// let set = &a | &b; + /// + /// let mut i = 0; + /// let expected = [1, 2, 3, 4, 5]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitor(self, rhs: &AHashSet) -> AHashSet { + AHashSet(self.0.bitor(&rhs.0)) + } +} + +impl BitAnd<&AHashSet> for &AHashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, +{ + type Output = AHashSet; + + /// Returns the intersection of `self` and `rhs` as a new `AHashSet`. + /// + /// # Examples + /// + /// ``` + /// use ahash::AHashSet; + /// + /// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: AHashSet<_> = vec![2, 3, 4].into_iter().collect(); + /// + /// let set = &a & &b; + /// + /// let mut i = 0; + /// let expected = [2, 3]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitand(self, rhs: &AHashSet) -> AHashSet { + AHashSet(self.0.bitand(&rhs.0)) + } +} + +impl BitXor<&AHashSet> for &AHashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, +{ + type Output = AHashSet; + + /// Returns the symmetric difference of `self` and `rhs` as a new `AHashSet`. 
+ /// + /// # Examples + /// + /// ``` + /// use ahash::AHashSet; + /// + /// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: AHashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// let set = &a ^ &b; + /// + /// let mut i = 0; + /// let expected = [1, 2, 4, 5]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitxor(self, rhs: &AHashSet) -> AHashSet { + AHashSet(self.0.bitxor(&rhs.0)) + } +} + +impl Sub<&AHashSet> for &AHashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, +{ + type Output = AHashSet; + + /// Returns the difference of `self` and `rhs` as a new `AHashSet`. + /// + /// # Examples + /// + /// ``` + /// use ahash::AHashSet; + /// + /// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: AHashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// let set = &a - &b; + /// + /// let mut i = 0; + /// let expected = [1, 2]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn sub(self, rhs: &AHashSet) -> AHashSet { + AHashSet(self.0.sub(&rhs.0)) + } +} + +impl Debug for AHashSet +where + T: Eq + Hash + Debug, + S: BuildHasher, +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(fmt) + } +} + +impl FromIterator for AHashSet +where + T: Eq + Hash, + S: BuildHasher + Default, +{ + #[inline] + fn from_iter>(iter: I) -> AHashSet { + AHashSet(HashSet::from_iter(iter)) + } +} + +impl<'a, T, S> IntoIterator for &'a AHashSet { + type Item = &'a T; + type IntoIter = hash_set::Iter<'a, T>; + fn into_iter(self) -> Self::IntoIter { + (&self.0).iter() + } +} + +impl IntoIterator for AHashSet { + type Item = T; + type IntoIter = hash_set::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl Extend for AHashSet +where + T: Eq + Hash, + S: BuildHasher, +{ + #[inline] + fn extend>(&mut 
self, iter: I) { + self.0.extend(iter) + } +} + +impl<'a, T, S> Extend<&'a T> for AHashSet +where + T: 'a + Eq + Hash + Copy, + S: BuildHasher, +{ + #[inline] + fn extend>(&mut self, iter: I) { + self.0.extend(iter) + } +} + +impl Default for AHashSet +where + T: Eq + Hash, + S: BuildHasher + Default, +{ + /// Creates an empty `AHashSet` with the `Default` value for the hasher. + #[inline] + fn default() -> AHashSet { + AHashSet(HashSet::default()) + } +} diff --git a/src/rust/vendor/ahash/src/lib.rs b/src/rust/vendor/ahash/src/lib.rs new file mode 100644 index 000000000..542fa35e9 --- /dev/null +++ b/src/rust/vendor/ahash/src/lib.rs @@ -0,0 +1,203 @@ +//! # aHash +//! +//! This hashing algorithm is intended to be a high performance, (hardware specific), keyed hash function. +//! This can be seen as a DOS resistant alternative to `FxHash`, or a fast equivalent to `SipHash`. +//! It provides a high speed hash algorithm, but where the result is not predictable without knowing a Key. +//! This allows it to be used in a `HashMap` without allowing for the possibility that an malicious user can +//! induce a collision. +//! +//! # How aHash works +//! +//! aHash uses the hardware AES instruction on x86 processors to provide a keyed hash function. +//! aHash is not a cryptographically secure hash. 
+#![deny(clippy::correctness, clippy::complexity, clippy::perf)] +#![allow(clippy::pedantic, clippy::cast_lossless, clippy::unreadable_literal)] +#![cfg_attr(all(not(test), not(feature = "std")), no_std)] +#![cfg_attr(feature = "specialize", feature(specialization))] + +#[macro_use] +mod convert; + +#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))] +mod aes_hash; +mod fallback_hash; +#[cfg(test)] +mod hash_quality_test; + +mod operations; +#[cfg(feature = "std")] +mod hash_map; +#[cfg(feature = "std")] +mod hash_set; +mod random_state; +mod specialize; + +#[cfg(feature = "compile-time-rng")] +use const_random::const_random; + +#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))] +pub use crate::aes_hash::AHasher; + +#[cfg(not(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri))))] +pub use crate::fallback_hash::AHasher; +pub use crate::random_state::RandomState; + +pub use crate::specialize::CallHasher; + +#[cfg(feature = "std")] +pub use crate::hash_map::AHashMap; +#[cfg(feature = "std")] +pub use crate::hash_set::AHashSet; +use core::hash::Hasher; + +/// Provides a default [Hasher] compile time generated constants for keys. +/// This is typically used in conjunction with [BuildHasherDefault] to create +/// [AHasher]s in order to hash the keys of the map. +/// +/// Generally it is preferable to use [RandomState] instead, so that different +/// hashmaps will have different keys. However if fixed keys are desireable this +/// may be used instead. 
+/// +/// # Example +/// ``` +/// use std::hash::BuildHasherDefault; +/// use ahash::{AHasher, RandomState}; +/// use std::collections::HashMap; +/// +/// let mut map: HashMap> = HashMap::default(); +/// map.insert(12, 34); +/// ``` +/// +/// [BuildHasherDefault]: std::hash::BuildHasherDefault +/// [Hasher]: std::hash::Hasher +/// [HashMap]: std::collections::HashMap +impl Default for AHasher { + + /// Constructs a new [AHasher] with compile time generated constants for keys if the + /// `compile-time-rng`feature is enabled. Otherwise the keys will be fixed constants. + /// This means the keys will be the same from one instance to another, + /// but different from build to the next. So if it is possible for a potential + /// attacker to have access to the compiled binary it would be better + /// to specify keys generated at runtime. + /// + /// # Examples + /// + /// ``` + /// use ahash::AHasher; + /// use std::hash::Hasher; + /// + /// let mut hasher_1 = AHasher::default(); + /// let mut hasher_2 = AHasher::default(); + /// + /// hasher_1.write_u32(1234); + /// hasher_2.write_u32(1234); + /// + /// assert_eq!(hasher_1.finish(), hasher_2.finish()); + /// ``` + #[inline] + #[cfg(feature = "compile-time-rng")] + fn default() -> AHasher { + AHasher::new_with_keys(const_random!(u128), const_random!(u128)) + } + + /// Constructs a new [AHasher] with compile time generated constants for keys if the + /// `compile-time-rng`feature is enabled. Otherwise the keys will be fixed constants. + /// This means the keys will be the same from one instance to another, + /// but different from build to the next. So if it is possible for a potential + /// attacker to have access to the compiled binary it would be better + /// to specify keys generated at runtime. 
+ /// + /// # Examples + /// + /// ``` + /// use ahash::AHasher; + /// use std::hash::Hasher; + /// + /// let mut hasher_1 = AHasher::default(); + /// let mut hasher_2 = AHasher::default(); + /// + /// hasher_1.write_u32(1234); + /// hasher_2.write_u32(1234); + /// + /// assert_eq!(hasher_1.finish(), hasher_2.finish()); + /// ``` + #[inline] + #[cfg(not(feature = "compile-time-rng"))] + fn default() -> AHasher { + const K1: u128 = (random_state::INIT_SEED[0] as u128).wrapping_mul(random_state::MULTIPLE as u128); + const K2: u128 = (random_state::INIT_SEED[1] as u128).wrapping_mul(random_state::MULTIPLE as u128); + AHasher::new_with_keys(K1, K2) + } +} + +/// Used for specialization. (Sealed) +pub(crate) trait HasherExt: Hasher { + #[doc(hidden)] + fn hash_u64(self, value: u64) -> u64; + + #[doc(hidden)] + fn short_finish(&self) -> u64; +} + +impl HasherExt for T { + #[inline] + #[cfg(feature = "specialize")] + default fn hash_u64(self, value: u64) -> u64 { + value.get_hash(self) + } + #[inline] + #[cfg(not(feature = "specialize"))] + fn hash_u64(self, value: u64) -> u64 { + value.get_hash(self) + } + #[inline] + #[cfg(feature = "specialize")] + default fn short_finish(&self) -> u64 { + self.finish() + } + #[inline] + #[cfg(not(feature = "specialize"))] + fn short_finish(&self) -> u64 { + self.finish() + } +} + +// #[inline(never)] +// #[doc(hidden)] +// pub fn hash_test(input: &[u8]) -> u64 { +// let a = AHasher::new_with_keys(11111111111_u128, 2222222222_u128); +// input.get_hash(a) +// } + +#[cfg(test)] +mod test { + use crate::convert::Convert; + use crate::*; + use std::collections::HashMap; + + #[cfg(feature = "std")] + #[test] + fn test_default_builder() { + use core::hash::BuildHasherDefault; + + let mut map = HashMap::>::default(); + map.insert(1, 3); + } + #[test] + fn test_builder() { + let mut map = HashMap::::default(); + map.insert(1, 3); + } + + #[test] + fn test_conversion() { + let input: &[u8] = b"dddddddd"; + let bytes: u64 = as_array!(input, 
8).convert(); + assert_eq!(bytes, 0x6464646464646464); + } + + #[test] + fn test_ahasher_construction() { + let _ = AHasher::new_with_keys(1234, 5678); + } +} diff --git a/src/rust/vendor/ahash/src/operations.rs b/src/rust/vendor/ahash/src/operations.rs new file mode 100644 index 000000000..0646c446c --- /dev/null +++ b/src/rust/vendor/ahash/src/operations.rs @@ -0,0 +1,277 @@ +use crate::convert::*; + +/// This is a constant with a lot of special properties found by automated search. +/// See the unit tests below. (Below are alternative values) +#[cfg(all(target_feature = "ssse3", not(miri)))] +const SHUFFLE_MASK: u128 = 0x020a0700_0c01030e_050f0d08_06090b04_u128; +//const SHUFFLE_MASK: u128 = 0x000d0702_0a040301_05080f0c_0e0b0609_u128; +//const SHUFFLE_MASK: u128 = 0x040A0700_030E0106_0D050F08_020B0C09_u128; + +pub(crate) const fn folded_multiply(s: u64, by: u64) -> u64 { + let result = (s as u128).wrapping_mul(by as u128); + ((result & 0xffff_ffff_ffff_ffff) as u64) ^ ((result >> 64) as u64) +} + +#[inline(always)] +pub(crate) fn shuffle(a: u128) -> u128 { + #[cfg(all(target_feature = "ssse3", not(miri)))] + { + use core::mem::transmute; + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + unsafe { + transmute(_mm_shuffle_epi8(transmute(a), transmute(SHUFFLE_MASK))) + } + } + #[cfg(not(all(target_feature = "ssse3", not(miri))))] + { + a.swap_bytes() + } +} + +#[allow(unused)] //not used by fallback +#[inline(always)] +pub(crate) fn add_and_shuffle(a: u128, b: u128) -> u128 { + let sum = add_by_64s(a.convert(), b.convert()); + shuffle(sum.convert()) +} + +#[allow(unused)] //not used by fallbac +#[inline(always)] +pub(crate) fn shuffle_and_add(base: u128, to_add: u128) -> u128 { + let shuffled: [u64; 2] = shuffle(base).convert(); + add_by_64s(shuffled, to_add.convert()).convert() +} + +#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse2", not(miri)))] 
+#[inline(always)] +pub(crate) fn add_by_64s(a: [u64; 2], b: [u64; 2]) -> [u64; 2] { + use core::mem::transmute; + unsafe { + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + transmute(_mm_add_epi64(transmute(a), transmute(b))) + } +} + +#[cfg(not(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse2", not(miri))))] +#[inline(always)] +pub(crate) fn add_by_64s(a: [u64; 2], b: [u64; 2]) -> [u64; 2] { + [a[0].wrapping_add(b[0]), a[1].wrapping_add(b[1])] +} + +#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))] +#[allow(unused)] +#[inline(always)] +pub(crate) fn aesenc(value: u128, xor: u128) -> u128 { + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + use core::mem::transmute; + unsafe { + let value = transmute(value); + transmute(_mm_aesenc_si128(value, transmute(xor))) + } +} +#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)))] +#[allow(unused)] +#[inline(always)] +pub(crate) fn aesdec(value: u128, xor: u128) -> u128 { + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + use core::mem::transmute; + unsafe { + let value = transmute(value); + transmute(_mm_aesdec_si128(value, transmute(xor))) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::convert::Convert; + + // This is code to search for the shuffle constant + // + //thread_local! 
{ static MASK: Cell = Cell::new(0); } + // + // fn shuffle(a: u128) -> u128 { + // use std::intrinsics::transmute; + // #[cfg(target_arch = "x86")] + // use core::arch::x86::*; + // #[cfg(target_arch = "x86_64")] + // use core::arch::x86_64::*; + // MASK.with(|mask| { + // unsafe { transmute(_mm_shuffle_epi8(transmute(a), transmute(mask.get()))) } + // }) + // } + // + // #[test] + // fn find_shuffle() { + // use rand::prelude::*; + // use SliceRandom; + // use std::panic; + // use std::io::Write; + // + // let mut value: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 ,13, 14, 15]; + // let mut rand = thread_rng(); + // let mut successful_list = HashMap::new(); + // for _attempt in 0..10000000 { + // rand.shuffle(&mut value); + // let test_val = value.convert(); + // MASK.with(|mask| { + // mask.set(test_val); + // }); + // if let Ok(successful) = panic::catch_unwind(|| { + // test_shuffle_does_not_collide_with_aes(); + // test_shuffle_moves_high_bits(); + // test_shuffle_moves_every_value(); + // //test_shuffle_does_not_loop(); + // value + // }) { + // let successful: u128 = successful.convert(); + // successful_list.insert(successful, iters_before_loop()); + // } + // } + // let write_file = File::create("/tmp/output").unwrap(); + // let mut writer = BufWriter::new(&write_file); + // + // for success in successful_list { + // writeln!(writer, "Found successful: {:x?} - {:?}", success.0, success.1); + // } + // } + // + // fn iters_before_loop() -> u32 { + // let numbered = 0x00112233_44556677_8899AABB_CCDDEEFF; + // let mut shuffled = shuffle(numbered); + // let mut count = 0; + // loop { + // // println!("{:>16x}", shuffled); + // if numbered == shuffled { + // break; + // } + // count += 1; + // shuffled = shuffle(shuffled); + // } + // count + // } + + #[cfg(all( + any(target_arch = "x86", target_arch = "x86_64"), + target_feature = "ssse3", + target_feature = "aes", + not(miri) + ))] + #[test] + fn test_shuffle_does_not_collide_with_aes() { + let mut 
value: [u8; 16] = [0; 16]; + let zero_mask_enc = aesenc(0, 0); + let zero_mask_dec = aesdec(0, 0); + for index in 0..16 { + value[index] = 1; + let excluded_positions_enc: [u8; 16] = aesenc(value.convert(), zero_mask_enc).convert(); + let excluded_positions_dec: [u8; 16] = aesdec(value.convert(), zero_mask_dec).convert(); + let actual_location: [u8; 16] = shuffle(value.convert()).convert(); + for pos in 0..16 { + if actual_location[pos] != 0 { + assert_eq!( + 0, excluded_positions_enc[pos], + "Forward Overlap between {:?} and {:?} at {}", + excluded_positions_enc, actual_location, index + ); + assert_eq!( + 0, excluded_positions_dec[pos], + "Reverse Overlap between {:?} and {:?} at {}", + excluded_positions_dec, actual_location, index + ); + } + } + value[index] = 0; + } + } + + #[test] + fn test_shuffle_contains_each_value() { + let value: [u8; 16] = 0x00010203_04050607_08090A0B_0C0D0E0F_u128.convert(); + let shuffled: [u8; 16] = shuffle(value.convert()).convert(); + for index in 0..16_u8 { + assert!(shuffled.contains(&index), "Value is missing {}", index); + } + } + + #[test] + fn test_shuffle_moves_every_value() { + let mut value: [u8; 16] = [0; 16]; + for index in 0..16 { + value[index] = 1; + let shuffled: [u8; 16] = shuffle(value.convert()).convert(); + assert_eq!(0, shuffled[index], "Value is not moved {}", index); + value[index] = 0; + } + } + + #[test] + fn test_shuffle_moves_high_bits() { + assert!( + shuffle(1) > (1_u128 << 80), + "Low bits must be moved to other half {:?} -> {:?}", + 0, + shuffle(1) + ); + + assert!( + shuffle(1_u128 << 58) >= (1_u128 << 64), + "High bits must be moved to other half {:?} -> {:?}", + 7, + shuffle(1_u128 << 58) + ); + assert!( + shuffle(1_u128 << 58) < (1_u128 << 112), + "High bits must not remain high {:?} -> {:?}", + 7, + shuffle(1_u128 << 58) + ); + assert!( + shuffle(1_u128 << 64) < (1_u128 << 64), + "Low bits must be moved to other half {:?} -> {:?}", + 8, + shuffle(1_u128 << 64) + ); + assert!( + shuffle(1_u128 << 
64) >= (1_u128 << 16), + "Low bits must not remain low {:?} -> {:?}", + 8, + shuffle(1_u128 << 64) + ); + + assert!( + shuffle(1_u128 << 120) < (1_u128 << 50), + "High bits must be moved to low half {:?} -> {:?}", + 15, + shuffle(1_u128 << 120) + ); + } + + #[cfg(all( + any(target_arch = "x86", target_arch = "x86_64"), + target_feature = "ssse3", + not(miri) + ))] + #[test] + fn test_shuffle_does_not_loop() { + let numbered = 0x00112233_44556677_8899AABB_CCDDEEFF; + let mut shuffled = shuffle(numbered); + for count in 0..100 { + // println!("{:>16x}", shuffled); + assert_ne!(numbered, shuffled, "Equal after {} vs {:x}", count, shuffled); + shuffled = shuffle(shuffled); + } + } +} diff --git a/src/rust/vendor/ahash/src/random_state.rs b/src/rust/vendor/ahash/src/random_state.rs new file mode 100644 index 000000000..0936556c3 --- /dev/null +++ b/src/rust/vendor/ahash/src/random_state.rs @@ -0,0 +1,153 @@ +use crate::convert::Convert; +use crate::AHasher; +use core::fmt; +use core::hash::BuildHasher; +use core::sync::atomic::AtomicUsize; +use core::sync::atomic::Ordering; + +use crate::operations::folded_multiply; +#[cfg(all(feature = "compile-time-rng", not(test)))] +use const_random::const_random; + +///This constant come from Kunth's prng +pub(crate) const MULTIPLE: u64 = 6364136223846793005; +pub(crate) const INCREMENT: u64 = 1442695040888963407; + +// Const random provides randomized starting key with no runtime cost. 
+#[cfg(all(feature = "compile-time-rng", not(test)))] +pub(crate) const INIT_SEED: [u64; 2] = [const_random!(u64), const_random!(u64)]; + +#[cfg(any(not(feature = "compile-time-rng"), test))] +pub(crate) const INIT_SEED: [u64; 2] = [0x2360_ED05_1FC6_5DA4, 0x4385_DF64_9FCC_F645]; //From PCG-64 + +#[cfg(all(feature = "compile-time-rng", not(test)))] +static SEED: AtomicUsize = AtomicUsize::new(const_random!(u64) as usize); + +#[cfg(any(not(feature = "compile-time-rng"), test))] +static SEED: AtomicUsize = AtomicUsize::new(INCREMENT as usize); + +/// Provides a [Hasher] factory. This is typically used (e.g. by [HashMap]) to create +/// [AHasher]s in order to hash the keys of the map. See `build_hasher` below. +/// +/// [build_hasher]: ahash:: +/// [Hasher]: std::hash::Hasher +/// [BuildHasher]: std::hash::BuildHasher +/// [HashMap]: std::collections::HashMap +#[derive(Clone)] +pub struct RandomState { + pub(crate) k0: u64, + pub(crate) k1: u64, + pub(crate) k2: u64, + pub(crate) k3: u64, +} + +impl fmt::Debug for RandomState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("RandomState { .. }") + } +} + +impl RandomState { + #[inline] + pub fn new() -> RandomState { + //Using a self pointer. When running with ASLR this is a random value. + let previous = SEED.load(Ordering::Relaxed) as u64; + let stack_mem_loc = &previous as *const _ as u64; + //This is similar to the update function in the fallback. + //only one multiply is needed because memory locations are not under an attackers control. + let current_seed = previous + .wrapping_add(stack_mem_loc) + .wrapping_mul(MULTIPLE) + .rotate_right(31); + SEED.store(current_seed as usize, Ordering::Relaxed); + let (k0, k1, k2, k3) = scramble_keys(&SEED as *const _ as u64, current_seed); + RandomState { k0, k1, k2, k3 } + } + + /// Allows for explicitly setting the seeds to used. 
+ pub const fn with_seeds(k0: u64, k1: u64) -> RandomState { + let (k0, k1, k2, k3) = scramble_keys(k0, k1); + RandomState { k0, k1, k2, k3 } + } +} + +/// This is based on the fallback hasher +#[inline] +pub(crate) const fn scramble_keys(a: u64, b: u64) -> (u64, u64, u64, u64) { + let k1 = folded_multiply(INIT_SEED[0] ^ a, MULTIPLE).wrapping_add(b); + let k2 = folded_multiply(INIT_SEED[0] ^ b, MULTIPLE).wrapping_add(a); + let k3 = folded_multiply(INIT_SEED[1] ^ a, MULTIPLE).wrapping_add(b); + let k4 = folded_multiply(INIT_SEED[1] ^ b, MULTIPLE).wrapping_add(a); + let combined = folded_multiply(a ^ b, MULTIPLE).wrapping_add(INCREMENT); + let rot1 = (combined & 63) as u32; + let rot2 = ((combined >> 16) & 63) as u32; + let rot3 = ((combined >> 32) & 63) as u32; + let rot4 = ((combined >> 48) & 63) as u32; + ( + k1.rotate_left(rot1), + k2.rotate_left(rot2), + k3.rotate_left(rot3), + k4.rotate_left(rot4), + ) +} + +impl Default for RandomState { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl BuildHasher for RandomState { + type Hasher = AHasher; + + /// Constructs a new [AHasher] with keys based on compile time generated constants** and the location + /// this object was constructed at in memory. This means that two different [BuildHasher]s will will generate + /// [AHasher]s that will return different hashcodes, but [Hasher]s created from the same [BuildHasher] + /// will generate the same hashes for the same input data. + /// + /// ** - only if the `compile-time-rng` feature is enabled. 
+ /// + /// # Examples + /// + /// ``` + /// use ahash::{AHasher, RandomState}; + /// use std::hash::{Hasher, BuildHasher}; + /// + /// let build_hasher = RandomState::new(); + /// let mut hasher_1 = build_hasher.build_hasher(); + /// let mut hasher_2 = build_hasher.build_hasher(); + /// + /// hasher_1.write_u32(1234); + /// hasher_2.write_u32(1234); + /// + /// assert_eq!(hasher_1.finish(), hasher_2.finish()); + /// + /// let other_build_hasher = RandomState::new(); + /// let mut different_hasher = other_build_hasher.build_hasher(); + /// different_hasher.write_u32(1234); + /// assert_ne!(different_hasher.finish(), hasher_1.finish()); + /// ``` + /// [Hasher]: std::hash::Hasher + /// [BuildHasher]: std::hash::BuildHasher + /// [HashMap]: std::collections::HashMap + #[inline] + fn build_hasher(&self) -> AHasher { + AHasher::new_with_keys([self.k0, self.k1].convert(), [self.k2, self.k3].convert()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_const_rand_disabled() { + assert_eq!(INIT_SEED, [0x2360_ED05_1FC6_5DA4, 0x4385_DF64_9FCC_F645]); + } + + #[test] + fn test_with_seeds_const() { + const _CONST_RANDOM_STATE: RandomState = RandomState::with_seeds(17, 19); + } +} diff --git a/src/rust/vendor/ahash/src/specialize.rs b/src/rust/vendor/ahash/src/specialize.rs new file mode 100644 index 000000000..2c0bc2d8a --- /dev/null +++ b/src/rust/vendor/ahash/src/specialize.rs @@ -0,0 +1,162 @@ +#[cfg(feature = "specialize")] +use crate::HasherExt; +use core::hash::Hash; +use core::hash::Hasher; + +/// Provides a way to get an optimized hasher for a given data type. +/// Rather than using a Hasher generically which can hash any value, this provides a way to get a specialized hash +/// for a specific type. So this may be faster for primitive types. It does however consume the hasher in the process. 
+/// #Example +/// ``` +/// use std::hash::BuildHasher; +/// use ahash::RandomState; +/// use ahash::CallHasher; +/// +/// let hash_builder = RandomState::new(); +/// //... +/// let value = 17; +/// let hash = value.get_hash(hash_builder.build_hasher()); +/// ``` +pub trait CallHasher: Hash { + fn get_hash(&self, hasher: H) -> u64; +} + +#[cfg(not(feature = "specialize"))] +impl CallHasher for T +where + T: Hash, +{ + #[inline] + fn get_hash(&self, mut hasher: H) -> u64 { + self.hash(&mut hasher); + hasher.finish() + } +} + +#[cfg(feature = "specialize")] +impl CallHasher for T +where + T: Hash, +{ + #[inline] + default fn get_hash(&self, mut hasher: H) -> u64 { + self.hash(&mut hasher); + hasher.finish() + } +} + +macro_rules! call_hasher_impl { + ($typ:ty) => { + #[cfg(feature = "specialize")] + impl CallHasher for $typ { + #[inline] + fn get_hash(&self, hasher: H) -> u64 { + hasher.hash_u64(*self as u64) + } + } + }; +} +call_hasher_impl!(u8); +call_hasher_impl!(u16); +call_hasher_impl!(u32); +call_hasher_impl!(u64); +call_hasher_impl!(i8); +call_hasher_impl!(i16); +call_hasher_impl!(i32); +call_hasher_impl!(i64); + +#[cfg(feature = "specialize")] +impl CallHasher for u128 { + #[inline] + fn get_hash(&self, mut hasher: H) -> u64 { + hasher.write_u128(*self); + hasher.short_finish() + } +} + +#[cfg(feature = "specialize")] +impl CallHasher for i128 { + #[inline] + fn get_hash(&self, mut hasher: H) -> u64 { + hasher.write_u128(*self as u128); + hasher.short_finish() + } +} + +#[cfg(feature = "specialize")] +impl CallHasher for [u8] { + #[inline] + fn get_hash(&self, mut hasher: H) -> u64 { + hasher.write(self); + hasher.finish() + } +} + +#[cfg(all(feature = "specialize", feature = "std"))] +impl CallHasher for Vec { + #[inline] + fn get_hash(&self, mut hasher: H) -> u64 { + hasher.write(self); + hasher.finish() + } +} + +#[cfg(feature = "specialize")] +impl CallHasher for str { + #[inline] + fn get_hash(&self, mut hasher: H) -> u64 { + 
hasher.write(self.as_bytes()); + hasher.finish() + } +} + +#[cfg(all(feature = "specialize", feature = "std"))] +impl CallHasher for String { + #[inline] + fn get_hash(&self, mut hasher: H) -> u64 { + hasher.write(self.as_bytes()); + hasher.finish() + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::*; + + #[test] + #[cfg(feature = "specialize")] + pub fn test_specialized_invoked() { + let shortened = 0_u64.get_hash(AHasher::new_with_keys(1, 2)); + let mut hasher = AHasher::new_with_keys(1, 2); + 0_u64.hash(&mut hasher); + assert_ne!(hasher.finish(), shortened); + } + + /// Tests that some non-trivial transformation takes place. + #[test] + pub fn test_input_processed() { + let hasher = || AHasher::new_with_keys(3, 2); + assert_ne!(0, 0_u64.get_hash(hasher())); + assert_ne!(1, 0_u64.get_hash(hasher())); + assert_ne!(2, 0_u64.get_hash(hasher())); + assert_ne!(3, 0_u64.get_hash(hasher())); + assert_ne!(4, 0_u64.get_hash(hasher())); + assert_ne!(5, 0_u64.get_hash(hasher())); + + assert_ne!(0, 1_u64.get_hash(hasher())); + assert_ne!(1, 1_u64.get_hash(hasher())); + assert_ne!(2, 1_u64.get_hash(hasher())); + assert_ne!(3, 1_u64.get_hash(hasher())); + assert_ne!(4, 1_u64.get_hash(hasher())); + assert_ne!(5, 1_u64.get_hash(hasher())); + + let xored = 0_u64.get_hash(hasher()) ^ 1_u64.get_hash(hasher()); + assert_ne!(0, xored); + assert_ne!(1, xored); + assert_ne!(2, xored); + assert_ne!(3, xored); + assert_ne!(4, xored); + assert_ne!(5, xored); + } +} diff --git a/src/rust/vendor/ahash/tests/bench.rs b/src/rust/vendor/ahash/tests/bench.rs new file mode 100644 index 000000000..c03a0f56e --- /dev/null +++ b/src/rust/vendor/ahash/tests/bench.rs @@ -0,0 +1,224 @@ +use ahash::{AHasher, CallHasher}; +use criterion::*; +use fxhash::FxHasher; +use std::collections::hash_map::DefaultHasher; +use std::hash::{Hash, Hasher}; + +#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes"))] +fn aeshash(b: &H) -> u64 { + let hasher = 
AHasher::default(); + b.get_hash(hasher) +} +#[cfg(not(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes")))] +fn aeshash(_b: &H) -> u64 { + panic!("aes must be enabled") +} + +#[cfg(not(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes")))] +fn fallbackhash(b: &H) -> u64 { + let hasher = AHasher::default(); + b.get_hash(hasher) +} +#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes"))] +fn fallbackhash(_b: &H) -> u64 { + panic!("aes must be disabled") +} + +fn fnvhash(b: &H) -> u64 { + let mut hasher = fnv::FnvHasher::default(); + b.hash(&mut hasher); + hasher.finish() +} + +fn siphash(b: &H) -> u64 { + let mut hasher = DefaultHasher::default(); + b.hash(&mut hasher); + hasher.finish() +} + +fn fxhash(b: &H) -> u64 { + let mut hasher = FxHasher::default(); + b.hash(&mut hasher); + hasher.finish() +} + +fn seahash(b: &H) -> u64 { + let mut hasher = seahash::SeaHasher::default(); + b.hash(&mut hasher); + hasher.finish() +} + +const STRING_LENGTHS: [u32; 12] = [1, 3, 4, 7, 8, 15, 16, 24, 33, 68, 132, 1024]; + +fn gen_strings() -> Vec { + STRING_LENGTHS + .iter() + .map(|len| { + let mut string = String::default(); + for pos in 1..=*len { + let c = (48 + (pos % 10) as u8) as char; + string.push(c); + } + string + }) + .collect() +} + +const U8_VALUES: [u8; 1] = [123]; +const U16_VALUES: [u16; 1] = [1234]; +const U32_VALUES: [u32; 1] = [12345678]; +const U64_VALUES: [u64; 1] = [1234567890123456]; +const U128_VALUES: [u128; 1] = [12345678901234567890123456789012]; + +fn bench_ahash(c: &mut Criterion) { + c.bench( + "aeshash", + ParameterizedBenchmark::new("u8", |b, &s| b.iter(|| black_box(aeshash(s))), &U8_VALUES), + ); + c.bench( + "aeshash", + ParameterizedBenchmark::new("u16", |b, &s| b.iter(|| black_box(aeshash(s))), &U16_VALUES), + ); + c.bench( + "aeshash", + ParameterizedBenchmark::new("u32", |b, &s| b.iter(|| black_box(aeshash(s))), &U32_VALUES), + ); + c.bench( + "aeshash", + 
ParameterizedBenchmark::new("u64", |b, &s| b.iter(|| black_box(aeshash(s))), &U64_VALUES), + ); + c.bench( + "aeshash", + ParameterizedBenchmark::new("u128", |b, &s| b.iter(|| black_box(aeshash(s))), &U128_VALUES), + ); + c.bench( + "aeshash", + ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(aeshash(s))), gen_strings()), + ); +} + +fn bench_fallback(c: &mut Criterion) { + c.bench( + "fallback", + ParameterizedBenchmark::new("u8", |b, &s| b.iter(|| black_box(fallbackhash(s))), &U8_VALUES), + ); + c.bench( + "fallback", + ParameterizedBenchmark::new("u16", |b, &s| b.iter(|| black_box(fallbackhash(s))), &U16_VALUES), + ); + c.bench( + "fallback", + ParameterizedBenchmark::new("u32", |b, &s| b.iter(|| black_box(fallbackhash(s))), &U32_VALUES), + ); + c.bench( + "fallback", + ParameterizedBenchmark::new("u64", |b, &s| b.iter(|| black_box(fallbackhash(s))), &U64_VALUES), + ); + c.bench( + "fallback", + ParameterizedBenchmark::new("u128", |b, &s| b.iter(|| black_box(fallbackhash(s))), &U128_VALUES), + ); + c.bench( + "fallback", + ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(fallbackhash(s))), gen_strings()), + ); +} + +fn bench_fx(c: &mut Criterion) { + c.bench( + "fx", + ParameterizedBenchmark::new("u8", |b, &s| b.iter(|| black_box(fxhash(s))), &U8_VALUES), + ); + c.bench( + "fx", + ParameterizedBenchmark::new("u16", |b, &s| b.iter(|| black_box(fxhash(s))), &U16_VALUES), + ); + c.bench( + "fx", + ParameterizedBenchmark::new("u32", |b, &s| b.iter(|| black_box(fxhash(s))), &U32_VALUES), + ); + c.bench( + "fx", + ParameterizedBenchmark::new("u64", |b, &s| b.iter(|| black_box(fxhash(s))), &U64_VALUES), + ); + c.bench( + "fx", + ParameterizedBenchmark::new("u128", |b, &s| b.iter(|| black_box(fxhash(s))), &U128_VALUES), + ); + c.bench( + "fx", + ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(fxhash(s))), gen_strings()), + ); +} + +fn bench_fnv(c: &mut Criterion) { + c.bench( + "fnv", + ParameterizedBenchmark::new("u8", 
|b, &s| b.iter(|| black_box(fnvhash(s))), &U8_VALUES), + ); + c.bench( + "fnv", + ParameterizedBenchmark::new("u16", |b, &s| b.iter(|| black_box(fnvhash(s))), &U16_VALUES), + ); + c.bench( + "fnv", + ParameterizedBenchmark::new("u32", |b, &s| b.iter(|| black_box(fnvhash(s))), &U32_VALUES), + ); + c.bench( + "fnv", + ParameterizedBenchmark::new("u64", |b, &s| b.iter(|| black_box(fnvhash(s))), &U64_VALUES), + ); + c.bench( + "fnv", + ParameterizedBenchmark::new("u128", |b, &s| b.iter(|| black_box(fnvhash(s))), &U128_VALUES), + ); + c.bench( + "fnv", + ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(fnvhash(s))), gen_strings()), + ); +} + +fn bench_sea(c: &mut Criterion) { + c.bench( + "sea", + ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(seahash(s))), gen_strings()), + ); +} + +fn bench_sip(c: &mut Criterion) { + c.bench( + "sip", + ParameterizedBenchmark::new("u8", |b, &s| b.iter(|| black_box(siphash(s))), &U8_VALUES), + ); + c.bench( + "sip", + ParameterizedBenchmark::new("u16", |b, &s| b.iter(|| black_box(siphash(s))), &U16_VALUES), + ); + c.bench( + "sip", + ParameterizedBenchmark::new("u32", |b, &s| b.iter(|| black_box(siphash(s))), &U32_VALUES), + ); + c.bench( + "sip", + ParameterizedBenchmark::new("u64", |b, &s| b.iter(|| black_box(siphash(s))), &U64_VALUES), + ); + c.bench( + "sip", + ParameterizedBenchmark::new("u128", |b, &s| b.iter(|| black_box(siphash(s))), &U128_VALUES), + ); + c.bench( + "sip", + ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(siphash(s))), gen_strings()), + ); +} + +criterion_main!(benches); +criterion_group!( + benches, + bench_ahash, + bench_fallback, + bench_fx, + bench_fnv, + bench_sea, + bench_sip +); diff --git a/src/rust/vendor/ahash/tests/map_tests.rs b/src/rust/vendor/ahash/tests/map_tests.rs new file mode 100644 index 000000000..70e4a86cf --- /dev/null +++ b/src/rust/vendor/ahash/tests/map_tests.rs @@ -0,0 +1,204 @@ +use std::hash::{Hash, Hasher}; + +use criterion::*; 
+use fxhash::FxHasher; + +use ahash::{AHasher, CallHasher}; + +fn gen_word_pairs() -> Vec { + let words: Vec<_> = r#" +a, ability, able, about, above, accept, according, account, across, act, action, +activity, actually, add, address, administration, admit, adult, affect, after, +again, against, age, agency, agent, ago, agree, agreement, ahead, air, all, +allow, almost, alone, along, already, also, although, always, American, among, +amount, analysis, and, animal, another, answer, any, anyone, anything, appear, +apply, approach, area, argue, arm, around, arrive, art, article, artist, as, +ask, assume, at, attack, attention, attorney, audience, author, authority, +available, avoid, away, baby, back, bad, bag, ball, bank, bar, base, be, beat, +beautiful, because, become, bed, before, begin, behavior, behind, believe, +benefit, best, better, between, beyond, big, bill, billion, bit, black, blood, +blue, board, body, book, born, both, box, boy, break, bring, brother, budget, +build, building, business, but, buy, by, call, camera, campaign, can, cancer, +candidate, capital, car, card, care, career, carry, case, catch, cause, cell, +center, central, century, certain, certainly, chair, challenge, chance, change, +character, charge, check, child, choice, choose, church, citizen, city, civil, +claim, class, clear, clearly, close, coach, cold, collection, college, color, +come, commercial, common, community, company, compare, computer, concern, +condition, conference, Congress, consider, consumer, contain, continue, control, +cost, could, country, couple, course, court, cover, create, crime, cultural, +culture, cup, current, customer, cut, dark, data, daughter, day, dead, deal, +death, debate, decade, decide, decision, deep, defense, degree, Democrat, +democratic, describe, design, despite, detail, determine, develop, development, +die, difference, different, difficult, dinner, direction, director, discover, +discuss, discussion, disease, do, doctor, dog, door, down, draw, 
dream, drive, +drop, drug, during, each, early, east, easy, eat, economic, economy, edge, +education, effect, effort, eight, either, election, else, employee, end, energy, +enjoy, enough, enter, entire, environment, environmental, especially, establish, +even, evening, event, ever, every, everybody, everyone, everything, evidence, +exactly, example, executive, exist, expect, experience, expert, explain, eye, +face, fact, factor, fail, fall, family, far, fast, father, fear, federal, feel, +feeling, few, field, fight, figure, fill, film, final, finally, financial, find, +fine, finger, finish, fire, firm, first, fish, five, floor, fly, focus, follow, +food, foot, for, force, foreign, forget, form, former, forward, four, free, +friend, from, front, full, fund, future, game, garden, gas, general, generation, +get, girl, give, glass, go, goal, good, government, great, green, ground, group, +grow, growth, guess, gun, guy, hair, half, hand, hang, happen, happy, hard, +have, he, head, health, hear, heart, heat, heavy, help, her, here, herself, +high, him, himself, his, history, hit, hold, home, hope, hospital, hot, hotel, +hour, house, how, however, huge, human, hundred, husband, I, idea, identify, if, +image, imagine, impact, important, improve, in, include, including, increase, +indeed, indicate, individual, industry, information, inside, instead, +institution, interest, interesting, international, interview, into, investment, +involve, issue, it, item, its, itself, job, join, just, keep, key, kid, kill, +kind, kitchen, know, knowledge, land, language, large, last, late, later, laugh, +law, lawyer, lay, lead, leader, learn, least, leave, left, leg, legal, less, +let, letter, level, lie, life, light, like, likely, line, list, listen, little, +live, local, long, look, lose, loss, lot, love, low, machine, magazine, main, +maintain, major, majority, make, man, manage, management, manager, many, market, +marriage, material, matter, may, maybe, me, mean, measure, media, 
medical, meet, +meeting, member, memory, mention, message, method, middle, might, military, +million, mind, minute, miss, mission, model, modern, moment, money, month, more, +morning, most, mother, mouth, move, movement, movie, Mr, Mrs, much, music, must, +my, myself, name, nation, national, natural, nature, near, nearly, necessary, +need, network, never, new, news, newspaper, next, nice, night, no, none, nor, +north, not, note, nothing, notice, now, n't, number, occur, of, off, offer, +office, officer, official, often, oh, oil, ok, old, on, once, one, only, onto, +open, operation, opportunity, option, or, order, organization, other, others, +our, out, outside, over, own, owner, page, pain, painting, paper, parent, part, +participant, particular, particularly, partner, party, pass, past, patient, +pattern, pay, peace, people, per, perform, performance, perhaps, period, person, +personal, phone, physical, pick, picture, piece, place, plan, plant, play, +player, PM, point, police, policy, political, politics, poor, popular, +population, position, positive, possible, power, practice, prepare, present, +president, pressure, pretty, prevent, price, private, probably, problem, +process, produce, product, production, professional, professor, program, +project, property, protect, prove, provide, public, pull, purpose, push, put, +quality, question, quickly, quite, race, radio, raise, range, rate, rather, +reach, read, ready, real, reality, realize, really, reason, receive, recent, +recently, recognize, record, red, reduce, reflect, region, relate, relationship, +religious, remain, remember, remove, report, represent, Republican, require, +research, resource, respond, response, responsibility, rest, result, return, +reveal, rich, right, rise, risk, road, rock, role, room, rule, run, safe, same, +save, say, scene, school, science, scientist, score, sea, season, seat, second, +section, security, see, seek, seem, sell, send, senior, sense, series, serious, +serve, service, 
set, seven, several, sex, sexual, shake, share, she, shoot, +short, shot, should, shoulder, show, side, sign, significant, similar, simple, +simply, since, sing, single, sister, sit, site, situation, six, size, skill, +skin, small, smile, so, social, society, soldier, some, somebody, someone, +something, sometimes, son, song, soon, sort, sound, source, south, southern, +space, speak, special, specific, speech, spend, sport, spring, staff, stage, +stand, standard, star, start, state, statement, station, stay, step, still, +stock, stop, store, story, strategy, street, strong, structure, student, study, +stuff, style, subject, success, successful, such, suddenly, suffer, suggest, +summer, support, sure, surface, system, table, take, talk, task, tax, teach, +teacher, team, technology, television, tell, ten, tend, term, test, than, thank, +that, the, their, them, themselves, then, theory, there, these, they, thing, +think, third, this, those, though, thought, thousand, threat, three, through, +throughout, throw, thus, time, to, today, together, tonight, too, top, total, +tough, toward, town, trade, traditional, training, travel, treat, treatment, +tree, trial, trip, trouble, true, truth, try, turn, TV, two, type, under, +understand, unit, until, up, upon, us, use, usually, value, various, very, +victim, view, violence, visit, voice, vote, wait, walk, wall, want, war, watch, +water, way, we, weapon, wear, week, weight, well, west, western, what, whatever, +when, where, whether, which, while, white, who, whole, whom, whose, why, wide, +wife, will, win, wind, window, wish, with, within, without, woman, wonder, word, +work, worker, world, worry, would, write, writer, wrong, yard, yeah, year, yes, +yet, you, young, your, yourself"# + .split(',') + .map(|word| word.trim()) + .collect(); + + let mut word_pairs: Vec<_> = Vec::new(); + for word in &words { + for other_word in &words { + word_pairs.push(word.to_string() + " " + other_word); + } + } + assert_eq!(1_000_000, 
word_pairs.len()); + word_pairs +} + +#[allow(unused)] // False positive +fn test_hash_common_words(hasher: impl Fn() -> T) { + let word_pairs: Vec<_> = gen_word_pairs(); + check_for_collisions(&hasher, &word_pairs, 32); +} + +#[allow(unused)] // False positive +fn check_for_collisions(hasher: &impl Fn() -> T, items: &[H], bucket_count: usize) { + let mut buckets = vec![0; bucket_count]; + for item in items { + let value = hash(item, &hasher) as usize; + buckets[value % bucket_count] += 1; + } + let mean = items.len() / bucket_count; + let max = *buckets.iter().max().unwrap(); + let min = *buckets.iter().min().unwrap(); + assert!( + (min as f64) > (mean as f64) * 0.95, + "min: {}, max:{}, {:?}", + min, + max, + buckets + ); + assert!( + (max as f64) < (mean as f64) * 1.05, + "min: {}, max:{}, {:?}", + min, + max, + buckets + ); +} + +#[allow(unused)] // False positive +fn hash(b: &impl Hash, hasher: &dyn Fn() -> T) -> u64 { + let hasher = hasher(); + b.get_hash(hasher) +} + +#[test] +fn test_bucket_distribution() { + let hasher = || AHasher::new_with_keys(123456789, 987654321); + test_hash_common_words(&hasher); + let sequence: Vec<_> = (0..320000).collect(); + check_for_collisions(&hasher, &sequence, 32); + let sequence: Vec<_> = (0..2560000).collect(); + check_for_collisions(&hasher, &sequence, 256); + let sequence: Vec<_> = (0..320000).map(|i| i * 1024).collect(); + check_for_collisions(&hasher, &sequence, 32); + let sequence: Vec<_> = (0..2560000_u64).map(|i| i * 1024).collect(); + check_for_collisions(&hasher, &sequence, 256); +} + +fn ahash_vec(b: &Vec) -> u64 { + let mut total: u64 = 0; + for item in b { + let mut hasher = AHasher::new_with_keys(1234, 5678); + item.hash(&mut hasher); + total = total.wrapping_add(hasher.finish()); + } + total +} + +fn fxhash_vec(b: &Vec) -> u64 { + let mut total: u64 = 0; + for item in b { + let mut hasher = FxHasher::default(); + item.hash(&mut hasher); + total = total.wrapping_add(hasher.finish()); + } + total +} + +fn 
bench_ahash_words(c: &mut Criterion) { + let words = gen_word_pairs(); + c.bench_function("aes_words", |b| b.iter(|| black_box(ahash_vec(&words)))); +} + +fn bench_fx_words(c: &mut Criterion) { + let words = gen_word_pairs(); + c.bench_function("fx_words", |b| b.iter(|| black_box(fxhash_vec(&words)))); +} + +criterion_main!(benches); +criterion_group!(benches, bench_ahash_words, bench_fx_words,); diff --git a/src/rust/vendor/ahash/tests/nopanic.rs b/src/rust/vendor/ahash/tests/nopanic.rs new file mode 100644 index 000000000..f3d9361d2 --- /dev/null +++ b/src/rust/vendor/ahash/tests/nopanic.rs @@ -0,0 +1,54 @@ +use ahash::{AHasher, CallHasher, RandomState}; +use std::hash::BuildHasher; + +#[macro_use] +extern crate no_panic; + +#[inline(never)] +#[no_panic] +fn hash_test_final(num: i32, string: &str) -> (u64, u64) { + use core::hash::Hasher; + let mut hasher1 = AHasher::new_with_keys(1, 2); + let mut hasher2 = AHasher::new_with_keys(3, 4); + hasher1.write_i32(num); + hasher2.write(string.as_bytes()); + (hasher1.finish(), hasher2.finish()) +} + +#[inline(never)] +fn hash_test_final_wrapper(num: i32, string: &str) { + hash_test_final(num, string); +} + +#[inline(never)] +#[no_panic] +fn hash_test_specialize(num: i32, string: &str) -> (u64, u64) { + let hasher1 = AHasher::new_with_keys(1, 2); + let hasher2 = AHasher::new_with_keys(1, 2); + (num.get_hash(hasher1), string.as_bytes().get_hash(hasher2)) +} + +#[inline(never)] +fn hash_test_random_wrapper(num: i32, string: &str) { + hash_test_specialize(num, string); +} + +#[inline(never)] +#[no_panic] +fn hash_test_random(num: i32, string: &str) -> (u64, u64) { + let hasher1 = RandomState::with_seeds(1, 2).build_hasher(); + let hasher2 = RandomState::with_seeds(1, 2).build_hasher(); + (num.get_hash(hasher1), string.as_bytes().get_hash(hasher2)) +} + +#[inline(never)] +fn hash_test_specialize_wrapper(num: i32, string: &str) { + hash_test_specialize(num, string); +} + +#[test] +fn test_no_panic() { + 
hash_test_final_wrapper(2, "Foo"); + hash_test_specialize_wrapper(2, "Bar"); + hash_test_random_wrapper(2, "Baz"); +} diff --git a/src/rust/vendor/byteorder/.cargo-checksum.json b/src/rust/vendor/byteorder/.cargo-checksum.json new file mode 100644 index 000000000..bffae5de4 --- /dev/null +++ b/src/rust/vendor/byteorder/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"c1cb69be6db5933c4bb4ebb6591e0fe3e7b97d491face3abcf947383c218bb31","COPYING":"01c266bced4a434da0051174d6bee16a4c82cf634e2679b6155d40d75012390f","Cargo.toml":"94ba374cb26f3c68fb83da2e5e7dce85920fc4fb827620b06b39d71a9d0e1e18","LICENSE-MIT":"0f96a83840e146e43c0ec96a22ec1f392e0680e6c1226e6f3ba87e0740af850f","README.md":"2f2d64924c35b7203e3e3f3d136fcb714281762d145ca3513246da5547b1d014","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","benches/bench.rs":"8b114080042d3292ec8de425904e4114b7f532fe3add0d807521e6cc166a17ea","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/io.rs":"9612530634d0e7ce9887a23836b58c0d972c1f45b05d9ada8355961567075627","src/lib.rs":"ab3394c385b32457795931440cfb8dbca70ba5d9e1a428fcf651f7ccb2d6c34f"},"package":"1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"} \ No newline at end of file diff --git a/src/rust/vendor/byteorder/CHANGELOG.md b/src/rust/vendor/byteorder/CHANGELOG.md new file mode 100644 index 000000000..9efb7ed29 --- /dev/null +++ b/src/rust/vendor/byteorder/CHANGELOG.md @@ -0,0 +1,143 @@ +**WARNING:** This CHANGELOG is no longer updated. The activity for this project +is sparse enough that you should refer to the commit log instead. + + +1.3.4 +===== +This patch release squashes deprecation warnings for the `try!` macro, in +accordance with byteorder's minimum supported Rust version (currently at Rust +1.12.0). + + +1.3.3 +===== +This patch release adds `ByteOrder::write_i8_into()` as a simple, safe interface +for ordinarily unsafe or tedious code. 
+ + +1.3.2 +===== +This patch release adds `ReadBytesExt::read_i8_into()` as a simple, safe interface +for ordinarily unsafe or tedious code. + + +1.3.1 +===== +This minor release performs mostly small internal changes. Going forward, these +are not going to be incorporated into the changelog. + + +1.3.0 +===== +This new minor release now enables `i128` support automatically on Rust +compilers that support 128-bit integers. The `i128` feature is now a no-op, but +continues to exist for backward compatibility purposes. The crate continues to +maintain compatibility with Rust 1.12.0. + +This release also deprecates the `ByteOrder` trait methods +`read_f32_into_unchecked` and `read_f64_into_unchecked` in favor of +`read_f32_into` and `read_f64_into`. This was an oversight from the 1.2 release +where the corresponding methods on `ReadBytesExt` were deprecated. + +`quickcheck` and `rand` were bumped to `0.8` and `0.6`, respectively. + +A few small documentation related bugs have been fixed. + + +1.2.7 +===== +This patch release excludes some CI files from the crate release and updates +the license field to use `OR` instead of `/`. + + +1.2.6 +===== +This patch release fixes some test compilation errors introduced by an +over-eager release of 1.2.5. + + +1.2.5 +===== +This patch release fixes some typos in the docs, adds doc tests to methods on +`WriteByteExt` and bumps the quickcheck dependency to `0.7`. + + +1.2.4 +===== +This patch release adds support for 48-bit integers by adding the following +methods to the `ByteOrder` trait: `read_u48`, `read_i48`, `write_u48` and +`write_i48`. Corresponding methods have been added to the `ReadBytesExt` and +`WriteBytesExt` traits as well. + + +1.2.3 +===== +This patch release removes the use of `feature(i128_type)` from byteorder, +since it has been stabilized. We leave byteorder's `i128` feature in place +in order to continue supporting compilation on older versions of Rust. 
+ + +1.2.2 +===== +This patch release only consists of internal improvements and refactorings. +Notably, this removes all uses of `transmute` and instead uses pointer casts. + + +1.2.1 +===== +This patch release removes more unnecessary uses of `unsafe` that +were overlooked in the prior `1.2.0` release. In particular, the +`ReadBytesExt::read_{f32,f64}_into_checked` methods have been deprecated and +replaced by more appropriately named `read_{f32,f64}_into` methods. + + +1.2.0 +===== +The most prominent change in this release of `byteorder` is the removal of +unnecessary signaling NaN masking, and in turn, the `unsafe` annotations +associated with methods that didn't do masking. See +[#103](https://github.com/BurntSushi/byteorder/issues/103) +for more details. + +* [BUG #102](https://github.com/BurntSushi/byteorder/issues/102): + Fix big endian tests. +* [BUG #103](https://github.com/BurntSushi/byteorder/issues/103): + Remove sNaN masking. + + +1.1.0 +===== +This release of `byteorder` features a number of fixes and improvements, mostly +as a result of the +[Litz Blitz evaluation](https://public.etherpad-mozilla.org/p/rust-crate-eval-byteorder). + +Feature enhancements: + +* [FEATURE #63](https://github.com/BurntSushi/byteorder/issues/63): + Add methods for reading/writing slices of numbers for a specific + endianness. +* [FEATURE #65](https://github.com/BurntSushi/byteorder/issues/65): + Add support for `u128`/`i128` types. (Behind the nightly only `i128` + feature.) +* [FEATURE #72](https://github.com/BurntSushi/byteorder/issues/72): + Add "panics" and "errors" sections for each relevant public API item. +* [FEATURE #74](https://github.com/BurntSushi/byteorder/issues/74): + Add CI badges to Cargo.toml. +* [FEATURE #75](https://github.com/BurntSushi/byteorder/issues/75): + Add more examples to public API items. +* Add 24-bit read/write methods. +* Add `BE` and `LE` type aliases for `BigEndian` and `LittleEndian`, + respectively. 
+ +Bug fixes: + +* [BUG #68](https://github.com/BurntSushi/byteorder/issues/68): + Panic in {BigEndian,LittleEndian}::default. +* [BUG #69](https://github.com/BurntSushi/byteorder/issues/69): + Seal the `ByteOrder` trait to prevent out-of-crate implementations. +* [BUG #71](https://github.com/BurntSushi/byteorder/issues/71): + Guarantee that the results of `read_f32`/`read_f64` are always defined. +* [BUG #73](https://github.com/BurntSushi/byteorder/issues/73): + Add crates.io categories. +* [BUG #77](https://github.com/BurntSushi/byteorder/issues/77): + Add `html_root` doc attribute. diff --git a/src/rust/vendor/byteorder/COPYING b/src/rust/vendor/byteorder/COPYING new file mode 100644 index 000000000..bb9c20a09 --- /dev/null +++ b/src/rust/vendor/byteorder/COPYING @@ -0,0 +1,3 @@ +This project is dual-licensed under the Unlicense and MIT licenses. + +You may use this code under the terms of either license. diff --git a/src/rust/vendor/byteorder/Cargo.toml b/src/rust/vendor/byteorder/Cargo.toml new file mode 100644 index 000000000..da515d9d9 --- /dev/null +++ b/src/rust/vendor/byteorder/Cargo.toml @@ -0,0 +1,54 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.60" +name = "byteorder" +version = "1.5.0" +authors = ["Andrew Gallant "] +description = "Library for reading/writing numbers in big-endian and little-endian." 
+homepage = "https://github.com/BurntSushi/byteorder" +documentation = "https://docs.rs/byteorder" +readme = "README.md" +keywords = [ + "byte", + "endian", + "big-endian", + "little-endian", + "binary", +] +categories = [ + "encoding", + "parsing", + "no-std", +] +license = "Unlicense OR MIT" +repository = "https://github.com/BurntSushi/byteorder" + +[profile.bench] +opt-level = 3 + +[lib] +name = "byteorder" +bench = false + +[dev-dependencies.quickcheck] +version = "0.9.2" +default-features = false + +[dev-dependencies.rand] +version = "0.7" + +[features] +default = ["std"] +i128 = [] +std = [] diff --git a/src/rust/vendor/byteorder/LICENSE-MIT b/src/rust/vendor/byteorder/LICENSE-MIT new file mode 100644 index 000000000..3b0a5dc09 --- /dev/null +++ b/src/rust/vendor/byteorder/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Gallant + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/src/rust/vendor/byteorder/README.md b/src/rust/vendor/byteorder/README.md new file mode 100644 index 000000000..7c46019e0 --- /dev/null +++ b/src/rust/vendor/byteorder/README.md @@ -0,0 +1,77 @@ +byteorder +========= +This crate provides convenience methods for encoding and decoding +numbers in either big-endian or little-endian order. + +[![Build status](https://github.com/BurntSushi/byteorder/workflows/ci/badge.svg)](https://github.com/BurntSushi/byteorder/actions) +[![crates.io](https://img.shields.io/crates/v/byteorder.svg)](https://crates.io/crates/byteorder) + +Dual-licensed under MIT or the [UNLICENSE](https://unlicense.org/). + + +### Documentation + +https://docs.rs/byteorder + + +### Installation + +This crate works with Cargo and is on +[crates.io](https://crates.io/crates/byteorder). Add it to your `Cargo.toml` +like so: + +```toml +[dependencies] +byteorder = "1" +``` + +If you want to augment existing `Read` and `Write` traits, then import the +extension methods like so: + +```rust +use byteorder::{ReadBytesExt, WriteBytesExt, BigEndian, LittleEndian}; +``` + +For example: + +```rust +use std::io::Cursor; +use byteorder::{BigEndian, ReadBytesExt}; + +let mut rdr = Cursor::new(vec![2, 5, 3, 0]); +// Note that we use type parameters to indicate which kind of byte order +// we want! +assert_eq!(517, rdr.read_u16::().unwrap()); +assert_eq!(768, rdr.read_u16::().unwrap()); +``` + +### `no_std` crates + +This crate has a feature, `std`, that is enabled by default. To use this crate +in a `no_std` context, add the following to your `Cargo.toml`: + +```toml +[dependencies] +byteorder = { version = "1", default-features = false } +``` + + +### Minimum Rust version policy + +This crate's minimum supported `rustc` version is `1.60.0`. + +The current policy is that the minimum Rust version required to use this crate +can be increased in minor version updates. 
For example, if `crate 1.0` requires +Rust 1.20.0, then `crate 1.0.z` for all values of `z` will also require Rust +1.20.0 or newer. However, `crate 1.y` for `y > 0` may require a newer minimum +version of Rust. + +In general, this crate will be conservative with respect to the minimum +supported version of Rust. + + +### Alternatives + +Note that as of Rust 1.32, the standard numeric types provide built-in methods +like `to_le_bytes` and `from_le_bytes`, which support some of the same use +cases. diff --git a/src/rust/vendor/byteorder/UNLICENSE b/src/rust/vendor/byteorder/UNLICENSE new file mode 100644 index 000000000..68a49daad --- /dev/null +++ b/src/rust/vendor/byteorder/UNLICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to diff --git a/src/rust/vendor/byteorder/benches/bench.rs b/src/rust/vendor/byteorder/benches/bench.rs new file mode 100644 index 000000000..963251ce5 --- /dev/null +++ b/src/rust/vendor/byteorder/benches/bench.rs @@ -0,0 +1,326 @@ +#![feature(test)] + +extern crate test; + +macro_rules! bench_num { + ($name:ident, $read:ident, $bytes:expr, $data:expr) => { + mod $name { + use byteorder::{ + BigEndian, ByteOrder, LittleEndian, NativeEndian, + }; + use test::black_box as bb; + use test::Bencher; + + const NITER: usize = 100_000; + + #[bench] + fn read_big_endian(b: &mut Bencher) { + let buf = $data; + b.iter(|| { + for _ in 0..NITER { + bb(BigEndian::$read(&buf, $bytes)); + } + }); + } + + #[bench] + fn read_little_endian(b: &mut Bencher) { + let buf = $data; + b.iter(|| { + for _ in 0..NITER { + bb(LittleEndian::$read(&buf, $bytes)); + } + }); + } + + #[bench] + fn read_native_endian(b: &mut Bencher) { + let buf = $data; + b.iter(|| { + for _ in 0..NITER { + bb(NativeEndian::$read(&buf, $bytes)); + } + }); + } + } + }; + ($ty:ident, $max:ident, + $read:ident, $write:ident, $size:expr, $data:expr) => { + mod $ty { + use byteorder::{ + BigEndian, ByteOrder, LittleEndian, NativeEndian, + }; + use std::$ty; + use test::black_box as bb; + use test::Bencher; + + const NITER: usize = 100_000; + + #[bench] + fn read_big_endian(b: &mut Bencher) { + let buf = $data; + b.iter(|| { + for _ in 0..NITER { + bb(BigEndian::$read(&buf)); + } + }); + } + + #[bench] + fn read_little_endian(b: &mut Bencher) { + let buf = $data; + b.iter(|| { + for _ in 0..NITER { + bb(LittleEndian::$read(&buf)); + } + }); + } + + #[bench] + fn read_native_endian(b: &mut Bencher) { + let buf = $data; + b.iter(|| { + for _ in 0..NITER { + bb(NativeEndian::$read(&buf)); + } + }); + } + + #[bench] + fn write_big_endian(b: &mut Bencher) { + let mut buf = $data; + let n = $ty::$max; + b.iter(|| { + for _ in 0..NITER { + bb(BigEndian::$write(&mut buf, n)); + } + }); + } 
+ + #[bench] + fn write_little_endian(b: &mut Bencher) { + let mut buf = $data; + let n = $ty::$max; + b.iter(|| { + for _ in 0..NITER { + bb(LittleEndian::$write(&mut buf, n)); + } + }); + } + + #[bench] + fn write_native_endian(b: &mut Bencher) { + let mut buf = $data; + let n = $ty::$max; + b.iter(|| { + for _ in 0..NITER { + bb(NativeEndian::$write(&mut buf, n)); + } + }); + } + } + }; +} + +bench_num!(u16, MAX, read_u16, write_u16, 2, [1, 2]); +bench_num!(i16, MAX, read_i16, write_i16, 2, [1, 2]); +bench_num!(u32, MAX, read_u32, write_u32, 4, [1, 2, 3, 4]); +bench_num!(i32, MAX, read_i32, write_i32, 4, [1, 2, 3, 4]); +bench_num!(u64, MAX, read_u64, write_u64, 8, [1, 2, 3, 4, 5, 6, 7, 8]); +bench_num!(i64, MAX, read_i64, write_i64, 8, [1, 2, 3, 4, 5, 6, 7, 8]); +bench_num!(f32, MAX, read_f32, write_f32, 4, [1, 2, 3, 4]); +bench_num!(f64, MAX, read_f64, write_f64, 8, [1, 2, 3, 4, 5, 6, 7, 8]); + +bench_num!(uint_1, read_uint, 1, [1]); +bench_num!(uint_2, read_uint, 2, [1, 2]); +bench_num!(uint_3, read_uint, 3, [1, 2, 3]); +bench_num!(uint_4, read_uint, 4, [1, 2, 3, 4]); +bench_num!(uint_5, read_uint, 5, [1, 2, 3, 4, 5]); +bench_num!(uint_6, read_uint, 6, [1, 2, 3, 4, 5, 6]); +bench_num!(uint_7, read_uint, 7, [1, 2, 3, 4, 5, 6, 7]); +bench_num!(uint_8, read_uint, 8, [1, 2, 3, 4, 5, 6, 7, 8]); + +bench_num!(int_1, read_int, 1, [1]); +bench_num!(int_2, read_int, 2, [1, 2]); +bench_num!(int_3, read_int, 3, [1, 2, 3]); +bench_num!(int_4, read_int, 4, [1, 2, 3, 4]); +bench_num!(int_5, read_int, 5, [1, 2, 3, 4, 5]); +bench_num!(int_6, read_int, 6, [1, 2, 3, 4, 5, 6]); +bench_num!(int_7, read_int, 7, [1, 2, 3, 4, 5, 6, 7]); +bench_num!(int_8, read_int, 8, [1, 2, 3, 4, 5, 6, 7, 8]); + +bench_num!( + u128, + MAX, + read_u128, + write_u128, + 16, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +); +bench_num!( + i128, + MAX, + read_i128, + write_i128, + 16, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +); + +bench_num!(uint128_1, read_uint128, 1, 
[1]); +bench_num!(uint128_2, read_uint128, 2, [1, 2]); +bench_num!(uint128_3, read_uint128, 3, [1, 2, 3]); +bench_num!(uint128_4, read_uint128, 4, [1, 2, 3, 4]); +bench_num!(uint128_5, read_uint128, 5, [1, 2, 3, 4, 5]); +bench_num!(uint128_6, read_uint128, 6, [1, 2, 3, 4, 5, 6]); +bench_num!(uint128_7, read_uint128, 7, [1, 2, 3, 4, 5, 6, 7]); +bench_num!(uint128_8, read_uint128, 8, [1, 2, 3, 4, 5, 6, 7, 8]); +bench_num!(uint128_9, read_uint128, 9, [1, 2, 3, 4, 5, 6, 7, 8, 9]); +bench_num!(uint128_10, read_uint128, 10, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); +bench_num!(uint128_11, read_uint128, 11, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); +bench_num!( + uint128_12, + read_uint128, + 12, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] +); +bench_num!( + uint128_13, + read_uint128, + 13, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] +); +bench_num!( + uint128_14, + read_uint128, + 14, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] +); +bench_num!( + uint128_15, + read_uint128, + 15, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +); +bench_num!( + uint128_16, + read_uint128, + 16, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +); + +bench_num!(int128_1, read_int128, 1, [1]); +bench_num!(int128_2, read_int128, 2, [1, 2]); +bench_num!(int128_3, read_int128, 3, [1, 2, 3]); +bench_num!(int128_4, read_int128, 4, [1, 2, 3, 4]); +bench_num!(int128_5, read_int128, 5, [1, 2, 3, 4, 5]); +bench_num!(int128_6, read_int128, 6, [1, 2, 3, 4, 5, 6]); +bench_num!(int128_7, read_int128, 7, [1, 2, 3, 4, 5, 6, 7]); +bench_num!(int128_8, read_int128, 8, [1, 2, 3, 4, 5, 6, 7, 8]); +bench_num!(int128_9, read_int128, 9, [1, 2, 3, 4, 5, 6, 7, 8, 9]); +bench_num!(int128_10, read_int128, 10, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); +bench_num!(int128_11, read_int128, 11, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); +bench_num!( + int128_12, + read_int128, + 12, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] +); +bench_num!( + int128_13, + read_int128, + 13, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] 
+); +bench_num!( + int128_14, + read_int128, + 14, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] +); +bench_num!( + int128_15, + read_int128, + 15, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] +); +bench_num!( + int128_16, + read_int128, + 16, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] +); + +macro_rules! bench_slice { + ($name:ident, $numty:ty, $read:ident, $write:ident) => { + mod $name { + use std::mem::size_of; + + use byteorder::{BigEndian, ByteOrder, LittleEndian}; + use rand::distributions; + use rand::{self, Rng}; + use test::Bencher; + + #[bench] + fn read_big_endian(b: &mut Bencher) { + let mut numbers: Vec<$numty> = rand::thread_rng() + .sample_iter(&distributions::Standard) + .take(100000) + .collect(); + let mut bytes = vec![0; numbers.len() * size_of::<$numty>()]; + BigEndian::$write(&numbers, &mut bytes); + + b.bytes = bytes.len() as u64; + b.iter(|| { + BigEndian::$read(&bytes, &mut numbers); + }); + } + + #[bench] + fn read_little_endian(b: &mut Bencher) { + let mut numbers: Vec<$numty> = rand::thread_rng() + .sample_iter(&distributions::Standard) + .take(100000) + .collect(); + let mut bytes = vec![0; numbers.len() * size_of::<$numty>()]; + LittleEndian::$write(&numbers, &mut bytes); + + b.bytes = bytes.len() as u64; + b.iter(|| { + LittleEndian::$read(&bytes, &mut numbers); + }); + } + + #[bench] + fn write_big_endian(b: &mut Bencher) { + let numbers: Vec<$numty> = rand::thread_rng() + .sample_iter(&distributions::Standard) + .take(100000) + .collect(); + let mut bytes = vec![0; numbers.len() * size_of::<$numty>()]; + + b.bytes = bytes.len() as u64; + b.iter(|| { + BigEndian::$write(&numbers, &mut bytes); + }); + } + + #[bench] + fn write_little_endian(b: &mut Bencher) { + let numbers: Vec<$numty> = rand::thread_rng() + .sample_iter(&distributions::Standard) + .take(100000) + .collect(); + let mut bytes = vec![0; numbers.len() * size_of::<$numty>()]; + + b.bytes = bytes.len() as u64; + b.iter(|| { + 
LittleEndian::$write(&numbers, &mut bytes); + }); + } + } + }; +} + +bench_slice!(slice_u16, u16, read_u16_into, write_u16_into); +bench_slice!(slice_u64, u64, read_u64_into, write_u64_into); +bench_slice!(slice_i64, i64, read_i64_into, write_i64_into); diff --git a/src/rust/vendor/byteorder/rustfmt.toml b/src/rust/vendor/byteorder/rustfmt.toml new file mode 100644 index 000000000..aa37a218b --- /dev/null +++ b/src/rust/vendor/byteorder/rustfmt.toml @@ -0,0 +1,2 @@ +max_width = 79 +use_small_heuristics = "max" diff --git a/src/rust/vendor/byteorder/src/io.rs b/src/rust/vendor/byteorder/src/io.rs new file mode 100644 index 000000000..dfad2ca39 --- /dev/null +++ b/src/rust/vendor/byteorder/src/io.rs @@ -0,0 +1,1592 @@ +use std::{ + io::{self, Result}, + slice, +}; + +use crate::ByteOrder; + +/// Extends [`Read`] with methods for reading numbers. (For `std::io`.) +/// +/// Most of the methods defined here have an unconstrained type parameter that +/// must be explicitly instantiated. Typically, it is instantiated with either +/// the [`BigEndian`] or [`LittleEndian`] types defined in this crate. +/// +/// # Examples +/// +/// Read unsigned 16 bit big-endian integers from a [`Read`]: +/// +/// ```rust +/// use std::io::Cursor; +/// use byteorder::{BigEndian, ReadBytesExt}; +/// +/// let mut rdr = Cursor::new(vec![2, 5, 3, 0]); +/// assert_eq!(517, rdr.read_u16::().unwrap()); +/// assert_eq!(768, rdr.read_u16::().unwrap()); +/// ``` +/// +/// [`BigEndian`]: enum.BigEndian.html +/// [`LittleEndian`]: enum.LittleEndian.html +/// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +pub trait ReadBytesExt: io::Read { + /// Reads an unsigned 8 bit integer from the underlying reader. + /// + /// Note that since this reads a single byte, no byte order conversions + /// are used. It is included for completeness. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read unsigned 8 bit integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::ReadBytesExt; + /// + /// let mut rdr = Cursor::new(vec![2, 5]); + /// assert_eq!(2, rdr.read_u8().unwrap()); + /// assert_eq!(5, rdr.read_u8().unwrap()); + /// ``` + #[inline] + fn read_u8(&mut self) -> Result { + let mut buf = [0; 1]; + self.read_exact(&mut buf)?; + Ok(buf[0]) + } + + /// Reads a signed 8 bit integer from the underlying reader. + /// + /// Note that since this reads a single byte, no byte order conversions + /// are used. It is included for completeness. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read signed 8 bit integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::ReadBytesExt; + /// + /// let mut rdr = Cursor::new(vec![0x02, 0xfb]); + /// assert_eq!(2, rdr.read_i8().unwrap()); + /// assert_eq!(-5, rdr.read_i8().unwrap()); + /// ``` + #[inline] + fn read_i8(&mut self) -> Result { + let mut buf = [0; 1]; + self.read_exact(&mut buf)?; + Ok(buf[0] as i8) + } + + /// Reads an unsigned 16 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read unsigned 16 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![2, 5, 3, 0]); + /// assert_eq!(517, rdr.read_u16::().unwrap()); + /// assert_eq!(768, rdr.read_u16::().unwrap()); + /// ``` + #[inline] + fn read_u16(&mut self) -> Result { + let mut buf = [0; 2]; + self.read_exact(&mut buf)?; + Ok(T::read_u16(&buf)) + } + + /// Reads a signed 16 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read signed 16 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0x00, 0xc1, 0xff, 0x7c]); + /// assert_eq!(193, rdr.read_i16::().unwrap()); + /// assert_eq!(-132, rdr.read_i16::().unwrap()); + /// ``` + #[inline] + fn read_i16(&mut self) -> Result { + let mut buf = [0; 2]; + self.read_exact(&mut buf)?; + Ok(T::read_i16(&buf)) + } + + /// Reads an unsigned 24 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read unsigned 24 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0x00, 0x01, 0x0b]); + /// assert_eq!(267, rdr.read_u24::().unwrap()); + /// ``` + #[inline] + fn read_u24(&mut self) -> Result { + let mut buf = [0; 3]; + self.read_exact(&mut buf)?; + Ok(T::read_u24(&buf)) + } + + /// Reads a signed 24 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read signed 24 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0xff, 0x7a, 0x33]); + /// assert_eq!(-34253, rdr.read_i24::().unwrap()); + /// ``` + #[inline] + fn read_i24(&mut self) -> Result { + let mut buf = [0; 3]; + self.read_exact(&mut buf)?; + Ok(T::read_i24(&buf)) + } + + /// Reads an unsigned 32 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read unsigned 32 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0x00, 0x00, 0x01, 0x0b]); + /// assert_eq!(267, rdr.read_u32::().unwrap()); + /// ``` + #[inline] + fn read_u32(&mut self) -> Result { + let mut buf = [0; 4]; + self.read_exact(&mut buf)?; + Ok(T::read_u32(&buf)) + } + + /// Reads a signed 32 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read signed 32 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0xff, 0xff, 0x7a, 0x33]); + /// assert_eq!(-34253, rdr.read_i32::().unwrap()); + /// ``` + #[inline] + fn read_i32(&mut self) -> Result { + let mut buf = [0; 4]; + self.read_exact(&mut buf)?; + Ok(T::read_i32(&buf)) + } + + /// Reads an unsigned 48 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read unsigned 48 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0xb6, 0x71, 0x6b, 0xdc, 0x2b, 0x31]); + /// assert_eq!(200598257150769, rdr.read_u48::().unwrap()); + /// ``` + #[inline] + fn read_u48(&mut self) -> Result { + let mut buf = [0; 6]; + self.read_exact(&mut buf)?; + Ok(T::read_u48(&buf)) + } + + /// Reads a signed 48 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read signed 48 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0x9d, 0x71, 0xab, 0xe7, 0x97, 0x8f]); + /// assert_eq!(-108363435763825, rdr.read_i48::().unwrap()); + /// ``` + #[inline] + fn read_i48(&mut self) -> Result { + let mut buf = [0; 6]; + self.read_exact(&mut buf)?; + Ok(T::read_i48(&buf)) + } + + /// Reads an unsigned 64 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read an unsigned 64 bit big-endian integer from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83]); + /// assert_eq!(918733457491587, rdr.read_u64::().unwrap()); + /// ``` + #[inline] + fn read_u64(&mut self) -> Result { + let mut buf = [0; 8]; + self.read_exact(&mut buf)?; + Ok(T::read_u64(&buf)) + } + + /// Reads a signed 64 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a signed 64 bit big-endian integer from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0]); + /// assert_eq!(i64::min_value(), rdr.read_i64::().unwrap()); + /// ``` + #[inline] + fn read_i64(&mut self) -> Result { + let mut buf = [0; 8]; + self.read_exact(&mut buf)?; + Ok(T::read_i64(&buf)) + } + + /// Reads an unsigned 128 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read an unsigned 128 bit big-endian integer from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83, + /// 0x00, 0x03, 0x43, 0x95, 0x4d, 0x60, 0x86, 0x83 + /// ]); + /// assert_eq!(16947640962301618749969007319746179, rdr.read_u128::().unwrap()); + /// ``` + #[inline] + fn read_u128(&mut self) -> Result { + let mut buf = [0; 16]; + self.read_exact(&mut buf)?; + Ok(T::read_u128(&buf)) + } + + /// Reads a signed 128 bit integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a signed 128 bit big-endian integer from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + /// assert_eq!(i128::min_value(), rdr.read_i128::().unwrap()); + /// ``` + #[inline] + fn read_i128(&mut self) -> Result { + let mut buf = [0; 16]; + self.read_exact(&mut buf)?; + Ok(T::read_i128(&buf)) + } + + /// Reads an unsigned n-bytes integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read an unsigned n-byte big-endian integer from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0x80, 0x74, 0xfa]); + /// assert_eq!(8418554, rdr.read_uint::(3).unwrap()); + #[inline] + fn read_uint(&mut self, nbytes: usize) -> Result { + let mut buf = [0; 8]; + self.read_exact(&mut buf[..nbytes])?; + Ok(T::read_uint(&buf[..nbytes], nbytes)) + } + + /// Reads a signed n-bytes integer from the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read an unsigned n-byte big-endian integer from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0xc1, 0xff, 0x7c]); + /// assert_eq!(-4063364, rdr.read_int::(3).unwrap()); + #[inline] + fn read_int(&mut self, nbytes: usize) -> Result { + let mut buf = [0; 8]; + self.read_exact(&mut buf[..nbytes])?; + Ok(T::read_int(&buf[..nbytes], nbytes)) + } + + /// Reads an unsigned n-bytes integer from the underlying reader. + #[inline] + fn read_uint128(&mut self, nbytes: usize) -> Result { + let mut buf = [0; 16]; + self.read_exact(&mut buf[..nbytes])?; + Ok(T::read_uint128(&buf[..nbytes], nbytes)) + } + + /// Reads a signed n-bytes integer from the underlying reader. + #[inline] + fn read_int128(&mut self, nbytes: usize) -> Result { + let mut buf = [0; 16]; + self.read_exact(&mut buf[..nbytes])?; + Ok(T::read_int128(&buf[..nbytes], nbytes)) + } + + /// Reads a IEEE754 single-precision (4 bytes) floating point number from + /// the underlying reader. 
+ /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a big-endian single-precision floating point number from a `Read`: + /// + /// ```rust + /// use std::f32; + /// use std::io::Cursor; + /// + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0x40, 0x49, 0x0f, 0xdb, + /// ]); + /// assert_eq!(f32::consts::PI, rdr.read_f32::().unwrap()); + /// ``` + #[inline] + fn read_f32(&mut self) -> Result { + let mut buf = [0; 4]; + self.read_exact(&mut buf)?; + Ok(T::read_f32(&buf)) + } + + /// Reads a IEEE754 double-precision (8 bytes) floating point number from + /// the underlying reader. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a big-endian double-precision floating point number from a `Read`: + /// + /// ```rust + /// use std::f64; + /// use std::io::Cursor; + /// + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0x40, 0x09, 0x21, 0xfb, 0x54, 0x44, 0x2d, 0x18, + /// ]); + /// assert_eq!(f64::consts::PI, rdr.read_f64::().unwrap()); + /// ``` + #[inline] + fn read_f64(&mut self) -> Result { + let mut buf = [0; 8]; + self.read_exact(&mut buf)?; + Ok(T::read_f64(&buf)) + } + + /// Reads a sequence of unsigned 16 bit integers from the underlying + /// reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of unsigned 16 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![2, 5, 3, 0]); + /// let mut dst = [0; 2]; + /// rdr.read_u16_into::(&mut dst).unwrap(); + /// assert_eq!([517, 768], dst); + /// ``` + #[inline] + fn read_u16_into(&mut self, dst: &mut [u16]) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_u16(dst); + Ok(()) + } + + /// Reads a sequence of unsigned 32 bit integers from the underlying + /// reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of unsigned 32 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0, 0, 2, 5, 0, 0, 3, 0]); + /// let mut dst = [0; 2]; + /// rdr.read_u32_into::(&mut dst).unwrap(); + /// assert_eq!([517, 768], dst); + /// ``` + #[inline] + fn read_u32_into(&mut self, dst: &mut [u32]) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_u32(dst); + Ok(()) + } + + /// Reads a sequence of unsigned 64 bit integers from the underlying + /// reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. 
+ /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of unsigned 64 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0, 0, 0, 0, 0, 0, 2, 5, + /// 0, 0, 0, 0, 0, 0, 3, 0, + /// ]); + /// let mut dst = [0; 2]; + /// rdr.read_u64_into::(&mut dst).unwrap(); + /// assert_eq!([517, 768], dst); + /// ``` + #[inline] + fn read_u64_into(&mut self, dst: &mut [u64]) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_u64(dst); + Ok(()) + } + + /// Reads a sequence of unsigned 128 bit integers from the underlying + /// reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of unsigned 128 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 5, + /// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, + /// ]); + /// let mut dst = [0; 2]; + /// rdr.read_u128_into::(&mut dst).unwrap(); + /// assert_eq!([517, 768], dst); + /// ``` + #[inline] + fn read_u128_into( + &mut self, + dst: &mut [u128], + ) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_u128(dst); + Ok(()) + } + + /// Reads a sequence of signed 8 bit integers from the underlying reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// Note that since each `i8` is a single byte, no byte order conversions + /// are used. This method is included because it provides a safe, simple + /// way for the caller to read into a `&mut [i8]` buffer. (Without this + /// method, the caller would have to either use `unsafe` code or convert + /// each byte to `i8` individually.) + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of signed 8 bit integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![2, 251, 3]); + /// let mut dst = [0; 3]; + /// rdr.read_i8_into(&mut dst).unwrap(); + /// assert_eq!([2, -5, 3], dst); + /// ``` + #[inline] + fn read_i8_into(&mut self, dst: &mut [i8]) -> Result<()> { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf) + } + + /// Reads a sequence of signed 16 bit integers from the underlying + /// reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of signed 16 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![2, 5, 3, 0]); + /// let mut dst = [0; 2]; + /// rdr.read_i16_into::(&mut dst).unwrap(); + /// assert_eq!([517, 768], dst); + /// ``` + #[inline] + fn read_i16_into(&mut self, dst: &mut [i16]) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_i16(dst); + Ok(()) + } + + /// Reads a sequence of signed 32 bit integers from the underlying + /// reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of signed 32 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![0, 0, 2, 5, 0, 0, 3, 0]); + /// let mut dst = [0; 2]; + /// rdr.read_i32_into::(&mut dst).unwrap(); + /// assert_eq!([517, 768], dst); + /// ``` + #[inline] + fn read_i32_into(&mut self, dst: &mut [i32]) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_i32(dst); + Ok(()) + } + + /// Reads a sequence of signed 64 bit integers from the underlying + /// reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of signed 64 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0, 0, 0, 0, 0, 0, 2, 5, + /// 0, 0, 0, 0, 0, 0, 3, 0, + /// ]); + /// let mut dst = [0; 2]; + /// rdr.read_i64_into::(&mut dst).unwrap(); + /// assert_eq!([517, 768], dst); + /// ``` + #[inline] + fn read_i64_into(&mut self, dst: &mut [i64]) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_i64(dst); + Ok(()) + } + + /// Reads a sequence of signed 128 bit integers from the underlying + /// reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. 
+ /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. + /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of signed 128 bit big-endian integers from a `Read`: + /// + /// ```rust + /// use std::io::Cursor; + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 5, + /// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, + /// ]); + /// let mut dst = [0; 2]; + /// rdr.read_i128_into::(&mut dst).unwrap(); + /// assert_eq!([517, 768], dst); + /// ``` + #[inline] + fn read_i128_into( + &mut self, + dst: &mut [i128], + ) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_i128(dst); + Ok(()) + } + + /// Reads a sequence of IEEE754 single-precision (4 bytes) floating + /// point numbers from the underlying reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of big-endian single-precision floating point number + /// from a `Read`: + /// + /// ```rust + /// use std::f32; + /// use std::io::Cursor; + /// + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0x40, 0x49, 0x0f, 0xdb, + /// 0x3f, 0x80, 0x00, 0x00, + /// ]); + /// let mut dst = [0.0; 2]; + /// rdr.read_f32_into::(&mut dst).unwrap(); + /// assert_eq!([f32::consts::PI, 1.0], dst); + /// ``` + #[inline] + fn read_f32_into(&mut self, dst: &mut [f32]) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_f32(dst); + Ok(()) + } + + /// **DEPRECATED**. + /// + /// This method is deprecated. Use `read_f32_into` instead. + /// + /// Reads a sequence of IEEE754 single-precision (4 bytes) floating + /// point numbers from the underlying reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of big-endian single-precision floating point number + /// from a `Read`: + /// + /// ```rust + /// use std::f32; + /// use std::io::Cursor; + /// + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0x40, 0x49, 0x0f, 0xdb, + /// 0x3f, 0x80, 0x00, 0x00, + /// ]); + /// let mut dst = [0.0; 2]; + /// rdr.read_f32_into_unchecked::(&mut dst).unwrap(); + /// assert_eq!([f32::consts::PI, 1.0], dst); + /// ``` + #[inline] + #[deprecated(since = "1.2.0", note = "please use `read_f32_into` instead")] + fn read_f32_into_unchecked( + &mut self, + dst: &mut [f32], + ) -> Result<()> { + self.read_f32_into::(dst) + } + + /// Reads a sequence of IEEE754 double-precision (8 bytes) floating + /// point numbers from the underlying reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of big-endian single-precision floating point number + /// from a `Read`: + /// + /// ```rust + /// use std::f64; + /// use std::io::Cursor; + /// + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0x40, 0x09, 0x21, 0xfb, 0x54, 0x44, 0x2d, 0x18, + /// 0x3f, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /// ]); + /// let mut dst = [0.0; 2]; + /// rdr.read_f64_into::(&mut dst).unwrap(); + /// assert_eq!([f64::consts::PI, 1.0], dst); + /// ``` + #[inline] + fn read_f64_into(&mut self, dst: &mut [f64]) -> Result<()> { + { + let buf = unsafe { slice_to_u8_mut(dst) }; + self.read_exact(buf)?; + } + T::from_slice_f64(dst); + Ok(()) + } + + /// **DEPRECATED**. + /// + /// This method is deprecated. Use `read_f64_into` instead. + /// + /// Reads a sequence of IEEE754 double-precision (8 bytes) floating + /// point numbers from the underlying reader. + /// + /// The given buffer is either filled completely or an error is returned. + /// If an error is returned, the contents of `dst` are unspecified. + /// + /// # Safety + /// + /// This method is unsafe because there are no guarantees made about the + /// floating point values. In particular, this method does not check for + /// signaling NaNs, which may result in undefined behavior. + /// + /// # Errors + /// + /// This method returns the same errors as [`Read::read_exact`]. 
+ /// + /// [`Read::read_exact`]: https://doc.rust-lang.org/std/io/trait.Read.html#method.read_exact + /// + /// # Examples + /// + /// Read a sequence of big-endian single-precision floating point number + /// from a `Read`: + /// + /// ```rust + /// use std::f64; + /// use std::io::Cursor; + /// + /// use byteorder::{BigEndian, ReadBytesExt}; + /// + /// let mut rdr = Cursor::new(vec![ + /// 0x40, 0x09, 0x21, 0xfb, 0x54, 0x44, 0x2d, 0x18, + /// 0x3f, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /// ]); + /// let mut dst = [0.0; 2]; + /// rdr.read_f64_into_unchecked::(&mut dst).unwrap(); + /// assert_eq!([f64::consts::PI, 1.0], dst); + /// ``` + #[inline] + #[deprecated(since = "1.2.0", note = "please use `read_f64_into` instead")] + fn read_f64_into_unchecked( + &mut self, + dst: &mut [f64], + ) -> Result<()> { + self.read_f64_into::(dst) + } +} + +/// All types that implement `Read` get methods defined in `ReadBytesExt` +/// for free. +impl ReadBytesExt for R {} + +/// Extends [`Write`] with methods for writing numbers. (For `std::io`.) +/// +/// Most of the methods defined here have an unconstrained type parameter that +/// must be explicitly instantiated. Typically, it is instantiated with either +/// the [`BigEndian`] or [`LittleEndian`] types defined in this crate. +/// +/// # Examples +/// +/// Write unsigned 16 bit big-endian integers to a [`Write`]: +/// +/// ```rust +/// use byteorder::{BigEndian, WriteBytesExt}; +/// +/// let mut wtr = vec![]; +/// wtr.write_u16::(517).unwrap(); +/// wtr.write_u16::(768).unwrap(); +/// assert_eq!(wtr, vec![2, 5, 3, 0]); +/// ``` +/// +/// [`BigEndian`]: enum.BigEndian.html +/// [`LittleEndian`]: enum.LittleEndian.html +/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +pub trait WriteBytesExt: io::Write { + /// Writes an unsigned 8 bit integer to the underlying writer. + /// + /// Note that since this writes a single byte, no byte order conversions + /// are used. It is included for completeness. 
+ /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. + /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write unsigned 8 bit integers to a `Write`: + /// + /// ```rust + /// use byteorder::WriteBytesExt; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_u8(2).unwrap(); + /// wtr.write_u8(5).unwrap(); + /// assert_eq!(wtr, b"\x02\x05"); + /// ``` + #[inline] + fn write_u8(&mut self, n: u8) -> Result<()> { + self.write_all(&[n]) + } + + /// Writes a signed 8 bit integer to the underlying writer. + /// + /// Note that since this writes a single byte, no byte order conversions + /// are used. It is included for completeness. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. + /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write signed 8 bit integers to a `Write`: + /// + /// ```rust + /// use byteorder::WriteBytesExt; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_i8(2).unwrap(); + /// wtr.write_i8(-5).unwrap(); + /// assert_eq!(wtr, b"\x02\xfb"); + /// ``` + #[inline] + fn write_i8(&mut self, n: i8) -> Result<()> { + self.write_all(&[n as u8]) + } + + /// Writes an unsigned 16 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. 
+ /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write unsigned 16 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_u16::(517).unwrap(); + /// wtr.write_u16::(768).unwrap(); + /// assert_eq!(wtr, b"\x02\x05\x03\x00"); + /// ``` + #[inline] + fn write_u16(&mut self, n: u16) -> Result<()> { + let mut buf = [0; 2]; + T::write_u16(&mut buf, n); + self.write_all(&buf) + } + + /// Writes a signed 16 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. + /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write signed 16 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_i16::(193).unwrap(); + /// wtr.write_i16::(-132).unwrap(); + /// assert_eq!(wtr, b"\x00\xc1\xff\x7c"); + /// ``` + #[inline] + fn write_i16(&mut self, n: i16) -> Result<()> { + let mut buf = [0; 2]; + T::write_i16(&mut buf, n); + self.write_all(&buf) + } + + /// Writes an unsigned 24 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. 
+ /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write unsigned 24 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_u24::(267).unwrap(); + /// wtr.write_u24::(120111).unwrap(); + /// assert_eq!(wtr, b"\x00\x01\x0b\x01\xd5\x2f"); + /// ``` + #[inline] + fn write_u24(&mut self, n: u32) -> Result<()> { + let mut buf = [0; 3]; + T::write_u24(&mut buf, n); + self.write_all(&buf) + } + + /// Writes a signed 24 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. + /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write signed 24 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_i24::(-34253).unwrap(); + /// wtr.write_i24::(120111).unwrap(); + /// assert_eq!(wtr, b"\xff\x7a\x33\x01\xd5\x2f"); + /// ``` + #[inline] + fn write_i24(&mut self, n: i32) -> Result<()> { + let mut buf = [0; 3]; + T::write_i24(&mut buf, n); + self.write_all(&buf) + } + + /// Writes an unsigned 32 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. 
+ /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write unsigned 32 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_u32::(267).unwrap(); + /// wtr.write_u32::(1205419366).unwrap(); + /// assert_eq!(wtr, b"\x00\x00\x01\x0b\x47\xd9\x3d\x66"); + /// ``` + #[inline] + fn write_u32(&mut self, n: u32) -> Result<()> { + let mut buf = [0; 4]; + T::write_u32(&mut buf, n); + self.write_all(&buf) + } + + /// Writes a signed 32 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. + /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write signed 32 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_i32::(-34253).unwrap(); + /// wtr.write_i32::(1205419366).unwrap(); + /// assert_eq!(wtr, b"\xff\xff\x7a\x33\x47\xd9\x3d\x66"); + /// ``` + #[inline] + fn write_i32(&mut self, n: i32) -> Result<()> { + let mut buf = [0; 4]; + T::write_i32(&mut buf, n); + self.write_all(&buf) + } + + /// Writes an unsigned 48 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. 
+ /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write unsigned 48 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_u48::(52360336390828).unwrap(); + /// wtr.write_u48::(541).unwrap(); + /// assert_eq!(wtr, b"\x2f\x9f\x17\x40\x3a\xac\x00\x00\x00\x00\x02\x1d"); + /// ``` + #[inline] + fn write_u48(&mut self, n: u64) -> Result<()> { + let mut buf = [0; 6]; + T::write_u48(&mut buf, n); + self.write_all(&buf) + } + + /// Writes a signed 48 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. + /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write signed 48 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_i48::(-108363435763825).unwrap(); + /// wtr.write_i48::(77).unwrap(); + /// assert_eq!(wtr, b"\x9d\x71\xab\xe7\x97\x8f\x00\x00\x00\x00\x00\x4d"); + /// ``` + #[inline] + fn write_i48(&mut self, n: i64) -> Result<()> { + let mut buf = [0; 6]; + T::write_i48(&mut buf, n); + self.write_all(&buf) + } + + /// Writes an unsigned 64 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. 
+ /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write unsigned 64 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_u64::(918733457491587).unwrap(); + /// wtr.write_u64::(143).unwrap(); + /// assert_eq!(wtr, b"\x00\x03\x43\x95\x4d\x60\x86\x83\x00\x00\x00\x00\x00\x00\x00\x8f"); + /// ``` + #[inline] + fn write_u64(&mut self, n: u64) -> Result<()> { + let mut buf = [0; 8]; + T::write_u64(&mut buf, n); + self.write_all(&buf) + } + + /// Writes a signed 64 bit integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. + /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Examples + /// + /// Write signed 64 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_i64::(i64::min_value()).unwrap(); + /// wtr.write_i64::(i64::max_value()).unwrap(); + /// assert_eq!(wtr, b"\x80\x00\x00\x00\x00\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff"); + /// ``` + #[inline] + fn write_i64(&mut self, n: i64) -> Result<()> { + let mut buf = [0; 8]; + T::write_i64(&mut buf, n); + self.write_all(&buf) + } + + /// Writes an unsigned 128 bit integer to the underlying writer. + #[inline] + fn write_u128(&mut self, n: u128) -> Result<()> { + let mut buf = [0; 16]; + T::write_u128(&mut buf, n); + self.write_all(&buf) + } + + /// Writes a signed 128 bit integer to the underlying writer. + #[inline] + fn write_i128(&mut self, n: i128) -> Result<()> { + let mut buf = [0; 16]; + T::write_i128(&mut buf, n); + self.write_all(&buf) + } + + /// Writes an unsigned n-bytes integer to the underlying writer. 
+ /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. + /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Panics + /// + /// If the given integer is not representable in the given number of bytes, + /// this method panics. If `nbytes > 8`, this method panics. + /// + /// # Examples + /// + /// Write unsigned 40 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_uint::(312550384361, 5).unwrap(); + /// wtr.write_uint::(43, 5).unwrap(); + /// assert_eq!(wtr, b"\x48\xc5\x74\x62\xe9\x00\x00\x00\x00\x2b"); + /// ``` + #[inline] + fn write_uint( + &mut self, + n: u64, + nbytes: usize, + ) -> Result<()> { + let mut buf = [0; 8]; + T::write_uint(&mut buf, n, nbytes); + self.write_all(&buf[0..nbytes]) + } + + /// Writes a signed n-bytes integer to the underlying writer. + /// + /// # Errors + /// + /// This method returns the same errors as [`Write::write_all`]. + /// + /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all + /// + /// # Panics + /// + /// If the given integer is not representable in the given number of bytes, + /// this method panics. If `nbytes > 8`, this method panics. + /// + /// # Examples + /// + /// Write signed 56 bit big-endian integers to a `Write`: + /// + /// ```rust + /// use byteorder::{BigEndian, WriteBytesExt}; + /// + /// let mut wtr = Vec::new(); + /// wtr.write_int::(-3548172039376767, 7).unwrap(); + /// wtr.write_int::(43, 7).unwrap(); + /// assert_eq!(wtr, b"\xf3\x64\xf4\xd1\xfd\xb0\x81\x00\x00\x00\x00\x00\x00\x2b"); + /// ``` + #[inline] + fn write_int( + &mut self, + n: i64, + nbytes: usize, + ) -> Result<()> { + let mut buf = [0; 8]; + T::write_int(&mut buf, n, nbytes); + self.write_all(&buf[0..nbytes]) + } + + /// Writes an unsigned n-bytes integer to the underlying writer. 
+    ///
+    /// If the given integer is not representable in the given number of bytes,
+    /// this method panics. If `nbytes > 16`, this method panics.
+    #[inline]
+    fn write_uint128<T: ByteOrder>(
+        &mut self,
+        n: u128,
+        nbytes: usize,
+    ) -> Result<()> {
+        let mut buf = [0; 16];
+        T::write_uint128(&mut buf, n, nbytes);
+        self.write_all(&buf[0..nbytes])
+    }
+
+    /// Writes a signed n-bytes integer to the underlying writer.
+    ///
+    /// If the given integer is not representable in the given number of bytes,
+    /// this method panics. If `nbytes > 16`, this method panics.
+    #[inline]
+    fn write_int128<T: ByteOrder>(
+        &mut self,
+        n: i128,
+        nbytes: usize,
+    ) -> Result<()> {
+        let mut buf = [0; 16];
+        T::write_int128(&mut buf, n, nbytes);
+        self.write_all(&buf[0..nbytes])
+    }
+
+    /// Writes a IEEE754 single-precision (4 bytes) floating point number to
+    /// the underlying writer.
+    ///
+    /// # Errors
+    ///
+    /// This method returns the same errors as [`Write::write_all`].
+    ///
+    /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all
+    ///
+    /// # Examples
+    ///
+    /// Write a big-endian single-precision floating point number to a `Write`:
+    ///
+    /// ```rust
+    /// use std::f32;
+    ///
+    /// use byteorder::{BigEndian, WriteBytesExt};
+    ///
+    /// let mut wtr = Vec::new();
+    /// wtr.write_f32::<BigEndian>(f32::consts::PI).unwrap();
+    /// assert_eq!(wtr, b"\x40\x49\x0f\xdb");
+    /// ```
+    #[inline]
+    fn write_f32<T: ByteOrder>(&mut self, n: f32) -> Result<()> {
+        let mut buf = [0; 4];
+        T::write_f32(&mut buf, n);
+        self.write_all(&buf)
+    }
+
+    /// Writes a IEEE754 double-precision (8 bytes) floating point number to
+    /// the underlying writer.
+    ///
+    /// # Errors
+    ///
+    /// This method returns the same errors as [`Write::write_all`].
+    ///
+    /// [`Write::write_all`]: https://doc.rust-lang.org/std/io/trait.Write.html#method.write_all
+    ///
+    /// # Examples
+    ///
+    /// Write a big-endian double-precision floating point number to a `Write`:
+    ///
+    /// ```rust
+    /// use std::f64;
+    ///
+    /// use byteorder::{BigEndian, WriteBytesExt};
+    ///
+    /// let mut wtr = Vec::new();
+    /// wtr.write_f64::<BigEndian>(f64::consts::PI).unwrap();
+    /// assert_eq!(wtr, b"\x40\x09\x21\xfb\x54\x44\x2d\x18");
+    /// ```
+    #[inline]
+    fn write_f64<T: ByteOrder>(&mut self, n: f64) -> Result<()> {
+        let mut buf = [0; 8];
+        T::write_f64(&mut buf, n);
+        self.write_all(&buf)
+    }
+}
+
+/// All types that implement `Write` get methods defined in `WriteBytesExt`
+/// for free.
+impl<W: io::Write + ?Sized> WriteBytesExt for W {}
+
+/// Convert a slice of T (where T is plain old data) to its mutable binary
+/// representation.
+///
+/// This function is wildly unsafe because it permits arbitrary modification of
+/// the binary representation of any `Copy` type. Use with care. It's intended
+/// to be called only where `T` is a numeric type.
+unsafe fn slice_to_u8_mut<T: Copy>(slice: &mut [T]) -> &mut [u8] {
+    use std::mem::size_of;
+
+    let len = size_of::<T>() * slice.len();
+    slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u8, len)
+}
diff --git a/src/rust/vendor/byteorder/src/lib.rs b/src/rust/vendor/byteorder/src/lib.rs
new file mode 100644
index 000000000..cfd53c3f9
--- /dev/null
+++ b/src/rust/vendor/byteorder/src/lib.rs
@@ -0,0 +1,3975 @@
+/*!
+This crate provides convenience methods for encoding and decoding numbers in
+either [big-endian or little-endian order].
+
+The organization of the crate is pretty simple. A trait, [`ByteOrder`], specifies
+byte conversion methods for each type of number in Rust (sans numbers that have
+a platform dependent size like `usize` and `isize`). Two types, [`BigEndian`]
+and [`LittleEndian`] implement these methods.
Finally, [`ReadBytesExt`] and
+[`WriteBytesExt`] provide convenience methods available to all types that
+implement [`Read`] and [`Write`].
+
+An alias, [`NetworkEndian`], for [`BigEndian`] is provided to help improve
+code clarity.
+
+An additional alias, [`NativeEndian`], is provided for the endianness of the
+local platform. This is convenient when serializing data for use and
+conversions are not desired.
+
+# Examples
+
+Read unsigned 16 bit big-endian integers from a [`Read`] type:
+
+```rust
+use std::io::Cursor;
+use byteorder::{BigEndian, ReadBytesExt};
+
+let mut rdr = Cursor::new(vec![2, 5, 3, 0]);
+// Note that we use type parameters to indicate which kind of byte order
+// we want!
+assert_eq!(517, rdr.read_u16::<BigEndian>().unwrap());
+assert_eq!(768, rdr.read_u16::<BigEndian>().unwrap());
+```
+
+Write unsigned 16 bit little-endian integers to a [`Write`] type:
+
+```rust
+use byteorder::{LittleEndian, WriteBytesExt};
+
+let mut wtr = vec![];
+wtr.write_u16::<LittleEndian>(517).unwrap();
+wtr.write_u16::<LittleEndian>(768).unwrap();
+assert_eq!(wtr, vec![5, 2, 0, 3]);
+```
+
+# Optional Features
+
+This crate optionally provides support for 128 bit values (`i128` and `u128`)
+when built with the `i128` feature enabled.
+
+This crate can also be used without the standard library.
+
+# Alternatives
+
+Note that as of Rust 1.32, the standard numeric types provide built-in methods
+like `to_le_bytes` and `from_le_bytes`, which support some of the same use
+cases.
+ +[big-endian or little-endian order]: https://en.wikipedia.org/wiki/Endianness +[`ByteOrder`]: trait.ByteOrder.html +[`BigEndian`]: enum.BigEndian.html +[`LittleEndian`]: enum.LittleEndian.html +[`ReadBytesExt`]: trait.ReadBytesExt.html +[`WriteBytesExt`]: trait.WriteBytesExt.html +[`NetworkEndian`]: type.NetworkEndian.html +[`NativeEndian`]: type.NativeEndian.html +[`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html +[`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html +*/ + +#![deny(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] +// When testing under miri, we disable tests that take too long. But this +// provokes lots of dead code warnings. So we just squash them. +#![cfg_attr(miri, allow(dead_code, unused_macros))] + +use core::{ + convert::TryInto, fmt::Debug, hash::Hash, mem::align_of, + ptr::copy_nonoverlapping, slice, +}; + +#[cfg(feature = "std")] +pub use crate::io::{ReadBytesExt, WriteBytesExt}; + +#[cfg(feature = "std")] +mod io; + +#[inline] +fn extend_sign(val: u64, nbytes: usize) -> i64 { + let shift = (8 - nbytes) * 8; + (val << shift) as i64 >> shift +} + +#[inline] +fn extend_sign128(val: u128, nbytes: usize) -> i128 { + let shift = (16 - nbytes) * 8; + (val << shift) as i128 >> shift +} + +#[inline] +fn unextend_sign(val: i64, nbytes: usize) -> u64 { + let shift = (8 - nbytes) * 8; + (val << shift) as u64 >> shift +} + +#[inline] +fn unextend_sign128(val: i128, nbytes: usize) -> u128 { + let shift = (16 - nbytes) * 8; + (val << shift) as u128 >> shift +} + +#[inline] +fn pack_size(n: u64) -> usize { + if n < 1 << 8 { + 1 + } else if n < 1 << 16 { + 2 + } else if n < 1 << 24 { + 3 + } else if n < 1 << 32 { + 4 + } else if n < 1 << 40 { + 5 + } else if n < 1 << 48 { + 6 + } else if n < 1 << 56 { + 7 + } else { + 8 + } +} + +#[inline] +fn pack_size128(n: u128) -> usize { + if n < 1 << 8 { + 1 + } else if n < 1 << 16 { + 2 + } else if n < 1 << 24 { + 3 + } else if n < 1 << 32 { + 4 + } else if n < 1 << 40 { + 5 + } else 
if n < 1 << 48 { + 6 + } else if n < 1 << 56 { + 7 + } else if n < 1 << 64 { + 8 + } else if n < 1 << 72 { + 9 + } else if n < 1 << 80 { + 10 + } else if n < 1 << 88 { + 11 + } else if n < 1 << 96 { + 12 + } else if n < 1 << 104 { + 13 + } else if n < 1 << 112 { + 14 + } else if n < 1 << 120 { + 15 + } else { + 16 + } +} + +mod private { + /// Sealed stops crates other than byteorder from implementing any traits + /// that use it. + pub trait Sealed {} + impl Sealed for super::LittleEndian {} + impl Sealed for super::BigEndian {} +} + +/// `ByteOrder` describes types that can serialize integers as bytes. +/// +/// Note that `Self` does not appear anywhere in this trait's definition! +/// Therefore, in order to use it, you'll need to use syntax like +/// `T::read_u16(&[0, 1])` where `T` implements `ByteOrder`. +/// +/// This crate provides two types that implement `ByteOrder`: [`BigEndian`] +/// and [`LittleEndian`]. +/// This trait is sealed and cannot be implemented for callers to avoid +/// breaking backwards compatibility when adding new derived traits. +/// +/// # Examples +/// +/// Write and read `u32` numbers in little endian order: +/// +/// ```rust +/// use byteorder::{ByteOrder, LittleEndian}; +/// +/// let mut buf = [0; 4]; +/// LittleEndian::write_u32(&mut buf, 1_000_000); +/// assert_eq!(1_000_000, LittleEndian::read_u32(&buf)); +/// ``` +/// +/// Write and read `i16` numbers in big endian order: +/// +/// ```rust +/// use byteorder::{ByteOrder, BigEndian}; +/// +/// let mut buf = [0; 2]; +/// BigEndian::write_i16(&mut buf, -5_000); +/// assert_eq!(-5_000, BigEndian::read_i16(&buf)); +/// ``` +/// +/// [`BigEndian`]: enum.BigEndian.html +/// [`LittleEndian`]: enum.LittleEndian.html +pub trait ByteOrder: + Clone + + Copy + + Debug + + Default + + Eq + + Hash + + Ord + + PartialEq + + PartialOrd + + private::Sealed +{ + /// Reads an unsigned 16 bit integer from `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 2`. 
+ fn read_u16(buf: &[u8]) -> u16; + + /// Reads an unsigned 24 bit integer from `buf`, stored in u32. + /// + /// # Panics + /// + /// Panics when `buf.len() < 3`. + /// + /// # Examples + /// + /// Write and read 24 bit `u32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_u24(&mut buf, 1_000_000); + /// assert_eq!(1_000_000, LittleEndian::read_u24(&buf)); + /// ``` + fn read_u24(buf: &[u8]) -> u32 { + Self::read_uint(buf, 3) as u32 + } + + /// Reads an unsigned 32 bit integer from `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 4`. + /// + /// # Examples + /// + /// Write and read `u32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 4]; + /// LittleEndian::write_u32(&mut buf, 1_000_000); + /// assert_eq!(1_000_000, LittleEndian::read_u32(&buf)); + /// ``` + fn read_u32(buf: &[u8]) -> u32; + + /// Reads an unsigned 48 bit integer from `buf`, stored in u64. + /// + /// # Panics + /// + /// Panics when `buf.len() < 6`. + /// + /// # Examples + /// + /// Write and read 48 bit `u64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 6]; + /// LittleEndian::write_u48(&mut buf, 1_000_000_000_000); + /// assert_eq!(1_000_000_000_000, LittleEndian::read_u48(&buf)); + /// ``` + fn read_u48(buf: &[u8]) -> u64 { + Self::read_uint(buf, 6) as u64 + } + + /// Reads an unsigned 64 bit integer from `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 8`. 
+ /// + /// # Examples + /// + /// Write and read `u64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 8]; + /// LittleEndian::write_u64(&mut buf, 1_000_000); + /// assert_eq!(1_000_000, LittleEndian::read_u64(&buf)); + /// ``` + fn read_u64(buf: &[u8]) -> u64; + + /// Reads an unsigned 128 bit integer from `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 16`. + /// + /// # Examples + /// + /// Write and read `u128` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 16]; + /// LittleEndian::write_u128(&mut buf, 1_000_000); + /// assert_eq!(1_000_000, LittleEndian::read_u128(&buf)); + /// ``` + fn read_u128(buf: &[u8]) -> u128; + + /// Reads an unsigned n-bytes integer from `buf`. + /// + /// # Panics + /// + /// Panics when `nbytes < 1` or `nbytes > 8` or + /// `buf.len() < nbytes` + /// + /// # Examples + /// + /// Write and read an n-byte number in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_uint(&mut buf, 1_000_000, 3); + /// assert_eq!(1_000_000, LittleEndian::read_uint(&buf, 3)); + /// ``` + fn read_uint(buf: &[u8], nbytes: usize) -> u64; + + /// Reads an unsigned n-bytes integer from `buf`. + /// + /// # Panics + /// + /// Panics when `nbytes < 1` or `nbytes > 16` or + /// `buf.len() < nbytes` + /// + /// # Examples + /// + /// Write and read an n-byte number in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_uint128(&mut buf, 1_000_000, 3); + /// assert_eq!(1_000_000, LittleEndian::read_uint128(&buf, 3)); + /// ``` + fn read_uint128(buf: &[u8], nbytes: usize) -> u128; + + /// Writes an unsigned 16 bit integer `n` to `buf`. 
+ /// + /// # Panics + /// + /// Panics when `buf.len() < 2`. + /// + /// # Examples + /// + /// Write and read `u16` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 2]; + /// LittleEndian::write_u16(&mut buf, 1_000); + /// assert_eq!(1_000, LittleEndian::read_u16(&buf)); + /// ``` + fn write_u16(buf: &mut [u8], n: u16); + + /// Writes an unsigned 24 bit integer `n` to `buf`, stored in u32. + /// + /// # Panics + /// + /// Panics when `buf.len() < 3`. + /// + /// # Examples + /// + /// Write and read 24 bit `u32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_u24(&mut buf, 1_000_000); + /// assert_eq!(1_000_000, LittleEndian::read_u24(&buf)); + /// ``` + fn write_u24(buf: &mut [u8], n: u32) { + Self::write_uint(buf, n as u64, 3) + } + + /// Writes an unsigned 32 bit integer `n` to `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 4`. + /// + /// # Examples + /// + /// Write and read `u32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 4]; + /// LittleEndian::write_u32(&mut buf, 1_000_000); + /// assert_eq!(1_000_000, LittleEndian::read_u32(&buf)); + /// ``` + fn write_u32(buf: &mut [u8], n: u32); + + /// Writes an unsigned 48 bit integer `n` to `buf`, stored in u64. + /// + /// # Panics + /// + /// Panics when `buf.len() < 6`. 
+ /// + /// # Examples + /// + /// Write and read 48 bit `u64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 6]; + /// LittleEndian::write_u48(&mut buf, 1_000_000_000_000); + /// assert_eq!(1_000_000_000_000, LittleEndian::read_u48(&buf)); + /// ``` + fn write_u48(buf: &mut [u8], n: u64) { + Self::write_uint(buf, n as u64, 6) + } + + /// Writes an unsigned 64 bit integer `n` to `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 8`. + /// + /// # Examples + /// + /// Write and read `u64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 8]; + /// LittleEndian::write_u64(&mut buf, 1_000_000); + /// assert_eq!(1_000_000, LittleEndian::read_u64(&buf)); + /// ``` + fn write_u64(buf: &mut [u8], n: u64); + + /// Writes an unsigned 128 bit integer `n` to `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 16`. + /// + /// # Examples + /// + /// Write and read `u128` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 16]; + /// LittleEndian::write_u128(&mut buf, 1_000_000); + /// assert_eq!(1_000_000, LittleEndian::read_u128(&buf)); + /// ``` + fn write_u128(buf: &mut [u8], n: u128); + + /// Writes an unsigned integer `n` to `buf` using only `nbytes`. + /// + /// # Panics + /// + /// If `n` is not representable in `nbytes`, or if `nbytes` is `> 8`, then + /// this method panics. 
+ /// + /// # Examples + /// + /// Write and read an n-byte number in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_uint(&mut buf, 1_000_000, 3); + /// assert_eq!(1_000_000, LittleEndian::read_uint(&buf, 3)); + /// ``` + fn write_uint(buf: &mut [u8], n: u64, nbytes: usize); + + /// Writes an unsigned integer `n` to `buf` using only `nbytes`. + /// + /// # Panics + /// + /// If `n` is not representable in `nbytes`, or if `nbytes` is `> 16`, then + /// this method panics. + /// + /// # Examples + /// + /// Write and read an n-byte number in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_uint128(&mut buf, 1_000_000, 3); + /// assert_eq!(1_000_000, LittleEndian::read_uint128(&buf, 3)); + /// ``` + fn write_uint128(buf: &mut [u8], n: u128, nbytes: usize); + + /// Reads a signed 16 bit integer from `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 2`. + /// + /// # Examples + /// + /// Write and read `i16` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 2]; + /// LittleEndian::write_i16(&mut buf, -1_000); + /// assert_eq!(-1_000, LittleEndian::read_i16(&buf)); + /// ``` + #[inline] + fn read_i16(buf: &[u8]) -> i16 { + Self::read_u16(buf) as i16 + } + + /// Reads a signed 24 bit integer from `buf`, stored in i32. + /// + /// # Panics + /// + /// Panics when `buf.len() < 3`. 
+ /// + /// # Examples + /// + /// Write and read 24 bit `i32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_i24(&mut buf, -1_000_000); + /// assert_eq!(-1_000_000, LittleEndian::read_i24(&buf)); + /// ``` + #[inline] + fn read_i24(buf: &[u8]) -> i32 { + Self::read_int(buf, 3) as i32 + } + + /// Reads a signed 32 bit integer from `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 4`. + /// + /// # Examples + /// + /// Write and read `i32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 4]; + /// LittleEndian::write_i32(&mut buf, -1_000_000); + /// assert_eq!(-1_000_000, LittleEndian::read_i32(&buf)); + /// ``` + #[inline] + fn read_i32(buf: &[u8]) -> i32 { + Self::read_u32(buf) as i32 + } + + /// Reads a signed 48 bit integer from `buf`, stored in i64. + /// + /// # Panics + /// + /// Panics when `buf.len() < 6`. + /// + /// # Examples + /// + /// Write and read 48 bit `i64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 6]; + /// LittleEndian::write_i48(&mut buf, -1_000_000_000_000); + /// assert_eq!(-1_000_000_000_000, LittleEndian::read_i48(&buf)); + /// ``` + #[inline] + fn read_i48(buf: &[u8]) -> i64 { + Self::read_int(buf, 6) as i64 + } + + /// Reads a signed 64 bit integer from `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 8`. 
+ /// + /// # Examples + /// + /// Write and read `i64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 8]; + /// LittleEndian::write_i64(&mut buf, -1_000_000_000); + /// assert_eq!(-1_000_000_000, LittleEndian::read_i64(&buf)); + /// ``` + #[inline] + fn read_i64(buf: &[u8]) -> i64 { + Self::read_u64(buf) as i64 + } + + /// Reads a signed 128 bit integer from `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 16`. + /// + /// # Examples + /// + /// Write and read `i128` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 16]; + /// LittleEndian::write_i128(&mut buf, -1_000_000_000); + /// assert_eq!(-1_000_000_000, LittleEndian::read_i128(&buf)); + /// ``` + #[inline] + fn read_i128(buf: &[u8]) -> i128 { + Self::read_u128(buf) as i128 + } + + /// Reads a signed n-bytes integer from `buf`. + /// + /// # Panics + /// + /// Panics when `nbytes < 1` or `nbytes > 8` or + /// `buf.len() < nbytes` + /// + /// # Examples + /// + /// Write and read n-length signed numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_int(&mut buf, -1_000, 3); + /// assert_eq!(-1_000, LittleEndian::read_int(&buf, 3)); + /// ``` + #[inline] + fn read_int(buf: &[u8], nbytes: usize) -> i64 { + extend_sign(Self::read_uint(buf, nbytes), nbytes) + } + + /// Reads a signed n-bytes integer from `buf`. 
+ /// + /// # Panics + /// + /// Panics when `nbytes < 1` or `nbytes > 16` or + /// `buf.len() < nbytes` + /// + /// # Examples + /// + /// Write and read n-length signed numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_int128(&mut buf, -1_000, 3); + /// assert_eq!(-1_000, LittleEndian::read_int128(&buf, 3)); + /// ``` + #[inline] + fn read_int128(buf: &[u8], nbytes: usize) -> i128 { + extend_sign128(Self::read_uint128(buf, nbytes), nbytes) + } + + /// Reads a IEEE754 single-precision (4 bytes) floating point number. + /// + /// # Panics + /// + /// Panics when `buf.len() < 4`. + /// + /// # Examples + /// + /// Write and read `f32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let e = 2.71828; + /// let mut buf = [0; 4]; + /// LittleEndian::write_f32(&mut buf, e); + /// assert_eq!(e, LittleEndian::read_f32(&buf)); + /// ``` + #[inline] + fn read_f32(buf: &[u8]) -> f32 { + f32::from_bits(Self::read_u32(buf)) + } + + /// Reads a IEEE754 double-precision (8 bytes) floating point number. + /// + /// # Panics + /// + /// Panics when `buf.len() < 8`. + /// + /// # Examples + /// + /// Write and read `f64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let phi = 1.6180339887; + /// let mut buf = [0; 8]; + /// LittleEndian::write_f64(&mut buf, phi); + /// assert_eq!(phi, LittleEndian::read_f64(&buf)); + /// ``` + #[inline] + fn read_f64(buf: &[u8]) -> f64 { + f64::from_bits(Self::read_u64(buf)) + } + + /// Writes a signed 16 bit integer `n` to `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 2`. 
+ /// + /// # Examples + /// + /// Write and read `i16` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 2]; + /// LittleEndian::write_i16(&mut buf, -1_000); + /// assert_eq!(-1_000, LittleEndian::read_i16(&buf)); + /// ``` + #[inline] + fn write_i16(buf: &mut [u8], n: i16) { + Self::write_u16(buf, n as u16) + } + + /// Writes a signed 24 bit integer `n` to `buf`, stored in i32. + /// + /// # Panics + /// + /// Panics when `buf.len() < 3`. + /// + /// # Examples + /// + /// Write and read 24 bit `i32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_i24(&mut buf, -1_000_000); + /// assert_eq!(-1_000_000, LittleEndian::read_i24(&buf)); + /// ``` + #[inline] + fn write_i24(buf: &mut [u8], n: i32) { + Self::write_int(buf, n as i64, 3) + } + + /// Writes a signed 32 bit integer `n` to `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 4`. + /// + /// # Examples + /// + /// Write and read `i32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 4]; + /// LittleEndian::write_i32(&mut buf, -1_000_000); + /// assert_eq!(-1_000_000, LittleEndian::read_i32(&buf)); + /// ``` + #[inline] + fn write_i32(buf: &mut [u8], n: i32) { + Self::write_u32(buf, n as u32) + } + + /// Writes a signed 48 bit integer `n` to `buf`, stored in i64. + /// + /// # Panics + /// + /// Panics when `buf.len() < 6`. 
+ /// + /// # Examples + /// + /// Write and read 48 bit `i64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 6]; + /// LittleEndian::write_i48(&mut buf, -1_000_000_000_000); + /// assert_eq!(-1_000_000_000_000, LittleEndian::read_i48(&buf)); + /// ``` + #[inline] + fn write_i48(buf: &mut [u8], n: i64) { + Self::write_int(buf, n as i64, 6) + } + + /// Writes a signed 64 bit integer `n` to `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 8`. + /// + /// # Examples + /// + /// Write and read `i64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 8]; + /// LittleEndian::write_i64(&mut buf, -1_000_000_000); + /// assert_eq!(-1_000_000_000, LittleEndian::read_i64(&buf)); + /// ``` + #[inline] + fn write_i64(buf: &mut [u8], n: i64) { + Self::write_u64(buf, n as u64) + } + + /// Writes a signed 128 bit integer `n` to `buf`. + /// + /// # Panics + /// + /// Panics when `buf.len() < 16`. + /// + /// # Examples + /// + /// Write and read n-byte `i128` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 16]; + /// LittleEndian::write_i128(&mut buf, -1_000_000_000); + /// assert_eq!(-1_000_000_000, LittleEndian::read_i128(&buf)); + /// ``` + #[inline] + fn write_i128(buf: &mut [u8], n: i128) { + Self::write_u128(buf, n as u128) + } + + /// Writes a signed integer `n` to `buf` using only `nbytes`. + /// + /// # Panics + /// + /// If `n` is not representable in `nbytes`, or if `nbytes` is `> 8`, then + /// this method panics. 
+ /// + /// # Examples + /// + /// Write and read an n-byte number in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_int(&mut buf, -1_000, 3); + /// assert_eq!(-1_000, LittleEndian::read_int(&buf, 3)); + /// ``` + #[inline] + fn write_int(buf: &mut [u8], n: i64, nbytes: usize) { + Self::write_uint(buf, unextend_sign(n, nbytes), nbytes) + } + + /// Writes a signed integer `n` to `buf` using only `nbytes`. + /// + /// # Panics + /// + /// If `n` is not representable in `nbytes`, or if `nbytes` is `> 16`, then + /// this method panics. + /// + /// # Examples + /// + /// Write and read n-length signed numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut buf = [0; 3]; + /// LittleEndian::write_int128(&mut buf, -1_000, 3); + /// assert_eq!(-1_000, LittleEndian::read_int128(&buf, 3)); + /// ``` + #[inline] + fn write_int128(buf: &mut [u8], n: i128, nbytes: usize) { + Self::write_uint128(buf, unextend_sign128(n, nbytes), nbytes) + } + + /// Writes a IEEE754 single-precision (4 bytes) floating point number. + /// + /// # Panics + /// + /// Panics when `buf.len() < 4`. + /// + /// # Examples + /// + /// Write and read `f32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let e = 2.71828; + /// let mut buf = [0; 4]; + /// LittleEndian::write_f32(&mut buf, e); + /// assert_eq!(e, LittleEndian::read_f32(&buf)); + /// ``` + #[inline] + fn write_f32(buf: &mut [u8], n: f32) { + Self::write_u32(buf, n.to_bits()) + } + + /// Writes a IEEE754 double-precision (8 bytes) floating point number. + /// + /// # Panics + /// + /// Panics when `buf.len() < 8`. 
+ /// + /// # Examples + /// + /// Write and read `f64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let phi = 1.6180339887; + /// let mut buf = [0; 8]; + /// LittleEndian::write_f64(&mut buf, phi); + /// assert_eq!(phi, LittleEndian::read_f64(&buf)); + /// ``` + #[inline] + fn write_f64(buf: &mut [u8], n: f64) { + Self::write_u64(buf, n.to_bits()) + } + + /// Reads unsigned 16 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 2*dst.len()`. + /// + /// # Examples + /// + /// Write and read `u16` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 8]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_u16_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_u16_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn read_u16_into(src: &[u8], dst: &mut [u16]); + + /// Reads unsigned 32 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 4*dst.len()`. + /// + /// # Examples + /// + /// Write and read `u32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 16]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_u32_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_u32_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn read_u32_into(src: &[u8], dst: &mut [u32]); + + /// Reads unsigned 64 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 8*dst.len()`. 
+ /// + /// # Examples + /// + /// Write and read `u64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 32]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_u64_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_u64_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn read_u64_into(src: &[u8], dst: &mut [u64]); + + /// Reads unsigned 128 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 16*dst.len()`. + /// + /// # Examples + /// + /// Write and read `u128` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 64]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_u128_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_u128_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn read_u128_into(src: &[u8], dst: &mut [u128]); + + /// Reads signed 16 bit integers from `src` to `dst`. + /// + /// # Panics + /// + /// Panics when `buf.len() != 2*dst.len()`. 
+ /// + /// # Examples + /// + /// Write and read `i16` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 8]; + /// let numbers_given = [1, 2, 0x0f, 0xee]; + /// LittleEndian::write_i16_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_i16_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + #[inline] + fn read_i16_into(src: &[u8], dst: &mut [i16]) { + let dst = unsafe { + slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u16, dst.len()) + }; + Self::read_u16_into(src, dst) + } + + /// Reads signed 32 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 4*dst.len()`. + /// + /// # Examples + /// + /// Write and read `i32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 16]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_i32_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_i32_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + #[inline] + fn read_i32_into(src: &[u8], dst: &mut [i32]) { + let dst = unsafe { + slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u32, dst.len()) + }; + Self::read_u32_into(src, dst); + } + + /// Reads signed 64 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 8*dst.len()`. 
+ /// + /// # Examples + /// + /// Write and read `i64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 32]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_i64_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_i64_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + #[inline] + fn read_i64_into(src: &[u8], dst: &mut [i64]) { + let dst = unsafe { + slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u64, dst.len()) + }; + Self::read_u64_into(src, dst); + } + + /// Reads signed 128 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 16*dst.len()`. + /// + /// # Examples + /// + /// Write and read `i128` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 64]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_i128_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_i128_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + #[inline] + fn read_i128_into(src: &[u8], dst: &mut [i128]) { + let dst = unsafe { + slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u128, dst.len()) + }; + Self::read_u128_into(src, dst); + } + + /// Reads IEEE754 single-precision (4 bytes) floating point numbers from + /// `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 4*dst.len()`. 
+ /// + /// # Examples + /// + /// Write and read `f32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 16]; + /// let numbers_given = [1.0, 2.0, 31.312e31, -11.32e19]; + /// LittleEndian::write_f32_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0.0; 4]; + /// LittleEndian::read_f32_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + #[inline] + fn read_f32_into(src: &[u8], dst: &mut [f32]) { + let dst = unsafe { + const _: () = assert!(align_of::() <= align_of::()); + slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u32, dst.len()) + }; + Self::read_u32_into(src, dst); + } + + /// **DEPRECATED**. + /// + /// This method is deprecated. Use `read_f32_into` instead. + /// Reads IEEE754 single-precision (4 bytes) floating point numbers from + /// `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 4*dst.len()`. + /// + /// # Examples + /// + /// Write and read `f32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 16]; + /// let numbers_given = [1.0, 2.0, 31.312e31, -11.32e19]; + /// LittleEndian::write_f32_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0.0; 4]; + /// LittleEndian::read_f32_into_unchecked(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + #[inline] + #[deprecated(since = "1.3.0", note = "please use `read_f32_into` instead")] + fn read_f32_into_unchecked(src: &[u8], dst: &mut [f32]) { + Self::read_f32_into(src, dst); + } + + /// Reads IEEE754 single-precision (4 bytes) floating point numbers from + /// `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 8*dst.len()`. 
+ /// + /// # Examples + /// + /// Write and read `f64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 32]; + /// let numbers_given = [1.0, 2.0, 31.312e211, -11.32e91]; + /// LittleEndian::write_f64_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0.0; 4]; + /// LittleEndian::read_f64_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + #[inline] + fn read_f64_into(src: &[u8], dst: &mut [f64]) { + let dst = unsafe { + const _: () = assert!(align_of::() <= align_of::()); + slice::from_raw_parts_mut(dst.as_mut_ptr() as *mut u64, dst.len()) + }; + Self::read_u64_into(src, dst); + } + + /// **DEPRECATED**. + /// + /// This method is deprecated. Use `read_f64_into` instead. + /// + /// Reads IEEE754 single-precision (4 bytes) floating point numbers from + /// `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 8*dst.len()`. + /// + /// # Examples + /// + /// Write and read `f64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 32]; + /// let numbers_given = [1.0, 2.0, 31.312e211, -11.32e91]; + /// LittleEndian::write_f64_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0.0; 4]; + /// LittleEndian::read_f64_into_unchecked(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + #[inline] + #[deprecated(since = "1.3.0", note = "please use `read_f64_into` instead")] + fn read_f64_into_unchecked(src: &[u8], dst: &mut [f64]) { + Self::read_f64_into(src, dst); + } + + /// Writes unsigned 16 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `dst.len() != 2*src.len()`. 
+ /// + /// # Examples + /// + /// Write and read `u16` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 8]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_u16_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_u16_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_u16_into(src: &[u16], dst: &mut [u8]); + + /// Writes unsigned 32 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `dst.len() != 4*src.len()`. + /// + /// # Examples + /// + /// Write and read `u32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 16]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_u32_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_u32_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_u32_into(src: &[u32], dst: &mut [u8]); + + /// Writes unsigned 64 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `dst.len() != 8*src.len()`. + /// + /// # Examples + /// + /// Write and read `u64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 32]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_u64_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_u64_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_u64_into(src: &[u64], dst: &mut [u8]); + + /// Writes unsigned 128 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `dst.len() != 16*src.len()`. 
+ /// + /// # Examples + /// + /// Write and read `u128` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 64]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_u128_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_u128_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_u128_into(src: &[u128], dst: &mut [u8]); + + /// Writes signed 8 bit integers from `src` into `dst`. + /// + /// Note that since each `i8` is a single byte, no byte order conversions + /// are used. This method is included because it provides a safe, simple + /// way for the caller to write from a `&[i8]` buffer. (Without this + /// method, the caller would have to either use `unsafe` code or convert + /// each byte to `u8` individually.) + /// + /// # Panics + /// + /// Panics when `buf.len() != src.len()`. + /// + /// # Examples + /// + /// Write and read `i8` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian, ReadBytesExt}; + /// + /// let mut bytes = [0; 4]; + /// let numbers_given = [1, 2, 0xf, 0xe]; + /// LittleEndian::write_i8_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// bytes.as_ref().read_i8_into(&mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_i8_into(src: &[i8], dst: &mut [u8]) { + let src = unsafe { + slice::from_raw_parts(src.as_ptr() as *const u8, src.len()) + }; + dst.copy_from_slice(src); + } + + /// Writes signed 16 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `buf.len() != 2*src.len()`. 
+ /// + /// # Examples + /// + /// Write and read `i16` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 8]; + /// let numbers_given = [1, 2, 0x0f, 0xee]; + /// LittleEndian::write_i16_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_i16_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_i16_into(src: &[i16], dst: &mut [u8]) { + let src = unsafe { + slice::from_raw_parts(src.as_ptr() as *const u16, src.len()) + }; + Self::write_u16_into(src, dst); + } + + /// Writes signed 32 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `dst.len() != 4*src.len()`. + /// + /// # Examples + /// + /// Write and read `i32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 16]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_i32_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_i32_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_i32_into(src: &[i32], dst: &mut [u8]) { + let src = unsafe { + slice::from_raw_parts(src.as_ptr() as *const u32, src.len()) + }; + Self::write_u32_into(src, dst); + } + + /// Writes signed 64 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `dst.len() != 8*src.len()`. 
+ /// + /// # Examples + /// + /// Write and read `i64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 32]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_i64_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_i64_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_i64_into(src: &[i64], dst: &mut [u8]) { + let src = unsafe { + slice::from_raw_parts(src.as_ptr() as *const u64, src.len()) + }; + Self::write_u64_into(src, dst); + } + + /// Writes signed 128 bit integers from `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `dst.len() != 16*src.len()`. + /// + /// # Examples + /// + /// Write and read `i128` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 64]; + /// let numbers_given = [1, 2, 0xf00f, 0xffee]; + /// LittleEndian::write_i128_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0; 4]; + /// LittleEndian::read_i128_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_i128_into(src: &[i128], dst: &mut [u8]) { + let src = unsafe { + slice::from_raw_parts(src.as_ptr() as *const u128, src.len()) + }; + Self::write_u128_into(src, dst); + } + + /// Writes IEEE754 single-precision (4 bytes) floating point numbers from + /// `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 4*dst.len()`. 
+ /// + /// # Examples + /// + /// Write and read `f32` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 16]; + /// let numbers_given = [1.0, 2.0, 31.312e31, -11.32e19]; + /// LittleEndian::write_f32_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0.0; 4]; + /// LittleEndian::read_f32_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_f32_into(src: &[f32], dst: &mut [u8]) { + let src = unsafe { + slice::from_raw_parts(src.as_ptr() as *const u32, src.len()) + }; + Self::write_u32_into(src, dst); + } + + /// Writes IEEE754 double-precision (8 bytes) floating point numbers from + /// `src` into `dst`. + /// + /// # Panics + /// + /// Panics when `src.len() != 8*dst.len()`. + /// + /// # Examples + /// + /// Write and read `f64` numbers in little endian order: + /// + /// ```rust + /// use byteorder::{ByteOrder, LittleEndian}; + /// + /// let mut bytes = [0; 32]; + /// let numbers_given = [1.0, 2.0, 31.312e211, -11.32e91]; + /// LittleEndian::write_f64_into(&numbers_given, &mut bytes); + /// + /// let mut numbers_got = [0.0; 4]; + /// LittleEndian::read_f64_into(&bytes, &mut numbers_got); + /// assert_eq!(numbers_given, numbers_got); + /// ``` + fn write_f64_into(src: &[f64], dst: &mut [u8]) { + let src = unsafe { + slice::from_raw_parts(src.as_ptr() as *const u64, src.len()) + }; + Self::write_u64_into(src, dst); + } + + /// Converts the given slice of unsigned 16 bit integers to a particular + /// endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. 
+ /// + /// # Examples + /// + /// Convert the host platform's endianness to big-endian: + /// + /// ```rust + /// use byteorder::{ByteOrder, BigEndian}; + /// + /// let mut numbers = [5, 65000]; + /// BigEndian::from_slice_u16(&mut numbers); + /// assert_eq!(numbers, [5u16.to_be(), 65000u16.to_be()]); + /// ``` + fn from_slice_u16(numbers: &mut [u16]); + + /// Converts the given slice of unsigned 32 bit integers to a particular + /// endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. + /// + /// # Examples + /// + /// Convert the host platform's endianness to big-endian: + /// + /// ```rust + /// use byteorder::{ByteOrder, BigEndian}; + /// + /// let mut numbers = [5, 65000]; + /// BigEndian::from_slice_u32(&mut numbers); + /// assert_eq!(numbers, [5u32.to_be(), 65000u32.to_be()]); + /// ``` + fn from_slice_u32(numbers: &mut [u32]); + + /// Converts the given slice of unsigned 64 bit integers to a particular + /// endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. + /// + /// # Examples + /// + /// Convert the host platform's endianness to big-endian: + /// + /// ```rust + /// use byteorder::{ByteOrder, BigEndian}; + /// + /// let mut numbers = [5, 65000]; + /// BigEndian::from_slice_u64(&mut numbers); + /// assert_eq!(numbers, [5u64.to_be(), 65000u64.to_be()]); + /// ``` + fn from_slice_u64(numbers: &mut [u64]); + + /// Converts the given slice of unsigned 128 bit integers to a particular + /// endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. 
+ /// + /// # Examples + /// + /// Convert the host platform's endianness to big-endian: + /// + /// ```rust + /// use byteorder::{ByteOrder, BigEndian}; + /// + /// let mut numbers = [5, 65000]; + /// BigEndian::from_slice_u128(&mut numbers); + /// assert_eq!(numbers, [5u128.to_be(), 65000u128.to_be()]); + /// ``` + fn from_slice_u128(numbers: &mut [u128]); + + /// Converts the given slice of signed 16 bit integers to a particular + /// endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. + /// + /// # Examples + /// + /// Convert the host platform's endianness to big-endian: + /// + /// ```rust + /// use byteorder::{ByteOrder, BigEndian}; + /// + /// let mut numbers = [5, 6500]; + /// BigEndian::from_slice_i16(&mut numbers); + /// assert_eq!(numbers, [5i16.to_be(), 6500i16.to_be()]); + /// ``` + #[inline] + fn from_slice_i16(src: &mut [i16]) { + let src = unsafe { + slice::from_raw_parts_mut(src.as_mut_ptr() as *mut u16, src.len()) + }; + Self::from_slice_u16(src); + } + + /// Converts the given slice of signed 32 bit integers to a particular + /// endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. + /// + /// # Examples + /// + /// Convert the host platform's endianness to big-endian: + /// + /// ```rust + /// use byteorder::{ByteOrder, BigEndian}; + /// + /// let mut numbers = [5, 65000]; + /// BigEndian::from_slice_i32(&mut numbers); + /// assert_eq!(numbers, [5i32.to_be(), 65000i32.to_be()]); + /// ``` + #[inline] + fn from_slice_i32(src: &mut [i32]) { + let src = unsafe { + slice::from_raw_parts_mut(src.as_mut_ptr() as *mut u32, src.len()) + }; + Self::from_slice_u32(src); + } + + /// Converts the given slice of signed 64 bit integers to a particular + /// endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. 
+ /// + /// # Examples + /// + /// Convert the host platform's endianness to big-endian: + /// + /// ```rust + /// use byteorder::{ByteOrder, BigEndian}; + /// + /// let mut numbers = [5, 65000]; + /// BigEndian::from_slice_i64(&mut numbers); + /// assert_eq!(numbers, [5i64.to_be(), 65000i64.to_be()]); + /// ``` + #[inline] + fn from_slice_i64(src: &mut [i64]) { + let src = unsafe { + slice::from_raw_parts_mut(src.as_mut_ptr() as *mut u64, src.len()) + }; + Self::from_slice_u64(src); + } + + /// Converts the given slice of signed 128 bit integers to a particular + /// endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. + /// + /// # Examples + /// + /// Convert the host platform's endianness to big-endian: + /// + /// ```rust + /// use byteorder::{ByteOrder, BigEndian}; + /// + /// let mut numbers = [5, 65000]; + /// BigEndian::from_slice_i128(&mut numbers); + /// assert_eq!(numbers, [5i128.to_be(), 65000i128.to_be()]); + /// ``` + #[inline] + fn from_slice_i128(src: &mut [i128]) { + let src = unsafe { + slice::from_raw_parts_mut(src.as_mut_ptr() as *mut u128, src.len()) + }; + Self::from_slice_u128(src); + } + + /// Converts the given slice of IEEE754 single-precision (4 bytes) floating + /// point numbers to a particular endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. + fn from_slice_f32(numbers: &mut [f32]); + + /// Converts the given slice of IEEE754 double-precision (8 bytes) floating + /// point numbers to a particular endianness. + /// + /// If the endianness matches the endianness of the host platform, then + /// this is a no-op. + fn from_slice_f64(numbers: &mut [f64]); +} + +/// Defines big-endian serialization. +/// +/// Note that this type has no value constructor. It is used purely at the +/// type level. 
+/// +/// # Examples +/// +/// Write and read `u32` numbers in big endian order: +/// +/// ```rust +/// use byteorder::{ByteOrder, BigEndian}; +/// +/// let mut buf = [0; 4]; +/// BigEndian::write_u32(&mut buf, 1_000_000); +/// assert_eq!(1_000_000, BigEndian::read_u32(&buf)); +/// ``` +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub enum BigEndian {} + +impl Default for BigEndian { + fn default() -> BigEndian { + panic!("BigEndian default") + } +} + +/// A type alias for [`BigEndian`]. +/// +/// [`BigEndian`]: enum.BigEndian.html +pub type BE = BigEndian; + +/// Defines little-endian serialization. +/// +/// Note that this type has no value constructor. It is used purely at the +/// type level. +/// +/// # Examples +/// +/// Write and read `u32` numbers in little endian order: +/// +/// ```rust +/// use byteorder::{ByteOrder, LittleEndian}; +/// +/// let mut buf = [0; 4]; +/// LittleEndian::write_u32(&mut buf, 1_000_000); +/// assert_eq!(1_000_000, LittleEndian::read_u32(&buf)); +/// ``` +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub enum LittleEndian {} + +impl Default for LittleEndian { + fn default() -> LittleEndian { + panic!("LittleEndian default") + } +} + +/// A type alias for [`LittleEndian`]. +/// +/// [`LittleEndian`]: enum.LittleEndian.html +pub type LE = LittleEndian; + +/// Defines network byte order serialization. +/// +/// Network byte order is defined by [RFC 1700][1] to be big-endian, and is +/// referred to in several protocol specifications. This type is an alias of +/// [`BigEndian`]. +/// +/// [1]: https://tools.ietf.org/html/rfc1700 +/// +/// Note that this type has no value constructor. It is used purely at the +/// type level. 
+/// +/// # Examples +/// +/// Write and read `i16` numbers in big endian order: +/// +/// ```rust +/// use byteorder::{ByteOrder, NetworkEndian, BigEndian}; +/// +/// let mut buf = [0; 2]; +/// BigEndian::write_i16(&mut buf, -5_000); +/// assert_eq!(-5_000, NetworkEndian::read_i16(&buf)); +/// ``` +/// +/// [`BigEndian`]: enum.BigEndian.html +pub type NetworkEndian = BigEndian; + +/// Defines system native-endian serialization. +/// +/// Note that this type has no value constructor. It is used purely at the +/// type level. +/// +/// On this platform, this is an alias for [`LittleEndian`]. +/// +/// [`LittleEndian`]: enum.LittleEndian.html +#[cfg(target_endian = "little")] +pub type NativeEndian = LittleEndian; + +/// Defines system native-endian serialization. +/// +/// Note that this type has no value constructor. It is used purely at the +/// type level. +/// +/// On this platform, this is an alias for [`BigEndian`]. +/// +/// [`BigEndian`]: enum.BigEndian.html +#[cfg(target_endian = "big")] +pub type NativeEndian = BigEndian; + +/// Copies a &[u8] $src into a &mut [$ty] $dst for the endianness given by +/// $from_bytes (must be either from_be_bytes or from_le_bytes). +/// +/// Panics if $src.len() != $dst.len() * size_of::<$ty>(). +macro_rules! read_slice { + ($src:expr, $dst:expr, $ty:ty, $from_bytes:ident) => {{ + const SIZE: usize = core::mem::size_of::<$ty>(); + // Check types: + let src: &[u8] = $src; + let dst: &mut [$ty] = $dst; + assert_eq!(src.len(), dst.len() * SIZE); + for (src, dst) in src.chunks_exact(SIZE).zip(dst.iter_mut()) { + *dst = <$ty>::$from_bytes(src.try_into().unwrap()); + } + }}; +} + +/// Copies a &[$ty] $src into a &mut [u8] $dst for the endianness given by +/// $from_bytes (must be either from_be_bytes or from_le_bytes). +/// +/// Panics if $src.len() * size_of::<$ty>() != $dst.len(). +macro_rules! 
write_slice { + ($src:expr, $dst:expr, $ty:ty, $to_bytes:ident) => {{ + const SIZE: usize = core::mem::size_of::<$ty>(); + // Check types: + let src: &[$ty] = $src; + let dst: &mut [u8] = $dst; + assert_eq!(src.len() * SIZE, dst.len()); + for (src, dst) in src.iter().zip(dst.chunks_exact_mut(SIZE)) { + dst.copy_from_slice(&src.$to_bytes()); + } + }}; +} + +impl ByteOrder for BigEndian { + #[inline] + fn read_u16(buf: &[u8]) -> u16 { + u16::from_be_bytes(buf[..2].try_into().unwrap()) + } + + #[inline] + fn read_u32(buf: &[u8]) -> u32 { + u32::from_be_bytes(buf[..4].try_into().unwrap()) + } + + #[inline] + fn read_u64(buf: &[u8]) -> u64 { + u64::from_be_bytes(buf[..8].try_into().unwrap()) + } + + #[inline] + fn read_u128(buf: &[u8]) -> u128 { + u128::from_be_bytes(buf[..16].try_into().unwrap()) + } + + #[inline] + fn read_uint(buf: &[u8], nbytes: usize) -> u64 { + let mut out = [0; 8]; + assert!(1 <= nbytes && nbytes <= out.len() && nbytes <= buf.len()); + let start = out.len() - nbytes; + out[start..].copy_from_slice(&buf[..nbytes]); + u64::from_be_bytes(out) + } + + #[inline] + fn read_uint128(buf: &[u8], nbytes: usize) -> u128 { + let mut out = [0; 16]; + assert!(1 <= nbytes && nbytes <= out.len() && nbytes <= buf.len()); + let start = out.len() - nbytes; + out[start..].copy_from_slice(&buf[..nbytes]); + u128::from_be_bytes(out) + } + + #[inline] + fn write_u16(buf: &mut [u8], n: u16) { + buf[..2].copy_from_slice(&n.to_be_bytes()); + } + + #[inline] + fn write_u32(buf: &mut [u8], n: u32) { + buf[..4].copy_from_slice(&n.to_be_bytes()); + } + + #[inline] + fn write_u64(buf: &mut [u8], n: u64) { + buf[..8].copy_from_slice(&n.to_be_bytes()); + } + + #[inline] + fn write_u128(buf: &mut [u8], n: u128) { + buf[..16].copy_from_slice(&n.to_be_bytes()); + } + + #[inline] + fn write_uint(buf: &mut [u8], n: u64, nbytes: usize) { + assert!(pack_size(n) <= nbytes && nbytes <= 8); + assert!(nbytes <= buf.len()); + unsafe { + let bytes = *(&n.to_be() as *const u64 as *const [u8; 
8]); + copy_nonoverlapping( + bytes.as_ptr().offset((8 - nbytes) as isize), + buf.as_mut_ptr(), + nbytes, + ); + } + } + + #[inline] + fn write_uint128(buf: &mut [u8], n: u128, nbytes: usize) { + assert!(pack_size128(n) <= nbytes && nbytes <= 16); + assert!(nbytes <= buf.len()); + unsafe { + let bytes = *(&n.to_be() as *const u128 as *const [u8; 16]); + copy_nonoverlapping( + bytes.as_ptr().offset((16 - nbytes) as isize), + buf.as_mut_ptr(), + nbytes, + ); + } + } + + #[inline] + fn read_u16_into(src: &[u8], dst: &mut [u16]) { + read_slice!(src, dst, u16, from_be_bytes); + } + + #[inline] + fn read_u32_into(src: &[u8], dst: &mut [u32]) { + read_slice!(src, dst, u32, from_be_bytes); + } + + #[inline] + fn read_u64_into(src: &[u8], dst: &mut [u64]) { + read_slice!(src, dst, u64, from_be_bytes); + } + + #[inline] + fn read_u128_into(src: &[u8], dst: &mut [u128]) { + read_slice!(src, dst, u128, from_be_bytes); + } + + #[inline] + fn write_u16_into(src: &[u16], dst: &mut [u8]) { + write_slice!(src, dst, u16, to_be_bytes); + } + + #[inline] + fn write_u32_into(src: &[u32], dst: &mut [u8]) { + write_slice!(src, dst, u32, to_be_bytes); + } + + #[inline] + fn write_u64_into(src: &[u64], dst: &mut [u8]) { + write_slice!(src, dst, u64, to_be_bytes); + } + + #[inline] + fn write_u128_into(src: &[u128], dst: &mut [u8]) { + write_slice!(src, dst, u128, to_be_bytes); + } + + #[inline] + fn from_slice_u16(numbers: &mut [u16]) { + if cfg!(target_endian = "little") { + for n in numbers { + *n = n.to_be(); + } + } + } + + #[inline] + fn from_slice_u32(numbers: &mut [u32]) { + if cfg!(target_endian = "little") { + for n in numbers { + *n = n.to_be(); + } + } + } + + #[inline] + fn from_slice_u64(numbers: &mut [u64]) { + if cfg!(target_endian = "little") { + for n in numbers { + *n = n.to_be(); + } + } + } + + #[inline] + fn from_slice_u128(numbers: &mut [u128]) { + if cfg!(target_endian = "little") { + for n in numbers { + *n = n.to_be(); + } + } + } + + #[inline] + fn 
from_slice_f32(numbers: &mut [f32]) { + if cfg!(target_endian = "little") { + for n in numbers { + unsafe { + let int = *(n as *const f32 as *const u32); + *n = *(&int.to_be() as *const u32 as *const f32); + } + } + } + } + + #[inline] + fn from_slice_f64(numbers: &mut [f64]) { + if cfg!(target_endian = "little") { + for n in numbers { + unsafe { + let int = *(n as *const f64 as *const u64); + *n = *(&int.to_be() as *const u64 as *const f64); + } + } + } + } +} + +impl ByteOrder for LittleEndian { + #[inline] + fn read_u16(buf: &[u8]) -> u16 { + u16::from_le_bytes(buf[..2].try_into().unwrap()) + } + + #[inline] + fn read_u32(buf: &[u8]) -> u32 { + u32::from_le_bytes(buf[..4].try_into().unwrap()) + } + + #[inline] + fn read_u64(buf: &[u8]) -> u64 { + u64::from_le_bytes(buf[..8].try_into().unwrap()) + } + + #[inline] + fn read_u128(buf: &[u8]) -> u128 { + u128::from_le_bytes(buf[..16].try_into().unwrap()) + } + + #[inline] + fn read_uint(buf: &[u8], nbytes: usize) -> u64 { + let mut out = [0; 8]; + assert!(1 <= nbytes && nbytes <= out.len() && nbytes <= buf.len()); + out[..nbytes].copy_from_slice(&buf[..nbytes]); + u64::from_le_bytes(out) + } + + #[inline] + fn read_uint128(buf: &[u8], nbytes: usize) -> u128 { + let mut out = [0; 16]; + assert!(1 <= nbytes && nbytes <= out.len() && nbytes <= buf.len()); + out[..nbytes].copy_from_slice(&buf[..nbytes]); + u128::from_le_bytes(out) + } + + #[inline] + fn write_u16(buf: &mut [u8], n: u16) { + buf[..2].copy_from_slice(&n.to_le_bytes()); + } + + #[inline] + fn write_u32(buf: &mut [u8], n: u32) { + buf[..4].copy_from_slice(&n.to_le_bytes()); + } + + #[inline] + fn write_u64(buf: &mut [u8], n: u64) { + buf[..8].copy_from_slice(&n.to_le_bytes()); + } + + #[inline] + fn write_u128(buf: &mut [u8], n: u128) { + buf[..16].copy_from_slice(&n.to_le_bytes()); + } + + #[inline] + fn write_uint(buf: &mut [u8], n: u64, nbytes: usize) { + assert!(pack_size(n as u64) <= nbytes && nbytes <= 8); + assert!(nbytes <= buf.len()); + unsafe { + 
let bytes = *(&n.to_le() as *const u64 as *const [u8; 8]); + copy_nonoverlapping(bytes.as_ptr(), buf.as_mut_ptr(), nbytes); + } + } + + #[inline] + fn write_uint128(buf: &mut [u8], n: u128, nbytes: usize) { + assert!(pack_size128(n as u128) <= nbytes && nbytes <= 16); + assert!(nbytes <= buf.len()); + unsafe { + let bytes = *(&n.to_le() as *const u128 as *const [u8; 16]); + copy_nonoverlapping(bytes.as_ptr(), buf.as_mut_ptr(), nbytes); + } + } + + #[inline] + fn read_u16_into(src: &[u8], dst: &mut [u16]) { + read_slice!(src, dst, u16, from_le_bytes); + } + + #[inline] + fn read_u32_into(src: &[u8], dst: &mut [u32]) { + read_slice!(src, dst, u32, from_le_bytes); + } + + #[inline] + fn read_u64_into(src: &[u8], dst: &mut [u64]) { + read_slice!(src, dst, u64, from_le_bytes); + } + + #[inline] + fn read_u128_into(src: &[u8], dst: &mut [u128]) { + read_slice!(src, dst, u128, from_le_bytes); + } + + #[inline] + fn write_u16_into(src: &[u16], dst: &mut [u8]) { + write_slice!(src, dst, u16, to_le_bytes); + } + + #[inline] + fn write_u32_into(src: &[u32], dst: &mut [u8]) { + write_slice!(src, dst, u32, to_le_bytes); + } + + #[inline] + fn write_u64_into(src: &[u64], dst: &mut [u8]) { + write_slice!(src, dst, u64, to_le_bytes); + } + + #[inline] + fn write_u128_into(src: &[u128], dst: &mut [u8]) { + write_slice!(src, dst, u128, to_le_bytes); + } + + #[inline] + fn from_slice_u16(numbers: &mut [u16]) { + if cfg!(target_endian = "big") { + for n in numbers { + *n = n.to_le(); + } + } + } + + #[inline] + fn from_slice_u32(numbers: &mut [u32]) { + if cfg!(target_endian = "big") { + for n in numbers { + *n = n.to_le(); + } + } + } + + #[inline] + fn from_slice_u64(numbers: &mut [u64]) { + if cfg!(target_endian = "big") { + for n in numbers { + *n = n.to_le(); + } + } + } + + #[inline] + fn from_slice_u128(numbers: &mut [u128]) { + if cfg!(target_endian = "big") { + for n in numbers { + *n = n.to_le(); + } + } + } + + #[inline] + fn from_slice_f32(numbers: &mut [f32]) { + if 
cfg!(target_endian = "big") { + for n in numbers { + unsafe { + let int = *(n as *const f32 as *const u32); + *n = *(&int.to_le() as *const u32 as *const f32); + } + } + } + } + + #[inline] + fn from_slice_f64(numbers: &mut [f64]) { + if cfg!(target_endian = "big") { + for n in numbers { + unsafe { + let int = *(n as *const f64 as *const u64); + *n = *(&int.to_le() as *const u64 as *const f64); + } + } + } + } +} + +#[cfg(test)] +mod test { + use quickcheck::{Arbitrary, Gen, QuickCheck, StdGen, Testable}; + use rand::{thread_rng, Rng}; + + pub const U24_MAX: u32 = 16_777_215; + pub const I24_MAX: i32 = 8_388_607; + pub const U48_MAX: u64 = 281_474_976_710_655; + pub const I48_MAX: i64 = 140_737_488_355_327; + + pub const U64_MAX: u64 = ::core::u64::MAX; + pub const I64_MAX: u64 = ::core::i64::MAX as u64; + + macro_rules! calc_max { + ($max:expr, $bytes:expr) => { + calc_max!($max, $bytes, 8) + }; + ($max:expr, $bytes:expr, $maxbytes:expr) => { + ($max - 1) >> (8 * ($maxbytes - $bytes)) + }; + } + + #[derive(Clone, Debug)] + pub struct Wi128(pub T); + + impl Wi128 { + pub fn clone(&self) -> T { + self.0.clone() + } + } + + impl PartialEq for Wi128 { + fn eq(&self, other: &T) -> bool { + self.0.eq(other) + } + } + + impl Arbitrary for Wi128 { + fn arbitrary(gen: &mut G) -> Wi128 { + let max = calc_max!(::core::u128::MAX, gen.size(), 16); + let output = (gen.gen::() as u128) + | ((gen.gen::() as u128) << 64); + Wi128(output & (max - 1)) + } + } + + impl Arbitrary for Wi128 { + fn arbitrary(gen: &mut G) -> Wi128 { + let max = calc_max!(::core::i128::MAX, gen.size(), 16); + let output = (gen.gen::() as i128) + | ((gen.gen::() as i128) << 64); + Wi128(output & (max - 1)) + } + } + + pub fn qc_sized(f: A, size: u64) { + QuickCheck::new() + .gen(StdGen::new(thread_rng(), size as usize)) + .tests(1_00) + .max_tests(10_000) + .quickcheck(f); + } + + macro_rules! 
qc_byte_order { + ($name:ident, $ty_int:ty, $max:expr, + $bytes:expr, $read:ident, $write:ident) => { + #[cfg(not(miri))] + mod $name { + #[allow(unused_imports)] + use super::{qc_sized, Wi128}; + use crate::{ + BigEndian, ByteOrder, LittleEndian, NativeEndian, + }; + + #[test] + fn big_endian() { + fn prop(n: $ty_int) -> bool { + let mut buf = [0; 16]; + BigEndian::$write(&mut buf, n.clone(), $bytes); + n == BigEndian::$read(&buf[..$bytes], $bytes) + } + qc_sized(prop as fn($ty_int) -> bool, $max); + } + + #[test] + fn little_endian() { + fn prop(n: $ty_int) -> bool { + let mut buf = [0; 16]; + LittleEndian::$write(&mut buf, n.clone(), $bytes); + n == LittleEndian::$read(&buf[..$bytes], $bytes) + } + qc_sized(prop as fn($ty_int) -> bool, $max); + } + + #[test] + fn native_endian() { + fn prop(n: $ty_int) -> bool { + let mut buf = [0; 16]; + NativeEndian::$write(&mut buf, n.clone(), $bytes); + n == NativeEndian::$read(&buf[..$bytes], $bytes) + } + qc_sized(prop as fn($ty_int) -> bool, $max); + } + } + }; + ($name:ident, $ty_int:ty, $max:expr, + $read:ident, $write:ident) => { + #[cfg(not(miri))] + mod $name { + #[allow(unused_imports)] + use super::{qc_sized, Wi128}; + use crate::{ + BigEndian, ByteOrder, LittleEndian, NativeEndian, + }; + use core::mem::size_of; + + #[test] + fn big_endian() { + fn prop(n: $ty_int) -> bool { + let bytes = size_of::<$ty_int>(); + let mut buf = [0; 16]; + BigEndian::$write(&mut buf[16 - bytes..], n.clone()); + n == BigEndian::$read(&buf[16 - bytes..]) + } + qc_sized(prop as fn($ty_int) -> bool, $max - 1); + } + + #[test] + fn little_endian() { + fn prop(n: $ty_int) -> bool { + let bytes = size_of::<$ty_int>(); + let mut buf = [0; 16]; + LittleEndian::$write(&mut buf[..bytes], n.clone()); + n == LittleEndian::$read(&buf[..bytes]) + } + qc_sized(prop as fn($ty_int) -> bool, $max - 1); + } + + #[test] + fn native_endian() { + fn prop(n: $ty_int) -> bool { + let bytes = size_of::<$ty_int>(); + let mut buf = [0; 16]; + 
NativeEndian::$write(&mut buf[..bytes], n.clone()); + n == NativeEndian::$read(&buf[..bytes]) + } + qc_sized(prop as fn($ty_int) -> bool, $max - 1); + } + } + }; + } + + qc_byte_order!( + prop_u16, + u16, + ::core::u16::MAX as u64, + read_u16, + write_u16 + ); + qc_byte_order!( + prop_i16, + i16, + ::core::i16::MAX as u64, + read_i16, + write_i16 + ); + qc_byte_order!( + prop_u24, + u32, + crate::test::U24_MAX as u64, + read_u24, + write_u24 + ); + qc_byte_order!( + prop_i24, + i32, + crate::test::I24_MAX as u64, + read_i24, + write_i24 + ); + qc_byte_order!( + prop_u32, + u32, + ::core::u32::MAX as u64, + read_u32, + write_u32 + ); + qc_byte_order!( + prop_i32, + i32, + ::core::i32::MAX as u64, + read_i32, + write_i32 + ); + qc_byte_order!( + prop_u48, + u64, + crate::test::U48_MAX as u64, + read_u48, + write_u48 + ); + qc_byte_order!( + prop_i48, + i64, + crate::test::I48_MAX as u64, + read_i48, + write_i48 + ); + qc_byte_order!( + prop_u64, + u64, + ::core::u64::MAX as u64, + read_u64, + write_u64 + ); + qc_byte_order!( + prop_i64, + i64, + ::core::i64::MAX as u64, + read_i64, + write_i64 + ); + qc_byte_order!( + prop_f32, + f32, + ::core::u64::MAX as u64, + read_f32, + write_f32 + ); + qc_byte_order!( + prop_f64, + f64, + ::core::i64::MAX as u64, + read_f64, + write_f64 + ); + + qc_byte_order!(prop_u128, Wi128, 16 + 1, read_u128, write_u128); + qc_byte_order!(prop_i128, Wi128, 16 + 1, read_i128, write_i128); + + qc_byte_order!( + prop_uint_1, + u64, + calc_max!(super::U64_MAX, 1), + 1, + read_uint, + write_uint + ); + qc_byte_order!( + prop_uint_2, + u64, + calc_max!(super::U64_MAX, 2), + 2, + read_uint, + write_uint + ); + qc_byte_order!( + prop_uint_3, + u64, + calc_max!(super::U64_MAX, 3), + 3, + read_uint, + write_uint + ); + qc_byte_order!( + prop_uint_4, + u64, + calc_max!(super::U64_MAX, 4), + 4, + read_uint, + write_uint + ); + qc_byte_order!( + prop_uint_5, + u64, + calc_max!(super::U64_MAX, 5), + 5, + read_uint, + write_uint + ); + qc_byte_order!( + 
prop_uint_6, + u64, + calc_max!(super::U64_MAX, 6), + 6, + read_uint, + write_uint + ); + qc_byte_order!( + prop_uint_7, + u64, + calc_max!(super::U64_MAX, 7), + 7, + read_uint, + write_uint + ); + qc_byte_order!( + prop_uint_8, + u64, + calc_max!(super::U64_MAX, 8), + 8, + read_uint, + write_uint + ); + + qc_byte_order!( + prop_uint128_1, + Wi128, + 1, + 1, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_2, + Wi128, + 2, + 2, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_3, + Wi128, + 3, + 3, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_4, + Wi128, + 4, + 4, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_5, + Wi128, + 5, + 5, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_6, + Wi128, + 6, + 6, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_7, + Wi128, + 7, + 7, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_8, + Wi128, + 8, + 8, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_9, + Wi128, + 9, + 9, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_10, + Wi128, + 10, + 10, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_11, + Wi128, + 11, + 11, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_12, + Wi128, + 12, + 12, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_13, + Wi128, + 13, + 13, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_14, + Wi128, + 14, + 14, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_15, + Wi128, + 15, + 15, + read_uint128, + write_uint128 + ); + qc_byte_order!( + prop_uint128_16, + Wi128, + 16, + 16, + read_uint128, + write_uint128 + ); + + qc_byte_order!( + prop_int_1, + i64, + calc_max!(super::I64_MAX, 1), + 1, + read_int, + write_int + ); + qc_byte_order!( + prop_int_2, + i64, + calc_max!(super::I64_MAX, 2), + 2, + read_int, + 
write_int + ); + qc_byte_order!( + prop_int_3, + i64, + calc_max!(super::I64_MAX, 3), + 3, + read_int, + write_int + ); + qc_byte_order!( + prop_int_4, + i64, + calc_max!(super::I64_MAX, 4), + 4, + read_int, + write_int + ); + qc_byte_order!( + prop_int_5, + i64, + calc_max!(super::I64_MAX, 5), + 5, + read_int, + write_int + ); + qc_byte_order!( + prop_int_6, + i64, + calc_max!(super::I64_MAX, 6), + 6, + read_int, + write_int + ); + qc_byte_order!( + prop_int_7, + i64, + calc_max!(super::I64_MAX, 7), + 7, + read_int, + write_int + ); + qc_byte_order!( + prop_int_8, + i64, + calc_max!(super::I64_MAX, 8), + 8, + read_int, + write_int + ); + + qc_byte_order!( + prop_int128_1, + Wi128, + 1, + 1, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_2, + Wi128, + 2, + 2, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_3, + Wi128, + 3, + 3, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_4, + Wi128, + 4, + 4, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_5, + Wi128, + 5, + 5, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_6, + Wi128, + 6, + 6, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_7, + Wi128, + 7, + 7, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_8, + Wi128, + 8, + 8, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_9, + Wi128, + 9, + 9, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_10, + Wi128, + 10, + 10, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_11, + Wi128, + 11, + 11, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_12, + Wi128, + 12, + 12, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_13, + Wi128, + 13, + 13, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_14, + Wi128, + 14, + 14, + read_int128, + write_int128 + ); + qc_byte_order!( + prop_int128_15, + Wi128, + 15, + 15, + read_int128, + write_int128 + ); 
+ qc_byte_order!( + prop_int128_16, + Wi128, + 16, + 16, + read_int128, + write_int128 + ); + + // Test that all of the byte conversion functions panic when given a + // buffer that is too small. + // + // These tests are critical to ensure safety, otherwise we might end up + // with a buffer overflow. + macro_rules! too_small { + ($name:ident, $maximally_small:expr, $zero:expr, + $read:ident, $write:ident) => { + mod $name { + use crate::{ + BigEndian, ByteOrder, LittleEndian, NativeEndian, + }; + + #[test] + #[should_panic] + fn read_big_endian() { + let buf = [0; $maximally_small]; + BigEndian::$read(&buf); + } + + #[test] + #[should_panic] + fn read_little_endian() { + let buf = [0; $maximally_small]; + LittleEndian::$read(&buf); + } + + #[test] + #[should_panic] + fn read_native_endian() { + let buf = [0; $maximally_small]; + NativeEndian::$read(&buf); + } + + #[test] + #[should_panic] + fn write_big_endian() { + let mut buf = [0; $maximally_small]; + BigEndian::$write(&mut buf, $zero); + } + + #[test] + #[should_panic] + fn write_little_endian() { + let mut buf = [0; $maximally_small]; + LittleEndian::$write(&mut buf, $zero); + } + + #[test] + #[should_panic] + fn write_native_endian() { + let mut buf = [0; $maximally_small]; + NativeEndian::$write(&mut buf, $zero); + } + } + }; + ($name:ident, $maximally_small:expr, $read:ident) => { + mod $name { + use crate::{ + BigEndian, ByteOrder, LittleEndian, NativeEndian, + }; + + #[test] + #[should_panic] + fn read_big_endian() { + let buf = [0; $maximally_small]; + BigEndian::$read(&buf, $maximally_small + 1); + } + + #[test] + #[should_panic] + fn read_little_endian() { + let buf = [0; $maximally_small]; + LittleEndian::$read(&buf, $maximally_small + 1); + } + + #[test] + #[should_panic] + fn read_native_endian() { + let buf = [0; $maximally_small]; + NativeEndian::$read(&buf, $maximally_small + 1); + } + } + }; + } + + too_small!(small_u16, 1, 0, read_u16, write_u16); + too_small!(small_i16, 1, 0, read_i16, 
write_i16); + too_small!(small_u32, 3, 0, read_u32, write_u32); + too_small!(small_i32, 3, 0, read_i32, write_i32); + too_small!(small_u64, 7, 0, read_u64, write_u64); + too_small!(small_i64, 7, 0, read_i64, write_i64); + too_small!(small_f32, 3, 0.0, read_f32, write_f32); + too_small!(small_f64, 7, 0.0, read_f64, write_f64); + too_small!(small_u128, 15, 0, read_u128, write_u128); + too_small!(small_i128, 15, 0, read_i128, write_i128); + + too_small!(small_uint_1, 1, read_uint); + too_small!(small_uint_2, 2, read_uint); + too_small!(small_uint_3, 3, read_uint); + too_small!(small_uint_4, 4, read_uint); + too_small!(small_uint_5, 5, read_uint); + too_small!(small_uint_6, 6, read_uint); + too_small!(small_uint_7, 7, read_uint); + + too_small!(small_uint128_1, 1, read_uint128); + too_small!(small_uint128_2, 2, read_uint128); + too_small!(small_uint128_3, 3, read_uint128); + too_small!(small_uint128_4, 4, read_uint128); + too_small!(small_uint128_5, 5, read_uint128); + too_small!(small_uint128_6, 6, read_uint128); + too_small!(small_uint128_7, 7, read_uint128); + too_small!(small_uint128_8, 8, read_uint128); + too_small!(small_uint128_9, 9, read_uint128); + too_small!(small_uint128_10, 10, read_uint128); + too_small!(small_uint128_11, 11, read_uint128); + too_small!(small_uint128_12, 12, read_uint128); + too_small!(small_uint128_13, 13, read_uint128); + too_small!(small_uint128_14, 14, read_uint128); + too_small!(small_uint128_15, 15, read_uint128); + + too_small!(small_int_1, 1, read_int); + too_small!(small_int_2, 2, read_int); + too_small!(small_int_3, 3, read_int); + too_small!(small_int_4, 4, read_int); + too_small!(small_int_5, 5, read_int); + too_small!(small_int_6, 6, read_int); + too_small!(small_int_7, 7, read_int); + + too_small!(small_int128_1, 1, read_int128); + too_small!(small_int128_2, 2, read_int128); + too_small!(small_int128_3, 3, read_int128); + too_small!(small_int128_4, 4, read_int128); + too_small!(small_int128_5, 5, read_int128); + 
too_small!(small_int128_6, 6, read_int128); + too_small!(small_int128_7, 7, read_int128); + too_small!(small_int128_8, 8, read_int128); + too_small!(small_int128_9, 9, read_int128); + too_small!(small_int128_10, 10, read_int128); + too_small!(small_int128_11, 11, read_int128); + too_small!(small_int128_12, 12, read_int128); + too_small!(small_int128_13, 13, read_int128); + too_small!(small_int128_14, 14, read_int128); + too_small!(small_int128_15, 15, read_int128); + + // Test that reading/writing slices enforces the correct lengths. + macro_rules! slice_lengths { + ($name:ident, $read:ident, $write:ident, + $num_bytes:expr, $numbers:expr) => { + mod $name { + use crate::{ + BigEndian, ByteOrder, LittleEndian, NativeEndian, + }; + + #[test] + #[should_panic] + fn read_big_endian() { + let bytes = [0; $num_bytes]; + let mut numbers = $numbers; + BigEndian::$read(&bytes, &mut numbers); + } + + #[test] + #[should_panic] + fn read_little_endian() { + let bytes = [0; $num_bytes]; + let mut numbers = $numbers; + LittleEndian::$read(&bytes, &mut numbers); + } + + #[test] + #[should_panic] + fn read_native_endian() { + let bytes = [0; $num_bytes]; + let mut numbers = $numbers; + NativeEndian::$read(&bytes, &mut numbers); + } + + #[test] + #[should_panic] + fn write_big_endian() { + let mut bytes = [0; $num_bytes]; + let numbers = $numbers; + BigEndian::$write(&numbers, &mut bytes); + } + + #[test] + #[should_panic] + fn write_little_endian() { + let mut bytes = [0; $num_bytes]; + let numbers = $numbers; + LittleEndian::$write(&numbers, &mut bytes); + } + + #[test] + #[should_panic] + fn write_native_endian() { + let mut bytes = [0; $num_bytes]; + let numbers = $numbers; + NativeEndian::$write(&numbers, &mut bytes); + } + } + }; + } + + slice_lengths!( + slice_len_too_small_u16, + read_u16_into, + write_u16_into, + 3, + [0, 0] + ); + slice_lengths!( + slice_len_too_big_u16, + read_u16_into, + write_u16_into, + 5, + [0, 0] + ); + slice_lengths!( + slice_len_too_small_i16, + 
read_i16_into, + write_i16_into, + 3, + [0, 0] + ); + slice_lengths!( + slice_len_too_big_i16, + read_i16_into, + write_i16_into, + 5, + [0, 0] + ); + + slice_lengths!( + slice_len_too_small_u32, + read_u32_into, + write_u32_into, + 7, + [0, 0] + ); + slice_lengths!( + slice_len_too_big_u32, + read_u32_into, + write_u32_into, + 9, + [0, 0] + ); + slice_lengths!( + slice_len_too_small_i32, + read_i32_into, + write_i32_into, + 7, + [0, 0] + ); + slice_lengths!( + slice_len_too_big_i32, + read_i32_into, + write_i32_into, + 9, + [0, 0] + ); + + slice_lengths!( + slice_len_too_small_u64, + read_u64_into, + write_u64_into, + 15, + [0, 0] + ); + slice_lengths!( + slice_len_too_big_u64, + read_u64_into, + write_u64_into, + 17, + [0, 0] + ); + slice_lengths!( + slice_len_too_small_i64, + read_i64_into, + write_i64_into, + 15, + [0, 0] + ); + slice_lengths!( + slice_len_too_big_i64, + read_i64_into, + write_i64_into, + 17, + [0, 0] + ); + + slice_lengths!( + slice_len_too_small_u128, + read_u128_into, + write_u128_into, + 31, + [0, 0] + ); + slice_lengths!( + slice_len_too_big_u128, + read_u128_into, + write_u128_into, + 33, + [0, 0] + ); + slice_lengths!( + slice_len_too_small_i128, + read_i128_into, + write_i128_into, + 31, + [0, 0] + ); + slice_lengths!( + slice_len_too_big_i128, + read_i128_into, + write_i128_into, + 33, + [0, 0] + ); + + #[test] + fn uint_bigger_buffer() { + use crate::{ByteOrder, LittleEndian}; + let n = LittleEndian::read_uint(&[1, 2, 3, 4, 5, 6, 7, 8], 5); + assert_eq!(n, 0x05_0403_0201); + } + + #[test] + fn regression173_array_impl() { + use crate::{BigEndian, ByteOrder, LittleEndian}; + + let xs = [0; 100]; + + let x = BigEndian::read_u16(&xs); + assert_eq!(x, 0); + let x = BigEndian::read_u32(&xs); + assert_eq!(x, 0); + let x = BigEndian::read_u64(&xs); + assert_eq!(x, 0); + let x = BigEndian::read_u128(&xs); + assert_eq!(x, 0); + let x = BigEndian::read_i16(&xs); + assert_eq!(x, 0); + let x = BigEndian::read_i32(&xs); + assert_eq!(x, 0); + let x 
= BigEndian::read_i64(&xs); + assert_eq!(x, 0); + let x = BigEndian::read_i128(&xs); + assert_eq!(x, 0); + + let x = LittleEndian::read_u16(&xs); + assert_eq!(x, 0); + let x = LittleEndian::read_u32(&xs); + assert_eq!(x, 0); + let x = LittleEndian::read_u64(&xs); + assert_eq!(x, 0); + let x = LittleEndian::read_u128(&xs); + assert_eq!(x, 0); + let x = LittleEndian::read_i16(&xs); + assert_eq!(x, 0); + let x = LittleEndian::read_i32(&xs); + assert_eq!(x, 0); + let x = LittleEndian::read_i64(&xs); + assert_eq!(x, 0); + let x = LittleEndian::read_i128(&xs); + assert_eq!(x, 0); + } +} + +#[cfg(test)] +#[cfg(feature = "std")] +mod stdtests { + extern crate quickcheck; + extern crate rand; + + use self::quickcheck::{QuickCheck, StdGen, Testable}; + use self::rand::thread_rng; + + fn qc_unsized(f: A) { + QuickCheck::new() + .gen(StdGen::new(thread_rng(), 16)) + .tests(1_00) + .max_tests(10_000) + .quickcheck(f); + } + + macro_rules! calc_max { + ($max:expr, $bytes:expr) => { + ($max - 1) >> (8 * (8 - $bytes)) + }; + } + + macro_rules! 
qc_bytes_ext { + ($name:ident, $ty_int:ty, $max:expr, + $bytes:expr, $read:ident, $write:ident) => { + #[cfg(not(miri))] + mod $name { + #[allow(unused_imports)] + use crate::test::{qc_sized, Wi128}; + use crate::{ + BigEndian, LittleEndian, NativeEndian, ReadBytesExt, + WriteBytesExt, + }; + use std::io::Cursor; + + #[test] + fn big_endian() { + fn prop(n: $ty_int) -> bool { + let mut wtr = vec![]; + wtr.$write::(n.clone()).unwrap(); + let offset = wtr.len() - $bytes; + let mut rdr = Cursor::new(&mut wtr[offset..]); + n == rdr.$read::($bytes).unwrap() + } + qc_sized(prop as fn($ty_int) -> bool, $max); + } + + #[test] + fn little_endian() { + fn prop(n: $ty_int) -> bool { + let mut wtr = vec![]; + wtr.$write::(n.clone()).unwrap(); + let mut rdr = Cursor::new(wtr); + n == rdr.$read::($bytes).unwrap() + } + qc_sized(prop as fn($ty_int) -> bool, $max); + } + + #[test] + fn native_endian() { + fn prop(n: $ty_int) -> bool { + let mut wtr = vec![]; + wtr.$write::(n.clone()).unwrap(); + let offset = if cfg!(target_endian = "big") { + wtr.len() - $bytes + } else { + 0 + }; + let mut rdr = Cursor::new(&mut wtr[offset..]); + n == rdr.$read::($bytes).unwrap() + } + qc_sized(prop as fn($ty_int) -> bool, $max); + } + } + }; + ($name:ident, $ty_int:ty, $max:expr, $read:ident, $write:ident) => { + #[cfg(not(miri))] + mod $name { + #[allow(unused_imports)] + use crate::test::{qc_sized, Wi128}; + use crate::{ + BigEndian, LittleEndian, NativeEndian, ReadBytesExt, + WriteBytesExt, + }; + use std::io::Cursor; + + #[test] + fn big_endian() { + fn prop(n: $ty_int) -> bool { + let mut wtr = vec![]; + wtr.$write::(n.clone()).unwrap(); + let mut rdr = Cursor::new(wtr); + n == rdr.$read::().unwrap() + } + qc_sized(prop as fn($ty_int) -> bool, $max - 1); + } + + #[test] + fn little_endian() { + fn prop(n: $ty_int) -> bool { + let mut wtr = vec![]; + wtr.$write::(n.clone()).unwrap(); + let mut rdr = Cursor::new(wtr); + n == rdr.$read::().unwrap() + } + qc_sized(prop as fn($ty_int) -> bool, 
$max - 1); + } + + #[test] + fn native_endian() { + fn prop(n: $ty_int) -> bool { + let mut wtr = vec![]; + wtr.$write::(n.clone()).unwrap(); + let mut rdr = Cursor::new(wtr); + n == rdr.$read::().unwrap() + } + qc_sized(prop as fn($ty_int) -> bool, $max - 1); + } + } + }; + } + + qc_bytes_ext!( + prop_ext_u16, + u16, + ::std::u16::MAX as u64, + read_u16, + write_u16 + ); + qc_bytes_ext!( + prop_ext_i16, + i16, + ::std::i16::MAX as u64, + read_i16, + write_i16 + ); + qc_bytes_ext!( + prop_ext_u32, + u32, + ::std::u32::MAX as u64, + read_u32, + write_u32 + ); + qc_bytes_ext!( + prop_ext_i32, + i32, + ::std::i32::MAX as u64, + read_i32, + write_i32 + ); + qc_bytes_ext!( + prop_ext_u64, + u64, + ::std::u64::MAX as u64, + read_u64, + write_u64 + ); + qc_bytes_ext!( + prop_ext_i64, + i64, + ::std::i64::MAX as u64, + read_i64, + write_i64 + ); + qc_bytes_ext!( + prop_ext_f32, + f32, + ::std::u64::MAX as u64, + read_f32, + write_f32 + ); + qc_bytes_ext!( + prop_ext_f64, + f64, + ::std::i64::MAX as u64, + read_f64, + write_f64 + ); + + qc_bytes_ext!(prop_ext_u128, Wi128, 16 + 1, read_u128, write_u128); + qc_bytes_ext!(prop_ext_i128, Wi128, 16 + 1, read_i128, write_i128); + + qc_bytes_ext!( + prop_ext_uint_1, + u64, + calc_max!(crate::test::U64_MAX, 1), + 1, + read_uint, + write_u64 + ); + qc_bytes_ext!( + prop_ext_uint_2, + u64, + calc_max!(crate::test::U64_MAX, 2), + 2, + read_uint, + write_u64 + ); + qc_bytes_ext!( + prop_ext_uint_3, + u64, + calc_max!(crate::test::U64_MAX, 3), + 3, + read_uint, + write_u64 + ); + qc_bytes_ext!( + prop_ext_uint_4, + u64, + calc_max!(crate::test::U64_MAX, 4), + 4, + read_uint, + write_u64 + ); + qc_bytes_ext!( + prop_ext_uint_5, + u64, + calc_max!(crate::test::U64_MAX, 5), + 5, + read_uint, + write_u64 + ); + qc_bytes_ext!( + prop_ext_uint_6, + u64, + calc_max!(crate::test::U64_MAX, 6), + 6, + read_uint, + write_u64 + ); + qc_bytes_ext!( + prop_ext_uint_7, + u64, + calc_max!(crate::test::U64_MAX, 7), + 7, + read_uint, + write_u64 + ); + 
qc_bytes_ext!( + prop_ext_uint_8, + u64, + calc_max!(crate::test::U64_MAX, 8), + 8, + read_uint, + write_u64 + ); + + qc_bytes_ext!( + prop_ext_uint128_1, + Wi128, + 1, + 1, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_2, + Wi128, + 2, + 2, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_3, + Wi128, + 3, + 3, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_4, + Wi128, + 4, + 4, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_5, + Wi128, + 5, + 5, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_6, + Wi128, + 6, + 6, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_7, + Wi128, + 7, + 7, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_8, + Wi128, + 8, + 8, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_9, + Wi128, + 9, + 9, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_10, + Wi128, + 10, + 10, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_11, + Wi128, + 11, + 11, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_12, + Wi128, + 12, + 12, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_13, + Wi128, + 13, + 13, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_14, + Wi128, + 14, + 14, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_15, + Wi128, + 15, + 15, + read_uint128, + write_u128 + ); + qc_bytes_ext!( + prop_ext_uint128_16, + Wi128, + 16, + 16, + read_uint128, + write_u128 + ); + + qc_bytes_ext!( + prop_ext_int_1, + i64, + calc_max!(crate::test::I64_MAX, 1), + 1, + read_int, + write_i64 + ); + qc_bytes_ext!( + prop_ext_int_2, + i64, + calc_max!(crate::test::I64_MAX, 2), + 2, + read_int, + write_i64 + ); + qc_bytes_ext!( + prop_ext_int_3, + i64, + calc_max!(crate::test::I64_MAX, 3), + 3, + read_int, + write_i64 + ); + qc_bytes_ext!( + prop_ext_int_4, + i64, + 
calc_max!(crate::test::I64_MAX, 4), + 4, + read_int, + write_i64 + ); + qc_bytes_ext!( + prop_ext_int_5, + i64, + calc_max!(crate::test::I64_MAX, 5), + 5, + read_int, + write_i64 + ); + qc_bytes_ext!( + prop_ext_int_6, + i64, + calc_max!(crate::test::I64_MAX, 6), + 6, + read_int, + write_i64 + ); + qc_bytes_ext!( + prop_ext_int_7, + i64, + calc_max!(crate::test::I64_MAX, 1), + 7, + read_int, + write_i64 + ); + qc_bytes_ext!( + prop_ext_int_8, + i64, + calc_max!(crate::test::I64_MAX, 8), + 8, + read_int, + write_i64 + ); + + qc_bytes_ext!( + prop_ext_int128_1, + Wi128, + 1, + 1, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_2, + Wi128, + 2, + 2, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_3, + Wi128, + 3, + 3, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_4, + Wi128, + 4, + 4, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_5, + Wi128, + 5, + 5, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_6, + Wi128, + 6, + 6, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_7, + Wi128, + 7, + 7, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_8, + Wi128, + 8, + 8, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_9, + Wi128, + 9, + 9, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_10, + Wi128, + 10, + 10, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_11, + Wi128, + 11, + 11, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_12, + Wi128, + 12, + 12, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_13, + Wi128, + 13, + 13, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_14, + Wi128, + 14, + 14, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_15, + Wi128, + 15, + 15, + read_int128, + write_i128 + ); + qc_bytes_ext!( + prop_ext_int128_16, + Wi128, + 16, + 16, + read_int128, + write_i128 + ); + + // 
Test slice serialization/deserialization. + macro_rules! qc_slice { + ($name:ident, $ty_int:ty, $read:ident, $write:ident, $zero:expr) => { + #[cfg(not(miri))] + mod $name { + use super::qc_unsized; + #[allow(unused_imports)] + use crate::test::Wi128; + use crate::{ + BigEndian, ByteOrder, LittleEndian, NativeEndian, + }; + use core::mem::size_of; + + #[test] + fn big_endian() { + #[allow(unused_unsafe)] + fn prop(numbers: Vec<$ty_int>) -> bool { + let numbers: Vec<_> = + numbers.into_iter().map(|x| x.clone()).collect(); + let num_bytes = size_of::<$ty_int>() * numbers.len(); + let mut bytes = vec![0; num_bytes]; + + BigEndian::$write(&numbers, &mut bytes); + + let mut got = vec![$zero; numbers.len()]; + unsafe { + BigEndian::$read(&bytes, &mut got); + } + + numbers == got + } + qc_unsized(prop as fn(_) -> bool); + } + + #[test] + fn little_endian() { + #[allow(unused_unsafe)] + fn prop(numbers: Vec<$ty_int>) -> bool { + let numbers: Vec<_> = + numbers.into_iter().map(|x| x.clone()).collect(); + let num_bytes = size_of::<$ty_int>() * numbers.len(); + let mut bytes = vec![0; num_bytes]; + + LittleEndian::$write(&numbers, &mut bytes); + + let mut got = vec![$zero; numbers.len()]; + unsafe { + LittleEndian::$read(&bytes, &mut got); + } + + numbers == got + } + qc_unsized(prop as fn(_) -> bool); + } + + #[test] + fn native_endian() { + #[allow(unused_unsafe)] + fn prop(numbers: Vec<$ty_int>) -> bool { + let numbers: Vec<_> = + numbers.into_iter().map(|x| x.clone()).collect(); + let num_bytes = size_of::<$ty_int>() * numbers.len(); + let mut bytes = vec![0; num_bytes]; + + NativeEndian::$write(&numbers, &mut bytes); + + let mut got = vec![$zero; numbers.len()]; + unsafe { + NativeEndian::$read(&bytes, &mut got); + } + + numbers == got + } + qc_unsized(prop as fn(_) -> bool); + } + } + }; + } + + qc_slice!(prop_slice_u16, u16, read_u16_into, write_u16_into, 0); + qc_slice!(prop_slice_i16, i16, read_i16_into, write_i16_into, 0); + qc_slice!(prop_slice_u32, u32, 
read_u32_into, write_u32_into, 0); + qc_slice!(prop_slice_i32, i32, read_i32_into, write_i32_into, 0); + qc_slice!(prop_slice_u64, u64, read_u64_into, write_u64_into, 0); + qc_slice!(prop_slice_i64, i64, read_i64_into, write_i64_into, 0); + qc_slice!( + prop_slice_u128, + Wi128, + read_u128_into, + write_u128_into, + 0 + ); + qc_slice!( + prop_slice_i128, + Wi128, + read_i128_into, + write_i128_into, + 0 + ); + + qc_slice!(prop_slice_f32, f32, read_f32_into, write_f32_into, 0.0); + qc_slice!(prop_slice_f64, f64, read_f64_into, write_f64_into, 0.0); +} diff --git a/src/rust/vendor/hashbrown-0.9.1/.cargo-checksum.json b/src/rust/vendor/hashbrown-0.9.1/.cargo-checksum.json new file mode 100644 index 000000000..53328fd47 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"5a7c89b011b8a47ae3c3097d0badc86dadeac1084ce459cc9888f3f6519d7e47","Cargo.toml":"ba475021bd4563dde7d812713dbb79fc9fc7f522ad58bd0deaeed9ebadff69ef","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"29d265575e99c6e4b8960fa0a17571580c8973b91b1b2ce63b9b83f8e1bdc58a","benches/bench.rs":"a3f8426559ebf68d93e37edee0bf83c28f18572b394b22e47dbff33e25cac403","clippy.toml":"7535949f908c6d9aea4f9a9f3a7625552c93fc29e963d059d40f4def9d77ea7b","src/external_trait_impls/mod.rs":"d69528827794524cfd9acbeacc1ac4f6131e3c7574311e6d919f818f65fbff07","src/external_trait_impls/rayon/helpers.rs":"d4fbca4db924925548f8dab8eb94cf4a3955a53c5e1ff15f59c460546c394034","src/external_trait_impls/rayon/map.rs":"eee0d42bd8cd347d49cfb1332f15297ca63b864c3690299a3ccd6d52c22c67de","src/external_trait_impls/rayon/mod.rs":"156de9c1ad0123334ea3b7e5a17444faf1b8bf971aa88a1f23e2f2d1c3021141","src/external_trait_impls/rayon/raw.rs":"d1b2415a4c3c42279f99a23bcf45c80ddb9a641c7f7974d42ed4d55f57bf6854","src/external_trait_impls/rayon/set.rs":"59afc7b1cdc985a85952d456e34e
ada4ca2fedf90d2a14dccf98a69f8f496137","src/external_trait_impls/serde.rs":"9306fb6e0e339398dc23ba9e7400a9a28d713df248e8b260e3d4dc44f799e101","src/lib.rs":"a455a0387b0133114247380659c2825713a4b91ef38a45f737007b47a2c30ee4","src/macros.rs":"0b1e9a55e8f5232b82f7e56f352a98904b35ddfca015377cf71daa31939baabf","src/map.rs":"56dc55edfd3c818d8c69464ec9edce9e3fe40e5975c02a2965e0a15878a08295","src/raw/bitmask.rs":"05e72c64957af7383001ca43a827cc5b3a8a39d00fac332ecea2fd7d2704099c","src/raw/generic.rs":"28da6bb3a722dcaa26cb5aba9e028111f32212dc9ce0c323e8f39ff5f367385e","src/raw/mod.rs":"ad887f865799502e852a3c100a49cc785ee97cb755b8394b31f87ed24fffa836","src/raw/sse2.rs":"ff332a9104558fe6a86b85ab975b6f43d4a042c634d5dc6cf70cf1d71d97ad7d","src/rustc_entry.rs":"64e47870015a9f152340017b79e2262e5c70d0f42b4fc2dfa48dd25ca70465f7","src/scopeguard.rs":"337cde60c9e1109cd19d4fa53529014cef1e3d5900dffde82f647881df1505f7","src/set.rs":"4ec68cf40a41bbc50da754047893640c62c55f612e03c43e3e3e837a23defc6d","tests/hasher.rs":"9a8fdf67e4415618e16729969c386eefe71408cded5d46cf7b67d969276a3452","tests/rayon.rs":"2286707a87b139f41902c82488c355b9fb402a3e734f392f3a73e87b9b932795","tests/serde.rs":"eed27382c0e43f67e402cd9eed20dea23ef5582e1a26a183e708ca9217a559e0","tests/set.rs":"374bd312c01a01cf8953bbbc9494f431b260c2657d7c79cc250e977b869a76ad"},"package":"d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"} \ No newline at end of file diff --git a/src/rust/vendor/hashbrown-0.9.1/CHANGELOG.md b/src/rust/vendor/hashbrown-0.9.1/CHANGELOG.md new file mode 100644 index 000000000..b6eb671f9 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/CHANGELOG.md @@ -0,0 +1,294 @@ +# Change Log + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/). 
+ +## [Unreleased] + +## [v0.9.1] - 2020-09-28 + +## Added +- Added safe methods to `RawTable` (#202): + - `get`: `find` and `as_ref` + - `get_mut`: `find` and `as_mut` + - `insert_entry`: `insert` and `as_mut` + - `remove_entry`: `find` and `remove` + - `erase_entry`: `find` and `erase` + +## Changed +- Removed `from_key_hashed_nocheck`'s `Q: Hash`. (#200) +- Made `RawTable::drain` safe. (#201) + +## [v0.9.0] - 2020-09-03 + +### Fixed +- `drain_filter` now removes and yields items that do match the predicate, + rather than items that don't. This is a **breaking change** to match the + behavior of the `drain_filter` methods in `std`. (#187) + +### Added +- Added `replace_entry_with` to `OccupiedEntry`, and `and_replace_entry_with` to `Entry`. (#190) +- Implemented `FusedIterator` and `size_hint` for `DrainFilter`. (#188) + +### Changed +- The minimum Rust version has been bumped to 1.36 (due to `crossbeam` dependency). (#193) +- Updated `ahash` dependency to 0.4. (#198) +- `HashMap::with_hasher` and `HashSet::with_hasher` are now `const fn`. (#195) +- Removed `T: Hash + Eq` and `S: BuildHasher` bounds on `HashSet::new`, + `with_capacity`, `with_hasher`, and `with_capacity_and_hasher`. (#185) + +## [v0.8.2] - 2020-08-08 + +### Changed +- Avoid closures to improve compile times. (#183) +- Do not iterate to drop if empty. (#182) + +## [v0.8.1] - 2020-07-16 + +### Added +- Added `erase` and `remove` to `RawTable`. (#171) +- Added `try_with_capacity` to `RawTable`. (#174) +- Added methods that allow re-using a `RawIter` for `RawDrain`, + `RawIntoIter`, and `RawParIter`. (#175) +- Added `reflect_remove` and `reflect_insert` to `RawIter`. (#175) +- Added a `drain_filter` function to `HashSet`. (#179) + +### Changed +- Deprecated `RawTable::erase_no_drop` in favor of `erase` and `remove`. (#176) +- `insert_no_grow` is now exposed under the `"raw"` feature. (#180) + +## [v0.8.0] - 2020-06-18 + +### Fixed +- Marked `RawTable::par_iter` as `unsafe`. 
(#157) + +### Changed +- Reduced the size of `HashMap`. (#159) +- No longer create tables with a capacity of 1 element. (#162) +- Removed `K: Eq + Hash` bounds on `retain`. (#163) +- Pulled in `HashMap` changes from rust-lang/rust (#164): + - `extend_one` support on nightly. + - `CollectionAllocErr` renamed to `TryReserveError`. + - Added `HashSet::get_or_insert_owned`. + - `Default` for `HashSet` no longer requires `T: Eq + Hash` and `S: BuildHasher`. + +## [v0.7.2] - 2020-04-27 + +### Added +- Added `or_insert_with_key` to `Entry`. (#152) + +### Fixed +- Partially reverted `Clone` optimization which was unsound. (#154) + +### Changed +- Disabled use of `const-random` by default, which prevented reproducible builds. (#155) +- Optimized `repeat` function. (#150) +- Use `NonNull` for buckets, which improves codegen for iterators. (#148) + +## [v0.7.1] - 2020-03-16 + +### Added +- Added `HashMap::get_key_value_mut`. (#145) + +### Changed +- Optimized `Clone` implementation. (#146) + +## [v0.7.0] - 2020-01-31 + +### Added +- Added a `drain_filter` function to `HashMap`. (#135) + +### Changed +- Updated `ahash` dependency to 0.3. (#141) +- Optimized set union and intersection. (#130) +- `raw_entry` can now be used without requiring `S: BuildHasher`. (#123) +- `RawTable::bucket_index` can now be used under the `raw` feature. (#128) + +## [v0.6.3] - 2019-10-31 + +### Added +- Added an `ahash-compile-time-rng` feature (enabled by default) which allows disabling the + `compile-time-rng` feature in `ahash` to work around a Cargo bug. (#125) + +## [v0.6.2] - 2019-10-23 + +### Added +- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between + runtime performance and compilation time. (#119) + +## [v0.6.1] - 2019-10-04 + +### Added +- Added `Entry::insert` and `RawEntryMut::insert`. (#118) + +### Changed +- `Group::static_empty` was changed from a `const` to a `static` (#116). 
+ +## [v0.6.0] - 2019-08-13 + +### Fixed +- Fixed AHash accidentally depending on `std`. (#110) + +### Changed +- The minimum Rust version has been bumped to 1.32 (due to `rand` dependency). + +## ~~[v0.5.1] - 2019-08-04~~ + +This release was _yanked_ due to a breaking change for users of `no-default-features`. + +### Added +- The experimental and unsafe `RawTable` API is available under the "raw" feature. (#108) +- Added entry-like methods for `HashSet`. (#98) + +### Changed +- Changed the default hasher from FxHash to AHash. (#97) +- `hashbrown` is now fully `no_std` on recent Rust versions (1.36+). (#96) + +### Fixed +- We now avoid growing the table during insertions when it wasn't necessary. (#106) +- `RawOccupiedEntryMut` now properly implements `Send` and `Sync`. (#100) +- Relaxed `lazy_static` version. (#92) + +## [v0.5.0] - 2019-06-12 + +### Fixed +- Resize with a more conservative amount of space after deletions. (#86) + +### Changed +- Exposed the Layout of the failed allocation in CollectionAllocErr::AllocErr. (#89) + +## [v0.4.0] - 2019-05-30 + +### Fixed +- Fixed `Send` trait bounds on `IterMut` not matching the libstd one. (#82) + +## [v0.3.1] - 2019-05-30 + +### Fixed +- Fixed incorrect use of slice in unsafe code. (#80) + +## [v0.3.0] - 2019-04-23 + +### Changed +- Changed shrink_to to not panic if min_capacity < capacity. (#67) + +### Fixed +- Worked around emscripten bug emscripten-core/emscripten-fastcomp#258. (#66) + +## [v0.2.2] - 2019-04-16 + +### Fixed +- Inlined non-nightly lowest_set_bit_nonzero. (#64) +- Fixed build on latest nightly. (#65) + +## [v0.2.1] - 2019-04-14 + +### Changed +- Use for_each in map Extend and FromIterator. (#58) +- Improved worst-case performance of HashSet.is_subset. (#61) + +### Fixed +- Removed incorrect debug_assert. (#60) + +## [v0.2.0] - 2019-03-31 + +### Changed +- The code has been updated to Rust 2018 edition. This means that the minimum + Rust version has been bumped to 1.31 (2018 edition). 
+ +### Added +- Added `insert_with_hasher` to the raw_entry API to allow `K: !(Hash + Eq)`. (#54) +- Added support for using hashbrown as the hash table implementation in libstd. (#46) + +### Fixed +- Fixed cargo build with minimal-versions. (#45) +- Fixed `#[may_dangle]` attributes to match the libstd `HashMap`. (#46) +- ZST keys and values are now handled properly. (#46) + +## [v0.1.8] - 2019-01-14 + +### Added +- Rayon parallel iterator support (#37) +- `raw_entry` support (#31) +- `#[may_dangle]` on nightly (#31) +- `try_reserve` support (#31) + +### Fixed +- Fixed variance on `IterMut`. (#31) + +## [v0.1.7] - 2018-12-05 + +### Fixed +- Fixed non-SSE version of convert_special_to_empty_and_full_to_deleted. (#32) +- Fixed overflow in rehash_in_place. (#33) + +## [v0.1.6] - 2018-11-17 + +### Fixed +- Fixed compile error on nightly. (#29) + +## [v0.1.5] - 2018-11-08 + +### Fixed +- Fixed subtraction overflow in generic::Group::match_byte. (#28) + +## [v0.1.4] - 2018-11-04 + +### Fixed +- Fixed a bug in the `erase_no_drop` implementation. (#26) + +## [v0.1.3] - 2018-11-01 + +### Added +- Serde support. (#14) + +### Fixed +- Make the compiler inline functions more aggressively. (#20) + +## [v0.1.2] - 2018-10-31 + +### Fixed +- `clear` segfaults when called on an empty table. (#13) + +## [v0.1.1] - 2018-10-30 + +### Fixed +- `erase_no_drop` optimization not triggering in the SSE2 implementation. (#3) +- Missing `Send` and `Sync` for hash map and iterator types. (#7) +- Bug when inserting into a table smaller than the group width. 
(#5) + +## v0.1.0 - 2018-10-29 + +- Initial release + +[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.9.1...HEAD +[v0.9.1]: https://github.com/rust-lang/hashbrown/compare/v0.9.0...v0.9.1 +[v0.9.0]: https://github.com/rust-lang/hashbrown/compare/v0.8.2...v0.9.0 +[v0.8.2]: https://github.com/rust-lang/hashbrown/compare/v0.8.1...v0.8.2 +[v0.8.1]: https://github.com/rust-lang/hashbrown/compare/v0.8.0...v0.8.1 +[v0.8.0]: https://github.com/rust-lang/hashbrown/compare/v0.7.2...v0.8.0 +[v0.7.2]: https://github.com/rust-lang/hashbrown/compare/v0.7.1...v0.7.2 +[v0.7.1]: https://github.com/rust-lang/hashbrown/compare/v0.7.0...v0.7.1 +[v0.7.0]: https://github.com/rust-lang/hashbrown/compare/v0.6.3...v0.7.0 +[v0.6.3]: https://github.com/rust-lang/hashbrown/compare/v0.6.2...v0.6.3 +[v0.6.2]: https://github.com/rust-lang/hashbrown/compare/v0.6.1...v0.6.2 +[v0.6.1]: https://github.com/rust-lang/hashbrown/compare/v0.6.0...v0.6.1 +[v0.6.0]: https://github.com/rust-lang/hashbrown/compare/v0.5.1...v0.6.0 +[v0.5.1]: https://github.com/rust-lang/hashbrown/compare/v0.5.0...v0.5.1 +[v0.5.0]: https://github.com/rust-lang/hashbrown/compare/v0.4.0...v0.5.0 +[v0.4.0]: https://github.com/rust-lang/hashbrown/compare/v0.3.1...v0.4.0 +[v0.3.1]: https://github.com/rust-lang/hashbrown/compare/v0.3.0...v0.3.1 +[v0.3.0]: https://github.com/rust-lang/hashbrown/compare/v0.2.2...v0.3.0 +[v0.2.2]: https://github.com/rust-lang/hashbrown/compare/v0.2.1...v0.2.2 +[v0.2.1]: https://github.com/rust-lang/hashbrown/compare/v0.2.0...v0.2.1 +[v0.2.0]: https://github.com/rust-lang/hashbrown/compare/v0.1.8...v0.2.0 +[v0.1.8]: https://github.com/rust-lang/hashbrown/compare/v0.1.7...v0.1.8 +[v0.1.7]: https://github.com/rust-lang/hashbrown/compare/v0.1.6...v0.1.7 +[v0.1.6]: https://github.com/rust-lang/hashbrown/compare/v0.1.5...v0.1.6 +[v0.1.5]: https://github.com/rust-lang/hashbrown/compare/v0.1.4...v0.1.5 +[v0.1.4]: https://github.com/rust-lang/hashbrown/compare/v0.1.3...v0.1.4 +[v0.1.3]: 
https://github.com/rust-lang/hashbrown/compare/v0.1.2...v0.1.3 +[v0.1.2]: https://github.com/rust-lang/hashbrown/compare/v0.1.1...v0.1.2 +[v0.1.1]: https://github.com/rust-lang/hashbrown/compare/v0.1.0...v0.1.1 diff --git a/src/rust/vendor/hashbrown-0.9.1/Cargo.toml b/src/rust/vendor/hashbrown-0.9.1/Cargo.toml new file mode 100644 index 000000000..7be0341b3 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/Cargo.toml @@ -0,0 +1,80 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "hashbrown" +version = "0.9.1" +authors = ["Amanieu d'Antras "] +exclude = [".travis.yml", "bors.toml", "/ci/*"] +description = "A Rust port of Google's SwissTable hash map" +readme = "README.md" +keywords = ["hash", "no_std", "hashmap", "swisstable"] +categories = ["data-structures", "no-std"] +license = "Apache-2.0/MIT" +repository = "https://github.com/rust-lang/hashbrown" +[package.metadata.docs.rs] +features = ["nightly", "rayon", "serde", "raw"] +[dependencies.ahash] +version = "0.4.4" +optional = true +default-features = false + +[dependencies.alloc] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-alloc" + +[dependencies.compiler_builtins] +version = "0.1.2" +optional = true + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" + +[dependencies.rayon] +version = "1.0" +optional = true + +[dependencies.serde] +version = "1.0.25" +optional = true +default-features = false +[dev-dependencies.doc-comment] 
+version = "0.3.1" + +[dev-dependencies.lazy_static] +version = "1.2" + +[dev-dependencies.rand] +version = "0.7.3" +features = ["small_rng"] + +[dev-dependencies.rayon] +version = "1.0" + +[dev-dependencies.rustc-hash] +version = "=1.0" + +[dev-dependencies.serde_test] +version = "1.0" + +[features] +ahash-compile-time-rng = ["ahash/compile-time-rng"] +default = ["ahash", "inline-more"] +inline-more = [] +nightly = [] +raw = [] +rustc-dep-of-std = ["nightly", "core", "compiler_builtins", "alloc", "rustc-internal-api"] +rustc-internal-api = [] diff --git a/src/rust/vendor/hashbrown-0.9.1/LICENSE-APACHE b/src/rust/vendor/hashbrown-0.9.1/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/rust/vendor/hashbrown-0.9.1/LICENSE-MIT b/src/rust/vendor/hashbrown-0.9.1/LICENSE-MIT new file mode 100644 index 000000000..5afc2a7b0 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Amanieu d'Antras + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/rust/vendor/hashbrown-0.9.1/README.md b/src/rust/vendor/hashbrown-0.9.1/README.md new file mode 100644 index 000000000..2e431710f --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/README.md @@ -0,0 +1,126 @@ +hashbrown +========= + +[![Build Status](https://travis-ci.com/rust-lang/hashbrown.svg?branch=master)](https://travis-ci.com/rust-lang/hashbrown) +[![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown) +[![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown) +[![Rust](https://img.shields.io/badge/rust-1.36.0%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) + +This crate is a Rust port of Google's high-performance [SwissTable] hash +map, adapted to make it a drop-in replacement for Rust's standard `HashMap` +and `HashSet` types. + +The original C++ version of SwissTable can be found [here], and this +[CppCon talk] gives an overview of how the algorithm works. + +Since Rust 1.36, this is now the `HashMap` implementation for the Rust standard +library. However you may still want to use this crate instead since it works +in environments without `std`, such as embedded systems and kernels. + +[SwissTable]: https://abseil.io/blog/20180927-swisstables +[here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h +[CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 + +## [Change log](CHANGELOG.md) + +## Features + +- Drop-in replacement for the standard library `HashMap` and `HashSet` types. +- Uses `AHash` as the default hasher, which is much faster than SipHash. +- Around 2x faster than the previous standard library `HashMap`. +- Lower memory usage: only 1 byte of overhead per entry instead of 8. +- Compatible with `#[no_std]` (but requires a global allocator with the `alloc` crate). +- Empty hash maps do not allocate any memory. +- SIMD lookups to scan multiple hash entries in parallel. 
+ +## Performance + +Compared to the previous implementation of `std::collections::HashMap` (Rust 1.35). + +With the hashbrown default AHash hasher (not HashDoS-resistant): + +```text + name oldstdhash ns/iter hashbrown ns/iter diff ns/iter diff % speedup + insert_ahash_highbits 20,846 7,397 -13,449 -64.52% x 2.82 + insert_ahash_random 20,515 7,796 -12,719 -62.00% x 2.63 + insert_ahash_serial 21,668 7,264 -14,404 -66.48% x 2.98 + insert_erase_ahash_highbits 29,570 17,498 -12,072 -40.83% x 1.69 + insert_erase_ahash_random 39,569 17,474 -22,095 -55.84% x 2.26 + insert_erase_ahash_serial 32,073 17,332 -14,741 -45.96% x 1.85 + iter_ahash_highbits 1,572 2,087 515 32.76% x 0.75 + iter_ahash_random 1,609 2,074 465 28.90% x 0.78 + iter_ahash_serial 2,293 2,120 -173 -7.54% x 1.08 + lookup_ahash_highbits 3,460 4,403 943 27.25% x 0.79 + lookup_ahash_random 6,377 3,911 -2,466 -38.67% x 1.63 + lookup_ahash_serial 3,629 3,586 -43 -1.18% x 1.01 + lookup_fail_ahash_highbits 5,286 3,411 -1,875 -35.47% x 1.55 + lookup_fail_ahash_random 12,365 4,171 -8,194 -66.27% x 2.96 + lookup_fail_ahash_serial 4,902 3,240 -1,662 -33.90% x 1.51 +``` + +With the libstd default SipHash hasher (HashDoS-resistant): + +```text + name oldstdhash ns/iter hashbrown ns/iter diff ns/iter diff % speedup + insert_std_highbits 32,598 20,199 -12,399 -38.04% x 1.61 + insert_std_random 29,824 20,760 -9,064 -30.39% x 1.44 + insert_std_serial 33,151 17,256 -15,895 -47.95% x 1.92 + insert_erase_std_highbits 74,731 48,735 -25,996 -34.79% x 1.53 + insert_erase_std_random 73,828 47,649 -26,179 -35.46% x 1.55 + insert_erase_std_serial 73,864 40,147 -33,717 -45.65% x 1.84 + iter_std_highbits 1,518 2,264 746 49.14% x 0.67 + iter_std_random 1,502 2,414 912 60.72% x 0.62 + iter_std_serial 6,361 2,118 -4,243 -66.70% x 3.00 + lookup_std_highbits 21,705 16,962 -4,743 -21.85% x 1.28 + lookup_std_random 21,654 17,158 -4,496 -20.76% x 1.26 + lookup_std_serial 18,726 14,509 -4,217 -22.52% x 1.29 + lookup_fail_std_highbits 25,852 
17,323 -8,529 -32.99% x 1.49 + lookup_fail_std_random 25,913 17,760 -8,153 -31.46% x 1.46 + lookup_fail_std_serial 22,648 14,839 -7,809 -34.48% x 1.53 +``` + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +hashbrown = "0.9" +``` + +Then: + +```rust +use hashbrown::HashMap; + +let mut map = HashMap::new(); +map.insert(1, "one"); +``` + +This crate has the following Cargo features: + +- `nightly`: Enables nightly-only features: `#[may_dangle]`. +- `serde`: Enables serde serialization support. +- `rayon`: Enables rayon parallel iterator support. +- `raw`: Enables access to the experimental and unsafe `RawTable` API. +- `inline-more`: Adds inline hints to most functions, improving run-time performance at the cost + of compilation time. (enabled by default) +- `ahash`: Compiles with ahash as default hasher. (enabled by default) +- `ahash-compile-time-rng`: Activates the `compile-time-rng` feature of ahash, to increase the + DOS-resistance, but can result in issues for `no_std` builds. More details in + [issue#124](https://github.com/rust-lang/hashbrown/issues/124). (enabled by default) + +## License + +Licensed under either of: + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any +additional terms or conditions. 
diff --git a/src/rust/vendor/hashbrown-0.9.1/benches/bench.rs b/src/rust/vendor/hashbrown-0.9.1/benches/bench.rs new file mode 100644 index 000000000..771e7169a --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/benches/bench.rs @@ -0,0 +1,260 @@ +// This benchmark suite contains some benchmarks along a set of dimensions: +// Hasher: std default (SipHash) and crate default (AHash). +// Int key distribution: low bit heavy, top bit heavy, and random. +// Task: basic functionality: insert, insert_erase, lookup, lookup_fail, iter +#![feature(test)] + +extern crate test; + +use test::{black_box, Bencher}; + +use hashbrown::hash_map::DefaultHashBuilder; +use hashbrown::HashMap; +use std::collections::hash_map::RandomState; + +const SIZE: usize = 1000; + +// The default hashmap when using this crate directly. +type AHashMap = HashMap; +// This uses the hashmap from this crate with the default hasher of the stdlib. +type StdHashMap = HashMap; + +// A random key iterator. +#[derive(Clone, Copy)] +struct RandomKeys { + state: usize, +} + +impl RandomKeys { + fn new() -> Self { + RandomKeys { state: 0 } + } +} + +impl Iterator for RandomKeys { + type Item = usize; + fn next(&mut self) -> Option { + // Add 1 then multiply by some 32 bit prime. + self.state = self.state.wrapping_add(1).wrapping_mul(3787392781); + Some(self.state) + } +} + +macro_rules! 
bench_suite { + ($bench_macro:ident, $bench_ahash_serial:ident, $bench_std_serial:ident, + $bench_ahash_highbits:ident, $bench_std_highbits:ident, + $bench_ahash_random:ident, $bench_std_random:ident) => { + $bench_macro!($bench_ahash_serial, AHashMap, 0..); + $bench_macro!($bench_std_serial, StdHashMap, 0..); + $bench_macro!( + $bench_ahash_highbits, + AHashMap, + (0..).map(usize::swap_bytes) + ); + $bench_macro!( + $bench_std_highbits, + StdHashMap, + (0..).map(usize::swap_bytes) + ); + $bench_macro!($bench_ahash_random, AHashMap, RandomKeys::new()); + $bench_macro!($bench_std_random, StdHashMap, RandomKeys::new()); + }; +} + +macro_rules! bench_insert { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::with_capacity_and_hasher(SIZE, Default::default()); + b.iter(|| { + m.clear(); + for i in ($keydist).take(SIZE) { + m.insert(i, i); + } + black_box(&mut m); + }) + } + }; +} + +bench_suite!( + bench_insert, + insert_ahash_serial, + insert_std_serial, + insert_ahash_highbits, + insert_std_highbits, + insert_ahash_random, + insert_std_random +); + +macro_rules! bench_insert_erase { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut base = $maptype::default(); + for i in ($keydist).take(SIZE) { + base.insert(i, i); + } + let skip = $keydist.skip(SIZE); + b.iter(|| { + let mut m = base.clone(); + let mut add_iter = skip.clone(); + let mut remove_iter = $keydist; + // While keeping the size constant, + // replace the first keydist with the second. + for (add, remove) in (&mut add_iter).zip(&mut remove_iter).take(SIZE) { + m.insert(add, add); + black_box(m.remove(&remove)); + } + black_box(m); + }) + } + }; +} + +bench_suite!( + bench_insert_erase, + insert_erase_ahash_serial, + insert_erase_std_serial, + insert_erase_ahash_highbits, + insert_erase_std_highbits, + insert_erase_ahash_random, + insert_erase_std_random +); + +macro_rules! 
bench_lookup { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::default(); + for i in $keydist.take(SIZE) { + m.insert(i, i); + } + + b.iter(|| { + for i in $keydist.take(SIZE) { + black_box(m.get(&i)); + } + }) + } + }; +} + +bench_suite!( + bench_lookup, + lookup_ahash_serial, + lookup_std_serial, + lookup_ahash_highbits, + lookup_std_highbits, + lookup_ahash_random, + lookup_std_random +); + +macro_rules! bench_lookup_fail { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::default(); + let mut iter = $keydist; + for i in (&mut iter).take(SIZE) { + m.insert(i, i); + } + + b.iter(|| { + for i in (&mut iter).take(SIZE) { + black_box(m.get(&i)); + } + }) + } + }; +} + +bench_suite!( + bench_lookup_fail, + lookup_fail_ahash_serial, + lookup_fail_std_serial, + lookup_fail_ahash_highbits, + lookup_fail_std_highbits, + lookup_fail_ahash_random, + lookup_fail_std_random +); + +macro_rules! 
bench_iter { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::default(); + for i in ($keydist).take(SIZE) { + m.insert(i, i); + } + + b.iter(|| { + for i in &m { + black_box(i); + } + }) + } + }; +} + +bench_suite!( + bench_iter, + iter_ahash_serial, + iter_std_serial, + iter_ahash_highbits, + iter_std_highbits, + iter_ahash_random, + iter_std_random +); + +#[bench] +fn clone_small(b: &mut Bencher) { + let mut m = HashMap::new(); + for i in 0..10 { + m.insert(i, i); + } + + b.iter(|| { + black_box(m.clone()); + }) +} + +#[bench] +fn clone_from_small(b: &mut Bencher) { + let mut m = HashMap::new(); + let mut m2 = HashMap::new(); + for i in 0..10 { + m.insert(i, i); + } + + b.iter(|| { + m2.clone_from(&m); + black_box(&mut m2); + }) +} + +#[bench] +fn clone_large(b: &mut Bencher) { + let mut m = HashMap::new(); + for i in 0..1000 { + m.insert(i, i); + } + + b.iter(|| { + black_box(m.clone()); + }) +} + +#[bench] +fn clone_from_large(b: &mut Bencher) { + let mut m = HashMap::new(); + let mut m2 = HashMap::new(); + for i in 0..1000 { + m.insert(i, i); + } + + b.iter(|| { + m2.clone_from(&m); + black_box(&mut m2); + }) +} diff --git a/src/rust/vendor/hashbrown-0.9.1/clippy.toml b/src/rust/vendor/hashbrown-0.9.1/clippy.toml new file mode 100644 index 000000000..d98bf2c09 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/clippy.toml @@ -0,0 +1 @@ +doc-valid-idents = [ "CppCon", "SwissTable", "SipHash", "HashDoS" ] diff --git a/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/mod.rs b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/mod.rs new file mode 100644 index 000000000..ef497836c --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/mod.rs @@ -0,0 +1,4 @@ +#[cfg(feature = "rayon")] +pub(crate) mod rayon; +#[cfg(feature = "serde")] +mod serde; diff --git a/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/helpers.rs 
b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/helpers.rs new file mode 100644 index 000000000..9382007ea --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/helpers.rs @@ -0,0 +1,26 @@ +use alloc::collections::LinkedList; +use alloc::vec::Vec; + +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +/// Helper for collecting parallel iterators to an intermediary +pub(super) fn collect(iter: I) -> (LinkedList>, usize) { + let list = iter + .into_par_iter() + .fold(Vec::new, |mut vec, elem| { + vec.push(elem); + vec + }) + .map(|vec| { + let mut list = LinkedList::new(); + list.push_back(vec); + list + }) + .reduce(LinkedList::new, |mut list1, mut list2| { + list1.append(&mut list2); + list1 + }); + + let len = list.iter().map(Vec::len).sum(); + (list, len) +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/map.rs b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/map.rs new file mode 100644 index 000000000..334f8bb58 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/map.rs @@ -0,0 +1,666 @@ +//! Rayon extensions for `HashMap`. + +use crate::hash_map::HashMap; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; + +/// Parallel iterator over shared references to entries in a map. +/// +/// This iterator is created by the [`par_iter`] method on [`HashMap`] +/// (provided by the [`IntoParallelRefIterator`] trait). +/// See its documentation for more. 
+/// +/// [`par_iter`]: /hashbrown/struct.HashMap.html#method.par_iter +/// [`HashMap`]: /hashbrown/struct.HashMap.html +/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html +pub struct ParIter<'a, K, V, S> { + map: &'a HashMap, +} + +impl<'a, K: Sync, V: Sync, S: Sync> ParallelIterator for ParIter<'a, K, V, S> { + type Item = (&'a K, &'a V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + unsafe { self.map.table.par_iter() } + .map(|x| unsafe { + let r = x.as_ref(); + (&r.0, &r.1) + }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParIter<'_, K, V, S> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + ParIter { map: self.map } + } +} + +impl fmt::Debug for ParIter<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.map.iter().fmt(f) + } +} + +/// Parallel iterator over shared references to keys in a map. +/// +/// This iterator is created by the [`par_keys`] method on [`HashMap`]. +/// See its documentation for more. 
+/// +/// [`par_keys`]: /hashbrown/struct.HashMap.html#method.par_keys +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParKeys<'a, K, V, S> { + map: &'a HashMap, +} + +impl<'a, K: Sync, V: Sync, S: Sync> ParallelIterator for ParKeys<'a, K, V, S> { + type Item = &'a K; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + unsafe { self.map.table.par_iter() } + .map(|x| unsafe { &x.as_ref().0 }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParKeys<'_, K, V, S> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + ParKeys { map: self.map } + } +} + +impl fmt::Debug for ParKeys<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.map.keys().fmt(f) + } +} + +/// Parallel iterator over shared references to values in a map. +/// +/// This iterator is created by the [`par_values`] method on [`HashMap`]. +/// See its documentation for more. +/// +/// [`par_values`]: /hashbrown/struct.HashMap.html#method.par_values +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParValues<'a, K, V, S> { + map: &'a HashMap, +} + +impl<'a, K: Sync, V: Sync, S: Sync> ParallelIterator for ParValues<'a, K, V, S> { + type Item = &'a V; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + unsafe { self.map.table.par_iter() } + .map(|x| unsafe { &x.as_ref().1 }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParValues<'_, K, V, S> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + ParValues { map: self.map } + } +} + +impl fmt::Debug for ParValues<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.map.values().fmt(f) + } +} + +/// Parallel iterator over mutable references to entries in a map. 
+/// +/// This iterator is created by the [`par_iter_mut`] method on [`HashMap`] +/// (provided by the [`IntoParallelRefMutIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter_mut`]: /hashbrown/struct.HashMap.html#method.par_iter_mut +/// [`HashMap`]: /hashbrown/struct.HashMap.html +/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html +pub struct ParIterMut<'a, K, V, S> { + map: &'a mut HashMap, +} + +impl<'a, K: Send + Sync, V: Send, S: Send> ParallelIterator for ParIterMut<'a, K, V, S> { + type Item = (&'a K, &'a mut V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + unsafe { self.map.table.par_iter() } + .map(|x| unsafe { + let r = x.as_mut(); + (&r.0, &mut r.1) + }) + .drive_unindexed(consumer) + } +} + +impl fmt::Debug + for ParIterMut<'_, K, V, S> +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.map.iter().fmt(f) + } +} + +/// Parallel iterator over mutable references to values in a map. +/// +/// This iterator is created by the [`par_values_mut`] method on [`HashMap`]. +/// See its documentation for more. +/// +/// [`par_values_mut`]: /hashbrown/struct.HashMap.html#method.par_values_mut +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParValuesMut<'a, K, V, S> { + map: &'a mut HashMap, +} + +impl<'a, K: Send, V: Send, S: Send> ParallelIterator for ParValuesMut<'a, K, V, S> { + type Item = &'a mut V; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + unsafe { self.map.table.par_iter() } + .map(|x| unsafe { &mut x.as_mut().1 }) + .drive_unindexed(consumer) + } +} + +impl fmt::Debug for ParValuesMut<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.map.values().fmt(f) + } +} + +/// Parallel iterator over entries of a consumed map. 
+/// +/// This iterator is created by the [`into_par_iter`] method on [`HashMap`] +/// (provided by the [`IntoParallelIterator`] trait). +/// See its documentation for more. +/// +/// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter +/// [`HashMap`]: /hashbrown/struct.HashMap.html +/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html +pub struct IntoParIter { + map: HashMap, +} + +impl ParallelIterator for IntoParIter { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.map.table.into_par_iter().drive_unindexed(consumer) + } +} + +impl fmt::Debug for IntoParIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.map.iter().fmt(f) + } +} + +/// Parallel draining iterator over entries of a map. +/// +/// This iterator is created by the [`par_drain`] method on [`HashMap`]. +/// See its documentation for more. +/// +/// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParDrain<'a, K, V, S> { + map: &'a mut HashMap, +} + +impl ParallelIterator for ParDrain<'_, K, V, S> { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.map.table.par_drain().drive_unindexed(consumer) + } +} + +impl fmt::Debug + for ParDrain<'_, K, V, S> +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.map.iter().fmt(f) + } +} + +impl HashMap { + /// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_keys(&self) -> ParKeys<'_, K, V, S> { + ParKeys { map: self } + } + + /// Visits (potentially in parallel) immutably borrowed values in an arbitrary order. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn par_values(&self) -> ParValues<'_, K, V, S> { + ParValues { map: self } + } +} + +impl HashMap { + /// Visits (potentially in parallel) mutably borrowed values in an arbitrary order. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V, S> { + ParValuesMut { map: self } + } + + /// Consumes (potentially in parallel) all values in an arbitrary order, + /// while preserving the map's allocated memory for reuse. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> ParDrain<'_, K, V, S> { + ParDrain { map: self } + } +} + +impl HashMap +where + K: Eq + Hash + Sync, + V: PartialEq + Sync, + S: BuildHasher + Sync, +{ + /// Returns `true` if the map is equal to another, + /// i.e. both maps contain the same keys mapped to the same values. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_eq(&self, other: &Self) -> bool { + self.len() == other.len() + && self + .into_par_iter() + .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + } +} + +impl IntoParallelIterator for HashMap { + type Item = (K, V); + type Iter = IntoParIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + IntoParIter { map: self } + } +} + +impl<'a, K: Sync, V: Sync, S: Sync> IntoParallelIterator for &'a HashMap { + type Item = (&'a K, &'a V); + type Iter = ParIter<'a, K, V, S>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIter { map: self } + } +} + +impl<'a, K: Send + Sync, V: Send, S: Send> IntoParallelIterator for &'a mut HashMap { + type Item = (&'a K, &'a mut V); + type Iter = ParIterMut<'a, K, V, S>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIterMut { map: self } + } +} + +/// Collect (key, value) pairs from a parallel iterator into a +/// hashmap. 
If multiple pairs correspond to the same key, then the +/// ones produced earlier in the parallel iterator will be +/// overwritten, just as with a sequential iterator. +impl FromParallelIterator<(K, V)> for HashMap +where + K: Eq + Hash + Send, + V: Send, + S: BuildHasher + Default, +{ + fn from_par_iter

(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + let mut map = HashMap::default(); + map.par_extend(par_iter); + map + } +} + +/// Extend a hash map with items from a parallel iterator. +impl ParallelExtend<(K, V)> for HashMap +where + K: Eq + Hash + Send, + V: Send, + S: BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +/// Extend a hash map with copied items from a parallel iterator. +impl<'a, K, V, S> ParallelExtend<(&'a K, &'a V)> for HashMap +where + K: Copy + Eq + Hash + Sync, + V: Copy + Sync, + S: BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +// This is equal to the normal `HashMap` -- no custom advantage. +fn extend(map: &mut HashMap, par_iter: I) +where + K: Eq + Hash, + S: BuildHasher, + I: IntoParallelIterator, + HashMap: Extend, +{ + let (list, len) = super::helpers::collect(par_iter); + + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire length if the map is empty. + // Otherwise reserve half the length (rounded up), so the map + // will only resize twice in the worst case. 
+ let reserve = if map.is_empty() { len } else { (len + 1) / 2 }; + map.reserve(reserve); + for vec in list { + map.extend(vec); + } +} + +#[cfg(test)] +mod test_par_map { + use alloc::vec::Vec; + use core::hash::{Hash, Hasher}; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use rayon::prelude::*; + + use crate::hash_map::HashMap; + + struct Dropable<'a> { + k: usize, + counter: &'a AtomicUsize, + } + + impl Dropable<'_> { + fn new(k: usize, counter: &AtomicUsize) -> Dropable<'_> { + counter.fetch_add(1, Ordering::Relaxed); + + Dropable { k, counter } + } + } + + impl Drop for Dropable<'_> { + fn drop(&mut self) { + self.counter.fetch_sub(1, Ordering::Relaxed); + } + } + + impl Clone for Dropable<'_> { + fn clone(&self) -> Self { + Dropable::new(self.k, self.counter) + } + } + + impl Hash for Dropable<'_> { + fn hash(&self, state: &mut H) + where + H: Hasher, + { + self.k.hash(state) + } + } + + impl PartialEq for Dropable<'_> { + fn eq(&self, other: &Self) -> bool { + self.k == other.k + } + } + + impl Eq for Dropable<'_> {} + + #[test] + fn test_into_iter_drops() { + let key = AtomicUsize::new(0); + let value = AtomicUsize::new(0); + + let hm = { + let mut hm = HashMap::new(); + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + + for i in 0..100 { + let d1 = Dropable::new(i, &key); + let d2 = Dropable::new(i + 100, &value); + hm.insert(d1, d2); + } + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + hm + }; + + // By the way, ensure that cloning doesn't screw up the dropping. + drop(hm.clone()); + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // Ensure that dropping the iterator does not leak anything. 
+ drop(hm.clone().into_par_iter()); + + { + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // retain only half + let _v: Vec<_> = hm + .into_par_iter() + .filter(|&(ref key, _)| key.k < 50) + .collect(); + + assert_eq!(key.load(Ordering::Relaxed), 50); + assert_eq!(value.load(Ordering::Relaxed), 50); + }; + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + } + + #[test] + fn test_drain_drops() { + let key = AtomicUsize::new(0); + let value = AtomicUsize::new(0); + + let mut hm = { + let mut hm = HashMap::new(); + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + + for i in 0..100 { + let d1 = Dropable::new(i, &key); + let d2 = Dropable::new(i + 100, &value); + hm.insert(d1, d2); + } + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + hm + }; + + // By the way, ensure that cloning doesn't screw up the dropping. + drop(hm.clone()); + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // Ensure that dropping the drain iterator does not leak anything. 
+ drop(hm.clone().par_drain()); + + { + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // retain only half + let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect(); + assert!(hm.is_empty()); + + assert_eq!(key.load(Ordering::Relaxed), 50); + assert_eq!(value.load(Ordering::Relaxed), 50); + }; + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + } + + #[test] + fn test_empty_iter() { + let mut m: HashMap = HashMap::new(); + assert_eq!(m.par_drain().count(), 0); + assert_eq!(m.par_keys().count(), 0); + assert_eq!(m.par_values().count(), 0); + assert_eq!(m.par_values_mut().count(), 0); + assert_eq!(m.par_iter().count(), 0); + assert_eq!(m.par_iter_mut().count(), 0); + assert_eq!(m.len(), 0); + assert!(m.is_empty()); + assert_eq!(m.into_par_iter().count(), 0); + } + + #[test] + fn test_iterate() { + let mut m = HashMap::with_capacity(4); + for i in 0..32 { + assert!(m.insert(i, i * 2).is_none()); + } + assert_eq!(m.len(), 32); + + let observed = AtomicUsize::new(0); + + m.par_iter().for_each(|(k, v)| { + assert_eq!(*v, *k * 2); + observed.fetch_or(1 << *k, Ordering::Relaxed); + }); + assert_eq!(observed.into_inner(), 0xFFFF_FFFF); + } + + #[test] + fn test_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_par_iter().collect(); + let keys: Vec<_> = map.par_keys().cloned().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn test_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_par_iter().collect(); + let values: Vec<_> = map.par_values().cloned().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn test_values_mut() { + let vec = vec![(1, 1), (2, 2), (3, 3)]; + 
let mut map: HashMap<_, _> = vec.into_par_iter().collect(); + map.par_values_mut().for_each(|value| *value = (*value) * 2); + let values: Vec<_> = map.par_values().cloned().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&2)); + assert!(values.contains(&4)); + assert!(values.contains(&6)); + } + + #[test] + fn test_eq() { + let mut m1 = HashMap::new(); + m1.insert(1, 2); + m1.insert(2, 3); + m1.insert(3, 4); + + let mut m2 = HashMap::new(); + m2.insert(1, 2); + m2.insert(2, 3); + + assert!(!m1.par_eq(&m2)); + + m2.insert(3, 4); + + assert!(m1.par_eq(&m2)); + } + + #[test] + fn test_from_iter() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.par_iter().cloned().collect(); + + for &(k, v) in &xs { + assert_eq!(map.get(&k), Some(&v)); + } + } + + #[test] + fn test_extend_ref() { + let mut a = HashMap::new(); + a.insert(1, "one"); + let mut b = HashMap::new(); + b.insert(2, "two"); + b.insert(3, "three"); + + a.par_extend(&b); + + assert_eq!(a.len(), 3); + assert_eq!(a[&1], "one"); + assert_eq!(a[&2], "two"); + assert_eq!(a[&3], "three"); + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/mod.rs b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/mod.rs new file mode 100644 index 000000000..99337a1ce --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/mod.rs @@ -0,0 +1,4 @@ +mod helpers; +pub(crate) mod map; +pub(crate) mod raw; +pub(crate) mod set; diff --git a/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/raw.rs b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/raw.rs new file mode 100644 index 000000000..1bd2c1779 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/raw.rs @@ -0,0 +1,199 @@ +use crate::raw::Bucket; +use crate::raw::{RawIter, RawIterRange, RawTable}; +use crate::scopeguard::guard; +use alloc::alloc::dealloc; +use core::marker::PhantomData; +use 
core::mem; +use core::ptr::NonNull; +use rayon::iter::{ + plumbing::{self, Folder, UnindexedConsumer, UnindexedProducer}, + ParallelIterator, +}; + +/// Parallel iterator which returns a raw pointer to every full bucket in the table. +pub struct RawParIter { + iter: RawIterRange, +} + +impl From> for RawParIter { + fn from(it: RawIter) -> Self { + RawParIter { iter: it.iter } + } +} + +impl ParallelIterator for RawParIter { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let producer = ParIterProducer { iter: self.iter }; + plumbing::bridge_unindexed(producer, consumer) + } +} + +/// Producer which returns a `Bucket` for every element. +struct ParIterProducer { + iter: RawIterRange, +} + +impl UnindexedProducer for ParIterProducer { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn split(self) -> (Self, Option) { + let (left, right) = self.iter.split(); + let left = ParIterProducer { iter: left }; + let right = right.map(|right| ParIterProducer { iter: right }); + (left, right) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold_with(self, folder: F) -> F + where + F: Folder, + { + folder.consume_iter(self.iter) + } +} + +/// Parallel iterator which consumes a table and returns elements. +pub struct RawIntoParIter { + table: RawTable, +} + +impl ParallelIterator for RawIntoParIter { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let iter = unsafe { self.table.iter().iter }; + let _guard = guard(self.table.into_alloc(), |alloc| { + if let Some((ptr, layout)) = *alloc { + unsafe { + dealloc(ptr.as_ptr(), layout); + } + } + }); + let producer = ParDrainProducer { iter }; + plumbing::bridge_unindexed(producer, consumer) + } +} + +/// Parallel iterator which consumes elements without freeing the table storage. 
+pub struct RawParDrain<'a, T> { + // We don't use a &'a mut RawTable because we want RawParDrain to be + // covariant over T. + table: NonNull>, + marker: PhantomData<&'a RawTable>, +} + +unsafe impl Send for RawParDrain<'_, T> {} + +impl ParallelIterator for RawParDrain<'_, T> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let _guard = guard(self.table, |table| unsafe { + table.as_mut().clear_no_drop() + }); + let iter = unsafe { self.table.as_ref().iter().iter }; + mem::forget(self); + let producer = ParDrainProducer { iter }; + plumbing::bridge_unindexed(producer, consumer) + } +} + +impl Drop for RawParDrain<'_, T> { + fn drop(&mut self) { + // If drive_unindexed is not called then simply clear the table. + unsafe { self.table.as_mut().clear() } + } +} + +/// Producer which will consume all elements in the range, even if it is dropped +/// halfway through. +struct ParDrainProducer { + iter: RawIterRange, +} + +impl UnindexedProducer for ParDrainProducer { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn split(self) -> (Self, Option) { + let (left, right) = self.iter.clone().split(); + mem::forget(self); + let left = ParDrainProducer { iter: left }; + let right = right.map(|right| ParDrainProducer { iter: right }); + (left, right) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold_with(mut self, mut folder: F) -> F + where + F: Folder, + { + // Make sure to modify the iterator in-place so that any remaining + // elements are processed in our Drop impl. + while let Some(item) = self.iter.next() { + folder = folder.consume(unsafe { item.read() }); + if folder.full() { + return folder; + } + } + + // If we processed all elements then we don't need to run the drop. 
+ mem::forget(self); + folder + } +} + +impl Drop for ParDrainProducer { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + // Drop all remaining elements + if mem::needs_drop::() { + while let Some(item) = self.iter.next() { + unsafe { + item.drop(); + } + } + } + } +} + +impl RawTable { + /// Returns a parallel iterator over the elements in a `RawTable`. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn par_iter(&self) -> RawParIter { + RawParIter { + iter: self.iter().iter, + } + } + + /// Returns a parallel iterator over the elements in a `RawTable`. + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_par_iter(self) -> RawIntoParIter { + RawIntoParIter { table: self } + } + + /// Returns a parallel iterator which consumes all elements of a `RawTable` + /// without freeing its memory allocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> RawParDrain<'_, T> { + RawParDrain { + table: NonNull::from(self), + marker: PhantomData, + } + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/set.rs b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/set.rs new file mode 100644 index 000000000..53d2660d5 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/rayon/set.rs @@ -0,0 +1,646 @@ +//! Rayon extensions for `HashSet`. + +use crate::hash_set::HashSet; +use core::hash::{BuildHasher, Hash}; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; + +/// Parallel iterator over elements of a consumed set. +/// +/// This iterator is created by the [`into_par_iter`] method on [`HashSet`] +/// (provided by the [`IntoParallelIterator`] trait). +/// See its documentation for more. 
+/// +/// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter +/// [`HashSet`]: /hashbrown/struct.HashSet.html +/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html +pub struct IntoParIter { + set: HashSet, +} + +impl ParallelIterator for IntoParIter { + type Item = T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.set + .map + .into_par_iter() + .map(|(k, _)| k) + .drive_unindexed(consumer) + } +} + +/// Parallel draining iterator over entries of a set. +/// +/// This iterator is created by the [`par_drain`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParDrain<'a, T, S> { + set: &'a mut HashSet, +} + +impl ParallelIterator for ParDrain<'_, T, S> { + type Item = T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.set + .map + .par_drain() + .map(|(k, _)| k) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in a set. +/// +/// This iterator is created by the [`par_iter`] method on [`HashSet`] +/// (provided by the [`IntoParallelRefIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter`]: /hashbrown/struct.HashSet.html#method.par_iter +/// [`HashSet`]: /hashbrown/struct.HashSet.html +/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html +pub struct ParIter<'a, T, S> { + set: &'a HashSet, +} + +impl<'a, T: Sync, S: Sync> ParallelIterator for ParIter<'a, T, S> { + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.set.map.par_keys().drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the difference of +/// sets. 
+/// +/// This iterator is created by the [`par_difference`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParDifference<'a, T, S> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S> ParallelIterator for ParDifference<'a, T, S> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .into_par_iter() + .filter(|&x| !self.b.contains(x)) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the symmetric +/// difference of sets. +/// +/// This iterator is created by the [`par_symmetric_difference`] method on +/// [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParSymmetricDifference<'a, T, S> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S> ParallelIterator for ParSymmetricDifference<'a, T, S> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .par_difference(self.b) + .chain(self.b.par_difference(self.a)) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the intersection of +/// sets. +/// +/// This iterator is created by the [`par_intersection`] method on [`HashSet`]. +/// See its documentation for more. 
+/// +/// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParIntersection<'a, T, S> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S> ParallelIterator for ParIntersection<'a, T, S> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .into_par_iter() + .filter(|&x| self.b.contains(x)) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the union of sets. +/// +/// This iterator is created by the [`par_union`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParUnion<'a, T, S> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S> ParallelIterator for ParUnion<'a, T, S> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .into_par_iter() + .chain(self.b.par_difference(self.a)) + .drive_unindexed(consumer) + } +} + +impl HashSet +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, +{ + /// Visits (potentially in parallel) the values representing the difference, + /// i.e. the values that are in `self` but not in `other`. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_difference<'a>(&'a self, other: &'a Self) -> ParDifference<'a, T, S> { + ParDifference { a: self, b: other } + } + + /// Visits (potentially in parallel) the values representing the symmetric + /// difference, i.e. the values that are in `self` or in `other` but not in both. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn par_symmetric_difference<'a>( + &'a self, + other: &'a Self, + ) -> ParSymmetricDifference<'a, T, S> { + ParSymmetricDifference { a: self, b: other } + } + + /// Visits (potentially in parallel) the values representing the + /// intersection, i.e. the values that are both in `self` and `other`. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_intersection<'a>(&'a self, other: &'a Self) -> ParIntersection<'a, T, S> { + ParIntersection { a: self, b: other } + } + + /// Visits (potentially in parallel) the values representing the union, + /// i.e. all the values in `self` or `other`, without duplicates. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_union<'a>(&'a self, other: &'a Self) -> ParUnion<'a, T, S> { + ParUnion { a: self, b: other } + } + + /// Returns `true` if `self` has no elements in common with `other`. + /// This is equivalent to checking for an empty intersection. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_is_disjoint(&self, other: &Self) -> bool { + self.into_par_iter().all(|x| !other.contains(x)) + } + + /// Returns `true` if the set is a subset of another, + /// i.e. `other` contains at least all the values in `self`. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_is_subset(&self, other: &Self) -> bool { + if self.len() <= other.len() { + self.into_par_iter().all(|x| other.contains(x)) + } else { + false + } + } + + /// Returns `true` if the set is a superset of another, + /// i.e. `self` contains at least all the values in `other`. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_is_superset(&self, other: &Self) -> bool { + other.par_is_subset(self) + } + + /// Returns `true` if the set is equal to another, + /// i.e. both sets contain the same values. + /// + /// This method runs in a potentially parallel fashion. 
+ pub fn par_eq(&self, other: &Self) -> bool { + self.len() == other.len() && self.par_is_subset(other) + } +} + +impl HashSet +where + T: Eq + Hash + Send, + S: BuildHasher + Send, +{ + /// Consumes (potentially in parallel) all values in an arbitrary order, + /// while preserving the set's allocated memory for reuse. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> ParDrain<'_, T, S> { + ParDrain { set: self } + } +} + +impl IntoParallelIterator for HashSet { + type Item = T; + type Iter = IntoParIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + IntoParIter { set: self } + } +} + +impl<'a, T: Sync, S: Sync> IntoParallelIterator for &'a HashSet { + type Item = &'a T; + type Iter = ParIter<'a, T, S>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIter { set: self } + } +} + +/// Collect values from a parallel iterator into a hashset. +impl FromParallelIterator for HashSet +where + T: Eq + Hash + Send, + S: BuildHasher + Default, +{ + fn from_par_iter

(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + let mut set = HashSet::default(); + set.par_extend(par_iter); + set + } +} + +/// Extend a hash set with items from a parallel iterator. +impl ParallelExtend for HashSet +where + T: Eq + Hash + Send, + S: BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +/// Extend a hash set with copied items from a parallel iterator. +impl<'a, T, S> ParallelExtend<&'a T> for HashSet +where + T: 'a + Copy + Eq + Hash + Sync, + S: BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +// This is equal to the normal `HashSet` -- no custom advantage. +fn extend(set: &mut HashSet, par_iter: I) +where + T: Eq + Hash, + S: BuildHasher, + I: IntoParallelIterator, + HashSet: Extend, +{ + let (list, len) = super::helpers::collect(par_iter); + + // Values may be already present or show multiple times in the iterator. + // Reserve the entire length if the set is empty. + // Otherwise reserve half the length (rounded up), so the set + // will only resize twice in the worst case. 
+ let reserve = if set.is_empty() { len } else { (len + 1) / 2 }; + set.reserve(reserve); + for vec in list { + set.extend(vec); + } +} + +#[cfg(test)] +mod test_par_set { + use alloc::vec::Vec; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use rayon::prelude::*; + + use crate::hash_set::HashSet; + + #[test] + fn test_disjoint() { + let mut xs = HashSet::new(); + let mut ys = HashSet::new(); + assert!(xs.par_is_disjoint(&ys)); + assert!(ys.par_is_disjoint(&xs)); + assert!(xs.insert(5)); + assert!(ys.insert(11)); + assert!(xs.par_is_disjoint(&ys)); + assert!(ys.par_is_disjoint(&xs)); + assert!(xs.insert(7)); + assert!(xs.insert(19)); + assert!(xs.insert(4)); + assert!(ys.insert(2)); + assert!(ys.insert(-11)); + assert!(xs.par_is_disjoint(&ys)); + assert!(ys.par_is_disjoint(&xs)); + assert!(ys.insert(7)); + assert!(!xs.par_is_disjoint(&ys)); + assert!(!ys.par_is_disjoint(&xs)); + } + + #[test] + fn test_subset_and_superset() { + let mut a = HashSet::new(); + assert!(a.insert(0)); + assert!(a.insert(5)); + assert!(a.insert(11)); + assert!(a.insert(7)); + + let mut b = HashSet::new(); + assert!(b.insert(0)); + assert!(b.insert(7)); + assert!(b.insert(19)); + assert!(b.insert(250)); + assert!(b.insert(11)); + assert!(b.insert(200)); + + assert!(!a.par_is_subset(&b)); + assert!(!a.par_is_superset(&b)); + assert!(!b.par_is_subset(&a)); + assert!(!b.par_is_superset(&a)); + + assert!(b.insert(5)); + + assert!(a.par_is_subset(&b)); + assert!(!a.par_is_superset(&b)); + assert!(!b.par_is_subset(&a)); + assert!(b.par_is_superset(&a)); + } + + #[test] + fn test_iterate() { + let mut a = HashSet::new(); + for i in 0..32 { + assert!(a.insert(i)); + } + let observed = AtomicUsize::new(0); + a.par_iter().for_each(|k| { + observed.fetch_or(1 << *k, Ordering::Relaxed); + }); + assert_eq!(observed.into_inner(), 0xFFFF_FFFF); + } + + #[test] + fn test_intersection() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(11)); + assert!(a.insert(1)); 
+ assert!(a.insert(3)); + assert!(a.insert(77)); + assert!(a.insert(103)); + assert!(a.insert(5)); + assert!(a.insert(-5)); + + assert!(b.insert(2)); + assert!(b.insert(11)); + assert!(b.insert(77)); + assert!(b.insert(-9)); + assert!(b.insert(-42)); + assert!(b.insert(5)); + assert!(b.insert(3)); + + let expected = [3, 5, 11, 77]; + let i = a + .par_intersection(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(3)); + assert!(b.insert(9)); + + let expected = [1, 5, 11]; + let i = a + .par_difference(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_symmetric_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(-2)); + assert!(b.insert(3)); + assert!(b.insert(9)); + assert!(b.insert(14)); + assert!(b.insert(22)); + + let expected = [-2, 1, 5, 11, 14, 22]; + let i = a + .par_symmetric_difference(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_union() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + assert!(a.insert(16)); + assert!(a.insert(19)); + assert!(a.insert(24)); + + assert!(b.insert(-2)); + assert!(b.insert(1)); + assert!(b.insert(5)); + assert!(b.insert(9)); + assert!(b.insert(13)); + assert!(b.insert(19)); + + let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24]; + let i = a + .par_union(&b) + .map(|x| { + 
assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_from_iter() { + let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9]; + + let set: HashSet<_> = xs.par_iter().cloned().collect(); + + for x in &xs { + assert!(set.contains(x)); + } + } + + #[test] + fn test_move_iter() { + let hs = { + let mut hs = HashSet::new(); + + hs.insert('a'); + hs.insert('b'); + + hs + }; + + let v = hs.into_par_iter().collect::>(); + assert!(v == ['a', 'b'] || v == ['b', 'a']); + } + + #[test] + fn test_eq() { + // These constants once happened to expose a bug in insert(). + // I'm keeping them around to prevent a regression. + let mut s1 = HashSet::new(); + + s1.insert(1); + s1.insert(2); + s1.insert(3); + + let mut s2 = HashSet::new(); + + s2.insert(1); + s2.insert(2); + + assert!(!s1.par_eq(&s2)); + + s2.insert(3); + + assert!(s1.par_eq(&s2)); + } + + #[test] + fn test_extend_ref() { + let mut a = HashSet::new(); + a.insert(1); + + a.par_extend(&[2, 3, 4][..]); + + assert_eq!(a.len(), 4); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + + let mut b = HashSet::new(); + b.insert(5); + b.insert(6); + + a.par_extend(&b); + + assert_eq!(a.len(), 6); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + assert!(a.contains(&5)); + assert!(a.contains(&6)); + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/serde.rs b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/serde.rs new file mode 100644 index 000000000..7816e7803 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/external_trait_impls/serde.rs @@ -0,0 +1,200 @@ +mod size_hint { + use core::cmp; + + /// This presumably exists to prevent denial of service attacks. + /// + /// Original discussion: https://github.com/serde-rs/serde/issues/1114. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn cautious(hint: Option) -> usize { + cmp::min(hint.unwrap_or(0), 4096) + } +} + +mod map { + use core::fmt; + use core::hash::{BuildHasher, Hash}; + use core::marker::PhantomData; + use serde::de::{Deserialize, Deserializer, MapAccess, Visitor}; + use serde::ser::{Serialize, Serializer}; + + use crate::hash_map::HashMap; + + use super::size_hint; + + impl Serialize for HashMap + where + K: Serialize + Eq + Hash, + V: Serialize, + H: BuildHasher, + { + #[cfg_attr(feature = "inline-more", inline)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_map(self) + } + } + + impl<'de, K, V, S> Deserialize<'de> for HashMap + where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: BuildHasher + Default, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct MapVisitor { + marker: PhantomData>, + } + + impl<'de, K, V, S> Visitor<'de> for MapVisitor + where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: BuildHasher + Default, + { + type Value = HashMap; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a map") + } + + #[cfg_attr(feature = "inline-more", inline)] + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let mut values = HashMap::with_capacity_and_hasher( + size_hint::cautious(map.size_hint()), + S::default(), + ); + + while let Some((key, value)) = map.next_entry()? 
{ + values.insert(key, value); + } + + Ok(values) + } + } + + let visitor = MapVisitor { + marker: PhantomData, + }; + deserializer.deserialize_map(visitor) + } + } +} + +mod set { + use core::fmt; + use core::hash::{BuildHasher, Hash}; + use core::marker::PhantomData; + use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; + use serde::ser::{Serialize, Serializer}; + + use crate::hash_set::HashSet; + + use super::size_hint; + + impl Serialize for HashSet + where + T: Serialize + Eq + Hash, + H: BuildHasher, + { + #[cfg_attr(feature = "inline-more", inline)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.collect_seq(self) + } + } + + impl<'de, T, S> Deserialize<'de> for HashSet + where + T: Deserialize<'de> + Eq + Hash, + S: BuildHasher + Default, + { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct SeqVisitor { + marker: PhantomData>, + } + + impl<'de, T, S> Visitor<'de> for SeqVisitor + where + T: Deserialize<'de> + Eq + Hash, + S: BuildHasher + Default, + { + type Value = HashSet; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a sequence") + } + + #[cfg_attr(feature = "inline-more", inline)] + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let mut values = HashSet::with_capacity_and_hasher( + size_hint::cautious(seq.size_hint()), + S::default(), + ); + + while let Some(value) = seq.next_element()? 
{ + values.insert(value); + } + + Ok(values) + } + } + + let visitor = SeqVisitor { + marker: PhantomData, + }; + deserializer.deserialize_seq(visitor) + } + + fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + struct SeqInPlaceVisitor<'a, T, S>(&'a mut HashSet); + + impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S> + where + T: Deserialize<'de> + Eq + Hash, + S: BuildHasher + Default, + { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a sequence") + } + + #[cfg_attr(feature = "inline-more", inline)] + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + self.0.clear(); + self.0.reserve(size_hint::cautious(seq.size_hint())); + + while let Some(value) = seq.next_element()? { + self.0.insert(value); + } + + Ok(()) + } + } + + deserializer.deserialize_seq(SeqInPlaceVisitor(place)) + } + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/lib.rs b/src/rust/vendor/hashbrown-0.9.1/src/lib.rs new file mode 100644 index 000000000..3aff40a4f --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/lib.rs @@ -0,0 +1,112 @@ +//! This crate is a Rust port of Google's high-performance [SwissTable] hash +//! map, adapted to make it a drop-in replacement for Rust's standard `HashMap` +//! and `HashSet` types. +//! +//! The original C++ version of [SwissTable] can be found [here], and this +//! [CppCon talk] gives an overview of how the algorithm works. +//! +//! [SwissTable]: https://abseil.io/blog/20180927-swisstables +//! [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h +//! 
[CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 + +#![no_std] +#![cfg_attr( + feature = "nightly", + feature(test, core_intrinsics, dropck_eyepatch, min_specialization, extend_one) +)] +#![allow( + clippy::doc_markdown, + clippy::module_name_repetitions, + clippy::must_use_candidate, + clippy::option_if_let_else +)] +#![warn(missing_docs)] +#![warn(rust_2018_idioms)] + +#[cfg(test)] +#[macro_use] +extern crate std; + +#[cfg_attr(test, macro_use)] +extern crate alloc; + +#[cfg(feature = "nightly")] +#[cfg(doctest)] +doc_comment::doctest!("../README.md"); + +#[macro_use] +mod macros; + +#[cfg(feature = "raw")] +/// Experimental and unsafe `RawTable` API. This module is only available if the +/// `raw` feature is enabled. +pub mod raw { + // The RawTable API is still experimental and is not properly documented yet. + #[allow(missing_docs)] + #[path = "mod.rs"] + mod inner; + pub use inner::*; + + #[cfg(feature = "rayon")] + pub mod rayon { + pub use crate::external_trait_impls::rayon::raw::*; + } +} +#[cfg(not(feature = "raw"))] +mod raw; + +mod external_trait_impls; +mod map; +#[cfg(feature = "rustc-internal-api")] +mod rustc_entry; +mod scopeguard; +mod set; + +pub mod hash_map { + //! A hash map implemented with quadratic probing and SIMD lookup. + pub use crate::map::*; + + #[cfg(feature = "rustc-internal-api")] + pub use crate::rustc_entry::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash maps. + /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::map::*; + } +} +pub mod hash_set { + //! A hash set implemented as a `HashMap` where the value is `()`. + pub use crate::set::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash sets. 
+ /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::set::*; + } +} + +pub use crate::map::HashMap; +pub use crate::set::HashSet; + +/// The error type for `try_reserve` methods. +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum TryReserveError { + /// Error due to the computed capacity exceeding the collection's maximum + /// (usually `isize::MAX` bytes). + CapacityOverflow, + + /// The memory allocator returned an error + AllocError { + /// The layout of the allocation request that failed. + layout: alloc::alloc::Layout, + }, +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/macros.rs b/src/rust/vendor/hashbrown-0.9.1/src/macros.rs new file mode 100644 index 000000000..027959731 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/macros.rs @@ -0,0 +1,69 @@ +// See the cfg-if crate. +macro_rules! cfg_if { + // match if/else chains with a final `else` + ($( + if #[cfg($($meta:meta),*)] { $($it:item)* } + ) else * else { + $($it2:item)* + }) => { + cfg_if! { + @__items + () ; + $( ( ($($meta),*) ($($it)*) ), )* + ( () ($($it2)*) ), + } + }; + + // match if/else chains lacking a final `else` + ( + if #[cfg($($i_met:meta),*)] { $($i_it:item)* } + $( + else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } + )* + ) => { + cfg_if! { + @__items + () ; + ( ($($i_met),*) ($($i_it)*) ), + $( ( ($($e_met),*) ($($e_it)*) ), )* + ( () () ), + } + }; + + // Internal and recursive macro to emit all the items + // + // Collects all the negated cfgs in a list at the beginning and after the + // semicolon is all the remaining items + (@__items ($($not:meta,)*) ; ) => {}; + (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { + // Emit all items within one block, applying an approprate #[cfg]. 
The + // #[cfg] will require all `$m` matchers specified and must also negate + // all previous matchers. + cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } + + // Recurse to emit all other items in `$rest`, and when we do so add all + // our `$m` matchers to the list of `$not` matchers as future emissions + // will have to negate everything we just matched as well. + cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } + }; + + // Internal macro to Apply a cfg attribute to a list of items + (@__apply $m:meta, $($it:item)*) => { + $(#[$m] $it)* + }; +} + +// Helper macro for specialization. This also helps avoid parse errors if the +// default fn syntax for specialization changes in the future. +#[cfg(feature = "nightly")] +macro_rules! default_fn { + ($($tt:tt)*) => { + default $($tt)* + } +} +#[cfg(not(feature = "nightly"))] +macro_rules! default_fn { + ($($tt:tt)*) => { + $($tt)* + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/map.rs b/src/rust/vendor/hashbrown-0.9.1/src/map.rs new file mode 100644 index 000000000..1ccba3157 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/map.rs @@ -0,0 +1,4524 @@ +use crate::raw::{Bucket, RawDrain, RawIntoIter, RawIter, RawTable}; +use crate::TryReserveError; +use core::borrow::Borrow; +use core::fmt::{self, Debug}; +use core::hash::{BuildHasher, Hash, Hasher}; +use core::iter::{FromIterator, FusedIterator}; +use core::marker::PhantomData; +use core::mem; +use core::ops::Index; + +/// Default hasher for `HashMap`. +#[cfg(feature = "ahash")] +pub type DefaultHashBuilder = ahash::RandomState; + +/// Dummy default hasher for `HashMap`. +#[cfg(not(feature = "ahash"))] +pub enum DefaultHashBuilder {} + +/// A hash map implemented with quadratic probing and SIMD lookup. +/// +/// The default hashing algorithm is currently [`AHash`], though this is +/// subject to change at any point in the future. 
This hash function is very +/// fast for all types of keys, but this algorithm will typically *not* protect +/// against attacks such as HashDoS. +/// +/// The hashing algorithm can be replaced on a per-`HashMap` basis using the +/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many +/// alternative algorithms are available on crates.io, such as the [`fnv`] crate. +/// +/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although +/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`. +/// If you implement these yourself, it is important that the following +/// property holds: +/// +/// ```text +/// k1 == k2 -> hash(k1) == hash(k2) +/// ``` +/// +/// In other words, if two keys are equal, their hashes must be equal. +/// +/// It is a logic error for a key to be modified in such a way that the key's +/// hash, as determined by the [`Hash`] trait, or its equality, as determined by +/// the [`Eq`] trait, changes while it is in the map. This is normally only +/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. +/// +/// It is also a logic error for the [`Hash`] implementation of a key to panic. +/// This is generally only possible if the trait is implemented manually. If a +/// panic does occur then the contents of the `HashMap` may become corrupted and +/// some items may be dropped from the table. +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// // Type inference lets us omit an explicit type signature (which +/// // would be `HashMap` in this example). +/// let mut book_reviews = HashMap::new(); +/// +/// // Review some books. 
+/// book_reviews.insert( +/// "Adventures of Huckleberry Finn".to_string(), +/// "My favorite book.".to_string(), +/// ); +/// book_reviews.insert( +/// "Grimms' Fairy Tales".to_string(), +/// "Masterpiece.".to_string(), +/// ); +/// book_reviews.insert( +/// "Pride and Prejudice".to_string(), +/// "Very enjoyable.".to_string(), +/// ); +/// book_reviews.insert( +/// "The Adventures of Sherlock Holmes".to_string(), +/// "Eye lyked it alot.".to_string(), +/// ); +/// +/// // Check for a specific one. +/// // When collections store owned values (String), they can still be +/// // queried using references (&str). +/// if !book_reviews.contains_key("Les Misérables") { +/// println!("We've got {} reviews, but Les Misérables ain't one.", +/// book_reviews.len()); +/// } +/// +/// // oops, this review has a lot of spelling mistakes, let's delete it. +/// book_reviews.remove("The Adventures of Sherlock Holmes"); +/// +/// // Look up the values associated with some keys. +/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"]; +/// for &book in &to_find { +/// match book_reviews.get(book) { +/// Some(review) => println!("{}: {}", book, review), +/// None => println!("{} is unreviewed.", book) +/// } +/// } +/// +/// // Look up the value for a key (will panic if the key is not found). +/// println!("Review for Jane: {}", book_reviews["Pride and Prejudice"]); +/// +/// // Iterate over everything. +/// for (book, review) in &book_reviews { +/// println!("{}: \"{}\"", book, review); +/// } +/// ``` +/// +/// `HashMap` also implements an [`Entry API`](#method.entry), which allows +/// for more complex methods of getting, setting, updating and removing keys and +/// their values: +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// // type inference lets us omit an explicit type signature (which +/// // would be `HashMap<&str, u8>` in this example). 
+/// let mut player_stats = HashMap::new(); +/// +/// fn random_stat_buff() -> u8 { +/// // could actually return some random value here - let's just return +/// // some fixed value for now +/// 42 +/// } +/// +/// // insert a key only if it doesn't already exist +/// player_stats.entry("health").or_insert(100); +/// +/// // insert a key using a function that provides a new value only if it +/// // doesn't already exist +/// player_stats.entry("defence").or_insert_with(random_stat_buff); +/// +/// // update a key, guarding against the key possibly not being set +/// let stat = player_stats.entry("attack").or_insert(100); +/// *stat += random_stat_buff(); +/// ``` +/// +/// The easiest way to use `HashMap` with a custom key type is to derive [`Eq`] and [`Hash`]. +/// We must also derive [`PartialEq`]. +/// +/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html +/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html +/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html +/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html +/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html +/// [`default`]: #method.default +/// [`with_hasher`]: #method.with_hasher +/// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher +/// [`fnv`]: https://crates.io/crates/fnv +/// [`AHash`]: https://crates.io/crates/ahash +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// #[derive(Hash, Eq, PartialEq, Debug)] +/// struct Viking { +/// name: String, +/// country: String, +/// } +/// +/// impl Viking { +/// /// Creates a new Viking. +/// fn new(name: &str, country: &str) -> Viking { +/// Viking { name: name.to_string(), country: country.to_string() } +/// } +/// } +/// +/// // Use a HashMap to store the vikings' health points. 
+/// let mut vikings = HashMap::new(); +/// +/// vikings.insert(Viking::new("Einar", "Norway"), 25); +/// vikings.insert(Viking::new("Olaf", "Denmark"), 24); +/// vikings.insert(Viking::new("Harald", "Iceland"), 12); +/// +/// // Use derived implementation to print the status of the vikings. +/// for (viking, health) in &vikings { +/// println!("{:?} has {} hp", viking, health); +/// } +/// ``` +/// +/// A `HashMap` with fixed list of elements can be initialized from an array: +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let timber_resources: HashMap<&str, i32> = [("Norway", 100), ("Denmark", 50), ("Iceland", 10)] +/// .iter().cloned().collect(); +/// // use the values stored in map +/// ``` +pub struct HashMap { + pub(crate) hash_builder: S, + pub(crate) table: RawTable<(K, V)>, +} + +impl Clone for HashMap { + fn clone(&self) -> Self { + HashMap { + hash_builder: self.hash_builder.clone(), + table: self.table.clone(), + } + } + + fn clone_from(&mut self, source: &Self) { + self.table.clone_from(&source.table); + + // Update hash_builder only if we successfully cloned all elements. + self.hash_builder.clone_from(&source.hash_builder); + } +} + +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_hash(hash_builder: &impl BuildHasher, val: &K) -> u64 { + let mut state = hash_builder.build_hasher(); + val.hash(&mut state); + state.finish() +} + +#[cfg(feature = "ahash")] +impl HashMap { + /// Creates an empty `HashMap`. + /// + /// The hash map is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, i32> = HashMap::new(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn new() -> Self { + Self::default() + } + + /// Creates an empty `HashMap` with the specified capacity. + /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. 
If `capacity` is 0, the hash map will not allocate. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, i32> = HashMap::with_capacity(10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_and_hasher(capacity, DefaultHashBuilder::default()) + } +} + +impl HashMap { + /// Creates an empty `HashMap` which will use the given hash builder to hash + /// keys. + /// + /// The created map has the default initial capacity. + /// + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the HashMap to be useful, see its documentation for details. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_hasher(s); + /// map.insert(1, 2); + /// ``` + /// + /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html + #[cfg_attr(feature = "inline-more", inline)] + pub const fn with_hasher(hash_builder: S) -> Self { + Self { + hash_builder, + table: RawTable::new(), + } + } + + /// Creates an empty `HashMap` with the specified capacity, using `hash_builder` + /// to hash the keys. + /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash map will not allocate. + /// + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. 
+ /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the HashMap to be useful, see its documentation for details. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_capacity_and_hasher(10, s); + /// map.insert(1, 2); + /// ``` + /// + /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { + Self { + hash_builder, + table: RawTable::with_capacity(capacity), + } + } + + /// Returns a reference to the map's [`BuildHasher`]. + /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let hasher = DefaultHashBuilder::default(); + /// let map: HashMap = HashMap::with_hasher(hasher); + /// let hasher: &DefaultHashBuilder = map.hasher(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn hasher(&self) -> &S { + &self.hash_builder + } + + /// Returns the number of elements the map can hold without reallocating. + /// + /// This number is a lower bound; the `HashMap` might be able to hold + /// more, but is guaranteed to be able to hold at least this many. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let map: HashMap = HashMap::with_capacity(100); + /// assert!(map.capacity() >= 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn capacity(&self) -> usize { + self.table.capacity() + } + + /// An iterator visiting all keys in arbitrary order. + /// The iterator element type is `&'a K`. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// for key in map.keys() { + /// println!("{}", key); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn keys(&self) -> Keys<'_, K, V> { + Keys { inner: self.iter() } + } + + /// An iterator visiting all values in arbitrary order. + /// The iterator element type is `&'a V`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// for val in map.values() { + /// println!("{}", val); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn values(&self) -> Values<'_, K, V> { + Values { inner: self.iter() } + } + + /// An iterator visiting all values mutably in arbitrary order. + /// The iterator element type is `&'a mut V`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// for val in map.values_mut() { + /// *val = *val + 10; + /// } + /// + /// for val in map.values() { + /// println!("{}", val); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { + ValuesMut { + inner: self.iter_mut(), + } + } + + /// An iterator visiting all key-value pairs in arbitrary order. + /// The iterator element type is `(&'a K, &'a V)`. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// for (key, val) in map.iter() { + /// println!("key: {} val: {}", key, val); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> Iter<'_, K, V> { + // Here we tie the lifetime of self to the iter. + unsafe { + Iter { + inner: self.table.iter(), + marker: PhantomData, + } + } + } + + /// An iterator visiting all key-value pairs in arbitrary order, + /// with mutable references to the values. + /// The iterator element type is `(&'a K, &'a mut V)`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// // Update all values + /// for (_, val) in map.iter_mut() { + /// *val *= 2; + /// } + /// + /// for (key, val) in &map { + /// println!("key: {} val: {}", key, val); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + // Here we tie the lifetime of self to the iter. + unsafe { + IterMut { + inner: self.table.iter(), + marker: PhantomData, + } + } + } + + #[cfg(test)] + #[cfg_attr(feature = "inline-more", inline)] + fn raw_capacity(&self) -> usize { + self.table.buckets() + } + + /// Returns the number of elements in the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// assert_eq!(a.len(), 0); + /// a.insert(1, "a"); + /// assert_eq!(a.len(), 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn len(&self) -> usize { + self.table.len() + } + + /// Returns `true` if the map contains no elements. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// assert!(a.is_empty()); + /// a.insert(1, "a"); + /// assert!(!a.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Clears the map, returning all key-value pairs as an iterator. Keeps the + /// allocated memory for reuse. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// + /// for (k, v) in a.drain().take(1) { + /// assert!(k == 1 || k == 2); + /// assert!(v == "a" || v == "b"); + /// } + /// + /// assert!(a.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain(&mut self) -> Drain<'_, K, V> { + Drain { + inner: self.table.drain(), + } + } + + /// Retains only the elements specified by the predicate. + /// + /// In other words, remove all pairs `(k, v)` such that `f(&k,&mut v)` returns `false`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = (0..8).map(|x|(x, x*10)).collect(); + /// map.retain(|&k, _| k % 2 == 0); + /// assert_eq!(map.len(), 4); + /// ``` + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&K, &mut V) -> bool, + { + // Here we only use `iter` as a temporary, preventing use-after-free + unsafe { + for item in self.table.iter() { + let &mut (ref key, ref mut value) = item.as_mut(); + if !f(key, value) { + self.table.erase(item); + } + } + } + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// In other words, move all pairs `(k, v)` such that `f(&k,&mut v)` returns `true` out + /// into another iterator. + /// + /// When the returned DrainedFilter is dropped, any remaining elements that satisfy + /// the predicate are dropped from the table. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); + /// let drained: HashMap = map.drain_filter(|k, _v| k % 2 == 0).collect(); + /// + /// let mut evens = drained.keys().cloned().collect::>(); + /// let mut odds = map.keys().cloned().collect::>(); + /// evens.sort(); + /// odds.sort(); + /// + /// assert_eq!(evens, vec![0, 2, 4, 6]); + /// assert_eq!(odds, vec![1, 3, 5, 7]); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain_filter(&mut self, f: F) -> DrainFilter<'_, K, V, F> + where + F: FnMut(&K, &mut V) -> bool, + { + DrainFilter { + f, + inner: DrainFilterInner { + iter: unsafe { self.table.iter() }, + table: &mut self.table, + }, + } + } + + /// Clears the map, removing all key-value pairs. Keeps the allocated memory + /// for reuse. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// a.insert(1, "a"); + /// a.clear(); + /// assert!(a.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear(&mut self) { + self.table.clear(); + } +} + +impl HashMap +where + K: Eq + Hash, + S: BuildHasher, +{ + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the `HashMap`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Panics + /// + /// Panics if the new allocation size overflows [`usize`]. 
+ /// + /// [`usize`]: https://doc.rust-lang.org/std/primitive.usize.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, i32> = HashMap::new(); + /// map.reserve(10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve(&mut self, additional: usize) { + let hash_builder = &self.hash_builder; + self.table + .reserve(additional, |x| make_hash(hash_builder, &x.0)); + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashMap`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, isize> = HashMap::new(); + /// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + let hash_builder = &self.hash_builder; + self.table + .try_reserve(additional, |x| make_hash(hash_builder, &x.0)) + } + + /// Shrinks the capacity of the map as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::with_capacity(100); + /// map.insert(1, 2); + /// map.insert(3, 4); + /// assert!(map.capacity() >= 100); + /// map.shrink_to_fit(); + /// assert!(map.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to_fit(&mut self) { + let hash_builder = &self.hash_builder; + self.table.shrink_to(0, |x| make_hash(hash_builder, &x.0)); + } + + /// Shrinks the capacity of the map with a lower limit. 
It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// This function does nothing if the current capacity is smaller than the + /// supplied minimum capacity. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::with_capacity(100); + /// map.insert(1, 2); + /// map.insert(3, 4); + /// assert!(map.capacity() >= 100); + /// map.shrink_to(10); + /// assert!(map.capacity() >= 10); + /// map.shrink_to(0); + /// assert!(map.capacity() >= 2); + /// map.shrink_to(10); + /// assert!(map.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to(&mut self, min_capacity: usize) { + let hash_builder = &self.hash_builder; + self.table + .shrink_to(min_capacity, |x| make_hash(hash_builder, &x.0)); + } + + /// Gets the given key's corresponding entry in the map for in-place manipulation. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut letters = HashMap::new(); + /// + /// for ch in "a short treatise on fungi".chars() { + /// let counter = letters.entry(ch).or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(letters[&'s'], 2); + /// assert_eq!(letters[&'t'], 3); + /// assert_eq!(letters[&'u'], 1); + /// assert_eq!(letters.get(&'y'), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S> { + let hash = make_hash(&self.hash_builder, &key); + if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) { + Entry::Occupied(OccupiedEntry { + hash, + key: Some(key), + elem, + table: self, + }) + } else { + Entry::Vacant(VacantEntry { + hash, + key, + table: self, + }) + } + } + + /// Returns a reference to the value corresponding to the key. 
+ /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.get(&1), Some(&"a")); + /// assert_eq!(map.get(&2), None); + /// ``` + #[inline] + pub fn get(&self, k: &Q) -> Option<&V> + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner(k) { + Some(&(_, ref v)) => Some(v), + None => None, + } + } + + /// Returns the key-value pair corresponding to the supplied key. + /// + /// The supplied key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.get_key_value(&1), Some((&1, &"a"))); + /// assert_eq!(map.get_key_value(&2), None); + /// ``` + #[inline] + pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner(k) { + Some(&(ref key, ref value)) => Some((key, value)), + None => None, + } + } + + #[inline] + fn get_inner(&self, k: &Q) -> Option<&(K, V)> + where + K: Borrow, + Q: Hash + Eq, + { + let hash = make_hash(&self.hash_builder, k); + self.table.get(hash, |x| k.eq(x.0.borrow())) + } + + /// Returns the key-value pair corresponding to the supplied key, with a mutable reference to value. 
+ /// + /// The supplied key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// let (k, v) = map.get_key_value_mut(&1).unwrap(); + /// assert_eq!(k, &1); + /// assert_eq!(v, &mut "a"); + /// *v = "b"; + /// assert_eq!(map.get_key_value_mut(&1), Some((&1, &mut "b"))); + /// assert_eq!(map.get_key_value_mut(&2), None); + /// ``` + #[inline] + pub fn get_key_value_mut(&mut self, k: &Q) -> Option<(&K, &mut V)> + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner_mut(k) { + Some(&mut (ref key, ref mut value)) => Some((key, value)), + None => None, + } + } + + /// Returns `true` if the map contains a value for the specified key. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.contains_key(&1), true); + /// assert_eq!(map.contains_key(&2), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn contains_key(&self, k: &Q) -> bool + where + K: Borrow, + Q: Hash + Eq, + { + self.get_inner(k).is_some() + } + + /// Returns a mutable reference to the value corresponding to the key. 
+ /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// if let Some(x) = map.get_mut(&1) { + /// *x = "b"; + /// } + /// assert_eq!(map[&1], "b"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner_mut(k) { + Some(&mut (_, ref mut v)) => Some(v), + None => None, + } + } + + #[inline] + fn get_inner_mut(&mut self, k: &Q) -> Option<&mut (K, V)> + where + K: Borrow, + Q: Hash + Eq, + { + let hash = make_hash(&self.hash_builder, k); + self.table.get_mut(hash, |x| k.eq(x.0.borrow())) + } + + /// Inserts a key-value pair into the map. + /// + /// If the map did not have this key present, [`None`] is returned. + /// + /// If the map did have this key present, the value is updated, and the old + /// value is returned. The key is not updated, though; this matters for + /// types that can be `==` without being identical. See the [module-level + /// documentation] for more. 
+ /// + /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None + /// [module-level documentation]: index.html#insert-and-complex-keys + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// assert_eq!(map.insert(37, "a"), None); + /// assert_eq!(map.is_empty(), false); + /// + /// map.insert(37, "b"); + /// assert_eq!(map.insert(37, "c"), Some("b")); + /// assert_eq!(map[&37], "c"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, k: K, v: V) -> Option { + let hash = make_hash(&self.hash_builder, &k); + if let Some((_, item)) = self.table.get_mut(hash, |x| k.eq(&x.0)) { + Some(mem::replace(item, v)) + } else { + let hash_builder = &self.hash_builder; + self.table + .insert(hash, (k, v), |x| make_hash(hash_builder, &x.0)); + None + } + } + + /// Removes a key from the map, returning the value at the key if the key + /// was previously in the map. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.remove(&1), Some("a")); + /// assert_eq!(map.remove(&1), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(&mut self, k: &Q) -> Option + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.remove_entry(k) { + Some((_, v)) => Some(v), + None => None, + } + } + + /// Removes a key from the map, returning the stored key and value if the + /// key was previously in the map. 
+ /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.remove_entry(&1), Some((1, "a"))); + /// assert_eq!(map.remove(&1), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(&mut self, k: &Q) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq, + { + let hash = make_hash(&self.hash_builder, &k); + self.table.remove_entry(hash, |x| k.eq(x.0.borrow())) + } +} + +impl HashMap { + /// Creates a raw entry builder for the HashMap. + /// + /// Raw entries provide the lowest level of control for searching and + /// manipulating a map. They must be manually initialized with a hash and + /// then manually searched. After this, insertions into a vacant entry + /// still require an owned key to be provided. + /// + /// Raw entries are useful for such exotic situations as: + /// + /// * Hash memoization + /// * Deferring the creation of an owned key until it is known to be required + /// * Using a search key that doesn't work with the Borrow trait + /// * Using custom comparison logic without newtype wrappers + /// + /// Because raw entries provide much more low-level control, it's much easier + /// to put the HashMap into an inconsistent state which, while memory-safe, + /// will cause the map to produce seemingly random results. Higher-level and + /// more foolproof APIs like `entry` should be preferred when possible. + /// + /// In particular, the hash used to initialized the raw entry must still be + /// consistent with the hash of the key that is ultimately stored in the entry. 
+ /// This is because implementations of HashMap may need to recompute hashes + /// when resizing, at which point only the keys are available. + /// + /// Raw entries give mutable access to the keys. This must not be used + /// to modify how the key would compare or hash, as the map will not re-evaluate + /// where the key should go, meaning the keys may become "lost" if their + /// location does not reflect their state. For instance, if you change a key + /// so that the map now contains keys which compare equal, search may start + /// acting erratically, with two keys randomly masking each other. Implementations + /// are free to assume this doesn't happen (within the limits of memory-safety). + #[cfg_attr(feature = "inline-more", inline)] + pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, S> { + RawEntryBuilderMut { map: self } + } + + /// Creates a raw immutable entry builder for the HashMap. + /// + /// Raw entries provide the lowest level of control for searching and + /// manipulating a map. They must be manually initialized with a hash and + /// then manually searched. + /// + /// This is useful for + /// * Hash memoization + /// * Using a search key that doesn't work with the Borrow trait + /// * Using custom comparison logic without newtype wrappers + /// + /// Unless you are in such a situation, higher-level and more foolproof APIs like + /// `get` should be preferred. + /// + /// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, S> { + RawEntryBuilder { map: self } + } +} + +impl PartialEq for HashMap +where + K: Eq + Hash, + V: PartialEq, + S: BuildHasher, +{ + fn eq(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false; + } + + self.iter() + .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + } +} + +impl Eq for HashMap +where + K: Eq + Hash, + V: Eq, + S: BuildHasher, +{ +} + +impl Debug for HashMap +where + K: Debug, + V: Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_map().entries(self.iter()).finish() + } +} + +impl Default for HashMap +where + S: Default, +{ + /// Creates an empty `HashMap`, with the `Default` value for the hasher. + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self::with_hasher(Default::default()) + } +} + +impl Index<&Q> for HashMap +where + K: Eq + Hash + Borrow, + Q: Eq + Hash, + S: BuildHasher, +{ + type Output = V; + + /// Returns a reference to the value corresponding to the supplied key. + /// + /// # Panics + /// + /// Panics if the key is not present in the `HashMap`. + #[cfg_attr(feature = "inline-more", inline)] + fn index(&self, key: &Q) -> &V { + self.get(key).expect("no entry found for key") + } +} + +/// An iterator over the entries of a `HashMap`. +/// +/// This `struct` is created by the [`iter`] method on [`HashMap`]. See its +/// documentation for more. 
+/// +/// [`iter`]: struct.HashMap.html#method.iter +/// [`HashMap`]: struct.HashMap.html +pub struct Iter<'a, K, V> { + inner: RawIter<(K, V)>, + marker: PhantomData<(&'a K, &'a V)>, +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Iter<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Iter { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for Iter<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A mutable iterator over the entries of a `HashMap`. +/// +/// This `struct` is created by the [`iter_mut`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`iter_mut`]: struct.HashMap.html#method.iter_mut +/// [`HashMap`]: struct.HashMap.html +pub struct IterMut<'a, K, V> { + inner: RawIter<(K, V)>, + // To ensure invariance with respect to V + marker: PhantomData<(&'a K, &'a mut V)>, +} + +// We override the default Send impl which has K: Sync instead of K: Send. Both +// are correct, but this one is more general since it allows keys which +// implement Send but not Sync. +unsafe impl Send for IterMut<'_, K, V> {} + +impl IterMut<'_, K, V> { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +/// An owning iterator over the entries of a `HashMap`. +/// +/// This `struct` is created by the [`into_iter`] method on [`HashMap`] +/// (provided by the `IntoIterator` trait). See its documentation for more. +/// +/// [`into_iter`]: struct.HashMap.html#method.into_iter +/// [`HashMap`]: struct.HashMap.html +pub struct IntoIter { + inner: RawIntoIter<(K, V)>, +} + +impl IntoIter { + /// Returns a iterator of references over the remaining items. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + inner: self.inner.iter(), + marker: PhantomData, + } + } +} + +/// An iterator over the keys of a `HashMap`. +/// +/// This `struct` is created by the [`keys`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`keys`]: struct.HashMap.html#method.keys +/// [`HashMap`]: struct.HashMap.html +pub struct Keys<'a, K, V> { + inner: Iter<'a, K, V>, +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Keys<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Keys { + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for Keys<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// An iterator over the values of a `HashMap`. +/// +/// This `struct` is created by the [`values`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`values`]: struct.HashMap.html#method.values +/// [`HashMap`]: struct.HashMap.html +pub struct Values<'a, K, V> { + inner: Iter<'a, K, V>, +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Values<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Values { + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for Values<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A draining iterator over the entries of a `HashMap`. +/// +/// This `struct` is created by the [`drain`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`drain`]: struct.HashMap.html#method.drain +/// [`HashMap`]: struct.HashMap.html +pub struct Drain<'a, K, V> { + inner: RawDrain<'a, (K, V)>, +} + +impl Drain<'_, K, V> { + /// Returns a iterator of references over the remaining items. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + inner: self.inner.iter(), + marker: PhantomData, + } + } +} + +/// A draining iterator over entries of a `HashMap` which don't satisfy the predicate `f`. +/// +/// This `struct` is created by the [`drain_filter`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`drain_filter`]: struct.HashMap.html#method.drain_filter +/// [`HashMap`]: struct.HashMap.html +pub struct DrainFilter<'a, K, V, F> +where + F: FnMut(&K, &mut V) -> bool, +{ + f: F, + inner: DrainFilterInner<'a, K, V>, +} + +impl<'a, K, V, F> Drop for DrainFilter<'a, K, V, F> +where + F: FnMut(&K, &mut V) -> bool, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + while let Some(item) = self.next() { + let guard = ConsumeAllOnDrop(self); + drop(item); + mem::forget(guard); + } + } +} + +pub(super) struct ConsumeAllOnDrop<'a, T: Iterator>(pub &'a mut T); + +impl Drop for ConsumeAllOnDrop<'_, T> { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + self.0.for_each(drop) + } +} + +impl Iterator for DrainFilter<'_, K, V, F> +where + F: FnMut(&K, &mut V) -> bool, +{ + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + self.inner.next(&mut self.f) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (0, self.inner.iter.size_hint().1) + } +} + +impl FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} + +/// Portions of `DrainFilter` shared with `set::DrainFilter` +pub(super) struct DrainFilterInner<'a, K, V> { + pub iter: RawIter<(K, V)>, + pub table: &'a mut RawTable<(K, V)>, +} + +impl DrainFilterInner<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn next(&mut self, f: &mut F) -> Option<(K, V)> + where + F: FnMut(&K, &mut V) -> bool, + { + unsafe { + while let Some(item) = self.iter.next() { + let &mut (ref key, ref mut value) = 
item.as_mut(); + if f(key, value) { + return Some(self.table.remove(item)); + } + } + } + None + } +} + +/// A mutable iterator over the values of a `HashMap`. +/// +/// This `struct` is created by the [`values_mut`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`values_mut`]: struct.HashMap.html#method.values_mut +/// [`HashMap`]: struct.HashMap.html +pub struct ValuesMut<'a, K, V> { + inner: IterMut<'a, K, V>, +} + +/// A builder for computing where in a [`HashMap`] a key-value pair would be stored. +/// +/// See the [`HashMap::raw_entry_mut`] docs for usage examples. +/// +/// [`HashMap::raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut +pub struct RawEntryBuilderMut<'a, K, V, S> { + map: &'a mut HashMap, +} + +/// A view into a single entry in a map, which may either be vacant or occupied. +/// +/// This is a lower-level version of [`Entry`]. +/// +/// This `enum` is constructed through the [`raw_entry_mut`] method on [`HashMap`], +/// then calling one of the methods of that [`RawEntryBuilderMut`]. +/// +/// [`HashMap`]: struct.HashMap.html +/// [`Entry`]: enum.Entry.html +/// [`raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut +/// [`RawEntryBuilderMut`]: struct.RawEntryBuilderMut.html +pub enum RawEntryMut<'a, K, V, S> { + /// An occupied entry. + Occupied(RawOccupiedEntryMut<'a, K, V, S>), + /// A vacant entry. + Vacant(RawVacantEntryMut<'a, K, V, S>), +} + +/// A view into an occupied entry in a `HashMap`. +/// It is part of the [`RawEntryMut`] enum. +/// +/// [`RawEntryMut`]: enum.RawEntryMut.html +pub struct RawOccupiedEntryMut<'a, K, V, S> { + elem: Bucket<(K, V)>, + table: &'a mut RawTable<(K, V)>, + hash_builder: &'a S, +} + +unsafe impl Send for RawOccupiedEntryMut<'_, K, V, S> +where + K: Send, + V: Send, + S: Sync, +{ +} +unsafe impl Sync for RawOccupiedEntryMut<'_, K, V, S> +where + K: Sync, + V: Sync, + S: Sync, +{ +} + +/// A view into a vacant entry in a `HashMap`. 
+/// It is part of the [`RawEntryMut`] enum. +/// +/// [`RawEntryMut`]: enum.RawEntryMut.html +pub struct RawVacantEntryMut<'a, K, V, S> { + table: &'a mut RawTable<(K, V)>, + hash_builder: &'a S, +} + +/// A builder for computing where in a [`HashMap`] a key-value pair would be stored. +/// +/// See the [`HashMap::raw_entry`] docs for usage examples. +/// +/// [`HashMap::raw_entry`]: struct.HashMap.html#method.raw_entry +pub struct RawEntryBuilder<'a, K, V, S> { + map: &'a HashMap, +} + +impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S> { + /// Creates a `RawEntryMut` from the given key. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_key(self, k: &Q) -> RawEntryMut<'a, K, V, S> + where + S: BuildHasher, + K: Borrow, + Q: Hash + Eq, + { + let mut hasher = self.map.hash_builder.build_hasher(); + k.hash(&mut hasher); + self.from_key_hashed_nocheck(hasher.finish(), k) + } + + /// Creates a `RawEntryMut` from the given key and its hash. + #[inline] + #[allow(clippy::wrong_self_convention)] + pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S> + where + K: Borrow, + Q: Eq, + { + self.from_hash(hash, |q| q.borrow().eq(k)) + } +} + +impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S> { + /// Creates a `RawEntryMut` from the given hash. 
+ #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_hash(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S> + where + for<'b> F: FnMut(&'b K) -> bool, + { + self.search(hash, is_match) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn search(self, hash: u64, mut is_match: F) -> RawEntryMut<'a, K, V, S> + where + for<'b> F: FnMut(&'b K) -> bool, + { + match self.map.table.find(hash, |(k, _)| is_match(k)) { + Some(elem) => RawEntryMut::Occupied(RawOccupiedEntryMut { + elem, + table: &mut self.map.table, + hash_builder: &self.map.hash_builder, + }), + None => RawEntryMut::Vacant(RawVacantEntryMut { + table: &mut self.map.table, + hash_builder: &self.map.hash_builder, + }), + } + } +} + +impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S> { + /// Access an entry by key. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_key(self, k: &Q) -> Option<(&'a K, &'a V)> + where + S: BuildHasher, + K: Borrow, + Q: Hash + Eq, + { + let mut hasher = self.map.hash_builder.build_hasher(); + k.hash(&mut hasher); + self.from_key_hashed_nocheck(hasher.finish(), k) + } + + /// Access an entry by a key and its hash. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)> + where + K: Borrow, + Q: Eq, + { + self.from_hash(hash, |q| q.borrow().eq(k)) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn search(self, hash: u64, mut is_match: F) -> Option<(&'a K, &'a V)> + where + F: FnMut(&K) -> bool, + { + match self.map.table.get(hash, |(k, _)| is_match(k)) { + Some(&(ref key, ref value)) => Some((key, value)), + None => None, + } + } + + /// Access an entry by hash. 
+ #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_hash(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)> + where + F: FnMut(&K) -> bool, + { + self.search(hash, is_match) + } +} + +impl<'a, K, V, S> RawEntryMut<'a, K, V, S> { + /// Sets the value of the entry, and returns a RawOccupiedEntryMut. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let entry = map.raw_entry_mut().from_key("horseyland").insert("horseyland", 37); + /// + /// assert_eq!(entry.remove_entry(), ("horseyland", 37)); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, key: K, value: V) -> RawOccupiedEntryMut<'a, K, V, S> + where + K: Hash, + S: BuildHasher, + { + match self { + RawEntryMut::Occupied(mut entry) => { + entry.insert(value); + entry + } + RawEntryMut::Vacant(entry) => entry.insert_entry(key, value), + } + } + + /// Ensures a value is in the entry by inserting the default if empty, and returns + /// mutable references to the key and value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 3); + /// assert_eq!(map["poneyland"], 3); + /// + /// *map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 10).1 *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert(self, default_key: K, default_val: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + match self { + RawEntryMut::Occupied(entry) => entry.into_key_value(), + RawEntryMut::Vacant(entry) => entry.insert(default_key, default_val), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty, + /// and returns mutable references to the key and value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, String> = HashMap::new(); + /// + /// map.raw_entry_mut().from_key("poneyland").or_insert_with(|| { + /// ("poneyland", "hoho".to_string()) + /// }); + /// + /// assert_eq!(map["poneyland"], "hoho".to_string()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with(self, default: F) -> (&'a mut K, &'a mut V) + where + F: FnOnce() -> (K, V), + K: Hash, + S: BuildHasher, + { + match self { + RawEntryMut::Occupied(entry) => entry.into_key_value(), + RawEntryMut::Vacant(entry) => { + let (k, v) = default(); + entry.insert(k, v) + } + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the map. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.raw_entry_mut() + /// .from_key("poneyland") + /// .and_modify(|_k, v| { *v += 1 }) + /// .or_insert("poneyland", 42); + /// assert_eq!(map["poneyland"], 42); + /// + /// map.raw_entry_mut() + /// .from_key("poneyland") + /// .and_modify(|_k, v| { *v += 1 }) + /// .or_insert("poneyland", 0); + /// assert_eq!(map["poneyland"], 43); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_modify(self, f: F) -> Self + where + F: FnOnce(&mut K, &mut V), + { + match self { + RawEntryMut::Occupied(mut entry) => { + { + let (k, v) = entry.get_key_value_mut(); + f(k, v); + } + RawEntryMut::Occupied(entry) + } + RawEntryMut::Vacant(entry) => RawEntryMut::Vacant(entry), + } + } + + /// Provides shared access to the key and owned access to the value of + /// an occupied entry and allows to replace or remove it based on the + /// value of the returned option. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RawEntryMut; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// let entry = map + /// .raw_entry_mut() + /// .from_key("poneyland") + /// .and_replace_entry_with(|_k, _v| panic!()); + /// + /// match entry { + /// RawEntryMut::Vacant(_) => {}, + /// RawEntryMut::Occupied(_) => panic!(), + /// } + /// + /// map.insert("poneyland", 42); + /// + /// let entry = map + /// .raw_entry_mut() + /// .from_key("poneyland") + /// .and_replace_entry_with(|k, v| { + /// assert_eq!(k, &"poneyland"); + /// assert_eq!(v, 42); + /// Some(v + 1) + /// }); + /// + /// match entry { + /// RawEntryMut::Occupied(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// assert_eq!(e.get(), &43); + /// }, + /// RawEntryMut::Vacant(_) => panic!(), + /// } + /// + /// assert_eq!(map["poneyland"], 43); + /// + /// let entry = map + /// .raw_entry_mut() + /// .from_key("poneyland") + /// .and_replace_entry_with(|_k, _v| None); + /// + /// match entry { + /// RawEntryMut::Vacant(_) => {}, + /// RawEntryMut::Occupied(_) => panic!(), + /// } + /// + /// assert!(!map.contains_key("poneyland")); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_replace_entry_with(self, f: F) -> Self + where + F: FnOnce(&K, V) -> Option, + { + match self { + RawEntryMut::Occupied(entry) => entry.replace_entry_with(f), + RawEntryMut::Vacant(_) => self, + } + } +} + +impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { + /// Gets a reference to the key in the entry. + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + unsafe { &self.elem.as_ref().0 } + } + + /// Gets a mutable reference to the key in the entry. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn key_mut(&mut self) -> &mut K { + unsafe { &mut self.elem.as_mut().0 } + } + + /// Converts the entry into a mutable reference to the key in the entry + /// with a lifetime bound to the map itself. + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_key(self) -> &'a mut K { + unsafe { &mut self.elem.as_mut().0 } + } + + /// Gets a reference to the value in the entry. + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &V { + unsafe { &self.elem.as_ref().1 } + } + + /// Converts the OccupiedEntry into a mutable reference to the value in the entry + /// with a lifetime bound to the map itself. + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_mut(self) -> &'a mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Gets a mutable reference to the value in the entry. + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self) -> &mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Gets a reference to the key and value in the entry. + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_key_value(&mut self) -> (&K, &V) { + unsafe { + let &(ref key, ref value) = self.elem.as_ref(); + (key, value) + } + } + + /// Gets a mutable reference to the key and value in the entry. + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) { + unsafe { + let &mut (ref mut key, ref mut value) = self.elem.as_mut(); + (key, value) + } + } + + /// Converts the OccupiedEntry into a mutable reference to the key and value in the entry + /// with a lifetime bound to the map itself. + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_key_value(self) -> (&'a mut K, &'a mut V) { + unsafe { + let &mut (ref mut key, ref mut value) = self.elem.as_mut(); + (key, value) + } + } + + /// Sets the value of the entry, and returns the entry's old value. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } + + /// Sets the value of the entry, and returns the entry's old value. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_key(&mut self, key: K) -> K { + mem::replace(self.key_mut(), key) + } + + /// Takes the value out of the entry, and returns it. + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(self) -> V { + self.remove_entry().1 + } + + /// Take the ownership of the key and value from the map. + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(self) -> (K, V) { + unsafe { self.table.remove(self.elem) } + } + + /// Provides shared access to the key and owned access to the value of + /// the entry and allows to replace or remove it based on the + /// value of the returned option. + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace_entry_with(self, f: F) -> RawEntryMut<'a, K, V, S> + where + F: FnOnce(&K, V) -> Option, + { + unsafe { + let still_occupied = self + .table + .replace_bucket_with(self.elem.clone(), |(key, value)| { + f(&key, value).map(|new_value| (key, new_value)) + }); + + if still_occupied { + RawEntryMut::Occupied(self) + } else { + RawEntryMut::Vacant(RawVacantEntryMut { + table: self.table, + hash_builder: self.hash_builder, + }) + } + } + } +} + +impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> { + /// Sets the value of the entry with the VacantEntry's key, + /// and returns a mutable reference to it. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + let mut hasher = self.hash_builder.build_hasher(); + key.hash(&mut hasher); + self.insert_hashed_nocheck(hasher.finish(), key, value) + } + + /// Sets the value of the entry with the VacantEntry's key, + /// and returns a mutable reference to it. 
+ #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::shadow_unrelated)] + pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) + where + K: Hash, + S: BuildHasher, + { + let hash_builder = self.hash_builder; + self.insert_with_hasher(hash, key, value, |k| make_hash(hash_builder, k)) + } + + /// Set the value of an entry with a custom hasher function. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_with_hasher( + self, + hash: u64, + key: K, + value: V, + hasher: H, + ) -> (&'a mut K, &'a mut V) + where + H: Fn(&K) -> u64, + { + let &mut (ref mut k, ref mut v) = self + .table + .insert_entry(hash, (key, value), |x| hasher(&x.0)); + (k, v) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn insert_entry(self, key: K, value: V) -> RawOccupiedEntryMut<'a, K, V, S> + where + K: Hash, + S: BuildHasher, + { + let hash_builder = self.hash_builder; + let mut hasher = self.hash_builder.build_hasher(); + key.hash(&mut hasher); + + let elem = self.table.insert(hasher.finish(), (key, value), |k| { + make_hash(hash_builder, &k.0) + }); + RawOccupiedEntryMut { + elem, + table: self.table, + hash_builder: self.hash_builder, + } + } +} + +impl Debug for RawEntryBuilderMut<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawEntryBuilder").finish() + } +} + +impl Debug for RawEntryMut<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(), + RawEntryMut::Occupied(ref o) => f.debug_tuple("RawEntry").field(o).finish(), + } + } +} + +impl Debug for RawOccupiedEntryMut<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawOccupiedEntryMut") + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +impl Debug for RawVacantEntryMut<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { + f.debug_struct("RawVacantEntryMut").finish() + } +} + +impl Debug for RawEntryBuilder<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RawEntryBuilder").finish() + } +} + +/// A view into a single entry in a map, which may either be vacant or occupied. +/// +/// This `enum` is constructed from the [`entry`] method on [`HashMap`]. +/// +/// [`HashMap`]: struct.HashMap.html +/// [`entry`]: struct.HashMap.html#method.entry +pub enum Entry<'a, K, V, S> { + /// An occupied entry. + Occupied(OccupiedEntry<'a, K, V, S>), + + /// A vacant entry. + Vacant(VacantEntry<'a, K, V, S>), +} + +impl Debug for Entry<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), + Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), + } + } +} + +/// A view into an occupied entry in a `HashMap`. +/// It is part of the [`Entry`] enum. +/// +/// [`Entry`]: enum.Entry.html +pub struct OccupiedEntry<'a, K, V, S> { + hash: u64, + key: Option, + elem: Bucket<(K, V)>, + table: &'a mut HashMap, +} + +unsafe impl Send for OccupiedEntry<'_, K, V, S> +where + K: Send, + V: Send, + S: Send, +{ +} +unsafe impl Sync for OccupiedEntry<'_, K, V, S> +where + K: Sync, + V: Sync, + S: Sync, +{ +} + +impl Debug for OccupiedEntry<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OccupiedEntry") + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +/// A view into a vacant entry in a `HashMap`. +/// It is part of the [`Entry`] enum. 
+/// +/// [`Entry`]: enum.Entry.html +pub struct VacantEntry<'a, K, V, S> { + hash: u64, + key: K, + table: &'a mut HashMap, +} + +impl Debug for VacantEntry<'_, K, V, S> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("VacantEntry").field(self.key()).finish() + } +} + +impl<'a, K, V, S> IntoIterator for &'a HashMap { + type Item = (&'a K, &'a V); + type IntoIter = Iter<'a, K, V>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> Iter<'a, K, V> { + self.iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut HashMap { + type Item = (&'a K, &'a mut V); + type IntoIter = IterMut<'a, K, V>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> IterMut<'a, K, V> { + self.iter_mut() + } +} + +impl IntoIterator for HashMap { + type Item = (K, V); + type IntoIter = IntoIter; + + /// Creates a consuming iterator, that is, one that moves each key-value + /// pair out of the map in arbitrary order. The map cannot be used after + /// calling this. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// // Not possible with .iter() + /// let vec: Vec<(&str, i32)> = map.into_iter().collect(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> IntoIter { + IntoIter { + inner: self.table.into_iter(), + } + } +} + +impl<'a, K, V> Iterator for Iter<'a, K, V> { + type Item = (&'a K, &'a V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<(&'a K, &'a V)> { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.inner.next() { + Some(x) => unsafe { + let r = x.as_ref(); + Some((&r.0, &r.1)) + }, + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} +impl ExactSizeIterator for Iter<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} + +impl FusedIterator for Iter<'_, K, V> {} + +impl<'a, K, V> Iterator for IterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<(&'a K, &'a mut V)> { + // Avoid `Option::map` because it bloats LLVM IR. + match self.inner.next() { + Some(x) => unsafe { + let r = x.as_mut(); + Some((&r.0, &mut r.1)) + }, + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} +impl ExactSizeIterator for IterMut<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for IterMut<'_, K, V> {} + +impl fmt::Debug for IterMut<'_, K, V> +where + K: fmt::Debug, + V: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.iter()).finish() + } +} + +impl Iterator for IntoIter { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<(K, V)> { + self.inner.next() + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} +impl ExactSizeIterator for IntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for IntoIter {} + +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.iter()).finish() + } +} + +impl<'a, K, V> Iterator for Keys<'a, K, V> { + type Item = &'a K; + + 
#[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a K> { + // Avoid `Option::map` because it bloats LLVM IR. + match self.inner.next() { + Some((k, _)) => Some(k), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} +impl ExactSizeIterator for Keys<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for Keys<'_, K, V> {} + +impl<'a, K, V> Iterator for Values<'a, K, V> { + type Item = &'a V; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a V> { + // Avoid `Option::map` because it bloats LLVM IR. + match self.inner.next() { + Some((_, v)) => Some(v), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} +impl ExactSizeIterator for Values<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for Values<'_, K, V> {} + +impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { + type Item = &'a mut V; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a mut V> { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.inner.next() { + Some((_, v)) => Some(v), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} +impl ExactSizeIterator for ValuesMut<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for ValuesMut<'_, K, V> {} + +impl fmt::Debug for ValuesMut<'_, K, V> +where + K: fmt::Debug, + V: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.inner.iter()).finish() + } +} + +impl<'a, K, V> Iterator for Drain<'a, K, V> { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<(K, V)> { + self.inner.next() + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} +impl ExactSizeIterator for Drain<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.inner.len() + } +} +impl FusedIterator for Drain<'_, K, V> {} + +impl fmt::Debug for Drain<'_, K, V> +where + K: fmt::Debug, + V: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.iter()).finish() + } +} + +impl<'a, K, V, S> Entry<'a, K, V, S> { + /// Sets the value of the entry, and returns an OccupiedEntry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let entry = map.entry("horseyland").insert(37); + /// + /// assert_eq!(entry.key(), &"horseyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, value: V) -> OccupiedEntry<'a, K, V, S> + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(mut entry) => { + entry.insert(value); + entry + } + Entry::Vacant(entry) => entry.insert_entry(value), + } + } + + /// Ensures a value is in the entry by inserting the default if empty, and returns + /// a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.entry("poneyland").or_insert(3); + /// assert_eq!(map["poneyland"], 3); + /// + /// *map.entry("poneyland").or_insert(10) *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert(self, default: V) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty, + /// and returns a mutable reference to the value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, String> = HashMap::new(); + /// let s = "hoho".to_string(); + /// + /// map.entry("poneyland").or_insert_with(|| s); + /// + /// assert_eq!(map["poneyland"], "hoho".to_string()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with V>(self, default: F) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(default()), + } + } + + /// Ensures a value is in the entry by inserting, if empty, the result of the default function, + /// which takes the key as its argument, and returns a mutable reference to the value in the + /// entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, usize> = HashMap::new(); + /// + /// map.entry("poneyland").or_insert_with_key(|key| key.chars().count()); + /// + /// assert_eq!(map["poneyland"], 9); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with_key V>(self, default: F) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => { + let value = default(entry.key()); + entry.insert(value) + } + } + } + + /// Returns a reference to this entry's key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + match *self { + Entry::Occupied(ref entry) => entry.key(), + Entry::Vacant(ref entry) => entry.key(), + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the map. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 42); + /// + /// map.entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 43); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_modify(self, f: F) -> Self + where + F: FnOnce(&mut V), + { + match self { + Entry::Occupied(mut entry) => { + f(entry.get_mut()); + Entry::Occupied(entry) + } + Entry::Vacant(entry) => Entry::Vacant(entry), + } + } + + /// Provides shared access to the key and owned access to the value of + /// an occupied entry and allows to replace or remove it based on the + /// value of the returned option. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// let entry = map + /// .entry("poneyland") + /// .and_replace_entry_with(|_k, _v| panic!()); + /// + /// match entry { + /// Entry::Vacant(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// } + /// Entry::Occupied(_) => panic!(), + /// } + /// + /// map.insert("poneyland", 42); + /// + /// let entry = map + /// .entry("poneyland") + /// .and_replace_entry_with(|k, v| { + /// assert_eq!(k, &"poneyland"); + /// assert_eq!(v, 42); + /// Some(v + 1) + /// }); + /// + /// match entry { + /// Entry::Occupied(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// assert_eq!(e.get(), &43); + /// } + /// Entry::Vacant(_) => panic!(), + /// } + /// + /// assert_eq!(map["poneyland"], 43); + /// + /// let entry = map + /// .entry("poneyland") + /// .and_replace_entry_with(|_k, _v| None); + /// + /// match entry { + /// Entry::Vacant(e) => assert_eq!(e.key(), &"poneyland"), + /// Entry::Occupied(_) => panic!(), + /// } + /// + /// 
assert!(!map.contains_key("poneyland")); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_replace_entry_with(self, f: F) -> Self + where + F: FnOnce(&K, V) -> Option, + { + match self { + Entry::Occupied(entry) => entry.replace_entry_with(f), + Entry::Vacant(_) => self, + } + } +} + +impl<'a, K, V: Default, S> Entry<'a, K, V, S> { + /// Ensures a value is in the entry by inserting the default value if empty, + /// and returns a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, Option> = HashMap::new(); + /// map.entry("poneyland").or_default(); + /// + /// assert_eq!(map["poneyland"], None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_default(self) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(Default::default()), + } + } +} + +impl<'a, K, V, S> OccupiedEntry<'a, K, V, S> { + /// Gets a reference to the key in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + unsafe { &self.elem.as_ref().0 } + } + + /// Take the ownership of the key and value from the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// // We delete the entry from the map. 
+ /// o.remove_entry(); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(self) -> (K, V) { + unsafe { self.table.table.remove(self.elem) } + } + + /// Gets a reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// assert_eq!(o.get(), &12); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &V { + unsafe { &self.elem.as_ref().1 } + } + + /// Gets a mutable reference to the value in the entry. + /// + /// If you need a reference to the `OccupiedEntry` which may outlive the + /// destruction of the `Entry` value, see [`into_mut`]. + /// + /// [`into_mut`]: #method.into_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let Entry::Occupied(mut o) = map.entry("poneyland") { + /// *o.get_mut() += 10; + /// assert_eq!(*o.get(), 22); + /// + /// // We can use the same Entry multiple times. + /// *o.get_mut() += 2; + /// } + /// + /// assert_eq!(map["poneyland"], 24); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self) -> &mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Converts the OccupiedEntry into a mutable reference to the value in the entry + /// with a lifetime bound to the map itself. + /// + /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`]. 
+ /// + /// [`get_mut`]: #method.get_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// *o.into_mut() += 10; + /// } + /// + /// assert_eq!(map["poneyland"], 22); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_mut(self) -> &'a mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Sets the value of the entry, and returns the entry's old value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(mut o) = map.entry("poneyland") { + /// assert_eq!(o.insert(15), 12); + /// } + /// + /// assert_eq!(map["poneyland"], 15); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, mut value: V) -> V { + let old_value = self.get_mut(); + mem::swap(&mut value, old_value); + value + } + + /// Takes the value out of the entry, and returns it. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// assert_eq!(o.remove(), 12); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(self) -> V { + self.remove_entry().1 + } + + /// Replaces the entry, returning the old key and value. The new key in the hash map will be + /// the key used to create this entry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{Entry, HashMap}; + /// use std::rc::Rc; + /// + /// let mut map: HashMap, u32> = HashMap::new(); + /// map.insert(Rc::new("Stringthing".to_string()), 15); + /// + /// let my_key = Rc::new("Stringthing".to_string()); + /// + /// if let Entry::Occupied(entry) = map.entry(my_key) { + /// // Also replace the key with a handle to our other key. + /// let (old_key, old_value): (Rc, u32) = entry.replace_entry(16); + /// } + /// + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace_entry(self, value: V) -> (K, V) { + let entry = unsafe { self.elem.as_mut() }; + + let old_key = mem::replace(&mut entry.0, self.key.unwrap()); + let old_value = mem::replace(&mut entry.1, value); + + (old_key, old_value) + } + + /// Replaces the key in the hash map with the key used to create this entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{Entry, HashMap}; + /// use std::rc::Rc; + /// + /// let mut map: HashMap, u32> = HashMap::new(); + /// let mut known_strings: Vec> = Vec::new(); + /// + /// // Initialise known strings, run program, etc. + /// + /// reclaim_memory(&mut map, &known_strings); + /// + /// fn reclaim_memory(map: &mut HashMap, u32>, known_strings: &[Rc] ) { + /// for s in known_strings { + /// if let Entry::Occupied(entry) = map.entry(s.clone()) { + /// // Replaces the entry's key with our version of it in `known_strings`. + /// entry.replace_key(); + /// } + /// } + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace_key(self) -> K { + let entry = unsafe { self.elem.as_mut() }; + mem::replace(&mut entry.0, self.key.unwrap()) + } + + /// Provides shared access to the key and owned access to the value of + /// the entry and allows to replace or remove it based on the + /// value of the returned option. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.insert("poneyland", 42); + /// + /// let entry = match map.entry("poneyland") { + /// Entry::Occupied(e) => { + /// e.replace_entry_with(|k, v| { + /// assert_eq!(k, &"poneyland"); + /// assert_eq!(v, 42); + /// Some(v + 1) + /// }) + /// } + /// Entry::Vacant(_) => panic!(), + /// }; + /// + /// match entry { + /// Entry::Occupied(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// assert_eq!(e.get(), &43); + /// } + /// Entry::Vacant(_) => panic!(), + /// } + /// + /// assert_eq!(map["poneyland"], 43); + /// + /// let entry = match map.entry("poneyland") { + /// Entry::Occupied(e) => e.replace_entry_with(|_k, _v| None), + /// Entry::Vacant(_) => panic!(), + /// }; + /// + /// match entry { + /// Entry::Vacant(e) => { + /// assert_eq!(e.key(), &"poneyland"); + /// } + /// Entry::Occupied(_) => panic!(), + /// } + /// + /// assert!(!map.contains_key("poneyland")); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace_entry_with(self, f: F) -> Entry<'a, K, V, S> + where + F: FnOnce(&K, V) -> Option, + { + unsafe { + let mut spare_key = None; + + self.table + .table + .replace_bucket_with(self.elem.clone(), |(key, value)| { + if let Some(new_value) = f(&key, value) { + Some((key, new_value)) + } else { + spare_key = Some(key); + None + } + }); + + if let Some(key) = spare_key { + Entry::Vacant(VacantEntry { + hash: self.hash, + key, + table: self.table, + }) + } else { + Entry::Occupied(self) + } + } + } +} + +impl<'a, K, V, S> VacantEntry<'a, K, V, S> { + /// Gets a reference to the key that would be used when inserting a value + /// through the `VacantEntry`. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + &self.key + } + + /// Take ownership of the key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let Entry::Vacant(v) = map.entry("poneyland") { + /// v.into_key(); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_key(self) -> K { + self.key + } + + /// Sets the value of the entry with the VacantEntry's key, + /// and returns a mutable reference to it. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::Entry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let Entry::Vacant(o) = map.entry("poneyland") { + /// o.insert(37); + /// } + /// assert_eq!(map["poneyland"], 37); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, value: V) -> &'a mut V + where + K: Hash, + S: BuildHasher, + { + let hash_builder = &self.table.hash_builder; + let table = &mut self.table.table; + let entry = table.insert_entry(self.hash, (self.key, value), |x| { + make_hash(hash_builder, &x.0) + }); + &mut entry.1 + } + + #[cfg_attr(feature = "inline-more", inline)] + fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V, S> + where + K: Hash, + S: BuildHasher, + { + let hash_builder = &self.table.hash_builder; + let elem = self.table.table.insert(self.hash, (self.key, value), |x| { + make_hash(hash_builder, &x.0) + }); + OccupiedEntry { + hash: self.hash, + key: None, + elem, + table: self.table, + } + } +} + +impl FromIterator<(K, V)> for HashMap +where + K: Eq + Hash, + S: BuildHasher + Default, +{ + #[cfg_attr(feature = "inline-more", inline)] + 
fn from_iter>(iter: T) -> Self { + let iter = iter.into_iter(); + let mut map = Self::with_capacity_and_hasher(iter.size_hint().0, S::default()); + iter.for_each(|(k, v)| { + map.insert(k, v); + }); + map + } +} + +/// Inserts all new key-values from the iterator and replaces values with existing +/// keys with new values returned from the iterator. +impl Extend<(K, V)> for HashMap +where + K: Eq + Hash, + S: BuildHasher, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: T) { + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire hint lower bound if the map is empty. + // Otherwise reserve half the hint (rounded up), so the map + // will only resize twice in the worst case. + let iter = iter.into_iter(); + let reserve = if self.is_empty() { + iter.size_hint().0 + } else { + (iter.size_hint().0 + 1) / 2 + }; + self.reserve(reserve); + iter.for_each(move |(k, v)| { + self.insert(k, v); + }); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, (k, v): (K, V)) { + self.insert(k, v); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, additional: usize) { + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire hint lower bound if the map is empty. + // Otherwise reserve half the hint (rounded up), so the map + // will only resize twice in the worst case. 
+ let reserve = if self.is_empty() { + additional + } else { + (additional + 1) / 2 + }; + self.reserve(reserve); + } +} + +impl<'a, K, V, S> Extend<(&'a K, &'a V)> for HashMap +where + K: Eq + Hash + Copy, + V: Copy, + S: BuildHasher, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: T) { + self.extend(iter.into_iter().map(|(&key, &value)| (key, value))); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, (k, v): (&'a K, &'a V)) { + self.insert(*k, *v); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, additional: usize) { + Extend::<(K, V)>::extend_reserve(self, additional); + } +} + +#[allow(dead_code)] +fn assert_covariance() { + fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> { + v + } + fn map_val<'new>(v: HashMap) -> HashMap { + v + } + fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> { + v + } + fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { + v + } + fn into_iter_key<'new>(v: IntoIter<&'static str, u8>) -> IntoIter<&'new str, u8> { + v + } + fn into_iter_val<'new>(v: IntoIter) -> IntoIter { + v + } + fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> { + v + } + fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> { + v + } + fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> { + v + } + fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> { + v + } + fn drain<'new>( + d: Drain<'static, &'static str, &'static str>, + ) -> Drain<'new, &'new str, &'new str> { + d + } +} + +#[cfg(test)] +mod test_map { + use super::DefaultHashBuilder; + use super::Entry::{Occupied, Vacant}; + use super::{HashMap, RawEntryMut}; + use crate::TryReserveError::*; + use rand::{rngs::SmallRng, Rng, SeedableRng}; + use std::cell::RefCell; + use std::usize; + use std::vec::Vec; + + #[test] + 
fn test_zero_capacities() { + type HM = HashMap; + + let m = HM::new(); + assert_eq!(m.capacity(), 0); + + let m = HM::default(); + assert_eq!(m.capacity(), 0); + + let m = HM::with_hasher(DefaultHashBuilder::default()); + assert_eq!(m.capacity(), 0); + + let m = HM::with_capacity(0); + assert_eq!(m.capacity(), 0); + + let m = HM::with_capacity_and_hasher(0, DefaultHashBuilder::default()); + assert_eq!(m.capacity(), 0); + + let mut m = HM::new(); + m.insert(1, 1); + m.insert(2, 2); + m.remove(&1); + m.remove(&2); + m.shrink_to_fit(); + assert_eq!(m.capacity(), 0); + + let mut m = HM::new(); + m.reserve(0); + assert_eq!(m.capacity(), 0); + } + + #[test] + fn test_create_capacity_zero() { + let mut m = HashMap::with_capacity(0); + + assert!(m.insert(1, 1).is_none()); + + assert!(m.contains_key(&1)); + assert!(!m.contains_key(&0)); + } + + #[test] + fn test_insert() { + let mut m = HashMap::new(); + assert_eq!(m.len(), 0); + assert!(m.insert(1, 2).is_none()); + assert_eq!(m.len(), 1); + assert!(m.insert(2, 4).is_none()); + assert_eq!(m.len(), 2); + assert_eq!(*m.get(&1).unwrap(), 2); + assert_eq!(*m.get(&2).unwrap(), 4); + } + + #[test] + fn test_clone() { + let mut m = HashMap::new(); + assert_eq!(m.len(), 0); + assert!(m.insert(1, 2).is_none()); + assert_eq!(m.len(), 1); + assert!(m.insert(2, 4).is_none()); + assert_eq!(m.len(), 2); + let m2 = m.clone(); + assert_eq!(*m2.get(&1).unwrap(), 2); + assert_eq!(*m2.get(&2).unwrap(), 4); + assert_eq!(m2.len(), 2); + } + + #[test] + fn test_clone_from() { + let mut m = HashMap::new(); + let mut m2 = HashMap::new(); + assert_eq!(m.len(), 0); + assert!(m.insert(1, 2).is_none()); + assert_eq!(m.len(), 1); + assert!(m.insert(2, 4).is_none()); + assert_eq!(m.len(), 2); + m2.clone_from(&m); + assert_eq!(*m2.get(&1).unwrap(), 2); + assert_eq!(*m2.get(&2).unwrap(), 4); + assert_eq!(m2.len(), 2); + } + + thread_local! 
{ static DROP_VECTOR: RefCell> = RefCell::new(Vec::new()) } + + #[derive(Hash, PartialEq, Eq)] + struct Droppable { + k: usize, + } + + impl Droppable { + fn new(k: usize) -> Droppable { + DROP_VECTOR.with(|slot| { + slot.borrow_mut()[k] += 1; + }); + + Droppable { k } + } + } + + impl Drop for Droppable { + fn drop(&mut self) { + DROP_VECTOR.with(|slot| { + slot.borrow_mut()[self.k] -= 1; + }); + } + } + + impl Clone for Droppable { + fn clone(&self) -> Self { + Droppable::new(self.k) + } + } + + #[test] + fn test_drops() { + DROP_VECTOR.with(|slot| { + *slot.borrow_mut() = vec![0; 200]; + }); + + { + let mut m = HashMap::new(); + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + + for i in 0..100 { + let d1 = Droppable::new(i); + let d2 = Droppable::new(i + 100); + m.insert(d1, d2); + } + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 1); + } + }); + + for i in 0..50 { + let k = Droppable::new(i); + let v = m.remove(&k); + + assert!(v.is_some()); + + DROP_VECTOR.with(|v| { + assert_eq!(v.borrow()[i], 1); + assert_eq!(v.borrow()[i + 100], 1); + }); + } + + DROP_VECTOR.with(|v| { + for i in 0..50 { + assert_eq!(v.borrow()[i], 0); + assert_eq!(v.borrow()[i + 100], 0); + } + + for i in 50..100 { + assert_eq!(v.borrow()[i], 1); + assert_eq!(v.borrow()[i + 100], 1); + } + }); + } + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + } + + #[test] + fn test_into_iter_drops() { + DROP_VECTOR.with(|v| { + *v.borrow_mut() = vec![0; 200]; + }); + + let hm = { + let mut hm = HashMap::new(); + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + + for i in 0..100 { + let d1 = Droppable::new(i); + let d2 = Droppable::new(i + 100); + hm.insert(d1, d2); + } + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 1); + } + }); + + hm + }; + + // By the way, ensure that cloning doesn't screw up the dropping. 
+ drop(hm.clone()); + + { + let mut half = hm.into_iter().take(50); + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 1); + } + }); + + for _ in half.by_ref() {} + + DROP_VECTOR.with(|v| { + let nk = (0..100).filter(|&i| v.borrow()[i] == 1).count(); + + let nv = (0..100).filter(|&i| v.borrow()[i + 100] == 1).count(); + + assert_eq!(nk, 50); + assert_eq!(nv, 50); + }); + }; + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + } + + #[test] + fn test_empty_remove() { + let mut m: HashMap = HashMap::new(); + assert_eq!(m.remove(&0), None); + } + + #[test] + fn test_empty_entry() { + let mut m: HashMap = HashMap::new(); + match m.entry(0) { + Occupied(_) => panic!(), + Vacant(_) => {} + } + assert!(*m.entry(0).or_insert(true)); + assert_eq!(m.len(), 1); + } + + #[test] + fn test_empty_iter() { + let mut m: HashMap = HashMap::new(); + assert_eq!(m.drain().next(), None); + assert_eq!(m.keys().next(), None); + assert_eq!(m.values().next(), None); + assert_eq!(m.values_mut().next(), None); + assert_eq!(m.iter().next(), None); + assert_eq!(m.iter_mut().next(), None); + assert_eq!(m.len(), 0); + assert!(m.is_empty()); + assert_eq!(m.into_iter().next(), None); + } + + #[test] + #[cfg_attr(miri, ignore)] // FIXME: takes too long + fn test_lots_of_insertions() { + let mut m = HashMap::new(); + + // Try this a few times to make sure we never screw up the hashmap's + // internal state. 
+ for _ in 0..10 { + assert!(m.is_empty()); + + for i in 1..1001 { + assert!(m.insert(i, i).is_none()); + + for j in 1..=i { + let r = m.get(&j); + assert_eq!(r, Some(&j)); + } + + for j in i + 1..1001 { + let r = m.get(&j); + assert_eq!(r, None); + } + } + + for i in 1001..2001 { + assert!(!m.contains_key(&i)); + } + + // remove forwards + for i in 1..1001 { + assert!(m.remove(&i).is_some()); + + for j in 1..=i { + assert!(!m.contains_key(&j)); + } + + for j in i + 1..1001 { + assert!(m.contains_key(&j)); + } + } + + for i in 1..1001 { + assert!(!m.contains_key(&i)); + } + + for i in 1..1001 { + assert!(m.insert(i, i).is_none()); + } + + // remove backwards + for i in (1..1001).rev() { + assert!(m.remove(&i).is_some()); + + for j in i..1001 { + assert!(!m.contains_key(&j)); + } + + for j in 1..i { + assert!(m.contains_key(&j)); + } + } + } + } + + #[test] + fn test_find_mut() { + let mut m = HashMap::new(); + assert!(m.insert(1, 12).is_none()); + assert!(m.insert(2, 8).is_none()); + assert!(m.insert(5, 14).is_none()); + let new = 100; + match m.get_mut(&5) { + None => panic!(), + Some(x) => *x = new, + } + assert_eq!(m.get(&5), Some(&new)); + } + + #[test] + fn test_insert_overwrite() { + let mut m = HashMap::new(); + assert!(m.insert(1, 2).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert!(!m.insert(1, 3).is_none()); + assert_eq!(*m.get(&1).unwrap(), 3); + } + + #[test] + fn test_insert_conflicts() { + let mut m = HashMap::with_capacity(4); + assert!(m.insert(1, 2).is_none()); + assert!(m.insert(5, 3).is_none()); + assert!(m.insert(9, 4).is_none()); + assert_eq!(*m.get(&9).unwrap(), 4); + assert_eq!(*m.get(&5).unwrap(), 3); + assert_eq!(*m.get(&1).unwrap(), 2); + } + + #[test] + fn test_conflict_remove() { + let mut m = HashMap::with_capacity(4); + assert!(m.insert(1, 2).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert!(m.insert(5, 3).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert_eq!(*m.get(&5).unwrap(), 3); + assert!(m.insert(9, 
4).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert_eq!(*m.get(&5).unwrap(), 3); + assert_eq!(*m.get(&9).unwrap(), 4); + assert!(m.remove(&1).is_some()); + assert_eq!(*m.get(&9).unwrap(), 4); + assert_eq!(*m.get(&5).unwrap(), 3); + } + + #[test] + fn test_is_empty() { + let mut m = HashMap::with_capacity(4); + assert!(m.insert(1, 2).is_none()); + assert!(!m.is_empty()); + assert!(m.remove(&1).is_some()); + assert!(m.is_empty()); + } + + #[test] + fn test_remove() { + let mut m = HashMap::new(); + m.insert(1, 2); + assert_eq!(m.remove(&1), Some(2)); + assert_eq!(m.remove(&1), None); + } + + #[test] + fn test_remove_entry() { + let mut m = HashMap::new(); + m.insert(1, 2); + assert_eq!(m.remove_entry(&1), Some((1, 2))); + assert_eq!(m.remove(&1), None); + } + + #[test] + fn test_iterate() { + let mut m = HashMap::with_capacity(4); + for i in 0..32 { + assert!(m.insert(i, i * 2).is_none()); + } + assert_eq!(m.len(), 32); + + let mut observed: u32 = 0; + + for (k, v) in &m { + assert_eq!(*v, *k * 2); + observed |= 1 << *k; + } + assert_eq!(observed, 0xFFFF_FFFF); + } + + #[test] + fn test_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_iter().collect(); + let keys: Vec<_> = map.keys().cloned().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn test_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_iter().collect(); + let values: Vec<_> = map.values().cloned().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn test_values_mut() { + let vec = vec![(1, 1), (2, 2), (3, 3)]; + let mut map: HashMap<_, _> = vec.into_iter().collect(); + for value in map.values_mut() { + *value = (*value) * 2 + } + let values: Vec<_> = map.values().cloned().collect(); + 
assert_eq!(values.len(), 3); + assert!(values.contains(&2)); + assert!(values.contains(&4)); + assert!(values.contains(&6)); + } + + #[test] + fn test_find() { + let mut m = HashMap::new(); + assert!(m.get(&1).is_none()); + m.insert(1, 2); + match m.get(&1) { + None => panic!(), + Some(v) => assert_eq!(*v, 2), + } + } + + #[test] + fn test_eq() { + let mut m1 = HashMap::new(); + m1.insert(1, 2); + m1.insert(2, 3); + m1.insert(3, 4); + + let mut m2 = HashMap::new(); + m2.insert(1, 2); + m2.insert(2, 3); + + assert!(m1 != m2); + + m2.insert(3, 4); + + assert_eq!(m1, m2); + } + + #[test] + fn test_show() { + let mut map = HashMap::new(); + let empty: HashMap = HashMap::new(); + + map.insert(1, 2); + map.insert(3, 4); + + let map_str = format!("{:?}", map); + + assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}"); + assert_eq!(format!("{:?}", empty), "{}"); + } + + #[test] + fn test_expand() { + let mut m = HashMap::new(); + + assert_eq!(m.len(), 0); + assert!(m.is_empty()); + + let mut i = 0; + let old_raw_cap = m.raw_capacity(); + while old_raw_cap == m.raw_capacity() { + m.insert(i, i); + i += 1; + } + + assert_eq!(m.len(), i); + assert!(!m.is_empty()); + } + + #[test] + fn test_behavior_resize_policy() { + let mut m = HashMap::new(); + + assert_eq!(m.len(), 0); + assert_eq!(m.raw_capacity(), 1); + assert!(m.is_empty()); + + m.insert(0, 0); + m.remove(&0); + assert!(m.is_empty()); + let initial_raw_cap = m.raw_capacity(); + m.reserve(initial_raw_cap); + let raw_cap = m.raw_capacity(); + + assert_eq!(raw_cap, initial_raw_cap * 2); + + let mut i = 0; + for _ in 0..raw_cap * 3 / 4 { + m.insert(i, i); + i += 1; + } + // three quarters full + + assert_eq!(m.len(), i); + assert_eq!(m.raw_capacity(), raw_cap); + + for _ in 0..raw_cap / 4 { + m.insert(i, i); + i += 1; + } + // half full + + let new_raw_cap = m.raw_capacity(); + assert_eq!(new_raw_cap, raw_cap * 2); + + for _ in 0..raw_cap / 2 - 1 { + i -= 1; + m.remove(&i); + assert_eq!(m.raw_capacity(), 
new_raw_cap); + } + // A little more than one quarter full. + m.shrink_to_fit(); + assert_eq!(m.raw_capacity(), raw_cap); + // again, a little more than half full + for _ in 0..raw_cap / 2 { + i -= 1; + m.remove(&i); + } + m.shrink_to_fit(); + + assert_eq!(m.len(), i); + assert!(!m.is_empty()); + assert_eq!(m.raw_capacity(), initial_raw_cap); + } + + #[test] + fn test_reserve_shrink_to_fit() { + let mut m = HashMap::new(); + m.insert(0, 0); + m.remove(&0); + assert!(m.capacity() >= m.len()); + for i in 0..128 { + m.insert(i, i); + } + m.reserve(256); + + let usable_cap = m.capacity(); + for i in 128..(128 + 256) { + m.insert(i, i); + assert_eq!(m.capacity(), usable_cap); + } + + for i in 100..(128 + 256) { + assert_eq!(m.remove(&i), Some(i)); + } + m.shrink_to_fit(); + + assert_eq!(m.len(), 100); + assert!(!m.is_empty()); + assert!(m.capacity() >= m.len()); + + for i in 0..100 { + assert_eq!(m.remove(&i), Some(i)); + } + m.shrink_to_fit(); + m.insert(0, 0); + + assert_eq!(m.len(), 1); + assert!(m.capacity() >= m.len()); + assert_eq!(m.remove(&0), Some(0)); + } + + #[test] + fn test_from_iter() { + let xs = [(1, 1), (2, 2), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.iter().cloned().collect(); + + for &(k, v) in &xs { + assert_eq!(map.get(&k), Some(&v)); + } + + assert_eq!(map.iter().len(), xs.len() - 1); + } + + #[test] + fn test_size_hint() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.iter().cloned().collect(); + + let mut iter = map.iter(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.size_hint(), (3, Some(3))); + } + + #[test] + fn test_iter_len() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.iter().cloned().collect(); + + let mut iter = map.iter(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.len(), 3); + } + + #[test] + fn test_mut_size_hint() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + 
let mut map: HashMap<_, _> = xs.iter().cloned().collect(); + + let mut iter = map.iter_mut(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.size_hint(), (3, Some(3))); + } + + #[test] + fn test_iter_mut_len() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let mut map: HashMap<_, _> = xs.iter().cloned().collect(); + + let mut iter = map.iter_mut(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.len(), 3); + } + + #[test] + fn test_index() { + let mut map = HashMap::new(); + + map.insert(1, 2); + map.insert(2, 1); + map.insert(3, 4); + + assert_eq!(map[&2], 1); + } + + #[test] + #[should_panic] + fn test_index_nonexistent() { + let mut map = HashMap::new(); + + map.insert(1, 2); + map.insert(2, 1); + map.insert(3, 4); + + map[&4]; + } + + #[test] + fn test_entry() { + let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; + + let mut map: HashMap<_, _> = xs.iter().cloned().collect(); + + // Existing key (insert) + match map.entry(1) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + assert_eq!(view.get(), &10); + assert_eq!(view.insert(100), 10); + } + } + assert_eq!(map.get(&1).unwrap(), &100); + assert_eq!(map.len(), 6); + + // Existing key (update) + match map.entry(2) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + let v = view.get_mut(); + let new_v = (*v) * 10; + *v = new_v; + } + } + assert_eq!(map.get(&2).unwrap(), &200); + assert_eq!(map.len(), 6); + + // Existing key (take) + match map.entry(3) { + Vacant(_) => unreachable!(), + Occupied(view) => { + assert_eq!(view.remove(), 30); + } + } + assert_eq!(map.get(&3), None); + assert_eq!(map.len(), 5); + + // Inexistent key (insert) + match map.entry(10) { + Occupied(_) => unreachable!(), + Vacant(view) => { + assert_eq!(*view.insert(1000), 1000); + } + } + assert_eq!(map.get(&10).unwrap(), &1000); + assert_eq!(map.len(), 6); + } + + #[test] + fn test_entry_take_doesnt_corrupt() { + #![allow(deprecated)] //rand + // Test for #19292 + fn 
check(m: &HashMap) { + for k in m.keys() { + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + } + } + + let mut m = HashMap::new(); + + let mut rng = { + let seed = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + SmallRng::from_seed(seed) + }; + + // Populate the map with some items. + for _ in 0..50 { + let x = rng.gen_range(-10, 10); + m.insert(x, ()); + } + + for _ in 0..1000 { + let x = rng.gen_range(-10, 10); + match m.entry(x) { + Vacant(_) => {} + Occupied(e) => { + e.remove(); + } + } + + check(&m); + } + } + + #[test] + fn test_extend_ref() { + let mut a = HashMap::new(); + a.insert(1, "one"); + let mut b = HashMap::new(); + b.insert(2, "two"); + b.insert(3, "three"); + + a.extend(&b); + + assert_eq!(a.len(), 3); + assert_eq!(a[&1], "one"); + assert_eq!(a[&2], "two"); + assert_eq!(a[&3], "three"); + } + + #[test] + fn test_capacity_not_less_than_len() { + let mut a = HashMap::new(); + let mut item = 0; + + for _ in 0..116 { + a.insert(item, 0); + item += 1; + } + + assert!(a.capacity() > a.len()); + + let free = a.capacity() - a.len(); + for _ in 0..free { + a.insert(item, 0); + item += 1; + } + + assert_eq!(a.len(), a.capacity()); + + // Insert at capacity should cause allocation. 
+ a.insert(item, 0); + assert!(a.capacity() > a.len()); + } + + #[test] + fn test_occupied_entry_key() { + let mut a = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + assert!(a.is_empty()); + a.insert(key.clone(), value.clone()); + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + + match a.entry(key.clone()) { + Vacant(_) => panic!(), + Occupied(e) => assert_eq!(key, *e.key()), + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_vacant_entry_key() { + let mut a = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + + assert!(a.is_empty()); + match a.entry(key.clone()) { + Occupied(_) => panic!(), + Vacant(e) => { + assert_eq!(key, *e.key()); + e.insert(value.clone()); + } + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_occupied_entry_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a.entry(key).insert(value).replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = match a.entry(key) { + Occupied(e) => e.replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }), + Vacant(_) => panic!(), + }; + + match entry { + Vacant(e) => assert_eq!(e.key(), &key), + Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_entry_and_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a.entry(key).and_replace_entry_with(|_, _| panic!()); + + match entry { + Vacant(e) => assert_eq!(e.key(), &key), + 
Occupied(_) => panic!(), + } + + a.insert(key, value); + + let entry = a.entry(key).and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = a.entry(key).and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }); + + match entry { + Vacant(e) => assert_eq!(e.key(), &key), + Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_raw_occupied_entry_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a + .raw_entry_mut() + .from_key(&key) + .insert(key, value) + .replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + RawEntryMut::Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + RawEntryMut::Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = match a.raw_entry_mut().from_key(&key) { + RawEntryMut::Occupied(e) => e.replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }), + RawEntryMut::Vacant(_) => panic!(), + }; + + match entry { + RawEntryMut::Vacant(_) => {} + RawEntryMut::Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_raw_entry_and_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a + .raw_entry_mut() + .from_key(&key) + .and_replace_entry_with(|_, _| panic!()); + + match entry { + RawEntryMut::Vacant(_) => {} + RawEntryMut::Occupied(_) => panic!(), + } 
+ + a.insert(key, value); + + let entry = a + .raw_entry_mut() + .from_key(&key) + .and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + RawEntryMut::Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + RawEntryMut::Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = a + .raw_entry_mut() + .from_key(&key) + .and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }); + + match entry { + RawEntryMut::Vacant(_) => {} + RawEntryMut::Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_replace_entry_with_doesnt_corrupt() { + #![allow(deprecated)] //rand + // Test for #19292 + fn check(m: &HashMap) { + for k in m.keys() { + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + } + } + + let mut m = HashMap::new(); + + let mut rng = { + let seed = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + SmallRng::from_seed(seed) + }; + + // Populate the map with some items. 
+ for _ in 0..50 { + let x = rng.gen_range(-10, 10); + m.insert(x, ()); + } + + for _ in 0..1000 { + let x = rng.gen_range(-10, 10); + m.entry(x).and_replace_entry_with(|_, _| None); + check(&m); + } + } + + #[test] + fn test_retain() { + let mut map: HashMap = (0..100).map(|x| (x, x * 10)).collect(); + + map.retain(|&k, _| k % 2 == 0); + assert_eq!(map.len(), 50); + assert_eq!(map[&2], 20); + assert_eq!(map[&4], 40); + assert_eq!(map[&6], 60); + } + + #[test] + fn test_drain_filter() { + { + let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); + let drained = map.drain_filter(|&k, _| k % 2 == 0); + let mut out = drained.collect::>(); + out.sort_unstable(); + assert_eq!(vec![(0, 0), (2, 20), (4, 40), (6, 60)], out); + assert_eq!(map.len(), 4); + } + { + let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); + drop(map.drain_filter(|&k, _| k % 2 == 0)); + assert_eq!(map.len(), 4); + } + } + + #[test] + #[cfg_attr(miri, ignore)] // FIXME: no OOM signalling (https://github.com/rust-lang/miri/issues/613) + fn test_try_reserve() { + let mut empty_bytes: HashMap = HashMap::new(); + + const MAX_USIZE: usize = usize::MAX; + + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { + } else { + panic!("usize::MAX should trigger an overflow!"); + } + + if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 8) { + } else { + // This may succeed if there is enough free memory. Attempt to + // allocate a second hashmap to ensure the allocation will fail. + let mut empty_bytes2: HashMap = HashMap::new(); + if let Err(AllocError { .. 
}) = empty_bytes2.try_reserve(MAX_USIZE / 8) { + } else { + panic!("usize::MAX / 8 should trigger an OOM!"); + } + } + } + + #[test] + fn test_raw_entry() { + use super::RawEntryMut::{Occupied, Vacant}; + + let xs = [(1i32, 10i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; + + let mut map: HashMap<_, _> = xs.iter().cloned().collect(); + + let compute_hash = |map: &HashMap, k: i32| -> u64 { + use core::hash::{BuildHasher, Hash, Hasher}; + + let mut hasher = map.hasher().build_hasher(); + k.hash(&mut hasher); + hasher.finish() + }; + + // Existing key (insert) + match map.raw_entry_mut().from_key(&1) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + assert_eq!(view.get(), &10); + assert_eq!(view.insert(100), 10); + } + } + let hash1 = compute_hash(&map, 1); + assert_eq!(map.raw_entry().from_key(&1).unwrap(), (&1, &100)); + assert_eq!( + map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), + (&1, &100) + ); + assert_eq!( + map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), + (&1, &100) + ); + assert_eq!(map.len(), 6); + + // Existing key (update) + match map.raw_entry_mut().from_key(&2) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + let v = view.get_mut(); + let new_v = (*v) * 10; + *v = new_v; + } + } + let hash2 = compute_hash(&map, 2); + assert_eq!(map.raw_entry().from_key(&2).unwrap(), (&2, &200)); + assert_eq!( + map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), + (&2, &200) + ); + assert_eq!( + map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), + (&2, &200) + ); + assert_eq!(map.len(), 6); + + // Existing key (take) + let hash3 = compute_hash(&map, 3); + match map.raw_entry_mut().from_key_hashed_nocheck(hash3, &3) { + Vacant(_) => unreachable!(), + Occupied(view) => { + assert_eq!(view.remove_entry(), (3, 30)); + } + } + assert_eq!(map.raw_entry().from_key(&3), None); + assert_eq!(map.raw_entry().from_hash(hash3, |k| *k == 3), None); + assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash3, &3), None); 
+ assert_eq!(map.len(), 5); + + // Nonexistent key (insert) + match map.raw_entry_mut().from_key(&10) { + Occupied(_) => unreachable!(), + Vacant(view) => { + assert_eq!(view.insert(10, 1000), (&mut 10, &mut 1000)); + } + } + assert_eq!(map.raw_entry().from_key(&10).unwrap(), (&10, &1000)); + assert_eq!(map.len(), 6); + + // Ensure all lookup methods produce equivalent results. + for k in 0..12 { + let hash = compute_hash(&map, k); + let v = map.get(&k).cloned(); + let kv = v.as_ref().map(|v| (&k, v)); + + assert_eq!(map.raw_entry().from_key(&k), kv); + assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); + assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); + + match map.raw_entry_mut().from_key(&k) { + Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv), + Vacant(_) => assert_eq!(v, None), + } + match map.raw_entry_mut().from_key_hashed_nocheck(hash, &k) { + Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv), + Vacant(_) => assert_eq!(v, None), + } + match map.raw_entry_mut().from_hash(hash, |q| *q == k) { + Occupied(mut o) => assert_eq!(Some(o.get_key_value()), kv), + Vacant(_) => assert_eq!(v, None), + } + } + } + + #[test] + fn test_key_without_hash_impl() { + #[derive(Debug)] + struct IntWrapper(u64); + + let mut m: HashMap = HashMap::default(); + { + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_none()); + } + { + let vacant_entry = match m.raw_entry_mut().from_hash(0, |k| k.0 == 0) { + RawEntryMut::Occupied(..) => panic!("Found entry for key 0"), + RawEntryMut::Vacant(e) => e, + }; + vacant_entry.insert_with_hasher(0, IntWrapper(0), (), |k| k.0); + } + { + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_some()); + assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_none()); + assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); + } + { + let vacant_entry = match m.raw_entry_mut().from_hash(1, |k| k.0 == 1) { + RawEntryMut::Occupied(..) 
=> panic!("Found entry for key 1"), + RawEntryMut::Vacant(e) => e, + }; + vacant_entry.insert_with_hasher(1, IntWrapper(1), (), |k| k.0); + } + { + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_some()); + assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_some()); + assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); + } + { + let occupied_entry = match m.raw_entry_mut().from_hash(0, |k| k.0 == 0) { + RawEntryMut::Occupied(e) => e, + RawEntryMut::Vacant(..) => panic!("Couldn't find entry for key 0"), + }; + occupied_entry.remove(); + } + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_none()); + assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_some()); + assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); + } + + #[test] + #[cfg(feature = "raw")] + fn test_into_iter_refresh() { + use core::hash::{BuildHasher, Hash, Hasher}; + + #[cfg(miri)] + const N: usize = 32; + #[cfg(not(miri))] + const N: usize = 128; + + let mut rng = rand::thread_rng(); + for n in 0..N { + let mut m = HashMap::new(); + for i in 0..n { + assert!(m.insert(i, 2 * i).is_none()); + } + let hasher = m.hasher().clone(); + + let mut it = unsafe { m.table.iter() }; + assert_eq!(it.len(), n); + + let mut i = 0; + let mut left = n; + let mut removed = Vec::new(); + loop { + // occasionally remove some elements + if i < n && rng.gen_bool(0.1) { + let mut hsh = hasher.build_hasher(); + i.hash(&mut hsh); + let hash = hsh.finish(); + + unsafe { + let e = m.table.find(hash, |q| q.0.eq(&i)); + if let Some(e) = e { + it.reflect_remove(&e); + let t = m.table.remove(e); + removed.push(t); + left -= 1; + } else { + assert!(removed.contains(&(i, 2 * i)), "{} not in {:?}", i, removed); + let e = m + .table + .insert(hash, (i, 2 * i), |x| super::make_hash(&hasher, &x.0)); + it.reflect_insert(&e); + if let Some(p) = removed.iter().position(|e| e == &(i, 2 * i)) { + removed.swap_remove(p); + } + left += 1; + } + } + } + + let e = it.next(); + if e.is_none() { + break; + } + assert!(i < 
n); + let t = unsafe { e.unwrap().as_ref() }; + assert!(!removed.contains(t)); + let (k, v) = t; + assert_eq!(*v, 2 * k); + i += 1; + } + assert!(i <= n); + + // just for safety: + assert_eq!(m.table.len(), left); + } + } + + #[test] + fn test_const_with_hasher() { + use core::hash::BuildHasher; + use std::borrow::ToOwned; + use std::collections::hash_map::DefaultHasher; + + #[derive(Clone)] + struct MyHasher; + impl BuildHasher for MyHasher { + type Hasher = DefaultHasher; + + fn build_hasher(&self) -> DefaultHasher { + DefaultHasher::new() + } + } + + const EMPTY_MAP: HashMap = + HashMap::with_hasher(MyHasher); + + let mut map = EMPTY_MAP.clone(); + map.insert(17, "seventeen".to_owned()); + assert_eq!("seventeen", map[&17]); + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/raw/bitmask.rs b/src/rust/vendor/hashbrown-0.9.1/src/raw/bitmask.rs new file mode 100644 index 000000000..99b2d5341 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/raw/bitmask.rs @@ -0,0 +1,122 @@ +use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE}; +#[cfg(feature = "nightly")] +use core::intrinsics; + +/// A bit mask which contains the result of a `Match` operation on a `Group` and +/// allows iterating through them. +/// +/// The bit mask is arranged so that low-order bits represent lower memory +/// addresses for group match results. +/// +/// For implementation reasons, the bits in the set may be sparsely packed, so +/// that there is only one bit-per-byte used (the high bit, 7). If this is the +/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be +/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is +/// similarly a mask of all the actually-used bits. +#[derive(Copy, Clone)] +pub struct BitMask(pub BitMaskWord); + +#[allow(clippy::use_self)] +impl BitMask { + /// Returns a new `BitMask` with all bits inverted. 
+ #[inline] + #[must_use] + pub fn invert(self) -> Self { + BitMask(self.0 ^ BITMASK_MASK) + } + + /// Flip the bit in the mask for the entry at the given index. + /// + /// Returns the bit's previous state. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + #[cfg(feature = "raw")] + pub unsafe fn flip(&mut self, index: usize) -> bool { + // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit. + let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1); + self.0 ^= mask; + // The bit was set if the bit is now 0. + self.0 & mask == 0 + } + + /// Returns a new `BitMask` with the lowest bit removed. + #[inline] + #[must_use] + pub fn remove_lowest_bit(self) -> Self { + BitMask(self.0 & (self.0 - 1)) + } + /// Returns whether the `BitMask` has at least one set bit. + #[inline] + pub fn any_bit_set(self) -> bool { + self.0 != 0 + } + + /// Returns the first set bit in the `BitMask`, if there is one. + #[inline] + pub fn lowest_set_bit(self) -> Option { + if self.0 == 0 { + None + } else { + Some(unsafe { self.lowest_set_bit_nonzero() }) + } + } + + /// Returns the first set bit in the `BitMask`, if there is one. The + /// bitmask must not be empty. + #[inline] + #[cfg(feature = "nightly")] + pub unsafe fn lowest_set_bit_nonzero(self) -> usize { + intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE + } + #[inline] + #[cfg(not(feature = "nightly"))] + pub unsafe fn lowest_set_bit_nonzero(self) -> usize { + self.trailing_zeros() + } + + /// Returns the number of trailing zeroes in the `BitMask`. + #[inline] + pub fn trailing_zeros(self) -> usize { + // ARM doesn't have a trailing_zeroes instruction, and instead uses + // reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM + // versions (pre-ARMv7) don't have RBIT and need to emulate it + // instead. Since we only have 1 bit set in each byte on ARM, we can + // use swap_bytes (REV) + leading_zeroes instead. 
+ if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 { + self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE + } else { + self.0.trailing_zeros() as usize / BITMASK_STRIDE + } + } + + /// Returns the number of leading zeroes in the `BitMask`. + #[inline] + pub fn leading_zeros(self) -> usize { + self.0.leading_zeros() as usize / BITMASK_STRIDE + } +} + +impl IntoIterator for BitMask { + type Item = usize; + type IntoIter = BitMaskIter; + + #[inline] + fn into_iter(self) -> BitMaskIter { + BitMaskIter(self) + } +} + +/// Iterator over the contents of a `BitMask`, returning the indicies of set +/// bits. +pub struct BitMaskIter(BitMask); + +impl Iterator for BitMaskIter { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + let bit = self.0.lowest_set_bit()?; + self.0 = self.0.remove_lowest_bit(); + Some(bit) + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/raw/generic.rs b/src/rust/vendor/hashbrown-0.9.1/src/raw/generic.rs new file mode 100644 index 000000000..26f8c5896 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/raw/generic.rs @@ -0,0 +1,151 @@ +use super::bitmask::BitMask; +use super::EMPTY; +use core::{mem, ptr}; + +// Use the native word size as the group size. Using a 64-bit group size on +// a 32-bit architecture will just end up being more expensive because +// shifts and multiplies will need to be emulated. +#[cfg(any( + target_pointer_width = "64", + target_arch = "aarch64", + target_arch = "x86_64", +))] +type GroupWord = u64; +#[cfg(all( + target_pointer_width = "32", + not(target_arch = "aarch64"), + not(target_arch = "x86_64"), +))] +type GroupWord = u32; + +pub type BitMaskWord = GroupWord; +pub const BITMASK_STRIDE: usize = 8; +// We only care about the highest bit of each byte for the mask. +#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)] +pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord; + +/// Helper function to replicate a byte across a `GroupWord`. 
+#[inline] +fn repeat(byte: u8) -> GroupWord { + GroupWord::from_ne_bytes([byte; Group::WIDTH]) +} + +/// Abstraction over a group of control bytes which can be scanned in +/// parallel. +/// +/// This implementation uses a word-sized integer. +#[derive(Copy, Clone)] +pub struct Group(GroupWord); + +// We perform all operations in the native endianess, and convert to +// little-endian just before creating a BitMask. The can potentially +// enable the compiler to eliminate unnecessary byte swaps if we are +// only checking whether a BitMask is empty. +#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty bytes, suitable for use as the initial + /// value for an empty hash table. + /// + /// This is guaranteed to be aligned to the group size. + pub const fn static_empty() -> &'static [u8; Group::WIDTH] { + #[repr(C)] + struct AlignedBytes { + _align: [Group; 0], + bytes: [u8; Group::WIDTH], + }; + const ALIGNED_BYTES: AlignedBytes = AlignedBytes { + _align: [], + bytes: [EMPTY; Group::WIDTH], + }; + &ALIGNED_BYTES.bytes + } + + /// Loads a group of bytes starting at the given address. + #[inline] + #[allow(clippy::cast_ptr_alignment)] // unaligned load + pub unsafe fn load(ptr: *const u8) -> Self { + Group(ptr::read_unaligned(ptr as *const _)) + } + + /// Loads a group of bytes starting at the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub unsafe fn load_aligned(ptr: *const u8) -> Self { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + Group(ptr::read(ptr as *const _)) + } + + /// Stores the group of bytes to the given address, which must be + /// aligned to `mem::align_of::()`. 
+ #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub unsafe fn store_aligned(self, ptr: *mut u8) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + ptr::write(ptr as *mut _, self.0); + } + + /// Returns a `BitMask` indicating all bytes in the group which *may* + /// have the given value. + /// + /// This function may return a false positive in certain cases where + /// the byte in the group differs from the searched value only in its + /// lowest bit. This is fine because: + /// - This never happens for `EMPTY` and `DELETED`, only full entries. + /// - The check for key equality will catch these. + /// - This only happens if there is at least 1 true match. + /// - The chance of this happening is very low (< 1% chance per byte). + #[inline] + pub fn match_byte(self, byte: u8) -> BitMask { + // This algorithm is derived from + // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + let cmp = self.0 ^ repeat(byte); + BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le()) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY`. + #[inline] + pub fn match_empty(self) -> BitMask { + // If the high bit is set, then the byte must be either: + // 1111_1111 (EMPTY) or 1000_0000 (DELETED). + // So we can just check if the top two bits are 1 by ANDing them. + BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le()) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY` or `DELETED`. + #[inline] + pub fn match_empty_or_deleted(self) -> BitMask { + // A byte is EMPTY or DELETED iff the high bit is set + BitMask((self.0 & repeat(0x80)).to_le()) + } + + /// Returns a `BitMask` indicating all bytes in the group which are full. 
+ #[inline] + pub fn match_full(self) -> BitMask { + self.match_empty_or_deleted().invert() + } + + /// Performs the following transformation on all bytes in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let full = 1000_0000 (true) or 0000_0000 (false) + // !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry) + // !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry) + let full = !self.0 & repeat(0x80); + Group(!full + (full >> 7)) + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/raw/mod.rs b/src/rust/vendor/hashbrown-0.9.1/src/raw/mod.rs new file mode 100644 index 000000000..32fec9847 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/raw/mod.rs @@ -0,0 +1,1924 @@ +use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error}; +use crate::scopeguard::guard; +use crate::TryReserveError; +use core::alloc::Layout; +use core::hint; +use core::iter::FusedIterator; +use core::marker::PhantomData; +use core::mem; +use core::mem::ManuallyDrop; +use core::ptr::NonNull; + +cfg_if! { + // Use the SSE2 implementation if possible: it allows us to scan 16 buckets + // at once instead of 8. We don't bother with AVX since it would require + // runtime dispatch and wouldn't gain us much anyways: the probability of + // finding a match drops off drastically after the first few buckets. + // + // I attempted an implementation on ARM using NEON instructions, but it + // turns out that most NEON instructions have multi-cycle latency, which in + // the end outweighs any gains over the generic implementation. 
+ if #[cfg(all( + target_feature = "sse2", + any(target_arch = "x86", target_arch = "x86_64"), + not(miri) + ))] { + mod sse2; + use sse2 as imp; + } else { + #[path = "generic.rs"] + mod generic; + use generic as imp; + } +} + +mod bitmask; + +use self::bitmask::{BitMask, BitMaskIter}; +use self::imp::Group; + +// Branch prediction hint. This is currently only available on nightly but it +// consistently improves performance by 10-15%. +#[cfg(feature = "nightly")] +use core::intrinsics::{likely, unlikely}; +#[cfg(not(feature = "nightly"))] +#[inline] +fn likely(b: bool) -> bool { + b +} +#[cfg(not(feature = "nightly"))] +#[inline] +fn unlikely(b: bool) -> bool { + b +} + +#[cfg(feature = "nightly")] +#[cfg_attr(feature = "inline-more", inline)] +unsafe fn offset_from(to: *const T, from: *const T) -> usize { + to.offset_from(from) as usize +} +#[cfg(not(feature = "nightly"))] +#[cfg_attr(feature = "inline-more", inline)] +unsafe fn offset_from(to: *const T, from: *const T) -> usize { + (to as usize - from as usize) / mem::size_of::() +} + +/// Whether memory allocation errors should return an error or abort. +#[derive(Copy, Clone)] +enum Fallibility { + Fallible, + Infallible, +} + +impl Fallibility { + /// Error to return on capacity overflow. + #[cfg_attr(feature = "inline-more", inline)] + fn capacity_overflow(self) -> TryReserveError { + match self { + Fallibility::Fallible => TryReserveError::CapacityOverflow, + Fallibility::Infallible => panic!("Hash table capacity overflow"), + } + } + + /// Error to return on allocation error. + #[cfg_attr(feature = "inline-more", inline)] + fn alloc_err(self, layout: Layout) -> TryReserveError { + match self { + Fallibility::Fallible => TryReserveError::AllocError { layout }, + Fallibility::Infallible => handle_alloc_error(layout), + } + } +} + +/// Control byte value for an empty bucket. +const EMPTY: u8 = 0b1111_1111; + +/// Control byte value for a deleted bucket. 
+const DELETED: u8 = 0b1000_0000; + +/// Checks whether a control byte represents a full bucket (top bit is clear). +#[inline] +fn is_full(ctrl: u8) -> bool { + ctrl & 0x80 == 0 +} + +/// Checks whether a control byte represents a special value (top bit is set). +#[inline] +fn is_special(ctrl: u8) -> bool { + ctrl & 0x80 != 0 +} + +/// Checks whether a special control value is EMPTY (just check 1 bit). +#[inline] +fn special_is_empty(ctrl: u8) -> bool { + debug_assert!(is_special(ctrl)); + ctrl & 0x01 != 0 +} + +/// Primary hash function, used to select the initial bucket to probe from. +#[inline] +#[allow(clippy::cast_possible_truncation)] +fn h1(hash: u64) -> usize { + // On 32-bit platforms we simply ignore the higher hash bits. + hash as usize +} + +/// Secondary hash function, saved in the low 7 bits of the control byte. +#[inline] +#[allow(clippy::cast_possible_truncation)] +fn h2(hash: u64) -> u8 { + // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit + // value, some hash functions (such as FxHash) produce a usize result + // instead, which means that the top 32 bits are 0 on 32-bit platforms. + let hash_len = usize::min(mem::size_of::(), mem::size_of::()); + let top7 = hash >> (hash_len * 8 - 7); + (top7 & 0x7f) as u8 // truncation +} + +/// Probe sequence based on triangular numbers, which is guaranteed (since our +/// table size is a power of two) to visit every group of elements exactly once. +/// +/// A triangular probe has us jump by 1 more group every time. So first we +/// jump by 1 group (meaning we just continue our linear scan), then 2 groups +/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on. 
+/// +/// Proof that the probe will visit every group in the table: +/// +struct ProbeSeq { + bucket_mask: usize, + pos: usize, + stride: usize, +} + +impl Iterator for ProbeSeq { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + // We should have found an empty bucket by now and ended the probe. + debug_assert!( + self.stride <= self.bucket_mask, + "Went past end of probe sequence" + ); + + let result = self.pos; + self.stride += Group::WIDTH; + self.pos += self.stride; + self.pos &= self.bucket_mask; + Some(result) + } +} + +/// Returns the number of buckets needed to hold the given number of items, +/// taking the maximum load factor into account. +/// +/// Returns `None` if an overflow occurs. +// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258 +#[cfg_attr(target_os = "emscripten", inline(never))] +#[cfg_attr(not(target_os = "emscripten"), inline)] +fn capacity_to_buckets(cap: usize) -> Option { + debug_assert_ne!(cap, 0); + + // For small tables we require at least 1 empty bucket so that lookups are + // guaranteed to terminate if an element doesn't exist in the table. + if cap < 8 { + // We don't bother with a table size of 2 buckets since that can only + // hold a single element. Instead we skip directly to a 4 bucket table + // which can hold 3 elements. + return Some(if cap < 4 { 4 } else { 8 }); + } + + // Otherwise require 1/8 buckets to be empty (87.5% load) + // + // Be careful when modifying this, calculate_layout relies on the + // overflow check here. + let adjusted_cap = cap.checked_mul(8)? / 7; + + // Any overflows will have been caught by the checked_mul. Also, any + // rounding errors from the division above will be cleaned up by + // next_power_of_two (which can't overflow because of the previous divison). + Some(adjusted_cap.next_power_of_two()) +} + +/// Returns the maximum effective capacity for the given bucket mask, taking +/// the maximum load factor into account. 
+#[inline] +fn bucket_mask_to_capacity(bucket_mask: usize) -> usize { + if bucket_mask < 8 { + // For tables with 1/2/4/8 buckets, we always reserve one empty slot. + // Keep in mind that the bucket mask is one less than the bucket count. + bucket_mask + } else { + // For larger tables we reserve 12.5% of the slots as empty. + ((bucket_mask + 1) / 8) * 7 + } +} + +/// Returns a Layout which describes the allocation required for a hash table, +/// and the offset of the control bytes in the allocation. +/// (the offset is also one past last element of buckets) +/// +/// Returns `None` if an overflow occurs. +#[cfg_attr(feature = "inline-more", inline)] +#[cfg(feature = "nightly")] +fn calculate_layout(buckets: usize) -> Option<(Layout, usize)> { + debug_assert!(buckets.is_power_of_two()); + + // Array of buckets + let data = Layout::array::(buckets).ok()?; + + // Array of control bytes. This must be aligned to the group size. + // + // We add `Group::WIDTH` control bytes at the end of the array which + // replicate the bytes at the start of the array and thus avoids the need to + // perform bounds-checking while probing. + // + // There is no possible overflow here since buckets is a power of two and + // Group::WIDTH is a small number. + let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) }; + + data.extend(ctrl).ok() +} + +/// Returns a Layout which describes the allocation required for a hash table, +/// and the offset of the control bytes in the allocation. +/// (the offset is also one past last element of buckets) +/// +/// Returns `None` if an overflow occurs. +#[cfg_attr(feature = "inline-more", inline)] +#[cfg(not(feature = "nightly"))] +fn calculate_layout(buckets: usize) -> Option<(Layout, usize)> { + debug_assert!(buckets.is_power_of_two()); + + // Manual layout calculation since Layout methods are not yet stable. 
+ let ctrl_align = usize::max(mem::align_of::(), Group::WIDTH); + let ctrl_offset = mem::size_of::() + .checked_mul(buckets)? + .checked_add(ctrl_align - 1)? + & !(ctrl_align - 1); + let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?; + + Some(( + unsafe { Layout::from_size_align_unchecked(len, ctrl_align) }, + ctrl_offset, + )) +} + +/// A reference to a hash table bucket containing a `T`. +/// +/// This is usually just a pointer to the element itself. However if the element +/// is a ZST, then we instead track the index of the element in the table so +/// that `erase` works properly. +pub struct Bucket { + // Actually it is pointer to next element than element itself + // this is needed to maintain pointer arithmetic invariants + // keeping direct pointer to element introduces difficulty. + // Using `NonNull` for variance and niche layout + ptr: NonNull, +} + +// This Send impl is needed for rayon support. This is safe since Bucket is +// never exposed in a public API. +unsafe impl Send for Bucket {} + +impl Clone for Bucket { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { ptr: self.ptr } + } +} + +impl Bucket { + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn from_base_index(base: NonNull, index: usize) -> Self { + let ptr = if mem::size_of::() == 0 { + // won't overflow because index must be less than length + (index + 1) as *mut T + } else { + base.as_ptr().sub(index) + }; + Self { + ptr: NonNull::new_unchecked(ptr), + } + } + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn to_base_index(&self, base: NonNull) -> usize { + if mem::size_of::() == 0 { + self.ptr.as_ptr() as usize - 1 + } else { + offset_from(base.as_ptr(), self.ptr.as_ptr()) + } + } + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn as_ptr(&self) -> *mut T { + if mem::size_of::() == 0 { + // Just return an arbitrary ZST pointer which is properly aligned + mem::align_of::() as *mut T + } else { + self.ptr.as_ptr().sub(1) + 
} + } + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn next_n(&self, offset: usize) -> Self { + let ptr = if mem::size_of::() == 0 { + (self.ptr.as_ptr() as usize + offset) as *mut T + } else { + self.ptr.as_ptr().sub(offset) + }; + Self { + ptr: NonNull::new_unchecked(ptr), + } + } + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn drop(&self) { + self.as_ptr().drop_in_place(); + } + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn read(&self) -> T { + self.as_ptr().read() + } + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn write(&self, val: T) { + self.as_ptr().write(val); + } + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn as_ref<'a>(&self) -> &'a T { + &*self.as_ptr() + } + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn as_mut<'a>(&self) -> &'a mut T { + &mut *self.as_ptr() + } + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) { + self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1); + } +} + +/// A raw hash table with an unsafe API. +pub struct RawTable { + // Mask to get an index from a hash value. The value is one less than the + // number of buckets in the table. + bucket_mask: usize, + + // [Padding], T1, T2, ..., Tlast, C1, C2, ... + // ^ points here + ctrl: NonNull, + + // Number of elements that can be inserted before we need to grow the table + growth_left: usize, + + // Number of elements in the table, only really used by len() + items: usize, + + // Tell dropck that we own instances of T. + marker: PhantomData, +} + +impl RawTable { + /// Creates a new empty hash table without allocating any memory. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never written to + /// due to our load factor forcing us to always have at least 1 free bucket. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub const fn new() -> Self { + Self { + // Be careful to cast the entire slice to a raw pointer. + ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) }, + bucket_mask: 0, + items: 0, + growth_left: 0, + marker: PhantomData, + } + } + + /// Allocates a new hash table with the given number of buckets. + /// + /// The control bytes are left uninitialized. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new_uninitialized( + buckets: usize, + fallability: Fallibility, + ) -> Result { + debug_assert!(buckets.is_power_of_two()); + + // Avoid `Option::ok_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = match calculate_layout::(buckets) { + Some(lco) => lco, + None => return Err(fallability.capacity_overflow()), + }; + let ptr = match NonNull::new(alloc(layout)) { + Some(ptr) => ptr, + None => return Err(fallability.alloc_err(layout)), + }; + let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset)); + Ok(Self { + ctrl, + bucket_mask: buckets - 1, + items: 0, + growth_left: bucket_mask_to_capacity(buckets - 1), + marker: PhantomData, + }) + } + + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating. + fn fallible_with_capacity( + capacity: usize, + fallability: Fallibility, + ) -> Result { + if capacity == 0 { + Ok(Self::new()) + } else { + unsafe { + // Avoid `Option::ok_or_else` because it bloats LLVM IR. + let buckets = match capacity_to_buckets(capacity) { + Some(buckets) => buckets, + None => return Err(fallability.capacity_overflow()), + }; + let result = Self::new_uninitialized(buckets, fallability)?; + result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes()); + + Ok(result) + } + } + } + + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating. 
+ #[cfg(feature = "raw")] + pub fn try_with_capacity(capacity: usize) -> Result { + Self::fallible_with_capacity(capacity, Fallibility::Fallible) + } + + /// Allocates a new hash table with at least enough capacity for inserting + /// the given number of elements without reallocating. + pub fn with_capacity(capacity: usize) -> Self { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + match Self::fallible_with_capacity(capacity, Fallibility::Infallible) { + Ok(capacity) => capacity, + Err(_) => unsafe { hint::unreachable_unchecked() }, + } + } + + /// Deallocates the table without dropping any entries. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn free_buckets(&mut self) { + // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = match calculate_layout::(self.buckets()) { + Some(lco) => lco, + None => hint::unreachable_unchecked(), + }; + dealloc(self.ctrl.as_ptr().sub(ctrl_offset), layout); + } + + /// Returns pointer to one past last element of data table. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn data_end(&self) -> NonNull { + NonNull::new_unchecked(self.ctrl.as_ptr() as *mut T) + } + + /// Returns pointer to start of data table. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "nightly")] + pub unsafe fn data_start(&self) -> *mut T { + self.data_end().as_ptr().wrapping_sub(self.buckets()) + } + + /// Returns the index of a bucket from a `Bucket`. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn bucket_index(&self, bucket: &Bucket) -> usize { + bucket.to_base_index(self.data_end()) + } + + /// Returns a pointer to a control byte. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn ctrl(&self, index: usize) -> *mut u8 { + debug_assert!(index < self.num_ctrl_bytes()); + self.ctrl.as_ptr().add(index) + } + + /// Returns a pointer to an element in the table. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn bucket(&self, index: usize) -> Bucket { + debug_assert_ne!(self.bucket_mask, 0); + debug_assert!(index < self.buckets()); + Bucket::from_base_index(self.data_end(), index) + } + + /// Erases an element from the table without dropping it. + #[cfg_attr(feature = "inline-more", inline)] + #[deprecated(since = "0.8.1", note = "use erase or remove instead")] + pub unsafe fn erase_no_drop(&mut self, item: &Bucket) { + let index = self.bucket_index(item); + debug_assert!(is_full(*self.ctrl(index))); + let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask; + let empty_before = Group::load(self.ctrl(index_before)).match_empty(); + let empty_after = Group::load(self.ctrl(index)).match_empty(); + + // If we are inside a continuous block of Group::WIDTH full or deleted + // cells then a probe window may have seen a full block when trying to + // insert. We therefore need to keep that block non-empty so that + // lookups will continue searching to the next probe window. + // + // Note that in this context `leading_zeros` refers to the bytes at the + // end of a group, while `trailing_zeros` refers to the bytes at the + // begining of a group. + let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH { + DELETED + } else { + self.growth_left += 1; + EMPTY + }; + self.set_ctrl(index, ctrl); + self.items -= 1; + } + + /// Erases an element from the table, dropping it in place. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::needless_pass_by_value)] + #[allow(deprecated)] + pub unsafe fn erase(&mut self, item: Bucket) { + // Erase the element from the table first since drop might panic. + self.erase_no_drop(&item); + item.drop(); + } + + /// Finds and erases an element from the table, dropping it in place. + /// Returns true if an element was found. 
+ #[cfg(feature = "raw")] + #[cfg_attr(feature = "inline-more", inline)] + pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool { + // Avoid `Option::map` because it bloats LLVM IR. + if let Some(bucket) = self.find(hash, eq) { + unsafe { self.erase(bucket) }; + true + } else { + false + } + } + + /// Removes an element from the table, returning it. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::needless_pass_by_value)] + #[allow(deprecated)] + pub unsafe fn remove(&mut self, item: Bucket) -> T { + self.erase_no_drop(&item); + item.read() + } + + /// Finds and removes an element from the table, returning it. + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option { + // Avoid `Option::map` because it bloats LLVM IR. + match self.find(hash, eq) { + Some(bucket) => Some(unsafe { self.remove(bucket) }), + None => None, + } + } + + /// Returns an iterator for a probe sequence on the table. + /// + /// This iterator never terminates, but is guaranteed to visit each bucket + /// group exactly once. The loop using `probe_seq` must terminate upon + /// reaching a group containing an empty bucket. + #[cfg_attr(feature = "inline-more", inline)] + fn probe_seq(&self, hash: u64) -> ProbeSeq { + ProbeSeq { + bucket_mask: self.bucket_mask, + pos: h1(hash) & self.bucket_mask, + stride: 0, + } + } + + /// Sets a control byte, and possibly also the replicated control byte at + /// the end of the array. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn set_ctrl(&self, index: usize, ctrl: u8) { + // Replicate the first Group::WIDTH control bytes at the end of + // the array without using a branch: + // - If index >= Group::WIDTH then index == index2. + // - Otherwise index2 == self.bucket_mask + 1 + index. 
+ // + // The very last replicated control byte is never actually read because + // we mask the initial index for unaligned loads, but we write it + // anyways because it makes the set_ctrl implementation simpler. + // + // If there are fewer buckets than Group::WIDTH then this code will + // replicate the buckets at the end of the trailing group. For example + // with 2 buckets and a group size of 4, the control bytes will look + // like this: + // + // Real | Replicated + // --------------------------------------------- + // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] | + // --------------------------------------------- + let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH; + + *self.ctrl(index) = ctrl; + *self.ctrl(index2) = ctrl; + } + + /// Searches for an empty or deleted bucket which is suitable for inserting + /// a new element. + /// + /// There must be at least 1 empty bucket in the table. + #[cfg_attr(feature = "inline-more", inline)] + fn find_insert_slot(&self, hash: u64) -> usize { + for pos in self.probe_seq(hash) { + unsafe { + let group = Group::load(self.ctrl(pos)); + if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() { + let result = (pos + bit) & self.bucket_mask; + + // In tables smaller than the group width, trailing control + // bytes outside the range of the table are filled with + // EMPTY entries. These will unfortunately trigger a + // match, but once masked may point to a full bucket that + // is already occupied. We detect this situation here and + // perform a second scan starting at the begining of the + // table. This second scan is guaranteed to find an empty + // slot (due to the load factor) before hitting the trailing + // control bytes (containing EMPTY). 
+ if unlikely(is_full(*self.ctrl(result))) { + debug_assert!(self.bucket_mask < Group::WIDTH); + debug_assert_ne!(pos, 0); + return Group::load_aligned(self.ctrl(0)) + .match_empty_or_deleted() + .lowest_set_bit_nonzero(); + } else { + return result; + } + } + } + } + + // probe_seq never returns. + unreachable!(); + } + + /// Marks all table buckets as empty without dropping their contents. + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear_no_drop(&mut self) { + if !self.is_empty_singleton() { + unsafe { + self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes()); + } + } + self.items = 0; + self.growth_left = bucket_mask_to_capacity(self.bucket_mask); + } + + /// Removes all elements from the table without freeing the backing memory. + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear(&mut self) { + // Ensure that the table is reset even if one of the drops panic + let self_ = guard(self, |self_| self_.clear_no_drop()); + + if mem::needs_drop::() && self_.len() != 0 { + unsafe { + for item in self_.iter() { + item.drop(); + } + } + } + } + + /// Shrinks the table to fit `max(self.len(), min_size)` elements. + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) { + // Calculate the minimal number of elements that we need to reserve + // space for. + let min_size = usize::max(self.items, min_size); + if min_size == 0 { + *self = Self::new(); + return; + } + + // Calculate the number of buckets that we need for this number of + // elements. If the calculation overflows then the requested bucket + // count must be larger than what we have right and nothing needs to be + // done. + let min_buckets = match capacity_to_buckets(min_size) { + Some(buckets) => buckets, + None => return, + }; + + // If we have more buckets than we need, shrink the table. 
+ if min_buckets < self.buckets() { + // Fast path if the table is empty + if self.items == 0 { + *self = Self::with_capacity(min_size) + } else { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + if self + .resize(min_size, hasher, Fallibility::Infallible) + .is_err() + { + unsafe { hint::unreachable_unchecked() } + } + } + } + } + + /// Ensures that at least `additional` items can be inserted into the table + /// without reallocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { + if additional > self.growth_left { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + if self + .reserve_rehash(additional, hasher, Fallibility::Infallible) + .is_err() + { + unsafe { hint::unreachable_unchecked() } + } + } + } + + /// Tries to ensure that at least `additional` items can be inserted into + /// the table without reallocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve( + &mut self, + additional: usize, + hasher: impl Fn(&T) -> u64, + ) -> Result<(), TryReserveError> { + if additional > self.growth_left { + self.reserve_rehash(additional, hasher, Fallibility::Fallible) + } else { + Ok(()) + } + } + + /// Out-of-line slow path for `reserve` and `try_reserve`. + #[cold] + #[inline(never)] + fn reserve_rehash( + &mut self, + additional: usize, + hasher: impl Fn(&T) -> u64, + fallability: Fallibility, + ) -> Result<(), TryReserveError> { + // Avoid `Option::ok_or_else` because it bloats LLVM IR. + let new_items = match self.items.checked_add(additional) { + Some(new_items) => new_items, + None => return Err(fallability.capacity_overflow()), + }; + let full_capacity = bucket_mask_to_capacity(self.bucket_mask); + if new_items <= full_capacity / 2 { + // Rehash in-place without re-allocating if we have plenty of spare + // capacity that is locked up due to DELETED entries. 
+ self.rehash_in_place(hasher); + Ok(()) + } else { + // Otherwise, conservatively resize to at least the next size up + // to avoid churning deletes into frequent rehashes. + self.resize( + usize::max(new_items, full_capacity + 1), + hasher, + fallability, + ) + } + } + + /// Rehashes the contents of the table in place (i.e. without changing the + /// allocation). + /// + /// If `hasher` panics then some the table's contents may be lost. + fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) { + unsafe { + // Bulk convert all full control bytes to DELETED, and all DELETED + // control bytes to EMPTY. This effectively frees up all buckets + // containing a DELETED entry. + for i in (0..self.buckets()).step_by(Group::WIDTH) { + let group = Group::load_aligned(self.ctrl(i)); + let group = group.convert_special_to_empty_and_full_to_deleted(); + group.store_aligned(self.ctrl(i)); + } + + // Fix up the trailing control bytes. See the comments in set_ctrl + // for the handling of tables smaller than the group width. + if self.buckets() < Group::WIDTH { + self.ctrl(0) + .copy_to(self.ctrl(Group::WIDTH), self.buckets()); + } else { + self.ctrl(0) + .copy_to(self.ctrl(self.buckets()), Group::WIDTH); + } + + // If the hash function panics then properly clean up any elements + // that we haven't rehashed yet. We unfortunately can't preserve the + // element since we lost their hash and have no way of recovering it + // without risking another panic. + let mut guard = guard(self, |self_| { + if mem::needs_drop::() { + for i in 0..self_.buckets() { + if *self_.ctrl(i) == DELETED { + self_.set_ctrl(i, EMPTY); + self_.bucket(i).drop(); + self_.items -= 1; + } + } + } + self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items; + }); + + // At this point, DELETED elements are elements that we haven't + // rehashed yet. Find them and re-insert them at their ideal + // position. 
+ 'outer: for i in 0..guard.buckets() { + if *guard.ctrl(i) != DELETED { + continue; + } + 'inner: loop { + // Hash the current item + let item = guard.bucket(i); + let hash = hasher(item.as_ref()); + + // Search for a suitable place to put it + let new_i = guard.find_insert_slot(hash); + + // Probing works by scanning through all of the control + // bytes in groups, which may not be aligned to the group + // size. If both the new and old position fall within the + // same unaligned group, then there is no benefit in moving + // it and we can just continue to the next item. + let probe_index = |pos: usize| { + (pos.wrapping_sub(guard.probe_seq(hash).pos) & guard.bucket_mask) + / Group::WIDTH + }; + if likely(probe_index(i) == probe_index(new_i)) { + guard.set_ctrl(i, h2(hash)); + continue 'outer; + } + + // We are moving the current item to a new position. Write + // our H2 to the control byte of the new position. + let prev_ctrl = *guard.ctrl(new_i); + guard.set_ctrl(new_i, h2(hash)); + + if prev_ctrl == EMPTY { + // If the target slot is empty, simply move the current + // element into the new slot and clear the old control + // byte. + guard.set_ctrl(i, EMPTY); + guard.bucket(new_i).copy_from_nonoverlapping(&item); + continue 'outer; + } else { + // If the target slot is occupied, swap the two elements + // and then continue processing the element that we just + // swapped into the old slot. + debug_assert_eq!(prev_ctrl, DELETED); + mem::swap(guard.bucket(new_i).as_mut(), item.as_mut()); + continue 'inner; + } + } + } + + guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items; + mem::forget(guard); + } + } + + /// Allocates a new table of a different size and moves the contents of the + /// current table into it. 
+ fn resize( + &mut self, + capacity: usize, + hasher: impl Fn(&T) -> u64, + fallability: Fallibility, + ) -> Result<(), TryReserveError> { + unsafe { + debug_assert!(self.items <= capacity); + + // Allocate and initialize the new table. + let mut new_table = Self::fallible_with_capacity(capacity, fallability)?; + new_table.growth_left -= self.items; + new_table.items = self.items; + + // The hash function may panic, in which case we simply free the new + // table without dropping any elements that may have been copied into + // it. + // + // This guard is also used to free the old table on success, see + // the comment at the bottom of this function. + let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| { + if !new_table.is_empty_singleton() { + new_table.free_buckets(); + } + }); + + // Copy all elements to the new table. + for item in self.iter() { + // This may panic. + let hash = hasher(item.as_ref()); + + // We can use a simpler version of insert() here since: + // - there are no DELETED entries. + // - we know there is enough space in the table. + // - all elements are unique. + let index = new_table.find_insert_slot(hash); + new_table.set_ctrl(index, h2(hash)); + new_table.bucket(index).copy_from_nonoverlapping(&item); + } + + // We successfully copied all elements without panicking. Now replace + // self with the new table. The old table will have its memory freed but + // the items will not be dropped (since they have been moved into the + // new table). + mem::swap(self, &mut new_table); + + Ok(()) + } + } + + /// Inserts a new element into the table, and returns its raw bucket. + /// + /// This does not check if the given element already exists in the table. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { + unsafe { + let mut index = self.find_insert_slot(hash); + + // We can avoid growing the table once we have reached our load + // factor if we are replacing a tombstone. This works since the + // number of EMPTY slots does not change in this case. + let old_ctrl = *self.ctrl(index); + if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) { + self.reserve(1, hasher); + index = self.find_insert_slot(hash); + } + + let bucket = self.bucket(index); + self.growth_left -= special_is_empty(old_ctrl) as usize; + self.set_ctrl(index, h2(hash)); + bucket.write(value); + self.items += 1; + bucket + } + } + + /// Inserts a new element into the table, and returns a mutable reference to it. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T { + unsafe { self.insert(hash, value, hasher).as_mut() } + } + + /// Inserts a new element into the table, without growing the table. + /// + /// There must be enough space in the table to insert the new element. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(any(feature = "raw", feature = "rustc-internal-api"))] + pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket { + unsafe { + let index = self.find_insert_slot(hash); + let bucket = self.bucket(index); + + // If we are replacing a DELETED entry then we don't need to update + // the load counter. 
+ let old_ctrl = *self.ctrl(index); + self.growth_left -= special_is_empty(old_ctrl) as usize; + + self.set_ctrl(index, h2(hash)); + bucket.write(value); + self.items += 1; + bucket + } + } + + /// Temporary removes a bucket, applying the given function to the removed + /// element and optionally put back the returned value in the same bucket. + /// + /// Returns `true` if the bucket still contains an element + /// + /// This does not check if the given bucket is actually occupied. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn replace_bucket_with(&mut self, bucket: Bucket, f: F) -> bool + where + F: FnOnce(T) -> Option, + { + let index = self.bucket_index(&bucket); + let old_ctrl = *self.ctrl(index); + debug_assert!(is_full(old_ctrl)); + let old_growth_left = self.growth_left; + let item = self.remove(bucket); + if let Some(new_item) = f(item) { + self.growth_left = old_growth_left; + self.set_ctrl(index, old_ctrl); + self.items += 1; + self.bucket(index).write(new_item); + true + } else { + false + } + } + + /// Searches for an element in the table. + #[inline] + pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> { + unsafe { + for bucket in self.iter_hash(hash) { + let elm = bucket.as_ref(); + if likely(eq(elm)) { + return Some(bucket); + } + } + None + } + } + + /// Gets a reference to an element in the table. + #[inline] + pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { + // Avoid `Option::map` because it bloats LLVM IR. + match self.find(hash, eq) { + Some(bucket) => Some(unsafe { bucket.as_ref() }), + None => None, + } + } + + /// Gets a mutable reference to an element in the table. + #[inline] + pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.find(hash, eq) { + Some(bucket) => Some(unsafe { bucket.as_mut() }), + None => None, + } + } + + /// Returns the number of elements the map can hold without reallocating. + /// + /// This number is a lower bound; the table might be able to hold + /// more, but is guaranteed to be able to hold at least this many. + #[cfg_attr(feature = "inline-more", inline)] + pub fn capacity(&self) -> usize { + self.items + self.growth_left + } + + /// Returns the number of elements in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub fn len(&self) -> usize { + self.items + } + + /// Returns the number of buckets in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub fn buckets(&self) -> usize { + self.bucket_mask + 1 + } + + /// Returns the number of control bytes in the table. + #[cfg_attr(feature = "inline-more", inline)] + fn num_ctrl_bytes(&self) -> usize { + self.bucket_mask + 1 + Group::WIDTH + } + + /// Returns whether this table points to the empty singleton with a capacity + /// of 0. + #[cfg_attr(feature = "inline-more", inline)] + fn is_empty_singleton(&self) -> bool { + self.bucket_mask == 0 + } + + /// Returns an iterator over every element in the table. It is up to + /// the caller to ensure that the `RawTable` outlives the `RawIter`. + /// Because we cannot make the `next` method unsafe on the `RawIter` + /// struct, we have to make the `iter` method unsafe. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn iter(&self) -> RawIter { + let data = Bucket::from_base_index(self.data_end(), 0); + RawIter { + iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()), + items: self.items, + } + } + + /// Returns an iterator over occupied buckets that could match a given hash. + /// + /// In rare cases, the iterator may return a bucket with a different hash. + /// + /// It is up to the caller to ensure that the `RawTable` outlives the + /// `RawIterHash`. 
Because we cannot make the `next` method unsafe on the + /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T> { + RawIterHash::new(self, hash) + } + + /// Returns an iterator which removes all elements from the table without + /// freeing the memory. + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain(&mut self) -> RawDrain<'_, T> { + unsafe { + let iter = self.iter(); + self.drain_iter_from(iter) + } + } + + /// Returns an iterator which removes all elements from the table without + /// freeing the memory. + /// + /// Iteration starts at the provided iterator's current location. + /// + /// It is up to the caller to ensure that the iterator is valid for this + /// `RawTable` and covers all items that remain in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn drain_iter_from(&mut self, iter: RawIter) -> RawDrain<'_, T> { + debug_assert_eq!(iter.len(), self.len()); + RawDrain { + iter, + table: ManuallyDrop::new(mem::replace(self, Self::new())), + orig_table: NonNull::from(self), + marker: PhantomData, + } + } + + /// Returns an iterator which consumes all elements from the table. + /// + /// Iteration starts at the provided iterator's current location. + /// + /// It is up to the caller to ensure that the iterator is valid for this + /// `RawTable` and covers all items that remain in the table. + pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter { + debug_assert_eq!(iter.len(), self.len()); + + let alloc = self.into_alloc(); + RawIntoIter { + iter, + alloc, + marker: PhantomData, + } + } + + /// Converts the table into a raw allocation. The contents of the table + /// should be dropped using a `RawIter` before freeing the allocation. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub(crate) fn into_alloc(self) -> Option<(NonNull, Layout)> { + let alloc = if self.is_empty_singleton() { + None + } else { + // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = match calculate_layout::(self.buckets()) { + Some(lco) => lco, + None => unsafe { hint::unreachable_unchecked() }, + }; + Some(( + unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) }, + layout, + )) + }; + mem::forget(self); + alloc + } +} + +unsafe impl Send for RawTable where T: Send {} +unsafe impl Sync for RawTable where T: Sync {} + +impl Clone for RawTable { + fn clone(&self) -> Self { + if self.is_empty_singleton() { + Self::new() + } else { + unsafe { + let mut new_table = ManuallyDrop::new( + // Avoid `Result::ok_or_else` because it bloats LLVM IR. + match Self::new_uninitialized(self.buckets(), Fallibility::Infallible) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }, + ); + + new_table.clone_from_spec(self, |new_table| { + // We need to free the memory allocated for the new table. + new_table.free_buckets(); + }); + + // Return the newly created table. + ManuallyDrop::into_inner(new_table) + } + } + } + + fn clone_from(&mut self, source: &Self) { + if source.is_empty_singleton() { + *self = Self::new(); + } else { + unsafe { + // First, drop all our elements without clearing the control bytes. + if mem::needs_drop::() && self.len() != 0 { + for item in self.iter() { + item.drop(); + } + } + + // If necessary, resize our table to match the source. + if self.buckets() != source.buckets() { + // Skip our drop by using ptr::write. + if !self.is_empty_singleton() { + self.free_buckets(); + } + (self as *mut Self).write( + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. 
+ match Self::new_uninitialized(source.buckets(), Fallibility::Infallible) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }, + ); + } + + self.clone_from_spec(source, |self_| { + // We need to leave the table in an empty state. + self_.clear_no_drop() + }); + } + } + } +} + +/// Specialization of `clone_from` for `Copy` types +trait RawTableClone { + unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)); +} +impl RawTableClone for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + default_fn! { + unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) { + self.clone_from_impl(source, on_panic); + } + } +} +#[cfg(feature = "nightly")] +impl RawTableClone for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) { + source + .ctrl(0) + .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes()); + source + .data_start() + .copy_to_nonoverlapping(self.data_start(), self.buckets()); + + self.items = source.items; + self.growth_left = source.growth_left; + } +} + +impl RawTable { + /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) { + // Copy the control bytes unchanged. We do this in a single pass + source + .ctrl(0) + .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes()); + + // The cloning of elements may panic, in which case we need + // to make sure we drop only the elements that have been + // cloned so far. 
+ let mut guard = guard((0, &mut *self), |(index, self_)| { + if mem::needs_drop::() && self_.len() != 0 { + for i in 0..=*index { + if is_full(*self_.ctrl(i)) { + self_.bucket(i).drop(); + } + } + } + + // Depending on whether we were called from clone or clone_from, we + // either need to free the memory for the destination table or just + // clear the control bytes. + on_panic(self_); + }); + + for from in source.iter() { + let index = source.bucket_index(&from); + let to = guard.1.bucket(index); + to.write(from.as_ref().clone()); + + // Update the index in case we need to unwind. + guard.0 = index; + } + + // Successfully cloned all items, no need to clean up. + mem::forget(guard); + + self.items = source.items; + self.growth_left = source.growth_left; + } + + /// Variant of `clone_from` to use when a hasher is available. + #[cfg(feature = "raw")] + pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) { + // If we have enough capacity in the table, just clear it and insert + // elements one by one. We don't do this if we have the same number of + // buckets as the source since we can just copy the contents directly + // in that case. + if self.buckets() != source.buckets() + && bucket_mask_to_capacity(self.bucket_mask) >= source.len() + { + self.clear(); + + let guard_self = guard(&mut *self, |self_| { + // Clear the partially copied table if a panic occurs, otherwise + // items and growth_left will be out of sync with the contents + // of the table. + self_.clear(); + }); + + unsafe { + for item in source.iter() { + // This may panic. + let item = item.as_ref().clone(); + let hash = hasher(&item); + + // We can use a simpler version of insert() here since: + // - there are no DELETED entries. + // - we know there is enough space in the table. + // - all elements are unique. 
+ let index = guard_self.find_insert_slot(hash); + guard_self.set_ctrl(index, h2(hash)); + guard_self.bucket(index).write(item); + } + } + + // Successfully cloned all items, no need to clean up. + mem::forget(guard_self); + + self.items = source.items; + self.growth_left -= source.items; + } else { + self.clone_from(source); + } + } +} + +#[cfg(feature = "nightly")] +unsafe impl<#[may_dangle] T> Drop for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + if !self.is_empty_singleton() { + unsafe { + if mem::needs_drop::() && self.len() != 0 { + for item in self.iter() { + item.drop(); + } + } + self.free_buckets(); + } + } + } +} +#[cfg(not(feature = "nightly"))] +impl Drop for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + if !self.is_empty_singleton() { + unsafe { + if mem::needs_drop::() && self.len() != 0 { + for item in self.iter() { + item.drop(); + } + } + self.free_buckets(); + } + } + } +} + +impl IntoIterator for RawTable { + type Item = T; + type IntoIter = RawIntoIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> RawIntoIter { + unsafe { + let iter = self.iter(); + self.into_iter_from(iter) + } + } +} + +/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does +/// not track an item count. +pub(crate) struct RawIterRange { + // Mask of full buckets in the current group. Bits are cleared from this + // mask as each element is processed. + current_group: BitMask, + + // Pointer to the buckets for the current group. + data: Bucket, + + // Pointer to the next group of control bytes, + // Must be aligned to the group size. + next_ctrl: *const u8, + + // Pointer one past the last control byte of this range. + end: *const u8, +} + +impl RawIterRange { + /// Returns a `RawIterRange` covering a subset of a table. + /// + /// The control byte address must be aligned to the group size. 
+ #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new(ctrl: *const u8, data: Bucket, len: usize) -> Self { + debug_assert_ne!(len, 0); + debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); + let end = ctrl.add(len); + + // Load the first group and advance ctrl to point to the next group + let current_group = Group::load_aligned(ctrl).match_full(); + let next_ctrl = ctrl.add(Group::WIDTH); + + Self { + current_group, + data, + next_ctrl, + end, + } + } + + /// Splits a `RawIterRange` into two halves. + /// + /// Returns `None` if the remaining range is smaller than or equal to the + /// group width. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "rayon")] + pub(crate) fn split(mut self) -> (Self, Option>) { + unsafe { + if self.end <= self.next_ctrl { + // Nothing to split if the group that we are current processing + // is the last one. + (self, None) + } else { + // len is the remaining number of elements after the group that + // we are currently processing. It must be a multiple of the + // group size (small tables are caught by the check above). + let len = offset_from(self.end, self.next_ctrl); + debug_assert_eq!(len % Group::WIDTH, 0); + + // Split the remaining elements into two halves, but round the + // midpoint down in case there is an odd number of groups + // remaining. This ensures that: + // - The tail is at least 1 group long. + // - The split is roughly even considering we still have the + // current group to process. 
+ let mid = (len / 2) & !(Group::WIDTH - 1); + + let tail = Self::new( + self.next_ctrl.add(mid), + self.data.next_n(Group::WIDTH).next_n(mid), + len - mid, + ); + debug_assert_eq!( + self.data.next_n(Group::WIDTH).next_n(mid).ptr, + tail.data.ptr + ); + debug_assert_eq!(self.end, tail.end); + self.end = self.next_ctrl.add(mid); + debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl); + (self, Some(tail)) + } + } + } +} + +// We make raw iterators unconditionally Send and Sync, and let the PhantomData +// in the actual iterator implementations determine the real Send/Sync bounds. +unsafe impl Send for RawIterRange {} +unsafe impl Sync for RawIterRange {} + +impl Clone for RawIterRange { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + next_ctrl: self.next_ctrl, + current_group: self.current_group, + end: self.end, + } + } +} + +impl Iterator for RawIterRange { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option> { + unsafe { + loop { + if let Some(index) = self.current_group.lowest_set_bit() { + self.current_group = self.current_group.remove_lowest_bit(); + return Some(self.data.next_n(index)); + } + + if self.next_ctrl >= self.end { + return None; + } + + // We might read past self.end up to the next group boundary, + // but this is fine because it only occurs on tables smaller + // than the group size where the trailing control bytes are all + // EMPTY. On larger tables self.end is guaranteed to be aligned + // to the group size (since tables are power-of-two sized). + self.current_group = Group::load_aligned(self.next_ctrl).match_full(); + self.data = self.data.next_n(Group::WIDTH); + self.next_ctrl = self.next_ctrl.add(Group::WIDTH); + } + } + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + // We don't have an item count, so just guess based on the range size. 
+ ( + 0, + Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }), + ) + } +} + +impl FusedIterator for RawIterRange {} + +/// Iterator which returns a raw pointer to every full bucket in the table. +/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding that bucket (unless `reflect_remove` is called). +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator (unless `reflect_insert` is called). +/// - The order in which the iterator yields bucket is unspecified and may +/// change in the future. +pub struct RawIter { + pub(crate) iter: RawIterRange, + items: usize, +} + +impl RawIter { + /// Refresh the iterator so that it reflects a removal from the given bucket. + /// + /// For the iterator to remain valid, this method must be called once + /// for each removed bucket before `next` is called again. + /// + /// This method should be called _before_ the removal is made. It is not necessary to call this + /// method if you are removing an item that this iterator yielded in the past. + #[cfg(feature = "raw")] + pub fn reflect_remove(&mut self, b: &Bucket) { + self.reflect_toggle_full(b, false); + } + + /// Refresh the iterator so that it reflects an insertion into the given bucket. + /// + /// For the iterator to remain valid, this method must be called once + /// for each insert before `next` is called again. + /// + /// This method does not guarantee that an insertion of a bucket witha greater + /// index than the last one yielded will be reflected in the iterator. 
+ /// + /// This method should be called _after_ the given insert is made. + #[cfg(feature = "raw")] + pub fn reflect_insert(&mut self, b: &Bucket) { + self.reflect_toggle_full(b, true); + } + + /// Refresh the iterator so that it reflects a change to the state of the given bucket. + #[cfg(feature = "raw")] + fn reflect_toggle_full(&mut self, b: &Bucket, is_insert: bool) { + unsafe { + if b.as_ptr() > self.iter.data.as_ptr() { + // The iterator has already passed the bucket's group. + // So the toggle isn't relevant to this iterator. + return; + } + + if self.iter.next_ctrl < self.iter.end + && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr() + { + // The iterator has not yet reached the bucket's group. + // We don't need to reload anything, but we do need to adjust the item count. + + if cfg!(debug_assertions) { + // Double-check that the user isn't lying to us by checking the bucket state. + // To do that, we need to find its control byte. We know that self.iter.data is + // at self.iter.next_ctrl - Group::WIDTH, so we work from there: + let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset); + // This method should be called _before_ a removal, or _after_ an insert, + // so in both cases the ctrl byte should indicate that the bucket is full. + assert!(is_full(*ctrl)); + } + + if is_insert { + self.items += 1; + } else { + self.items -= 1; + } + + return; + } + + // The iterator is at the bucket group that the toggled bucket is in. + // We need to do two things: + // + // - Determine if the iterator already yielded the toggled bucket. + // If it did, we're done. + // - Otherwise, update the iterator cached group so that it won't + // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket. + // We'll also need ot update the item count accordingly. 
+ if let Some(index) = self.iter.current_group.lowest_set_bit() { + let next_bucket = self.iter.data.next_n(index); + if b.as_ptr() > next_bucket.as_ptr() { + // The toggled bucket is "before" the bucket the iterator would yield next. We + // therefore don't need to do anything --- the iterator has already passed the + // bucket in question. + // + // The item count must already be correct, since a removal or insert "prior" to + // the iterator's position wouldn't affect the item count. + } else { + // The removed bucket is an upcoming bucket. We need to make sure it does _not_ + // get yielded, and also that it's no longer included in the item count. + // + // NOTE: We can't just reload the group here, both since that might reflect + // inserts we've already passed, and because that might inadvertently unset the + // bits for _other_ removals. If we do that, we'd have to also decrement the + // item count for those other bits that we unset. But the presumably subsequent + // call to reflect for those buckets might _also_ decrement the item count. + // Instead, we _just_ flip the bit for the particular bucket the caller asked + // us to reflect. + let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let was_full = self.iter.current_group.flip(our_bit); + debug_assert_ne!(was_full, is_insert); + + if is_insert { + self.items += 1; + } else { + self.items -= 1; + } + + if cfg!(debug_assertions) { + if b.as_ptr() == next_bucket.as_ptr() { + // The removed bucket should no longer be next + debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index)); + } else { + // We should not have changed what bucket comes next. + debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index)); + } + } + } + } else { + // We must have already iterated past the removed item. 
+ } + } + } +} + +impl Clone for RawIter { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + iter: self.iter.clone(), + items: self.items, + } + } +} + +impl Iterator for RawIter { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option> { + if let Some(b) = self.iter.next() { + self.items -= 1; + Some(b) + } else { + // We don't check against items == 0 here to allow the + // compiler to optimize away the item count entirely if the + // iterator length is never queried. + debug_assert_eq!(self.items, 0); + None + } + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + (self.items, Some(self.items)) + } +} + +impl ExactSizeIterator for RawIter {} +impl FusedIterator for RawIter {} + +/// Iterator which consumes a table and returns elements. +pub struct RawIntoIter { + iter: RawIter, + alloc: Option<(NonNull, Layout)>, + marker: PhantomData, +} + +impl RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> RawIter { + self.iter.clone() + } +} + +unsafe impl Send for RawIntoIter where T: Send {} +unsafe impl Sync for RawIntoIter where T: Sync {} + +#[cfg(feature = "nightly")] +unsafe impl<#[may_dangle] T> Drop for RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements + if mem::needs_drop::() && self.iter.len() != 0 { + while let Some(item) = self.iter.next() { + item.drop(); + } + } + + // Free the table + if let Some((ptr, layout)) = self.alloc { + dealloc(ptr.as_ptr(), layout); + } + } + } +} +#[cfg(not(feature = "nightly"))] +impl Drop for RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements + if mem::needs_drop::() && self.iter.len() != 0 { + while let Some(item) = self.iter.next() { + item.drop(); + } + } + + // Free the table + if let Some((ptr, layout)) = 
self.alloc { + dealloc(ptr.as_ptr(), layout); + } + } + } +} + +impl Iterator for RawIntoIter { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + unsafe { Some(self.iter.next()?.read()) } + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for RawIntoIter {} +impl FusedIterator for RawIntoIter {} + +/// Iterator which consumes elements without freeing the table storage. +pub struct RawDrain<'a, T> { + iter: RawIter, + + // The table is moved into the iterator for the duration of the drain. This + // ensures that an empty table is left if the drain iterator is leaked + // without dropping. + table: ManuallyDrop>, + orig_table: NonNull>, + + // We don't use a &'a mut RawTable because we want RawDrain to be + // covariant over T. + marker: PhantomData<&'a RawTable>, +} + +impl RawDrain<'_, T> { + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> RawIter { + self.iter.clone() + } +} + +unsafe impl Send for RawDrain<'_, T> where T: Send {} +unsafe impl Sync for RawDrain<'_, T> where T: Sync {} + +impl Drop for RawDrain<'_, T> { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements. Note that this may panic. + if mem::needs_drop::() && self.iter.len() != 0 { + while let Some(item) = self.iter.next() { + item.drop(); + } + } + + // Reset the contents of the table now that all elements have been + // dropped. + self.table.clear_no_drop(); + + // Move the now empty table back to its original location. 
+ self.orig_table + .as_ptr() + .copy_from_nonoverlapping(&*self.table, 1); + } + } +} + +impl Iterator for RawDrain<'_, T> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + unsafe { + let item = self.iter.next()?; + Some(item.read()) + } + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for RawDrain<'_, T> {} +impl FusedIterator for RawDrain<'_, T> {} + +/// Iterator over occupied buckets that could match a given hash. +/// +/// In rare cases, the iterator may return a bucket with a different hash. +pub struct RawIterHash<'a, T> { + table: &'a RawTable, + + // The top 7 bits of the hash. + h2_hash: u8, + + // The sequence of groups to probe in the search. + probe_seq: ProbeSeq, + + // The current group and its position. + pos: usize, + group: Group, + + // The elements within the group with a matching h2-hash. + bitmask: BitMaskIter, +} + +impl<'a, T> RawIterHash<'a, T> { + fn new(table: &'a RawTable, hash: u64) -> Self { + unsafe { + let h2_hash = h2(hash); + let mut probe_seq = table.probe_seq(hash); + let pos = probe_seq.next().unwrap(); + let group = Group::load(table.ctrl(pos)); + let bitmask = group.match_byte(h2_hash).into_iter(); + + RawIterHash { + table, + h2_hash, + probe_seq, + pos, + group, + bitmask, + } + } + } +} + +impl<'a, T> Iterator for RawIterHash<'a, T> { + type Item = Bucket; + + fn next(&mut self) -> Option> { + unsafe { + loop { + if let Some(bit) = self.bitmask.next() { + let index = (self.pos + bit) & self.table.bucket_mask; + let bucket = self.table.bucket(index); + return Some(bucket); + } + if likely(self.group.match_empty().any_bit_set()) { + return None; + } + self.pos = self.probe_seq.next().unwrap(); + self.group = Group::load(self.table.ctrl(self.pos)); + self.bitmask = self.group.match_byte(self.h2_hash).into_iter(); + } + } + } +} diff --git 
a/src/rust/vendor/hashbrown-0.9.1/src/raw/sse2.rs b/src/rust/vendor/hashbrown-0.9.1/src/raw/sse2.rs new file mode 100644 index 000000000..a27bc0910 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/raw/sse2.rs @@ -0,0 +1,144 @@ +use super::bitmask::BitMask; +use super::EMPTY; +use core::mem; + +#[cfg(target_arch = "x86")] +use core::arch::x86; +#[cfg(target_arch = "x86_64")] +use core::arch::x86_64 as x86; + +pub type BitMaskWord = u16; +pub const BITMASK_STRIDE: usize = 1; +pub const BITMASK_MASK: BitMaskWord = 0xffff; + +/// Abstraction over a group of control bytes which can be scanned in +/// parallel. +/// +/// This implementation uses a 128-bit SSE value. +#[derive(Copy, Clone)] +pub struct Group(x86::__m128i); + +// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859 +#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty bytes, suitable for use as the initial + /// value for an empty hash table. + /// + /// This is guaranteed to be aligned to the group size. + pub const fn static_empty() -> &'static [u8; Group::WIDTH] { + #[repr(C)] + struct AlignedBytes { + _align: [Group; 0], + bytes: [u8; Group::WIDTH], + }; + const ALIGNED_BYTES: AlignedBytes = AlignedBytes { + _align: [], + bytes: [EMPTY; Group::WIDTH], + }; + &ALIGNED_BYTES.bytes + } + + /// Loads a group of bytes starting at the given address. + #[inline] + #[allow(clippy::cast_ptr_alignment)] // unaligned load + pub unsafe fn load(ptr: *const u8) -> Self { + Group(x86::_mm_loadu_si128(ptr as *const _)) + } + + /// Loads a group of bytes starting at the given address, which must be + /// aligned to `mem::align_of::()`. 
+ #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub unsafe fn load_aligned(ptr: *const u8) -> Self { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + Group(x86::_mm_load_si128(ptr as *const _)) + } + + /// Stores the group of bytes to the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub unsafe fn store_aligned(self, ptr: *mut u8) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + x86::_mm_store_si128(ptr as *mut _, self.0); + } + + /// Returns a `BitMask` indicating all bytes in the group which have + /// the given value. + #[inline] + pub fn match_byte(self, byte: u8) -> BitMask { + #[allow( + clippy::cast_possible_wrap, // byte: u8 as i8 + // byte: i32 as u16 + // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the + // upper 16-bits of the i32 are zeroed: + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] + unsafe { + let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(byte as i8)); + BitMask(x86::_mm_movemask_epi8(cmp) as u16) + } + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY`. + #[inline] + pub fn match_empty(self) -> BitMask { + self.match_byte(EMPTY) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY` or `DELETED`. + #[inline] + pub fn match_empty_or_deleted(self) -> BitMask { + #[allow( + // byte: i32 as u16 + // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the + // upper 16-bits of the i32 are zeroed: + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] + unsafe { + // A byte is EMPTY or DELETED iff the high bit is set + BitMask(x86::_mm_movemask_epi8(self.0) as u16) + } + } + + /// Returns a `BitMask` indicating all bytes in the group which are full. 
+ #[inline] + pub fn match_full(&self) -> BitMask { + self.match_empty_or_deleted().invert() + } + + /// Performs the following transformation on all bytes in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false) + // 1111_1111 | 1000_0000 = 1111_1111 + // 0000_0000 | 1000_0000 = 1000_0000 + #[allow( + clippy::cast_possible_wrap, // byte: 0x80_u8 as i8 + )] + unsafe { + let zero = x86::_mm_setzero_si128(); + let special = x86::_mm_cmpgt_epi8(zero, self.0); + Group(x86::_mm_or_si128( + special, + x86::_mm_set1_epi8(0x80_u8 as i8), + )) + } + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/rustc_entry.rs b/src/rust/vendor/hashbrown-0.9.1/src/rustc_entry.rs new file mode 100644 index 000000000..b6ea7bc5c --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/rustc_entry.rs @@ -0,0 +1,618 @@ +use self::RustcEntry::*; +use crate::map::{make_hash, Drain, HashMap, IntoIter, Iter, IterMut}; +use crate::raw::{Bucket, RawTable}; +use core::fmt::{self, Debug}; +use core::hash::{BuildHasher, Hash}; +use core::mem; + +impl HashMap +where + K: Eq + Hash, + S: BuildHasher, +{ + /// Gets the given key's corresponding entry in the map for in-place manipulation. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut letters = HashMap::new(); + /// + /// for ch in "a short treatise on fungi".chars() { + /// let counter = letters.rustc_entry(ch).or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(letters[&'s'], 2); + /// assert_eq!(letters[&'t'], 3); + /// assert_eq!(letters[&'u'], 1); + /// assert_eq!(letters.get(&'y'), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V> { + let hash = make_hash(&self.hash_builder, &key); + if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) { + RustcEntry::Occupied(RustcOccupiedEntry { + key: Some(key), + elem, + table: &mut self.table, + }) + } else { + // Ideally we would put this in VacantEntry::insert, but Entry is not + // generic over the BuildHasher and adding a generic parameter would be + // a breaking change. + self.reserve(1); + + RustcEntry::Vacant(RustcVacantEntry { + hash, + key, + table: &mut self.table, + }) + } + } +} + +/// A view into a single entry in a map, which may either be vacant or occupied. +/// +/// This `enum` is constructed from the [`entry`] method on [`HashMap`]. +/// +/// [`HashMap`]: struct.HashMap.html +/// [`entry`]: struct.HashMap.html#method.rustc_entry +pub enum RustcEntry<'a, K, V> { + /// An occupied entry. + Occupied(RustcOccupiedEntry<'a, K, V>), + + /// A vacant entry. + Vacant(RustcVacantEntry<'a, K, V>), +} + +impl Debug for RustcEntry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), + Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), + } + } +} + +/// A view into an occupied entry in a `HashMap`. +/// It is part of the [`RustcEntry`] enum. 
+/// +/// [`RustcEntry`]: enum.RustcEntry.html +pub struct RustcOccupiedEntry<'a, K, V> { + key: Option, + elem: Bucket<(K, V)>, + table: &'a mut RawTable<(K, V)>, +} + +unsafe impl Send for RustcOccupiedEntry<'_, K, V> +where + K: Send, + V: Send, +{ +} +unsafe impl Sync for RustcOccupiedEntry<'_, K, V> +where + K: Sync, + V: Sync, +{ +} + +impl Debug for RustcOccupiedEntry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OccupiedEntry") + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +/// A view into a vacant entry in a `HashMap`. +/// It is part of the [`RustcEntry`] enum. +/// +/// [`RustcEntry`]: enum.RustcEntry.html +pub struct RustcVacantEntry<'a, K, V> { + hash: u64, + key: K, + table: &'a mut RawTable<(K, V)>, +} + +impl Debug for RustcVacantEntry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("VacantEntry").field(self.key()).finish() + } +} + +impl<'a, K, V> RustcEntry<'a, K, V> { + /// Sets the value of the entry, and returns a RustcOccupiedEntry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let entry = map.entry("horseyland").insert(37); + /// + /// assert_eq!(entry.key(), &"horseyland"); + /// ``` + pub fn insert(self, value: V) -> RustcOccupiedEntry<'a, K, V> { + match self { + Vacant(entry) => entry.insert_entry(value), + Occupied(mut entry) => { + entry.insert(value); + entry + } + } + } + + /// Ensures a value is in the entry by inserting the default if empty, and returns + /// a mutable reference to the value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.rustc_entry("poneyland").or_insert(3); + /// assert_eq!(map["poneyland"], 3); + /// + /// *map.rustc_entry("poneyland").or_insert(10) *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert(self, default: V) -> &'a mut V + where + K: Hash, + { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty, + /// and returns a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, String> = HashMap::new(); + /// let s = "hoho".to_string(); + /// + /// map.rustc_entry("poneyland").or_insert_with(|| s); + /// + /// assert_eq!(map["poneyland"], "hoho".to_string()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with V>(self, default: F) -> &'a mut V + where + K: Hash, + { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(default()), + } + } + + /// Returns a reference to this entry's key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + match *self { + Occupied(ref entry) => entry.key(), + Vacant(ref entry) => entry.key(), + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the map. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.rustc_entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 42); + /// + /// map.rustc_entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 43); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_modify(self, f: F) -> Self + where + F: FnOnce(&mut V), + { + match self { + Occupied(mut entry) => { + f(entry.get_mut()); + Occupied(entry) + } + Vacant(entry) => Vacant(entry), + } + } +} + +impl<'a, K, V: Default> RustcEntry<'a, K, V> { + /// Ensures a value is in the entry by inserting the default value if empty, + /// and returns a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// # fn main() { + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, Option> = HashMap::new(); + /// map.rustc_entry("poneyland").or_default(); + /// + /// assert_eq!(map["poneyland"], None); + /// # } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_default(self) -> &'a mut V + where + K: Hash, + { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(Default::default()), + } + } +} + +impl<'a, K, V> RustcOccupiedEntry<'a, K, V> { + /// Gets a reference to the key in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + unsafe { &self.elem.as_ref().0 } + } + + /// Take the ownership of the key and value from the map. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// // We delete the entry from the map. + /// o.remove_entry(); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(self) -> (K, V) { + unsafe { self.table.remove(self.elem) } + } + + /// Gets a reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// assert_eq!(o.get(), &12); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &V { + unsafe { &self.elem.as_ref().1 } + } + + /// Gets a mutable reference to the value in the entry. + /// + /// If you need a reference to the `RustcOccupiedEntry` which may outlive the + /// destruction of the `RustcEntry` value, see [`into_mut`]. + /// + /// [`into_mut`]: #method.into_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") { + /// *o.get_mut() += 10; + /// assert_eq!(*o.get(), 22); + /// + /// // We can use the same RustcEntry multiple times. 
+ /// *o.get_mut() += 2; + /// } + /// + /// assert_eq!(map["poneyland"], 24); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self) -> &mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Converts the RustcOccupiedEntry into a mutable reference to the value in the entry + /// with a lifetime bound to the map itself. + /// + /// If you need multiple references to the `RustcOccupiedEntry`, see [`get_mut`]. + /// + /// [`get_mut`]: #method.get_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// *o.into_mut() += 10; + /// } + /// + /// assert_eq!(map["poneyland"], 22); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_mut(self) -> &'a mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Sets the value of the entry, and returns the entry's old value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") { + /// assert_eq!(o.insert(15), 12); + /// } + /// + /// assert_eq!(map["poneyland"], 15); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, mut value: V) -> V { + let old_value = self.get_mut(); + mem::swap(&mut value, old_value); + value + } + + /// Takes the value out of the entry, and returns it. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// assert_eq!(o.remove(), 12); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(self) -> V { + self.remove_entry().1 + } + + /// Replaces the entry, returning the old key and value. The new key in the hash map will be + /// the key used to create this entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{RustcEntry, HashMap}; + /// use std::rc::Rc; + /// + /// let mut map: HashMap, u32> = HashMap::new(); + /// map.insert(Rc::new("Stringthing".to_string()), 15); + /// + /// let my_key = Rc::new("Stringthing".to_string()); + /// + /// if let RustcEntry::Occupied(entry) = map.rustc_entry(my_key) { + /// // Also replace the key with a handle to our other key. + /// let (old_key, old_value): (Rc, u32) = entry.replace_entry(16); + /// } + /// + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace_entry(self, value: V) -> (K, V) { + let entry = unsafe { self.elem.as_mut() }; + + let old_key = mem::replace(&mut entry.0, self.key.unwrap()); + let old_value = mem::replace(&mut entry.1, value); + + (old_key, old_value) + } + + /// Replaces the key in the hash map with the key used to create this entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{RustcEntry, HashMap}; + /// use std::rc::Rc; + /// + /// let mut map: HashMap, u32> = HashMap::new(); + /// let mut known_strings: Vec> = Vec::new(); + /// + /// // Initialise known strings, run program, etc. 
+ /// + /// reclaim_memory(&mut map, &known_strings); + /// + /// fn reclaim_memory(map: &mut HashMap, u32>, known_strings: &[Rc] ) { + /// for s in known_strings { + /// if let RustcEntry::Occupied(entry) = map.rustc_entry(s.clone()) { + /// // Replaces the entry's key with our version of it in `known_strings`. + /// entry.replace_key(); + /// } + /// } + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace_key(self) -> K { + let entry = unsafe { self.elem.as_mut() }; + mem::replace(&mut entry.0, self.key.unwrap()) + } +} + +impl<'a, K, V> RustcVacantEntry<'a, K, V> { + /// Gets a reference to the key that would be used when inserting a value + /// through the `RustcVacantEntry`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + &self.key + } + + /// Take ownership of the key. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") { + /// v.into_key(); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_key(self) -> K { + self.key + } + + /// Sets the value of the entry with the RustcVacantEntry's key, + /// and returns a mutable reference to it. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let RustcEntry::Vacant(o) = map.rustc_entry("poneyland") { + /// o.insert(37); + /// } + /// assert_eq!(map["poneyland"], 37); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(self, value: V) -> &'a mut V { + let bucket = self.table.insert_no_grow(self.hash, (self.key, value)); + unsafe { &mut bucket.as_mut().1 } + } + + /// Sets the value of the entry with the RustcVacantEntry's key, + /// and returns a RustcOccupiedEntry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") { + /// let o = v.insert_entry(37); + /// assert_eq!(o.get(), &37); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_entry(self, value: V) -> RustcOccupiedEntry<'a, K, V> { + let bucket = self.table.insert_no_grow(self.hash, (self.key, value)); + RustcOccupiedEntry { + key: None, + elem: bucket, + table: self.table, + } + } +} + +impl IterMut<'_, K, V> { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub fn rustc_iter(&self) -> Iter<'_, K, V> { + self.iter() + } +} + +impl IntoIter { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub fn rustc_iter(&self) -> Iter<'_, K, V> { + self.iter() + } +} + +impl Drain<'_, K, V> { + /// Returns a iterator of references over the remaining items. 
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn rustc_iter(&self) -> Iter<'_, K, V> { + self.iter() + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/scopeguard.rs b/src/rust/vendor/hashbrown-0.9.1/src/scopeguard.rs new file mode 100644 index 000000000..32c969437 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/scopeguard.rs @@ -0,0 +1,49 @@ +// Extracted from the scopeguard crate +use core::ops::{Deref, DerefMut}; + +pub struct ScopeGuard +where + F: FnMut(&mut T), +{ + dropfn: F, + value: T, +} + +#[cfg_attr(feature = "inline-more", inline)] +pub fn guard(value: T, dropfn: F) -> ScopeGuard +where + F: FnMut(&mut T), +{ + ScopeGuard { dropfn, value } +} + +impl Deref for ScopeGuard +where + F: FnMut(&mut T), +{ + type Target = T; + #[cfg_attr(feature = "inline-more", inline)] + fn deref(&self) -> &T { + &self.value + } +} + +impl DerefMut for ScopeGuard +where + F: FnMut(&mut T), +{ + #[cfg_attr(feature = "inline-more", inline)] + fn deref_mut(&mut self) -> &mut T { + &mut self.value + } +} + +impl Drop for ScopeGuard +where + F: FnMut(&mut T), +{ + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + (self.dropfn)(&mut self.value) + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/src/set.rs b/src/rust/vendor/hashbrown-0.9.1/src/set.rs new file mode 100644 index 000000000..b8460fd3b --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/src/set.rs @@ -0,0 +1,2119 @@ +use crate::TryReserveError; +use alloc::borrow::ToOwned; +use core::borrow::Borrow; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::iter::{Chain, FromIterator, FusedIterator}; +use core::mem; +use core::ops::{BitAnd, BitOr, BitXor, Sub}; + +use super::map::{self, ConsumeAllOnDrop, DefaultHashBuilder, DrainFilterInner, HashMap, Keys}; + +// Future Optimization (FIXME!) +// ============================= +// +// Iteration over zero sized values is a noop. There is no need +// for `bucket.val` in the case of HashSet. 
I suppose we would need HKT +// to get rid of it properly. + +/// A hash set implemented as a `HashMap` where the value is `()`. +/// +/// As with the [`HashMap`] type, a `HashSet` requires that the elements +/// implement the [`Eq`] and [`Hash`] traits. This can frequently be achieved by +/// using `#[derive(PartialEq, Eq, Hash)]`. If you implement these yourself, +/// it is important that the following property holds: +/// +/// ```text +/// k1 == k2 -> hash(k1) == hash(k2) +/// ``` +/// +/// In other words, if two keys are equal, their hashes must be equal. +/// +/// +/// It is a logic error for an item to be modified in such a way that the +/// item's hash, as determined by the [`Hash`] trait, or its equality, as +/// determined by the [`Eq`] trait, changes while it is in the set. This is +/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or +/// unsafe code. +/// +/// It is also a logic error for the [`Hash`] implementation of a key to panic. +/// This is generally only possible if the trait is implemented manually. If a +/// panic does occur then the contents of the `HashSet` may become corrupted and +/// some items may be dropped from the table. +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashSet; +/// // Type inference lets us omit an explicit type signature (which +/// // would be `HashSet` in this example). +/// let mut books = HashSet::new(); +/// +/// // Add some books. +/// books.insert("A Dance With Dragons".to_string()); +/// books.insert("To Kill a Mockingbird".to_string()); +/// books.insert("The Odyssey".to_string()); +/// books.insert("The Great Gatsby".to_string()); +/// +/// // Check for a specific one. +/// if !books.contains("The Winds of Winter") { +/// println!("We have {} books, but The Winds of Winter ain't one.", +/// books.len()); +/// } +/// +/// // Remove a book. +/// books.remove("The Odyssey"); +/// +/// // Iterate over everything. 
+/// for book in &books { +/// println!("{}", book); +/// } +/// ``` +/// +/// The easiest way to use `HashSet` with a custom type is to derive +/// [`Eq`] and [`Hash`]. We must also derive [`PartialEq`], this will in the +/// future be implied by [`Eq`]. +/// +/// ``` +/// use hashbrown::HashSet; +/// #[derive(Hash, Eq, PartialEq, Debug)] +/// struct Viking { +/// name: String, +/// power: usize, +/// } +/// +/// let mut vikings = HashSet::new(); +/// +/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 }); +/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 }); +/// vikings.insert(Viking { name: "Olaf".to_string(), power: 4 }); +/// vikings.insert(Viking { name: "Harald".to_string(), power: 8 }); +/// +/// // Use derived implementation to print the vikings. +/// for x in &vikings { +/// println!("{:?}", x); +/// } +/// ``` +/// +/// A `HashSet` with fixed list of elements can be initialized from an array: +/// +/// ``` +/// use hashbrown::HashSet; +/// +/// let viking_names: HashSet<&'static str> = +/// [ "Einar", "Olaf", "Harald" ].iter().cloned().collect(); +/// // use the values stored in the set +/// ``` +/// +/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html +/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html +/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html +/// [`HashMap`]: struct.HashMap.html +/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html +/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html +pub struct HashSet { + pub(crate) map: HashMap, +} + +impl Clone for HashSet { + fn clone(&self) -> Self { + HashSet { + map: self.map.clone(), + } + } + + fn clone_from(&mut self, source: &Self) { + self.map.clone_from(&source.map); + } +} + +#[cfg(feature = "ahash")] +impl HashSet { + /// Creates an empty `HashSet`. + /// + /// The hash set is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let set: HashSet = HashSet::new(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn new() -> Self { + Self { + map: HashMap::new(), + } + } + + /// Creates an empty `HashSet` with the specified capacity. + /// + /// The hash set will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash set will not allocate. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let set: HashSet = HashSet::with_capacity(10); + /// assert!(set.capacity() >= 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity(capacity: usize) -> Self { + Self { + map: HashMap::with_capacity(capacity), + } + } +} + +impl HashSet { + /// Creates a new empty hash set which will use the given hasher to hash + /// keys. + /// + /// The hash set is also created with the default initial capacity. + /// + /// Warning: `hasher` is normally randomly generated, and + /// is designed to allow `HashSet`s to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the HashMap to be useful, see its documentation for details. + /// + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut set = HashSet::with_hasher(s); + /// set.insert(2); + /// ``` + /// + /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html + #[cfg_attr(feature = "inline-more", inline)] + pub const fn with_hasher(hasher: S) -> Self { + Self { + map: HashMap::with_hasher(hasher), + } + } + + /// Creates an empty `HashSet` with the specified capacity, using + /// `hasher` to hash the keys. 
+ /// + /// The hash set will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash set will not allocate. + /// + /// Warning: `hasher` is normally randomly generated, and + /// is designed to allow `HashSet`s to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the HashMap to be useful, see its documentation for details. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut set = HashSet::with_capacity_and_hasher(10, s); + /// set.insert(1); + /// ``` + /// + /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { + Self { + map: HashMap::with_capacity_and_hasher(capacity, hasher), + } + } + + /// Returns the number of elements the set can hold without reallocating. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let set: HashSet = HashSet::with_capacity(100); + /// assert!(set.capacity() >= 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn capacity(&self) -> usize { + self.map.capacity() + } + + /// An iterator visiting all elements in arbitrary order. + /// The iterator element type is `&'a T`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let mut set = HashSet::new(); + /// set.insert("a"); + /// set.insert("b"); + /// + /// // Will print in an arbitrary order. 
+ /// for x in set.iter() { + /// println!("{}", x); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> Iter<'_, T> { + Iter { + iter: self.map.keys(), + } + } + + /// Returns the number of elements in the set. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut v = HashSet::new(); + /// assert_eq!(v.len(), 0); + /// v.insert(1); + /// assert_eq!(v.len(), 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn len(&self) -> usize { + self.map.len() + } + + /// Returns `true` if the set contains no elements. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut v = HashSet::new(); + /// assert!(v.is_empty()); + /// v.insert(1); + /// assert!(!v.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + /// Clears the set, returning all elements in an iterator. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// assert!(!set.is_empty()); + /// + /// // print 1, 2, 3 in an arbitrary order + /// for i in set.drain() { + /// println!("{}", i); + /// } + /// + /// assert!(set.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain(&mut self) -> Drain<'_, T> { + Drain { + iter: self.map.drain(), + } + } + + /// Retains only the elements specified by the predicate. + /// + /// In other words, remove all elements `e` such that `f(&e)` returns `false`. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let xs = [1,2,3,4,5,6]; + /// let mut set: HashSet = xs.iter().cloned().collect(); + /// set.retain(|&k| k % 2 == 0); + /// assert_eq!(set.len(), 3); + /// ``` + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&T) -> bool, + { + self.map.retain(|k, _| f(k)); + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// In other words, move all elements `e` such that `f(&e)` returns `true` out + /// into another iterator. + /// + /// When the returned DrainedFilter is dropped, any remaining elements that satisfy + /// the predicate are dropped from the set. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet = (0..8).collect(); + /// let drained: HashSet = set.drain_filter(|v| v % 2 == 0).collect(); + /// + /// let mut evens = drained.into_iter().collect::>(); + /// let mut odds = set.into_iter().collect::>(); + /// evens.sort(); + /// odds.sort(); + /// + /// assert_eq!(evens, vec![0, 2, 4, 6]); + /// assert_eq!(odds, vec![1, 3, 5, 7]); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain_filter(&mut self, f: F) -> DrainFilter<'_, T, F> + where + F: FnMut(&T) -> bool, + { + DrainFilter { + f, + inner: DrainFilterInner { + iter: unsafe { self.map.table.iter() }, + table: &mut self.map.table, + }, + } + } + + /// Clears the set, removing all values. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut v = HashSet::new(); + /// v.insert(1); + /// v.clear(); + /// assert!(v.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear(&mut self) { + self.map.clear() + } + + /// Returns a reference to the set's [`BuildHasher`]. 
+ /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let hasher = DefaultHashBuilder::default(); + /// let set: HashSet = HashSet::with_hasher(hasher); + /// let hasher: &DefaultHashBuilder = set.hasher(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn hasher(&self) -> &S { + self.map.hasher() + } +} + +impl HashSet +where + T: Eq + Hash, + S: BuildHasher, +{ + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the `HashSet`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Panics + /// + /// Panics if the new allocation size overflows `usize`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let mut set: HashSet = HashSet::new(); + /// set.reserve(10); + /// assert!(set.capacity() >= 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve(&mut self, additional: usize) { + self.map.reserve(additional) + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashSet`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let mut set: HashSet = HashSet::new(); + /// set.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.map.try_reserve(additional) + } + + /// Shrinks the capacity of the set as much as possible. 
It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::with_capacity(100); + /// set.insert(1); + /// set.insert(2); + /// assert!(set.capacity() >= 100); + /// set.shrink_to_fit(); + /// assert!(set.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to_fit(&mut self) { + self.map.shrink_to_fit() + } + + /// Shrinks the capacity of the set with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::with_capacity(100); + /// set.insert(1); + /// set.insert(2); + /// assert!(set.capacity() >= 100); + /// set.shrink_to(10); + /// assert!(set.capacity() >= 10); + /// set.shrink_to(0); + /// assert!(set.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.map.shrink_to(min_capacity) + } + + /// Visits the values representing the difference, + /// i.e., the values that are in `self` but not in `other`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect(); + /// + /// // Can be seen as `a - b`. 
+ /// for x in a.difference(&b) { + /// println!("{}", x); // Print 1 + /// } + /// + /// let diff: HashSet<_> = a.difference(&b).collect(); + /// assert_eq!(diff, [1].iter().collect()); + /// + /// // Note that difference is not symmetric, + /// // and `b - a` means something else: + /// let diff: HashSet<_> = b.difference(&a).collect(); + /// assert_eq!(diff, [4].iter().collect()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn difference<'a>(&'a self, other: &'a Self) -> Difference<'a, T, S> { + Difference { + iter: self.iter(), + other, + } + } + + /// Visits the values representing the symmetric difference, + /// i.e., the values that are in `self` or in `other` but not in both. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect(); + /// + /// // Print 1, 4 in arbitrary order. + /// for x in a.symmetric_difference(&b) { + /// println!("{}", x); + /// } + /// + /// let diff1: HashSet<_> = a.symmetric_difference(&b).collect(); + /// let diff2: HashSet<_> = b.symmetric_difference(&a).collect(); + /// + /// assert_eq!(diff1, diff2); + /// assert_eq!(diff1, [1, 4].iter().collect()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn symmetric_difference<'a>(&'a self, other: &'a Self) -> SymmetricDifference<'a, T, S> { + SymmetricDifference { + iter: self.difference(other).chain(other.difference(self)), + } + } + + /// Visits the values representing the intersection, + /// i.e., the values that are both in `self` and `other`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect(); + /// + /// // Print 2, 3 in arbitrary order. 
+ /// for x in a.intersection(&b) { + /// println!("{}", x); + /// } + /// + /// let intersection: HashSet<_> = a.intersection(&b).collect(); + /// assert_eq!(intersection, [2, 3].iter().collect()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn intersection<'a>(&'a self, other: &'a Self) -> Intersection<'a, T, S> { + let (smaller, larger) = if self.len() <= other.len() { + (self, other) + } else { + (other, self) + }; + Intersection { + iter: smaller.iter(), + other: larger, + } + } + + /// Visits the values representing the union, + /// i.e., all the values in `self` or `other`, without duplicates. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect(); + /// + /// // Print 1, 2, 3, 4 in arbitrary order. + /// for x in a.union(&b) { + /// println!("{}", x); + /// } + /// + /// let union: HashSet<_> = a.union(&b).collect(); + /// assert_eq!(union, [1, 2, 3, 4].iter().collect()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn union<'a>(&'a self, other: &'a Self) -> Union<'a, T, S> { + let (smaller, larger) = if self.len() >= other.len() { + (self, other) + } else { + (other, self) + }; + Union { + iter: larger.iter().chain(smaller.difference(larger)), + } + } + + /// Returns `true` if the set contains a value. + /// + /// The value may be any borrowed form of the set's value type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the value type. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// assert_eq!(set.contains(&1), true); + /// assert_eq!(set.contains(&4), false); + /// ``` + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn contains(&self, value: &Q) -> bool + where + T: Borrow, + Q: Hash + Eq, + { + self.map.contains_key(value) + } + + /// Returns a reference to the value in the set, if any, that is equal to the given value. + /// + /// The value may be any borrowed form of the set's value type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the value type. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// assert_eq!(set.get(&2), Some(&2)); + /// assert_eq!(set.get(&4), None); + /// ``` + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self, value: &Q) -> Option<&T> + where + T: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.map.get_key_value(value) { + Some((k, _)) => Some(k), + None => None, + } + } + + /// Inserts the given `value` into the set if it is not present, then + /// returns a reference to the value in the set. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// assert_eq!(set.len(), 3); + /// assert_eq!(set.get_or_insert(2), &2); + /// assert_eq!(set.get_or_insert(100), &100); + /// assert_eq!(set.len(), 4); // 100 was inserted + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_or_insert(&mut self, value: T) -> &T { + // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with + // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`. + self.map + .raw_entry_mut() + .from_key(&value) + .or_insert(value, ()) + .0 + } + + /// Inserts an owned copy of the given `value` into the set if it is not + /// present, then returns a reference to the value in the set. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet = ["cat", "dog", "horse"] + /// .iter().map(|&pet| pet.to_owned()).collect(); + /// + /// assert_eq!(set.len(), 3); + /// for &pet in &["cat", "dog", "fish"] { + /// let value = set.get_or_insert_owned(pet); + /// assert_eq!(value, pet); + /// } + /// assert_eq!(set.len(), 4); // a new "fish" was inserted + /// ``` + #[inline] + pub fn get_or_insert_owned(&mut self, value: &Q) -> &T + where + T: Borrow, + Q: Hash + Eq + ToOwned, + { + // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with + // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`. + self.map + .raw_entry_mut() + .from_key(value) + .or_insert_with(|| (value.to_owned(), ())) + .0 + } + + /// Inserts a value computed from `f` into the set if the given `value` is + /// not present, then returns a reference to the value in the set. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet = ["cat", "dog", "horse"] + /// .iter().map(|&pet| pet.to_owned()).collect(); + /// + /// assert_eq!(set.len(), 3); + /// for &pet in &["cat", "dog", "fish"] { + /// let value = set.get_or_insert_with(pet, str::to_owned); + /// assert_eq!(value, pet); + /// } + /// assert_eq!(set.len(), 4); // a new "fish" was inserted + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_or_insert_with(&mut self, value: &Q, f: F) -> &T + where + T: Borrow, + Q: Hash + Eq, + F: FnOnce(&Q) -> T, + { + // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with + // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`. + self.map + .raw_entry_mut() + .from_key(value) + .or_insert_with(|| (f(value), ())) + .0 + } + + /// Returns `true` if `self` has no elements in common with `other`. + /// This is equivalent to checking for an empty intersection. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// let mut b = HashSet::new(); + /// + /// assert_eq!(a.is_disjoint(&b), true); + /// b.insert(4); + /// assert_eq!(a.is_disjoint(&b), true); + /// b.insert(1); + /// assert_eq!(a.is_disjoint(&b), false); + /// ``` + pub fn is_disjoint(&self, other: &Self) -> bool { + self.iter().all(|v| !other.contains(v)) + } + + /// Returns `true` if the set is a subset of another, + /// i.e., `other` contains at least all the values in `self`. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let sup: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// let mut set = HashSet::new(); + /// + /// assert_eq!(set.is_subset(&sup), true); + /// set.insert(2); + /// assert_eq!(set.is_subset(&sup), true); + /// set.insert(4); + /// assert_eq!(set.is_subset(&sup), false); + /// ``` + pub fn is_subset(&self, other: &Self) -> bool { + self.len() <= other.len() && self.iter().all(|v| other.contains(v)) + } + + /// Returns `true` if the set is a superset of another, + /// i.e., `self` contains at least all the values in `other`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let sub: HashSet<_> = [1, 2].iter().cloned().collect(); + /// let mut set = HashSet::new(); + /// + /// assert_eq!(set.is_superset(&sub), false); + /// + /// set.insert(0); + /// set.insert(1); + /// assert_eq!(set.is_superset(&sub), false); + /// + /// set.insert(2); + /// assert_eq!(set.is_superset(&sub), true); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn is_superset(&self, other: &Self) -> bool { + other.is_subset(self) + } + + /// Adds a value to the set. + /// + /// If the set did not have this value present, `true` is returned. + /// + /// If the set did have this value present, `false` is returned. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::new(); + /// + /// assert_eq!(set.insert(2), true); + /// assert_eq!(set.insert(2), false); + /// assert_eq!(set.len(), 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, value: T) -> bool { + self.map.insert(value, ()).is_none() + } + + /// Adds a value to the set, replacing the existing value, if any, that is equal to the given + /// one. Returns the replaced value. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::new(); + /// set.insert(Vec::::new()); + /// + /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 0); + /// set.replace(Vec::with_capacity(10)); + /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn replace(&mut self, value: T) -> Option { + match self.map.entry(value) { + map::Entry::Occupied(occupied) => Some(occupied.replace_key()), + map::Entry::Vacant(vacant) => { + vacant.insert(()); + None + } + } + } + + /// Removes a value from the set. Returns whether the value was + /// present in the set. + /// + /// The value may be any borrowed form of the set's value type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the value type. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set = HashSet::new(); + /// + /// set.insert(2); + /// assert_eq!(set.remove(&2), true); + /// assert_eq!(set.remove(&2), false); + /// ``` + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(&mut self, value: &Q) -> bool + where + T: Borrow, + Q: Hash + Eq, + { + self.map.remove(value).is_some() + } + + /// Removes and returns the value in the set, if any, that is equal to the given one. + /// + /// The value may be any borrowed form of the set's value type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the value type. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect(); + /// assert_eq!(set.take(&2), Some(2)); + /// assert_eq!(set.take(&2), None); + /// ``` + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn take(&mut self, value: &Q) -> Option + where + T: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.map.remove_entry(value) { + Some((k, _)) => Some(k), + None => None, + } + } +} + +impl PartialEq for HashSet +where + T: Eq + Hash, + S: BuildHasher, +{ + fn eq(&self, other: &Self) -> bool { + if self.len() != other.len() { + return false; + } + + self.iter().all(|key| other.contains(key)) + } +} + +impl Eq for HashSet +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl fmt::Debug for HashSet +where + T: Eq + Hash + fmt::Debug, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_set().entries(self.iter()).finish() + } +} + +impl FromIterator for HashSet +where + T: Eq + Hash, + S: BuildHasher + Default, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn from_iter>(iter: I) -> Self { + let mut set = Self::with_hasher(Default::default()); + set.extend(iter); + set + } +} + +impl Extend for HashSet +where + T: Eq + Hash, + S: BuildHasher, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: I) { + self.map.extend(iter.into_iter().map(|k| (k, ()))); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, k: T) { + self.map.insert(k, ()); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, additional: usize) { + Extend::<(T, ())>::extend_reserve(&mut self.map, additional); + } +} + +impl<'a, T, S> Extend<&'a T> for HashSet +where + T: 'a + Eq + Hash + Copy, + S: BuildHasher, +{ + 
#[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: I) { + self.extend(iter.into_iter().cloned()); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, k: &'a T) { + self.map.insert(*k, ()); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, additional: usize) { + Extend::<(T, ())>::extend_reserve(&mut self.map, additional); + } +} + +impl Default for HashSet +where + S: Default, +{ + /// Creates an empty `HashSet` with the `Default` value for the hasher. + #[cfg_attr(feature = "inline-more", inline)] + fn default() -> Self { + Self { + map: HashMap::default(), + } + } +} + +impl BitOr<&HashSet> for &HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, +{ + type Output = HashSet; + + /// Returns the union of `self` and `rhs` as a new `HashSet`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// let set = &a | &b; + /// + /// let mut i = 0; + /// let expected = [1, 2, 3, 4, 5]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitor(self, rhs: &HashSet) -> HashSet { + self.union(rhs).cloned().collect() + } +} + +impl BitAnd<&HashSet> for &HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, +{ + type Output = HashSet; + + /// Returns the intersection of `self` and `rhs` as a new `HashSet`. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![2, 3, 4].into_iter().collect(); + /// + /// let set = &a & &b; + /// + /// let mut i = 0; + /// let expected = [2, 3]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitand(self, rhs: &HashSet) -> HashSet { + self.intersection(rhs).cloned().collect() + } +} + +impl BitXor<&HashSet> for &HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, +{ + type Output = HashSet; + + /// Returns the symmetric difference of `self` and `rhs` as a new `HashSet`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// let set = &a ^ &b; + /// + /// let mut i = 0; + /// let expected = [1, 2, 4, 5]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn bitxor(self, rhs: &HashSet) -> HashSet { + self.symmetric_difference(rhs).cloned().collect() + } +} + +impl Sub<&HashSet> for &HashSet +where + T: Eq + Hash + Clone, + S: BuildHasher + Default, +{ + type Output = HashSet; + + /// Returns the difference of `self` and `rhs` as a new `HashSet`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// + /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect(); + /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect(); + /// + /// let set = &a - &b; + /// + /// let mut i = 0; + /// let expected = [1, 2]; + /// for x in &set { + /// assert!(expected.contains(x)); + /// i += 1; + /// } + /// assert_eq!(i, expected.len()); + /// ``` + fn sub(self, rhs: &HashSet) -> HashSet { + self.difference(rhs).cloned().collect() + } +} + +/// An iterator over the items of a `HashSet`. 
+/// +/// This `struct` is created by the [`iter`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`iter`]: struct.HashSet.html#method.iter +pub struct Iter<'a, K> { + iter: Keys<'a, K, ()>, +} + +/// An owning iterator over the items of a `HashSet`. +/// +/// This `struct` is created by the [`into_iter`] method on [`HashSet`] +/// (provided by the `IntoIterator` trait). See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`into_iter`]: struct.HashSet.html#method.into_iter +pub struct IntoIter { + iter: map::IntoIter, +} + +/// A draining iterator over the items of a `HashSet`. +/// +/// This `struct` is created by the [`drain`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`drain`]: struct.HashSet.html#method.drain +pub struct Drain<'a, K> { + iter: map::Drain<'a, K, ()>, +} + +/// A draining iterator over entries of a `HashSet` which don't satisfy the predicate `f`. +/// +/// This `struct` is created by the [`drain_filter`] method on [`HashSet`]. See its +/// documentation for more. +/// +/// [`drain_filter`]: struct.HashSet.html#method.drain_filter +/// [`HashSet`]: struct.HashSet.html +pub struct DrainFilter<'a, K, F> +where + F: FnMut(&K) -> bool, +{ + f: F, + inner: DrainFilterInner<'a, K, ()>, +} + +/// A lazy iterator producing elements in the intersection of `HashSet`s. +/// +/// This `struct` is created by the [`intersection`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`intersection`]: struct.HashSet.html#method.intersection +pub struct Intersection<'a, T, S> { + // iterator of the first set + iter: Iter<'a, T>, + // the second set + other: &'a HashSet, +} + +/// A lazy iterator producing elements in the difference of `HashSet`s. +/// +/// This `struct` is created by the [`difference`] method on [`HashSet`]. 
+/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`difference`]: struct.HashSet.html#method.difference +pub struct Difference<'a, T, S> { + // iterator of the first set + iter: Iter<'a, T>, + // the second set + other: &'a HashSet, +} + +/// A lazy iterator producing elements in the symmetric difference of `HashSet`s. +/// +/// This `struct` is created by the [`symmetric_difference`] method on +/// [`HashSet`]. See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference +pub struct SymmetricDifference<'a, T, S> { + iter: Chain, Difference<'a, T, S>>, +} + +/// A lazy iterator producing elements in the union of `HashSet`s. +/// +/// This `struct` is created by the [`union`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`HashSet`]: struct.HashSet.html +/// [`union`]: struct.HashSet.html#method.union +pub struct Union<'a, T, S> { + iter: Chain, Difference<'a, T, S>>, +} + +impl<'a, T, S> IntoIterator for &'a HashSet { + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> Iter<'a, T> { + self.iter() + } +} + +impl IntoIterator for HashSet { + type Item = T; + type IntoIter = IntoIter; + + /// Creates a consuming iterator, that is, one that moves each value out + /// of the set in arbitrary order. The set cannot be used after calling + /// this. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashSet; + /// let mut set = HashSet::new(); + /// set.insert("a".to_string()); + /// set.insert("b".to_string()); + /// + /// // Not possible to collect to a Vec with a regular `.iter()`. + /// let v: Vec = set.into_iter().collect(); + /// + /// // Will print in an arbitrary order. 
+ /// for x in &v { + /// println!("{}", x); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> IntoIter { + IntoIter { + iter: self.map.into_iter(), + } + } +} + +impl Clone for Iter<'_, K> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Iter { + iter: self.iter.clone(), + } + } +} +impl<'a, K> Iterator for Iter<'a, K> { + type Item = &'a K; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a K> { + self.iter.next() + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} +impl<'a, K> ExactSizeIterator for Iter<'a, K> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.iter.len() + } +} +impl FusedIterator for Iter<'_, K> {} + +impl fmt::Debug for Iter<'_, K> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Iterator for IntoIter { + type Item = K; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + // Avoid `Option::map` because it bloats LLVM IR. + match self.iter.next() { + Some((k, _)) => Some(k), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} +impl ExactSizeIterator for IntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.iter.len() + } +} +impl FusedIterator for IntoIter {} + +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let entries_iter = self.iter.iter().map(|(k, _)| k); + f.debug_list().entries(entries_iter).finish() + } +} + +impl Iterator for Drain<'_, K> { + type Item = K; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.iter.next() { + Some((k, _)) => Some(k), + None => None, + } + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} +impl ExactSizeIterator for Drain<'_, K> { + #[cfg_attr(feature = "inline-more", inline)] + fn len(&self) -> usize { + self.iter.len() + } +} +impl FusedIterator for Drain<'_, K> {} + +impl fmt::Debug for Drain<'_, K> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let entries_iter = self.iter.iter().map(|(k, _)| k); + f.debug_list().entries(entries_iter).finish() + } +} + +impl<'a, K, F> Drop for DrainFilter<'a, K, F> +where + F: FnMut(&K) -> bool, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + while let Some(item) = self.next() { + let guard = ConsumeAllOnDrop(self); + drop(item); + mem::forget(guard); + } + } +} + +impl Iterator for DrainFilter<'_, K, F> +where + F: FnMut(&K) -> bool, +{ + type Item = K; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + let f = &mut self.f; + let (k, _) = self.inner.next(&mut |k, _| f(k))?; + Some(k) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (0, self.inner.iter.size_hint().1) + } +} + +impl FusedIterator for DrainFilter<'_, K, F> where F: FnMut(&K) -> bool {} + +impl Clone for Intersection<'_, T, S> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Intersection { + iter: self.iter.clone(), + ..*self + } + } +} + +impl<'a, T, S> Iterator for Intersection<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a T> { + loop { + let elt = self.iter.next()?; + if self.other.contains(elt) { + return Some(elt); + } + } + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) + } +} + +impl fmt::Debug for Intersection<'_, 
T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl FusedIterator for Intersection<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl Clone for Difference<'_, T, S> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Difference { + iter: self.iter.clone(), + ..*self + } + } +} + +impl<'a, T, S> Iterator for Difference<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a T> { + loop { + let elt = self.iter.next()?; + if !self.other.contains(elt) { + return Some(elt); + } + } + } + + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) + } +} + +impl FusedIterator for Difference<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl fmt::Debug for Difference<'_, T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Clone for SymmetricDifference<'_, T, S> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + SymmetricDifference { + iter: self.iter.clone(), + } + } +} + +impl<'a, T, S> Iterator for SymmetricDifference<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a T> { + self.iter.next() + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl FusedIterator for SymmetricDifference<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl fmt::Debug for SymmetricDifference<'_, T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl Clone for Union<'_, T, S> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Union { + iter: self.iter.clone(), + } + } +} + +impl FusedIterator for Union<'_, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ +} + +impl fmt::Debug for Union<'_, T, S> +where + T: fmt::Debug + Eq + Hash, + S: BuildHasher, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +impl<'a, T, S> Iterator for Union<'a, T, S> +where + T: Eq + Hash, + S: BuildHasher, +{ + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option<&'a T> { + self.iter.next() + } + #[cfg_attr(feature = "inline-more", inline)] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +#[allow(dead_code)] +fn assert_covariance() { + fn set<'new>(v: HashSet<&'static str>) -> HashSet<&'new str> { + v + } + fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> { + v + } + fn into_iter<'new>(v: IntoIter<&'static str>) -> IntoIter<&'new str> { + v + } + fn difference<'a, 'new>( + v: Difference<'a, &'static str, DefaultHashBuilder>, + ) -> Difference<'a, &'new str, DefaultHashBuilder> { + v + } + fn symmetric_difference<'a, 'new>( + v: SymmetricDifference<'a, &'static str, DefaultHashBuilder>, + ) -> SymmetricDifference<'a, &'new str, DefaultHashBuilder> { + v + } + fn intersection<'a, 'new>( + v: Intersection<'a, &'static str, DefaultHashBuilder>, + ) -> Intersection<'a, &'new str, DefaultHashBuilder> { + v + } + fn union<'a, 'new>( + v: Union<'a, &'static str, DefaultHashBuilder>, + ) -> Union<'a, &'new str, DefaultHashBuilder> { + v + } + fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> { + d + } +} + +#[cfg(test)] +mod test_set { + use super::super::map::DefaultHashBuilder; + use super::HashSet; + use 
std::vec::Vec; + + #[test] + fn test_zero_capacities() { + type HS = HashSet; + + let s = HS::new(); + assert_eq!(s.capacity(), 0); + + let s = HS::default(); + assert_eq!(s.capacity(), 0); + + let s = HS::with_hasher(DefaultHashBuilder::default()); + assert_eq!(s.capacity(), 0); + + let s = HS::with_capacity(0); + assert_eq!(s.capacity(), 0); + + let s = HS::with_capacity_and_hasher(0, DefaultHashBuilder::default()); + assert_eq!(s.capacity(), 0); + + let mut s = HS::new(); + s.insert(1); + s.insert(2); + s.remove(&1); + s.remove(&2); + s.shrink_to_fit(); + assert_eq!(s.capacity(), 0); + + let mut s = HS::new(); + s.reserve(0); + assert_eq!(s.capacity(), 0); + } + + #[test] + fn test_disjoint() { + let mut xs = HashSet::new(); + let mut ys = HashSet::new(); + assert!(xs.is_disjoint(&ys)); + assert!(ys.is_disjoint(&xs)); + assert!(xs.insert(5)); + assert!(ys.insert(11)); + assert!(xs.is_disjoint(&ys)); + assert!(ys.is_disjoint(&xs)); + assert!(xs.insert(7)); + assert!(xs.insert(19)); + assert!(xs.insert(4)); + assert!(ys.insert(2)); + assert!(ys.insert(-11)); + assert!(xs.is_disjoint(&ys)); + assert!(ys.is_disjoint(&xs)); + assert!(ys.insert(7)); + assert!(!xs.is_disjoint(&ys)); + assert!(!ys.is_disjoint(&xs)); + } + + #[test] + fn test_subset_and_superset() { + let mut a = HashSet::new(); + assert!(a.insert(0)); + assert!(a.insert(5)); + assert!(a.insert(11)); + assert!(a.insert(7)); + + let mut b = HashSet::new(); + assert!(b.insert(0)); + assert!(b.insert(7)); + assert!(b.insert(19)); + assert!(b.insert(250)); + assert!(b.insert(11)); + assert!(b.insert(200)); + + assert!(!a.is_subset(&b)); + assert!(!a.is_superset(&b)); + assert!(!b.is_subset(&a)); + assert!(!b.is_superset(&a)); + + assert!(b.insert(5)); + + assert!(a.is_subset(&b)); + assert!(!a.is_superset(&b)); + assert!(!b.is_subset(&a)); + assert!(b.is_superset(&a)); + } + + #[test] + fn test_iterate() { + let mut a = HashSet::new(); + for i in 0..32 { + assert!(a.insert(i)); + } + let mut observed: u32 = 
0; + for k in &a { + observed |= 1 << *k; + } + assert_eq!(observed, 0xFFFF_FFFF); + } + + #[test] + fn test_intersection() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(11)); + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(77)); + assert!(a.insert(103)); + assert!(a.insert(5)); + assert!(a.insert(-5)); + + assert!(b.insert(2)); + assert!(b.insert(11)); + assert!(b.insert(77)); + assert!(b.insert(-9)); + assert!(b.insert(-42)); + assert!(b.insert(5)); + assert!(b.insert(3)); + + let mut i = 0; + let expected = [3, 5, 11, 77]; + for x in a.intersection(&b) { + assert!(expected.contains(x)); + i += 1 + } + assert_eq!(i, expected.len()); + } + + #[test] + fn test_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(3)); + assert!(b.insert(9)); + + let mut i = 0; + let expected = [1, 5, 11]; + for x in a.difference(&b) { + assert!(expected.contains(x)); + i += 1 + } + assert_eq!(i, expected.len()); + } + + #[test] + fn test_symmetric_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(-2)); + assert!(b.insert(3)); + assert!(b.insert(9)); + assert!(b.insert(14)); + assert!(b.insert(22)); + + let mut i = 0; + let expected = [-2, 1, 5, 11, 14, 22]; + for x in a.symmetric_difference(&b) { + assert!(expected.contains(x)); + i += 1 + } + assert_eq!(i, expected.len()); + } + + #[test] + fn test_union() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + assert!(a.insert(16)); + assert!(a.insert(19)); + assert!(a.insert(24)); + + assert!(b.insert(-2)); + 
assert!(b.insert(1)); + assert!(b.insert(5)); + assert!(b.insert(9)); + assert!(b.insert(13)); + assert!(b.insert(19)); + + let mut i = 0; + let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24]; + for x in a.union(&b) { + assert!(expected.contains(x)); + i += 1 + } + assert_eq!(i, expected.len()); + } + + #[test] + fn test_from_iter() { + let xs = [1, 2, 2, 3, 4, 5, 6, 7, 8, 9]; + + let set: HashSet<_> = xs.iter().cloned().collect(); + + for x in &xs { + assert!(set.contains(x)); + } + + assert_eq!(set.iter().len(), xs.len() - 1); + } + + #[test] + fn test_move_iter() { + let hs = { + let mut hs = HashSet::new(); + + hs.insert('a'); + hs.insert('b'); + + hs + }; + + let v = hs.into_iter().collect::>(); + assert!(v == ['a', 'b'] || v == ['b', 'a']); + } + + #[test] + fn test_eq() { + // These constants once happened to expose a bug in insert(). + // I'm keeping them around to prevent a regression. + let mut s1 = HashSet::new(); + + s1.insert(1); + s1.insert(2); + s1.insert(3); + + let mut s2 = HashSet::new(); + + s2.insert(1); + s2.insert(2); + + assert!(s1 != s2); + + s2.insert(3); + + assert_eq!(s1, s2); + } + + #[test] + fn test_show() { + let mut set = HashSet::new(); + let empty = HashSet::::new(); + + set.insert(1); + set.insert(2); + + let set_str = format!("{:?}", set); + + assert!(set_str == "{1, 2}" || set_str == "{2, 1}"); + assert_eq!(format!("{:?}", empty), "{}"); + } + + #[test] + fn test_trivial_drain() { + let mut s = HashSet::::new(); + for _ in s.drain() {} + assert!(s.is_empty()); + drop(s); + + let mut s = HashSet::::new(); + drop(s.drain()); + assert!(s.is_empty()); + } + + #[test] + fn test_drain() { + let mut s: HashSet<_> = (1..100).collect(); + + // try this a bunch of times to make sure we don't screw up internal state. 
+ for _ in 0..20 { + assert_eq!(s.len(), 99); + + { + let mut last_i = 0; + let mut d = s.drain(); + for (i, x) in d.by_ref().take(50).enumerate() { + last_i = i; + assert!(x != 0); + } + assert_eq!(last_i, 49); + } + + for _ in &s { + panic!("s should be empty!"); + } + + // reset to try again. + s.extend(1..100); + } + } + + #[test] + fn test_replace() { + use core::hash; + + #[derive(Debug)] + struct Foo(&'static str, i32); + + impl PartialEq for Foo { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl Eq for Foo {} + + impl hash::Hash for Foo { + fn hash(&self, h: &mut H) { + self.0.hash(h); + } + } + + let mut s = HashSet::new(); + assert_eq!(s.replace(Foo("a", 1)), None); + assert_eq!(s.len(), 1); + assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1))); + assert_eq!(s.len(), 1); + + let mut it = s.iter(); + assert_eq!(it.next(), Some(&Foo("a", 2))); + assert_eq!(it.next(), None); + } + + #[test] + fn test_extend_ref() { + let mut a = HashSet::new(); + a.insert(1); + + a.extend(&[2, 3, 4]); + + assert_eq!(a.len(), 4); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + + let mut b = HashSet::new(); + b.insert(5); + b.insert(6); + + a.extend(&b); + + assert_eq!(a.len(), 6); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + assert!(a.contains(&5)); + assert!(a.contains(&6)); + } + + #[test] + fn test_retain() { + let xs = [1, 2, 3, 4, 5, 6]; + let mut set: HashSet = xs.iter().cloned().collect(); + set.retain(|&k| k % 2 == 0); + assert_eq!(set.len(), 3); + assert!(set.contains(&2)); + assert!(set.contains(&4)); + assert!(set.contains(&6)); + } + + #[test] + fn test_drain_filter() { + { + let mut set: HashSet = (0..8).collect(); + let drained = set.drain_filter(|&k| k % 2 == 0); + let mut out = drained.collect::>(); + out.sort_unstable(); + assert_eq!(vec![0, 2, 4, 6], out); + assert_eq!(set.len(), 4); + } + { + let mut 
set: HashSet = (0..8).collect(); + drop(set.drain_filter(|&k| k % 2 == 0)); + assert_eq!(set.len(), 4, "Removes non-matching items on drop"); + } + } + + #[test] + fn test_const_with_hasher() { + use core::hash::BuildHasher; + use std::collections::hash_map::DefaultHasher; + + #[derive(Clone)] + struct MyHasher; + impl BuildHasher for MyHasher { + type Hasher = DefaultHasher; + + fn build_hasher(&self) -> DefaultHasher { + DefaultHasher::new() + } + } + + const EMPTY_SET: HashSet = HashSet::with_hasher(MyHasher); + + let mut set = EMPTY_SET.clone(); + set.insert(19); + assert!(set.contains(&19)); + } +} diff --git a/src/rust/vendor/hashbrown-0.9.1/tests/hasher.rs b/src/rust/vendor/hashbrown-0.9.1/tests/hasher.rs new file mode 100644 index 000000000..e455e3d3c --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/tests/hasher.rs @@ -0,0 +1,65 @@ +//! Sanity check that alternate hashers work correctly. + +#![cfg(not(miri))] // FIXME: takes too long + +use hashbrown::HashSet; +use std::hash::{BuildHasher, BuildHasherDefault, Hasher}; + +fn check() { + let range = 0..1_000; + + let mut set = HashSet::::default(); + set.extend(range.clone()); + + assert!(!set.contains(&i32::min_value())); + assert!(!set.contains(&(range.start - 1))); + for i in range.clone() { + assert!(set.contains(&i)); + } + assert!(!set.contains(&range.end)); + assert!(!set.contains(&i32::max_value())); +} + +/// Use hashbrown's default hasher. +#[test] +fn default() { + check::(); +} + +/// Use std's default hasher. +#[test] +fn random_state() { + check::(); +} + +/// Use a constant 0 hash. +#[test] +fn zero() { + #[derive(Default)] + struct ZeroHasher; + + impl Hasher for ZeroHasher { + fn finish(&self) -> u64 { + 0 + } + fn write(&mut self, _: &[u8]) {} + } + + check::>(); +} + +/// Use a constant maximum hash. 
+#[test] +fn max() { + #[derive(Default)] + struct MaxHasher; + + impl Hasher for MaxHasher { + fn finish(&self) -> u64 { + u64::max_value() + } + fn write(&mut self, _: &[u8]) {} + } + + check::>(); +} diff --git a/src/rust/vendor/hashbrown-0.9.1/tests/rayon.rs b/src/rust/vendor/hashbrown-0.9.1/tests/rayon.rs new file mode 100644 index 000000000..39b47708d --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/tests/rayon.rs @@ -0,0 +1,533 @@ +#![cfg(feature = "rayon")] + +#[macro_use] +extern crate lazy_static; + +use hashbrown::{HashMap, HashSet}; +use rayon::iter::{ + IntoParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelExtend, + ParallelIterator, +}; + +macro_rules! assert_eq3 { + ($e1:expr, $e2:expr, $e3:expr) => {{ + assert_eq!($e1, $e2); + assert_eq!($e1, $e3); + assert_eq!($e2, $e3); + }}; +} + +lazy_static! { + static ref MAP_EMPTY: HashMap = HashMap::new(); + static ref MAP: HashMap = { + let mut m = HashMap::new(); + m.insert('b', 20); + m.insert('a', 10); + m.insert('c', 30); + m.insert('e', 50); + m.insert('f', 60); + m.insert('d', 40); + m + }; +} + +#[test] +fn map_seq_par_equivalence_iter_empty() { + let vec_seq = MAP_EMPTY.iter().collect::>(); + let vec_par = MAP_EMPTY.par_iter().collect::>(); + + assert_eq3!(vec_seq, vec_par, []); +} + +#[test] +fn map_seq_par_equivalence_iter() { + let mut vec_seq = MAP.iter().collect::>(); + let mut vec_par = MAP.par_iter().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [ + (&'a', &10), + (&'b', &20), + (&'c', &30), + (&'d', &40), + (&'e', &50), + (&'f', &60), + ]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_keys_empty() { + let vec_seq = MAP_EMPTY.keys().collect::>(); + let vec_par = MAP_EMPTY.par_keys().collect::>(); + + let expected: [&char; 0] = []; + + assert_eq3!(vec_seq, vec_par, expected); +} + 
+#[test] +fn map_seq_par_equivalence_keys() { + let mut vec_seq = MAP.keys().collect::>(); + let mut vec_par = MAP.par_keys().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f']; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_values_empty() { + let vec_seq = MAP_EMPTY.values().collect::>(); + let vec_par = MAP_EMPTY.par_values().collect::>(); + + let expected: [&u32; 0] = []; + + assert_eq3!(vec_seq, vec_par, expected); +} + +#[test] +fn map_seq_par_equivalence_values() { + let mut vec_seq = MAP.values().collect::>(); + let mut vec_par = MAP.par_values().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [&10, &20, &30, &40, &50, &60]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_iter_mut_empty() { + let mut map1 = MAP_EMPTY.clone(); + let mut map2 = MAP_EMPTY.clone(); + + let vec_seq = map1.iter_mut().collect::>(); + let vec_par = map2.par_iter_mut().collect::>(); + + assert_eq3!(vec_seq, vec_par, []); +} + +#[test] +fn map_seq_par_equivalence_iter_mut() { + let mut map1 = MAP.clone(); + let mut map2 = MAP.clone(); + + let mut vec_seq = map1.iter_mut().collect::>(); + let mut vec_par = map2.par_iter_mut().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [ + (&'a', &mut 10), + (&'b', &mut 20), + (&'c', &mut 30), + (&'d', &mut 40), + (&'e', &mut 50), + (&'f', &mut 60), + ]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_values_mut_empty() { + let mut map1 = MAP_EMPTY.clone(); + let mut map2 = MAP_EMPTY.clone(); + + 
let vec_seq = map1.values_mut().collect::>(); + let vec_par = map2.par_values_mut().collect::>(); + + let expected: [&u32; 0] = []; + + assert_eq3!(vec_seq, vec_par, expected); +} + +#[test] +fn map_seq_par_equivalence_values_mut() { + let mut map1 = MAP.clone(); + let mut map2 = MAP.clone(); + + let mut vec_seq = map1.values_mut().collect::>(); + let mut vec_par = map2.par_values_mut().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [&mut 10, &mut 20, &mut 30, &mut 40, &mut 50, &mut 60]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn map_seq_par_equivalence_into_iter_empty() { + let vec_seq = MAP_EMPTY.clone().into_iter().collect::>(); + let vec_par = MAP_EMPTY.clone().into_par_iter().collect::>(); + + assert_eq3!(vec_seq, vec_par, []); +} + +#[test] +fn map_seq_par_equivalence_into_iter() { + let mut vec_seq = MAP.clone().into_iter().collect::>(); + let mut vec_par = MAP.clone().into_par_iter().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [ + ('a', 10), + ('b', 20), + ('c', 30), + ('d', 40), + ('e', 50), + ('f', 60), + ]; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +lazy_static! 
{ + static ref MAP_VEC_EMPTY: Vec<(char, u32)> = vec![]; + static ref MAP_VEC: Vec<(char, u32)> = vec![ + ('b', 20), + ('a', 10), + ('c', 30), + ('e', 50), + ('f', 60), + ('d', 40), + ]; +} + +#[test] +fn map_seq_par_equivalence_collect_empty() { + let map_expected = MAP_EMPTY.clone(); + let map_seq = MAP_VEC_EMPTY.clone().into_iter().collect::>(); + let map_par = MAP_VEC_EMPTY + .clone() + .into_par_iter() + .collect::>(); + + assert_eq!(map_seq, map_par); + assert_eq!(map_seq, map_expected); + assert_eq!(map_par, map_expected); +} + +#[test] +fn map_seq_par_equivalence_collect() { + let map_expected = MAP.clone(); + let map_seq = MAP_VEC.clone().into_iter().collect::>(); + let map_par = MAP_VEC.clone().into_par_iter().collect::>(); + + assert_eq!(map_seq, map_par); + assert_eq!(map_seq, map_expected); + assert_eq!(map_par, map_expected); +} + +lazy_static! { + static ref MAP_EXISTING_EMPTY: HashMap = HashMap::new(); + static ref MAP_EXISTING: HashMap = { + let mut m = HashMap::new(); + m.insert('b', 20); + m.insert('a', 10); + m + }; + static ref MAP_EXTENSION_EMPTY: Vec<(char, u32)> = vec![]; + static ref MAP_EXTENSION: Vec<(char, u32)> = vec![('c', 30), ('e', 50), ('f', 60), ('d', 40),]; +} + +#[test] +fn map_seq_par_equivalence_existing_empty_extend_empty() { + let expected = HashMap::new(); + let mut map_seq = MAP_EXISTING_EMPTY.clone(); + let mut map_par = MAP_EXISTING_EMPTY.clone(); + + map_seq.extend(MAP_EXTENSION_EMPTY.iter().cloned()); + map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().cloned()); + + assert_eq3!(map_seq, map_par, expected); +} + +#[test] +fn map_seq_par_equivalence_existing_empty_extend() { + let expected = MAP_EXTENSION.iter().cloned().collect::>(); + let mut map_seq = MAP_EXISTING_EMPTY.clone(); + let mut map_par = MAP_EXISTING_EMPTY.clone(); + + map_seq.extend(MAP_EXTENSION.iter().cloned()); + map_par.par_extend(MAP_EXTENSION.par_iter().cloned()); + + assert_eq3!(map_seq, map_par, expected); +} + +#[test] +fn 
map_seq_par_equivalence_existing_extend_empty() { + let expected = MAP_EXISTING.clone(); + let mut map_seq = MAP_EXISTING.clone(); + let mut map_par = MAP_EXISTING.clone(); + + map_seq.extend(MAP_EXTENSION_EMPTY.iter().cloned()); + map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().cloned()); + + assert_eq3!(map_seq, map_par, expected); +} + +#[test] +fn map_seq_par_equivalence_existing_extend() { + let expected = MAP.clone(); + let mut map_seq = MAP_EXISTING.clone(); + let mut map_par = MAP_EXISTING.clone(); + + map_seq.extend(MAP_EXTENSION.iter().cloned()); + map_par.par_extend(MAP_EXTENSION.par_iter().cloned()); + + assert_eq3!(map_seq, map_par, expected); +} + +lazy_static! { + static ref SET_EMPTY: HashSet = HashSet::new(); + static ref SET: HashSet = { + let mut s = HashSet::new(); + s.insert('b'); + s.insert('a'); + s.insert('c'); + s.insert('e'); + s.insert('f'); + s.insert('d'); + s + }; +} + +#[test] +fn set_seq_par_equivalence_iter_empty() { + let vec_seq = SET_EMPTY.iter().collect::>(); + let vec_par = SET_EMPTY.par_iter().collect::>(); + + let expected: [&char; 0] = []; + + assert_eq3!(vec_seq, vec_par, expected); +} + +#[test] +fn set_seq_par_equivalence_iter() { + let mut vec_seq = SET.iter().collect::>(); + let mut vec_par = SET.par_iter().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend on the exact order of values + let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f']; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +#[test] +fn set_seq_par_equivalence_into_iter_empty() { + let vec_seq = SET_EMPTY.clone().into_iter().collect::>(); + let vec_par = SET_EMPTY.clone().into_par_iter().collect::>(); + + assert_eq3!(vec_seq, vec_par, []); +} + +#[test] +fn set_seq_par_equivalence_into_iter() { + let mut vec_seq = SET.clone().into_iter().collect::>(); + let mut vec_par = SET.clone().into_par_iter().collect::>(); + + assert_eq!(vec_seq, vec_par); + + // Do not depend 
on the exact order of values + let expected_sorted = ['a', 'b', 'c', 'd', 'e', 'f']; + + vec_seq.sort_unstable(); + vec_par.sort_unstable(); + + assert_eq3!(vec_seq, vec_par, expected_sorted); +} + +lazy_static! { + static ref SET_VEC_EMPTY: Vec = vec![]; + static ref SET_VEC: Vec = vec!['b', 'a', 'c', 'e', 'f', 'd',]; +} + +#[test] +fn set_seq_par_equivalence_collect_empty() { + let set_expected = SET_EMPTY.clone(); + let set_seq = SET_VEC_EMPTY.clone().into_iter().collect::>(); + let set_par = SET_VEC_EMPTY + .clone() + .into_par_iter() + .collect::>(); + + assert_eq!(set_seq, set_par); + assert_eq!(set_seq, set_expected); + assert_eq!(set_par, set_expected); +} + +#[test] +fn set_seq_par_equivalence_collect() { + let set_expected = SET.clone(); + let set_seq = SET_VEC.clone().into_iter().collect::>(); + let set_par = SET_VEC.clone().into_par_iter().collect::>(); + + assert_eq!(set_seq, set_par); + assert_eq!(set_seq, set_expected); + assert_eq!(set_par, set_expected); +} + +lazy_static! 
{ + static ref SET_EXISTING_EMPTY: HashSet = HashSet::new(); + static ref SET_EXISTING: HashSet = { + let mut s = HashSet::new(); + s.insert('b'); + s.insert('a'); + s + }; + static ref SET_EXTENSION_EMPTY: Vec = vec![]; + static ref SET_EXTENSION: Vec = vec!['c', 'e', 'f', 'd',]; +} + +#[test] +fn set_seq_par_equivalence_existing_empty_extend_empty() { + let expected = HashSet::new(); + let mut set_seq = SET_EXISTING_EMPTY.clone(); + let mut set_par = SET_EXISTING_EMPTY.clone(); + + set_seq.extend(SET_EXTENSION_EMPTY.iter().cloned()); + set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().cloned()); + + assert_eq3!(set_seq, set_par, expected); +} + +#[test] +fn set_seq_par_equivalence_existing_empty_extend() { + let expected = SET_EXTENSION.iter().cloned().collect::>(); + let mut set_seq = SET_EXISTING_EMPTY.clone(); + let mut set_par = SET_EXISTING_EMPTY.clone(); + + set_seq.extend(SET_EXTENSION.iter().cloned()); + set_par.par_extend(SET_EXTENSION.par_iter().cloned()); + + assert_eq3!(set_seq, set_par, expected); +} + +#[test] +fn set_seq_par_equivalence_existing_extend_empty() { + let expected = SET_EXISTING.clone(); + let mut set_seq = SET_EXISTING.clone(); + let mut set_par = SET_EXISTING.clone(); + + set_seq.extend(SET_EXTENSION_EMPTY.iter().cloned()); + set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().cloned()); + + assert_eq3!(set_seq, set_par, expected); +} + +#[test] +fn set_seq_par_equivalence_existing_extend() { + let expected = SET.clone(); + let mut set_seq = SET_EXISTING.clone(); + let mut set_par = SET_EXISTING.clone(); + + set_seq.extend(SET_EXTENSION.iter().cloned()); + set_par.par_extend(SET_EXTENSION.par_iter().cloned()); + + assert_eq3!(set_seq, set_par, expected); +} + +lazy_static! 
{ + static ref SET_A: HashSet = ['a', 'b', 'c', 'd'].iter().cloned().collect(); + static ref SET_B: HashSet = ['a', 'b', 'e', 'f'].iter().cloned().collect(); + static ref SET_DIFF_AB: HashSet = ['c', 'd'].iter().cloned().collect(); + static ref SET_DIFF_BA: HashSet = ['e', 'f'].iter().cloned().collect(); + static ref SET_SYMM_DIFF_AB: HashSet = ['c', 'd', 'e', 'f'].iter().cloned().collect(); + static ref SET_INTERSECTION_AB: HashSet = ['a', 'b'].iter().cloned().collect(); + static ref SET_UNION_AB: HashSet = + ['a', 'b', 'c', 'd', 'e', 'f'].iter().cloned().collect(); +} + +#[test] +fn set_seq_par_equivalence_difference() { + let diff_ab_seq = SET_A.difference(&*SET_B).cloned().collect::>(); + let diff_ab_par = SET_A + .par_difference(&*SET_B) + .cloned() + .collect::>(); + + assert_eq3!(diff_ab_seq, diff_ab_par, *SET_DIFF_AB); + + let diff_ba_seq = SET_B.difference(&*SET_A).cloned().collect::>(); + let diff_ba_par = SET_B + .par_difference(&*SET_A) + .cloned() + .collect::>(); + + assert_eq3!(diff_ba_seq, diff_ba_par, *SET_DIFF_BA); +} + +#[test] +fn set_seq_par_equivalence_symmetric_difference() { + let symm_diff_ab_seq = SET_A + .symmetric_difference(&*SET_B) + .cloned() + .collect::>(); + let symm_diff_ab_par = SET_A + .par_symmetric_difference(&*SET_B) + .cloned() + .collect::>(); + + assert_eq3!(symm_diff_ab_seq, symm_diff_ab_par, *SET_SYMM_DIFF_AB); +} + +#[test] +fn set_seq_par_equivalence_intersection() { + let intersection_ab_seq = SET_A.intersection(&*SET_B).cloned().collect::>(); + let intersection_ab_par = SET_A + .par_intersection(&*SET_B) + .cloned() + .collect::>(); + + assert_eq3!( + intersection_ab_seq, + intersection_ab_par, + *SET_INTERSECTION_AB + ); +} + +#[test] +fn set_seq_par_equivalence_union() { + let union_ab_seq = SET_A.union(&*SET_B).cloned().collect::>(); + let union_ab_par = SET_A.par_union(&*SET_B).cloned().collect::>(); + + assert_eq3!(union_ab_seq, union_ab_par, *SET_UNION_AB); +} diff --git 
a/src/rust/vendor/hashbrown-0.9.1/tests/serde.rs b/src/rust/vendor/hashbrown-0.9.1/tests/serde.rs new file mode 100644 index 000000000..570bf70da --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/tests/serde.rs @@ -0,0 +1,65 @@ +#![cfg(feature = "serde")] + +use core::hash::BuildHasherDefault; +use hashbrown::{HashMap, HashSet}; +use rustc_hash::FxHasher; +use serde_test::{assert_tokens, Token}; + +// We use FxHash for this test because we rely on the ordering +type FxHashMap = HashMap>; +type FxHashSet = HashSet>; + +#[test] +fn map_serde_tokens_empty() { + let map = FxHashMap::::default(); + + assert_tokens(&map, &[Token::Map { len: Some(0) }, Token::MapEnd]); +} + +#[test] +fn map_serde_tokens() { + let mut map = FxHashMap::default(); + map.insert('b', 20); + map.insert('a', 10); + map.insert('c', 30); + + assert_tokens( + &map, + &[ + Token::Map { len: Some(3) }, + Token::Char('a'), + Token::I32(10), + Token::Char('b'), + Token::I32(20), + Token::Char('c'), + Token::I32(30), + Token::MapEnd, + ], + ); +} + +#[test] +fn set_serde_tokens_empty() { + let set = FxHashSet::::default(); + + assert_tokens(&set, &[Token::Seq { len: Some(0) }, Token::SeqEnd]); +} + +#[test] +fn set_serde_tokens() { + let mut set = FxHashSet::default(); + set.insert(20); + set.insert(10); + set.insert(30); + + assert_tokens( + &set, + &[ + Token::Seq { len: Some(3) }, + Token::I32(20), + Token::I32(10), + Token::I32(30), + Token::SeqEnd, + ], + ); +} diff --git a/src/rust/vendor/hashbrown-0.9.1/tests/set.rs b/src/rust/vendor/hashbrown-0.9.1/tests/set.rs new file mode 100644 index 000000000..3fc071705 --- /dev/null +++ b/src/rust/vendor/hashbrown-0.9.1/tests/set.rs @@ -0,0 +1,30 @@ +#![cfg(not(miri))] // FIXME: takes too long + +use hashbrown::HashSet; +use rand::{distributions::Alphanumeric, rngs::SmallRng, Rng, SeedableRng}; + +#[test] +fn test_hashset_insert_remove() { + let mut m: HashSet> = HashSet::new(); + //let num: u32 = 4096; + //let tx: Vec> = (0..num).map(|i| (i..(16 + 
i)).collect()).collect(); + let seed: [u8; 16] = [ + 130, 220, 246, 217, 111, 124, 221, 189, 190, 234, 121, 93, 67, 95, 100, 43, + ]; + + let rng = &mut SmallRng::from_seed(seed); + let tx: Vec> = (0..4096) + .map(|_| (rng.sample_iter(&Alphanumeric).take(32).collect())) + .collect(); + + for _ in 0..32 { + for i in 0..4096 { + assert_eq!(m.contains(&tx[i].clone()), false); + assert_eq!(m.insert(tx[i].clone()), true); + } + for i in 0..4096 { + println!("removing {} {:?}", i, tx[i]); + assert_eq!(m.remove(&tx[i]), true); + } + } +} diff --git a/src/rust/vendor/ppv-lite86/.cargo-checksum.json b/src/rust/vendor/ppv-lite86/.cargo-checksum.json new file mode 100644 index 000000000..267f5c1c2 --- /dev/null +++ b/src/rust/vendor/ppv-lite86/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"0bd1d2bdb4a940a0d867a782644eb007e79611be0a8d74d4ba106e83597716df","Cargo.toml":"db172e26faf8c73a7456063fceb5db7ebeb9ea1945dfaca7aa1099ee9c6d5841","LICENSE-APACHE":"0218327e7a480793ffdd4eb792379a9709e5c135c7ba267f709d6f6d4d70af0a","LICENSE-MIT":"4cada0bd02ea3692eee6f16400d86c6508bbd3bafb2b65fed0419f36d4f83e8f","src/generic.rs":"b2ef3b6f39acb1c4ac1f361f948d61570a72f81fa9dc0c0ce8e067b8245827de","src/lib.rs":"45f498128e4843e920df48a285b26b87d2366983226b131e8f55535b45d03127","src/soft.rs":"cad6c11ccf31c93c35fbd64145972ad957ea3f18956e6ca8fdc85eea934fb0f2","src/types.rs":"a1c9e993f85a99d1762597193d72ee8ff00c3f1116885040b4e4ecfbdedabf0a","src/x86_64/mod.rs":"b6d3b8f5f84bd18e466d93fe896e667a41e03bf36b0d65257d1a3240c1de0e94","src/x86_64/sse2.rs":"71893b410f667b9919ac16e1659fbe5a360f2ba780ae178ddf29e54d556a7a48"},"package":"77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04"} \ No newline at end of file diff --git a/src/rust/vendor/ppv-lite86/CHANGELOG.md b/src/rust/vendor/ppv-lite86/CHANGELOG.md new file mode 100644 index 000000000..6e34be395 --- /dev/null +++ b/src/rust/vendor/ppv-lite86/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog +All notable changes to this project will 
be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [0.2.16] +### Added +- add [u64; 4] conversion for generic vec256, to support BLAKE on non-x86. +- impl `From` (rather than just `Into`) for conversions between `*_storage` types and arrays. diff --git a/src/rust/vendor/ppv-lite86/Cargo.toml b/src/rust/vendor/ppv-lite86/Cargo.toml new file mode 100644 index 000000000..cddb4e480 --- /dev/null +++ b/src/rust/vendor/ppv-lite86/Cargo.toml @@ -0,0 +1,55 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +rust-version = "1.61" +name = "ppv-lite86" +version = "0.2.20" +authors = ["The CryptoCorrosion Contributors"] +build = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "Implementation of the crypto-simd API for x86" +readme = false +keywords = [ + "crypto", + "simd", + "x86", +] +categories = [ + "cryptography", + "no-std", +] +license = "MIT/Apache-2.0" +repository = "https://github.com/cryptocorrosion/cryptocorrosion" + +[lib] +name = "ppv_lite86" +path = "src/lib.rs" + +[dependencies.zerocopy] +version = "0.7" +features = [ + "simd", + "derive", +] + +[features] +default = ["std"] +no_simd = [] +simd = [] +std = [] + +[badges.travis-ci] +repository = "cryptocorrosion/cryptocorrosion" diff --git a/src/rust/vendor/ppv-lite86/LICENSE-APACHE b/src/rust/vendor/ppv-lite86/LICENSE-APACHE new file mode 100644 index 000000000..1eb321535 --- /dev/null +++ b/src/rust/vendor/ppv-lite86/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright 2019 The CryptoCorrosion Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/rust/vendor/ppv-lite86/LICENSE-MIT b/src/rust/vendor/ppv-lite86/LICENSE-MIT new file mode 100644 index 000000000..d78c961bc --- /dev/null +++ b/src/rust/vendor/ppv-lite86/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2019 The CryptoCorrosion Contributors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/rust/vendor/ppv-lite86/src/generic.rs b/src/rust/vendor/ppv-lite86/src/generic.rs new file mode 100644 index 000000000..8989482a4 --- /dev/null +++ b/src/rust/vendor/ppv-lite86/src/generic.rs @@ -0,0 +1,861 @@ +#![allow(non_camel_case_types)] + +use crate::soft::{x2, x4}; +use crate::types::*; +use core::ops::*; +use zerocopy::{AsBytes, FromBytes, FromZeroes}; + +#[repr(C)] +#[derive(Clone, Copy, FromBytes, AsBytes, FromZeroes)] +pub union vec128_storage { + d: [u32; 4], + q: [u64; 2], +} +impl From<[u32; 4]> for vec128_storage { + #[inline(always)] + fn from(d: [u32; 4]) -> Self { + Self { d } + } +} +impl From for [u32; 4] { + #[inline(always)] + fn from(d: vec128_storage) -> Self { + unsafe { d.d } + } +} +impl From<[u64; 2]> for vec128_storage { + #[inline(always)] + fn from(q: [u64; 2]) -> Self { + Self { q } + } +} +impl From for [u64; 2] { + #[inline(always)] + fn from(q: vec128_storage) -> Self { + unsafe { q.q } + } +} +impl Default for vec128_storage { + #[inline(always)] + fn default() -> Self { + Self { q: [0, 0] } + } +} +impl Eq for vec128_storage {} +impl PartialEq for vec128_storage { + #[inline(always)] + fn eq(&self, rhs: &Self) -> bool { + unsafe { self.q == rhs.q } + } +} +#[derive(Clone, Copy, PartialEq, Eq, Default)] +pub struct vec256_storage { + v128: [vec128_storage; 2], +} +impl vec256_storage { + #[inline(always)] + pub fn new128(v128: [vec128_storage; 2]) -> Self { + Self { v128 } + } + #[inline(always)] + pub fn split128(self) -> [vec128_storage; 2] { + self.v128 + } +} +impl From for [u64; 4] { + #[inline(always)] + fn from(q: vec256_storage) -> Self { + let [a, b]: [u64; 2] = q.v128[0].into(); + let [c, d]: [u64; 2] = q.v128[1].into(); + [a, b, c, d] + } +} +impl From<[u64; 4]> for vec256_storage { + #[inline(always)] + fn from([a, b, c, d]: [u64; 4]) -> Self { + Self { + v128: [[a, b].into(), [c, d].into()], + } + } +} +#[derive(Clone, Copy, PartialEq, Eq, Default)] +pub struct vec512_storage { + v128: 
[vec128_storage; 4], +} +impl vec512_storage { + #[inline(always)] + pub fn new128(v128: [vec128_storage; 4]) -> Self { + Self { v128 } + } + #[inline(always)] + pub fn split128(self) -> [vec128_storage; 4] { + self.v128 + } +} + +#[inline(always)] +fn dmap(t: T, f: F) -> T +where + T: Store + Into, + F: Fn(u32) -> u32, +{ + let t: vec128_storage = t.into(); + let d = unsafe { t.d }; + let d = vec128_storage { + d: [f(d[0]), f(d[1]), f(d[2]), f(d[3])], + }; + unsafe { T::unpack(d) } +} + +fn dmap2(a: T, b: T, f: F) -> T +where + T: Store + Into, + F: Fn(u32, u32) -> u32, +{ + let a: vec128_storage = a.into(); + let b: vec128_storage = b.into(); + let ao = unsafe { a.d }; + let bo = unsafe { b.d }; + let d = vec128_storage { + d: [ + f(ao[0], bo[0]), + f(ao[1], bo[1]), + f(ao[2], bo[2]), + f(ao[3], bo[3]), + ], + }; + unsafe { T::unpack(d) } +} + +#[inline(always)] +fn qmap(t: T, f: F) -> T +where + T: Store + Into, + F: Fn(u64) -> u64, +{ + let t: vec128_storage = t.into(); + let q = unsafe { t.q }; + let q = vec128_storage { + q: [f(q[0]), f(q[1])], + }; + unsafe { T::unpack(q) } +} + +#[inline(always)] +fn qmap2(a: T, b: T, f: F) -> T +where + T: Store + Into, + F: Fn(u64, u64) -> u64, +{ + let a: vec128_storage = a.into(); + let b: vec128_storage = b.into(); + let ao = unsafe { a.q }; + let bo = unsafe { b.q }; + let q = vec128_storage { + q: [f(ao[0], bo[0]), f(ao[1], bo[1])], + }; + unsafe { T::unpack(q) } +} + +#[inline(always)] +fn o_of_q(q: [u64; 2]) -> u128 { + u128::from(q[0]) | (u128::from(q[1]) << 64) +} + +#[inline(always)] +fn q_of_o(o: u128) -> [u64; 2] { + [o as u64, (o >> 64) as u64] +} + +#[inline(always)] +fn omap(a: T, f: F) -> T +where + T: Store + Into, + F: Fn(u128) -> u128, +{ + let a: vec128_storage = a.into(); + let ao = o_of_q(unsafe { a.q }); + let o = vec128_storage { q: q_of_o(f(ao)) }; + unsafe { T::unpack(o) } +} + +#[inline(always)] +fn omap2(a: T, b: T, f: F) -> T +where + T: Store + Into, + F: Fn(u128, u128) -> u128, +{ + let a: 
vec128_storage = a.into(); + let b: vec128_storage = b.into(); + let ao = o_of_q(unsafe { a.q }); + let bo = o_of_q(unsafe { b.q }); + let o = vec128_storage { + q: q_of_o(f(ao, bo)), + }; + unsafe { T::unpack(o) } +} + +impl RotateEachWord128 for u128x1_generic {} +impl BitOps128 for u128x1_generic {} +impl BitOps64 for u128x1_generic {} +impl BitOps64 for u64x2_generic {} +impl BitOps32 for u128x1_generic {} +impl BitOps32 for u64x2_generic {} +impl BitOps32 for u32x4_generic {} +impl BitOps0 for u128x1_generic {} +impl BitOps0 for u64x2_generic {} +impl BitOps0 for u32x4_generic {} + +macro_rules! impl_bitops { + ($vec:ident) => { + impl Not for $vec { + type Output = Self; + #[inline(always)] + fn not(self) -> Self::Output { + omap(self, |x| !x) + } + } + impl BitAnd for $vec { + type Output = Self; + #[inline(always)] + fn bitand(self, rhs: Self) -> Self::Output { + omap2(self, rhs, |x, y| x & y) + } + } + impl BitOr for $vec { + type Output = Self; + #[inline(always)] + fn bitor(self, rhs: Self) -> Self::Output { + omap2(self, rhs, |x, y| x | y) + } + } + impl BitXor for $vec { + type Output = Self; + #[inline(always)] + fn bitxor(self, rhs: Self) -> Self::Output { + omap2(self, rhs, |x, y| x ^ y) + } + } + impl AndNot for $vec { + type Output = Self; + #[inline(always)] + fn andnot(self, rhs: Self) -> Self::Output { + omap2(self, rhs, |x, y| !x & y) + } + } + impl BitAndAssign for $vec { + #[inline(always)] + fn bitand_assign(&mut self, rhs: Self) { + *self = *self & rhs + } + } + impl BitOrAssign for $vec { + #[inline(always)] + fn bitor_assign(&mut self, rhs: Self) { + *self = *self | rhs + } + } + impl BitXorAssign for $vec { + #[inline(always)] + fn bitxor_assign(&mut self, rhs: Self) { + *self = *self ^ rhs + } + } + + impl Swap64 for $vec { + #[inline(always)] + fn swap1(self) -> Self { + qmap(self, |x| { + ((x & 0x5555555555555555) << 1) | ((x & 0xaaaaaaaaaaaaaaaa) >> 1) + }) + } + #[inline(always)] + fn swap2(self) -> Self { + qmap(self, |x| { + ((x 
& 0x3333333333333333) << 2) | ((x & 0xcccccccccccccccc) >> 2) + }) + } + #[inline(always)] + fn swap4(self) -> Self { + qmap(self, |x| { + ((x & 0x0f0f0f0f0f0f0f0f) << 4) | ((x & 0xf0f0f0f0f0f0f0f0) >> 4) + }) + } + #[inline(always)] + fn swap8(self) -> Self { + qmap(self, |x| { + ((x & 0x00ff00ff00ff00ff) << 8) | ((x & 0xff00ff00ff00ff00) >> 8) + }) + } + #[inline(always)] + fn swap16(self) -> Self { + dmap(self, |x| x.rotate_left(16)) + } + #[inline(always)] + fn swap32(self) -> Self { + qmap(self, |x| x.rotate_left(32)) + } + #[inline(always)] + fn swap64(self) -> Self { + omap(self, |x| (x << 64) | (x >> 64)) + } + } + }; +} +impl_bitops!(u32x4_generic); +impl_bitops!(u64x2_generic); +impl_bitops!(u128x1_generic); + +impl RotateEachWord32 for u32x4_generic { + #[inline(always)] + fn rotate_each_word_right7(self) -> Self { + dmap(self, |x| x.rotate_right(7)) + } + #[inline(always)] + fn rotate_each_word_right8(self) -> Self { + dmap(self, |x| x.rotate_right(8)) + } + #[inline(always)] + fn rotate_each_word_right11(self) -> Self { + dmap(self, |x| x.rotate_right(11)) + } + #[inline(always)] + fn rotate_each_word_right12(self) -> Self { + dmap(self, |x| x.rotate_right(12)) + } + #[inline(always)] + fn rotate_each_word_right16(self) -> Self { + dmap(self, |x| x.rotate_right(16)) + } + #[inline(always)] + fn rotate_each_word_right20(self) -> Self { + dmap(self, |x| x.rotate_right(20)) + } + #[inline(always)] + fn rotate_each_word_right24(self) -> Self { + dmap(self, |x| x.rotate_right(24)) + } + #[inline(always)] + fn rotate_each_word_right25(self) -> Self { + dmap(self, |x| x.rotate_right(25)) + } +} + +impl RotateEachWord32 for u64x2_generic { + #[inline(always)] + fn rotate_each_word_right7(self) -> Self { + qmap(self, |x| x.rotate_right(7)) + } + #[inline(always)] + fn rotate_each_word_right8(self) -> Self { + qmap(self, |x| x.rotate_right(8)) + } + #[inline(always)] + fn rotate_each_word_right11(self) -> Self { + qmap(self, |x| x.rotate_right(11)) + } + 
#[inline(always)] + fn rotate_each_word_right12(self) -> Self { + qmap(self, |x| x.rotate_right(12)) + } + #[inline(always)] + fn rotate_each_word_right16(self) -> Self { + qmap(self, |x| x.rotate_right(16)) + } + #[inline(always)] + fn rotate_each_word_right20(self) -> Self { + qmap(self, |x| x.rotate_right(20)) + } + #[inline(always)] + fn rotate_each_word_right24(self) -> Self { + qmap(self, |x| x.rotate_right(24)) + } + #[inline(always)] + fn rotate_each_word_right25(self) -> Self { + qmap(self, |x| x.rotate_right(25)) + } +} +impl RotateEachWord64 for u64x2_generic { + #[inline(always)] + fn rotate_each_word_right32(self) -> Self { + qmap(self, |x| x.rotate_right(32)) + } +} + +// workaround for koute/cargo-web#52 (u128::rotate_* broken with cargo web) +#[inline(always)] +fn rotate_u128_right(x: u128, i: u32) -> u128 { + (x >> i) | (x << (128 - i)) +} +#[test] +fn test_rotate_u128() { + const X: u128 = 0x0001_0203_0405_0607_0809_0a0b_0c0d_0e0f; + assert_eq!(rotate_u128_right(X, 17), X.rotate_right(17)); +} + +impl RotateEachWord32 for u128x1_generic { + #[inline(always)] + fn rotate_each_word_right7(self) -> Self { + Self([rotate_u128_right(self.0[0], 7)]) + } + #[inline(always)] + fn rotate_each_word_right8(self) -> Self { + Self([rotate_u128_right(self.0[0], 8)]) + } + #[inline(always)] + fn rotate_each_word_right11(self) -> Self { + Self([rotate_u128_right(self.0[0], 11)]) + } + #[inline(always)] + fn rotate_each_word_right12(self) -> Self { + Self([rotate_u128_right(self.0[0], 12)]) + } + #[inline(always)] + fn rotate_each_word_right16(self) -> Self { + Self([rotate_u128_right(self.0[0], 16)]) + } + #[inline(always)] + fn rotate_each_word_right20(self) -> Self { + Self([rotate_u128_right(self.0[0], 20)]) + } + #[inline(always)] + fn rotate_each_word_right24(self) -> Self { + Self([rotate_u128_right(self.0[0], 24)]) + } + #[inline(always)] + fn rotate_each_word_right25(self) -> Self { + Self([rotate_u128_right(self.0[0], 25)]) + } +} +impl RotateEachWord64 
for u128x1_generic { + #[inline(always)] + fn rotate_each_word_right32(self) -> Self { + Self([rotate_u128_right(self.0[0], 32)]) + } +} + +#[derive(Copy, Clone)] +pub struct GenericMachine; +impl Machine for GenericMachine { + type u32x4 = u32x4_generic; + type u64x2 = u64x2_generic; + type u128x1 = u128x1_generic; + type u32x4x2 = u32x4x2_generic; + type u64x2x2 = u64x2x2_generic; + type u64x4 = u64x4_generic; + type u128x2 = u128x2_generic; + type u32x4x4 = u32x4x4_generic; + type u64x2x4 = u64x2x4_generic; + type u128x4 = u128x4_generic; + #[inline(always)] + unsafe fn instance() -> Self { + Self + } +} + +#[derive(Copy, Clone, Debug, PartialEq, FromBytes, AsBytes, FromZeroes)] +#[repr(transparent)] +pub struct u32x4_generic([u32; 4]); +#[derive(Copy, Clone, Debug, PartialEq, FromBytes, AsBytes, FromZeroes)] +#[repr(transparent)] +pub struct u64x2_generic([u64; 2]); +#[derive(Copy, Clone, Debug, PartialEq, FromBytes, AsBytes, FromZeroes)] +#[repr(transparent)] +pub struct u128x1_generic([u128; 1]); + +impl From for vec128_storage { + #[inline(always)] + fn from(d: u32x4_generic) -> Self { + Self { d: d.0 } + } +} +impl From for vec128_storage { + #[inline(always)] + fn from(q: u64x2_generic) -> Self { + Self { q: q.0 } + } +} +impl From for vec128_storage { + #[inline(always)] + fn from(o: u128x1_generic) -> Self { + Self { q: q_of_o(o.0[0]) } + } +} + +impl Store for u32x4_generic { + #[inline(always)] + unsafe fn unpack(s: vec128_storage) -> Self { + Self(s.d) + } +} +impl Store for u64x2_generic { + #[inline(always)] + unsafe fn unpack(s: vec128_storage) -> Self { + Self(s.q) + } +} +impl Store for u128x1_generic { + #[inline(always)] + unsafe fn unpack(s: vec128_storage) -> Self { + Self([o_of_q(s.q); 1]) + } +} + +impl ArithOps for u32x4_generic {} +impl ArithOps for u64x2_generic {} +impl ArithOps for u128x1_generic {} + +impl Add for u32x4_generic { + type Output = Self; + #[inline(always)] + fn add(self, rhs: Self) -> Self::Output { + dmap2(self, rhs, 
|x, y| x.wrapping_add(y)) + } +} +impl Add for u64x2_generic { + type Output = Self; + #[inline(always)] + fn add(self, rhs: Self) -> Self::Output { + qmap2(self, rhs, |x, y| x.wrapping_add(y)) + } +} +impl Add for u128x1_generic { + type Output = Self; + #[inline(always)] + fn add(self, rhs: Self) -> Self::Output { + omap2(self, rhs, |x, y| x.wrapping_add(y)) + } +} +impl AddAssign for u32x4_generic { + #[inline(always)] + fn add_assign(&mut self, rhs: Self) { + *self = *self + rhs + } +} +impl AddAssign for u64x2_generic { + #[inline(always)] + fn add_assign(&mut self, rhs: Self) { + *self = *self + rhs + } +} +impl AddAssign for u128x1_generic { + #[inline(always)] + fn add_assign(&mut self, rhs: Self) { + *self = *self + rhs + } +} +impl BSwap for u32x4_generic { + #[inline(always)] + fn bswap(self) -> Self { + dmap(self, |x| x.swap_bytes()) + } +} +impl BSwap for u64x2_generic { + #[inline(always)] + fn bswap(self) -> Self { + qmap(self, |x| x.swap_bytes()) + } +} +impl BSwap for u128x1_generic { + #[inline(always)] + fn bswap(self) -> Self { + omap(self, |x| x.swap_bytes()) + } +} +impl StoreBytes for u32x4_generic { + #[inline(always)] + unsafe fn unsafe_read_le(input: &[u8]) -> Self { + let x = u32x4_generic::read_from(input).unwrap(); + dmap(x, |x| x.to_le()) + } + #[inline(always)] + unsafe fn unsafe_read_be(input: &[u8]) -> Self { + let x = u32x4_generic::read_from(input).unwrap(); + dmap(x, |x| x.to_be()) + } + #[inline(always)] + fn write_le(self, out: &mut [u8]) { + let x = dmap(self, |x| x.to_le()); + x.write_to(out).unwrap(); + } + #[inline(always)] + fn write_be(self, out: &mut [u8]) { + let x = dmap(self, |x| x.to_be()); + x.write_to(out).unwrap(); + } +} +impl StoreBytes for u64x2_generic { + #[inline(always)] + unsafe fn unsafe_read_le(input: &[u8]) -> Self { + let x = u64x2_generic::read_from(input).unwrap(); + qmap(x, |x| x.to_le()) + } + #[inline(always)] + unsafe fn unsafe_read_be(input: &[u8]) -> Self { + let x = 
u64x2_generic::read_from(input).unwrap(); + qmap(x, |x| x.to_be()) + } + #[inline(always)] + fn write_le(self, out: &mut [u8]) { + let x = qmap(self, |x| x.to_le()); + x.write_to(out).unwrap(); + } + #[inline(always)] + fn write_be(self, out: &mut [u8]) { + let x = qmap(self, |x| x.to_be()); + x.write_to(out).unwrap(); + } +} + +#[derive(Copy, Clone)] +pub struct G0; +#[derive(Copy, Clone)] +pub struct G1; +pub type u32x4x2_generic = x2; +pub type u64x2x2_generic = x2; +pub type u64x4_generic = x2; +pub type u128x2_generic = x2; +pub type u32x4x4_generic = x4; +pub type u64x2x4_generic = x4; +pub type u128x4_generic = x4; + +impl Vector<[u32; 16]> for u32x4x4_generic { + fn to_scalars(self) -> [u32; 16] { + let [a, b, c, d] = self.0; + let a = a.0; + let b = b.0; + let c = c.0; + let d = d.0; + [ + a[0], a[1], a[2], a[3], // + b[0], b[1], b[2], b[3], // + c[0], c[1], c[2], c[3], // + d[0], d[1], d[2], d[3], // + ] + } +} + +impl MultiLane<[u32; 4]> for u32x4_generic { + #[inline(always)] + fn to_lanes(self) -> [u32; 4] { + self.0 + } + #[inline(always)] + fn from_lanes(xs: [u32; 4]) -> Self { + Self(xs) + } +} +impl MultiLane<[u64; 2]> for u64x2_generic { + #[inline(always)] + fn to_lanes(self) -> [u64; 2] { + self.0 + } + #[inline(always)] + fn from_lanes(xs: [u64; 2]) -> Self { + Self(xs) + } +} +impl MultiLane<[u64; 4]> for u64x4_generic { + #[inline(always)] + fn to_lanes(self) -> [u64; 4] { + let (a, b) = (self.0[0].to_lanes(), self.0[1].to_lanes()); + [a[0], a[1], b[0], b[1]] + } + #[inline(always)] + fn from_lanes(xs: [u64; 4]) -> Self { + let (a, b) = ( + u64x2_generic::from_lanes([xs[0], xs[1]]), + u64x2_generic::from_lanes([xs[2], xs[3]]), + ); + x2::new([a, b]) + } +} +impl MultiLane<[u128; 1]> for u128x1_generic { + #[inline(always)] + fn to_lanes(self) -> [u128; 1] { + self.0 + } + #[inline(always)] + fn from_lanes(xs: [u128; 1]) -> Self { + Self(xs) + } +} +impl Vec4 for u32x4_generic { + #[inline(always)] + fn extract(self, i: u32) -> u32 { + 
self.0[i as usize] + } + #[inline(always)] + fn insert(mut self, v: u32, i: u32) -> Self { + self.0[i as usize] = v; + self + } +} +impl Vec4 for u64x4_generic { + #[inline(always)] + fn extract(self, i: u32) -> u64 { + let d: [u64; 4] = self.to_lanes(); + d[i as usize] + } + #[inline(always)] + fn insert(self, v: u64, i: u32) -> Self { + self.0[(i / 2) as usize].insert(v, i % 2); + self + } +} +impl Vec2 for u64x2_generic { + #[inline(always)] + fn extract(self, i: u32) -> u64 { + self.0[i as usize] + } + #[inline(always)] + fn insert(mut self, v: u64, i: u32) -> Self { + self.0[i as usize] = v; + self + } +} + +impl Words4 for u32x4_generic { + #[inline(always)] + fn shuffle2301(self) -> Self { + self.swap64() + } + #[inline(always)] + fn shuffle1230(self) -> Self { + let x = self.0; + Self([x[3], x[0], x[1], x[2]]) + } + #[inline(always)] + fn shuffle3012(self) -> Self { + let x = self.0; + Self([x[1], x[2], x[3], x[0]]) + } +} +impl LaneWords4 for u32x4_generic { + #[inline(always)] + fn shuffle_lane_words2301(self) -> Self { + self.shuffle2301() + } + #[inline(always)] + fn shuffle_lane_words1230(self) -> Self { + self.shuffle1230() + } + #[inline(always)] + fn shuffle_lane_words3012(self) -> Self { + self.shuffle3012() + } +} + +impl Words4 for u64x4_generic { + #[inline(always)] + fn shuffle2301(self) -> Self { + x2::new([self.0[1], self.0[0]]) + } + #[inline(always)] + fn shuffle1230(self) -> Self { + unimplemented!() + } + #[inline(always)] + fn shuffle3012(self) -> Self { + unimplemented!() + } +} + +impl u32x4 for u32x4_generic {} +impl u64x2 for u64x2_generic {} +impl u128x1 for u128x1_generic {} +impl u32x4x2 for u32x4x2_generic {} +impl u64x2x2 for u64x2x2_generic {} +impl u64x4 for u64x4_generic {} +impl u128x2 for u128x2_generic {} +impl u32x4x4 for u32x4x4_generic {} +impl u64x2x4 for u64x2x4_generic {} +impl u128x4 for u128x4_generic {} + +#[macro_export] +macro_rules! 
dispatch { + ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { + #[inline(always)] + $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { + let $mach = unsafe { $crate::generic::GenericMachine::instance() }; + #[inline(always)] + fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + fn_impl($mach, $($arg),*) + } + }; + ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { + dispatch!($mach, $MTy, { + $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body + }); + } +} +#[macro_export] +macro_rules! dispatch_light128 { + ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { + #[inline(always)] + $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { + let $mach = unsafe { $crate::generic::GenericMachine::instance() }; + #[inline(always)] + fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + fn_impl($mach, $($arg),*) + } + }; + ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { + dispatch!($mach, $MTy, { + $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body + }); + } +} +#[macro_export] +macro_rules! 
dispatch_light256 { + ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { + #[inline(always)] + $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { + let $mach = unsafe { $crate::generic::GenericMachine::instance() }; + #[inline(always)] + fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + fn_impl($mach, $($arg),*) + } + }; + ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { + dispatch!($mach, $MTy, { + $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body + }); + } +} +#[macro_export] +macro_rules! dispatch_light512 { + ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { + #[inline(always)] + $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { + let $mach = unsafe { $crate::generic::GenericMachine::instance() }; + #[inline(always)] + fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + fn_impl($mach, $($arg),*) + } + }; + ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { + dispatch!($mach, $MTy, { + $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body + }); + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_bswap32() { + let xs = [0x0f0e_0d0c, 0x0b0a_0908, 0x0706_0504, 0x0302_0100]; + let ys = [0x0c0d_0e0f, 0x0809_0a0b, 0x0405_0607, 0x0001_0203]; + + let m = unsafe { GenericMachine::instance() }; + + let x: ::u32x4 = m.vec(xs); + let x = x.bswap(); + + let y = m.vec(ys); + assert_eq!(x, y); + } +} diff --git a/src/rust/vendor/ppv-lite86/src/lib.rs b/src/rust/vendor/ppv-lite86/src/lib.rs new file mode 100644 index 000000000..311df97b4 --- /dev/null +++ b/src/rust/vendor/ppv-lite86/src/lib.rs @@ -0,0 +1,42 @@ +#![no_std] + +// 
Design: +// - safety: safe creation of any machine type is done only by instance methods of a +// Machine (which is a ZST + Copy type), which can only by created unsafely or safely +// through feature detection (e.g. fn AVX2::try_get() -> Option). + +mod soft; +mod types; +pub use self::types::*; + +#[cfg(all( + target_arch = "x86_64", + target_feature = "sse2", + not(feature = "no_simd"), + not(miri) +))] +pub mod x86_64; +#[cfg(all( + target_arch = "x86_64", + target_feature = "sse2", + not(feature = "no_simd"), + not(miri) +))] +use self::x86_64 as arch; + +#[cfg(any( + feature = "no_simd", + miri, + not(target_arch = "x86_64"), + all(target_arch = "x86_64", not(target_feature = "sse2")) +))] +pub mod generic; +#[cfg(any( + feature = "no_simd", + miri, + not(target_arch = "x86_64"), + all(target_arch = "x86_64", not(target_feature = "sse2")) +))] +use self::generic as arch; + +pub use self::arch::{vec128_storage, vec256_storage, vec512_storage}; diff --git a/src/rust/vendor/ppv-lite86/src/soft.rs b/src/rust/vendor/ppv-lite86/src/soft.rs new file mode 100644 index 000000000..b2cf0e19b --- /dev/null +++ b/src/rust/vendor/ppv-lite86/src/soft.rs @@ -0,0 +1,475 @@ +//! Implement 256- and 512- bit in terms of 128-bit, for machines without native wide SIMD. + +use crate::types::*; +use crate::{vec128_storage, vec256_storage, vec512_storage}; +use core::marker::PhantomData; +use core::ops::*; +use zerocopy::{AsBytes, FromBytes, FromZeroes}; + +#[derive(Copy, Clone, Default, FromBytes, AsBytes, FromZeroes)] +#[repr(transparent)] +#[allow(non_camel_case_types)] +pub struct x2(pub [W; 2], PhantomData); +impl x2 { + #[inline(always)] + pub fn new(xs: [W; 2]) -> Self { + x2(xs, PhantomData) + } +} +macro_rules! fwd_binop_x2 { + ($trait:ident, $fn:ident) => { + impl $trait for x2 { + type Output = x2; + #[inline(always)] + fn $fn(self, rhs: Self) -> Self::Output { + x2::new([self.0[0].$fn(rhs.0[0]), self.0[1].$fn(rhs.0[1])]) + } + } + }; +} +macro_rules! 
fwd_binop_assign_x2 { + ($trait:ident, $fn_assign:ident) => { + impl $trait for x2 { + #[inline(always)] + fn $fn_assign(&mut self, rhs: Self) { + (self.0[0]).$fn_assign(rhs.0[0]); + (self.0[1]).$fn_assign(rhs.0[1]); + } + } + }; +} +macro_rules! fwd_unop_x2 { + ($fn:ident) => { + #[inline(always)] + fn $fn(self) -> Self { + x2::new([self.0[0].$fn(), self.0[1].$fn()]) + } + }; +} +impl RotateEachWord32 for x2 +where + W: Copy + RotateEachWord32, +{ + fwd_unop_x2!(rotate_each_word_right7); + fwd_unop_x2!(rotate_each_word_right8); + fwd_unop_x2!(rotate_each_word_right11); + fwd_unop_x2!(rotate_each_word_right12); + fwd_unop_x2!(rotate_each_word_right16); + fwd_unop_x2!(rotate_each_word_right20); + fwd_unop_x2!(rotate_each_word_right24); + fwd_unop_x2!(rotate_each_word_right25); +} +impl RotateEachWord64 for x2 +where + W: Copy + RotateEachWord64, +{ + fwd_unop_x2!(rotate_each_word_right32); +} +impl RotateEachWord128 for x2 where W: RotateEachWord128 {} +impl BitOps0 for x2 +where + W: BitOps0, + G: Copy, +{ +} +impl BitOps32 for x2 +where + W: BitOps32 + BitOps0, + G: Copy, +{ +} +impl BitOps64 for x2 +where + W: BitOps64 + BitOps0, + G: Copy, +{ +} +impl BitOps128 for x2 +where + W: BitOps128 + BitOps0, + G: Copy, +{ +} +fwd_binop_x2!(BitAnd, bitand); +fwd_binop_x2!(BitOr, bitor); +fwd_binop_x2!(BitXor, bitxor); +fwd_binop_x2!(AndNot, andnot); +fwd_binop_assign_x2!(BitAndAssign, bitand_assign); +fwd_binop_assign_x2!(BitOrAssign, bitor_assign); +fwd_binop_assign_x2!(BitXorAssign, bitxor_assign); +impl ArithOps for x2 +where + W: ArithOps, + G: Copy, +{ +} +fwd_binop_x2!(Add, add); +fwd_binop_assign_x2!(AddAssign, add_assign); +impl Not for x2 { + type Output = x2; + #[inline(always)] + fn not(self) -> Self::Output { + x2::new([self.0[0].not(), self.0[1].not()]) + } +} +impl UnsafeFrom<[W; 2]> for x2 { + #[inline(always)] + unsafe fn unsafe_from(xs: [W; 2]) -> Self { + x2::new(xs) + } +} +impl Vec2 for x2 { + #[inline(always)] + fn extract(self, i: u32) -> W { + 
self.0[i as usize] + } + #[inline(always)] + fn insert(mut self, w: W, i: u32) -> Self { + self.0[i as usize] = w; + self + } +} +impl, G> Store for x2 { + #[inline(always)] + unsafe fn unpack(p: vec256_storage) -> Self { + let p = p.split128(); + x2::new([W::unpack(p[0]), W::unpack(p[1])]) + } +} +impl From> for vec256_storage +where + W: Copy, + vec128_storage: From, +{ + #[inline(always)] + fn from(x: x2) -> Self { + vec256_storage::new128([x.0[0].into(), x.0[1].into()]) + } +} +impl Swap64 for x2 +where + W: Swap64 + Copy, +{ + fwd_unop_x2!(swap1); + fwd_unop_x2!(swap2); + fwd_unop_x2!(swap4); + fwd_unop_x2!(swap8); + fwd_unop_x2!(swap16); + fwd_unop_x2!(swap32); + fwd_unop_x2!(swap64); +} +impl MultiLane<[W; 2]> for x2 { + #[inline(always)] + fn to_lanes(self) -> [W; 2] { + self.0 + } + #[inline(always)] + fn from_lanes(lanes: [W; 2]) -> Self { + x2::new(lanes) + } +} +impl BSwap for x2 { + #[inline(always)] + fn bswap(self) -> Self { + x2::new([self.0[0].bswap(), self.0[1].bswap()]) + } +} +impl StoreBytes for x2 { + #[inline(always)] + unsafe fn unsafe_read_le(input: &[u8]) -> Self { + let input = input.split_at(input.len() / 2); + x2::new([W::unsafe_read_le(input.0), W::unsafe_read_le(input.1)]) + } + #[inline(always)] + unsafe fn unsafe_read_be(input: &[u8]) -> Self { + let input = input.split_at(input.len() / 2); + x2::new([W::unsafe_read_be(input.0), W::unsafe_read_be(input.1)]) + } + #[inline(always)] + fn write_le(self, out: &mut [u8]) { + let out = out.split_at_mut(out.len() / 2); + self.0[0].write_le(out.0); + self.0[1].write_le(out.1); + } + #[inline(always)] + fn write_be(self, out: &mut [u8]) { + let out = out.split_at_mut(out.len() / 2); + self.0[0].write_be(out.0); + self.0[1].write_be(out.1); + } +} +impl LaneWords4 for x2 { + #[inline(always)] + fn shuffle_lane_words2301(self) -> Self { + Self::new([ + self.0[0].shuffle_lane_words2301(), + self.0[1].shuffle_lane_words2301(), + ]) + } + #[inline(always)] + fn shuffle_lane_words1230(self) -> 
Self { + Self::new([ + self.0[0].shuffle_lane_words1230(), + self.0[1].shuffle_lane_words1230(), + ]) + } + #[inline(always)] + fn shuffle_lane_words3012(self) -> Self { + Self::new([ + self.0[0].shuffle_lane_words3012(), + self.0[1].shuffle_lane_words3012(), + ]) + } +} + +#[derive(Copy, Clone, Default, FromBytes, AsBytes, FromZeroes)] +#[repr(transparent)] +#[allow(non_camel_case_types)] +pub struct x4(pub [W; 4]); +impl x4 { + #[inline(always)] + pub fn new(xs: [W; 4]) -> Self { + x4(xs) + } +} +macro_rules! fwd_binop_x4 { + ($trait:ident, $fn:ident) => { + impl $trait for x4 { + type Output = x4; + #[inline(always)] + fn $fn(self, rhs: Self) -> Self::Output { + x4([ + self.0[0].$fn(rhs.0[0]), + self.0[1].$fn(rhs.0[1]), + self.0[2].$fn(rhs.0[2]), + self.0[3].$fn(rhs.0[3]), + ]) + } + } + }; +} +macro_rules! fwd_binop_assign_x4 { + ($trait:ident, $fn_assign:ident) => { + impl $trait for x4 { + #[inline(always)] + fn $fn_assign(&mut self, rhs: Self) { + self.0[0].$fn_assign(rhs.0[0]); + self.0[1].$fn_assign(rhs.0[1]); + self.0[2].$fn_assign(rhs.0[2]); + self.0[3].$fn_assign(rhs.0[3]); + } + } + }; +} +macro_rules! 
fwd_unop_x4 { + ($fn:ident) => { + #[inline(always)] + fn $fn(self) -> Self { + x4([ + self.0[0].$fn(), + self.0[1].$fn(), + self.0[2].$fn(), + self.0[3].$fn(), + ]) + } + }; +} +impl RotateEachWord32 for x4 +where + W: Copy + RotateEachWord32, +{ + fwd_unop_x4!(rotate_each_word_right7); + fwd_unop_x4!(rotate_each_word_right8); + fwd_unop_x4!(rotate_each_word_right11); + fwd_unop_x4!(rotate_each_word_right12); + fwd_unop_x4!(rotate_each_word_right16); + fwd_unop_x4!(rotate_each_word_right20); + fwd_unop_x4!(rotate_each_word_right24); + fwd_unop_x4!(rotate_each_word_right25); +} +impl RotateEachWord64 for x4 +where + W: Copy + RotateEachWord64, +{ + fwd_unop_x4!(rotate_each_word_right32); +} +impl RotateEachWord128 for x4 where W: RotateEachWord128 {} +impl BitOps0 for x4 where W: BitOps0 {} +impl BitOps32 for x4 where W: BitOps32 + BitOps0 {} +impl BitOps64 for x4 where W: BitOps64 + BitOps0 {} +impl BitOps128 for x4 where W: BitOps128 + BitOps0 {} +fwd_binop_x4!(BitAnd, bitand); +fwd_binop_x4!(BitOr, bitor); +fwd_binop_x4!(BitXor, bitxor); +fwd_binop_x4!(AndNot, andnot); +fwd_binop_assign_x4!(BitAndAssign, bitand_assign); +fwd_binop_assign_x4!(BitOrAssign, bitor_assign); +fwd_binop_assign_x4!(BitXorAssign, bitxor_assign); +impl ArithOps for x4 where W: ArithOps {} +fwd_binop_x4!(Add, add); +fwd_binop_assign_x4!(AddAssign, add_assign); +impl Not for x4 { + type Output = x4; + #[inline(always)] + fn not(self) -> Self::Output { + x4([ + self.0[0].not(), + self.0[1].not(), + self.0[2].not(), + self.0[3].not(), + ]) + } +} +impl UnsafeFrom<[W; 4]> for x4 { + #[inline(always)] + unsafe fn unsafe_from(xs: [W; 4]) -> Self { + x4(xs) + } +} +impl Vec4 for x4 { + #[inline(always)] + fn extract(self, i: u32) -> W { + self.0[i as usize] + } + #[inline(always)] + fn insert(mut self, w: W, i: u32) -> Self { + self.0[i as usize] = w; + self + } +} +impl Vec4Ext for x4 { + #[inline(always)] + fn transpose4(a: Self, b: Self, c: Self, d: Self) -> (Self, Self, Self, Self) + where + 
Self: Sized, + { + ( + x4([a.0[0], b.0[0], c.0[0], d.0[0]]), + x4([a.0[1], b.0[1], c.0[1], d.0[1]]), + x4([a.0[2], b.0[2], c.0[2], d.0[2]]), + x4([a.0[3], b.0[3], c.0[3], d.0[3]]), + ) + } +} +impl> Store for x4 { + #[inline(always)] + unsafe fn unpack(p: vec512_storage) -> Self { + let p = p.split128(); + x4([ + W::unpack(p[0]), + W::unpack(p[1]), + W::unpack(p[2]), + W::unpack(p[3]), + ]) + } +} +impl From> for vec512_storage +where + W: Copy, + vec128_storage: From, +{ + #[inline(always)] + fn from(x: x4) -> Self { + vec512_storage::new128([x.0[0].into(), x.0[1].into(), x.0[2].into(), x.0[3].into()]) + } +} +impl Swap64 for x4 +where + W: Swap64 + Copy, +{ + fwd_unop_x4!(swap1); + fwd_unop_x4!(swap2); + fwd_unop_x4!(swap4); + fwd_unop_x4!(swap8); + fwd_unop_x4!(swap16); + fwd_unop_x4!(swap32); + fwd_unop_x4!(swap64); +} +impl MultiLane<[W; 4]> for x4 { + #[inline(always)] + fn to_lanes(self) -> [W; 4] { + self.0 + } + #[inline(always)] + fn from_lanes(lanes: [W; 4]) -> Self { + x4(lanes) + } +} +impl BSwap for x4 { + #[inline(always)] + fn bswap(self) -> Self { + x4([ + self.0[0].bswap(), + self.0[1].bswap(), + self.0[2].bswap(), + self.0[3].bswap(), + ]) + } +} +impl StoreBytes for x4 { + #[inline(always)] + unsafe fn unsafe_read_le(input: &[u8]) -> Self { + let n = input.len() / 4; + x4([ + W::unsafe_read_le(&input[..n]), + W::unsafe_read_le(&input[n..n * 2]), + W::unsafe_read_le(&input[n * 2..n * 3]), + W::unsafe_read_le(&input[n * 3..]), + ]) + } + #[inline(always)] + unsafe fn unsafe_read_be(input: &[u8]) -> Self { + let n = input.len() / 4; + x4([ + W::unsafe_read_be(&input[..n]), + W::unsafe_read_be(&input[n..n * 2]), + W::unsafe_read_be(&input[n * 2..n * 3]), + W::unsafe_read_be(&input[n * 3..]), + ]) + } + #[inline(always)] + fn write_le(self, out: &mut [u8]) { + let n = out.len() / 4; + self.0[0].write_le(&mut out[..n]); + self.0[1].write_le(&mut out[n..n * 2]); + self.0[2].write_le(&mut out[n * 2..n * 3]); + self.0[3].write_le(&mut out[n * 3..]); + } 
+ #[inline(always)] + fn write_be(self, out: &mut [u8]) { + let n = out.len() / 4; + self.0[0].write_be(&mut out[..n]); + self.0[1].write_be(&mut out[n..n * 2]); + self.0[2].write_be(&mut out[n * 2..n * 3]); + self.0[3].write_be(&mut out[n * 3..]); + } +} +impl LaneWords4 for x4 { + #[inline(always)] + fn shuffle_lane_words2301(self) -> Self { + x4([ + self.0[0].shuffle_lane_words2301(), + self.0[1].shuffle_lane_words2301(), + self.0[2].shuffle_lane_words2301(), + self.0[3].shuffle_lane_words2301(), + ]) + } + #[inline(always)] + fn shuffle_lane_words1230(self) -> Self { + x4([ + self.0[0].shuffle_lane_words1230(), + self.0[1].shuffle_lane_words1230(), + self.0[2].shuffle_lane_words1230(), + self.0[3].shuffle_lane_words1230(), + ]) + } + #[inline(always)] + fn shuffle_lane_words3012(self) -> Self { + x4([ + self.0[0].shuffle_lane_words3012(), + self.0[1].shuffle_lane_words3012(), + self.0[2].shuffle_lane_words3012(), + self.0[3].shuffle_lane_words3012(), + ]) + } +} diff --git a/src/rust/vendor/ppv-lite86/src/types.rs b/src/rust/vendor/ppv-lite86/src/types.rs new file mode 100644 index 000000000..f9f3bf1ce --- /dev/null +++ b/src/rust/vendor/ppv-lite86/src/types.rs @@ -0,0 +1,298 @@ +#![allow(non_camel_case_types)] +use core::ops::{Add, AddAssign, BitAnd, BitOr, BitXor, BitXorAssign, Not}; + +pub trait AndNot { + type Output; + fn andnot(self, rhs: Self) -> Self::Output; +} +pub trait BSwap { + fn bswap(self) -> Self; +} +/// Ops that depend on word size +pub trait ArithOps: Add + AddAssign + Sized + Copy + Clone + BSwap {} +/// Ops that are independent of word size and endian +pub trait BitOps0: + BitAnd + + BitOr + + BitXor + + BitXorAssign + + Not + + AndNot + + Sized + + Copy + + Clone +{ +} + +pub trait BitOps32: BitOps0 + RotateEachWord32 {} +pub trait BitOps64: BitOps32 + RotateEachWord64 {} +pub trait BitOps128: BitOps64 + RotateEachWord128 {} + +pub trait RotateEachWord32 { + fn rotate_each_word_right7(self) -> Self; + fn rotate_each_word_right8(self) -> 
Self; + fn rotate_each_word_right11(self) -> Self; + fn rotate_each_word_right12(self) -> Self; + fn rotate_each_word_right16(self) -> Self; + fn rotate_each_word_right20(self) -> Self; + fn rotate_each_word_right24(self) -> Self; + fn rotate_each_word_right25(self) -> Self; +} + +pub trait RotateEachWord64 { + fn rotate_each_word_right32(self) -> Self; +} + +pub trait RotateEachWord128 {} + +// Vector type naming scheme: +// uN[xP]xL +// Unsigned; N-bit words * P bits per lane * L lanes +// +// A lane is always 128-bits, chosen because common SIMD architectures treat 128-bit units of +// wide vectors specially (supporting e.g. intra-lane shuffles), and tend to have limited and +// slow inter-lane operations. + +use crate::arch::{vec128_storage, vec256_storage, vec512_storage}; + +#[allow(clippy::missing_safety_doc)] +pub trait UnsafeFrom { + unsafe fn unsafe_from(t: T) -> Self; +} + +/// A vector composed of two elements, which may be words or themselves vectors. +pub trait Vec2 { + fn extract(self, i: u32) -> W; + fn insert(self, w: W, i: u32) -> Self; +} + +/// A vector composed of four elements, which may be words or themselves vectors. +pub trait Vec4 { + fn extract(self, i: u32) -> W; + fn insert(self, w: W, i: u32) -> Self; +} +/// Vec4 functions which may not be implemented yet for all Vec4 types. +/// NOTE: functions in this trait may be moved to Vec4 in any patch release. To avoid breakage, +/// import Vec4Ext only together with Vec4, and don't qualify its methods. +pub trait Vec4Ext { + fn transpose4(a: Self, b: Self, c: Self, d: Self) -> (Self, Self, Self, Self) + where + Self: Sized; +} +pub trait Vector { + fn to_scalars(self) -> T; +} + +// TODO: multiples of 4 should inherit this +/// A vector composed of four words; depending on their size, operations may cross lanes. 
+pub trait Words4 { + fn shuffle1230(self) -> Self; + fn shuffle2301(self) -> Self; + fn shuffle3012(self) -> Self; +} + +/// A vector composed one or more lanes each composed of four words. +pub trait LaneWords4 { + fn shuffle_lane_words1230(self) -> Self; + fn shuffle_lane_words2301(self) -> Self; + fn shuffle_lane_words3012(self) -> Self; +} + +// TODO: make this a part of BitOps +/// Exchange neigboring ranges of bits of the specified size +pub trait Swap64 { + fn swap1(self) -> Self; + fn swap2(self) -> Self; + fn swap4(self) -> Self; + fn swap8(self) -> Self; + fn swap16(self) -> Self; + fn swap32(self) -> Self; + fn swap64(self) -> Self; +} + +pub trait u32x4: + BitOps32 + + Store + + ArithOps + + Vec4 + + Words4 + + LaneWords4 + + StoreBytes + + MultiLane<[u32; 4]> + + Into +{ +} +pub trait u64x2: + BitOps64 + Store + ArithOps + Vec2 + MultiLane<[u64; 2]> + Into +{ +} +pub trait u128x1: + BitOps128 + Store + Swap64 + MultiLane<[u128; 1]> + Into +{ +} + +pub trait u32x4x2: + BitOps32 + + Store + + Vec2 + + MultiLane<[M::u32x4; 2]> + + ArithOps + + Into + + StoreBytes +{ +} +pub trait u64x2x2: + BitOps64 + + Store + + Vec2 + + MultiLane<[M::u64x2; 2]> + + ArithOps + + StoreBytes + + Into +{ +} +pub trait u64x4: + BitOps64 + + Store + + Vec4 + + MultiLane<[u64; 4]> + + ArithOps + + Words4 + + StoreBytes + + Into +{ +} +pub trait u128x2: + BitOps128 + + Store + + Vec2 + + MultiLane<[M::u128x1; 2]> + + Swap64 + + Into +{ +} + +pub trait u32x4x4: + BitOps32 + + Store + + Vec4 + + Vec4Ext + + Vector<[u32; 16]> + + MultiLane<[M::u32x4; 4]> + + ArithOps + + LaneWords4 + + Into + + StoreBytes +{ +} +pub trait u64x2x4: + BitOps64 + + Store + + Vec4 + + MultiLane<[M::u64x2; 4]> + + ArithOps + + Into +{ +} +// TODO: Words4 +pub trait u128x4: + BitOps128 + + Store + + Vec4 + + MultiLane<[M::u128x1; 4]> + + Swap64 + + Into +{ +} + +/// A vector composed of multiple 128-bit lanes. +pub trait MultiLane { + /// Split a multi-lane vector into single-lane vectors. 
+ fn to_lanes(self) -> Lanes; + /// Build a multi-lane vector from individual lanes. + fn from_lanes(lanes: Lanes) -> Self; +} + +/// Combine single vectors into a multi-lane vector. +pub trait VZip { + fn vzip(self) -> V; +} + +impl VZip for T +where + V: MultiLane, +{ + #[inline(always)] + fn vzip(self) -> V { + V::from_lanes(self) + } +} + +pub trait Machine: Sized + Copy { + type u32x4: u32x4; + type u64x2: u64x2; + type u128x1: u128x1; + + type u32x4x2: u32x4x2; + type u64x2x2: u64x2x2; + type u64x4: u64x4; + type u128x2: u128x2; + + type u32x4x4: u32x4x4; + type u64x2x4: u64x2x4; + type u128x4: u128x4; + + #[inline(always)] + fn unpack>(self, s: S) -> V { + unsafe { V::unpack(s) } + } + + #[inline(always)] + fn vec(self, a: A) -> V + where + V: MultiLane, + { + V::from_lanes(a) + } + + #[inline(always)] + fn read_le(self, input: &[u8]) -> V + where + V: StoreBytes, + { + unsafe { V::unsafe_read_le(input) } + } + + #[inline(always)] + fn read_be(self, input: &[u8]) -> V + where + V: StoreBytes, + { + unsafe { V::unsafe_read_be(input) } + } + + /// # Safety + /// Caller must ensure the type of Self is appropriate for the hardware of the execution + /// environment. + unsafe fn instance() -> Self; +} + +pub trait Store { + /// # Safety + /// Caller must ensure the type of Self is appropriate for the hardware of the execution + /// environment. + unsafe fn unpack(p: S) -> Self; +} + +pub trait StoreBytes { + /// # Safety + /// Caller must ensure the type of Self is appropriate for the hardware of the execution + /// environment. + unsafe fn unsafe_read_le(input: &[u8]) -> Self; + /// # Safety + /// Caller must ensure the type of Self is appropriate for the hardware of the execution + /// environment. 
+ unsafe fn unsafe_read_be(input: &[u8]) -> Self; + fn write_le(self, out: &mut [u8]); + fn write_be(self, out: &mut [u8]); +} diff --git a/src/rust/vendor/ppv-lite86/src/x86_64/mod.rs b/src/rust/vendor/ppv-lite86/src/x86_64/mod.rs new file mode 100644 index 000000000..9d22c0d6d --- /dev/null +++ b/src/rust/vendor/ppv-lite86/src/x86_64/mod.rs @@ -0,0 +1,439 @@ +// crate minimums: sse2, x86_64 + +use crate::types::*; +use core::arch::x86_64::{__m128i, __m256i}; +use zerocopy::{AsBytes, FromBytes, FromZeroes}; + +mod sse2; + +#[derive(Copy, Clone)] +pub struct YesS3; +#[derive(Copy, Clone)] +pub struct NoS3; + +#[derive(Copy, Clone)] +pub struct YesS4; +#[derive(Copy, Clone)] +pub struct NoS4; + +#[derive(Copy, Clone)] +pub struct YesA1; +#[derive(Copy, Clone)] +pub struct NoA1; + +#[derive(Copy, Clone)] +pub struct YesA2; +#[derive(Copy, Clone)] +pub struct NoA2; + +#[derive(Copy, Clone)] +pub struct YesNI; +#[derive(Copy, Clone)] +pub struct NoNI; + +use core::marker::PhantomData; + +#[derive(Copy, Clone)] +pub struct SseMachine(PhantomData<(S3, S4, NI)>); +impl Machine for SseMachine +where + sse2::u128x1_sse2: Swap64, + sse2::u64x2_sse2: BSwap + RotateEachWord32 + MultiLane<[u64; 2]> + Vec2, + sse2::u32x4_sse2: BSwap + RotateEachWord32 + MultiLane<[u32; 4]> + Vec4, + sse2::u64x4_sse2: BSwap + Words4, + sse2::u128x1_sse2: BSwap, + sse2::u128x2_sse2: Into>, + sse2::u128x2_sse2: Into>, + sse2::u128x2_sse2: Into>, + sse2::u128x4_sse2: Into>, + sse2::u128x4_sse2: Into>, +{ + type u32x4 = sse2::u32x4_sse2; + type u64x2 = sse2::u64x2_sse2; + type u128x1 = sse2::u128x1_sse2; + + type u32x4x2 = sse2::u32x4x2_sse2; + type u64x2x2 = sse2::u64x2x2_sse2; + type u64x4 = sse2::u64x4_sse2; + type u128x2 = sse2::u128x2_sse2; + + type u32x4x4 = sse2::u32x4x4_sse2; + type u64x2x4 = sse2::u64x2x4_sse2; + type u128x4 = sse2::u128x4_sse2; + + #[inline(always)] + unsafe fn instance() -> Self { + SseMachine(PhantomData) + } +} + +#[derive(Copy, Clone)] +pub struct 
Avx2Machine(PhantomData); +impl Machine for Avx2Machine +where + sse2::u128x1_sse2: BSwap + Swap64, + sse2::u64x2_sse2: BSwap + RotateEachWord32 + MultiLane<[u64; 2]> + Vec2, + sse2::u32x4_sse2: BSwap + RotateEachWord32 + MultiLane<[u32; 4]> + Vec4, + sse2::u64x4_sse2: BSwap + Words4, +{ + type u32x4 = sse2::u32x4_sse2; + type u64x2 = sse2::u64x2_sse2; + type u128x1 = sse2::u128x1_sse2; + + type u32x4x2 = sse2::avx2::u32x4x2_avx2; + type u64x2x2 = sse2::u64x2x2_sse2; + type u64x4 = sse2::u64x4_sse2; + type u128x2 = sse2::u128x2_sse2; + + type u32x4x4 = sse2::avx2::u32x4x4_avx2; + type u64x2x4 = sse2::u64x2x4_sse2; + type u128x4 = sse2::u128x4_sse2; + + #[inline(always)] + unsafe fn instance() -> Self { + Avx2Machine(PhantomData) + } +} + +pub type SSE2 = SseMachine; +pub type SSSE3 = SseMachine; +pub type SSE41 = SseMachine; +/// AVX but not AVX2: only 128-bit integer operations, but use VEX versions of everything +/// to avoid expensive SSE/VEX conflicts. +pub type AVX = SseMachine; +pub type AVX2 = Avx2Machine; + +/// Generic wrapper for unparameterized storage of any of the possible impls. +/// Converting into and out of this type should be essentially free, although it may be more +/// aligned than a particular impl requires. 
+#[allow(non_camel_case_types)] +#[derive(Copy, Clone, FromBytes, AsBytes, FromZeroes)] +#[repr(C)] +pub union vec128_storage { + u32x4: [u32; 4], + u64x2: [u64; 2], + u128x1: [u128; 1], + sse2: __m128i, +} +impl Store for vec128_storage { + #[inline(always)] + unsafe fn unpack(p: vec128_storage) -> Self { + p + } +} +impl<'a> From<&'a vec128_storage> for &'a [u32; 4] { + #[inline(always)] + fn from(x: &'a vec128_storage) -> Self { + unsafe { &x.u32x4 } + } +} +impl From<[u32; 4]> for vec128_storage { + #[inline(always)] + fn from(u32x4: [u32; 4]) -> Self { + vec128_storage { u32x4 } + } +} +impl Default for vec128_storage { + #[inline(always)] + fn default() -> Self { + vec128_storage { u128x1: [0] } + } +} +impl Eq for vec128_storage {} +impl PartialEq for vec128_storage { + #[inline(always)] + fn eq(&self, rhs: &Self) -> bool { + unsafe { self.u128x1 == rhs.u128x1 } + } +} + +#[allow(non_camel_case_types)] +#[derive(Copy, Clone)] +pub union vec256_storage { + u32x8: [u32; 8], + u64x4: [u64; 4], + u128x2: [u128; 2], + sse2: [vec128_storage; 2], + avx: __m256i, +} +impl From<[u64; 4]> for vec256_storage { + #[inline(always)] + fn from(u64x4: [u64; 4]) -> Self { + vec256_storage { u64x4 } + } +} +impl Default for vec256_storage { + #[inline(always)] + fn default() -> Self { + vec256_storage { u128x2: [0, 0] } + } +} +impl vec256_storage { + #[inline(always)] + pub fn new128(xs: [vec128_storage; 2]) -> Self { + Self { sse2: xs } + } + #[inline(always)] + pub fn split128(self) -> [vec128_storage; 2] { + unsafe { self.sse2 } + } +} +impl Eq for vec256_storage {} +impl PartialEq for vec256_storage { + #[inline(always)] + fn eq(&self, rhs: &Self) -> bool { + unsafe { self.sse2 == rhs.sse2 } + } +} + +#[allow(non_camel_case_types)] +#[derive(Copy, Clone)] +pub union vec512_storage { + u32x16: [u32; 16], + u64x8: [u64; 8], + u128x4: [u128; 4], + sse2: [vec128_storage; 4], + avx: [vec256_storage; 2], +} +impl Default for vec512_storage { + #[inline(always)] + fn default() 
-> Self { + vec512_storage { + u128x4: [0, 0, 0, 0], + } + } +} +impl vec512_storage { + #[inline(always)] + pub fn new128(xs: [vec128_storage; 4]) -> Self { + Self { sse2: xs } + } + #[inline(always)] + pub fn split128(self) -> [vec128_storage; 4] { + unsafe { self.sse2 } + } +} +impl Eq for vec512_storage {} +impl PartialEq for vec512_storage { + #[inline(always)] + fn eq(&self, rhs: &Self) -> bool { + unsafe { self.avx == rhs.avx } + } +} + +macro_rules! impl_into { + ($storage:ident, $array:ty, $name:ident) => { + impl From<$storage> for $array { + #[inline(always)] + fn from(vec: $storage) -> Self { + unsafe { vec.$name } + } + } + }; +} +impl_into!(vec128_storage, [u32; 4], u32x4); +impl_into!(vec128_storage, [u64; 2], u64x2); +impl_into!(vec128_storage, [u128; 1], u128x1); +impl_into!(vec256_storage, [u32; 8], u32x8); +impl_into!(vec256_storage, [u64; 4], u64x4); +impl_into!(vec256_storage, [u128; 2], u128x2); +impl_into!(vec512_storage, [u32; 16], u32x16); +impl_into!(vec512_storage, [u64; 8], u64x8); +impl_into!(vec512_storage, [u128; 4], u128x4); + +/// Generate the full set of optimized implementations to take advantage of the most important +/// hardware feature sets. +/// +/// This dispatcher is suitable for maximizing throughput. +#[macro_export] +macro_rules! 
dispatch { + ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { + #[cfg(feature = "std")] + $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { + #[inline(always)] + fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + use std::arch::x86_64::*; + #[target_feature(enable = "avx2")] + unsafe fn impl_avx2($($arg: $argty),*) -> $ret { + let ret = fn_impl($crate::x86_64::AVX2::instance(), $($arg),*); + _mm256_zeroupper(); + ret + } + #[target_feature(enable = "avx")] + #[target_feature(enable = "sse4.1")] + #[target_feature(enable = "ssse3")] + unsafe fn impl_avx($($arg: $argty),*) -> $ret { + let ret = fn_impl($crate::x86_64::AVX::instance(), $($arg),*); + _mm256_zeroupper(); + ret + } + #[target_feature(enable = "sse4.1")] + #[target_feature(enable = "ssse3")] + unsafe fn impl_sse41($($arg: $argty),*) -> $ret { + fn_impl($crate::x86_64::SSE41::instance(), $($arg),*) + } + #[target_feature(enable = "ssse3")] + unsafe fn impl_ssse3($($arg: $argty),*) -> $ret { + fn_impl($crate::x86_64::SSSE3::instance(), $($arg),*) + } + #[target_feature(enable = "sse2")] + unsafe fn impl_sse2($($arg: $argty),*) -> $ret { + fn_impl($crate::x86_64::SSE2::instance(), $($arg),*) + } + unsafe { + if is_x86_feature_detected!("avx2") { + impl_avx2($($arg),*) + } else if is_x86_feature_detected!("avx") { + impl_avx($($arg),*) + } else if is_x86_feature_detected!("sse4.1") { + impl_sse41($($arg),*) + } else if is_x86_feature_detected!("ssse3") { + impl_ssse3($($arg),*) + } else if is_x86_feature_detected!("sse2") { + impl_sse2($($arg),*) + } else { + unimplemented!() + } + } + } + #[cfg(not(feature = "std"))] + #[inline(always)] + $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { + unsafe fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + unsafe { + if cfg!(target_feature = "avx2") { + fn_impl($crate::x86_64::AVX2::instance(), 
$($arg),*) + } else if cfg!(target_feature = "avx") { + fn_impl($crate::x86_64::AVX::instance(), $($arg),*) + } else if cfg!(target_feature = "sse4.1") { + fn_impl($crate::x86_64::SSE41::instance(), $($arg),*) + } else if cfg!(target_feature = "ssse3") { + fn_impl($crate::x86_64::SSSE3::instance(), $($arg),*) + } else { + fn_impl($crate::x86_64::SSE2::instance(), $($arg),*) + } + } + } + }; + ($mach:ident, $MTy:ident, { $([$pub:tt $(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { + dispatch!($mach, $MTy, { + $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body + }); + } +} + +/// Generate only the basic implementations necessary to be able to operate efficiently on 128-bit +/// vectors on this platfrom. For x86-64, that would mean SSE2 and AVX. +/// +/// This dispatcher is suitable for vector operations that do not benefit from advanced hardware +/// features (e.g. because they are done infrequently), so minimizing their contribution to code +/// size is more important. +#[macro_export] +macro_rules! 
dispatch_light128 { + ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { + #[cfg(feature = "std")] + $($pub $(($krate))*)* fn $name($($arg: $argty),*) -> $ret { + #[inline(always)] + fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + use std::arch::x86_64::*; + #[target_feature(enable = "avx")] + unsafe fn impl_avx($($arg: $argty),*) -> $ret { + fn_impl($crate::x86_64::AVX::instance(), $($arg),*) + } + #[target_feature(enable = "sse2")] + unsafe fn impl_sse2($($arg: $argty),*) -> $ret { + fn_impl($crate::x86_64::SSE2::instance(), $($arg),*) + } + unsafe { + if is_x86_feature_detected!("avx") { + impl_avx($($arg),*) + } else if is_x86_feature_detected!("sse2") { + impl_sse2($($arg),*) + } else { + unimplemented!() + } + } + } + #[cfg(not(feature = "std"))] + #[inline(always)] + $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { + unsafe fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + unsafe { + if cfg!(target_feature = "avx2") { + fn_impl($crate::x86_64::AVX2::instance(), $($arg),*) + } else if cfg!(target_feature = "avx") { + fn_impl($crate::x86_64::AVX::instance(), $($arg),*) + } else if cfg!(target_feature = "sse4.1") { + fn_impl($crate::x86_64::SSE41::instance(), $($arg),*) + } else if cfg!(target_feature = "ssse3") { + fn_impl($crate::x86_64::SSSE3::instance(), $($arg),*) + } else { + fn_impl($crate::x86_64::SSE2::instance(), $($arg),*) + } + } + } + }; + ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { + dispatch_light128!($mach, $MTy, { + $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> () $body + }); + } +} + +/// Generate only the basic implementations necessary to be able to operate efficiently on 256-bit +/// vectors on this platfrom. For x86-64, that would mean SSE2, AVX, and AVX2. 
+/// +/// This dispatcher is suitable for vector operations that do not benefit from advanced hardware +/// features (e.g. because they are done infrequently), so minimizing their contribution to code +/// size is more important. +#[macro_export] +macro_rules! dispatch_light256 { + ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) -> $ret:ty $body:block }) => { + #[cfg(feature = "std")] + $([$pub $(($krate))*])* fn $name($($arg: $argty),*) -> $ret { + #[inline(always)] + fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + use std::arch::x86_64::*; + #[target_feature(enable = "avx")] + unsafe fn impl_avx($($arg: $argty),*) -> $ret { + fn_impl($crate::x86_64::AVX::instance(), $($arg),*) + } + #[target_feature(enable = "sse2")] + unsafe fn impl_sse2($($arg: $argty),*) -> $ret { + fn_impl($crate::x86_64::SSE2::instance(), $($arg),*) + } + unsafe { + if is_x86_feature_detected!("avx") { + impl_avx($($arg),*) + } else if is_x86_feature_detected!("sse2") { + impl_sse2($($arg),*) + } else { + unimplemented!() + } + } + } + #[cfg(not(feature = "std"))] + #[inline(always)] + $($pub$(($krate))*)* fn $name($($arg: $argty),*) -> $ret { + unsafe fn fn_impl<$MTy: $crate::Machine>($mach: $MTy, $($arg: $argty),*) -> $ret $body + unsafe { + if cfg!(target_feature = "avx2") { + fn_impl($crate::x86_64::AVX2::instance(), $($arg),*) + } else if cfg!(target_feature = "avx") { + fn_impl($crate::x86_64::AVX::instance(), $($arg),*) + } else if cfg!(target_feature = "sse4.1") { + fn_impl($crate::x86_64::SSE41::instance(), $($arg),*) + } else if cfg!(target_feature = "ssse3") { + fn_impl($crate::x86_64::SSSE3::instance(), $($arg),*) + } else { + fn_impl($crate::x86_64::SSE2::instance(), $($arg),*) + } + } + } + }; + ($mach:ident, $MTy:ident, { $([$pub:tt$(($krate:tt))*])* fn $name:ident($($arg:ident: $argty:ty),* $(,)*) $body:block }) => { + dispatch_light256!($mach, $MTy, { + $([$pub $(($krate))*])* fn 
$name($($arg: $argty),*) -> () $body + }); + } +} diff --git a/src/rust/vendor/ppv-lite86/src/x86_64/sse2.rs b/src/rust/vendor/ppv-lite86/src/x86_64/sse2.rs new file mode 100644 index 000000000..4b95911d4 --- /dev/null +++ b/src/rust/vendor/ppv-lite86/src/x86_64/sse2.rs @@ -0,0 +1,1707 @@ +use crate::soft::{x2, x4}; +use crate::types::*; +use crate::vec128_storage; +use crate::x86_64::Avx2Machine; +use crate::x86_64::SseMachine as Machine86; +use crate::x86_64::{NoS3, NoS4, YesS3, YesS4}; +use core::arch::x86_64::*; +use core::marker::PhantomData; +use core::ops::{ + Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not, +}; +use zerocopy::{transmute, AsBytes, FromBytes, FromZeroes}; + +macro_rules! impl_binop { + ($vec:ident, $trait:ident, $fn:ident, $impl_fn:ident) => { + impl $trait for $vec { + type Output = Self; + #[inline(always)] + fn $fn(self, rhs: Self) -> Self::Output { + Self::new(unsafe { $impl_fn(self.x, rhs.x) }) + } + } + }; +} + +macro_rules! impl_binop_assign { + ($vec:ident, $trait:ident, $fn_assign:ident, $fn:ident) => { + impl $trait for $vec + where + $vec: Copy, + { + #[inline(always)] + fn $fn_assign(&mut self, rhs: Self) { + *self = self.$fn(rhs); + } + } + }; +} + +macro_rules! 
def_vec { + ($vec:ident, $word:ident) => { + #[allow(non_camel_case_types)] + #[derive(Copy, Clone, FromBytes, AsBytes, FromZeroes)] + #[repr(transparent)] + pub struct $vec { + x: __m128i, + s3: PhantomData, + s4: PhantomData, + ni: PhantomData, + } + + impl Store for $vec { + #[inline(always)] + unsafe fn unpack(x: vec128_storage) -> Self { + Self::new(x.sse2) + } + } + impl From<$vec> for vec128_storage { + #[inline(always)] + fn from(x: $vec) -> Self { + vec128_storage { sse2: x.x } + } + } + impl $vec { + #[inline(always)] + fn new(x: __m128i) -> Self { + $vec { + x, + s3: PhantomData, + s4: PhantomData, + ni: PhantomData, + } + } + } + + impl StoreBytes for $vec + where + Self: BSwap, + { + #[inline(always)] + unsafe fn unsafe_read_le(input: &[u8]) -> Self { + assert_eq!(input.len(), 16); + Self::new(_mm_loadu_si128(input.as_ptr() as *const _)) + } + #[inline(always)] + unsafe fn unsafe_read_be(input: &[u8]) -> Self { + assert_eq!(input.len(), 16); + Self::new(_mm_loadu_si128(input.as_ptr() as *const _)).bswap() + } + #[inline(always)] + fn write_le(self, out: &mut [u8]) { + assert_eq!(out.len(), 16); + unsafe { _mm_storeu_si128(out.as_mut_ptr() as *mut _, self.x) } + } + #[inline(always)] + fn write_be(self, out: &mut [u8]) { + assert_eq!(out.len(), 16); + let x = self.bswap().x; + unsafe { + _mm_storeu_si128(out.as_mut_ptr() as *mut _, x); + } + } + } + + impl Default for $vec { + #[inline(always)] + fn default() -> Self { + Self::new(unsafe { _mm_setzero_si128() }) + } + } + + impl Not for $vec { + type Output = Self; + #[inline(always)] + fn not(self) -> Self::Output { + unsafe { + let ff = _mm_set1_epi64x(-1i64); + self ^ Self::new(ff) + } + } + } + + impl BitOps0 for $vec {} + impl_binop!($vec, BitAnd, bitand, _mm_and_si128); + impl_binop!($vec, BitOr, bitor, _mm_or_si128); + impl_binop!($vec, BitXor, bitxor, _mm_xor_si128); + impl_binop_assign!($vec, BitAndAssign, bitand_assign, bitand); + impl_binop_assign!($vec, BitOrAssign, bitor_assign, bitor); + 
impl_binop_assign!($vec, BitXorAssign, bitxor_assign, bitxor); + impl AndNot for $vec { + type Output = Self; + #[inline(always)] + fn andnot(self, rhs: Self) -> Self { + Self::new(unsafe { _mm_andnot_si128(self.x, rhs.x) }) + } + } + }; +} + +macro_rules! impl_bitops32 { + ($vec:ident) => { + impl BitOps32 for $vec where + $vec: RotateEachWord32 + { + } + }; +} + +macro_rules! impl_bitops64 { + ($vec:ident) => { + impl_bitops32!($vec); + impl BitOps64 for $vec where + $vec: RotateEachWord64 + RotateEachWord32 + { + } + }; +} + +macro_rules! impl_bitops128 { + ($vec:ident) => { + impl_bitops64!($vec); + impl BitOps128 for $vec where + $vec: RotateEachWord128 + { + } + }; +} + +macro_rules! rotr_32_s3 { + ($name:ident, $k0:expr, $k1:expr) => { + #[inline(always)] + fn $name(self) -> Self { + Self::new(unsafe { _mm_shuffle_epi8(self.x, _mm_set_epi64x($k0, $k1)) }) + } + }; +} +macro_rules! rotr_32 { + ($name:ident, $i:expr) => { + #[inline(always)] + fn $name(self) -> Self { + Self::new(unsafe { + _mm_or_si128( + _mm_srli_epi32(self.x, $i as i32), + _mm_slli_epi32(self.x, 32 - $i as i32), + ) + }) + } + }; +} +impl RotateEachWord32 for u32x4_sse2 { + rotr_32!(rotate_each_word_right7, 7); + rotr_32_s3!( + rotate_each_word_right8, + 0x0c0f_0e0d_080b_0a09, + 0x0407_0605_0003_0201 + ); + rotr_32!(rotate_each_word_right11, 11); + rotr_32!(rotate_each_word_right12, 12); + rotr_32_s3!( + rotate_each_word_right16, + 0x0d0c_0f0e_0908_0b0a, + 0x0504_0706_0100_0302 + ); + rotr_32!(rotate_each_word_right20, 20); + rotr_32_s3!( + rotate_each_word_right24, + 0x0e0d_0c0f_0a09_080b, + 0x0605_0407_0201_0003 + ); + rotr_32!(rotate_each_word_right25, 25); +} +impl RotateEachWord32 for u32x4_sse2 { + rotr_32!(rotate_each_word_right7, 7); + rotr_32!(rotate_each_word_right8, 8); + rotr_32!(rotate_each_word_right11, 11); + rotr_32!(rotate_each_word_right12, 12); + #[inline(always)] + fn rotate_each_word_right16(self) -> Self { + Self::new(swap16_s2(self.x)) + } + 
rotr_32!(rotate_each_word_right20, 20); + rotr_32!(rotate_each_word_right24, 24); + rotr_32!(rotate_each_word_right25, 25); +} + +macro_rules! rotr_64_s3 { + ($name:ident, $k0:expr, $k1:expr) => { + #[inline(always)] + fn $name(self) -> Self { + Self::new(unsafe { _mm_shuffle_epi8(self.x, _mm_set_epi64x($k0, $k1)) }) + } + }; +} +macro_rules! rotr_64 { + ($name:ident, $i:expr) => { + #[inline(always)] + fn $name(self) -> Self { + Self::new(unsafe { + _mm_or_si128( + _mm_srli_epi64(self.x, $i as i32), + _mm_slli_epi64(self.x, 64 - $i as i32), + ) + }) + } + }; +} +impl RotateEachWord32 for u64x2_sse2 { + rotr_64!(rotate_each_word_right7, 7); + rotr_64_s3!( + rotate_each_word_right8, + 0x080f_0e0d_0c0b_0a09, + 0x0007_0605_0403_0201 + ); + rotr_64!(rotate_each_word_right11, 11); + rotr_64!(rotate_each_word_right12, 12); + rotr_64_s3!( + rotate_each_word_right16, + 0x0908_0f0e_0d0c_0b0a, + 0x0100_0706_0504_0302 + ); + rotr_64!(rotate_each_word_right20, 20); + rotr_64_s3!( + rotate_each_word_right24, + 0x0a09_080f_0e0d_0c0b, + 0x0201_0007_0605_0403 + ); + rotr_64!(rotate_each_word_right25, 25); +} +impl RotateEachWord32 for u64x2_sse2 { + rotr_64!(rotate_each_word_right7, 7); + rotr_64!(rotate_each_word_right8, 8); + rotr_64!(rotate_each_word_right11, 11); + rotr_64!(rotate_each_word_right12, 12); + #[inline(always)] + fn rotate_each_word_right16(self) -> Self { + Self::new(swap16_s2(self.x)) + } + rotr_64!(rotate_each_word_right20, 20); + rotr_64!(rotate_each_word_right24, 24); + rotr_64!(rotate_each_word_right25, 25); +} +impl RotateEachWord64 for u64x2_sse2 { + #[inline(always)] + fn rotate_each_word_right32(self) -> Self { + Self::new(unsafe { _mm_shuffle_epi32(self.x, 0b10110001) }) + } +} + +macro_rules! 
rotr_128 { + ($name:ident, $i:expr) => { + #[inline(always)] + fn $name(self) -> Self { + Self::new(unsafe { + _mm_or_si128( + _mm_srli_si128(self.x, $i as i32), + _mm_slli_si128(self.x, 128 - $i as i32), + ) + }) + } + }; +} +// TODO: completely unoptimized +impl RotateEachWord32 for u128x1_sse2 { + rotr_128!(rotate_each_word_right7, 7); + rotr_128!(rotate_each_word_right8, 8); + rotr_128!(rotate_each_word_right11, 11); + rotr_128!(rotate_each_word_right12, 12); + rotr_128!(rotate_each_word_right16, 16); + rotr_128!(rotate_each_word_right20, 20); + rotr_128!(rotate_each_word_right24, 24); + rotr_128!(rotate_each_word_right25, 25); +} +// TODO: completely unoptimized +impl RotateEachWord64 for u128x1_sse2 { + rotr_128!(rotate_each_word_right32, 32); +} +impl RotateEachWord128 for u128x1_sse2 {} + +def_vec!(u32x4_sse2, u32); +def_vec!(u64x2_sse2, u64); +def_vec!(u128x1_sse2, u128); + +impl MultiLane<[u32; 4]> for u32x4_sse2 { + #[inline(always)] + fn to_lanes(self) -> [u32; 4] { + unsafe { + let x = _mm_cvtsi128_si64(self.x) as u64; + let y = _mm_extract_epi64(self.x, 1) as u64; + [x as u32, (x >> 32) as u32, y as u32, (y >> 32) as u32] + } + } + #[inline(always)] + fn from_lanes(xs: [u32; 4]) -> Self { + unsafe { + let mut x = _mm_cvtsi64_si128((xs[0] as u64 | ((xs[1] as u64) << 32)) as i64); + x = _mm_insert_epi64(x, (xs[2] as u64 | ((xs[3] as u64) << 32)) as i64, 1); + Self::new(x) + } + } +} +impl MultiLane<[u32; 4]> for u32x4_sse2 { + #[inline(always)] + fn to_lanes(self) -> [u32; 4] { + unsafe { + let x = _mm_cvtsi128_si64(self.x) as u64; + let y = _mm_cvtsi128_si64(_mm_shuffle_epi32(self.x, 0b11101110)) as u64; + [x as u32, (x >> 32) as u32, y as u32, (y >> 32) as u32] + } + } + #[inline(always)] + fn from_lanes(xs: [u32; 4]) -> Self { + unsafe { + let x = (xs[0] as u64 | ((xs[1] as u64) << 32)) as i64; + let y = (xs[2] as u64 | ((xs[3] as u64) << 32)) as i64; + let x = _mm_cvtsi64_si128(x); + let y = _mm_slli_si128(_mm_cvtsi64_si128(y), 8); + 
Self::new(_mm_or_si128(x, y)) + } + } +} +impl MultiLane<[u64; 2]> for u64x2_sse2 { + #[inline(always)] + fn to_lanes(self) -> [u64; 2] { + unsafe { + [ + _mm_cvtsi128_si64(self.x) as u64, + _mm_extract_epi64(self.x, 1) as u64, + ] + } + } + #[inline(always)] + fn from_lanes(xs: [u64; 2]) -> Self { + unsafe { + let mut x = _mm_cvtsi64_si128(xs[0] as i64); + x = _mm_insert_epi64(x, xs[1] as i64, 1); + Self::new(x) + } + } +} +impl MultiLane<[u64; 2]> for u64x2_sse2 { + #[inline(always)] + fn to_lanes(self) -> [u64; 2] { + unsafe { + [ + _mm_cvtsi128_si64(self.x) as u64, + _mm_cvtsi128_si64(_mm_srli_si128(self.x, 8)) as u64, + ] + } + } + #[inline(always)] + fn from_lanes(xs: [u64; 2]) -> Self { + unsafe { + let x = _mm_cvtsi64_si128(xs[0] as i64); + let y = _mm_slli_si128(_mm_cvtsi64_si128(xs[1] as i64), 8); + Self::new(_mm_or_si128(x, y)) + } + } +} +impl MultiLane<[u128; 1]> for u128x1_sse2 { + #[inline(always)] + fn to_lanes(self) -> [u128; 1] { + unimplemented!() + } + #[inline(always)] + fn from_lanes(xs: [u128; 1]) -> Self { + unimplemented!("{:?}", xs) + } +} + +impl MultiLane<[u64; 4]> for u64x4_sse2 +where + u64x2_sse2: MultiLane<[u64; 2]> + Copy, +{ + #[inline(always)] + fn to_lanes(self) -> [u64; 4] { + let (a, b) = (self.0[0].to_lanes(), self.0[1].to_lanes()); + [a[0], a[1], b[0], b[1]] + } + #[inline(always)] + fn from_lanes(xs: [u64; 4]) -> Self { + let (a, b) = ( + u64x2_sse2::from_lanes([xs[0], xs[1]]), + u64x2_sse2::from_lanes([xs[2], xs[3]]), + ); + x2::new([a, b]) + } +} + +macro_rules! 
impl_into { + ($from:ident, $to:ident) => { + impl From<$from> for $to { + #[inline(always)] + fn from(x: $from) -> Self { + $to::new(x.x) + } + } + }; +} + +impl_into!(u128x1_sse2, u32x4_sse2); +impl_into!(u128x1_sse2, u64x2_sse2); + +impl_bitops32!(u32x4_sse2); +impl_bitops64!(u64x2_sse2); +impl_bitops128!(u128x1_sse2); + +impl ArithOps for u32x4_sse2 where + u32x4_sse2: BSwap +{ +} +impl ArithOps for u64x2_sse2 where + u64x2_sse2: BSwap +{ +} +impl_binop!(u32x4_sse2, Add, add, _mm_add_epi32); +impl_binop!(u64x2_sse2, Add, add, _mm_add_epi64); +impl_binop_assign!(u32x4_sse2, AddAssign, add_assign, add); +impl_binop_assign!(u64x2_sse2, AddAssign, add_assign, add); + +impl u32x4> for u32x4_sse2 +where + u32x4_sse2: RotateEachWord32 + BSwap + MultiLane<[u32; 4]> + Vec4, + Machine86: Machine, +{ +} +impl u64x2> for u64x2_sse2 +where + u64x2_sse2: + RotateEachWord64 + RotateEachWord32 + BSwap + MultiLane<[u64; 2]> + Vec2, + Machine86: Machine, +{ +} +impl u128x1> for u128x1_sse2 +where + u128x1_sse2: Swap64 + RotateEachWord64 + RotateEachWord32 + BSwap, + Machine86: Machine, + u128x1_sse2: Into< as Machine>::u32x4>, + u128x1_sse2: Into< as Machine>::u64x2>, +{ +} + +impl u32x4> for u32x4_sse2 +where + u32x4_sse2: RotateEachWord32 + BSwap + MultiLane<[u32; 4]> + Vec4, + Machine86: Machine, +{ +} +impl u64x2> for u64x2_sse2 +where + u64x2_sse2: + RotateEachWord64 + RotateEachWord32 + BSwap + MultiLane<[u64; 2]> + Vec2, + Machine86: Machine, +{ +} +impl u128x1> for u128x1_sse2 +where + u128x1_sse2: Swap64 + RotateEachWord64 + RotateEachWord32 + BSwap, + Machine86: Machine, + u128x1_sse2: Into< as Machine>::u32x4>, + u128x1_sse2: Into< as Machine>::u64x2>, +{ +} + +impl UnsafeFrom<[u32; 4]> for u32x4_sse2 { + #[inline(always)] + unsafe fn unsafe_from(xs: [u32; 4]) -> Self { + Self::new(_mm_set_epi32( + xs[3] as i32, + xs[2] as i32, + xs[1] as i32, + xs[0] as i32, + )) + } +} + +impl Vec4 for u32x4_sse2 +where + Self: MultiLane<[u32; 4]>, +{ + #[inline(always)] + fn 
extract(self, i: u32) -> u32 { + self.to_lanes()[i as usize] + } + #[inline(always)] + fn insert(self, v: u32, i: u32) -> Self { + Self::new(unsafe { + match i { + 0 => _mm_insert_epi32(self.x, v as i32, 0), + 1 => _mm_insert_epi32(self.x, v as i32, 1), + 2 => _mm_insert_epi32(self.x, v as i32, 2), + 3 => _mm_insert_epi32(self.x, v as i32, 3), + _ => unreachable!(), + } + }) + } +} +impl Vec4 for u32x4_sse2 +where + Self: MultiLane<[u32; 4]>, +{ + #[inline(always)] + fn extract(self, i: u32) -> u32 { + self.to_lanes()[i as usize] + } + #[inline(always)] + fn insert(self, v: u32, i: u32) -> Self { + Self::new(unsafe { + match i { + 0 => { + let x = _mm_andnot_si128(_mm_cvtsi32_si128(-1), self.x); + _mm_or_si128(x, _mm_cvtsi32_si128(v as i32)) + } + 1 => { + let mut x = _mm_shuffle_epi32(self.x, 0b0111_1000); + x = _mm_slli_si128(x, 4); + x = _mm_or_si128(x, _mm_cvtsi32_si128(v as i32)); + _mm_shuffle_epi32(x, 0b1110_0001) + } + 2 => { + let mut x = _mm_shuffle_epi32(self.x, 0b1011_0100); + x = _mm_slli_si128(x, 4); + x = _mm_or_si128(x, _mm_cvtsi32_si128(v as i32)); + _mm_shuffle_epi32(x, 0b1100_1001) + } + 3 => { + let mut x = _mm_slli_si128(self.x, 4); + x = _mm_or_si128(x, _mm_cvtsi32_si128(v as i32)); + _mm_shuffle_epi32(x, 0b0011_1001) + } + _ => unreachable!(), + } + }) + } +} + +impl LaneWords4 for u32x4_sse2 { + #[inline(always)] + fn shuffle_lane_words2301(self) -> Self { + self.shuffle2301() + } + #[inline(always)] + fn shuffle_lane_words1230(self) -> Self { + self.shuffle1230() + } + #[inline(always)] + fn shuffle_lane_words3012(self) -> Self { + self.shuffle3012() + } +} + +impl Words4 for u32x4_sse2 { + #[inline(always)] + fn shuffle2301(self) -> Self { + Self::new(unsafe { _mm_shuffle_epi32(self.x, 0b0100_1110) }) + } + #[inline(always)] + fn shuffle1230(self) -> Self { + Self::new(unsafe { _mm_shuffle_epi32(self.x, 0b1001_0011) }) + } + #[inline(always)] + fn shuffle3012(self) -> Self { + Self::new(unsafe { _mm_shuffle_epi32(self.x, 0b0011_1001) }) + 
} +} + +impl Words4 for u64x4_sse2 { + #[inline(always)] + fn shuffle2301(self) -> Self { + x2::new([u64x2_sse2::new(self.0[1].x), u64x2_sse2::new(self.0[0].x)]) + } + #[inline(always)] + fn shuffle3012(self) -> Self { + unsafe { + x2::new([ + u64x2_sse2::new(_mm_alignr_epi8(self.0[1].x, self.0[0].x, 8)), + u64x2_sse2::new(_mm_alignr_epi8(self.0[0].x, self.0[1].x, 8)), + ]) + } + } + #[inline(always)] + fn shuffle1230(self) -> Self { + unsafe { + x2::new([ + u64x2_sse2::new(_mm_alignr_epi8(self.0[0].x, self.0[1].x, 8)), + u64x2_sse2::new(_mm_alignr_epi8(self.0[1].x, self.0[0].x, 8)), + ]) + } + } +} +impl Words4 for u64x4_sse2 { + #[inline(always)] + fn shuffle2301(self) -> Self { + x2::new([u64x2_sse2::new(self.0[1].x), u64x2_sse2::new(self.0[0].x)]) + } + #[inline(always)] + fn shuffle3012(self) -> Self { + unsafe { + let a = _mm_srli_si128(self.0[0].x, 8); + let b = _mm_slli_si128(self.0[0].x, 8); + let c = _mm_srli_si128(self.0[1].x, 8); + let d = _mm_slli_si128(self.0[1].x, 8); + let da = _mm_or_si128(d, a); + let bc = _mm_or_si128(b, c); + x2::new([u64x2_sse2::new(da), u64x2_sse2::new(bc)]) + } + } + #[inline(always)] + fn shuffle1230(self) -> Self { + unsafe { + let a = _mm_srli_si128(self.0[0].x, 8); + let b = _mm_slli_si128(self.0[0].x, 8); + let c = _mm_srli_si128(self.0[1].x, 8); + let d = _mm_slli_si128(self.0[1].x, 8); + let da = _mm_or_si128(d, a); + let bc = _mm_or_si128(b, c); + x2::new([u64x2_sse2::new(bc), u64x2_sse2::new(da)]) + } + } +} + +impl UnsafeFrom<[u64; 2]> for u64x2_sse2 { + #[inline(always)] + unsafe fn unsafe_from(xs: [u64; 2]) -> Self { + Self::new(_mm_set_epi64x(xs[1] as i64, xs[0] as i64)) + } +} + +impl Vec2 for u64x2_sse2 { + #[inline(always)] + fn extract(self, i: u32) -> u64 { + unsafe { + match i { + 0 => _mm_cvtsi128_si64(self.x) as u64, + 1 => _mm_extract_epi64(self.x, 1) as u64, + _ => unreachable!(), + } + } + } + #[inline(always)] + fn insert(self, x: u64, i: u32) -> Self { + Self::new(unsafe { + match i { + 0 => 
_mm_insert_epi64(self.x, x as i64, 0), + 1 => _mm_insert_epi64(self.x, x as i64, 1), + _ => unreachable!(), + } + }) + } +} +impl Vec2 for u64x2_sse2 { + #[inline(always)] + fn extract(self, i: u32) -> u64 { + unsafe { + match i { + 0 => _mm_cvtsi128_si64(self.x) as u64, + 1 => _mm_cvtsi128_si64(_mm_shuffle_epi32(self.x, 0b11101110)) as u64, + _ => unreachable!(), + } + } + } + #[inline(always)] + fn insert(self, x: u64, i: u32) -> Self { + Self::new(unsafe { + match i { + 0 => _mm_or_si128( + _mm_andnot_si128(_mm_cvtsi64_si128(-1), self.x), + _mm_cvtsi64_si128(x as i64), + ), + 1 => _mm_or_si128( + _mm_move_epi64(self.x), + _mm_slli_si128(_mm_cvtsi64_si128(x as i64), 8), + ), + _ => unreachable!(), + } + }) + } +} + +impl BSwap for u32x4_sse2 { + #[inline(always)] + fn bswap(self) -> Self { + Self::new(unsafe { + let k = _mm_set_epi64x(0x0c0d_0e0f_0809_0a0b, 0x0405_0607_0001_0203); + _mm_shuffle_epi8(self.x, k) + }) + } +} +#[inline(always)] +fn bswap32_s2(x: __m128i) -> __m128i { + unsafe { + let mut y = _mm_unpacklo_epi8(x, _mm_setzero_si128()); + y = _mm_shufflehi_epi16(y, 0b0001_1011); + y = _mm_shufflelo_epi16(y, 0b0001_1011); + let mut z = _mm_unpackhi_epi8(x, _mm_setzero_si128()); + z = _mm_shufflehi_epi16(z, 0b0001_1011); + z = _mm_shufflelo_epi16(z, 0b0001_1011); + _mm_packus_epi16(y, z) + } +} +impl BSwap for u32x4_sse2 { + #[inline(always)] + fn bswap(self) -> Self { + Self::new(bswap32_s2(self.x)) + } +} + +impl BSwap for u64x2_sse2 { + #[inline(always)] + fn bswap(self) -> Self { + Self::new(unsafe { + let k = _mm_set_epi64x(0x0809_0a0b_0c0d_0e0f, 0x0001_0203_0405_0607); + _mm_shuffle_epi8(self.x, k) + }) + } +} +impl BSwap for u64x2_sse2 { + #[inline(always)] + fn bswap(self) -> Self { + Self::new(unsafe { bswap32_s2(_mm_shuffle_epi32(self.x, 0b1011_0001)) }) + } +} + +impl BSwap for u128x1_sse2 { + #[inline(always)] + fn bswap(self) -> Self { + Self::new(unsafe { + let k = _mm_set_epi64x(0x0f0e_0d0c_0b0a_0908, 0x0706_0504_0302_0100); + 
_mm_shuffle_epi8(self.x, k) + }) + } +} +impl BSwap for u128x1_sse2 { + #[inline(always)] + fn bswap(self) -> Self { + unimplemented!() + } +} + +macro_rules! swapi { + ($x:expr, $i:expr, $k:expr) => { + unsafe { + const K: u8 = $k; + let k = _mm_set1_epi8(K as i8); + u128x1_sse2::new(_mm_or_si128( + _mm_srli_epi16(_mm_and_si128($x.x, k), $i), + _mm_and_si128(_mm_slli_epi16($x.x, $i), k), + )) + } + }; +} +#[inline(always)] +fn swap16_s2(x: __m128i) -> __m128i { + unsafe { _mm_shufflehi_epi16(_mm_shufflelo_epi16(x, 0b1011_0001), 0b1011_0001) } +} +impl Swap64 for u128x1_sse2 { + #[inline(always)] + fn swap1(self) -> Self { + swapi!(self, 1, 0xaa) + } + #[inline(always)] + fn swap2(self) -> Self { + swapi!(self, 2, 0xcc) + } + #[inline(always)] + fn swap4(self) -> Self { + swapi!(self, 4, 0xf0) + } + #[inline(always)] + fn swap8(self) -> Self { + u128x1_sse2::new(unsafe { + let k = _mm_set_epi64x(0x0e0f_0c0d_0a0b_0809, 0x0607_0405_0203_0001); + _mm_shuffle_epi8(self.x, k) + }) + } + #[inline(always)] + fn swap16(self) -> Self { + u128x1_sse2::new(unsafe { + let k = _mm_set_epi64x(0x0d0c_0f0e_0908_0b0a, 0x0504_0706_0100_0302); + _mm_shuffle_epi8(self.x, k) + }) + } + #[inline(always)] + fn swap32(self) -> Self { + u128x1_sse2::new(unsafe { _mm_shuffle_epi32(self.x, 0b1011_0001) }) + } + #[inline(always)] + fn swap64(self) -> Self { + u128x1_sse2::new(unsafe { _mm_shuffle_epi32(self.x, 0b0100_1110) }) + } +} +impl Swap64 for u128x1_sse2 { + #[inline(always)] + fn swap1(self) -> Self { + swapi!(self, 1, 0xaa) + } + #[inline(always)] + fn swap2(self) -> Self { + swapi!(self, 2, 0xcc) + } + #[inline(always)] + fn swap4(self) -> Self { + swapi!(self, 4, 0xf0) + } + #[inline(always)] + fn swap8(self) -> Self { + u128x1_sse2::new(unsafe { + _mm_or_si128(_mm_slli_epi16(self.x, 8), _mm_srli_epi16(self.x, 8)) + }) + } + #[inline(always)] + fn swap16(self) -> Self { + u128x1_sse2::new(swap16_s2(self.x)) + } + #[inline(always)] + fn swap32(self) -> Self { + 
u128x1_sse2::new(unsafe { _mm_shuffle_epi32(self.x, 0b1011_0001) }) + } + #[inline(always)] + fn swap64(self) -> Self { + u128x1_sse2::new(unsafe { _mm_shuffle_epi32(self.x, 0b0100_1110) }) + } +} + +#[derive(Copy, Clone)] +pub struct G0; +#[derive(Copy, Clone)] +pub struct G1; + +#[allow(non_camel_case_types)] +pub type u32x4x2_sse2 = x2, G0>; +#[allow(non_camel_case_types)] +pub type u64x2x2_sse2 = x2, G0>; +#[allow(non_camel_case_types)] +pub type u64x4_sse2 = x2, G1>; +#[allow(non_camel_case_types)] +pub type u128x2_sse2 = x2, G0>; + +#[allow(non_camel_case_types)] +pub type u32x4x4_sse2 = x4>; +#[allow(non_camel_case_types)] +pub type u64x2x4_sse2 = x4>; +#[allow(non_camel_case_types)] +pub type u128x4_sse2 = x4>; + +impl Vector<[u32; 16]> for u32x4x4_sse2 { + #[inline(always)] + fn to_scalars(self) -> [u32; 16] { + transmute!(self) + } +} + +impl u32x4x2> for u32x4x2_sse2 +where + u32x4_sse2: RotateEachWord32 + BSwap, + Machine86: Machine, + u32x4x2_sse2: MultiLane<[ as Machine>::u32x4; 2]>, + u32x4x2_sse2: Vec2< as Machine>::u32x4>, +{ +} +impl u64x2x2> for u64x2x2_sse2 +where + u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, + Machine86: Machine, + u64x2x2_sse2: MultiLane<[ as Machine>::u64x2; 2]>, + u64x2x2_sse2: Vec2< as Machine>::u64x2>, +{ +} +impl u64x4> for u64x4_sse2 +where + u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, + Machine86: Machine, + u64x4_sse2: MultiLane<[u64; 4]> + Vec4 + Words4, +{ +} +impl u128x2> for u128x2_sse2 +where + u128x1_sse2: Swap64 + BSwap, + Machine86: Machine, + u128x2_sse2: MultiLane<[ as Machine>::u128x1; 2]>, + u128x2_sse2: Vec2< as Machine>::u128x1>, + u128x2_sse2: Into< as Machine>::u32x4x2>, + u128x2_sse2: Into< as Machine>::u64x2x2>, + u128x2_sse2: Into< as Machine>::u64x4>, +{ +} + +impl u32x4x2> for u32x4x2_sse2 +where + u32x4_sse2: RotateEachWord32 + BSwap, + Avx2Machine: Machine, + u32x4x2_sse2: MultiLane<[ as Machine>::u32x4; 2]>, + u32x4x2_sse2: Vec2< as Machine>::u32x4>, +{ +} +impl 
u64x2x2> for u64x2x2_sse2 +where + u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, + Avx2Machine: Machine, + u64x2x2_sse2: MultiLane<[ as Machine>::u64x2; 2]>, + u64x2x2_sse2: Vec2< as Machine>::u64x2>, +{ +} +impl u64x4> for u64x4_sse2 +where + u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, + Avx2Machine: Machine, + u64x4_sse2: MultiLane<[u64; 4]> + Vec4 + Words4, +{ +} +impl u128x2> for u128x2_sse2 +where + u128x1_sse2: Swap64 + BSwap, + Avx2Machine: Machine, + u128x2_sse2: MultiLane<[ as Machine>::u128x1; 2]>, + u128x2_sse2: Vec2< as Machine>::u128x1>, + u128x2_sse2: Into< as Machine>::u32x4x2>, + u128x2_sse2: Into< as Machine>::u64x2x2>, + u128x2_sse2: Into< as Machine>::u64x4>, +{ +} + +impl Vec4 for u64x4_sse2 +where + u64x2_sse2: Copy + Vec2, +{ + #[inline(always)] + fn extract(self, i: u32) -> u64 { + match i { + 0 => self.0[0].extract(0), + 1 => self.0[0].extract(1), + 2 => self.0[1].extract(0), + 3 => self.0[1].extract(1), + _ => panic!(), + } + } + #[inline(always)] + fn insert(mut self, w: u64, i: u32) -> Self { + match i { + 0 => self.0[0] = self.0[0].insert(w, 0), + 1 => self.0[0] = self.0[0].insert(w, 1), + 2 => self.0[1] = self.0[1].insert(w, 0), + 3 => self.0[1] = self.0[1].insert(w, 1), + _ => panic!(), + }; + self + } +} + +impl u32x4x4> for u32x4x4_sse2 +where + u32x4_sse2: RotateEachWord32 + BSwap, + Machine86: Machine, + u32x4x4_sse2: MultiLane<[ as Machine>::u32x4; 4]>, + u32x4x4_sse2: Vec4< as Machine>::u32x4>, + u32x4x4_sse2: Vec4Ext< as Machine>::u32x4>, + u32x4x4_sse2: Vector<[u32; 16]>, +{ +} +impl u64x2x4> for u64x2x4_sse2 +where + u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, + Machine86: Machine, + u64x2x4_sse2: MultiLane<[ as Machine>::u64x2; 4]>, + u64x2x4_sse2: Vec4< as Machine>::u64x2>, +{ +} +impl u128x4> for u128x4_sse2 +where + u128x1_sse2: Swap64 + BSwap, + Machine86: Machine, + u128x4_sse2: MultiLane<[ as Machine>::u128x1; 4]>, + u128x4_sse2: Vec4< as Machine>::u128x1>, + u128x4_sse2: Into< as 
Machine>::u32x4x4>, + u128x4_sse2: Into< as Machine>::u64x2x4>, +{ +} + +impl u64x2x4> for u64x2x4_sse2 +where + u64x2_sse2: RotateEachWord64 + RotateEachWord32 + BSwap, + Avx2Machine: Machine, + u64x2x4_sse2: MultiLane<[ as Machine>::u64x2; 4]>, + u64x2x4_sse2: Vec4< as Machine>::u64x2>, +{ +} +impl u128x4> for u128x4_sse2 +where + u128x1_sse2: Swap64 + BSwap, + Avx2Machine: Machine, + u128x4_sse2: MultiLane<[ as Machine>::u128x1; 4]>, + u128x4_sse2: Vec4< as Machine>::u128x1>, + u128x4_sse2: Into< as Machine>::u32x4x4>, + u128x4_sse2: Into< as Machine>::u64x2x4>, +{ +} + +macro_rules! impl_into_x { + ($from:ident, $to:ident) => { + impl From, Gf>> + for x2<$to, Gt> + { + #[inline(always)] + fn from(x: x2<$from, Gf>) -> Self { + x2::new([$to::from(x.0[0]), $to::from(x.0[1])]) + } + } + impl From>> for x4<$to> { + #[inline(always)] + fn from(x: x4<$from>) -> Self { + x4::new([ + $to::from(x.0[0]), + $to::from(x.0[1]), + $to::from(x.0[2]), + $to::from(x.0[3]), + ]) + } + } + }; +} +impl_into_x!(u128x1_sse2, u64x2_sse2); +impl_into_x!(u128x1_sse2, u32x4_sse2); + +///// Debugging + +use core::fmt::{Debug, Formatter, Result}; + +impl PartialEq for x2 { + #[inline(always)] + fn eq(&self, rhs: &Self) -> bool { + self.0[0] == rhs.0[0] && self.0[1] == rhs.0[1] + } +} + +#[allow(unused)] +#[inline(always)] +unsafe fn eq128_s4(x: __m128i, y: __m128i) -> bool { + let q = _mm_shuffle_epi32(_mm_cmpeq_epi64(x, y), 0b1100_0110); + _mm_cvtsi128_si64(q) == -1 +} + +#[inline(always)] +unsafe fn eq128_s2(x: __m128i, y: __m128i) -> bool { + let q = _mm_cmpeq_epi32(x, y); + let p = _mm_cvtsi128_si64(_mm_srli_si128(q, 8)); + let q = _mm_cvtsi128_si64(q); + (p & q) == -1 +} + +impl PartialEq for u32x4_sse2 { + #[inline(always)] + fn eq(&self, rhs: &Self) -> bool { + unsafe { eq128_s2(self.x, rhs.x) } + } +} +impl Debug for u32x4_sse2 +where + Self: Copy + MultiLane<[u32; 4]>, +{ + #[cold] + fn fmt(&self, fmt: &mut Formatter) -> Result { + fmt.write_fmt(format_args!("{:08x?}", 
&self.to_lanes())) + } +} + +impl PartialEq for u64x2_sse2 { + #[inline(always)] + fn eq(&self, rhs: &Self) -> bool { + unsafe { eq128_s2(self.x, rhs.x) } + } +} +impl Debug for u64x2_sse2 +where + Self: Copy + MultiLane<[u64; 2]>, +{ + #[cold] + fn fmt(&self, fmt: &mut Formatter) -> Result { + fmt.write_fmt(format_args!("{:016x?}", &self.to_lanes())) + } +} + +impl Debug for u64x4_sse2 +where + u64x2_sse2: Copy + MultiLane<[u64; 2]>, +{ + #[cold] + fn fmt(&self, fmt: &mut Formatter) -> Result { + let (a, b) = (self.0[0].to_lanes(), self.0[1].to_lanes()); + fmt.write_fmt(format_args!("{:016x?}", &[a[0], a[1], b[0], b[1]])) + } +} + +#[cfg(test)] +#[cfg(target_arch = "x86_64")] +mod test { + use super::*; + use crate::x86_64::{SSE2, SSE41, SSSE3}; + use crate::Machine; + + #[test] + #[cfg_attr(not(target_feature = "ssse3"), ignore)] + fn test_bswap32_s2_vs_s3() { + let xs = [0x0f0e_0d0c, 0x0b0a_0908, 0x0706_0504, 0x0302_0100]; + let ys = [0x0c0d_0e0f, 0x0809_0a0b, 0x0405_0607, 0x0001_0203]; + + let s2 = unsafe { SSE2::instance() }; + let s3 = unsafe { SSSE3::instance() }; + + let x_s2 = { + let x_s2: ::u32x4 = s2.vec(xs); + x_s2.bswap() + }; + + let x_s3 = { + let x_s3: ::u32x4 = s3.vec(xs); + x_s3.bswap() + }; + + assert_eq!(x_s2, transmute!(x_s3)); + assert_eq!(x_s2, s2.vec(ys)); + } + + #[test] + #[cfg_attr(not(target_feature = "ssse3"), ignore)] + fn test_bswap64_s2_vs_s3() { + let xs = [0x0f0e_0d0c_0b0a_0908, 0x0706_0504_0302_0100]; + let ys = [0x0809_0a0b_0c0d_0e0f, 0x0001_0203_0405_0607]; + + let s2 = unsafe { SSE2::instance() }; + let s3 = unsafe { SSSE3::instance() }; + + let x_s2 = { + let x_s2: ::u64x2 = s2.vec(xs); + x_s2.bswap() + }; + + let x_s3 = { + let x_s3: ::u64x2 = s3.vec(xs); + x_s3.bswap() + }; + + assert_eq!(x_s2, s2.vec(ys)); + assert_eq!(x_s3, transmute!(x_s3)); + } + + #[test] + #[cfg_attr(not(target_feature = "ssse3"), ignore)] + fn test_shuffle32_s2_vs_s3() { + let xs = [0x0, 0x1, 0x2, 0x3]; + let ys = [0x2, 0x3, 0x0, 0x1]; + let zs = 
[0x1, 0x2, 0x3, 0x0]; + + let s2 = unsafe { SSE2::instance() }; + let s3 = unsafe { SSSE3::instance() }; + + let x_s2 = { + let x_s2: ::u32x4 = s2.vec(xs); + x_s2.shuffle2301() + }; + let x_s3 = { + let x_s3: ::u32x4 = s3.vec(xs); + x_s3.shuffle2301() + }; + assert_eq!(x_s2, s2.vec(ys)); + assert_eq!(x_s3, transmute!(x_s3)); + + let x_s2 = { + let x_s2: ::u32x4 = s2.vec(xs); + x_s2.shuffle3012() + }; + let x_s3 = { + let x_s3: ::u32x4 = s3.vec(xs); + x_s3.shuffle3012() + }; + assert_eq!(x_s2, s2.vec(zs)); + assert_eq!(x_s3, transmute!(x_s3)); + + let x_s2 = x_s2.shuffle1230(); + let x_s3 = x_s3.shuffle1230(); + assert_eq!(x_s2, s2.vec(xs)); + assert_eq!(x_s3, transmute!(x_s3)); + } + + #[test] + #[cfg_attr(not(target_feature = "ssse3"), ignore)] + fn test_shuffle64_s2_vs_s3() { + let xs = [0x0, 0x1, 0x2, 0x3]; + let ys = [0x2, 0x3, 0x0, 0x1]; + let zs = [0x1, 0x2, 0x3, 0x0]; + + let s2 = unsafe { SSE2::instance() }; + let s3 = unsafe { SSSE3::instance() }; + + let x_s2 = { + let x_s2: ::u64x4 = s2.vec(xs); + x_s2.shuffle2301() + }; + let x_s3 = { + let x_s3: ::u64x4 = s3.vec(xs); + x_s3.shuffle2301() + }; + assert_eq!(x_s2, s2.vec(ys)); + assert_eq!(x_s3, transmute!(x_s3)); + + let x_s2 = { + let x_s2: ::u64x4 = s2.vec(xs); + x_s2.shuffle3012() + }; + let x_s3 = { + let x_s3: ::u64x4 = s3.vec(xs); + x_s3.shuffle3012() + }; + assert_eq!(x_s2, s2.vec(zs)); + assert_eq!(x_s3, transmute!(x_s3)); + + let x_s2 = x_s2.shuffle1230(); + let x_s3 = x_s3.shuffle1230(); + assert_eq!(x_s2, s2.vec(xs)); + assert_eq!(x_s3, transmute!(x_s3)); + } + + #[cfg_attr(not(all(target_feature = "ssse3", target_feature = "sse4.1")), ignore)] + #[test] + fn test_lanes_u32x4() { + let xs = [0x1, 0x2, 0x3, 0x4]; + + let s2 = unsafe { SSE2::instance() }; + let s3 = unsafe { SSSE3::instance() }; + let s4 = unsafe { SSE41::instance() }; + + { + let x_s2: ::u32x4 = s2.vec(xs); + let y_s2 = ::u32x4::from_lanes(xs); + assert_eq!(x_s2, y_s2); + assert_eq!(xs, y_s2.to_lanes()); + } + + { + let x_s3: 
::u32x4 = s3.vec(xs); + let y_s3 = ::u32x4::from_lanes(xs); + assert_eq!(x_s3, y_s3); + assert_eq!(xs, y_s3.to_lanes()); + } + + { + let x_s4: ::u32x4 = s4.vec(xs); + let y_s4 = ::u32x4::from_lanes(xs); + assert_eq!(x_s4, y_s4); + assert_eq!(xs, y_s4.to_lanes()); + } + } + + #[test] + #[cfg_attr(not(all(target_feature = "ssse3", target_feature = "sse4.1")), ignore)] + fn test_lanes_u64x2() { + let xs = [0x1, 0x2]; + + let s2 = unsafe { SSE2::instance() }; + let s3 = unsafe { SSSE3::instance() }; + let s4 = unsafe { SSE41::instance() }; + + { + let x_s2: ::u64x2 = s2.vec(xs); + let y_s2 = ::u64x2::from_lanes(xs); + assert_eq!(x_s2, y_s2); + assert_eq!(xs, y_s2.to_lanes()); + } + + { + let x_s3: ::u64x2 = s3.vec(xs); + let y_s3 = ::u64x2::from_lanes(xs); + assert_eq!(x_s3, y_s3); + assert_eq!(xs, y_s3.to_lanes()); + } + + { + let x_s4: ::u64x2 = s4.vec(xs); + let y_s4 = ::u64x2::from_lanes(xs); + assert_eq!(x_s4, y_s4); + assert_eq!(xs, y_s4.to_lanes()); + } + } + + #[test] + fn test_vec4_u32x4_s2() { + let xs = [1, 2, 3, 4]; + let s2 = unsafe { SSE2::instance() }; + let x_s2: ::u32x4 = s2.vec(xs); + assert_eq!(x_s2.extract(0), 1); + assert_eq!(x_s2.extract(1), 2); + assert_eq!(x_s2.extract(2), 3); + assert_eq!(x_s2.extract(3), 4); + assert_eq!(x_s2.insert(0xf, 0), s2.vec([0xf, 2, 3, 4])); + assert_eq!(x_s2.insert(0xf, 1), s2.vec([1, 0xf, 3, 4])); + assert_eq!(x_s2.insert(0xf, 2), s2.vec([1, 2, 0xf, 4])); + assert_eq!(x_s2.insert(0xf, 3), s2.vec([1, 2, 3, 0xf])); + } + + #[test] + #[cfg_attr(not(all(target_feature = "ssse3", target_feature = "sse4.1")), ignore)] + fn test_vec4_u32x4_s4() { + let xs = [1, 2, 3, 4]; + let s4 = unsafe { SSE41::instance() }; + let x_s4: ::u32x4 = s4.vec(xs); + assert_eq!(x_s4.extract(0), 1); + assert_eq!(x_s4.extract(1), 2); + assert_eq!(x_s4.extract(2), 3); + assert_eq!(x_s4.extract(3), 4); + assert_eq!(x_s4.insert(0xf, 0), s4.vec([0xf, 2, 3, 4])); + assert_eq!(x_s4.insert(0xf, 1), s4.vec([1, 0xf, 3, 4])); + assert_eq!(x_s4.insert(0xf, 
2), s4.vec([1, 2, 0xf, 4])); + assert_eq!(x_s4.insert(0xf, 3), s4.vec([1, 2, 3, 0xf])); + } + + #[test] + fn test_vec2_u64x2_s2() { + let xs = [0x1, 0x2]; + let s2 = unsafe { SSE2::instance() }; + let x_s2: ::u64x2 = s2.vec(xs); + assert_eq!(x_s2.extract(0), 1); + assert_eq!(x_s2.extract(1), 2); + assert_eq!(x_s2.insert(0xf, 0), s2.vec([0xf, 2])); + assert_eq!(x_s2.insert(0xf, 1), s2.vec([1, 0xf])); + } + + #[test] + #[cfg_attr(not(all(target_feature = "ssse3", target_feature = "sse4.1")), ignore)] + fn test_vec4_u64x2_s4() { + let xs = [0x1, 0x2]; + let s4 = unsafe { SSE41::instance() }; + let x_s4: ::u64x2 = s4.vec(xs); + assert_eq!(x_s4.extract(0), 1); + assert_eq!(x_s4.extract(1), 2); + assert_eq!(x_s4.insert(0xf, 0), s4.vec([0xf, 2])); + assert_eq!(x_s4.insert(0xf, 1), s4.vec([1, 0xf])); + } +} + +pub mod avx2 { + #![allow(non_camel_case_types)] + use crate::soft::{x2, x4}; + use crate::types::*; + use crate::x86_64::sse2::{u128x1_sse2, u32x4_sse2, G0}; + use crate::x86_64::{vec256_storage, vec512_storage, Avx2Machine, YesS3, YesS4}; + use core::arch::x86_64::*; + use core::marker::PhantomData; + use core::ops::*; + use zerocopy::{transmute, AsBytes, FromBytes, FromZeroes}; + + #[derive(Copy, Clone, FromBytes, AsBytes, FromZeroes)] + #[repr(transparent)] + pub struct u32x4x2_avx2 { + x: __m256i, + ni: PhantomData, + } + + impl u32x4x2_avx2 { + #[inline(always)] + fn new(x: __m256i) -> Self { + Self { x, ni: PhantomData } + } + } + + impl u32x4x2> for u32x4x2_avx2 where NI: Copy {} + impl Store for u32x4x2_avx2 { + #[inline(always)] + unsafe fn unpack(p: vec256_storage) -> Self { + Self::new(p.avx) + } + } + impl StoreBytes for u32x4x2_avx2 { + #[inline(always)] + unsafe fn unsafe_read_le(input: &[u8]) -> Self { + assert_eq!(input.len(), 32); + Self::new(_mm256_loadu_si256(input.as_ptr() as *const _)) + } + #[inline(always)] + unsafe fn unsafe_read_be(input: &[u8]) -> Self { + Self::unsafe_read_le(input).bswap() + } + #[inline(always)] + fn write_le(self, out: 
&mut [u8]) { + unsafe { + assert_eq!(out.len(), 32); + _mm256_storeu_si256(out.as_mut_ptr() as *mut _, self.x) + } + } + #[inline(always)] + fn write_be(self, out: &mut [u8]) { + self.bswap().write_le(out) + } + } + impl MultiLane<[u32x4_sse2; 2]> for u32x4x2_avx2 { + #[inline(always)] + fn to_lanes(self) -> [u32x4_sse2; 2] { + unsafe { + [ + u32x4_sse2::new(_mm256_extracti128_si256(self.x, 0)), + u32x4_sse2::new(_mm256_extracti128_si256(self.x, 1)), + ] + } + } + #[inline(always)] + fn from_lanes(x: [u32x4_sse2; 2]) -> Self { + Self::new(unsafe { _mm256_setr_m128i(x[0].x, x[1].x) }) + } + } + impl Vec2> for u32x4x2_avx2 { + #[inline(always)] + fn extract(self, i: u32) -> u32x4_sse2 { + unsafe { + match i { + 0 => u32x4_sse2::new(_mm256_extracti128_si256(self.x, 0)), + 1 => u32x4_sse2::new(_mm256_extracti128_si256(self.x, 1)), + _ => panic!(), + } + } + } + #[inline(always)] + fn insert(self, w: u32x4_sse2, i: u32) -> Self { + Self::new(unsafe { + match i { + 0 => _mm256_inserti128_si256(self.x, w.x, 0), + 1 => _mm256_inserti128_si256(self.x, w.x, 1), + _ => panic!(), + } + }) + } + } + impl BitOps32 for u32x4x2_avx2 where NI: Copy {} + impl ArithOps for u32x4x2_avx2 where NI: Copy {} + macro_rules! shuf_lane_bytes { + ($name:ident, $k0:expr, $k1:expr) => { + #[inline(always)] + fn $name(self) -> Self { + Self::new(unsafe { + _mm256_shuffle_epi8(self.x, _mm256_set_epi64x($k0, $k1, $k0, $k1)) + }) + } + }; + } + macro_rules! 
rotr_32 { + ($name:ident, $i:expr) => { + #[inline(always)] + fn $name(self) -> Self { + Self::new(unsafe { + _mm256_or_si256( + _mm256_srli_epi32(self.x, $i as i32), + _mm256_slli_epi32(self.x, 32 - $i as i32), + ) + }) + } + }; + } + impl RotateEachWord32 for u32x4x2_avx2 { + rotr_32!(rotate_each_word_right7, 7); + shuf_lane_bytes!( + rotate_each_word_right8, + 0x0c0f_0e0d_080b_0a09, + 0x0407_0605_0003_0201 + ); + rotr_32!(rotate_each_word_right11, 11); + rotr_32!(rotate_each_word_right12, 12); + shuf_lane_bytes!( + rotate_each_word_right16, + 0x0d0c_0f0e_0908_0b0a, + 0x0504_0706_0100_0302 + ); + rotr_32!(rotate_each_word_right20, 20); + shuf_lane_bytes!( + rotate_each_word_right24, + 0x0e0d_0c0f_0a09_080b, + 0x0605_0407_0201_0003 + ); + rotr_32!(rotate_each_word_right25, 25); + } + impl BitOps0 for u32x4x2_avx2 where NI: Copy {} + impl From> for vec256_storage { + #[inline(always)] + fn from(x: u32x4x2_avx2) -> Self { + Self { avx: x.x } + } + } + + macro_rules! impl_assign { + ($vec:ident, $Assign:ident, $assign_fn:ident, $bin_fn:ident) => { + impl $Assign for $vec + where + NI: Copy, + { + #[inline(always)] + fn $assign_fn(&mut self, rhs: Self) { + *self = self.$bin_fn(rhs); + } + } + }; + } + impl_assign!(u32x4x2_avx2, BitXorAssign, bitxor_assign, bitxor); + impl_assign!(u32x4x2_avx2, BitOrAssign, bitor_assign, bitor); + impl_assign!(u32x4x2_avx2, BitAndAssign, bitand_assign, bitand); + impl_assign!(u32x4x2_avx2, AddAssign, add_assign, add); + + macro_rules! 
impl_bitop { + ($vec:ident, $Op:ident, $op_fn:ident, $impl_fn:ident) => { + impl $Op for $vec { + type Output = Self; + #[inline(always)] + fn $op_fn(self, rhs: Self) -> Self::Output { + Self::new(unsafe { $impl_fn(self.x, rhs.x) }) + } + } + }; + } + impl_bitop!(u32x4x2_avx2, BitXor, bitxor, _mm256_xor_si256); + impl_bitop!(u32x4x2_avx2, BitOr, bitor, _mm256_or_si256); + impl_bitop!(u32x4x2_avx2, BitAnd, bitand, _mm256_and_si256); + impl_bitop!(u32x4x2_avx2, AndNot, andnot, _mm256_andnot_si256); + impl_bitop!(u32x4x2_avx2, Add, add, _mm256_add_epi32); + + impl Not for u32x4x2_avx2 { + type Output = Self; + #[inline(always)] + fn not(self) -> Self::Output { + unsafe { + let f = _mm256_set1_epi8(-0x7f); + Self::new(f) ^ self + } + } + } + + impl BSwap for u32x4x2_avx2 { + shuf_lane_bytes!(bswap, 0x0c0d_0e0f_0809_0a0b, 0x0405_0607_0001_0203); + } + + impl From, G0>> for u32x4x2_avx2 + where + NI: Copy, + { + #[inline(always)] + fn from(x: x2, G0>) -> Self { + Self::new(unsafe { _mm256_setr_m128i(x.0[0].x, x.0[1].x) }) + } + } + + impl LaneWords4 for u32x4x2_avx2 { + #[inline(always)] + fn shuffle_lane_words1230(self) -> Self { + Self::new(unsafe { _mm256_shuffle_epi32(self.x, 0b1001_0011) }) + } + #[inline(always)] + fn shuffle_lane_words2301(self) -> Self { + Self::new(unsafe { _mm256_shuffle_epi32(self.x, 0b0100_1110) }) + } + #[inline(always)] + fn shuffle_lane_words3012(self) -> Self { + Self::new(unsafe { _mm256_shuffle_epi32(self.x, 0b0011_1001) }) + } + } + + /////////////////////////////////////////////////////////////////////////////////////////// + + pub type u32x4x4_avx2 = x2, G0>; + impl u32x4x4> for u32x4x4_avx2 {} + + impl Store for u32x4x4_avx2 { + #[inline(always)] + unsafe fn unpack(p: vec512_storage) -> Self { + Self::new([ + u32x4x2_avx2::unpack(p.avx[0]), + u32x4x2_avx2::unpack(p.avx[1]), + ]) + } + } + impl MultiLane<[u32x4_sse2; 4]> for u32x4x4_avx2 { + #[inline(always)] + fn to_lanes(self) -> [u32x4_sse2; 4] { + let [a, b] = 
self.0[0].to_lanes(); + let [c, d] = self.0[1].to_lanes(); + [a, b, c, d] + } + #[inline(always)] + fn from_lanes(x: [u32x4_sse2; 4]) -> Self { + let ab = u32x4x2_avx2::from_lanes([x[0], x[1]]); + let cd = u32x4x2_avx2::from_lanes([x[2], x[3]]); + Self::new([ab, cd]) + } + } + impl Vec4> for u32x4x4_avx2 { + #[inline(always)] + fn extract(self, i: u32) -> u32x4_sse2 { + match i { + 0 => self.0[0].extract(0), + 1 => self.0[0].extract(1), + 2 => self.0[1].extract(0), + 3 => self.0[1].extract(1), + _ => panic!(), + } + } + #[inline(always)] + fn insert(self, w: u32x4_sse2, i: u32) -> Self { + Self::new(match i { + 0 | 1 => [self.0[0].insert(w, i), self.0[1]], + 2 | 3 => [self.0[0], self.0[1].insert(w, i - 2)], + _ => panic!(), + }) + } + } + impl Vec4Ext> for u32x4x4_avx2 { + #[inline(always)] + fn transpose4(a: Self, b: Self, c: Self, d: Self) -> (Self, Self, Self, Self) { + /* + * a00:a01 a10:a11 + * b00:b01 b10:b11 + * c00:c01 c10:c11 + * d00:d01 d10:d11 + * => + * a00:b00 c00:d00 + * a01:b01 c01:d01 + * a10:b10 c10:d10 + * a11:b11 c11:d11 + */ + unsafe { + let ab00 = u32x4x2_avx2::new(_mm256_permute2x128_si256(a.0[0].x, b.0[0].x, 0x20)); + let ab01 = u32x4x2_avx2::new(_mm256_permute2x128_si256(a.0[0].x, b.0[0].x, 0x31)); + let ab10 = u32x4x2_avx2::new(_mm256_permute2x128_si256(a.0[1].x, b.0[1].x, 0x20)); + let ab11 = u32x4x2_avx2::new(_mm256_permute2x128_si256(a.0[1].x, b.0[1].x, 0x31)); + let cd00 = u32x4x2_avx2::new(_mm256_permute2x128_si256(c.0[0].x, d.0[0].x, 0x20)); + let cd01 = u32x4x2_avx2::new(_mm256_permute2x128_si256(c.0[0].x, d.0[0].x, 0x31)); + let cd10 = u32x4x2_avx2::new(_mm256_permute2x128_si256(c.0[1].x, d.0[1].x, 0x20)); + let cd11 = u32x4x2_avx2::new(_mm256_permute2x128_si256(c.0[1].x, d.0[1].x, 0x31)); + ( + Self::new([ab00, cd00]), + Self::new([ab01, cd01]), + Self::new([ab10, cd10]), + Self::new([ab11, cd11]), + ) + } + } + } + impl Vector<[u32; 16]> for u32x4x4_avx2 { + #[inline(always)] + fn to_scalars(self) -> [u32; 16] { + transmute!(self) 
+ } + } + impl From> for vec512_storage { + #[inline(always)] + fn from(x: u32x4x4_avx2) -> Self { + Self { + avx: [ + vec256_storage { avx: x.0[0].x }, + vec256_storage { avx: x.0[1].x }, + ], + } + } + } + impl From>> for u32x4x4_avx2 { + #[inline(always)] + fn from(x: x4>) -> Self { + Self::new(unsafe { + [ + u32x4x2_avx2::new(_mm256_setr_m128i(x.0[0].x, x.0[1].x)), + u32x4x2_avx2::new(_mm256_setr_m128i(x.0[2].x, x.0[3].x)), + ] + }) + } + } +} diff --git a/src/rust/vendor/rand/.cargo-checksum.json b/src/rust/vendor/rand/.cargo-checksum.json new file mode 100644 index 000000000..6e76c5887 --- /dev/null +++ b/src/rust/vendor/rand/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"76b505678de234d2eef751593feec6d9debb76c20d45564a9f23c9e9783dbc63","COPYRIGHT":"90eb64f0279b0d9432accfa6023ff803bc4965212383697eee27a0f426d5f8d5","Cargo.toml":"9bb028fb3b697653beb433ddcf4c1292b3db10ea5ed27a695df6e4e604ba6d4b","LICENSE-APACHE":"35242e7a83f69875e6edeff02291e688c97caafe2f8902e4e19b49d3e78b4cab","LICENSE-MIT":"209fbbe0ad52d9235e37badf9cadfe4dbdc87203179c0899e738b39ade42177b","README.md":"ddb5a1fa9442c6cab92a3510365937e729f839c94b97e75d3f0430bf3a4dd2bd","src/distributions/bernoulli.rs":"437e61c41f73b6fffad11a65cc45d05df198ab05b37328eba687a9779b86948a","src/distributions/distribution.rs":"36086233c9682eb16874ba87bb1ec39db71559c5ce2ca618dc8c6bd9710d3b3a","src/distributions/float.rs":"ef894cbfeab9c734894468175c4553100b4a261f431047f2bbc4949aa43e2ccd","src/distributions/integer.rs":"a380e0627c97cfad0d94e0fdfb4dad73060d23073cc1d379f06c4dbd2a4fc2db","src/distributions/mod.rs":"f87a133e704e38ad554c8b4f62497d6320c74ef7d37df7871c61bde48d200b5b","src/distributions/other.rs":"e60568f8eadc0594636641a2070e53f5127fb532a74101ed4767f424a6e92622","src/distributions/slice.rs":"94f5abfe602679e980e4561bb03dcac28bbd3bb5f7bd2821f396a6293c0878db","src/distributions/uniform.rs":"9eb0769b7c268c2e4f502ede0d779cb1ab5243d70a1fb39f2f5e316bcf9586e2","src/distributions/utils.rs":"41304f5e2d74e750fc
62f7871443c6e9d510a6c99be4614fb5c756682e0344d7","src/distributions/weighted.rs":"ae019d9b688e33cb912c9a04668cce3e7df86abab994db88478c6c339f98222f","src/distributions/weighted_index.rs":"874d1db2e258d9c049be08ae80b72ec2c75af0f2571f83091a26a3f6c747a6f0","src/lib.rs":"a773ff7b0dad376e5ef23661c40b7a96df4233fef90dab303db93f209aee314f","src/prelude.rs":"2f2132d74ce9f70513224baad3b161b1585a639f9136a254cdb0e7f8ffceb25b","src/rng.rs":"5d9b55069197f9f98298e8d930b13d8c65ab2701660bfbf52d83c6d7d7aff8c6","src/rngs/adapter/mod.rs":"28318871529da61dccc0fe8c0bcafa99b133c721d4bb506fa34d0831f4ca2639","src/rngs/adapter/read.rs":"b044061c46d0b8e6a4f25c69d3e8bb6f9df08cd8df9b5eae131a1d4934020e03","src/rngs/adapter/reseeding.rs":"89abebade9bca847889155ac3357c0021d2c6181dd47478332a644820ade0c6e","src/rngs/mock.rs":"0074abe04cf84b1263218f50140931fa4188f4e0a43fe3205556a00e4c36d1e9","src/rngs/mod.rs":"a6dec3d19e1726ba05f130ab9b20719d79177b8c1584cdd7b5f37b9996315ed3","src/rngs/small.rs":"a8e61c6e0bad62f06db1325e3b93eff1d4aa9e82cf0316fbfd02da2ef5b85b83","src/rngs/std.rs":"3cee48bf1fea18b84f585680a947f3aeea949b756cc37d99217291f9759be7c9","src/rngs/thread.rs":"c3cc07465bf02d08182afc47a40e50095d7c83633e09dcd071974b2a902e6fce","src/rngs/xoshiro128plusplus.rs":"deca2450a2d5ea826ca6f47cccb9ee06daeac38799a30a107b78c5dae78ae30c","src/rngs/xoshiro256plusplus.rs":"d7e214f8288041cede7ef26e829dd2196f7b4843455d7f1b9a3ef080d570bc5f","src/seq/index.rs":"5247833f7bfc8c5c11337ce7dc0a55a6979ea664ddddd70b6e2b9598058ab44d","src/seq/mod.rs":"dd97a635e89e1d50153c57ec03d8a346a063486998ef14ca4fdc60659f1612fb"},"package":"34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"} \ No newline at end of file diff --git a/src/rust/vendor/rand/CHANGELOG.md b/src/rust/vendor/rand/CHANGELOG.md new file mode 100644 index 000000000..b0872af6d --- /dev/null +++ b/src/rust/vendor/rand/CHANGELOG.md @@ -0,0 +1,699 @@ +# Changelog +All notable changes to this project will be documented in this file. 
+ +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +A [separate changelog is kept for rand_core](rand_core/CHANGELOG.md). + +You may also find the [Upgrade Guide](https://rust-random.github.io/book/update.html) useful. + +## [0.8.5] - 2021-08-20 +### Fixes +- Fix build on non-32/64-bit architectures (#1144) +- Fix "min_const_gen" feature for `no_std` (#1173) +- Check `libc::pthread_atfork` return value with panic on error (#1178) +- More robust reseeding in case `ReseedingRng` is used from a fork handler (#1178) +- Fix nightly: remove unused `slice_partition_at_index` feature (#1215) +- Fix nightly + `simd_support`: update `packed_simd` (#1216) + +### Rngs +- `StdRng`: Switch from HC128 to ChaCha12 on emscripten (#1142). + We now use ChaCha12 on all platforms. + +### Documentation +- Added docs about rand's use of const generics (#1150) +- Better random chars example (#1157) + + +## [0.8.4] - 2021-06-15 +### Additions +- Use const-generics to support arrays of all sizes (#1104) +- Implement `Clone` and `Copy` for `Alphanumeric` (#1126) +- Add `Distribution::map` to derive a distribution using a closure (#1129) +- Add `Slice` distribution (#1107) +- Add `DistString` trait with impls for `Standard` and `Alphanumeric` (#1133) + +### Other +- Reorder asserts in `Uniform` float distributions for easier debugging of non-finite arguments + (#1094, #1108) +- Add range overflow check in `Uniform` float distributions (#1108) +- Deprecate `rngs::adapter::ReadRng` (#1130) + +## [0.8.3] - 2021-01-25 +### Fixes +- Fix `no-std` + `alloc` build by gating `choose_multiple_weighted` on `std` (#1088) + +## [0.8.2] - 2021-01-12 +### Fixes +- Fix panic in `UniformInt::sample_single_inclusive` and `Rng::gen_range` when + providing a full integer range (eg `0..=MAX`) (#1087) + +## [0.8.1] - 2020-12-31 +### Other +- Enable all stable features in the playground (#1081) 
+ +## [0.8.0] - 2020-12-18 +### Platform support +- The minimum supported Rust version is now 1.36 (#1011) +- `getrandom` updated to v0.2 (#1041) +- Remove `wasm-bindgen` and `stdweb` feature flags. For details of WASM support, + see the [getrandom documentation](https://docs.rs/getrandom/latest). (#948) +- `ReadRng::next_u32` and `next_u64` now use little-Endian conversion instead + of native-Endian, affecting results on Big-Endian platforms (#1061) +- The `nightly` feature no longer implies the `simd_support` feature (#1048) +- Fix `simd_support` feature to work on current nightlies (#1056) + +### Rngs +- `ThreadRng` is no longer `Copy` to enable safe usage within thread-local destructors (#1035) +- `gen_range(a, b)` was replaced with `gen_range(a..b)`. `gen_range(a..=b)` is + also supported. Note that `a` and `b` can no longer be references or SIMD types. (#744, #1003) +- Replace `AsByteSliceMut` with `Fill` and add support for `[bool], [char], [f32], [f64]` (#940) +- Restrict `rand::rngs::adapter` to `std` (#1027; see also #928) +- `StdRng`: add new `std_rng` feature flag (enabled by default, but might need + to be used if disabling default crate features) (#948) +- `StdRng`: Switch from ChaCha20 to ChaCha12 for better performance (#1028) +- `SmallRng`: Replace PCG algorithm with xoshiro{128,256}++ (#1038) + +### Sequences +- Add `IteratorRandom::choose_stable` as an alternative to `choose` which does + not depend on size hints (#1057) +- Improve accuracy and performance of `IteratorRandom::choose` (#1059) +- Implement `IntoIterator` for `IndexVec`, replacing the `into_iter` method (#1007) +- Add value stability tests for `seq` module (#933) + +### Misc +- Support `PartialEq` and `Eq` for `StdRng`, `SmallRng` and `StepRng` (#979) +- Added a `serde1` feature and added Serialize/Deserialize to `UniformInt` and `WeightedIndex` (#974) +- Drop some unsafe code (#962, #963, #1011) +- Reduce packaged crate size (#983) +- Migrate to GitHub Actions from Travis+AppVeyor 
(#1073) + +### Distributions +- `Alphanumeric` samples bytes instead of chars (#935) +- `Uniform` now supports `char`, enabling `rng.gen_range('A'..='Z')` (#1068) +- Add `UniformSampler::sample_single_inclusive` (#1003) + +#### Weighted sampling +- Implement weighted sampling without replacement (#976, #1013) +- `rand::distributions::alias_method::WeightedIndex` was moved to `rand_distr::WeightedAliasIndex`. + The simpler alternative `rand::distribution::WeightedIndex` remains. (#945) +- Improve treatment of rounding errors in `WeightedIndex::update_weights` (#956) +- `WeightedIndex`: return error on NaN instead of panic (#1005) + +### Documentation +- Document types supported by `random` (#994) +- Document notes on password generation (#995) +- Note that `SmallRng` may not be the best choice for performance and in some + other cases (#1038) +- Use `doc(cfg)` to annotate feature-gated items (#1019) +- Adjust README (#1065) + +## [0.7.3] - 2020-01-10 +### Fixes +- The `Bernoulli` distribution constructors now reports an error on NaN and on + `denominator == 0`. (#925) +- Use `std::sync::Once` to register fork handler, avoiding possible atomicity violation (#928) +- Fix documentation on the precision of generated floating-point values + +### Changes +- Unix: make libc dependency optional; only use fork protection with std feature (#928) + +### Additions +- Implement `std::error::Error` for `BernoulliError` (#919) + +## [0.7.2] - 2019-09-16 +### Fixes +- Fix dependency on `rand_core` 0.5.1 (#890) + +### Additions +- Unit tests for value stability of distributions added (#888) + +## [0.7.1] - 2019-09-13 +### Yanked +This release was yanked since it depends on `rand_core::OsRng` added in 0.5.1 +but specifies a dependency on version 0.5.0 (#890), causing a broken builds +when updating from `rand 0.7.0` without also updating `rand_core`. 
+ +### Fixes +- Fix `no_std` behaviour, appropriately enable c2-chacha's `std` feature (#844) +- `alloc` feature in `no_std` is available since Rust 1.36 (#856) +- Fix or squelch issues from Clippy lints (#840) + +### Additions +- Add a `no_std` target to CI to continuously evaluate `no_std` status (#844) +- `WeightedIndex`: allow adjusting a sub-set of weights (#866) + +## [0.7.0] - 2019-06-28 + +### Fixes +- Fix incorrect pointer usages revealed by Miri testing (#780, #781) +- Fix (tiny!) bias in `Uniform` for 8- and 16-bit ints (#809) + +### Crate +- Bumped MSRV (min supported Rust version) to 1.32.0 +- Updated to Rust Edition 2018 (#823, #824) +- Removed dependence on `rand_xorshift`, `rand_isaac`, `rand_jitter` crates (#759, #765) +- Remove dependency on `winapi` (#724) +- Removed all `build.rs` files (#824) +- Removed code already deprecated in version 0.6 (#757) +- Removed the serde1 feature (It's still available for backwards compatibility, but it does not do anything. #830) +- Many documentation changes + +### rand_core +- Updated to `rand_core` 0.5.0 +- `Error` type redesigned with new API (#800) +- Move `from_entropy` method to `SeedableRng` and remove `FromEntropy` (#800) +- `SeedableRng::from_rng` is now expected to be value-stable (#815) + +### Standard RNGs +- OS interface moved from `rand_os` to new `getrandom` crate (#765, [getrandom](https://github.com/rust-random/getrandom)) +- Use ChaCha for `StdRng` and `ThreadRng` (#792) +- Feature-gate `SmallRng` (#792) +- `ThreadRng` now supports `Copy` (#758) +- Deprecated `EntropyRng` (#765) +- Enable fork protection of ReseedingRng without `std` (#724) + +### Distributions +- Many distributions have been moved to `rand_distr` (#761) +- `Bernoulli::new` constructor now returns a `Result` (#803) +- `Distribution::sample_iter` adjusted for more flexibility (#758) +- Added `distributions::weighted::alias_method::WeightedIndex` for `O(1)` sampling (#692) +- Support sampling `NonZeroU*` types with the 
`Standard` distribution (#728) +- Optimised `Binomial` distribution sampling (#735, #740, #752) +- Optimised SIMD float sampling (#739) + +### Sequences +- Make results portable across 32- and 64-bit by using `u32` samples for `usize` where possible (#809) + +## [0.6.5] - 2019-01-28 +### Crates +- Update `rand_core` to 0.4 (#703) +- Move `JitterRng` to its own crate (#685) +- Add a wasm-bindgen test crate (#696) + +### Platforms +- Fuchsia: Replaced fuchsia-zircon with fuchsia-cprng + +### Doc +- Use RFC 1946 for doc links (#691) +- Fix some doc links and notes (#711) + +## [0.6.4] - 2019-01-08 +### Fixes +- Move wasm-bindgen shims to correct crate (#686) +- Make `wasm32-unknown-unknown` compile but fail at run-time if missing bindingsg (#686) + +## [0.6.3] - 2019-01-04 +### Fixes +- Make the `std` feature require the optional `rand_os` dependency (#675) +- Re-export the optional WASM dependencies of `rand_os` from `rand` to avoid breakage (#674) + +## [0.6.2] - 2019-01-04 +### Additions +- Add `Default` for `ThreadRng` (#657) +- Move `rngs::OsRng` to `rand_os` sub-crate; clean up code; use as dependency (#643) ##BLOCKER## +- Add `rand_xoshiro` sub-crate, plus benchmarks (#642, #668) + +### Fixes +- Fix bias in `UniformInt::sample_single` (#662) +- Use `autocfg` instead of `rustc_version` for rustc version detection (#664) +- Disable `i128` and `u128` if the `target_os` is `emscripten` (#671: work-around Emscripten limitation) +- CI fixes (#660, #671) + +### Optimisations +- Optimise memory usage of `UnitCircle` and `UnitSphereSurface` distributions (no PR) + +## [0.6.1] - 2018-11-22 +- Support sampling `Duration` also for `no_std` (only since Rust 1.25) (#649) +- Disable default features of `libc` (#647) + +## [0.6.0] - 2018-11-14 + +### Project organisation +- Rand has moved from [rust-lang-nursery](https://github.com/rust-lang-nursery/rand) + to [rust-random](https://github.com/rust-random/rand)! 
(#578) +- Created [The Rust Random Book](https://rust-random.github.io/book/) + ([source](https://github.com/rust-random/book)) +- Update copyright and licence notices (#591, #611) +- Migrate policy documentation from the wiki (#544) + +### Platforms +- Add fork protection on Unix (#466) +- Added support for wasm-bindgen. (#541, #559, #562, #600) +- Enable `OsRng` for powerpc64, sparc and sparc64 (#609) +- Use `syscall` from `libc` on Linux instead of redefining it (#629) + +### RNGs +- Switch `SmallRng` to use PCG (#623) +- Implement `Pcg32` and `Pcg64Mcg` generators (#632) +- Move ISAAC RNGs to a dedicated crate (#551) +- Move Xorshift RNG to its own crate (#557) +- Move ChaCha and HC128 RNGs to dedicated crates (#607, #636) +- Remove usage of `Rc` from `ThreadRng` (#615) + +### Sampling and distributions +- Implement `Rng.gen_ratio()` and `Bernoulli::new_ratio()` (#491) +- Make `Uniform` strictly respect `f32` / `f64` high/low bounds (#477) +- Allow `gen_range` and `Uniform` to work on non-`Copy` types (#506) +- `Uniform` supports inclusive ranges: `Uniform::from(a..=b)`. This is + automatically enabled for Rust >= 1.27. (#566) +- Implement `TrustedLen` and `FusedIterator` for `DistIter` (#620) + +#### New distributions +- Add the `Dirichlet` distribution (#485) +- Added sampling from the unit sphere and circle. (#567) +- Implement the triangular distribution (#575) +- Implement the Weibull distribution (#576) +- Implement the Beta distribution (#574) + +#### Optimisations + +- Optimise `Bernoulli::new` (#500) +- Optimise `char` sampling (#519) +- Optimise sampling of `std::time::Duration` (#583) + +### Sequences +- Redesign the `seq` module (#483, #515) +- Add `WeightedIndex` and `choose_weighted` (#518, #547) +- Optimised and changed return type of the `sample_indices` function. 
(#479) +- Use `Iterator::size_hint()` to speed up `IteratorRandom::choose` (#593) + +### SIMD +- Support for generating SIMD types (#523, #542, #561, #630) + +### Other +- Revise CI scripts (#632, #635) +- Remove functionality already deprecated in 0.5 (#499) +- Support for `i128` and `u128` is automatically enabled for Rust >= 1.26. This + renders the `i128_support` feature obsolete. It still exists for backwards + compatibility but does not have any effect. This breaks programs using Rand + with `i128_support` on nightlies older than Rust 1.26. (#571) + + +## [0.5.5] - 2018-08-07 +### Documentation +- Fix links in documentation (#582) + + +## [0.5.4] - 2018-07-11 +### Platform support +- Make `OsRng` work via WASM/stdweb for WebWorkers + + +## [0.5.3] - 2018-06-26 +### Platform support +- OpenBSD, Bitrig: fix compilation (broken in 0.5.1) (#530) + + +## [0.5.2] - 2018-06-18 +### Platform support +- Hide `OsRng` and `JitterRng` on unsupported platforms (#512; fixes #503). + + +## [0.5.1] - 2018-06-08 + +### New distributions +- Added Cauchy distribution. (#474, #486) +- Added Pareto distribution. (#495) + +### Platform support and `OsRng` +- Remove blanket Unix implementation. (#484) +- Remove Wasm unimplemented stub. (#484) +- Dragonfly BSD: read from `/dev/random`. (#484) +- Bitrig: use `getentropy` like OpenBSD. (#484) +- Solaris: (untested) use `getrandom` if available, otherwise `/dev/random`. (#484) +- Emscripten, `stdweb`: split the read up in chunks. (#484) +- Emscripten, Haiku: don't do an extra blocking read from `/dev/random`. (#484) +- Linux, NetBSD, Solaris: read in blocking mode on first use in `fill_bytes`. (#484) +- Fuchsia, CloudABI: fix compilation (broken in Rand 0.5). (#484) + + +## [0.5.0] - 2018-05-21 + +### Crate features and organisation +- Minimum Rust version update: 1.22.0. (#239) +- Create a separate `rand_core` crate. (#288) +- Deprecate `rand_derive`. (#256) +- Add `prelude` (and module reorganisation). (#435) +- Add `log` feature. 
Logging is now available in `JitterRng`, `OsRng`, `EntropyRng` and `ReseedingRng`. (#246) +- Add `serde1` feature for some PRNGs. (#189) +- `stdweb` feature for `OsRng` support on WASM via stdweb. (#272, #336) + +### `Rng` trait +- Split `Rng` in `RngCore` and `Rng` extension trait. + `next_u32`, `next_u64` and `fill_bytes` are now part of `RngCore`. (#265) +- Add `Rng::sample`. (#256) +- Deprecate `Rng::gen_weighted_bool`. (#308) +- Add `Rng::gen_bool`. (#308) +- Remove `Rng::next_f32` and `Rng::next_f64`. (#273) +- Add optimized `Rng::fill` and `Rng::try_fill` methods. (#247) +- Deprecate `Rng::gen_iter`. (#286) +- Deprecate `Rng::gen_ascii_chars`. (#279) + +### `rand_core` crate +- `rand` now depends on new `rand_core` crate (#288) +- `RngCore` and `SeedableRng` are now part of `rand_core`. (#288) +- Add modules to help implementing RNGs `impl` and `le`. (#209, #228) +- Add `Error` and `ErrorKind`. (#225) +- Add `CryptoRng` marker trait. (#273) +- Add `BlockRngCore` trait. (#281) +- Add `BlockRng` and `BlockRng64` wrappers to help implementations. (#281, #325) +- Revise the `SeedableRng` trait. (#233) +- Remove default implementations for `RngCore::next_u64` and `RngCore::fill_bytes`. (#288) +- Add `RngCore::try_fill_bytes`. (#225) + +### Other traits and types +- Add `FromEntropy` trait. (#233, #375) +- Add `SmallRng` wrapper. (#296) +- Rewrite `ReseedingRng` to only work with `BlockRngCore` (substantial performance improvement). (#281) +- Deprecate `weak_rng`. Use `SmallRng` instead. (#296) +- Deprecate `AsciiGenerator`. (#279) + +### Random number generators +- Switch `StdRng` and `thread_rng` to HC-128. (#277) +- `StdRng` must now be created with `from_entropy` instead of `new` +- Change `thread_rng` reseeding threshold to 32 MiB. (#277) +- PRNGs no longer implement `Copy`. (#209) +- `Debug` implementations no longer show internals. (#209) +- Implement `Clone` for `ReseedingRng`, `JitterRng`, OsRng`. 
(#383, #384) +- Implement serialization for `XorShiftRng`, `IsaacRng` and `Isaac64Rng` under the `serde1` feature. (#189) +- Implement `BlockRngCore` for `ChaChaCore` and `Hc128Core`. (#281) +- All PRNGs are now portable across big- and little-endian architectures. (#209) +- `Isaac64Rng::next_u32` no longer throws away half the results. (#209) +- Add `IsaacRng::new_from_u64` and `Isaac64Rng::new_from_u64`. (#209) +- Add the HC-128 CSPRNG `Hc128Rng`. (#210) +- Change ChaCha20 to have 64-bit counter and 64-bit stream. (#349) +- Changes to `JitterRng` to get its size down from 2112 to 24 bytes. (#251) +- Various performance improvements to all PRNGs. + +### Platform support and `OsRng` +- Add support for CloudABI. (#224) +- Remove support for NaCl. (#225) +- WASM support for `OsRng` via stdweb, behind the `stdweb` feature. (#272, #336) +- Use `getrandom` on more platforms for Linux, and on Android. (#338) +- Use the `SecRandomCopyBytes` interface on macOS. (#322) +- On systems that do not have a syscall interface, only keep a single file descriptor open for `OsRng`. (#239) +- On Unix, first try a single read from `/dev/random`, then `/dev/urandom`. (#338) +- Better error handling and reporting in `OsRng` (using new error type). (#225) +- `OsRng` now uses non-blocking when available. (#225) +- Add `EntropyRng`, which provides `OsRng`, but has `JitterRng` as a fallback. (#235) + +### Distributions +- New `Distribution` trait. (#256) +- Add `Distribution::sample_iter` and `Rng::::sample_iter`. (#361) +- Deprecate `Rand`, `Sample` and `IndependentSample` traits. (#256) +- Add a `Standard` distribution (replaces most `Rand` implementations). (#256) +- Add `Binomial` and `Poisson` distributions. (#96) +- Add `Bernoulli` dsitribution. (#411) +- Add `Alphanumeric` distribution. (#279) +- Remove `Closed01` distribution, add `OpenClosed01`. (#274, #420) +- Rework `Range` type, making it possible to implement it for user types. (#274) +- Rename `Range` to `Uniform`. 
(#395) +- Add `Uniform::new_inclusive` for inclusive ranges. (#274) +- Use widening multiply method for much faster integer range reduction. (#274) +- `Standard` distribution for `char` uses `Uniform` internally. (#274) +- `Standard` distribution for `bool` uses sign test. (#274) +- Implement `Standard` distribution for `Wrapping`. (#436) +- Implement `Uniform` distribution for `Duration`. (#427) + + +## [0.4.3] - 2018-08-16 +### Fixed +- Use correct syscall number for PowerPC (#589) + + +## [0.4.2] - 2018-01-06 +### Changed +- Use `winapi` on Windows +- Update for Fuchsia OS +- Remove dev-dependency on `log` + + +## [0.4.1] - 2017-12-17 +### Added +- `no_std` support + + +## [0.4.0-pre.0] - 2017-12-11 +### Added +- `JitterRng` added as a high-quality alternative entropy source using the + system timer +- new `seq` module with `sample_iter`, `sample_slice`, etc. +- WASM support via dummy implementations (fail at run-time) +- Additional benchmarks, covering generators and new seq code + +### Changed +- `thread_rng` uses `JitterRng` if seeding from system time fails + (slower but more secure than previous method) + +### Deprecated + - `sample` function deprecated (replaced by `sample_iter`) + + +## [0.3.20] - 2018-01-06 +### Changed +- Remove dev-dependency on `log` +- Update `fuchsia-zircon` dependency to 0.3.2 + + +## [0.3.19] - 2017-12-27 +### Changed +- Require `log <= 0.3.8` for dev builds +- Update `fuchsia-zircon` dependency to 0.3 +- Fix broken links in docs (to unblock compiler docs testing CI) + + +## [0.3.18] - 2017-11-06 +### Changed +- `thread_rng` is seeded from the system time if `OsRng` fails +- `weak_rng` now uses `thread_rng` internally + + +## [0.3.17] - 2017-10-07 +### Changed + - Fuchsia: Magenta was renamed Zircon + +## [0.3.16] - 2017-07-27 +### Added +- Implement Debug for mote non-public types +- implement `Rand` for (i|u)i128 +- Support for Fuchsia + +### Changed +- Add inline attribute to SampleRange::construct_range. 
+ This improves the benchmark for sample in 11% and for shuffle in 16%. +- Use `RtlGenRandom` instead of `CryptGenRandom` + + +## [0.3.15] - 2016-11-26 +### Added +- Add `Rng` trait method `choose_mut` +- Redox support + +### Changed +- Use `arc4rand` for `OsRng` on FreeBSD. +- Use `arc4random(3)` for `OsRng` on OpenBSD. + +### Fixed +- Fix filling buffers 4 GiB or larger with `OsRng::fill_bytes` on Windows + + +## [0.3.14] - 2016-02-13 +### Fixed +- Inline definitions from winapi/advapi32, which decreases build times + + +## [0.3.13] - 2016-01-09 +### Fixed +- Compatible with Rust 1.7.0-nightly (needed some extra type annotations) + + +## [0.3.12] - 2015-11-09 +### Changed +- Replaced the methods in `next_f32` and `next_f64` with the technique described + Saito & Matsumoto at MCQMC'08. The new method should exhibit a slightly more + uniform distribution. +- Depend on libc 0.2 + +### Fixed +- Fix iterator protocol issue in `rand::sample` + + +## [0.3.11] - 2015-08-31 +### Added +- Implement `Rand` for arrays with n <= 32 + + +## [0.3.10] - 2015-08-17 +### Added +- Support for NaCl platforms + +### Changed +- Allow `Rng` to be `?Sized`, impl for `&mut R` and `Box` where `R: ?Sized + Rng` + + +## [0.3.9] - 2015-06-18 +### Changed +- Use `winapi` for Windows API things + +### Fixed +- Fixed test on stable/nightly +- Fix `getrandom` syscall number for aarch64-unknown-linux-gnu + + +## [0.3.8] - 2015-04-23 +### Changed +- `log` is a dev dependency + +### Fixed +- Fix race condition of atomics in `is_getrandom_available` + + +## [0.3.7] - 2015-04-03 +### Fixed +- Derive Copy/Clone changes + + +## [0.3.6] - 2015-04-02 +### Changed +- Move to stable Rust! 
+ + +## [0.3.5] - 2015-04-01 +### Fixed +- Compatible with Rust master + + +## [0.3.4] - 2015-03-31 +### Added +- Implement Clone for `Weighted` + +### Fixed +- Compatible with Rust master + + +## [0.3.3] - 2015-03-26 +### Fixed +- Fix compile on Windows + + +## [0.3.2] - 2015-03-26 + + +## [0.3.1] - 2015-03-26 +### Fixed +- Fix compile on Windows + + +## [0.3.0] - 2015-03-25 +### Changed +- Update to use log version 0.3.x + + +## [0.2.1] - 2015-03-22 +### Fixed +- Compatible with Rust master +- Fixed iOS compilation + + +## [0.2.0] - 2015-03-06 +### Fixed +- Compatible with Rust master (move from `old_io` to `std::io`) + + +## [0.1.4] - 2015-03-04 +### Fixed +- Compatible with Rust master (use wrapping ops) + + +## [0.1.3] - 2015-02-20 +### Fixed +- Compatible with Rust master + +### Removed +- Removed Copy implementations from RNGs + + +## [0.1.2] - 2015-02-03 +### Added +- Imported functionality from `std::rand`, including: + - `StdRng`, `SeedableRng`, `TreadRng`, `weak_rng()` + - `ReaderRng`: A wrapper around any Reader to treat it as an RNG. +- Imported documentation from `std::rand` +- Imported tests from `std::rand` + + +## [0.1.1] - 2015-02-03 +### Added +- Migrate to a cargo-compatible directory structure. + +### Fixed +- Do not use entropy during `gen_weighted_bool(1)` + + +## [Rust 0.12.0] - 2014-10-09 +### Added +- Impl Rand for tuples of arity 11 and 12 +- Include ChaCha pseudorandom generator +- Add `next_f64` and `next_f32` to Rng +- Implement Clone for PRNGs + +### Changed +- Rename `TaskRng` to `ThreadRng` and `task_rng` to `thread_rng` (since a + runtime is removed from Rust). + +### Fixed +- Improved performance of ISAAC and ISAAC64 by 30% and 12 % respectively, by + informing the optimiser that indexing is never out-of-bounds. 
+ +### Removed +- Removed the Deprecated `choose_option` + + +## [Rust 0.11.0] - 2014-07-02 +### Added +- document when to use `OSRng` in cryptographic context, and explain why we use `/dev/urandom` instead of `/dev/random` +- `Rng::gen_iter()` which will return an infinite stream of random values +- `Rng::gen_ascii_chars()` which will return an infinite stream of random ascii characters + +### Changed +- Now only depends on libcore! +- Remove `Rng.choose()`, rename `Rng.choose_option()` to `.choose()` +- Rename OSRng to OsRng +- The WeightedChoice structure is no longer built with a `Vec>`, + but rather a `&mut [Weighted]`. This means that the WeightedChoice + structure now has a lifetime associated with it. +- The `sample` method on `Rng` has been moved to a top-level function in the + `rand` module due to its dependence on `Vec`. + +### Removed +- `Rng::gen_vec()` was removed. Previous behavior can be regained with + `rng.gen_iter().take(n).collect()` +- `Rng::gen_ascii_str()` was removed. Previous behavior can be regained with + `rng.gen_ascii_chars().take(n).collect()` +- {IsaacRng, Isaac64Rng, XorShiftRng}::new() have all been removed. These all + relied on being able to use an OSRng for seeding, but this is no longer + available in librand (where these types are defined). To retain the same + functionality, these types now implement the `Rand` trait so they can be + generated with a random seed from another random number generator. This allows + the stdlib to use an OSRng to create seeded instances of these RNGs. +- Rand implementations for `Box` and `@T` were removed. These seemed to be + pretty rare in the codebase, and it allows for librand to not depend on + liballoc. Additionally, other pointer types like Rc and Arc were not + supported. 
+- Remove a slew of old deprecated functions + + +## [Rust 0.10] - 2014-04-03 +### Changed +- replace `Rng.shuffle's` functionality with `.shuffle_mut` +- bubble up IO errors when creating an OSRng + +### Fixed +- Use `fill()` instead of `read()` +- Rewrite OsRng in Rust for windows + +## [0.10-pre] - 2014-03-02 +### Added +- Separate `rand` out of the standard library diff --git a/src/rust/vendor/rand/COPYRIGHT b/src/rust/vendor/rand/COPYRIGHT new file mode 100644 index 000000000..468d907ca --- /dev/null +++ b/src/rust/vendor/rand/COPYRIGHT @@ -0,0 +1,12 @@ +Copyrights in the Rand project are retained by their contributors. No +copyright assignment is required to contribute to the Rand project. + +For full authorship information, see the version control history. + +Except as otherwise noted (below and/or in individual files), Rand is +licensed under the Apache License, Version 2.0 or + or the MIT license + or , at your option. + +The Rand project includes code from the Rust project +published under these same licenses. diff --git a/src/rust/vendor/rand/Cargo.toml b/src/rust/vendor/rand/Cargo.toml new file mode 100644 index 000000000..3f38081ee --- /dev/null +++ b/src/rust/vendor/rand/Cargo.toml @@ -0,0 +1,75 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +name = "rand" +version = "0.8.5" +authors = ["The Rand Project Developers", "The Rust Project Developers"] +include = ["src/", "LICENSE-*", "README.md", "CHANGELOG.md", "COPYRIGHT"] +autobenches = true +description = "Random number generators and other randomness functionality.\n" +homepage = "https://rust-random.github.io/book" +documentation = "https://docs.rs/rand" +readme = "README.md" +keywords = ["random", "rng"] +categories = ["algorithms", "no-std"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-random/rand" +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "doc_cfg"] + +[package.metadata.playground] +features = ["small_rng", "serde1"] +[dependencies.log] +version = "0.4.4" +optional = true + +[dependencies.packed_simd] +version = "0.3.7" +features = ["into_bits"] +optional = true +package = "packed_simd_2" + +[dependencies.rand_chacha] +version = "0.3.0" +optional = true +default-features = false + +[dependencies.rand_core] +version = "0.6.0" + +[dependencies.serde] +version = "1.0.103" +features = ["derive"] +optional = true +[dev-dependencies.bincode] +version = "1.2.1" + +[dev-dependencies.rand_pcg] +version = "0.3.0" + +[features] +alloc = ["rand_core/alloc"] +default = ["std", "std_rng"] +getrandom = ["rand_core/getrandom"] +min_const_gen = [] +nightly = [] +serde1 = ["serde", "rand_core/serde1"] +simd_support = ["packed_simd"] +small_rng = [] +std = ["rand_core/std", "rand_chacha/std", "alloc", "getrandom", "libc"] +std_rng = ["rand_chacha"] +[target."cfg(unix)".dependencies.libc] +version = "0.2.22" +optional = true +default-features = false diff --git a/src/rust/vendor/rand/LICENSE-APACHE b/src/rust/vendor/rand/LICENSE-APACHE new file mode 100644 index 000000000..494ad3bfd --- /dev/null +++ b/src/rust/vendor/rand/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, 
AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/src/rust/vendor/rand/LICENSE-MIT b/src/rust/vendor/rand/LICENSE-MIT new file mode 100644 index 000000000..d93b5baf3 --- /dev/null +++ b/src/rust/vendor/rand/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright 2018 Developers of the Rand project +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/rust/vendor/rand/README.md b/src/rust/vendor/rand/README.md new file mode 100644 index 000000000..44c2e4d51 --- /dev/null +++ b/src/rust/vendor/rand/README.md @@ -0,0 +1,158 @@ +# Rand + +[![Test Status](https://github.com/rust-random/rand/workflows/Tests/badge.svg?event=push)](https://github.com/rust-random/rand/actions) +[![Crate](https://img.shields.io/crates/v/rand.svg)](https://crates.io/crates/rand) +[![Book](https://img.shields.io/badge/book-master-yellow.svg)](https://rust-random.github.io/book/) +[![API](https://img.shields.io/badge/api-master-yellow.svg)](https://rust-random.github.io/rand/rand) +[![API](https://docs.rs/rand/badge.svg)](https://docs.rs/rand) +[![Minimum rustc version](https://img.shields.io/badge/rustc-1.36+-lightgray.svg)](https://github.com/rust-random/rand#rust-version-requirements) + +A Rust library for random number generation, featuring: + +- Easy random value generation and usage via the [`Rng`](https://docs.rs/rand/*/rand/trait.Rng.html), + [`SliceRandom`](https://docs.rs/rand/*/rand/seq/trait.SliceRandom.html) and + [`IteratorRandom`](https://docs.rs/rand/*/rand/seq/trait.IteratorRandom.html) traits +- Secure seeding via the [`getrandom` crate](https://crates.io/crates/getrandom) + and fast, convenient generation via [`thread_rng`](https://docs.rs/rand/*/rand/fn.thread_rng.html) +- A modular design built over [`rand_core`](https://crates.io/crates/rand_core) + ([see the book](https://rust-random.github.io/book/crates.html)) +- Fast implementations of the best-in-class [cryptographic](https://rust-random.github.io/book/guide-rngs.html#cryptographically-secure-pseudo-random-number-generators-csprngs) and + [non-cryptographic](https://rust-random.github.io/book/guide-rngs.html#basic-pseudo-random-number-generators-prngs) generators +- A flexible [`distributions`](https://docs.rs/rand/*/rand/distributions/index.html) module +- Samplers for a large number of random number distributions via our own + 
[`rand_distr`](https://docs.rs/rand_distr) and via + the [`statrs`](https://docs.rs/statrs/0.13.0/statrs/) +- [Portably reproducible output](https://rust-random.github.io/book/portability.html) +- `#[no_std]` compatibility (partial) +- *Many* performance optimisations + +It's also worth pointing out what `rand` *is not*: + +- Small. Most low-level crates are small, but the higher-level `rand` and + `rand_distr` each contain a lot of functionality. +- Simple (implementation). We have a strong focus on correctness, speed and flexibility, but + not simplicity. If you prefer a small-and-simple library, there are + alternatives including [fastrand](https://crates.io/crates/fastrand) + and [oorandom](https://crates.io/crates/oorandom). +- Slow. We take performance seriously, with considerations also for set-up + time of new distributions, commonly-used parameters, and parameters of the + current sampler. + +Documentation: + +- [The Rust Rand Book](https://rust-random.github.io/book) +- [API reference (master branch)](https://rust-random.github.io/rand) +- [API reference (docs.rs)](https://docs.rs/rand) + + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +rand = "0.8.4" +``` + +To get started using Rand, see [The Book](https://rust-random.github.io/book). + + +## Versions + +Rand is *mature* (suitable for general usage, with infrequent breaking releases +which minimise breakage) but not yet at 1.0. We maintain compatibility with +pinned versions of the Rust compiler (see below). + +Current Rand versions are: + +- Version 0.7 was released in June 2019, moving most non-uniform distributions + to an external crate, moving `from_entropy` to `SeedableRng`, and many small + changes and fixes. +- Version 0.8 was released in December 2020 with many small changes. + +A detailed [changelog](CHANGELOG.md) is available for releases. 
+ +When upgrading to the next minor series (especially 0.4 → 0.5), we recommend +reading the [Upgrade Guide](https://rust-random.github.io/book/update.html). + +Rand has not yet reached 1.0 implying some breaking changes may arrive in the +future ([SemVer](https://semver.org/) allows each 0.x.0 release to include +breaking changes), but is considered *mature*: breaking changes are minimised +and breaking releases are infrequent. + +Rand libs have inter-dependencies and make use of the +[semver trick](https://github.com/dtolnay/semver-trick/) in order to make traits +compatible across crate versions. (This is especially important for `RngCore` +and `SeedableRng`.) A few crate releases are thus compatibility shims, +depending on the *next* lib version (e.g. `rand_core` versions `0.2.2` and +`0.3.1`). This means, for example, that `rand_core_0_4_0::SeedableRng` and +`rand_core_0_3_0::SeedableRng` are distinct, incompatible traits, which can +cause build errors. Usually, running `cargo update` is enough to fix any issues. + +### Yanked versions + +Some versions of Rand crates have been yanked ("unreleased"). Where this occurs, +the crate's CHANGELOG *should* be updated with a rationale, and a search on the +issue tracker with the keyword `yank` *should* uncover the motivation. + +### Rust version requirements + +Since version 0.8, Rand requires **Rustc version 1.36 or greater**. +Rand 0.7 requires Rustc 1.32 or greater while versions 0.5 require Rustc 1.22 or +greater, and 0.4 and 0.3 (since approx. June 2017) require Rustc version 1.15 or +greater. Subsets of the Rand code may work with older Rust versions, but this is +not supported. + +Continuous Integration (CI) will always test the minimum supported Rustc version +(the MSRV). The current policy is that this can be updated in any +Rand release if required, but the change must be noted in the changelog. 
+ +## Crate Features + +Rand is built with these features enabled by default: + +- `std` enables functionality dependent on the `std` lib +- `alloc` (implied by `std`) enables functionality requiring an allocator +- `getrandom` (implied by `std`) is an optional dependency providing the code + behind `rngs::OsRng` +- `std_rng` enables inclusion of `StdRng`, `thread_rng` and `random` + (the latter two *also* require that `std` be enabled) + +Optionally, the following dependencies can be enabled: + +- `log` enables logging via the `log` crate + +Additionally, these features configure Rand: + +- `small_rng` enables inclusion of the `SmallRng` PRNG +- `nightly` enables some optimizations requiring nightly Rust +- `simd_support` (experimental) enables sampling of SIMD values + (uniformly random SIMD integers and floats), requiring nightly Rust +- `min_const_gen` enables generating random arrays of + any size using min-const-generics, requiring Rust ≥ 1.51. + +Note that nightly features are not stable and therefore not all library and +compiler versions will be compatible. This is especially true of Rand's +experimental `simd_support` feature. + +Rand supports limited functionality in `no_std` mode (enabled via +`default-features = false`). In this case, `OsRng` and `from_entropy` are +unavailable (unless `getrandom` is enabled), large parts of `seq` are +unavailable (unless `alloc` is enabled), and `thread_rng` and `random` are +unavailable. + +### WASM support + +The WASM target `wasm32-unknown-unknown` is not *automatically* supported by +`rand` or `getrandom`. To solve this, either use a different target such as +`wasm32-wasi` or add a direct dependency on `getrandom` with the `js` feature +(if the target supports JavaScript). See +[getrandom#WebAssembly support](https://docs.rs/getrandom/latest/getrandom/#webassembly-support). + +# License + +Rand is distributed under the terms of both the MIT license and the +Apache License (Version 2.0). 
+ +See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT), and +[COPYRIGHT](COPYRIGHT) for details. diff --git a/src/rust/vendor/rand/src/distributions/bernoulli.rs b/src/rust/vendor/rand/src/distributions/bernoulli.rs new file mode 100644 index 000000000..226db79fa --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/bernoulli.rs @@ -0,0 +1,219 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The Bernoulli distribution. + +use crate::distributions::Distribution; +use crate::Rng; +use core::{fmt, u64}; + +#[cfg(feature = "serde1")] +use serde::{Serialize, Deserialize}; +/// The Bernoulli distribution. +/// +/// This is a special case of the Binomial distribution where `n = 1`. +/// +/// # Example +/// +/// ```rust +/// use rand::distributions::{Bernoulli, Distribution}; +/// +/// let d = Bernoulli::new(0.3).unwrap(); +/// let v = d.sample(&mut rand::thread_rng()); +/// println!("{} is from a Bernoulli distribution", v); +/// ``` +/// +/// # Precision +/// +/// This `Bernoulli` distribution uses 64 bits from the RNG (a `u64`), +/// so only probabilities that are multiples of 2-64 can be +/// represented. +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub struct Bernoulli { + /// Probability of success, relative to the maximal integer. + p_int: u64, +} + +// To sample from the Bernoulli distribution we use a method that compares a +// random `u64` value `v < (p * 2^64)`. +// +// If `p == 1.0`, the integer `v` to compare against can not represented as a +// `u64`. We manually set it to `u64::MAX` instead (2^64 - 1 instead of 2^64). 
+// Note that value of `p < 1.0` can never result in `u64::MAX`, because an +// `f64` only has 53 bits of precision, and the next largest value of `p` will +// result in `2^64 - 2048`. +// +// Also there is a 100% theoretical concern: if someone consistently wants to +// generate `true` using the Bernoulli distribution (i.e. by using a probability +// of `1.0`), just using `u64::MAX` is not enough. On average it would return +// false once every 2^64 iterations. Some people apparently care about this +// case. +// +// That is why we special-case `u64::MAX` to always return `true`, without using +// the RNG, and pay the performance price for all uses that *are* reasonable. +// Luckily, if `new()` and `sample` are close, the compiler can optimize out the +// extra check. +const ALWAYS_TRUE: u64 = u64::MAX; + +// This is just `2.0.powi(64)`, but written this way because it is not available +// in `no_std` mode. +const SCALE: f64 = 2.0 * (1u64 << 63) as f64; + +/// Error type returned from `Bernoulli::new`. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum BernoulliError { + /// `p < 0` or `p > 1`. + InvalidProbability, +} + +impl fmt::Display for BernoulliError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match self { + BernoulliError::InvalidProbability => "p is outside [0, 1] in Bernoulli distribution", + }) + } +} + +#[cfg(feature = "std")] +impl ::std::error::Error for BernoulliError {} + +impl Bernoulli { + /// Construct a new `Bernoulli` with the given probability of success `p`. + /// + /// # Precision + /// + /// For `p = 1.0`, the resulting distribution will always generate true. + /// For `p = 0.0`, the resulting distribution will always generate false. + /// + /// This method is accurate for any input `p` in the range `[0, 1]` which is + /// a multiple of 2-64. (Note that not all multiples of + /// 2-64 in `[0, 1]` can be represented as a `f64`.) 
+ #[inline] + pub fn new(p: f64) -> Result { + if !(0.0..1.0).contains(&p) { + if p == 1.0 { + return Ok(Bernoulli { p_int: ALWAYS_TRUE }); + } + return Err(BernoulliError::InvalidProbability); + } + Ok(Bernoulli { + p_int: (p * SCALE) as u64, + }) + } + + /// Construct a new `Bernoulli` with the probability of success of + /// `numerator`-in-`denominator`. I.e. `new_ratio(2, 3)` will return + /// a `Bernoulli` with a 2-in-3 chance, or about 67%, of returning `true`. + /// + /// return `true`. If `numerator == 0` it will always return `false`. + /// For `numerator > denominator` and `denominator == 0`, this returns an + /// error. Otherwise, for `numerator == denominator`, samples are always + /// true; for `numerator == 0` samples are always false. + #[inline] + pub fn from_ratio(numerator: u32, denominator: u32) -> Result { + if numerator > denominator || denominator == 0 { + return Err(BernoulliError::InvalidProbability); + } + if numerator == denominator { + return Ok(Bernoulli { p_int: ALWAYS_TRUE }); + } + let p_int = ((f64::from(numerator) / f64::from(denominator)) * SCALE) as u64; + Ok(Bernoulli { p_int }) + } +} + +impl Distribution for Bernoulli { + #[inline] + fn sample(&self, rng: &mut R) -> bool { + // Make sure to always return true for p = 1.0. + if self.p_int == ALWAYS_TRUE { + return true; + } + let v: u64 = rng.gen(); + v < self.p_int + } +} + +#[cfg(test)] +mod test { + use super::Bernoulli; + use crate::distributions::Distribution; + use crate::Rng; + + #[test] + #[cfg(feature="serde1")] + fn test_serializing_deserializing_bernoulli() { + let coin_flip = Bernoulli::new(0.5).unwrap(); + let de_coin_flip : Bernoulli = bincode::deserialize(&bincode::serialize(&coin_flip).unwrap()).unwrap(); + + assert_eq!(coin_flip.p_int, de_coin_flip.p_int); + } + + #[test] + fn test_trivial() { + // We prefer to be explicit here. 
+ #![allow(clippy::bool_assert_comparison)] + + let mut r = crate::test::rng(1); + let always_false = Bernoulli::new(0.0).unwrap(); + let always_true = Bernoulli::new(1.0).unwrap(); + for _ in 0..5 { + assert_eq!(r.sample::(&always_false), false); + assert_eq!(r.sample::(&always_true), true); + assert_eq!(Distribution::::sample(&always_false, &mut r), false); + assert_eq!(Distribution::::sample(&always_true, &mut r), true); + } + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_average() { + const P: f64 = 0.3; + const NUM: u32 = 3; + const DENOM: u32 = 10; + let d1 = Bernoulli::new(P).unwrap(); + let d2 = Bernoulli::from_ratio(NUM, DENOM).unwrap(); + const N: u32 = 100_000; + + let mut sum1: u32 = 0; + let mut sum2: u32 = 0; + let mut rng = crate::test::rng(2); + for _ in 0..N { + if d1.sample(&mut rng) { + sum1 += 1; + } + if d2.sample(&mut rng) { + sum2 += 1; + } + } + let avg1 = (sum1 as f64) / (N as f64); + assert!((avg1 - P).abs() < 5e-3); + + let avg2 = (sum2 as f64) / (N as f64); + assert!((avg2 - (NUM as f64) / (DENOM as f64)).abs() < 5e-3); + } + + #[test] + fn value_stability() { + let mut rng = crate::test::rng(3); + let distr = Bernoulli::new(0.4532).unwrap(); + let mut buf = [false; 10]; + for x in &mut buf { + *x = rng.sample(&distr); + } + assert_eq!(buf, [ + true, false, false, true, false, false, true, true, true, true + ]); + } + + #[test] + fn bernoulli_distributions_can_be_compared() { + assert_eq!(Bernoulli::new(1.0), Bernoulli::new(1.0)); + } +} diff --git a/src/rust/vendor/rand/src/distributions/distribution.rs b/src/rust/vendor/rand/src/distributions/distribution.rs new file mode 100644 index 000000000..c5cf6a607 --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/distribution.rs @@ -0,0 +1,272 @@ +// Copyright 2018 Developers of the Rand project. +// Copyright 2013-2017 The Rust Project Developers. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Distribution trait and associates + +use crate::Rng; +use core::iter; +#[cfg(feature = "alloc")] +use alloc::string::String; + +/// Types (distributions) that can be used to create a random instance of `T`. +/// +/// It is possible to sample from a distribution through both the +/// `Distribution` and [`Rng`] traits, via `distr.sample(&mut rng)` and +/// `rng.sample(distr)`. They also both offer the [`sample_iter`] method, which +/// produces an iterator that samples from the distribution. +/// +/// All implementations are expected to be immutable; this has the significant +/// advantage of not needing to consider thread safety, and for most +/// distributions efficient state-less sampling algorithms are available. +/// +/// Implementations are typically expected to be portable with reproducible +/// results when used with a PRNG with fixed seed; see the +/// [portability chapter](https://rust-random.github.io/book/portability.html) +/// of The Rust Rand Book. In some cases this does not apply, e.g. the `usize` +/// type requires different sampling on 32-bit and 64-bit machines. +/// +/// [`sample_iter`]: Distribution::sample_iter +pub trait Distribution { + /// Generate a random value of `T`, using `rng` as the source of randomness. + fn sample(&self, rng: &mut R) -> T; + + /// Create an iterator that generates random values of `T`, using `rng` as + /// the source of randomness. + /// + /// Note that this function takes `self` by value. This works since + /// `Distribution` is impl'd for `&D` where `D: Distribution`, + /// however borrowing is not automatic hence `distr.sample_iter(...)` may + /// need to be replaced with `(&distr).sample_iter(...)` to borrow or + /// `(&*distr).sample_iter(...)` to reborrow an existing reference. 
+ /// + /// # Example + /// + /// ``` + /// use rand::thread_rng; + /// use rand::distributions::{Distribution, Alphanumeric, Uniform, Standard}; + /// + /// let mut rng = thread_rng(); + /// + /// // Vec of 16 x f32: + /// let v: Vec = Standard.sample_iter(&mut rng).take(16).collect(); + /// + /// // String: + /// let s: String = Alphanumeric + /// .sample_iter(&mut rng) + /// .take(7) + /// .map(char::from) + /// .collect(); + /// + /// // Dice-rolling: + /// let die_range = Uniform::new_inclusive(1, 6); + /// let mut roll_die = die_range.sample_iter(&mut rng); + /// while roll_die.next().unwrap() != 6 { + /// println!("Not a 6; rolling again!"); + /// } + /// ``` + fn sample_iter(self, rng: R) -> DistIter + where + R: Rng, + Self: Sized, + { + DistIter { + distr: self, + rng, + phantom: ::core::marker::PhantomData, + } + } + + /// Create a distribution of values of 'S' by mapping the output of `Self` + /// through the closure `F` + /// + /// # Example + /// + /// ``` + /// use rand::thread_rng; + /// use rand::distributions::{Distribution, Uniform}; + /// + /// let mut rng = thread_rng(); + /// + /// let die = Uniform::new_inclusive(1, 6); + /// let even_number = die.map(|num| num % 2 == 0); + /// while !even_number.sample(&mut rng) { + /// println!("Still odd; rolling again!"); + /// } + /// ``` + fn map(self, func: F) -> DistMap + where + F: Fn(T) -> S, + Self: Sized, + { + DistMap { + distr: self, + func, + phantom: ::core::marker::PhantomData, + } + } +} + +impl<'a, T, D: Distribution> Distribution for &'a D { + fn sample(&self, rng: &mut R) -> T { + (*self).sample(rng) + } +} + +/// An iterator that generates random values of `T` with distribution `D`, +/// using `R` as the source of randomness. +/// +/// This `struct` is created by the [`sample_iter`] method on [`Distribution`]. +/// See its documentation for more. 
+/// +/// [`sample_iter`]: Distribution::sample_iter +#[derive(Debug)] +pub struct DistIter { + distr: D, + rng: R, + phantom: ::core::marker::PhantomData, +} + +impl Iterator for DistIter +where + D: Distribution, + R: Rng, +{ + type Item = T; + + #[inline(always)] + fn next(&mut self) -> Option { + // Here, self.rng may be a reference, but we must take &mut anyway. + // Even if sample could take an R: Rng by value, we would need to do this + // since Rng is not copyable and we cannot enforce that this is "reborrowable". + Some(self.distr.sample(&mut self.rng)) + } + + fn size_hint(&self) -> (usize, Option) { + (usize::max_value(), None) + } +} + +impl iter::FusedIterator for DistIter +where + D: Distribution, + R: Rng, +{ +} + +#[cfg(features = "nightly")] +impl iter::TrustedLen for DistIter +where + D: Distribution, + R: Rng, +{ +} + +/// A distribution of values of type `S` derived from the distribution `D` +/// by mapping its output of type `T` through the closure `F`. +/// +/// This `struct` is created by the [`Distribution::map`] method. +/// See its documentation for more. +#[derive(Debug)] +pub struct DistMap { + distr: D, + func: F, + phantom: ::core::marker::PhantomData S>, +} + +impl Distribution for DistMap +where + D: Distribution, + F: Fn(T) -> S, +{ + fn sample(&self, rng: &mut R) -> S { + (self.func)(self.distr.sample(rng)) + } +} + +/// `String` sampler +/// +/// Sampling a `String` of random characters is not quite the same as collecting +/// a sequence of chars. This trait contains some helpers. 
+#[cfg(feature = "alloc")] +pub trait DistString { + /// Append `len` random chars to `string` + fn append_string(&self, rng: &mut R, string: &mut String, len: usize); + + /// Generate a `String` of `len` random chars + #[inline] + fn sample_string(&self, rng: &mut R, len: usize) -> String { + let mut s = String::new(); + self.append_string(rng, &mut s, len); + s + } +} + +#[cfg(test)] +mod tests { + use crate::distributions::{Distribution, Uniform}; + use crate::Rng; + + #[test] + fn test_distributions_iter() { + use crate::distributions::Open01; + let mut rng = crate::test::rng(210); + let distr = Open01; + let mut iter = Distribution::::sample_iter(distr, &mut rng); + let mut sum: f32 = 0.; + for _ in 0..100 { + sum += iter.next().unwrap(); + } + assert!(0. < sum && sum < 100.); + } + + #[test] + fn test_distributions_map() { + let dist = Uniform::new_inclusive(0, 5).map(|val| val + 15); + + let mut rng = crate::test::rng(212); + let val = dist.sample(&mut rng); + assert!((15..=20).contains(&val)); + } + + #[test] + fn test_make_an_iter() { + fn ten_dice_rolls_other_than_five( + rng: &mut R, + ) -> impl Iterator + '_ { + Uniform::new_inclusive(1, 6) + .sample_iter(rng) + .filter(|x| *x != 5) + .take(10) + } + + let mut rng = crate::test::rng(211); + let mut count = 0; + for val in ten_dice_rolls_other_than_five(&mut rng) { + assert!((1..=6).contains(&val) && val != 5); + count += 1; + } + assert_eq!(count, 10); + } + + #[test] + #[cfg(feature = "alloc")] + fn test_dist_string() { + use core::str; + use crate::distributions::{Alphanumeric, DistString, Standard}; + let mut rng = crate::test::rng(213); + + let s1 = Alphanumeric.sample_string(&mut rng, 20); + assert_eq!(s1.len(), 20); + assert_eq!(str::from_utf8(s1.as_bytes()), Ok(s1.as_str())); + + let s2 = Standard.sample_string(&mut rng, 20); + assert_eq!(s2.chars().count(), 20); + assert_eq!(str::from_utf8(s2.as_bytes()), Ok(s2.as_str())); + } +} diff --git a/src/rust/vendor/rand/src/distributions/float.rs 
b/src/rust/vendor/rand/src/distributions/float.rs new file mode 100644 index 000000000..ce5946f7f --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/float.rs @@ -0,0 +1,312 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Basic floating-point number distributions + +use crate::distributions::utils::FloatSIMDUtils; +use crate::distributions::{Distribution, Standard}; +use crate::Rng; +use core::mem; +#[cfg(feature = "simd_support")] use packed_simd::*; + +#[cfg(feature = "serde1")] +use serde::{Serialize, Deserialize}; + +/// A distribution to sample floating point numbers uniformly in the half-open +/// interval `(0, 1]`, i.e. including 1 but not 0. +/// +/// All values that can be generated are of the form `n * ε/2`. For `f32` +/// the 24 most significant random bits of a `u32` are used and for `f64` the +/// 53 most significant bits of a `u64` are used. The conversion uses the +/// multiplicative method. +/// +/// See also: [`Standard`] which samples from `[0, 1)`, [`Open01`] +/// which samples from `(0, 1)` and [`Uniform`] which samples from arbitrary +/// ranges. +/// +/// # Example +/// ``` +/// use rand::{thread_rng, Rng}; +/// use rand::distributions::OpenClosed01; +/// +/// let val: f32 = thread_rng().sample(OpenClosed01); +/// println!("f32 from (0, 1): {}", val); +/// ``` +/// +/// [`Standard`]: crate::distributions::Standard +/// [`Open01`]: crate::distributions::Open01 +/// [`Uniform`]: crate::distributions::uniform::Uniform +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub struct OpenClosed01; + +/// A distribution to sample floating point numbers uniformly in the open +/// interval `(0, 1)`, i.e. not including either endpoint. 
+/// +/// All values that can be generated are of the form `n * ε + ε/2`. For `f32` +/// the 23 most significant random bits of an `u32` are used, for `f64` 52 from +/// an `u64`. The conversion uses a transmute-based method. +/// +/// See also: [`Standard`] which samples from `[0, 1)`, [`OpenClosed01`] +/// which samples from `(0, 1]` and [`Uniform`] which samples from arbitrary +/// ranges. +/// +/// # Example +/// ``` +/// use rand::{thread_rng, Rng}; +/// use rand::distributions::Open01; +/// +/// let val: f32 = thread_rng().sample(Open01); +/// println!("f32 from (0, 1): {}", val); +/// ``` +/// +/// [`Standard`]: crate::distributions::Standard +/// [`OpenClosed01`]: crate::distributions::OpenClosed01 +/// [`Uniform`]: crate::distributions::uniform::Uniform +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub struct Open01; + + +// This trait is needed by both this lib and rand_distr hence is a hidden export +#[doc(hidden)] +pub trait IntoFloat { + type F; + + /// Helper method to combine the fraction and a constant exponent into a + /// float. + /// + /// Only the least significant bits of `self` may be set, 23 for `f32` and + /// 52 for `f64`. + /// The resulting value will fall in a range that depends on the exponent. + /// As an example the range with exponent 0 will be + /// [20..21), which is [1..2). + fn into_float_with_exponent(self, exponent: i32) -> Self::F; +} + +macro_rules! 
float_impls { + ($ty:ident, $uty:ident, $f_scalar:ident, $u_scalar:ty, + $fraction_bits:expr, $exponent_bias:expr) => { + impl IntoFloat for $uty { + type F = $ty; + #[inline(always)] + fn into_float_with_exponent(self, exponent: i32) -> $ty { + // The exponent is encoded using an offset-binary representation + let exponent_bits: $u_scalar = + (($exponent_bias + exponent) as $u_scalar) << $fraction_bits; + $ty::from_bits(self | exponent_bits) + } + } + + impl Distribution<$ty> for Standard { + fn sample(&self, rng: &mut R) -> $ty { + // Multiply-based method; 24/53 random bits; [0, 1) interval. + // We use the most significant bits because for simple RNGs + // those are usually more random. + let float_size = mem::size_of::<$f_scalar>() as u32 * 8; + let precision = $fraction_bits + 1; + let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar); + + let value: $uty = rng.gen(); + let value = value >> (float_size - precision); + scale * $ty::cast_from_int(value) + } + } + + impl Distribution<$ty> for OpenClosed01 { + fn sample(&self, rng: &mut R) -> $ty { + // Multiply-based method; 24/53 random bits; (0, 1] interval. + // We use the most significant bits because for simple RNGs + // those are usually more random. + let float_size = mem::size_of::<$f_scalar>() as u32 * 8; + let precision = $fraction_bits + 1; + let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar); + + let value: $uty = rng.gen(); + let value = value >> (float_size - precision); + // Add 1 to shift up; will not overflow because of right-shift: + scale * $ty::cast_from_int(value + 1) + } + } + + impl Distribution<$ty> for Open01 { + fn sample(&self, rng: &mut R) -> $ty { + // Transmute-based method; 23/52 random bits; (0, 1) interval. + // We use the most significant bits because for simple RNGs + // those are usually more random. 
+ use core::$f_scalar::EPSILON; + let float_size = mem::size_of::<$f_scalar>() as u32 * 8; + + let value: $uty = rng.gen(); + let fraction = value >> (float_size - $fraction_bits); + fraction.into_float_with_exponent(0) - (1.0 - EPSILON / 2.0) + } + } + } +} + +float_impls! { f32, u32, f32, u32, 23, 127 } +float_impls! { f64, u64, f64, u64, 52, 1023 } + +#[cfg(feature = "simd_support")] +float_impls! { f32x2, u32x2, f32, u32, 23, 127 } +#[cfg(feature = "simd_support")] +float_impls! { f32x4, u32x4, f32, u32, 23, 127 } +#[cfg(feature = "simd_support")] +float_impls! { f32x8, u32x8, f32, u32, 23, 127 } +#[cfg(feature = "simd_support")] +float_impls! { f32x16, u32x16, f32, u32, 23, 127 } + +#[cfg(feature = "simd_support")] +float_impls! { f64x2, u64x2, f64, u64, 52, 1023 } +#[cfg(feature = "simd_support")] +float_impls! { f64x4, u64x4, f64, u64, 52, 1023 } +#[cfg(feature = "simd_support")] +float_impls! { f64x8, u64x8, f64, u64, 52, 1023 } + + +#[cfg(test)] +mod tests { + use super::*; + use crate::rngs::mock::StepRng; + + const EPSILON32: f32 = ::core::f32::EPSILON; + const EPSILON64: f64 = ::core::f64::EPSILON; + + macro_rules! 
test_f32 { + ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => { + #[test] + fn $fnn() { + // Standard + let mut zeros = StepRng::new(0, 0); + assert_eq!(zeros.gen::<$ty>(), $ZERO); + let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0); + assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0); + let mut max = StepRng::new(!0, 0); + assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0); + + // OpenClosed01 + let mut zeros = StepRng::new(0, 0); + assert_eq!(zeros.sample::<$ty, _>(OpenClosed01), 0.0 + $EPSILON / 2.0); + let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0); + assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON); + let mut max = StepRng::new(!0, 0); + assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0); + + // Open01 + let mut zeros = StepRng::new(0, 0); + assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0); + let mut one = StepRng::new(1 << 9 | 1 << (9 + 32), 0); + assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0); + let mut max = StepRng::new(!0, 0); + assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0); + } + }; + } + test_f32! { f32_edge_cases, f32, 0.0, EPSILON32 } + #[cfg(feature = "simd_support")] + test_f32! { f32x2_edge_cases, f32x2, f32x2::splat(0.0), f32x2::splat(EPSILON32) } + #[cfg(feature = "simd_support")] + test_f32! { f32x4_edge_cases, f32x4, f32x4::splat(0.0), f32x4::splat(EPSILON32) } + #[cfg(feature = "simd_support")] + test_f32! { f32x8_edge_cases, f32x8, f32x8::splat(0.0), f32x8::splat(EPSILON32) } + #[cfg(feature = "simd_support")] + test_f32! { f32x16_edge_cases, f32x16, f32x16::splat(0.0), f32x16::splat(EPSILON32) } + + macro_rules! 
test_f64 { + ($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => { + #[test] + fn $fnn() { + // Standard + let mut zeros = StepRng::new(0, 0); + assert_eq!(zeros.gen::<$ty>(), $ZERO); + let mut one = StepRng::new(1 << 11, 0); + assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0); + let mut max = StepRng::new(!0, 0); + assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0); + + // OpenClosed01 + let mut zeros = StepRng::new(0, 0); + assert_eq!(zeros.sample::<$ty, _>(OpenClosed01), 0.0 + $EPSILON / 2.0); + let mut one = StepRng::new(1 << 11, 0); + assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON); + let mut max = StepRng::new(!0, 0); + assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0); + + // Open01 + let mut zeros = StepRng::new(0, 0); + assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0); + let mut one = StepRng::new(1 << 12, 0); + assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0); + let mut max = StepRng::new(!0, 0); + assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0); + } + }; + } + test_f64! { f64_edge_cases, f64, 0.0, EPSILON64 } + #[cfg(feature = "simd_support")] + test_f64! { f64x2_edge_cases, f64x2, f64x2::splat(0.0), f64x2::splat(EPSILON64) } + #[cfg(feature = "simd_support")] + test_f64! { f64x4_edge_cases, f64x4, f64x4::splat(0.0), f64x4::splat(EPSILON64) } + #[cfg(feature = "simd_support")] + test_f64! 
{ f64x8_edge_cases, f64x8, f64x8::splat(0.0), f64x8::splat(EPSILON64) } + + #[test] + fn value_stability() { + fn test_samples>( + distr: &D, zero: T, expected: &[T], + ) { + let mut rng = crate::test::rng(0x6f44f5646c2a7334); + let mut buf = [zero; 3]; + for x in &mut buf { + *x = rng.sample(&distr); + } + assert_eq!(&buf, expected); + } + + test_samples(&Standard, 0f32, &[0.0035963655, 0.7346052, 0.09778172]); + test_samples(&Standard, 0f64, &[ + 0.7346051961657583, + 0.20298547462974248, + 0.8166436635290655, + ]); + + test_samples(&OpenClosed01, 0f32, &[0.003596425, 0.73460525, 0.09778178]); + test_samples(&OpenClosed01, 0f64, &[ + 0.7346051961657584, + 0.2029854746297426, + 0.8166436635290656, + ]); + + test_samples(&Open01, 0f32, &[0.0035963655, 0.73460525, 0.09778172]); + test_samples(&Open01, 0f64, &[ + 0.7346051961657584, + 0.20298547462974248, + 0.8166436635290656, + ]); + + #[cfg(feature = "simd_support")] + { + // We only test a sub-set of types here. Values are identical to + // non-SIMD types; we assume this pattern continues across all + // SIMD types. + + test_samples(&Standard, f32x2::new(0.0, 0.0), &[ + f32x2::new(0.0035963655, 0.7346052), + f32x2::new(0.09778172, 0.20298547), + f32x2::new(0.34296435, 0.81664366), + ]); + + test_samples(&Standard, f64x2::new(0.0, 0.0), &[ + f64x2::new(0.7346051961657583, 0.20298547462974248), + f64x2::new(0.8166436635290655, 0.7423708925400552), + f64x2::new(0.16387782224016323, 0.9087068770169618), + ]); + } + } +} diff --git a/src/rust/vendor/rand/src/distributions/integer.rs b/src/rust/vendor/rand/src/distributions/integer.rs new file mode 100644 index 000000000..19ce71599 --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/integer.rs @@ -0,0 +1,274 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
The implementations of the `Standard` distribution for integer types. + +use crate::distributions::{Distribution, Standard}; +use crate::Rng; +#[cfg(all(target_arch = "x86", feature = "simd_support"))] +use core::arch::x86::{__m128i, __m256i}; +#[cfg(all(target_arch = "x86_64", feature = "simd_support"))] +use core::arch::x86_64::{__m128i, __m256i}; +use core::num::{NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, + NonZeroU128}; +#[cfg(feature = "simd_support")] use packed_simd::*; + +impl Distribution for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> u8 { + rng.next_u32() as u8 + } +} + +impl Distribution for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> u16 { + rng.next_u32() as u16 + } +} + +impl Distribution for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> u32 { + rng.next_u32() + } +} + +impl Distribution for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> u64 { + rng.next_u64() + } +} + +impl Distribution for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> u128 { + // Use LE; we explicitly generate one value before the next. + let x = u128::from(rng.next_u64()); + let y = u128::from(rng.next_u64()); + (y << 64) | x + } +} + +impl Distribution for Standard { + #[inline] + #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))] + fn sample(&self, rng: &mut R) -> usize { + rng.next_u32() as usize + } + + #[inline] + #[cfg(target_pointer_width = "64")] + fn sample(&self, rng: &mut R) -> usize { + rng.next_u64() as usize + } +} + +macro_rules! impl_int_from_uint { + ($ty:ty, $uty:ty) => { + impl Distribution<$ty> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> $ty { + rng.gen::<$uty>() as $ty + } + } + }; +} + +impl_int_from_uint! { i8, u8 } +impl_int_from_uint! { i16, u16 } +impl_int_from_uint! { i32, u32 } +impl_int_from_uint! { i64, u64 } +impl_int_from_uint! { i128, u128 } +impl_int_from_uint! { isize, usize } + +macro_rules! 
impl_nzint { + ($ty:ty, $new:path) => { + impl Distribution<$ty> for Standard { + fn sample(&self, rng: &mut R) -> $ty { + loop { + if let Some(nz) = $new(rng.gen()) { + break nz; + } + } + } + } + }; +} + +impl_nzint!(NonZeroU8, NonZeroU8::new); +impl_nzint!(NonZeroU16, NonZeroU16::new); +impl_nzint!(NonZeroU32, NonZeroU32::new); +impl_nzint!(NonZeroU64, NonZeroU64::new); +impl_nzint!(NonZeroU128, NonZeroU128::new); +impl_nzint!(NonZeroUsize, NonZeroUsize::new); + +#[cfg(feature = "simd_support")] +macro_rules! simd_impl { + ($(($intrinsic:ident, $vec:ty),)+) => {$( + impl Distribution<$intrinsic> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> $intrinsic { + $intrinsic::from_bits(rng.gen::<$vec>()) + } + } + )+}; + + ($bits:expr,) => {}; + ($bits:expr, $ty:ty, $($ty_more:ty,)*) => { + simd_impl!($bits, $($ty_more,)*); + + impl Distribution<$ty> for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> $ty { + let mut vec: $ty = Default::default(); + unsafe { + let ptr = &mut vec; + let b_ptr = &mut *(ptr as *mut $ty as *mut [u8; $bits/8]); + rng.fill_bytes(b_ptr); + } + vec.to_le() + } + } + }; +} + +#[cfg(feature = "simd_support")] +simd_impl!(16, u8x2, i8x2,); +#[cfg(feature = "simd_support")] +simd_impl!(32, u8x4, i8x4, u16x2, i16x2,); +#[cfg(feature = "simd_support")] +simd_impl!(64, u8x8, i8x8, u16x4, i16x4, u32x2, i32x2,); +#[cfg(feature = "simd_support")] +simd_impl!(128, u8x16, i8x16, u16x8, i16x8, u32x4, i32x4, u64x2, i64x2,); +#[cfg(feature = "simd_support")] +simd_impl!(256, u8x32, i8x32, u16x16, i16x16, u32x8, i32x8, u64x4, i64x4,); +#[cfg(feature = "simd_support")] +simd_impl!(512, u8x64, i8x64, u16x32, i16x32, u32x16, i32x16, u64x8, i64x8,); +#[cfg(all( + feature = "simd_support", + any(target_arch = "x86", target_arch = "x86_64") +))] +simd_impl!((__m128i, u8x16), (__m256i, u8x32),); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_integers() { + let mut rng = crate::test::rng(806); + + rng.sample::(Standard); + 
rng.sample::(Standard); + rng.sample::(Standard); + rng.sample::(Standard); + rng.sample::(Standard); + rng.sample::(Standard); + + rng.sample::(Standard); + rng.sample::(Standard); + rng.sample::(Standard); + rng.sample::(Standard); + rng.sample::(Standard); + rng.sample::(Standard); + } + + #[test] + fn value_stability() { + fn test_samples(zero: T, expected: &[T]) + where Standard: Distribution { + let mut rng = crate::test::rng(807); + let mut buf = [zero; 3]; + for x in &mut buf { + *x = rng.sample(Standard); + } + assert_eq!(&buf, expected); + } + + test_samples(0u8, &[9, 247, 111]); + test_samples(0u16, &[32265, 42999, 38255]); + test_samples(0u32, &[2220326409, 2575017975, 2018088303]); + test_samples(0u64, &[ + 11059617991457472009, + 16096616328739788143, + 1487364411147516184, + ]); + test_samples(0u128, &[ + 296930161868957086625409848350820761097, + 145644820879247630242265036535529306392, + 111087889832015897993126088499035356354, + ]); + #[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))] + test_samples(0usize, &[2220326409, 2575017975, 2018088303]); + #[cfg(target_pointer_width = "64")] + test_samples(0usize, &[ + 11059617991457472009, + 16096616328739788143, + 1487364411147516184, + ]); + + test_samples(0i8, &[9, -9, 111]); + // Skip further i* types: they are simple reinterpretation of u* samples + + #[cfg(feature = "simd_support")] + { + // We only test a sub-set of types here and make assumptions about the rest. 
+ + test_samples(u8x2::default(), &[ + u8x2::new(9, 126), + u8x2::new(247, 167), + u8x2::new(111, 149), + ]); + test_samples(u8x4::default(), &[ + u8x4::new(9, 126, 87, 132), + u8x4::new(247, 167, 123, 153), + u8x4::new(111, 149, 73, 120), + ]); + test_samples(u8x8::default(), &[ + u8x8::new(9, 126, 87, 132, 247, 167, 123, 153), + u8x8::new(111, 149, 73, 120, 68, 171, 98, 223), + u8x8::new(24, 121, 1, 50, 13, 46, 164, 20), + ]); + + test_samples(i64x8::default(), &[ + i64x8::new( + -7387126082252079607, + -2350127744969763473, + 1487364411147516184, + 7895421560427121838, + 602190064936008898, + 6022086574635100741, + -5080089175222015595, + -4066367846667249123, + ), + i64x8::new( + 9180885022207963908, + 3095981199532211089, + 6586075293021332726, + 419343203796414657, + 3186951873057035255, + 5287129228749947252, + 444726432079249540, + -1587028029513790706, + ), + i64x8::new( + 6075236523189346388, + 1351763722368165432, + -6192309979959753740, + -7697775502176768592, + -4482022114172078123, + 7522501477800909500, + -1837258847956201231, + -586926753024886735, + ), + ]); + } + } +} diff --git a/src/rust/vendor/rand/src/distributions/mod.rs b/src/rust/vendor/rand/src/distributions/mod.rs new file mode 100644 index 000000000..05ca80606 --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/mod.rs @@ -0,0 +1,218 @@ +// Copyright 2018 Developers of the Rand project. +// Copyright 2013-2017 The Rust Project Developers. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Generating random samples from probability distributions +//! +//! This module is the home of the [`Distribution`] trait and several of its +//! implementations. It is the workhorse behind some of the convenient +//! functionality of the [`Rng`] trait, e.g. [`Rng::gen`] and of course +//! [`Rng::sample`]. +//! +//! 
Abstractly, a [probability distribution] describes the probability of +//! occurrence of each value in its sample space. +//! +//! More concretely, an implementation of `Distribution` for type `X` is an +//! algorithm for choosing values from the sample space (a subset of `T`) +//! according to the distribution `X` represents, using an external source of +//! randomness (an RNG supplied to the `sample` function). +//! +//! A type `X` may implement `Distribution` for multiple types `T`. +//! Any type implementing [`Distribution`] is stateless (i.e. immutable), +//! but it may have internal parameters set at construction time (for example, +//! [`Uniform`] allows specification of its sample space as a range within `T`). +//! +//! +//! # The `Standard` distribution +//! +//! The [`Standard`] distribution is important to mention. This is the +//! distribution used by [`Rng::gen`] and represents the "default" way to +//! produce a random value for many different types, including most primitive +//! types, tuples, arrays, and a few derived types. See the documentation of +//! [`Standard`] for more details. +//! +//! Implementing `Distribution` for [`Standard`] for user types `T` makes it +//! possible to generate type `T` with [`Rng::gen`], and by extension also +//! with the [`random`] function. +//! +//! ## Random characters +//! +//! [`Alphanumeric`] is a simple distribution to sample random letters and +//! numbers of the `char` type; in contrast [`Standard`] may sample any valid +//! `char`. +//! +//! +//! # Uniform numeric ranges +//! +//! The [`Uniform`] distribution is more flexible than [`Standard`], but also +//! more specialised: it supports fewer target types, but allows the sample +//! space to be specified as an arbitrary range within its target type `T`. +//! Both [`Standard`] and [`Uniform`] are in some sense uniform distributions. +//! +//! Values may be sampled from this distribution using [`Rng::sample(Range)`] or +//! 
by creating a distribution object with [`Uniform::new`], +//! [`Uniform::new_inclusive`] or `From`. When the range limits are not +//! known at compile time it is typically faster to reuse an existing +//! `Uniform` object than to call [`Rng::sample(Range)`]. +//! +//! User types `T` may also implement `Distribution` for [`Uniform`], +//! although this is less straightforward than for [`Standard`] (see the +//! documentation in the [`uniform`] module). Doing so enables generation of +//! values of type `T` with [`Rng::sample(Range)`]. +//! +//! ## Open and half-open ranges +//! +//! There are surprisingly many ways to uniformly generate random floats. A +//! range between 0 and 1 is standard, but the exact bounds (open vs closed) +//! and accuracy differ. In addition to the [`Standard`] distribution Rand offers +//! [`Open01`] and [`OpenClosed01`]. See "Floating point implementation" section of +//! [`Standard`] documentation for more details. +//! +//! # Non-uniform sampling +//! +//! Sampling a simple true/false outcome with a given probability has a name: +//! the [`Bernoulli`] distribution (this is used by [`Rng::gen_bool`]). +//! +//! For weighted sampling from a sequence of discrete values, use the +//! [`WeightedIndex`] distribution. +//! +//! This crate no longer includes other non-uniform distributions; instead +//! it is recommended that you use either [`rand_distr`] or [`statrs`]. +//! +//! +//! [probability distribution]: https://en.wikipedia.org/wiki/Probability_distribution +//! [`rand_distr`]: https://crates.io/crates/rand_distr +//! [`statrs`]: https://crates.io/crates/statrs + +//! [`random`]: crate::random +//! [`rand_distr`]: https://crates.io/crates/rand_distr +//! 
[`statrs`]: https://crates.io/crates/statrs + +mod bernoulli; +mod distribution; +mod float; +mod integer; +mod other; +mod slice; +mod utils; +#[cfg(feature = "alloc")] +mod weighted_index; + +#[doc(hidden)] +pub mod hidden_export { + pub use super::float::IntoFloat; // used by rand_distr +} +pub mod uniform; +#[deprecated( + since = "0.8.0", + note = "use rand::distributions::{WeightedIndex, WeightedError} instead" +)] +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub mod weighted; + +pub use self::bernoulli::{Bernoulli, BernoulliError}; +pub use self::distribution::{Distribution, DistIter, DistMap}; +#[cfg(feature = "alloc")] +pub use self::distribution::DistString; +pub use self::float::{Open01, OpenClosed01}; +pub use self::other::Alphanumeric; +pub use self::slice::Slice; +#[doc(inline)] +pub use self::uniform::Uniform; +#[cfg(feature = "alloc")] +pub use self::weighted_index::{WeightedError, WeightedIndex}; + +#[allow(unused)] +use crate::Rng; + +/// A generic random value distribution, implemented for many primitive types. +/// Usually generates values with a numerically uniform distribution, and with a +/// range appropriate to the type. +/// +/// ## Provided implementations +/// +/// Assuming the provided `Rng` is well-behaved, these implementations +/// generate values with the following ranges and distributions: +/// +/// * Integers (`i32`, `u32`, `isize`, `usize`, etc.): Uniformly distributed +/// over all values of the type. +/// * `char`: Uniformly distributed over all Unicode scalar values, i.e. all +/// code points in the range `0...0x10_FFFF`, except for the range +/// `0xD800...0xDFFF` (the surrogate code points). This includes +/// unassigned/reserved code points. +/// * `bool`: Generates `false` or `true`, each with probability 0.5. +/// * Floating point types (`f32` and `f64`): Uniformly distributed in the +/// half-open range `[0, 1)`. See notes below. 
+/// * Wrapping integers (`Wrapping`), besides the type identical to their +/// normal integer variants. +/// +/// The `Standard` distribution also supports generation of the following +/// compound types where all component types are supported: +/// +/// * Tuples (up to 12 elements): each element is generated sequentially. +/// * Arrays (up to 32 elements): each element is generated sequentially; +/// see also [`Rng::fill`] which supports arbitrary array length for integer +/// and float types and tends to be faster for `u32` and smaller types. +/// When using `rustc` ≥ 1.51, enable the `min_const_gen` feature to support +/// arrays larger than 32 elements. +/// Note that [`Rng::fill`] and `Standard`'s array support are *not* equivalent: +/// the former is optimised for integer types (using fewer RNG calls for +/// element types smaller than the RNG word size), while the latter supports +/// any element type supported by `Standard`. +/// * `Option` first generates a `bool`, and if true generates and returns +/// `Some(value)` where `value: T`, otherwise returning `None`. +/// +/// ## Custom implementations +/// +/// The [`Standard`] distribution may be implemented for user types as follows: +/// +/// ``` +/// # #![allow(dead_code)] +/// use rand::Rng; +/// use rand::distributions::{Distribution, Standard}; +/// +/// struct MyF32 { +/// x: f32, +/// } +/// +/// impl Distribution for Standard { +/// fn sample(&self, rng: &mut R) -> MyF32 { +/// MyF32 { x: rng.gen() } +/// } +/// } +/// ``` +/// +/// ## Example usage +/// ``` +/// use rand::prelude::*; +/// use rand::distributions::Standard; +/// +/// let val: f32 = StdRng::from_entropy().sample(Standard); +/// println!("f32 from [0, 1): {}", val); +/// ``` +/// +/// # Floating point implementation +/// The floating point implementations for `Standard` generate a random value in +/// the half-open interval `[0, 1)`, i.e. including 0 but not 1. +/// +/// All values that can be generated are of the form `n * ε/2`. 
For `f32` +/// the 24 most significant random bits of a `u32` are used and for `f64` the +/// 53 most significant bits of a `u64` are used. The conversion uses the +/// multiplicative method: `(rng.gen::<$uty>() >> N) as $ty * (ε/2)`. +/// +/// See also: [`Open01`] which samples from `(0, 1)`, [`OpenClosed01`] which +/// samples from `(0, 1]` and `Rng::gen_range(0..1)` which also samples from +/// `[0, 1)`. Note that `Open01` uses transmute-based methods which yield 1 bit +/// less precision but may perform faster on some architectures (on modern Intel +/// CPUs all methods have approximately equal performance). +/// +/// [`Uniform`]: uniform::Uniform +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] +pub struct Standard; diff --git a/src/rust/vendor/rand/src/distributions/other.rs b/src/rust/vendor/rand/src/distributions/other.rs new file mode 100644 index 000000000..03802a76d --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/other.rs @@ -0,0 +1,365 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The implementations of the `Standard` distribution for other built-in types. + +use core::char; +use core::num::Wrapping; +#[cfg(feature = "alloc")] +use alloc::string::String; + +use crate::distributions::{Distribution, Standard, Uniform}; +#[cfg(feature = "alloc")] +use crate::distributions::DistString; +use crate::Rng; + +#[cfg(feature = "serde1")] +use serde::{Serialize, Deserialize}; +#[cfg(feature = "min_const_gen")] +use core::mem::{self, MaybeUninit}; + + +// ----- Sampling distributions ----- + +/// Sample a `u8`, uniformly distributed over ASCII letters and numbers: +/// a-z, A-Z and 0-9. 
+/// +/// # Example +/// +/// ``` +/// use rand::{Rng, thread_rng}; +/// use rand::distributions::Alphanumeric; +/// +/// let mut rng = thread_rng(); +/// let chars: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); +/// println!("Random chars: {}", chars); +/// ``` +/// +/// The [`DistString`] trait provides an easier method of generating +/// a random `String`, and offers more efficient allocation: +/// ``` +/// use rand::distributions::{Alphanumeric, DistString}; +/// let string = Alphanumeric.sample_string(&mut rand::thread_rng(), 16); +/// println!("Random string: {}", string); +/// ``` +/// +/// # Passwords +/// +/// Users sometimes ask whether it is safe to use a string of random characters +/// as a password. In principle, all RNGs in Rand implementing `CryptoRng` are +/// suitable as a source of randomness for generating passwords (if they are +/// properly seeded), but it is more conservative to only use randomness +/// directly from the operating system via the `getrandom` crate, or the +/// corresponding bindings of a crypto library. +/// +/// When generating passwords or keys, it is important to consider the threat +/// model and in some cases the memorability of the password. This is out of +/// scope of the Rand project, and therefore we defer to the following +/// references: +/// +/// - [Wikipedia article on Password Strength](https://en.wikipedia.org/wiki/Password_strength) +/// - [Diceware for generating memorable passwords](https://en.wikipedia.org/wiki/Diceware) +#[derive(Debug, Clone, Copy)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub struct Alphanumeric; + + +// ----- Implementations of distributions ----- + +impl Distribution for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> char { + // A valid `char` is either in the interval `[0, 0xD800)` or + // `(0xDFFF, 0x11_0000)`. 
All `char`s must therefore be in + // `[0, 0x11_0000)` but not in the "gap" `[0xD800, 0xDFFF]` which is + // reserved for surrogates. This is the size of that gap. + const GAP_SIZE: u32 = 0xDFFF - 0xD800 + 1; + + // Uniform::new(0, 0x11_0000 - GAP_SIZE) can also be used but it + // seemed slower. + let range = Uniform::new(GAP_SIZE, 0x11_0000); + + let mut n = range.sample(rng); + if n <= 0xDFFF { + n -= GAP_SIZE; + } + unsafe { char::from_u32_unchecked(n) } + } +} + +/// Note: the `String` is potentially left with excess capacity; optionally the +/// user may call `string.shrink_to_fit()` afterwards. +#[cfg(feature = "alloc")] +impl DistString for Standard { + fn append_string(&self, rng: &mut R, s: &mut String, len: usize) { + // A char is encoded with at most four bytes, thus this reservation is + // guaranteed to be sufficient. We do not shrink_to_fit afterwards so + // that repeated usage on the same `String` buffer does not reallocate. + s.reserve(4 * len); + s.extend(Distribution::::sample_iter(self, rng).take(len)); + } +} + +impl Distribution for Alphanumeric { + fn sample(&self, rng: &mut R) -> u8 { + const RANGE: u32 = 26 + 26 + 10; + const GEN_ASCII_STR_CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ + abcdefghijklmnopqrstuvwxyz\ + 0123456789"; + // We can pick from 62 characters. This is so close to a power of 2, 64, + // that we can do better than `Uniform`. Use a simple bitshift and + // rejection sampling. We do not use a bitmask, because for small RNGs + // the most significant bits are usually of higher quality. 
+ loop { + let var = rng.next_u32() >> (32 - 6); + if var < RANGE { + return GEN_ASCII_STR_CHARSET[var as usize]; + } + } + } +} + +#[cfg(feature = "alloc")] +impl DistString for Alphanumeric { + fn append_string(&self, rng: &mut R, string: &mut String, len: usize) { + unsafe { + let v = string.as_mut_vec(); + v.extend(self.sample_iter(rng).take(len)); + } + } +} + +impl Distribution for Standard { + #[inline] + fn sample(&self, rng: &mut R) -> bool { + // We can compare against an arbitrary bit of an u32 to get a bool. + // Because the least significant bits of a lower quality RNG can have + // simple patterns, we compare against the most significant bit. This is + // easiest done using a sign test. + (rng.next_u32() as i32) < 0 + } +} + +macro_rules! tuple_impl { + // use variables to indicate the arity of the tuple + ($($tyvar:ident),* ) => { + // the trailing commas are for the 1 tuple + impl< $( $tyvar ),* > + Distribution<( $( $tyvar ),* , )> + for Standard + where $( Standard: Distribution<$tyvar> ),* + { + #[inline] + fn sample(&self, _rng: &mut R) -> ( $( $tyvar ),* , ) { + ( + // use the $tyvar's to get the appropriate number of + // repeats (they're not actually needed) + $( + _rng.gen::<$tyvar>() + ),* + , + ) + } + } + } +} + +impl Distribution<()> for Standard { + #[allow(clippy::unused_unit)] + #[inline] + fn sample(&self, _: &mut R) -> () { + () + } +} +tuple_impl! {A} +tuple_impl! {A, B} +tuple_impl! {A, B, C} +tuple_impl! {A, B, C, D} +tuple_impl! {A, B, C, D, E} +tuple_impl! {A, B, C, D, E, F} +tuple_impl! {A, B, C, D, E, F, G} +tuple_impl! {A, B, C, D, E, F, G, H} +tuple_impl! {A, B, C, D, E, F, G, H, I} +tuple_impl! {A, B, C, D, E, F, G, H, I, J} +tuple_impl! {A, B, C, D, E, F, G, H, I, J, K} +tuple_impl! 
{A, B, C, D, E, F, G, H, I, J, K, L} + +#[cfg(feature = "min_const_gen")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "min_const_gen")))] +impl Distribution<[T; N]> for Standard +where Standard: Distribution +{ + #[inline] + fn sample(&self, _rng: &mut R) -> [T; N] { + let mut buff: [MaybeUninit; N] = unsafe { MaybeUninit::uninit().assume_init() }; + + for elem in &mut buff { + *elem = MaybeUninit::new(_rng.gen()); + } + + unsafe { mem::transmute_copy::<_, _>(&buff) } + } +} + +#[cfg(not(feature = "min_const_gen"))] +macro_rules! array_impl { + // recursive, given at least one type parameter: + {$n:expr, $t:ident, $($ts:ident,)*} => { + array_impl!{($n - 1), $($ts,)*} + + impl Distribution<[T; $n]> for Standard where Standard: Distribution { + #[inline] + fn sample(&self, _rng: &mut R) -> [T; $n] { + [_rng.gen::<$t>(), $(_rng.gen::<$ts>()),*] + } + } + }; + // empty case: + {$n:expr,} => { + impl Distribution<[T; $n]> for Standard { + fn sample(&self, _rng: &mut R) -> [T; $n] { [] } + } + }; +} + +#[cfg(not(feature = "min_const_gen"))] +array_impl! 
{32, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T,} + +impl Distribution> for Standard +where Standard: Distribution +{ + #[inline] + fn sample(&self, rng: &mut R) -> Option { + // UFCS is needed here: https://github.com/rust-lang/rust/issues/24066 + if rng.gen::() { + Some(rng.gen()) + } else { + None + } + } +} + +impl Distribution> for Standard +where Standard: Distribution +{ + #[inline] + fn sample(&self, rng: &mut R) -> Wrapping { + Wrapping(rng.gen()) + } +} + + +#[cfg(test)] +mod tests { + use super::*; + use crate::RngCore; + #[cfg(feature = "alloc")] use alloc::string::String; + + #[test] + fn test_misc() { + let rng: &mut dyn RngCore = &mut crate::test::rng(820); + + rng.sample::(Standard); + rng.sample::(Standard); + } + + #[cfg(feature = "alloc")] + #[test] + fn test_chars() { + use core::iter; + let mut rng = crate::test::rng(805); + + // Test by generating a relatively large number of chars, so we also + // take the rejection sampling path. + let word: String = iter::repeat(()) + .map(|()| rng.gen::()) + .take(1000) + .collect(); + assert!(!word.is_empty()); + } + + #[test] + fn test_alphanumeric() { + let mut rng = crate::test::rng(806); + + // Test by generating a relatively large number of chars, so we also + // take the rejection sampling path. 
+ let mut incorrect = false; + for _ in 0..100 { + let c: char = rng.sample(Alphanumeric).into(); + incorrect |= !(('0'..='9').contains(&c) || + ('A'..='Z').contains(&c) || + ('a'..='z').contains(&c) ); + } + assert!(!incorrect); + } + + #[test] + fn value_stability() { + fn test_samples>( + distr: &D, zero: T, expected: &[T], + ) { + let mut rng = crate::test::rng(807); + let mut buf = [zero; 5]; + for x in &mut buf { + *x = rng.sample(&distr); + } + assert_eq!(&buf, expected); + } + + test_samples(&Standard, 'a', &[ + '\u{8cdac}', + '\u{a346a}', + '\u{80120}', + '\u{ed692}', + '\u{35888}', + ]); + test_samples(&Alphanumeric, 0, &[104, 109, 101, 51, 77]); + test_samples(&Standard, false, &[true, true, false, true, false]); + test_samples(&Standard, None as Option, &[ + Some(true), + None, + Some(false), + None, + Some(false), + ]); + test_samples(&Standard, Wrapping(0i32), &[ + Wrapping(-2074640887), + Wrapping(-1719949321), + Wrapping(2018088303), + Wrapping(-547181756), + Wrapping(838957336), + ]); + + // We test only sub-sets of tuple and array impls + test_samples(&Standard, (), &[(), (), (), (), ()]); + test_samples(&Standard, (false,), &[ + (true,), + (true,), + (false,), + (true,), + (false,), + ]); + test_samples(&Standard, (false, false), &[ + (true, true), + (false, true), + (false, false), + (true, false), + (false, false), + ]); + + test_samples(&Standard, [0u8; 0], &[[], [], [], [], []]); + test_samples(&Standard, [0u8; 3], &[ + [9, 247, 111], + [68, 24, 13], + [174, 19, 194], + [172, 69, 213], + [149, 207, 29], + ]); + } +} diff --git a/src/rust/vendor/rand/src/distributions/slice.rs b/src/rust/vendor/rand/src/distributions/slice.rs new file mode 100644 index 000000000..3302deb2a --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/slice.rs @@ -0,0 +1,117 @@ +// Copyright 2021 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use crate::distributions::{Distribution, Uniform}; + +/// A distribution to sample items uniformly from a slice. +/// +/// [`Slice::new`] constructs a distribution referencing a slice and uniformly +/// samples references from the items in the slice. It may do extra work up +/// front to make sampling of multiple values faster; if only one sample from +/// the slice is required, [`SliceRandom::choose`] can be more efficient. +/// +/// Steps are taken to avoid bias which might be present in naive +/// implementations; for example `slice[rng.gen() % slice.len()]` samples from +/// the slice, but may be more likely to select numbers in the low range than +/// other values. +/// +/// This distribution samples with replacement; each sample is independent. +/// Sampling without replacement requires state to be retained, and therefore +/// cannot be handled by a distribution; you should instead consider methods +/// on [`SliceRandom`], such as [`SliceRandom::choose_multiple`]. 
+/// +/// # Example +/// +/// ``` +/// use rand::Rng; +/// use rand::distributions::Slice; +/// +/// let vowels = ['a', 'e', 'i', 'o', 'u']; +/// let vowels_dist = Slice::new(&vowels).unwrap(); +/// let rng = rand::thread_rng(); +/// +/// // build a string of 10 vowels +/// let vowel_string: String = rng +/// .sample_iter(&vowels_dist) +/// .take(10) +/// .collect(); +/// +/// println!("{}", vowel_string); +/// assert_eq!(vowel_string.len(), 10); +/// assert!(vowel_string.chars().all(|c| vowels.contains(&c))); +/// ``` +/// +/// For a single sample, [`SliceRandom::choose`][crate::seq::SliceRandom::choose] +/// may be preferred: +/// +/// ``` +/// use rand::seq::SliceRandom; +/// +/// let vowels = ['a', 'e', 'i', 'o', 'u']; +/// let mut rng = rand::thread_rng(); +/// +/// println!("{}", vowels.choose(&mut rng).unwrap()) +/// ``` +/// +/// [`SliceRandom`]: crate::seq::SliceRandom +/// [`SliceRandom::choose`]: crate::seq::SliceRandom::choose +/// [`SliceRandom::choose_multiple`]: crate::seq::SliceRandom::choose_multiple +#[derive(Debug, Clone, Copy)] +pub struct Slice<'a, T> { + slice: &'a [T], + range: Uniform, +} + +impl<'a, T> Slice<'a, T> { + /// Create a new `Slice` instance which samples uniformly from the slice. + /// Returns `Err` if the slice is empty. 
+ pub fn new(slice: &'a [T]) -> Result { + match slice.len() { + 0 => Err(EmptySlice), + len => Ok(Self { + slice, + range: Uniform::new(0, len), + }), + } + } +} + +impl<'a, T> Distribution<&'a T> for Slice<'a, T> { + fn sample(&self, rng: &mut R) -> &'a T { + let idx = self.range.sample(rng); + + debug_assert!( + idx < self.slice.len(), + "Uniform::new(0, {}) somehow returned {}", + self.slice.len(), + idx + ); + + // Safety: at construction time, it was ensured that the slice was + // non-empty, and that the `Uniform` range produces values in range + // for the slice + unsafe { self.slice.get_unchecked(idx) } + } +} + +/// Error type indicating that a [`Slice`] distribution was improperly +/// constructed with an empty slice. +#[derive(Debug, Clone, Copy)] +pub struct EmptySlice; + +impl core::fmt::Display for EmptySlice { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "Tried to create a `distributions::Slice` with an empty slice" + ) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for EmptySlice {} diff --git a/src/rust/vendor/rand/src/distributions/uniform.rs b/src/rust/vendor/rand/src/distributions/uniform.rs new file mode 100644 index 000000000..261357b24 --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/uniform.rs @@ -0,0 +1,1658 @@ +// Copyright 2018-2020 Developers of the Rand project. +// Copyright 2017 The Rust Project Developers. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A distribution uniformly sampling numbers within a given range. +//! +//! [`Uniform`] is the standard distribution to sample uniformly from a range; +//! e.g. `Uniform::new_inclusive(1, 6)` can sample integers from 1 to 6, like a +//! standard die. [`Rng::gen_range`] supports any type supported by +//! [`Uniform`]. +//! +//! 
This distribution is provided with support for several primitive types +//! (all integer and floating-point types) as well as [`std::time::Duration`], +//! and supports extension to user-defined types via a type-specific *back-end* +//! implementation. +//! +//! The types [`UniformInt`], [`UniformFloat`] and [`UniformDuration`] are the +//! back-ends supporting sampling from primitive integer and floating-point +//! ranges as well as from [`std::time::Duration`]; these types do not normally +//! need to be used directly (unless implementing a derived back-end). +//! +//! # Example usage +//! +//! ``` +//! use rand::{Rng, thread_rng}; +//! use rand::distributions::Uniform; +//! +//! let mut rng = thread_rng(); +//! let side = Uniform::new(-10.0, 10.0); +//! +//! // sample between 1 and 10 points +//! for _ in 0..rng.gen_range(1..=10) { +//! // sample a point from the square with sides -10 - 10 in two dimensions +//! let (x, y) = (rng.sample(side), rng.sample(side)); +//! println!("Point: {}, {}", x, y); +//! } +//! ``` +//! +//! # Extending `Uniform` to support a custom type +//! +//! To extend [`Uniform`] to support your own types, write a back-end which +//! implements the [`UniformSampler`] trait, then implement the [`SampleUniform`] +//! helper trait to "register" your back-end. See the `MyF32` example below. +//! +//! At a minimum, the back-end needs to store any parameters needed for sampling +//! (e.g. the target range) and implement `new`, `new_inclusive` and `sample`. +//! Those methods should include an assert to check the range is valid (i.e. +//! `low < high`). The example below merely wraps another back-end. +//! +//! The `new`, `new_inclusive` and `sample_single` functions use arguments of +//! type SampleBorrow in order to support passing in values by reference or +//! by value. In the implementation of these functions, you can choose to +//! simply use the reference returned by [`SampleBorrow::borrow`], or you can choose +//! 
to copy or clone the value, whatever is appropriate for your type. +//! +//! ``` +//! use rand::prelude::*; +//! use rand::distributions::uniform::{Uniform, SampleUniform, +//! UniformSampler, UniformFloat, SampleBorrow}; +//! +//! struct MyF32(f32); +//! +//! #[derive(Clone, Copy, Debug)] +//! struct UniformMyF32(UniformFloat); +//! +//! impl UniformSampler for UniformMyF32 { +//! type X = MyF32; +//! fn new(low: B1, high: B2) -> Self +//! where B1: SampleBorrow + Sized, +//! B2: SampleBorrow + Sized +//! { +//! UniformMyF32(UniformFloat::::new(low.borrow().0, high.borrow().0)) +//! } +//! fn new_inclusive(low: B1, high: B2) -> Self +//! where B1: SampleBorrow + Sized, +//! B2: SampleBorrow + Sized +//! { +//! UniformMyF32(UniformFloat::::new_inclusive( +//! low.borrow().0, +//! high.borrow().0, +//! )) +//! } +//! fn sample(&self, rng: &mut R) -> Self::X { +//! MyF32(self.0.sample(rng)) +//! } +//! } +//! +//! impl SampleUniform for MyF32 { +//! type Sampler = UniformMyF32; +//! } +//! +//! let (low, high) = (MyF32(17.0f32), MyF32(22.0f32)); +//! let uniform = Uniform::new(low, high); +//! let x = uniform.sample(&mut thread_rng()); +//! ``` +//! +//! [`SampleUniform`]: crate::distributions::uniform::SampleUniform +//! [`UniformSampler`]: crate::distributions::uniform::UniformSampler +//! [`UniformInt`]: crate::distributions::uniform::UniformInt +//! [`UniformFloat`]: crate::distributions::uniform::UniformFloat +//! [`UniformDuration`]: crate::distributions::uniform::UniformDuration +//! 
[`SampleBorrow::borrow`]: crate::distributions::uniform::SampleBorrow::borrow + +use core::time::Duration; +use core::ops::{Range, RangeInclusive}; + +use crate::distributions::float::IntoFloat; +use crate::distributions::utils::{BoolAsSIMD, FloatAsSIMD, FloatSIMDUtils, WideningMultiply}; +use crate::distributions::Distribution; +use crate::{Rng, RngCore}; + +#[cfg(not(feature = "std"))] +#[allow(unused_imports)] // rustc doesn't detect that this is actually used +use crate::distributions::utils::Float; + +#[cfg(feature = "simd_support")] use packed_simd::*; + +#[cfg(feature = "serde1")] +use serde::{Serialize, Deserialize}; + +/// Sample values uniformly between two bounds. +/// +/// [`Uniform::new`] and [`Uniform::new_inclusive`] construct a uniform +/// distribution sampling from the given range; these functions may do extra +/// work up front to make sampling of multiple values faster. If only one sample +/// from the range is required, [`Rng::gen_range`] can be more efficient. +/// +/// When sampling from a constant range, many calculations can happen at +/// compile-time and all methods should be fast; for floating-point ranges and +/// the full range of integer types this should have comparable performance to +/// the `Standard` distribution. +/// +/// Steps are taken to avoid bias which might be present in naive +/// implementations; for example `rng.gen::() % 170` samples from the range +/// `[0, 169]` but is twice as likely to select numbers less than 85 than other +/// values. Further, the implementations here give more weight to the high-bits +/// generated by the RNG than the low bits, since with some RNGs the low-bits +/// are of lower quality than the high bits. +/// +/// Implementations must sample in `[low, high)` range for +/// `Uniform::new(low, high)`, i.e., excluding `high`. In particular, care must +/// be taken to ensure that rounding never results values `< low` or `>= high`. 
+/// +/// # Example +/// +/// ``` +/// use rand::distributions::{Distribution, Uniform}; +/// +/// let between = Uniform::from(10..10000); +/// let mut rng = rand::thread_rng(); +/// let mut sum = 0; +/// for _ in 0..1000 { +/// sum += between.sample(&mut rng); +/// } +/// println!("{}", sum); +/// ``` +/// +/// For a single sample, [`Rng::gen_range`] may be preferred: +/// +/// ``` +/// use rand::Rng; +/// +/// let mut rng = rand::thread_rng(); +/// println!("{}", rng.gen_range(0..10)); +/// ``` +/// +/// [`new`]: Uniform::new +/// [`new_inclusive`]: Uniform::new_inclusive +/// [`Rng::gen_range`]: Rng::gen_range +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde1", serde(bound(serialize = "X::Sampler: Serialize")))] +#[cfg_attr(feature = "serde1", serde(bound(deserialize = "X::Sampler: Deserialize<'de>")))] +pub struct Uniform(X::Sampler); + +impl Uniform { + /// Create a new `Uniform` instance which samples uniformly from the half + /// open range `[low, high)` (excluding `high`). Panics if `low >= high`. + pub fn new(low: B1, high: B2) -> Uniform + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + Uniform(X::Sampler::new(low, high)) + } + + /// Create a new `Uniform` instance which samples uniformly from the closed + /// range `[low, high]` (inclusive). Panics if `low > high`. + pub fn new_inclusive(low: B1, high: B2) -> Uniform + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + Uniform(X::Sampler::new_inclusive(low, high)) + } +} + +impl Distribution for Uniform { + fn sample(&self, rng: &mut R) -> X { + self.0.sample(rng) + } +} + +/// Helper trait for creating objects using the correct implementation of +/// [`UniformSampler`] for the sampling type. +/// +/// See the [module documentation] on how to implement [`Uniform`] range +/// sampling for a custom type. 
+/// +/// [module documentation]: crate::distributions::uniform +pub trait SampleUniform: Sized { + /// The `UniformSampler` implementation supporting type `X`. + type Sampler: UniformSampler; +} + +/// Helper trait handling actual uniform sampling. +/// +/// See the [module documentation] on how to implement [`Uniform`] range +/// sampling for a custom type. +/// +/// Implementation of [`sample_single`] is optional, and is only useful when +/// the implementation can be faster than `Self::new(low, high).sample(rng)`. +/// +/// [module documentation]: crate::distributions::uniform +/// [`sample_single`]: UniformSampler::sample_single +pub trait UniformSampler: Sized { + /// The type sampled by this implementation. + type X; + + /// Construct self, with inclusive lower bound and exclusive upper bound + /// `[low, high)`. + /// + /// Usually users should not call this directly but instead use + /// `Uniform::new`, which asserts that `low < high` before calling this. + fn new(low: B1, high: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized; + + /// Construct self, with inclusive bounds `[low, high]`. + /// + /// Usually users should not call this directly but instead use + /// `Uniform::new_inclusive`, which asserts that `low <= high` before + /// calling this. + fn new_inclusive(low: B1, high: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized; + + /// Sample a value. + fn sample(&self, rng: &mut R) -> Self::X; + + /// Sample a single value uniformly from a range with inclusive lower bound + /// and exclusive upper bound `[low, high)`. + /// + /// By default this is implemented using + /// `UniformSampler::new(low, high).sample(rng)`. However, for some types + /// more optimal implementations for single usage may be provided via this + /// method (which is the case for integers and floats). + /// Results may not be identical. 
+ /// + /// Note that to use this method in a generic context, the type needs to be + /// retrieved via `SampleUniform::Sampler` as follows: + /// ``` + /// use rand::{thread_rng, distributions::uniform::{SampleUniform, UniformSampler}}; + /// # #[allow(unused)] + /// fn sample_from_range(lb: T, ub: T) -> T { + /// let mut rng = thread_rng(); + /// ::Sampler::sample_single(lb, ub, &mut rng) + /// } + /// ``` + fn sample_single(low: B1, high: B2, rng: &mut R) -> Self::X + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let uniform: Self = UniformSampler::new(low, high); + uniform.sample(rng) + } + + /// Sample a single value uniformly from a range with inclusive lower bound + /// and inclusive upper bound `[low, high]`. + /// + /// By default this is implemented using + /// `UniformSampler::new_inclusive(low, high).sample(rng)`. However, for + /// some types more optimal implementations for single usage may be provided + /// via this method. + /// Results may not be identical. + fn sample_single_inclusive(low: B1, high: B2, rng: &mut R) + -> Self::X + where B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized + { + let uniform: Self = UniformSampler::new_inclusive(low, high); + uniform.sample(rng) + } +} + +impl From> for Uniform { + fn from(r: ::core::ops::Range) -> Uniform { + Uniform::new(r.start, r.end) + } +} + +impl From> for Uniform { + fn from(r: ::core::ops::RangeInclusive) -> Uniform { + Uniform::new_inclusive(r.start(), r.end()) + } +} + + +/// Helper trait similar to [`Borrow`] but implemented +/// only for SampleUniform and references to SampleUniform in +/// order to resolve ambiguity issues. +/// +/// [`Borrow`]: std::borrow::Borrow +pub trait SampleBorrow { + /// Immutably borrows from an owned value. 
See [`Borrow::borrow`] + /// + /// [`Borrow::borrow`]: std::borrow::Borrow::borrow + fn borrow(&self) -> &Borrowed; +} +impl SampleBorrow for Borrowed +where Borrowed: SampleUniform +{ + #[inline(always)] + fn borrow(&self) -> &Borrowed { + self + } +} +impl<'a, Borrowed> SampleBorrow for &'a Borrowed +where Borrowed: SampleUniform +{ + #[inline(always)] + fn borrow(&self) -> &Borrowed { + *self + } +} + +/// Range that supports generating a single sample efficiently. +/// +/// Any type implementing this trait can be used to specify the sampled range +/// for `Rng::gen_range`. +pub trait SampleRange { + /// Generate a sample from the given range. + fn sample_single(self, rng: &mut R) -> T; + + /// Check whether the range is empty. + fn is_empty(&self) -> bool; +} + +impl SampleRange for Range { + #[inline] + fn sample_single(self, rng: &mut R) -> T { + T::Sampler::sample_single(self.start, self.end, rng) + } + + #[inline] + fn is_empty(&self) -> bool { + !(self.start < self.end) + } +} + +impl SampleRange for RangeInclusive { + #[inline] + fn sample_single(self, rng: &mut R) -> T { + T::Sampler::sample_single_inclusive(self.start(), self.end(), rng) + } + + #[inline] + fn is_empty(&self) -> bool { + !(self.start() <= self.end()) + } +} + + +//////////////////////////////////////////////////////////////////////////////// + +// What follows are all back-ends. + + +/// The back-end implementing [`UniformSampler`] for integer types. +/// +/// Unless you are implementing [`UniformSampler`] for your own type, this type +/// should not be used directly, use [`Uniform`] instead. +/// +/// # Implementation notes +/// +/// For simplicity, we use the same generic struct `UniformInt` for all +/// integer types `X`. This gives us only one field type, `X`; to store unsigned +/// values of this size, we take use the fact that these conversions are no-ops. +/// +/// For a closed range, the number of possible numbers we should generate is +/// `range = (high - low + 1)`. 
To avoid bias, we must ensure that the size of +/// our sample space, `zone`, is a multiple of `range`; other values must be +/// rejected (by replacing with a new random sample). +/// +/// As a special case, we use `range = 0` to represent the full range of the +/// result type (i.e. for `new_inclusive($ty::MIN, $ty::MAX)`). +/// +/// The optimum `zone` is the largest product of `range` which fits in our +/// (unsigned) target type. We calculate this by calculating how many numbers we +/// must reject: `reject = (MAX + 1) % range = (MAX - range + 1) % range`. Any (large) +/// product of `range` will suffice, thus in `sample_single` we multiply by a +/// power of 2 via bit-shifting (faster but may cause more rejections). +/// +/// The smallest integer PRNGs generate is `u32`. For 8- and 16-bit outputs we +/// use `u32` for our `zone` and samples (because it's not slower and because +/// it reduces the chance of having to reject a sample). In this case we cannot +/// store `zone` in the target type since it is too large, however we know +/// `ints_to_reject < range <= $unsigned::MAX`. +/// +/// An alternative to using a modulus is widening multiply: After a widening +/// multiply by `range`, the result is in the high word. Then comparing the low +/// word against `zone` makes sure our distribution is uniform. +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub struct UniformInt { + low: X, + range: X, + z: X, // either ints_to_reject or zone depending on implementation +} + +macro_rules! uniform_int_impl { + ($ty:ty, $unsigned:ident, $u_large:ident) => { + impl SampleUniform for $ty { + type Sampler = UniformInt<$ty>; + } + + impl UniformSampler for UniformInt<$ty> { + // We play free and fast with unsigned vs signed here + // (when $ty is signed), but that's fine, since the + // contract of this macro is for $ty and $unsigned to be + // "bit-equal", so casting between them is a no-op. 
+ + type X = $ty; + + #[inline] // if the range is constant, this helps LLVM to do the + // calculations at compile-time. + fn new(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + assert!(low < high, "Uniform::new called with `low >= high`"); + UniformSampler::new_inclusive(low, high - 1) + } + + #[inline] // if the range is constant, this helps LLVM to do the + // calculations at compile-time. + fn new_inclusive(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + assert!( + low <= high, + "Uniform::new_inclusive called with `low > high`" + ); + let unsigned_max = ::core::$u_large::MAX; + + let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned; + let ints_to_reject = if range > 0 { + let range = $u_large::from(range); + (unsigned_max - range + 1) % range + } else { + 0 + }; + + UniformInt { + low, + // These are really $unsigned values, but store as $ty: + range: range as $ty, + z: ints_to_reject as $unsigned as $ty, + } + } + + #[inline] + fn sample(&self, rng: &mut R) -> Self::X { + let range = self.range as $unsigned as $u_large; + if range > 0 { + let unsigned_max = ::core::$u_large::MAX; + let zone = unsigned_max - (self.z as $unsigned as $u_large); + loop { + let v: $u_large = rng.gen(); + let (hi, lo) = v.wmul(range); + if lo <= zone { + return self.low.wrapping_add(hi as $ty); + } + } + } else { + // Sample from the entire integer range. 
+ rng.gen() + } + } + + #[inline] + fn sample_single(low_b: B1, high_b: B2, rng: &mut R) -> Self::X + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + assert!(low < high, "UniformSampler::sample_single: low >= high"); + Self::sample_single_inclusive(low, high - 1, rng) + } + + #[inline] + fn sample_single_inclusive(low_b: B1, high_b: B2, rng: &mut R) -> Self::X + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + assert!(low <= high, "UniformSampler::sample_single_inclusive: low > high"); + let range = high.wrapping_sub(low).wrapping_add(1) as $unsigned as $u_large; + // If the above resulted in wrap-around to 0, the range is $ty::MIN..=$ty::MAX, + // and any integer will do. + if range == 0 { + return rng.gen(); + } + + let zone = if ::core::$unsigned::MAX <= ::core::u16::MAX as $unsigned { + // Using a modulus is faster than the approximation for + // i8 and i16. I suppose we trade the cost of one + // modulus for near-perfect branch prediction. + let unsigned_max: $u_large = ::core::$u_large::MAX; + let ints_to_reject = (unsigned_max - range + 1) % range; + unsigned_max - ints_to_reject + } else { + // conservative but fast approximation. `- 1` is necessary to allow the + // same comparison without bias. + (range << range.leading_zeros()).wrapping_sub(1) + }; + + loop { + let v: $u_large = rng.gen(); + let (hi, lo) = v.wmul(range); + if lo <= zone { + return low.wrapping_add(hi as $ty); + } + } + } + } + }; +} + +uniform_int_impl! { i8, u8, u32 } +uniform_int_impl! { i16, u16, u32 } +uniform_int_impl! { i32, u32, u32 } +uniform_int_impl! { i64, u64, u64 } +uniform_int_impl! { i128, u128, u128 } +uniform_int_impl! { isize, usize, usize } +uniform_int_impl! { u8, u8, u32 } +uniform_int_impl! { u16, u16, u32 } +uniform_int_impl! { u32, u32, u32 } +uniform_int_impl! { u64, u64, u64 } +uniform_int_impl! 
{ usize, usize, usize } +uniform_int_impl! { u128, u128, u128 } + +#[cfg(feature = "simd_support")] +macro_rules! uniform_simd_int_impl { + ($ty:ident, $unsigned:ident, $u_scalar:ident) => { + // The "pick the largest zone that can fit in an `u32`" optimization + // is less useful here. Multiple lanes complicate things, we don't + // know the PRNG's minimal output size, and casting to a larger vector + // is generally a bad idea for SIMD performance. The user can still + // implement it manually. + + // TODO: look into `Uniform::::new(0u32, 100)` functionality + // perhaps `impl SampleUniform for $u_scalar`? + impl SampleUniform for $ty { + type Sampler = UniformInt<$ty>; + } + + impl UniformSampler for UniformInt<$ty> { + type X = $ty; + + #[inline] // if the range is constant, this helps LLVM to do the + // calculations at compile-time. + fn new(low_b: B1, high_b: B2) -> Self + where B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + assert!(low.lt(high).all(), "Uniform::new called with `low >= high`"); + UniformSampler::new_inclusive(low, high - 1) + } + + #[inline] // if the range is constant, this helps LLVM to do the + // calculations at compile-time. + fn new_inclusive(low_b: B1, high_b: B2) -> Self + where B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + assert!(low.le(high).all(), + "Uniform::new_inclusive called with `low > high`"); + let unsigned_max = ::core::$u_scalar::MAX; + + // NOTE: these may need to be replaced with explicitly + // wrapping operations if `packed_simd` changes + let range: $unsigned = ((high - low) + 1).cast(); + // `% 0` will panic at runtime. 
+ let not_full_range = range.gt($unsigned::splat(0)); + // replacing 0 with `unsigned_max` allows a faster `select` + // with bitwise OR + let modulo = not_full_range.select(range, $unsigned::splat(unsigned_max)); + // wrapping addition + let ints_to_reject = (unsigned_max - range + 1) % modulo; + // When `range` is 0, `lo` of `v.wmul(range)` will always be + // zero which means only one sample is needed. + let zone = unsigned_max - ints_to_reject; + + UniformInt { + low, + // These are really $unsigned values, but store as $ty: + range: range.cast(), + z: zone.cast(), + } + } + + fn sample(&self, rng: &mut R) -> Self::X { + let range: $unsigned = self.range.cast(); + let zone: $unsigned = self.z.cast(); + + // This might seem very slow, generating a whole new + // SIMD vector for every sample rejection. For most uses + // though, the chance of rejection is small and provides good + // general performance. With multiple lanes, that chance is + // multiplied. To mitigate this, we replace only the lanes of + // the vector which fail, iteratively reducing the chance of + // rejection. The replacement method does however add a little + // overhead. Benchmarking or calculating probabilities might + // reveal contexts where this replacement method is slower. + let mut v: $unsigned = rng.gen(); + loop { + let (hi, lo) = v.wmul(range); + let mask = lo.le(zone); + if mask.all() { + let hi: $ty = hi.cast(); + // wrapping addition + let result = self.low + hi; + // `select` here compiles to a blend operation + // When `range.eq(0).none()` the compare and blend + // operations are avoided. 
+ let v: $ty = v.cast(); + return range.gt($unsigned::splat(0)).select(result, v); + } + // Replace only the failing lanes + v = mask.select(v, rng.gen()); + } + } + } + }; + + // bulk implementation + ($(($unsigned:ident, $signed:ident),)+ $u_scalar:ident) => { + $( + uniform_simd_int_impl!($unsigned, $unsigned, $u_scalar); + uniform_simd_int_impl!($signed, $unsigned, $u_scalar); + )+ + }; +} + +#[cfg(feature = "simd_support")] +uniform_simd_int_impl! { + (u64x2, i64x2), + (u64x4, i64x4), + (u64x8, i64x8), + u64 +} + +#[cfg(feature = "simd_support")] +uniform_simd_int_impl! { + (u32x2, i32x2), + (u32x4, i32x4), + (u32x8, i32x8), + (u32x16, i32x16), + u32 +} + +#[cfg(feature = "simd_support")] +uniform_simd_int_impl! { + (u16x2, i16x2), + (u16x4, i16x4), + (u16x8, i16x8), + (u16x16, i16x16), + (u16x32, i16x32), + u16 +} + +#[cfg(feature = "simd_support")] +uniform_simd_int_impl! { + (u8x2, i8x2), + (u8x4, i8x4), + (u8x8, i8x8), + (u8x16, i8x16), + (u8x32, i8x32), + (u8x64, i8x64), + u8 +} + +impl SampleUniform for char { + type Sampler = UniformChar; +} + +/// The back-end implementing [`UniformSampler`] for `char`. +/// +/// Unless you are implementing [`UniformSampler`] for your own type, this type +/// should not be used directly, use [`Uniform`] instead. +/// +/// This differs from integer range sampling since the range `0xD800..=0xDFFF` +/// are used for surrogate pairs in UCS and UTF-16, and consequently are not +/// valid Unicode code points. We must therefore avoid sampling values in this +/// range. 
+#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub struct UniformChar { + sampler: UniformInt, +} + +/// UTF-16 surrogate range start +const CHAR_SURROGATE_START: u32 = 0xD800; +/// UTF-16 surrogate range size +const CHAR_SURROGATE_LEN: u32 = 0xE000 - CHAR_SURROGATE_START; + +/// Convert `char` to compressed `u32` +fn char_to_comp_u32(c: char) -> u32 { + match c as u32 { + c if c >= CHAR_SURROGATE_START => c - CHAR_SURROGATE_LEN, + c => c, + } +} + +impl UniformSampler for UniformChar { + type X = char; + + #[inline] // if the range is constant, this helps LLVM to do the + // calculations at compile-time. + fn new(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = char_to_comp_u32(*low_b.borrow()); + let high = char_to_comp_u32(*high_b.borrow()); + let sampler = UniformInt::::new(low, high); + UniformChar { sampler } + } + + #[inline] // if the range is constant, this helps LLVM to do the + // calculations at compile-time. + fn new_inclusive(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = char_to_comp_u32(*low_b.borrow()); + let high = char_to_comp_u32(*high_b.borrow()); + let sampler = UniformInt::::new_inclusive(low, high); + UniformChar { sampler } + } + + fn sample(&self, rng: &mut R) -> Self::X { + let mut x = self.sampler.sample(rng); + if x >= CHAR_SURROGATE_START { + x += CHAR_SURROGATE_LEN; + } + // SAFETY: x must not be in surrogate range or greater than char::MAX. + // This relies on range constructors which accept char arguments. + // Validity of input char values is assumed. + unsafe { core::char::from_u32_unchecked(x) } + } +} + +/// The back-end implementing [`UniformSampler`] for floating-point types. +/// +/// Unless you are implementing [`UniformSampler`] for your own type, this type +/// should not be used directly, use [`Uniform`] instead. 
+/// +/// # Implementation notes +/// +/// Instead of generating a float in the `[0, 1)` range using [`Standard`], the +/// `UniformFloat` implementation converts the output of an PRNG itself. This +/// way one or two steps can be optimized out. +/// +/// The floats are first converted to a value in the `[1, 2)` interval using a +/// transmute-based method, and then mapped to the expected range with a +/// multiply and addition. Values produced this way have what equals 23 bits of +/// random digits for an `f32`, and 52 for an `f64`. +/// +/// [`new`]: UniformSampler::new +/// [`new_inclusive`]: UniformSampler::new_inclusive +/// [`Standard`]: crate::distributions::Standard +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub struct UniformFloat { + low: X, + scale: X, +} + +macro_rules! uniform_float_impl { + ($ty:ty, $uty:ident, $f_scalar:ident, $u_scalar:ident, $bits_to_discard:expr) => { + impl SampleUniform for $ty { + type Sampler = UniformFloat<$ty>; + } + + impl UniformSampler for UniformFloat<$ty> { + type X = $ty; + + fn new(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + debug_assert!( + low.all_finite(), + "Uniform::new called with `low` non-finite." + ); + debug_assert!( + high.all_finite(), + "Uniform::new called with `high` non-finite." 
+ ); + assert!(low.all_lt(high), "Uniform::new called with `low >= high`"); + let max_rand = <$ty>::splat( + (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0, + ); + + let mut scale = high - low; + assert!(scale.all_finite(), "Uniform::new: range overflow"); + + loop { + let mask = (scale * max_rand + low).ge_mask(high); + if mask.none() { + break; + } + scale = scale.decrease_masked(mask); + } + + debug_assert!(<$ty>::splat(0.0).all_le(scale)); + + UniformFloat { low, scale } + } + + fn new_inclusive(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + debug_assert!( + low.all_finite(), + "Uniform::new_inclusive called with `low` non-finite." + ); + debug_assert!( + high.all_finite(), + "Uniform::new_inclusive called with `high` non-finite." + ); + assert!( + low.all_le(high), + "Uniform::new_inclusive called with `low > high`" + ); + let max_rand = <$ty>::splat( + (::core::$u_scalar::MAX >> $bits_to_discard).into_float_with_exponent(0) - 1.0, + ); + + let mut scale = (high - low) / max_rand; + assert!(scale.all_finite(), "Uniform::new_inclusive: range overflow"); + + loop { + let mask = (scale * max_rand + low).gt_mask(high); + if mask.none() { + break; + } + scale = scale.decrease_masked(mask); + } + + debug_assert!(<$ty>::splat(0.0).all_le(scale)); + + UniformFloat { low, scale } + } + + fn sample(&self, rng: &mut R) -> Self::X { + // Generate a value in the range [1, 2) + let value1_2 = (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0); + + // Get a value in the range [0, 1) in order to avoid + // overflowing into infinity when multiplying with scale + let value0_1 = value1_2 - 1.0; + + // We don't use `f64::mul_add`, because it is not available with + // `no_std`. Furthermore, it is slower for some targets (but + // faster for others). 
However, the order of multiplication and + // addition is important, because on some platforms (e.g. ARM) + // it will be optimized to a single (non-FMA) instruction. + value0_1 * self.scale + self.low + } + + #[inline] + fn sample_single(low_b: B1, high_b: B2, rng: &mut R) -> Self::X + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + debug_assert!( + low.all_finite(), + "UniformSampler::sample_single called with `low` non-finite." + ); + debug_assert!( + high.all_finite(), + "UniformSampler::sample_single called with `high` non-finite." + ); + assert!( + low.all_lt(high), + "UniformSampler::sample_single: low >= high" + ); + let mut scale = high - low; + assert!(scale.all_finite(), "UniformSampler::sample_single: range overflow"); + + loop { + // Generate a value in the range [1, 2) + let value1_2 = + (rng.gen::<$uty>() >> $bits_to_discard).into_float_with_exponent(0); + + // Get a value in the range [0, 1) in order to avoid + // overflowing into infinity when multiplying with scale + let value0_1 = value1_2 - 1.0; + + // Doing multiply before addition allows some architectures + // to use a single instruction. + let res = value0_1 * scale + low; + + debug_assert!(low.all_le(res) || !scale.all_finite()); + if res.all_lt(high) { + return res; + } + + // This handles a number of edge cases. + // * `low` or `high` is NaN. In this case `scale` and + // `res` are going to end up as NaN. + // * `low` is negative infinity and `high` is finite. + // `scale` is going to be infinite and `res` will be + // NaN. + // * `high` is positive infinity and `low` is finite. + // `scale` is going to be infinite and `res` will + // be infinite or NaN (if value0_1 is 0). + // * `low` is negative infinity and `high` is positive + // infinity. `scale` will be infinite and `res` will + // be NaN. + // * `low` and `high` are finite, but `high - low` + // overflows to infinite. 
`scale` will be infinite + // and `res` will be infinite or NaN (if value0_1 is 0). + // So if `high` or `low` are non-finite, we are guaranteed + // to fail the `res < high` check above and end up here. + // + // While we technically should check for non-finite `low` + // and `high` before entering the loop, by doing the checks + // here instead, we allow the common case to avoid these + // checks. But we are still guaranteed that if `low` or + // `high` are non-finite we'll end up here and can do the + // appropriate checks. + // + // Likewise `high - low` overflowing to infinity is also + // rare, so handle it here after the common case. + let mask = !scale.finite_mask(); + if mask.any() { + assert!( + low.all_finite() && high.all_finite(), + "Uniform::sample_single: low and high must be finite" + ); + scale = scale.decrease_masked(mask); + } + } + } + } + }; +} + +uniform_float_impl! { f32, u32, f32, u32, 32 - 23 } +uniform_float_impl! { f64, u64, f64, u64, 64 - 52 } + +#[cfg(feature = "simd_support")] +uniform_float_impl! { f32x2, u32x2, f32, u32, 32 - 23 } +#[cfg(feature = "simd_support")] +uniform_float_impl! { f32x4, u32x4, f32, u32, 32 - 23 } +#[cfg(feature = "simd_support")] +uniform_float_impl! { f32x8, u32x8, f32, u32, 32 - 23 } +#[cfg(feature = "simd_support")] +uniform_float_impl! { f32x16, u32x16, f32, u32, 32 - 23 } + +#[cfg(feature = "simd_support")] +uniform_float_impl! { f64x2, u64x2, f64, u64, 64 - 52 } +#[cfg(feature = "simd_support")] +uniform_float_impl! { f64x4, u64x4, f64, u64, 64 - 52 } +#[cfg(feature = "simd_support")] +uniform_float_impl! { f64x8, u64x8, f64, u64, 64 - 52 } + + +/// The back-end implementing [`UniformSampler`] for `Duration`. +/// +/// Unless you are implementing [`UniformSampler`] for your own types, this type +/// should not be used directly, use [`Uniform`] instead. 
+#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub struct UniformDuration { + mode: UniformDurationMode, + offset: u32, +} + +#[derive(Debug, Copy, Clone)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +enum UniformDurationMode { + Small { + secs: u64, + nanos: Uniform, + }, + Medium { + nanos: Uniform, + }, + Large { + max_secs: u64, + max_nanos: u32, + secs: Uniform, + }, +} + +impl SampleUniform for Duration { + type Sampler = UniformDuration; +} + +impl UniformSampler for UniformDuration { + type X = Duration; + + #[inline] + fn new(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + assert!(low < high, "Uniform::new called with `low >= high`"); + UniformDuration::new_inclusive(low, high - Duration::new(0, 1)) + } + + #[inline] + fn new_inclusive(low_b: B1, high_b: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + assert!( + low <= high, + "Uniform::new_inclusive called with `low > high`" + ); + + let low_s = low.as_secs(); + let low_n = low.subsec_nanos(); + let mut high_s = high.as_secs(); + let mut high_n = high.subsec_nanos(); + + if high_n < low_n { + high_s -= 1; + high_n += 1_000_000_000; + } + + let mode = if low_s == high_s { + UniformDurationMode::Small { + secs: low_s, + nanos: Uniform::new_inclusive(low_n, high_n), + } + } else { + let max = high_s + .checked_mul(1_000_000_000) + .and_then(|n| n.checked_add(u64::from(high_n))); + + if let Some(higher_bound) = max { + let lower_bound = low_s * 1_000_000_000 + u64::from(low_n); + UniformDurationMode::Medium { + nanos: Uniform::new_inclusive(lower_bound, higher_bound), + } + } else { + // An offset is applied to simplify generation of nanoseconds + let max_nanos = high_n - low_n; + UniformDurationMode::Large { + max_secs: high_s, + 
max_nanos, + secs: Uniform::new_inclusive(low_s, high_s), + } + } + }; + UniformDuration { + mode, + offset: low_n, + } + } + + #[inline] + fn sample(&self, rng: &mut R) -> Duration { + match self.mode { + UniformDurationMode::Small { secs, nanos } => { + let n = nanos.sample(rng); + Duration::new(secs, n) + } + UniformDurationMode::Medium { nanos } => { + let nanos = nanos.sample(rng); + Duration::new(nanos / 1_000_000_000, (nanos % 1_000_000_000) as u32) + } + UniformDurationMode::Large { + max_secs, + max_nanos, + secs, + } => { + // constant folding means this is at least as fast as `Rng::sample(Range)` + let nano_range = Uniform::new(0, 1_000_000_000); + loop { + let s = secs.sample(rng); + let n = nano_range.sample(rng); + if !(s == max_secs && n > max_nanos) { + let sum = n + self.offset; + break Duration::new(s, sum); + } + } + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::rngs::mock::StepRng; + + #[test] + #[cfg(feature = "serde1")] + fn test_serialization_uniform_duration() { + let distr = UniformDuration::new(Duration::from_secs(10), Duration::from_secs(60)); + let de_distr: UniformDuration = bincode::deserialize(&bincode::serialize(&distr).unwrap()).unwrap(); + assert_eq!( + distr.offset, de_distr.offset + ); + match (distr.mode, de_distr.mode) { + (UniformDurationMode::Small {secs: a_secs, nanos: a_nanos}, UniformDurationMode::Small {secs, nanos}) => { + assert_eq!(a_secs, secs); + + assert_eq!(a_nanos.0.low, nanos.0.low); + assert_eq!(a_nanos.0.range, nanos.0.range); + assert_eq!(a_nanos.0.z, nanos.0.z); + } + (UniformDurationMode::Medium {nanos: a_nanos} , UniformDurationMode::Medium {nanos}) => { + assert_eq!(a_nanos.0.low, nanos.0.low); + assert_eq!(a_nanos.0.range, nanos.0.range); + assert_eq!(a_nanos.0.z, nanos.0.z); + } + (UniformDurationMode::Large {max_secs:a_max_secs, max_nanos:a_max_nanos, secs:a_secs}, UniformDurationMode::Large {max_secs, max_nanos, secs} ) => { + assert_eq!(a_max_secs, max_secs); + 
assert_eq!(a_max_nanos, max_nanos); + + assert_eq!(a_secs.0.low, secs.0.low); + assert_eq!(a_secs.0.range, secs.0.range); + assert_eq!(a_secs.0.z, secs.0.z); + } + _ => panic!("`UniformDurationMode` was not serialized/deserialized correctly") + } + } + + #[test] + #[cfg(feature = "serde1")] + fn test_uniform_serialization() { + let unit_box: Uniform = Uniform::new(-1, 1); + let de_unit_box: Uniform = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap(); + + assert_eq!(unit_box.0.low, de_unit_box.0.low); + assert_eq!(unit_box.0.range, de_unit_box.0.range); + assert_eq!(unit_box.0.z, de_unit_box.0.z); + + let unit_box: Uniform = Uniform::new(-1., 1.); + let de_unit_box: Uniform = bincode::deserialize(&bincode::serialize(&unit_box).unwrap()).unwrap(); + + assert_eq!(unit_box.0.low, de_unit_box.0.low); + assert_eq!(unit_box.0.scale, de_unit_box.0.scale); + } + + #[should_panic] + #[test] + fn test_uniform_bad_limits_equal_int() { + Uniform::new(10, 10); + } + + #[test] + fn test_uniform_good_limits_equal_int() { + let mut rng = crate::test::rng(804); + let dist = Uniform::new_inclusive(10, 10); + for _ in 0..20 { + assert_eq!(rng.sample(dist), 10); + } + } + + #[should_panic] + #[test] + fn test_uniform_bad_limits_flipped_int() { + Uniform::new(10, 5); + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_integers() { + use core::{i128, u128}; + use core::{i16, i32, i64, i8, isize}; + use core::{u16, u32, u64, u8, usize}; + + let mut rng = crate::test::rng(251); + macro_rules! 
t { + ($ty:ident, $v:expr, $le:expr, $lt:expr) => {{ + for &(low, high) in $v.iter() { + let my_uniform = Uniform::new(low, high); + for _ in 0..1000 { + let v: $ty = rng.sample(my_uniform); + assert!($le(low, v) && $lt(v, high)); + } + + let my_uniform = Uniform::new_inclusive(low, high); + for _ in 0..1000 { + let v: $ty = rng.sample(my_uniform); + assert!($le(low, v) && $le(v, high)); + } + + let my_uniform = Uniform::new(&low, high); + for _ in 0..1000 { + let v: $ty = rng.sample(my_uniform); + assert!($le(low, v) && $lt(v, high)); + } + + let my_uniform = Uniform::new_inclusive(&low, &high); + for _ in 0..1000 { + let v: $ty = rng.sample(my_uniform); + assert!($le(low, v) && $le(v, high)); + } + + for _ in 0..1000 { + let v = <$ty as SampleUniform>::Sampler::sample_single(low, high, &mut rng); + assert!($le(low, v) && $lt(v, high)); + } + + for _ in 0..1000 { + let v = <$ty as SampleUniform>::Sampler::sample_single_inclusive(low, high, &mut rng); + assert!($le(low, v) && $le(v, high)); + } + } + }}; + + // scalar bulk + ($($ty:ident),*) => {{ + $(t!( + $ty, + [(0, 10), (10, 127), ($ty::MIN, $ty::MAX)], + |x, y| x <= y, + |x, y| x < y + );)* + }}; + + // simd bulk + ($($ty:ident),* => $scalar:ident) => {{ + $(t!( + $ty, + [ + ($ty::splat(0), $ty::splat(10)), + ($ty::splat(10), $ty::splat(127)), + ($ty::splat($scalar::MIN), $ty::splat($scalar::MAX)), + ], + |x: $ty, y| x.le(y).all(), + |x: $ty, y| x.lt(y).all() + );)* + }}; + } + t!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize, i128, u128); + + #[cfg(feature = "simd_support")] + { + t!(u8x2, u8x4, u8x8, u8x16, u8x32, u8x64 => u8); + t!(i8x2, i8x4, i8x8, i8x16, i8x32, i8x64 => i8); + t!(u16x2, u16x4, u16x8, u16x16, u16x32 => u16); + t!(i16x2, i16x4, i16x8, i16x16, i16x32 => i16); + t!(u32x2, u32x4, u32x8, u32x16 => u32); + t!(i32x2, i32x4, i32x8, i32x16 => i32); + t!(u64x2, u64x4, u64x8 => u64); + t!(i64x2, i64x4, i64x8 => i64); + } + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn 
test_char() { + let mut rng = crate::test::rng(891); + let mut max = core::char::from_u32(0).unwrap(); + for _ in 0..100 { + let c = rng.gen_range('A'..='Z'); + assert!(('A'..='Z').contains(&c)); + max = max.max(c); + } + assert_eq!(max, 'Z'); + let d = Uniform::new( + core::char::from_u32(0xD7F0).unwrap(), + core::char::from_u32(0xE010).unwrap(), + ); + for _ in 0..100 { + let c = d.sample(&mut rng); + assert!((c as u32) < 0xD800 || (c as u32) > 0xDFFF); + } + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_floats() { + let mut rng = crate::test::rng(252); + let mut zero_rng = StepRng::new(0, 0); + let mut max_rng = StepRng::new(0xffff_ffff_ffff_ffff, 0); + macro_rules! t { + ($ty:ty, $f_scalar:ident, $bits_shifted:expr) => {{ + let v: &[($f_scalar, $f_scalar)] = &[ + (0.0, 100.0), + (-1e35, -1e25), + (1e-35, 1e-25), + (-1e35, 1e35), + (<$f_scalar>::from_bits(0), <$f_scalar>::from_bits(3)), + (-<$f_scalar>::from_bits(10), -<$f_scalar>::from_bits(1)), + (-<$f_scalar>::from_bits(5), 0.0), + (-<$f_scalar>::from_bits(7), -0.0), + (0.1 * ::core::$f_scalar::MAX, ::core::$f_scalar::MAX), + (-::core::$f_scalar::MAX * 0.2, ::core::$f_scalar::MAX * 0.7), + ]; + for &(low_scalar, high_scalar) in v.iter() { + for lane in 0..<$ty>::lanes() { + let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar); + let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar); + let my_uniform = Uniform::new(low, high); + let my_incl_uniform = Uniform::new_inclusive(low, high); + for _ in 0..100 { + let v = rng.sample(my_uniform).extract(lane); + assert!(low_scalar <= v && v < high_scalar); + let v = rng.sample(my_incl_uniform).extract(lane); + assert!(low_scalar <= v && v <= high_scalar); + let v = <$ty as SampleUniform>::Sampler + ::sample_single(low, high, &mut rng).extract(lane); + assert!(low_scalar <= v && v < high_scalar); + } + + assert_eq!( + rng.sample(Uniform::new_inclusive(low, low)).extract(lane), + low_scalar + ); + + 
assert_eq!(zero_rng.sample(my_uniform).extract(lane), low_scalar); + assert_eq!(zero_rng.sample(my_incl_uniform).extract(lane), low_scalar); + assert_eq!(<$ty as SampleUniform>::Sampler + ::sample_single(low, high, &mut zero_rng) + .extract(lane), low_scalar); + assert!(max_rng.sample(my_uniform).extract(lane) < high_scalar); + assert!(max_rng.sample(my_incl_uniform).extract(lane) <= high_scalar); + + // Don't run this test for really tiny differences between high and low + // since for those rounding might result in selecting high for a very + // long time. + if (high_scalar - low_scalar) > 0.0001 { + let mut lowering_max_rng = StepRng::new( + 0xffff_ffff_ffff_ffff, + (-1i64 << $bits_shifted) as u64, + ); + assert!( + <$ty as SampleUniform>::Sampler + ::sample_single(low, high, &mut lowering_max_rng) + .extract(lane) < high_scalar + ); + } + } + } + + assert_eq!( + rng.sample(Uniform::new_inclusive( + ::core::$f_scalar::MAX, + ::core::$f_scalar::MAX + )), + ::core::$f_scalar::MAX + ); + assert_eq!( + rng.sample(Uniform::new_inclusive( + -::core::$f_scalar::MAX, + -::core::$f_scalar::MAX + )), + -::core::$f_scalar::MAX + ); + }}; + } + + t!(f32, f32, 32 - 23); + t!(f64, f64, 64 - 52); + #[cfg(feature = "simd_support")] + { + t!(f32x2, f32, 32 - 23); + t!(f32x4, f32, 32 - 23); + t!(f32x8, f32, 32 - 23); + t!(f32x16, f32, 32 - 23); + t!(f64x2, f64, 64 - 52); + t!(f64x4, f64, 64 - 52); + t!(f64x8, f64, 64 - 52); + } + } + + #[test] + #[should_panic] + fn test_float_overflow() { + let _ = Uniform::from(::core::f64::MIN..::core::f64::MAX); + } + + #[test] + #[should_panic] + fn test_float_overflow_single() { + let mut rng = crate::test::rng(252); + rng.gen_range(::core::f64::MIN..::core::f64::MAX); + } + + #[test] + #[cfg(all( + feature = "std", + not(target_arch = "wasm32"), + not(target_arch = "asmjs") + ))] + fn test_float_assertions() { + use super::SampleUniform; + use std::panic::catch_unwind; + fn range(low: T, high: T) { + let mut rng = crate::test::rng(253); + 
T::Sampler::sample_single(low, high, &mut rng); + } + + macro_rules! t { + ($ty:ident, $f_scalar:ident) => {{ + let v: &[($f_scalar, $f_scalar)] = &[ + (::std::$f_scalar::NAN, 0.0), + (1.0, ::std::$f_scalar::NAN), + (::std::$f_scalar::NAN, ::std::$f_scalar::NAN), + (1.0, 0.5), + (::std::$f_scalar::MAX, -::std::$f_scalar::MAX), + (::std::$f_scalar::INFINITY, ::std::$f_scalar::INFINITY), + ( + ::std::$f_scalar::NEG_INFINITY, + ::std::$f_scalar::NEG_INFINITY, + ), + (::std::$f_scalar::NEG_INFINITY, 5.0), + (5.0, ::std::$f_scalar::INFINITY), + (::std::$f_scalar::NAN, ::std::$f_scalar::INFINITY), + (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::NAN), + (::std::$f_scalar::NEG_INFINITY, ::std::$f_scalar::INFINITY), + ]; + for &(low_scalar, high_scalar) in v.iter() { + for lane in 0..<$ty>::lanes() { + let low = <$ty>::splat(0.0 as $f_scalar).replace(lane, low_scalar); + let high = <$ty>::splat(1.0 as $f_scalar).replace(lane, high_scalar); + assert!(catch_unwind(|| range(low, high)).is_err()); + assert!(catch_unwind(|| Uniform::new(low, high)).is_err()); + assert!(catch_unwind(|| Uniform::new_inclusive(low, high)).is_err()); + assert!(catch_unwind(|| range(low, low)).is_err()); + assert!(catch_unwind(|| Uniform::new(low, low)).is_err()); + } + } + }}; + } + + t!(f32, f32); + t!(f64, f64); + #[cfg(feature = "simd_support")] + { + t!(f32x2, f32); + t!(f32x4, f32); + t!(f32x8, f32); + t!(f32x16, f32); + t!(f64x2, f64); + t!(f64x4, f64); + t!(f64x8, f64); + } + } + + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_durations() { + let mut rng = crate::test::rng(253); + + let v = &[ + (Duration::new(10, 50000), Duration::new(100, 1234)), + (Duration::new(0, 100), Duration::new(1, 50)), + ( + Duration::new(0, 0), + Duration::new(u64::max_value(), 999_999_999), + ), + ]; + for &(low, high) in v.iter() { + let my_uniform = Uniform::new(low, high); + for _ in 0..1000 { + let v = rng.sample(my_uniform); + assert!(low <= v && v < high); + } + } + } + + #[test] 
+ fn test_custom_uniform() { + use crate::distributions::uniform::{ + SampleBorrow, SampleUniform, UniformFloat, UniformSampler, + }; + #[derive(Clone, Copy, PartialEq, PartialOrd)] + struct MyF32 { + x: f32, + } + #[derive(Clone, Copy, Debug)] + struct UniformMyF32(UniformFloat); + impl UniformSampler for UniformMyF32 { + type X = MyF32; + + fn new(low: B1, high: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + UniformMyF32(UniformFloat::::new(low.borrow().x, high.borrow().x)) + } + + fn new_inclusive(low: B1, high: B2) -> Self + where + B1: SampleBorrow + Sized, + B2: SampleBorrow + Sized, + { + UniformSampler::new(low, high) + } + + fn sample(&self, rng: &mut R) -> Self::X { + MyF32 { + x: self.0.sample(rng), + } + } + } + impl SampleUniform for MyF32 { + type Sampler = UniformMyF32; + } + + let (low, high) = (MyF32 { x: 17.0f32 }, MyF32 { x: 22.0f32 }); + let uniform = Uniform::new(low, high); + let mut rng = crate::test::rng(804); + for _ in 0..100 { + let x: MyF32 = rng.sample(uniform); + assert!(low <= x && x < high); + } + } + + #[test] + fn test_uniform_from_std_range() { + let r = Uniform::from(2u32..7); + assert_eq!(r.0.low, 2); + assert_eq!(r.0.range, 5); + let r = Uniform::from(2.0f64..7.0); + assert_eq!(r.0.low, 2.0); + assert_eq!(r.0.scale, 5.0); + } + + #[test] + fn test_uniform_from_std_range_inclusive() { + let r = Uniform::from(2u32..=6); + assert_eq!(r.0.low, 2); + assert_eq!(r.0.range, 5); + let r = Uniform::from(2.0f64..=7.0); + assert_eq!(r.0.low, 2.0); + assert!(r.0.scale > 5.0); + assert!(r.0.scale < 5.0 + 1e-14); + } + + #[test] + fn value_stability() { + fn test_samples( + lb: T, ub: T, expected_single: &[T], expected_multiple: &[T], + ) where Uniform: Distribution { + let mut rng = crate::test::rng(897); + let mut buf = [lb; 3]; + + for x in &mut buf { + *x = T::Sampler::sample_single(lb, ub, &mut rng); + } + assert_eq!(&buf, expected_single); + + let distr = Uniform::new(lb, ub); + for x in &mut buf { + 
*x = rng.sample(&distr); + } + assert_eq!(&buf, expected_multiple); + } + + // We test on a sub-set of types; possibly we should do more. + // TODO: SIMD types + + test_samples(11u8, 219, &[17, 66, 214], &[181, 93, 165]); + test_samples(11u32, 219, &[17, 66, 214], &[181, 93, 165]); + + test_samples(0f32, 1e-2f32, &[0.0003070104, 0.0026630748, 0.00979833], &[ + 0.008194133, + 0.00398172, + 0.007428536, + ]); + test_samples( + -1e10f64, + 1e10f64, + &[-4673848682.871551, 6388267422.932352, 4857075081.198343], + &[1173375212.1808167, 1917642852.109581, 2365076174.3153973], + ); + + test_samples( + Duration::new(2, 0), + Duration::new(4, 0), + &[ + Duration::new(2, 532615131), + Duration::new(3, 638826742), + Duration::new(3, 485707508), + ], + &[ + Duration::new(3, 117337521), + Duration::new(3, 191764285), + Duration::new(3, 236507617), + ], + ); + } + + #[test] + fn uniform_distributions_can_be_compared() { + assert_eq!(Uniform::new(1.0, 2.0), Uniform::new(1.0, 2.0)); + + // To cover UniformInt + assert_eq!(Uniform::new(1 as u32, 2 as u32), Uniform::new(1 as u32, 2 as u32)); + } +} diff --git a/src/rust/vendor/rand/src/distributions/utils.rs b/src/rust/vendor/rand/src/distributions/utils.rs new file mode 100644 index 000000000..89da5fd7a --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/utils.rs @@ -0,0 +1,429 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Math helper functions + +#[cfg(feature = "simd_support")] use packed_simd::*; + + +pub(crate) trait WideningMultiply { + type Output; + + fn wmul(self, x: RHS) -> Self::Output; +} + +macro_rules! 
wmul_impl { + ($ty:ty, $wide:ty, $shift:expr) => { + impl WideningMultiply for $ty { + type Output = ($ty, $ty); + + #[inline(always)] + fn wmul(self, x: $ty) -> Self::Output { + let tmp = (self as $wide) * (x as $wide); + ((tmp >> $shift) as $ty, tmp as $ty) + } + } + }; + + // simd bulk implementation + ($(($ty:ident, $wide:ident),)+, $shift:expr) => { + $( + impl WideningMultiply for $ty { + type Output = ($ty, $ty); + + #[inline(always)] + fn wmul(self, x: $ty) -> Self::Output { + // For supported vectors, this should compile to a couple + // supported multiply & swizzle instructions (no actual + // casting). + // TODO: optimize + let y: $wide = self.cast(); + let x: $wide = x.cast(); + let tmp = y * x; + let hi: $ty = (tmp >> $shift).cast(); + let lo: $ty = tmp.cast(); + (hi, lo) + } + } + )+ + }; +} +wmul_impl! { u8, u16, 8 } +wmul_impl! { u16, u32, 16 } +wmul_impl! { u32, u64, 32 } +wmul_impl! { u64, u128, 64 } + +// This code is a translation of the __mulddi3 function in LLVM's +// compiler-rt. It is an optimised variant of the common method +// `(a + b) * (c + d) = ac + ad + bc + bd`. +// +// For some reason LLVM can optimise the C version very well, but +// keeps shuffling registers in this Rust translation. +macro_rules! 
wmul_impl_large { + ($ty:ty, $half:expr) => { + impl WideningMultiply for $ty { + type Output = ($ty, $ty); + + #[inline(always)] + fn wmul(self, b: $ty) -> Self::Output { + const LOWER_MASK: $ty = !0 >> $half; + let mut low = (self & LOWER_MASK).wrapping_mul(b & LOWER_MASK); + let mut t = low >> $half; + low &= LOWER_MASK; + t += (self >> $half).wrapping_mul(b & LOWER_MASK); + low += (t & LOWER_MASK) << $half; + let mut high = t >> $half; + t = low >> $half; + low &= LOWER_MASK; + t += (b >> $half).wrapping_mul(self & LOWER_MASK); + low += (t & LOWER_MASK) << $half; + high += t >> $half; + high += (self >> $half).wrapping_mul(b >> $half); + + (high, low) + } + } + }; + + // simd bulk implementation + (($($ty:ty,)+) $scalar:ty, $half:expr) => { + $( + impl WideningMultiply for $ty { + type Output = ($ty, $ty); + + #[inline(always)] + fn wmul(self, b: $ty) -> Self::Output { + // needs wrapping multiplication + const LOWER_MASK: $scalar = !0 >> $half; + let mut low = (self & LOWER_MASK) * (b & LOWER_MASK); + let mut t = low >> $half; + low &= LOWER_MASK; + t += (self >> $half) * (b & LOWER_MASK); + low += (t & LOWER_MASK) << $half; + let mut high = t >> $half; + t = low >> $half; + low &= LOWER_MASK; + t += (b >> $half) * (self & LOWER_MASK); + low += (t & LOWER_MASK) << $half; + high += t >> $half; + high += (self >> $half) * (b >> $half); + + (high, low) + } + } + )+ + }; +} +wmul_impl_large! { u128, 64 } + +macro_rules! wmul_impl_usize { + ($ty:ty) => { + impl WideningMultiply for usize { + type Output = (usize, usize); + + #[inline(always)] + fn wmul(self, x: usize) -> Self::Output { + let (high, low) = (self as $ty).wmul(x as $ty); + (high as usize, low as usize) + } + } + }; +} +#[cfg(target_pointer_width = "16")] +wmul_impl_usize! { u16 } +#[cfg(target_pointer_width = "32")] +wmul_impl_usize! { u32 } +#[cfg(target_pointer_width = "64")] +wmul_impl_usize! 
{ u64 } + +#[cfg(feature = "simd_support")] +mod simd_wmul { + use super::*; + #[cfg(target_arch = "x86")] use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; + + wmul_impl! { + (u8x2, u16x2), + (u8x4, u16x4), + (u8x8, u16x8), + (u8x16, u16x16), + (u8x32, u16x32),, + 8 + } + + wmul_impl! { (u16x2, u32x2),, 16 } + wmul_impl! { (u16x4, u32x4),, 16 } + #[cfg(not(target_feature = "sse2"))] + wmul_impl! { (u16x8, u32x8),, 16 } + #[cfg(not(target_feature = "avx2"))] + wmul_impl! { (u16x16, u32x16),, 16 } + + // 16-bit lane widths allow use of the x86 `mulhi` instructions, which + // means `wmul` can be implemented with only two instructions. + #[allow(unused_macros)] + macro_rules! wmul_impl_16 { + ($ty:ident, $intrinsic:ident, $mulhi:ident, $mullo:ident) => { + impl WideningMultiply for $ty { + type Output = ($ty, $ty); + + #[inline(always)] + fn wmul(self, x: $ty) -> Self::Output { + let b = $intrinsic::from_bits(x); + let a = $intrinsic::from_bits(self); + let hi = $ty::from_bits(unsafe { $mulhi(a, b) }); + let lo = $ty::from_bits(unsafe { $mullo(a, b) }); + (hi, lo) + } + } + }; + } + + #[cfg(target_feature = "sse2")] + wmul_impl_16! { u16x8, __m128i, _mm_mulhi_epu16, _mm_mullo_epi16 } + #[cfg(target_feature = "avx2")] + wmul_impl_16! { u16x16, __m256i, _mm256_mulhi_epu16, _mm256_mullo_epi16 } + // FIXME: there are no `__m512i` types in stdsimd yet, so `wmul::` + // cannot use the same implementation. + + wmul_impl! { + (u32x2, u64x2), + (u32x4, u64x4), + (u32x8, u64x8),, + 32 + } + + // TODO: optimize, this seems to seriously slow things down + wmul_impl_large! { (u8x64,) u8, 4 } + wmul_impl_large! { (u16x32,) u16, 8 } + wmul_impl_large! { (u32x16,) u32, 16 } + wmul_impl_large! { (u64x2, u64x4, u64x8,) u64, 32 } +} + +/// Helper trait when dealing with scalar and SIMD floating point types. +pub(crate) trait FloatSIMDUtils { + // `PartialOrd` for vectors compares lexicographically. 
We want to compare all + // the individual SIMD lanes instead, and get the combined result over all + // lanes. This is possible using something like `a.lt(b).all()`, but we + // implement it as a trait so we can write the same code for `f32` and `f64`. + // Only the comparison functions we need are implemented. + fn all_lt(self, other: Self) -> bool; + fn all_le(self, other: Self) -> bool; + fn all_finite(self) -> bool; + + type Mask; + fn finite_mask(self) -> Self::Mask; + fn gt_mask(self, other: Self) -> Self::Mask; + fn ge_mask(self, other: Self) -> Self::Mask; + + // Decrease all lanes where the mask is `true` to the next lower value + // representable by the floating-point type. At least one of the lanes + // must be set. + fn decrease_masked(self, mask: Self::Mask) -> Self; + + // Convert from int value. Conversion is done while retaining the numerical + // value, not by retaining the binary representation. + type UInt; + fn cast_from_int(i: Self::UInt) -> Self; +} + +/// Implement functions available in std builds but missing from core primitives +#[cfg(not(std))] +// False positive: We are following `std` here. 
+#[allow(clippy::wrong_self_convention)] +pub(crate) trait Float: Sized { + fn is_nan(self) -> bool; + fn is_infinite(self) -> bool; + fn is_finite(self) -> bool; +} + +/// Implement functions on f32/f64 to give them APIs similar to SIMD types +pub(crate) trait FloatAsSIMD: Sized { + #[inline(always)] + fn lanes() -> usize { + 1 + } + #[inline(always)] + fn splat(scalar: Self) -> Self { + scalar + } + #[inline(always)] + fn extract(self, index: usize) -> Self { + debug_assert_eq!(index, 0); + self + } + #[inline(always)] + fn replace(self, index: usize, new_value: Self) -> Self { + debug_assert_eq!(index, 0); + new_value + } +} + +pub(crate) trait BoolAsSIMD: Sized { + fn any(self) -> bool; + fn all(self) -> bool; + fn none(self) -> bool; +} + +impl BoolAsSIMD for bool { + #[inline(always)] + fn any(self) -> bool { + self + } + + #[inline(always)] + fn all(self) -> bool { + self + } + + #[inline(always)] + fn none(self) -> bool { + !self + } +} + +macro_rules! scalar_float_impl { + ($ty:ident, $uty:ident) => { + #[cfg(not(std))] + impl Float for $ty { + #[inline] + fn is_nan(self) -> bool { + self != self + } + + #[inline] + fn is_infinite(self) -> bool { + self == ::core::$ty::INFINITY || self == ::core::$ty::NEG_INFINITY + } + + #[inline] + fn is_finite(self) -> bool { + !(self.is_nan() || self.is_infinite()) + } + } + + impl FloatSIMDUtils for $ty { + type Mask = bool; + type UInt = $uty; + + #[inline(always)] + fn all_lt(self, other: Self) -> bool { + self < other + } + + #[inline(always)] + fn all_le(self, other: Self) -> bool { + self <= other + } + + #[inline(always)] + fn all_finite(self) -> bool { + self.is_finite() + } + + #[inline(always)] + fn finite_mask(self) -> Self::Mask { + self.is_finite() + } + + #[inline(always)] + fn gt_mask(self, other: Self) -> Self::Mask { + self > other + } + + #[inline(always)] + fn ge_mask(self, other: Self) -> Self::Mask { + self >= other + } + + #[inline(always)] + fn decrease_masked(self, mask: Self::Mask) -> Self { + 
debug_assert!(mask, "At least one lane must be set"); + <$ty>::from_bits(self.to_bits() - 1) + } + + #[inline] + fn cast_from_int(i: Self::UInt) -> Self { + i as $ty + } + } + + impl FloatAsSIMD for $ty {} + }; +} + +scalar_float_impl!(f32, u32); +scalar_float_impl!(f64, u64); + + +#[cfg(feature = "simd_support")] +macro_rules! simd_impl { + ($ty:ident, $f_scalar:ident, $mty:ident, $uty:ident) => { + impl FloatSIMDUtils for $ty { + type Mask = $mty; + type UInt = $uty; + + #[inline(always)] + fn all_lt(self, other: Self) -> bool { + self.lt(other).all() + } + + #[inline(always)] + fn all_le(self, other: Self) -> bool { + self.le(other).all() + } + + #[inline(always)] + fn all_finite(self) -> bool { + self.finite_mask().all() + } + + #[inline(always)] + fn finite_mask(self) -> Self::Mask { + // This can possibly be done faster by checking bit patterns + let neg_inf = $ty::splat(::core::$f_scalar::NEG_INFINITY); + let pos_inf = $ty::splat(::core::$f_scalar::INFINITY); + self.gt(neg_inf) & self.lt(pos_inf) + } + + #[inline(always)] + fn gt_mask(self, other: Self) -> Self::Mask { + self.gt(other) + } + + #[inline(always)] + fn ge_mask(self, other: Self) -> Self::Mask { + self.ge(other) + } + + #[inline(always)] + fn decrease_masked(self, mask: Self::Mask) -> Self { + // Casting a mask into ints will produce all bits set for + // true, and 0 for false. Adding that to the binary + // representation of a float means subtracting one from + // the binary representation, resulting in the next lower + // value representable by $ty. This works even when the + // current value is infinity. + debug_assert!(mask.any(), "At least one lane must be set"); + <$ty>::from_bits(<$uty>::from_bits(self) + <$uty>::from_bits(mask)) + } + + #[inline] + fn cast_from_int(i: Self::UInt) -> Self { + i.cast() + } + } + }; +} + +#[cfg(feature="simd_support")] simd_impl! { f32x2, f32, m32x2, u32x2 } +#[cfg(feature="simd_support")] simd_impl! 
{ f32x4, f32, m32x4, u32x4 } +#[cfg(feature="simd_support")] simd_impl! { f32x8, f32, m32x8, u32x8 } +#[cfg(feature="simd_support")] simd_impl! { f32x16, f32, m32x16, u32x16 } +#[cfg(feature="simd_support")] simd_impl! { f64x2, f64, m64x2, u64x2 } +#[cfg(feature="simd_support")] simd_impl! { f64x4, f64, m64x4, u64x4 } +#[cfg(feature="simd_support")] simd_impl! { f64x8, f64, m64x8, u64x8 } diff --git a/src/rust/vendor/rand/src/distributions/weighted.rs b/src/rust/vendor/rand/src/distributions/weighted.rs new file mode 100644 index 000000000..846b9df9c --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/weighted.rs @@ -0,0 +1,47 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Weighted index sampling +//! +//! This module is deprecated. Use [`crate::distributions::WeightedIndex`] and +//! [`crate::distributions::WeightedError`] instead. + +pub use super::{WeightedIndex, WeightedError}; + +#[allow(missing_docs)] +#[deprecated(since = "0.8.0", note = "moved to rand_distr crate")] +pub mod alias_method { + // This module exists to provide a deprecation warning which minimises + // compile errors, but still fails to compile if ever used. + use core::marker::PhantomData; + use alloc::vec::Vec; + use super::WeightedError; + + #[derive(Debug)] + pub struct WeightedIndex { + _phantom: PhantomData, + } + impl WeightedIndex { + pub fn new(_weights: Vec) -> Result { + Err(WeightedError::NoItem) + } + } + + pub trait Weight {} + macro_rules! 
impl_weight { + () => {}; + ($T:ident, $($more:ident,)*) => { + impl Weight for $T {} + impl_weight!($($more,)*); + }; + } + impl_weight!(f64, f32,); + impl_weight!(u8, u16, u32, u64, usize,); + impl_weight!(i8, i16, i32, i64, isize,); + impl_weight!(u128, i128,); +} diff --git a/src/rust/vendor/rand/src/distributions/weighted_index.rs b/src/rust/vendor/rand/src/distributions/weighted_index.rs new file mode 100644 index 000000000..8252b172f --- /dev/null +++ b/src/rust/vendor/rand/src/distributions/weighted_index.rs @@ -0,0 +1,458 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Weighted index sampling + +use crate::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler}; +use crate::distributions::Distribution; +use crate::Rng; +use core::cmp::PartialOrd; +use core::fmt; + +// Note that this whole module is only imported if feature="alloc" is enabled. +use alloc::vec::Vec; + +#[cfg(feature = "serde1")] +use serde::{Serialize, Deserialize}; + +/// A distribution using weighted sampling of discrete items +/// +/// Sampling a `WeightedIndex` distribution returns the index of a randomly +/// selected element from the iterator used when the `WeightedIndex` was +/// created. The chance of a given element being picked is proportional to the +/// value of the element. The weights can use any type `X` for which an +/// implementation of [`Uniform`] exists. +/// +/// # Performance +/// +/// Time complexity of sampling from `WeightedIndex` is `O(log N)` where +/// `N` is the number of weights. As an alternative, +/// [`rand_distr::weighted_alias`](https://docs.rs/rand_distr/*/rand_distr/weighted_alias/index.html) +/// supports `O(1)` sampling, but with much higher initialisation cost. 
+/// +/// A `WeightedIndex` contains a `Vec` and a [`Uniform`] and so its +/// size is the sum of the size of those objects, possibly plus some alignment. +/// +/// Creating a `WeightedIndex` will allocate enough space to hold `N - 1` +/// weights of type `X`, where `N` is the number of weights. However, since +/// `Vec` doesn't guarantee a particular growth strategy, additional memory +/// might be allocated but not used. Since the `WeightedIndex` object also +/// contains, this might cause additional allocations, though for primitive +/// types, [`Uniform`] doesn't allocate any memory. +/// +/// Sampling from `WeightedIndex` will result in a single call to +/// `Uniform::sample` (method of the [`Distribution`] trait), which typically +/// will request a single value from the underlying [`RngCore`], though the +/// exact number depends on the implementation of `Uniform::sample`. +/// +/// # Example +/// +/// ``` +/// use rand::prelude::*; +/// use rand::distributions::WeightedIndex; +/// +/// let choices = ['a', 'b', 'c']; +/// let weights = [2, 1, 1]; +/// let dist = WeightedIndex::new(&weights).unwrap(); +/// let mut rng = thread_rng(); +/// for _ in 0..100 { +/// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c' +/// println!("{}", choices[dist.sample(&mut rng)]); +/// } +/// +/// let items = [('a', 0), ('b', 3), ('c', 7)]; +/// let dist2 = WeightedIndex::new(items.iter().map(|item| item.1)).unwrap(); +/// for _ in 0..100 { +/// // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c' +/// println!("{}", items[dist2.sample(&mut rng)].0); +/// } +/// ``` +/// +/// [`Uniform`]: crate::distributions::Uniform +/// [`RngCore`]: crate::RngCore +#[derive(Debug, Clone, PartialEq)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub struct WeightedIndex { + cumulative_weights: Vec, + total_weight: X, + weight_distribution: X::Sampler, +} + +impl 
WeightedIndex { + /// Creates a new a `WeightedIndex` [`Distribution`] using the values + /// in `weights`. The weights can use any type `X` for which an + /// implementation of [`Uniform`] exists. + /// + /// Returns an error if the iterator is empty, if any weight is `< 0`, or + /// if its total value is 0. + /// + /// [`Uniform`]: crate::distributions::uniform::Uniform + pub fn new(weights: I) -> Result, WeightedError> + where + I: IntoIterator, + I::Item: SampleBorrow, + X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default, + { + let mut iter = weights.into_iter(); + let mut total_weight: X = iter.next().ok_or(WeightedError::NoItem)?.borrow().clone(); + + let zero = ::default(); + if !(total_weight >= zero) { + return Err(WeightedError::InvalidWeight); + } + + let mut weights = Vec::::with_capacity(iter.size_hint().0); + for w in iter { + // Note that `!(w >= x)` is not equivalent to `w < x` for partially + // ordered types due to NaNs which are equal to nothing. + if !(w.borrow() >= &zero) { + return Err(WeightedError::InvalidWeight); + } + weights.push(total_weight.clone()); + total_weight += w.borrow(); + } + + if total_weight == zero { + return Err(WeightedError::AllWeightsZero); + } + let distr = X::Sampler::new(zero, total_weight.clone()); + + Ok(WeightedIndex { + cumulative_weights: weights, + total_weight, + weight_distribution: distr, + }) + } + + /// Update a subset of weights, without changing the number of weights. + /// + /// `new_weights` must be sorted by the index. + /// + /// Using this method instead of `new` might be more efficient if only a small number of + /// weights is modified. No allocations are performed, unless the weight type `X` uses + /// allocation internally. + /// + /// In case of error, `self` is not modified. 
+ pub fn update_weights(&mut self, new_weights: &[(usize, &X)]) -> Result<(), WeightedError> + where X: for<'a> ::core::ops::AddAssign<&'a X> + + for<'a> ::core::ops::SubAssign<&'a X> + + Clone + + Default { + if new_weights.is_empty() { + return Ok(()); + } + + let zero = ::default(); + + let mut total_weight = self.total_weight.clone(); + + // Check for errors first, so we don't modify `self` in case something + // goes wrong. + let mut prev_i = None; + for &(i, w) in new_weights { + if let Some(old_i) = prev_i { + if old_i >= i { + return Err(WeightedError::InvalidWeight); + } + } + if !(*w >= zero) { + return Err(WeightedError::InvalidWeight); + } + if i > self.cumulative_weights.len() { + return Err(WeightedError::TooMany); + } + + let mut old_w = if i < self.cumulative_weights.len() { + self.cumulative_weights[i].clone() + } else { + self.total_weight.clone() + }; + if i > 0 { + old_w -= &self.cumulative_weights[i - 1]; + } + + total_weight -= &old_w; + total_weight += w; + prev_i = Some(i); + } + if total_weight <= zero { + return Err(WeightedError::AllWeightsZero); + } + + // Update the weights. Because we checked all the preconditions in the + // previous loop, this should never panic. + let mut iter = new_weights.iter(); + + let mut prev_weight = zero.clone(); + let mut next_new_weight = iter.next(); + let &(first_new_index, _) = next_new_weight.unwrap(); + let mut cumulative_weight = if first_new_index > 0 { + self.cumulative_weights[first_new_index - 1].clone() + } else { + zero.clone() + }; + for i in first_new_index..self.cumulative_weights.len() { + match next_new_weight { + Some(&(j, w)) if i == j => { + cumulative_weight += w; + next_new_weight = iter.next(); + } + _ => { + let mut tmp = self.cumulative_weights[i].clone(); + tmp -= &prev_weight; // We know this is positive. 
+ cumulative_weight += &tmp; + } + } + prev_weight = cumulative_weight.clone(); + core::mem::swap(&mut prev_weight, &mut self.cumulative_weights[i]); + } + + self.total_weight = total_weight; + self.weight_distribution = X::Sampler::new(zero, self.total_weight.clone()); + + Ok(()) + } +} + +impl Distribution for WeightedIndex +where X: SampleUniform + PartialOrd +{ + fn sample(&self, rng: &mut R) -> usize { + use ::core::cmp::Ordering; + let chosen_weight = self.weight_distribution.sample(rng); + // Find the first item which has a weight *higher* than the chosen weight. + self.cumulative_weights + .binary_search_by(|w| { + if *w <= chosen_weight { + Ordering::Less + } else { + Ordering::Greater + } + }) + .unwrap_err() + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[cfg(feature = "serde1")] + #[test] + fn test_weightedindex_serde1() { + let weighted_index = WeightedIndex::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).unwrap(); + + let ser_weighted_index = bincode::serialize(&weighted_index).unwrap(); + let de_weighted_index: WeightedIndex = + bincode::deserialize(&ser_weighted_index).unwrap(); + + assert_eq!( + de_weighted_index.cumulative_weights, + weighted_index.cumulative_weights + ); + assert_eq!(de_weighted_index.total_weight, weighted_index.total_weight); + } + + #[test] + fn test_accepting_nan(){ + assert_eq!( + WeightedIndex::new(&[core::f32::NAN, 0.5]).unwrap_err(), + WeightedError::InvalidWeight, + ); + assert_eq!( + WeightedIndex::new(&[core::f32::NAN]).unwrap_err(), + WeightedError::InvalidWeight, + ); + assert_eq!( + WeightedIndex::new(&[0.5, core::f32::NAN]).unwrap_err(), + WeightedError::InvalidWeight, + ); + + assert_eq!( + WeightedIndex::new(&[0.5, 7.0]) + .unwrap() + .update_weights(&[(0, &core::f32::NAN)]) + .unwrap_err(), + WeightedError::InvalidWeight, + ) + } + + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_weightedindex() { + let mut r = crate::test::rng(700); + const N_REPS: u32 = 5000; + let weights = [1u32, 2, 3, 
0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]; + let total_weight = weights.iter().sum::() as f32; + + let verify = |result: [i32; 14]| { + for (i, count) in result.iter().enumerate() { + let exp = (weights[i] * N_REPS) as f32 / total_weight; + let mut err = (*count as f32 - exp).abs(); + if err != 0.0 { + err /= exp; + } + assert!(err <= 0.25); + } + }; + + // WeightedIndex from vec + let mut chosen = [0i32; 14]; + let distr = WeightedIndex::new(weights.to_vec()).unwrap(); + for _ in 0..N_REPS { + chosen[distr.sample(&mut r)] += 1; + } + verify(chosen); + + // WeightedIndex from slice + chosen = [0i32; 14]; + let distr = WeightedIndex::new(&weights[..]).unwrap(); + for _ in 0..N_REPS { + chosen[distr.sample(&mut r)] += 1; + } + verify(chosen); + + // WeightedIndex from iterator + chosen = [0i32; 14]; + let distr = WeightedIndex::new(weights.iter()).unwrap(); + for _ in 0..N_REPS { + chosen[distr.sample(&mut r)] += 1; + } + verify(chosen); + + for _ in 0..5 { + assert_eq!(WeightedIndex::new(&[0, 1]).unwrap().sample(&mut r), 1); + assert_eq!(WeightedIndex::new(&[1, 0]).unwrap().sample(&mut r), 0); + assert_eq!( + WeightedIndex::new(&[0, 0, 0, 0, 10, 0]) + .unwrap() + .sample(&mut r), + 4 + ); + } + + assert_eq!( + WeightedIndex::new(&[10][0..0]).unwrap_err(), + WeightedError::NoItem + ); + assert_eq!( + WeightedIndex::new(&[0]).unwrap_err(), + WeightedError::AllWeightsZero + ); + assert_eq!( + WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(), + WeightedError::InvalidWeight + ); + assert_eq!( + WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(), + WeightedError::InvalidWeight + ); + assert_eq!( + WeightedIndex::new(&[-10]).unwrap_err(), + WeightedError::InvalidWeight + ); + } + + #[test] + fn test_update_weights() { + let data = [ + ( + &[10u32, 2, 3, 4][..], + &[(1, &100), (2, &4)][..], // positive change + &[10, 100, 4, 4][..], + ), + ( + &[1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7][..], + &[(2, &1), (5, &1), (13, &100)][..], // negative change and last element + &[1u32, 2, 
1, 0, 5, 1, 7, 1, 2, 3, 4, 5, 6, 100][..], + ), + ]; + + for (weights, update, expected_weights) in data.iter() { + let total_weight = weights.iter().sum::(); + let mut distr = WeightedIndex::new(weights.to_vec()).unwrap(); + assert_eq!(distr.total_weight, total_weight); + + distr.update_weights(update).unwrap(); + let expected_total_weight = expected_weights.iter().sum::(); + let expected_distr = WeightedIndex::new(expected_weights.to_vec()).unwrap(); + assert_eq!(distr.total_weight, expected_total_weight); + assert_eq!(distr.total_weight, expected_distr.total_weight); + assert_eq!(distr.cumulative_weights, expected_distr.cumulative_weights); + } + } + + #[test] + fn value_stability() { + fn test_samples( + weights: I, buf: &mut [usize], expected: &[usize], + ) where + I: IntoIterator, + I::Item: SampleBorrow, + X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default, + { + assert_eq!(buf.len(), expected.len()); + let distr = WeightedIndex::new(weights).unwrap(); + let mut rng = crate::test::rng(701); + for r in buf.iter_mut() { + *r = rng.sample(&distr); + } + assert_eq!(buf, expected); + } + + let mut buf = [0; 10]; + test_samples(&[1i32, 1, 1, 1, 1, 1, 1, 1, 1], &mut buf, &[ + 0, 6, 2, 6, 3, 4, 7, 8, 2, 5, + ]); + test_samples(&[0.7f32, 0.1, 0.1, 0.1], &mut buf, &[ + 0, 0, 0, 1, 0, 0, 2, 3, 0, 0, + ]); + test_samples(&[1.0f64, 0.999, 0.998, 0.997], &mut buf, &[ + 2, 2, 1, 3, 2, 1, 3, 3, 2, 1, + ]); + } + + #[test] + fn weighted_index_distributions_can_be_compared() { + assert_eq!(WeightedIndex::new(&[1, 2]), WeightedIndex::new(&[1, 2])); + } +} + +/// Error type returned from `WeightedIndex::new`. +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum WeightedError { + /// The provided weight collection contains no items. + NoItem, + + /// A weight is either less than zero, greater than the supported maximum, + /// NaN, or otherwise invalid. 
+ InvalidWeight, + + /// All items in the provided weight collection are zero. + AllWeightsZero, + + /// Too many weights are provided (length greater than `u32::MAX`) + TooMany, +} + +#[cfg(feature = "std")] +impl std::error::Error for WeightedError {} + +impl fmt::Display for WeightedError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + WeightedError::NoItem => "No weights provided in distribution", + WeightedError::InvalidWeight => "A weight is invalid in distribution", + WeightedError::AllWeightsZero => "All weights are zero in distribution", + WeightedError::TooMany => "Too many weights (hit u32::MAX) in distribution", + }) + } +} diff --git a/src/rust/vendor/rand/src/lib.rs b/src/rust/vendor/rand/src/lib.rs new file mode 100644 index 000000000..6d8471801 --- /dev/null +++ b/src/rust/vendor/rand/src/lib.rs @@ -0,0 +1,214 @@ +// Copyright 2018 Developers of the Rand project. +// Copyright 2013-2017 The Rust Project Developers. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Utilities for random number generation +//! +//! Rand provides utilities to generate random numbers, to convert them to +//! useful types and distributions, and some randomness-related algorithms. +//! +//! # Quick Start +//! +//! To get you started quickly, the easiest and highest-level way to get +//! a random value is to use [`random()`]; alternatively you can use +//! [`thread_rng()`]. The [`Rng`] trait provides a useful API on all RNGs, while +//! the [`distributions`] and [`seq`] modules provide further +//! functionality on top of RNGs. +//! +//! ``` +//! use rand::prelude::*; +//! +//! if rand::random() { // generates a boolean +//! // Try printing a random unicode code point (probably a bad idea)! +//! println!("char: {}", rand::random::()); +//! } +//! +//! 
let mut rng = rand::thread_rng(); +//! let y: f64 = rng.gen(); // generates a float between 0 and 1 +//! +//! let mut nums: Vec = (1..100).collect(); +//! nums.shuffle(&mut rng); +//! ``` +//! +//! # The Book +//! +//! For the user guide and further documentation, please read +//! [The Rust Rand Book](https://rust-random.github.io/book). + +#![doc( + html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", + html_favicon_url = "https://www.rust-lang.org/favicon.ico", + html_root_url = "https://rust-random.github.io/rand/" +)] +#![deny(missing_docs)] +#![deny(missing_debug_implementations)] +#![doc(test(attr(allow(unused_variables), deny(warnings))))] +#![no_std] +#![cfg_attr(feature = "simd_support", feature(stdsimd))] +#![cfg_attr(doc_cfg, feature(doc_cfg))] +#![allow( + clippy::float_cmp, + clippy::neg_cmp_op_on_partial_ord, +)] + +#[cfg(feature = "std")] extern crate std; +#[cfg(feature = "alloc")] extern crate alloc; + +#[allow(unused)] +macro_rules! trace { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::trace!($($x)*) + } +) } +#[allow(unused)] +macro_rules! debug { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::debug!($($x)*) + } +) } +#[allow(unused)] +macro_rules! info { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::info!($($x)*) + } +) } +#[allow(unused)] +macro_rules! warn { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::warn!($($x)*) + } +) } +#[allow(unused)] +macro_rules! 
error { ($($x:tt)*) => ( + #[cfg(feature = "log")] { + log::error!($($x)*) + } +) } + +// Re-exports from rand_core +pub use rand_core::{CryptoRng, Error, RngCore, SeedableRng}; + +// Public modules +pub mod distributions; +pub mod prelude; +mod rng; +pub mod rngs; +pub mod seq; + +// Public exports +#[cfg(all(feature = "std", feature = "std_rng"))] +pub use crate::rngs::thread::thread_rng; +pub use rng::{Fill, Rng}; + +#[cfg(all(feature = "std", feature = "std_rng"))] +use crate::distributions::{Distribution, Standard}; + +/// Generates a random value using the thread-local random number generator. +/// +/// This is simply a shortcut for `thread_rng().gen()`. See [`thread_rng`] for +/// documentation of the entropy source and [`Standard`] for documentation of +/// distributions and type-specific generation. +/// +/// # Provided implementations +/// +/// The following types have provided implementations that +/// generate values with the following ranges and distributions: +/// +/// * Integers (`i32`, `u32`, `isize`, `usize`, etc.): Uniformly distributed +/// over all values of the type. +/// * `char`: Uniformly distributed over all Unicode scalar values, i.e. all +/// code points in the range `0...0x10_FFFF`, except for the range +/// `0xD800...0xDFFF` (the surrogate code points). This includes +/// unassigned/reserved code points. +/// * `bool`: Generates `false` or `true`, each with probability 0.5. +/// * Floating point types (`f32` and `f64`): Uniformly distributed in the +/// half-open range `[0, 1)`. See notes below. +/// * Wrapping integers (`Wrapping`), besides the type identical to their +/// normal integer variants. +/// +/// Also supported is the generation of the following +/// compound types where all component types are supported: +/// +/// * Tuples (up to 12 elements): each element is generated sequentially. 
+/// * Arrays (up to 32 elements): each element is generated sequentially; +/// see also [`Rng::fill`] which supports arbitrary array length for integer +/// types and tends to be faster for `u32` and smaller types. +/// * `Option` first generates a `bool`, and if true generates and returns +/// `Some(value)` where `value: T`, otherwise returning `None`. +/// +/// # Examples +/// +/// ``` +/// let x = rand::random::(); +/// println!("{}", x); +/// +/// let y = rand::random::(); +/// println!("{}", y); +/// +/// if rand::random() { // generates a boolean +/// println!("Better lucky than good!"); +/// } +/// ``` +/// +/// If you're calling `random()` in a loop, caching the generator as in the +/// following example can increase performance. +/// +/// ``` +/// use rand::Rng; +/// +/// let mut v = vec![1, 2, 3]; +/// +/// for x in v.iter_mut() { +/// *x = rand::random() +/// } +/// +/// // can be made faster by caching thread_rng +/// +/// let mut rng = rand::thread_rng(); +/// +/// for x in v.iter_mut() { +/// *x = rng.gen(); +/// } +/// ``` +/// +/// [`Standard`]: distributions::Standard +#[cfg(all(feature = "std", feature = "std_rng"))] +#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", feature = "std_rng"))))] +#[inline] +pub fn random() -> T +where Standard: Distribution { + thread_rng().gen() +} + +#[cfg(test)] +mod test { + use super::*; + + /// Construct a deterministic RNG with the given seed + pub fn rng(seed: u64) -> impl RngCore { + // For tests, we want a statistically good, fast, reproducible RNG. + // PCG32 will do fine, and will be easy to embed if we ever need to. 
+ const INC: u64 = 11634580027462260723; + rand_pcg::Pcg32::new(seed, INC) + } + + #[test] + #[cfg(all(feature = "std", feature = "std_rng"))] + fn test_random() { + let _n: usize = random(); + let _f: f32 = random(); + let _o: Option> = random(); + #[allow(clippy::type_complexity)] + let _many: ( + (), + (usize, isize, Option<(u32, (bool,))>), + (u8, i8, u16, i16, u32, i32, u64, i64), + (f32, (f64, (f64,))), + ) = random(); + } +} diff --git a/src/rust/vendor/rand/src/prelude.rs b/src/rust/vendor/rand/src/prelude.rs new file mode 100644 index 000000000..51c457e3f --- /dev/null +++ b/src/rust/vendor/rand/src/prelude.rs @@ -0,0 +1,34 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Convenience re-export of common members +//! +//! Like the standard library's prelude, this module simplifies importing of +//! common items. Unlike the standard prelude, the contents of this module must +//! be imported manually: +//! +//! ``` +//! use rand::prelude::*; +//! # let mut r = StdRng::from_rng(thread_rng()).unwrap(); +//! # let _: f32 = r.gen(); +//! 
``` + +#[doc(no_inline)] pub use crate::distributions::Distribution; +#[cfg(feature = "small_rng")] +#[doc(no_inline)] +pub use crate::rngs::SmallRng; +#[cfg(feature = "std_rng")] +#[doc(no_inline)] pub use crate::rngs::StdRng; +#[doc(no_inline)] +#[cfg(all(feature = "std", feature = "std_rng"))] +pub use crate::rngs::ThreadRng; +#[doc(no_inline)] pub use crate::seq::{IteratorRandom, SliceRandom}; +#[doc(no_inline)] +#[cfg(all(feature = "std", feature = "std_rng"))] +pub use crate::{random, thread_rng}; +#[doc(no_inline)] pub use crate::{CryptoRng, Rng, RngCore, SeedableRng}; diff --git a/src/rust/vendor/rand/src/rng.rs b/src/rust/vendor/rand/src/rng.rs new file mode 100644 index 000000000..79a9fbff4 --- /dev/null +++ b/src/rust/vendor/rand/src/rng.rs @@ -0,0 +1,600 @@ +// Copyright 2018 Developers of the Rand project. +// Copyright 2013-2017 The Rust Project Developers. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! [`Rng`] trait + +use rand_core::{Error, RngCore}; +use crate::distributions::uniform::{SampleRange, SampleUniform}; +use crate::distributions::{self, Distribution, Standard}; +use core::num::Wrapping; +use core::{mem, slice}; + +/// An automatically-implemented extension trait on [`RngCore`] providing high-level +/// generic methods for sampling values and other convenience methods. +/// +/// This is the primary trait to use when generating random values. +/// +/// # Generic usage +/// +/// The basic pattern is `fn foo(rng: &mut R)`. Some +/// things are worth noting here: +/// +/// - Since `Rng: RngCore` and every `RngCore` implements `Rng`, it makes no +/// difference whether we use `R: Rng` or `R: RngCore`. +/// - The `+ ?Sized` un-bounding allows functions to be called directly on +/// type-erased references; i.e. `foo(r)` where `r: &mut dyn RngCore`. 
Without +/// this it would be necessary to write `foo(&mut r)`. +/// +/// An alternative pattern is possible: `fn foo(rng: R)`. This has some +/// trade-offs. It allows the argument to be consumed directly without a `&mut` +/// (which is how `from_rng(thread_rng())` works); also it still works directly +/// on references (including type-erased references). Unfortunately within the +/// function `foo` it is not known whether `rng` is a reference type or not, +/// hence many uses of `rng` require an extra reference, either explicitly +/// (`distr.sample(&mut rng)`) or implicitly (`rng.gen()`); one may hope the +/// optimiser can remove redundant references later. +/// +/// Example: +/// +/// ``` +/// # use rand::thread_rng; +/// use rand::Rng; +/// +/// fn foo(rng: &mut R) -> f32 { +/// rng.gen() +/// } +/// +/// # let v = foo(&mut thread_rng()); +/// ``` +pub trait Rng: RngCore { + /// Return a random value supporting the [`Standard`] distribution. + /// + /// # Example + /// + /// ``` + /// use rand::{thread_rng, Rng}; + /// + /// let mut rng = thread_rng(); + /// let x: u32 = rng.gen(); + /// println!("{}", x); + /// println!("{:?}", rng.gen::<(f64, bool)>()); + /// ``` + /// + /// # Arrays and tuples + /// + /// The `rng.gen()` method is able to generate arrays (up to 32 elements) + /// and tuples (up to 12 elements), so long as all element types can be + /// generated. + /// When using `rustc` ≥ 1.51, enable the `min_const_gen` feature to support + /// arrays larger than 32 elements. + /// + /// For arrays of integers, especially for those with small element types + /// (< 64 bit), it will likely be faster to instead use [`Rng::fill`]. 
+ /// + /// ``` + /// use rand::{thread_rng, Rng}; + /// + /// let mut rng = thread_rng(); + /// let tuple: (u8, i32, char) = rng.gen(); // arbitrary tuple support + /// + /// let arr1: [f32; 32] = rng.gen(); // array construction + /// let mut arr2 = [0u8; 128]; + /// rng.fill(&mut arr2); // array fill + /// ``` + /// + /// [`Standard`]: distributions::Standard + #[inline] + fn gen(&mut self) -> T + where Standard: Distribution { + Standard.sample(self) + } + + /// Generate a random value in the given range. + /// + /// This function is optimised for the case that only a single sample is + /// made from the given range. See also the [`Uniform`] distribution + /// type which may be faster if sampling from the same range repeatedly. + /// + /// Only `gen_range(low..high)` and `gen_range(low..=high)` are supported. + /// + /// # Panics + /// + /// Panics if the range is empty. + /// + /// # Example + /// + /// ``` + /// use rand::{thread_rng, Rng}; + /// + /// let mut rng = thread_rng(); + /// + /// // Exclusive range + /// let n: u32 = rng.gen_range(0..10); + /// println!("{}", n); + /// let m: f64 = rng.gen_range(-40.0..1.3e5); + /// println!("{}", m); + /// + /// // Inclusive range + /// let n: u32 = rng.gen_range(0..=10); + /// println!("{}", n); + /// ``` + /// + /// [`Uniform`]: distributions::uniform::Uniform + fn gen_range(&mut self, range: R) -> T + where + T: SampleUniform, + R: SampleRange + { + assert!(!range.is_empty(), "cannot sample empty range"); + range.sample_single(self) + } + + /// Sample a new value, using the given distribution. + /// + /// ### Example + /// + /// ``` + /// use rand::{thread_rng, Rng}; + /// use rand::distributions::Uniform; + /// + /// let mut rng = thread_rng(); + /// let x = rng.sample(Uniform::new(10u32, 15)); + /// // Type annotation requires two types, the type and distribution; the + /// // distribution can be inferred. 
+ /// let y = rng.sample::(Uniform::new(10, 15)); + /// ``` + fn sample>(&mut self, distr: D) -> T { + distr.sample(self) + } + + /// Create an iterator that generates values using the given distribution. + /// + /// Note that this function takes its arguments by value. This works since + /// `(&mut R): Rng where R: Rng` and + /// `(&D): Distribution where D: Distribution`, + /// however borrowing is not automatic hence `rng.sample_iter(...)` may + /// need to be replaced with `(&mut rng).sample_iter(...)`. + /// + /// # Example + /// + /// ``` + /// use rand::{thread_rng, Rng}; + /// use rand::distributions::{Alphanumeric, Uniform, Standard}; + /// + /// let mut rng = thread_rng(); + /// + /// // Vec of 16 x f32: + /// let v: Vec = (&mut rng).sample_iter(Standard).take(16).collect(); + /// + /// // String: + /// let s: String = (&mut rng).sample_iter(Alphanumeric) + /// .take(7) + /// .map(char::from) + /// .collect(); + /// + /// // Combined values + /// println!("{:?}", (&mut rng).sample_iter(Standard).take(5) + /// .collect::>()); + /// + /// // Dice-rolling: + /// let die_range = Uniform::new_inclusive(1, 6); + /// let mut roll_die = (&mut rng).sample_iter(die_range); + /// while roll_die.next().unwrap() != 6 { + /// println!("Not a 6; rolling again!"); + /// } + /// ``` + fn sample_iter(self, distr: D) -> distributions::DistIter + where + D: Distribution, + Self: Sized, + { + distr.sample_iter(self) + } + + /// Fill any type implementing [`Fill`] with random data + /// + /// The distribution is expected to be uniform with portable results, but + /// this cannot be guaranteed for third-party implementations. + /// + /// This is identical to [`try_fill`] except that it panics on error. 
+ /// + /// # Example + /// + /// ``` + /// use rand::{thread_rng, Rng}; + /// + /// let mut arr = [0i8; 20]; + /// thread_rng().fill(&mut arr[..]); + /// ``` + /// + /// [`fill_bytes`]: RngCore::fill_bytes + /// [`try_fill`]: Rng::try_fill + fn fill(&mut self, dest: &mut T) { + dest.try_fill(self).unwrap_or_else(|_| panic!("Rng::fill failed")) + } + + /// Fill any type implementing [`Fill`] with random data + /// + /// The distribution is expected to be uniform with portable results, but + /// this cannot be guaranteed for third-party implementations. + /// + /// This is identical to [`fill`] except that it forwards errors. + /// + /// # Example + /// + /// ``` + /// # use rand::Error; + /// use rand::{thread_rng, Rng}; + /// + /// # fn try_inner() -> Result<(), Error> { + /// let mut arr = [0u64; 4]; + /// thread_rng().try_fill(&mut arr[..])?; + /// # Ok(()) + /// # } + /// + /// # try_inner().unwrap() + /// ``` + /// + /// [`try_fill_bytes`]: RngCore::try_fill_bytes + /// [`fill`]: Rng::fill + fn try_fill(&mut self, dest: &mut T) -> Result<(), Error> { + dest.try_fill(self) + } + + /// Return a bool with a probability `p` of being true. + /// + /// See also the [`Bernoulli`] distribution, which may be faster if + /// sampling from the same probability repeatedly. + /// + /// # Example + /// + /// ``` + /// use rand::{thread_rng, Rng}; + /// + /// let mut rng = thread_rng(); + /// println!("{}", rng.gen_bool(1.0 / 3.0)); + /// ``` + /// + /// # Panics + /// + /// If `p < 0` or `p > 1`. + /// + /// [`Bernoulli`]: distributions::Bernoulli + #[inline] + fn gen_bool(&mut self, p: f64) -> bool { + let d = distributions::Bernoulli::new(p).unwrap(); + self.sample(d) + } + + /// Return a bool with a probability of `numerator/denominator` of being + /// true. I.e. `gen_ratio(2, 3)` has chance of 2 in 3, or about 67%, of + /// returning true. If `numerator == denominator`, then the returned value + /// is guaranteed to be `true`. 
If `numerator == 0`, then the returned + /// value is guaranteed to be `false`. + /// + /// See also the [`Bernoulli`] distribution, which may be faster if + /// sampling from the same `numerator` and `denominator` repeatedly. + /// + /// # Panics + /// + /// If `denominator == 0` or `numerator > denominator`. + /// + /// # Example + /// + /// ``` + /// use rand::{thread_rng, Rng}; + /// + /// let mut rng = thread_rng(); + /// println!("{}", rng.gen_ratio(2, 3)); + /// ``` + /// + /// [`Bernoulli`]: distributions::Bernoulli + #[inline] + fn gen_ratio(&mut self, numerator: u32, denominator: u32) -> bool { + let d = distributions::Bernoulli::from_ratio(numerator, denominator).unwrap(); + self.sample(d) + } +} + +impl Rng for R {} + +/// Types which may be filled with random data +/// +/// This trait allows arrays to be efficiently filled with random data. +/// +/// Implementations are expected to be portable across machines unless +/// clearly documented otherwise (see the +/// [Chapter on Portability](https://rust-random.github.io/book/portability.html)). +pub trait Fill { + /// Fill self with random data + fn try_fill(&mut self, rng: &mut R) -> Result<(), Error>; +} + +macro_rules! impl_fill_each { + () => {}; + ($t:ty) => { + impl Fill for [$t] { + fn try_fill(&mut self, rng: &mut R) -> Result<(), Error> { + for elt in self.iter_mut() { + *elt = rng.gen(); + } + Ok(()) + } + } + }; + ($t:ty, $($tt:ty,)*) => { + impl_fill_each!($t); + impl_fill_each!($($tt,)*); + }; +} + +impl_fill_each!(bool, char, f32, f64,); + +impl Fill for [u8] { + fn try_fill(&mut self, rng: &mut R) -> Result<(), Error> { + rng.try_fill_bytes(self) + } +} + +macro_rules! 
impl_fill { + () => {}; + ($t:ty) => { + impl Fill for [$t] { + #[inline(never)] // in micro benchmarks, this improves performance + fn try_fill(&mut self, rng: &mut R) -> Result<(), Error> { + if self.len() > 0 { + rng.try_fill_bytes(unsafe { + slice::from_raw_parts_mut(self.as_mut_ptr() + as *mut u8, + self.len() * mem::size_of::<$t>() + ) + })?; + for x in self { + *x = x.to_le(); + } + } + Ok(()) + } + } + + impl Fill for [Wrapping<$t>] { + #[inline(never)] + fn try_fill(&mut self, rng: &mut R) -> Result<(), Error> { + if self.len() > 0 { + rng.try_fill_bytes(unsafe { + slice::from_raw_parts_mut(self.as_mut_ptr() + as *mut u8, + self.len() * mem::size_of::<$t>() + ) + })?; + for x in self { + *x = Wrapping(x.0.to_le()); + } + } + Ok(()) + } + } + }; + ($t:ty, $($tt:ty,)*) => { + impl_fill!($t); + // TODO: this could replace above impl once Rust #32463 is fixed + // impl_fill!(Wrapping<$t>); + impl_fill!($($tt,)*); + } +} + +impl_fill!(u16, u32, u64, usize, u128,); +impl_fill!(i8, i16, i32, i64, isize, i128,); + +#[cfg_attr(doc_cfg, doc(cfg(feature = "min_const_gen")))] +#[cfg(feature = "min_const_gen")] +impl Fill for [T; N] +where [T]: Fill +{ + fn try_fill(&mut self, rng: &mut R) -> Result<(), Error> { + self[..].try_fill(rng) + } +} + +#[cfg(not(feature = "min_const_gen"))] +macro_rules! 
impl_fill_arrays { + ($n:expr,) => {}; + ($n:expr, $N:ident) => { + impl Fill for [T; $n] where [T]: Fill { + fn try_fill(&mut self, rng: &mut R) -> Result<(), Error> { + self[..].try_fill(rng) + } + } + }; + ($n:expr, $N:ident, $($NN:ident,)*) => { + impl_fill_arrays!($n, $N); + impl_fill_arrays!($n - 1, $($NN,)*); + }; + (!div $n:expr,) => {}; + (!div $n:expr, $N:ident, $($NN:ident,)*) => { + impl_fill_arrays!($n, $N); + impl_fill_arrays!(!div $n / 2, $($NN,)*); + }; +} +#[cfg(not(feature = "min_const_gen"))] +#[rustfmt::skip] +impl_fill_arrays!(32, N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,); +#[cfg(not(feature = "min_const_gen"))] +impl_fill_arrays!(!div 4096, N,N,N,N,N,N,N,); + +#[cfg(test)] +mod test { + use super::*; + use crate::test::rng; + use crate::rngs::mock::StepRng; + #[cfg(feature = "alloc")] use alloc::boxed::Box; + + #[test] + fn test_fill_bytes_default() { + let mut r = StepRng::new(0x11_22_33_44_55_66_77_88, 0); + + // check every remainder mod 8, both in small and big vectors. + let lengths = [0, 1, 2, 3, 4, 5, 6, 7, 80, 81, 82, 83, 84, 85, 86, 87]; + for &n in lengths.iter() { + let mut buffer = [0u8; 87]; + let v = &mut buffer[0..n]; + r.fill_bytes(v); + + // use this to get nicer error messages. + for (i, &byte) in v.iter().enumerate() { + if byte == 0 { + panic!("byte {} of {} is zero", i, n) + } + } + } + } + + #[test] + fn test_fill() { + let x = 9041086907909331047; // a random u64 + let mut rng = StepRng::new(x, 0); + + // Convert to byte sequence and back to u64; byte-swap twice if BE. 
+ let mut array = [0u64; 2]; + rng.fill(&mut array[..]); + assert_eq!(array, [x, x]); + assert_eq!(rng.next_u64(), x); + + // Convert to bytes then u32 in LE order + let mut array = [0u32; 2]; + rng.fill(&mut array[..]); + assert_eq!(array, [x as u32, (x >> 32) as u32]); + assert_eq!(rng.next_u32(), x as u32); + + // Check equivalence using wrapped arrays + let mut warray = [Wrapping(0u32); 2]; + rng.fill(&mut warray[..]); + assert_eq!(array[0], warray[0].0); + assert_eq!(array[1], warray[1].0); + + // Check equivalence for generated floats + let mut array = [0f32; 2]; + rng.fill(&mut array); + let gen: [f32; 2] = rng.gen(); + assert_eq!(array, gen); + } + + #[test] + fn test_fill_empty() { + let mut array = [0u32; 0]; + let mut rng = StepRng::new(0, 1); + rng.fill(&mut array); + rng.fill(&mut array[..]); + } + + #[test] + fn test_gen_range_int() { + let mut r = rng(101); + for _ in 0..1000 { + let a = r.gen_range(-4711..17); + assert!((-4711..17).contains(&a)); + let a: i8 = r.gen_range(-3..42); + assert!((-3..42).contains(&a)); + let a: u16 = r.gen_range(10..99); + assert!((10..99).contains(&a)); + let a: i32 = r.gen_range(-100..2000); + assert!((-100..2000).contains(&a)); + let a: u32 = r.gen_range(12..=24); + assert!((12..=24).contains(&a)); + + assert_eq!(r.gen_range(0u32..1), 0u32); + assert_eq!(r.gen_range(-12i64..-11), -12i64); + assert_eq!(r.gen_range(3_000_000..3_000_001), 3_000_000); + } + } + + #[test] + fn test_gen_range_float() { + let mut r = rng(101); + for _ in 0..1000 { + let a = r.gen_range(-4.5..1.7); + assert!((-4.5..1.7).contains(&a)); + let a = r.gen_range(-1.1..=-0.3); + assert!((-1.1..=-0.3).contains(&a)); + + assert_eq!(r.gen_range(0.0f32..=0.0), 0.); + assert_eq!(r.gen_range(-11.0..=-11.0), -11.); + assert_eq!(r.gen_range(3_000_000.0..=3_000_000.0), 3_000_000.); + } + } + + #[test] + #[should_panic] + fn test_gen_range_panic_int() { + #![allow(clippy::reversed_empty_ranges)] + let mut r = rng(102); + r.gen_range(5..-2); + } + + #[test] + 
#[should_panic] + fn test_gen_range_panic_usize() { + #![allow(clippy::reversed_empty_ranges)] + let mut r = rng(103); + r.gen_range(5..2); + } + + #[test] + fn test_gen_bool() { + #![allow(clippy::bool_assert_comparison)] + + let mut r = rng(105); + for _ in 0..5 { + assert_eq!(r.gen_bool(0.0), false); + assert_eq!(r.gen_bool(1.0), true); + } + } + + #[test] + fn test_rng_trait_object() { + use crate::distributions::{Distribution, Standard}; + let mut rng = rng(109); + let mut r = &mut rng as &mut dyn RngCore; + r.next_u32(); + r.gen::(); + assert_eq!(r.gen_range(0..1), 0); + let _c: u8 = Standard.sample(&mut r); + } + + #[test] + #[cfg(feature = "alloc")] + fn test_rng_boxed_trait() { + use crate::distributions::{Distribution, Standard}; + let rng = rng(110); + let mut r = Box::new(rng) as Box; + r.next_u32(); + r.gen::(); + assert_eq!(r.gen_range(0..1), 0); + let _c: u8 = Standard.sample(&mut r); + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_gen_ratio_average() { + const NUM: u32 = 3; + const DENOM: u32 = 10; + const N: u32 = 100_000; + + let mut sum: u32 = 0; + let mut rng = rng(111); + for _ in 0..N { + if rng.gen_ratio(NUM, DENOM) { + sum += 1; + } + } + // Have Binomial(N, NUM/DENOM) distribution + let expected = (NUM * N) / DENOM; // exact integer + assert!(((sum - expected) as i32).abs() < 500); + } +} diff --git a/src/rust/vendor/rand/src/rngs/adapter/mod.rs b/src/rust/vendor/rand/src/rngs/adapter/mod.rs new file mode 100644 index 000000000..bd1d29432 --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/adapter/mod.rs @@ -0,0 +1,16 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
Wrappers / adapters forming RNGs + +mod read; +mod reseeding; + +#[allow(deprecated)] +pub use self::read::{ReadError, ReadRng}; +pub use self::reseeding::ReseedingRng; diff --git a/src/rust/vendor/rand/src/rngs/adapter/read.rs b/src/rust/vendor/rand/src/rngs/adapter/read.rs new file mode 100644 index 000000000..25a9ca7fc --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/adapter/read.rs @@ -0,0 +1,150 @@ +// Copyright 2018 Developers of the Rand project. +// Copyright 2013 The Rust Project Developers. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A wrapper around any Read to treat it as an RNG. + +#![allow(deprecated)] + +use std::fmt; +use std::io::Read; + +use rand_core::{impls, Error, RngCore}; + + +/// An RNG that reads random bytes straight from any type supporting +/// [`std::io::Read`], for example files. +/// +/// This will work best with an infinite reader, but that is not required. +/// +/// This can be used with `/dev/urandom` on Unix but it is recommended to use +/// [`OsRng`] instead. +/// +/// # Panics +/// +/// `ReadRng` uses [`std::io::Read::read_exact`], which retries on interrupts. +/// All other errors from the underlying reader, including when it does not +/// have enough data, will only be reported through [`try_fill_bytes`]. +/// The other [`RngCore`] methods will panic in case of an error. +/// +/// [`OsRng`]: crate::rngs::OsRng +/// [`try_fill_bytes`]: RngCore::try_fill_bytes +#[derive(Debug)] +#[deprecated(since="0.8.4", note="removal due to lack of usage")] +pub struct ReadRng { + reader: R, +} + +impl ReadRng { + /// Create a new `ReadRng` from a `Read`. 
+ pub fn new(r: R) -> ReadRng { + ReadRng { reader: r } + } +} + +impl RngCore for ReadRng { + fn next_u32(&mut self) -> u32 { + impls::next_u32_via_fill(self) + } + + fn next_u64(&mut self) -> u64 { + impls::next_u64_via_fill(self) + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.try_fill_bytes(dest).unwrap_or_else(|err| { + panic!( + "reading random bytes from Read implementation failed; error: {}", + err + ) + }); + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + if dest.is_empty() { + return Ok(()); + } + // Use `std::io::read_exact`, which retries on `ErrorKind::Interrupted`. + self.reader + .read_exact(dest) + .map_err(|e| Error::new(ReadError(e))) + } +} + +/// `ReadRng` error type +#[derive(Debug)] +#[deprecated(since="0.8.4")] +pub struct ReadError(std::io::Error); + +impl fmt::Display for ReadError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ReadError: {}", self.0) + } +} + +impl std::error::Error for ReadError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.0) + } +} + + +#[cfg(test)] +mod test { + use std::println; + + use super::ReadRng; + use crate::RngCore; + + #[test] + fn test_reader_rng_u64() { + // transmute from the target to avoid endianness concerns. 
+ #[rustfmt::skip] + let v = [0u8, 0, 0, 0, 0, 0, 0, 1, + 0, 4, 0, 0, 3, 0, 0, 2, + 5, 0, 0, 0, 0, 0, 0, 0]; + let mut rng = ReadRng::new(&v[..]); + + assert_eq!(rng.next_u64(), 1 << 56); + assert_eq!(rng.next_u64(), (2 << 56) + (3 << 32) + (4 << 8)); + assert_eq!(rng.next_u64(), 5); + } + + #[test] + fn test_reader_rng_u32() { + let v = [0u8, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0]; + let mut rng = ReadRng::new(&v[..]); + + assert_eq!(rng.next_u32(), 1 << 24); + assert_eq!(rng.next_u32(), 2 << 16); + assert_eq!(rng.next_u32(), 3); + } + + #[test] + fn test_reader_rng_fill_bytes() { + let v = [1u8, 2, 3, 4, 5, 6, 7, 8]; + let mut w = [0u8; 8]; + + let mut rng = ReadRng::new(&v[..]); + rng.fill_bytes(&mut w); + + assert!(v == w); + } + + #[test] + fn test_reader_rng_insufficient_bytes() { + let v = [1u8, 2, 3, 4, 5, 6, 7, 8]; + let mut w = [0u8; 9]; + + let mut rng = ReadRng::new(&v[..]); + + let result = rng.try_fill_bytes(&mut w); + assert!(result.is_err()); + println!("Error: {}", result.unwrap_err()); + } +} diff --git a/src/rust/vendor/rand/src/rngs/adapter/reseeding.rs b/src/rust/vendor/rand/src/rngs/adapter/reseeding.rs new file mode 100644 index 000000000..ae3fcbb2f --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/adapter/reseeding.rs @@ -0,0 +1,386 @@ +// Copyright 2018 Developers of the Rand project. +// Copyright 2013 The Rust Project Developers. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A wrapper around another PRNG that reseeds it after it +//! generates a certain number of random bytes. + +use core::mem::size_of; + +use rand_core::block::{BlockRng, BlockRngCore}; +use rand_core::{CryptoRng, Error, RngCore, SeedableRng}; + +/// A wrapper around any PRNG that implements [`BlockRngCore`], that adds the +/// ability to reseed it. 
+/// +/// `ReseedingRng` reseeds the underlying PRNG in the following cases: +/// +/// - On a manual call to [`reseed()`]. +/// - After `clone()`, the clone will be reseeded on first use. +/// - When a process is forked on UNIX, the RNGs in both the parent and child +/// processes will be reseeded just before the next call to +/// [`BlockRngCore::generate`], i.e. "soon". For ChaCha and Hc128 this is a +/// maximum of fifteen `u32` values before reseeding. +/// - After the PRNG has generated a configurable number of random bytes. +/// +/// # When should reseeding after a fixed number of generated bytes be used? +/// +/// Reseeding after a fixed number of generated bytes is never strictly +/// *necessary*. Cryptographic PRNGs don't have a limited number of bytes they +/// can output, or at least not a limit reachable in any practical way. There is +/// no such thing as 'running out of entropy'. +/// +/// Occasionally reseeding can be seen as some form of 'security in depth'. Even +/// if in the future a cryptographic weakness is found in the CSPRNG being used, +/// or a flaw in the implementation, occasionally reseeding should make +/// exploiting it much more difficult or even impossible. +/// +/// Use [`ReseedingRng::new`] with a `threshold` of `0` to disable reseeding +/// after a fixed number of generated bytes. +/// +/// # Limitations +/// +/// It is recommended that a `ReseedingRng` (including `ThreadRng`) not be used +/// from a fork handler. +/// Use `OsRng` or `getrandom`, or defer your use of the RNG until later. +/// +/// # Error handling +/// +/// Although unlikely, reseeding the wrapped PRNG can fail. `ReseedingRng` will +/// never panic but try to handle the error intelligently through some +/// combination of retrying and delaying reseeding until later. +/// If handling the source error fails `ReseedingRng` will continue generating +/// data from the wrapped PRNG without reseeding. 
+/// +/// Manually calling [`reseed()`] will not have this retry or delay logic, but +/// reports the error. +/// +/// # Example +/// +/// ``` +/// use rand::prelude::*; +/// use rand_chacha::ChaCha20Core; // Internal part of ChaChaRng that +/// // implements BlockRngCore +/// use rand::rngs::OsRng; +/// use rand::rngs::adapter::ReseedingRng; +/// +/// let prng = ChaCha20Core::from_entropy(); +/// let mut reseeding_rng = ReseedingRng::new(prng, 0, OsRng); +/// +/// println!("{}", reseeding_rng.gen::()); +/// +/// let mut cloned_rng = reseeding_rng.clone(); +/// assert!(reseeding_rng.gen::() != cloned_rng.gen::()); +/// ``` +/// +/// [`BlockRngCore`]: rand_core::block::BlockRngCore +/// [`ReseedingRng::new`]: ReseedingRng::new +/// [`reseed()`]: ReseedingRng::reseed +#[derive(Debug)] +pub struct ReseedingRng(BlockRng>) +where + R: BlockRngCore + SeedableRng, + Rsdr: RngCore; + +impl ReseedingRng +where + R: BlockRngCore + SeedableRng, + Rsdr: RngCore, +{ + /// Create a new `ReseedingRng` from an existing PRNG, combined with a RNG + /// to use as reseeder. + /// + /// `threshold` sets the number of generated bytes after which to reseed the + /// PRNG. Set it to zero to never reseed based on the number of generated + /// values. + pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self { + ReseedingRng(BlockRng::new(ReseedingCore::new(rng, threshold, reseeder))) + } + + /// Reseed the internal PRNG. 
+ pub fn reseed(&mut self) -> Result<(), Error> { + self.0.core.reseed() + } +} + +// TODO: this should be implemented for any type where the inner type +// implements RngCore, but we can't specify that because ReseedingCore is private +impl RngCore for ReseedingRng +where + R: BlockRngCore + SeedableRng, + ::Results: AsRef<[u32]> + AsMut<[u32]>, +{ + #[inline(always)] + fn next_u32(&mut self) -> u32 { + self.0.next_u32() + } + + #[inline(always)] + fn next_u64(&mut self) -> u64 { + self.0.next_u64() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.0.fill_bytes(dest) + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + self.0.try_fill_bytes(dest) + } +} + +impl Clone for ReseedingRng +where + R: BlockRngCore + SeedableRng + Clone, + Rsdr: RngCore + Clone, +{ + fn clone(&self) -> ReseedingRng { + // Recreating `BlockRng` seems easier than cloning it and resetting + // the index. + ReseedingRng(BlockRng::new(self.0.core.clone())) + } +} + +impl CryptoRng for ReseedingRng +where + R: BlockRngCore + SeedableRng + CryptoRng, + Rsdr: RngCore + CryptoRng, +{ +} + +#[derive(Debug)] +struct ReseedingCore { + inner: R, + reseeder: Rsdr, + threshold: i64, + bytes_until_reseed: i64, + fork_counter: usize, +} + +impl BlockRngCore for ReseedingCore +where + R: BlockRngCore + SeedableRng, + Rsdr: RngCore, +{ + type Item = ::Item; + type Results = ::Results; + + fn generate(&mut self, results: &mut Self::Results) { + let global_fork_counter = fork::get_fork_counter(); + if self.bytes_until_reseed <= 0 || self.is_forked(global_fork_counter) { + // We get better performance by not calling only `reseed` here + // and continuing with the rest of the function, but by directly + // returning from a non-inlined function. 
+ return self.reseed_and_generate(results, global_fork_counter); + } + let num_bytes = results.as_ref().len() * size_of::(); + self.bytes_until_reseed -= num_bytes as i64; + self.inner.generate(results); + } +} + +impl ReseedingCore +where + R: BlockRngCore + SeedableRng, + Rsdr: RngCore, +{ + /// Create a new `ReseedingCore`. + fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self { + use ::core::i64::MAX; + fork::register_fork_handler(); + + // Because generating more values than `i64::MAX` takes centuries on + // current hardware, we just clamp to that value. + // Also we set a threshold of 0, which indicates no limit, to that + // value. + let threshold = if threshold == 0 { + MAX + } else if threshold <= MAX as u64 { + threshold as i64 + } else { + MAX + }; + + ReseedingCore { + inner: rng, + reseeder, + threshold: threshold as i64, + bytes_until_reseed: threshold as i64, + fork_counter: 0, + } + } + + /// Reseed the internal PRNG. + fn reseed(&mut self) -> Result<(), Error> { + R::from_rng(&mut self.reseeder).map(|result| { + self.bytes_until_reseed = self.threshold; + self.inner = result + }) + } + + fn is_forked(&self, global_fork_counter: usize) -> bool { + // In theory, on 32-bit platforms, it is possible for + // `global_fork_counter` to wrap around after ~4e9 forks. + // + // This check will detect a fork in the normal case where + // `fork_counter < global_fork_counter`, and also when the difference + // between both is greater than `isize::MAX` (wrapped around). + // + // It will still fail to detect a fork if there have been more than + // `isize::MAX` forks, without any reseed in between. Seems unlikely + // enough. 
+ (self.fork_counter.wrapping_sub(global_fork_counter) as isize) < 0 + } + + #[inline(never)] + fn reseed_and_generate( + &mut self, results: &mut ::Results, global_fork_counter: usize, + ) { + #![allow(clippy::if_same_then_else)] // false positive + if self.is_forked(global_fork_counter) { + info!("Fork detected, reseeding RNG"); + } else { + trace!("Reseeding RNG (periodic reseed)"); + } + + let num_bytes = results.as_ref().len() * size_of::<::Item>(); + + if let Err(e) = self.reseed() { + warn!("Reseeding RNG failed: {}", e); + let _ = e; + } + self.fork_counter = global_fork_counter; + + self.bytes_until_reseed = self.threshold - num_bytes as i64; + self.inner.generate(results); + } +} + +impl Clone for ReseedingCore +where + R: BlockRngCore + SeedableRng + Clone, + Rsdr: RngCore + Clone, +{ + fn clone(&self) -> ReseedingCore { + ReseedingCore { + inner: self.inner.clone(), + reseeder: self.reseeder.clone(), + threshold: self.threshold, + bytes_until_reseed: 0, // reseed clone on first use + fork_counter: self.fork_counter, + } + } +} + +impl CryptoRng for ReseedingCore +where + R: BlockRngCore + SeedableRng + CryptoRng, + Rsdr: RngCore + CryptoRng, +{ +} + + +#[cfg(all(unix, not(target_os = "emscripten")))] +mod fork { + use core::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::Once; + + // Fork protection + // + // We implement fork protection on Unix using `pthread_atfork`. + // When the process is forked, we increment `RESEEDING_RNG_FORK_COUNTER`. + // Every `ReseedingRng` stores the last known value of the static in + // `fork_counter`. If the cached `fork_counter` is less than + // `RESEEDING_RNG_FORK_COUNTER`, it is time to reseed this RNG. + // + // If reseeding fails, we don't deal with this by setting a delay, but just + // don't update `fork_counter`, so a reseed is attempted as soon as + // possible. 
+ + static RESEEDING_RNG_FORK_COUNTER: AtomicUsize = AtomicUsize::new(0); + + pub fn get_fork_counter() -> usize { + RESEEDING_RNG_FORK_COUNTER.load(Ordering::Relaxed) + } + + extern "C" fn fork_handler() { + // Note: fetch_add is defined to wrap on overflow + // (which is what we want). + RESEEDING_RNG_FORK_COUNTER.fetch_add(1, Ordering::Relaxed); + } + + pub fn register_fork_handler() { + static REGISTER: Once = Once::new(); + REGISTER.call_once(|| { + // Bump the counter before and after forking (see #1169): + let ret = unsafe { libc::pthread_atfork( + Some(fork_handler), + Some(fork_handler), + Some(fork_handler), + ) }; + if ret != 0 { + panic!("libc::pthread_atfork failed with code {}", ret); + } + }); + } +} + +#[cfg(not(all(unix, not(target_os = "emscripten"))))] +mod fork { + pub fn get_fork_counter() -> usize { + 0 + } + pub fn register_fork_handler() {} +} + + +#[cfg(feature = "std_rng")] +#[cfg(test)] +mod test { + use super::ReseedingRng; + use crate::rngs::mock::StepRng; + use crate::rngs::std::Core; + use crate::{Rng, SeedableRng}; + + #[test] + fn test_reseeding() { + let mut zero = StepRng::new(0, 0); + let rng = Core::from_rng(&mut zero).unwrap(); + let thresh = 1; // reseed every time the buffer is exhausted + let mut reseeding = ReseedingRng::new(rng, thresh, zero); + + // RNG buffer size is [u32; 64] + // Debug is only implemented up to length 32 so use two arrays + let mut buf = ([0u32; 32], [0u32; 32]); + reseeding.fill(&mut buf.0); + reseeding.fill(&mut buf.1); + let seq = buf; + for _ in 0..10 { + reseeding.fill(&mut buf.0); + reseeding.fill(&mut buf.1); + assert_eq!(buf, seq); + } + } + + #[test] + fn test_clone_reseeding() { + #![allow(clippy::redundant_clone)] + + let mut zero = StepRng::new(0, 0); + let rng = Core::from_rng(&mut zero).unwrap(); + let mut rng1 = ReseedingRng::new(rng, 32 * 4, zero); + + let first: u32 = rng1.gen(); + for _ in 0..10 { + let _ = rng1.gen::(); + } + + let mut rng2 = rng1.clone(); + assert_eq!(first, 
rng2.gen::()); + } +} diff --git a/src/rust/vendor/rand/src/rngs/mock.rs b/src/rust/vendor/rand/src/rngs/mock.rs new file mode 100644 index 000000000..a1745a490 --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/mock.rs @@ -0,0 +1,87 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Mock random number generator + +use rand_core::{impls, Error, RngCore}; + +#[cfg(feature = "serde1")] +use serde::{Serialize, Deserialize}; + +/// A simple implementation of `RngCore` for testing purposes. +/// +/// This generates an arithmetic sequence (i.e. adds a constant each step) +/// over a `u64` number, using wrapping arithmetic. If the increment is 0 +/// the generator yields a constant. +/// +/// ``` +/// use rand::Rng; +/// use rand::rngs::mock::StepRng; +/// +/// let mut my_rng = StepRng::new(2, 1); +/// let sample: [u64; 3] = my_rng.gen(); +/// assert_eq!(sample, [2, 3, 4]); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub struct StepRng { + v: u64, + a: u64, +} + +impl StepRng { + /// Create a `StepRng`, yielding an arithmetic sequence starting with + /// `initial` and incremented by `increment` each time. 
+ pub fn new(initial: u64, increment: u64) -> Self { + StepRng { + v: initial, + a: increment, + } + } +} + +impl RngCore for StepRng { + #[inline] + fn next_u32(&mut self) -> u32 { + self.next_u64() as u32 + } + + #[inline] + fn next_u64(&mut self) -> u64 { + let result = self.v; + self.v = self.v.wrapping_add(self.a); + result + } + + #[inline] + fn fill_bytes(&mut self, dest: &mut [u8]) { + impls::fill_bytes_via_next(self, dest); + } + + #[inline] + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + self.fill_bytes(dest); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + #[test] + #[cfg(feature = "serde1")] + fn test_serialization_step_rng() { + use super::StepRng; + + let some_rng = StepRng::new(42, 7); + let de_some_rng: StepRng = + bincode::deserialize(&bincode::serialize(&some_rng).unwrap()).unwrap(); + assert_eq!(some_rng.v, de_some_rng.v); + assert_eq!(some_rng.a, de_some_rng.a); + + } +} diff --git a/src/rust/vendor/rand/src/rngs/mod.rs b/src/rust/vendor/rand/src/rngs/mod.rs new file mode 100644 index 000000000..ac3c2c595 --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/mod.rs @@ -0,0 +1,119 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Random number generators and adapters +//! +//! ## Background: Random number generators (RNGs) +//! +//! Computers cannot produce random numbers from nowhere. We classify +//! random number generators as follows: +//! +//! - "True" random number generators (TRNGs) use hard-to-predict data sources +//! (e.g. the high-resolution parts of event timings and sensor jitter) to +//! harvest random bit-sequences, apply algorithms to remove bias and +//! estimate available entropy, then combine these bits into a byte-sequence +//! or an entropy pool. This job is usually done by the operating system or +//! 
a hardware generator (HRNG). +//! - "Pseudo"-random number generators (PRNGs) use algorithms to transform a +//! seed into a sequence of pseudo-random numbers. These generators can be +//! fast and produce well-distributed unpredictable random numbers (or not). +//! They are usually deterministic: given algorithm and seed, the output +//! sequence can be reproduced. They have finite period and eventually loop; +//! with many algorithms this period is fixed and can be proven sufficiently +//! long, while others are chaotic and the period depends on the seed. +//! - "Cryptographically secure" pseudo-random number generators (CSPRNGs) +//! are the sub-set of PRNGs which are secure. Security of the generator +//! relies both on hiding the internal state and using a strong algorithm. +//! +//! ## Traits and functionality +//! +//! All RNGs implement the [`RngCore`] trait, as a consequence of which the +//! [`Rng`] extension trait is automatically implemented. Secure RNGs may +//! additionally implement the [`CryptoRng`] trait. +//! +//! All PRNGs require a seed to produce their random number sequence. The +//! [`SeedableRng`] trait provides three ways of constructing PRNGs: +//! +//! - `from_seed` accepts a type specific to the PRNG +//! - `from_rng` allows a PRNG to be seeded from any other RNG +//! - `seed_from_u64` allows any PRNG to be seeded from a `u64` insecurely +//! - `from_entropy` securely seeds a PRNG from fresh entropy +//! +//! Use the [`rand_core`] crate when implementing your own RNGs. +//! +//! ## Our generators +//! +//! This crate provides several random number generators: +//! +//! - [`OsRng`] is an interface to the operating system's random number +//! source. Typically the operating system uses a CSPRNG with entropy +//! provided by a TRNG and some type of on-going re-seeding. +//! - [`ThreadRng`], provided by the [`thread_rng`] function, is a handle to a +//! thread-local CSPRNG with periodic seeding from [`OsRng`]. Because this +//! 
is local, it is typically much faster than [`OsRng`]. It should be +//! secure, though the paranoid may prefer [`OsRng`]. +//! - [`StdRng`] is a CSPRNG chosen for good performance and trust of security +//! (based on reviews, maturity and usage). The current algorithm is ChaCha12, +//! which is well established and rigorously analysed. +//! [`StdRng`] provides the algorithm used by [`ThreadRng`] but without +//! periodic reseeding. +//! - [`SmallRng`] is an **insecure** PRNG designed to be fast, simple, require +//! little memory, and have good output quality. +//! +//! The algorithms selected for [`StdRng`] and [`SmallRng`] may change in any +//! release and may be platform-dependent, therefore they should be considered +//! **not reproducible**. +//! +//! ## Additional generators +//! +//! **TRNGs**: The [`rdrand`] crate provides an interface to the RDRAND and +//! RDSEED instructions available in modern Intel and AMD CPUs. +//! The [`rand_jitter`] crate provides a user-space implementation of +//! entropy harvesting from CPU timer jitter, but is very slow and has +//! [security issues](https://github.com/rust-random/rand/issues/699). +//! +//! **PRNGs**: Several companion crates are available, providing individual or +//! families of PRNG algorithms. These provide the implementations behind +//! [`StdRng`] and [`SmallRng`] but can also be used directly, indeed *should* +//! be used directly when **reproducibility** matters. +//! Some suggestions are: [`rand_chacha`], [`rand_pcg`], [`rand_xoshiro`]. +//! A full list can be found by searching for crates with the [`rng` tag]. +//! +//! [`Rng`]: crate::Rng +//! [`RngCore`]: crate::RngCore +//! [`CryptoRng`]: crate::CryptoRng +//! [`SeedableRng`]: crate::SeedableRng +//! [`thread_rng`]: crate::thread_rng +//! [`rdrand`]: https://crates.io/crates/rdrand +//! [`rand_jitter`]: https://crates.io/crates/rand_jitter +//! [`rand_chacha`]: https://crates.io/crates/rand_chacha +//! 
[`rand_pcg`]: https://crates.io/crates/rand_pcg +//! [`rand_xoshiro`]: https://crates.io/crates/rand_xoshiro +//! [`rng` tag]: https://crates.io/keywords/rng + +#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] +#[cfg(feature = "std")] pub mod adapter; + +pub mod mock; // Public so we don't export `StepRng` directly, making it a bit + // more clear it is intended for testing. + +#[cfg(all(feature = "small_rng", target_pointer_width = "64"))] +mod xoshiro256plusplus; +#[cfg(all(feature = "small_rng", not(target_pointer_width = "64")))] +mod xoshiro128plusplus; +#[cfg(feature = "small_rng")] mod small; + +#[cfg(feature = "std_rng")] mod std; +#[cfg(all(feature = "std", feature = "std_rng"))] pub(crate) mod thread; + +#[cfg(feature = "small_rng")] pub use self::small::SmallRng; +#[cfg(feature = "std_rng")] pub use self::std::StdRng; +#[cfg(all(feature = "std", feature = "std_rng"))] pub use self::thread::ThreadRng; + +#[cfg_attr(doc_cfg, doc(cfg(feature = "getrandom")))] +#[cfg(feature = "getrandom")] pub use rand_core::OsRng; diff --git a/src/rust/vendor/rand/src/rngs/small.rs b/src/rust/vendor/rand/src/rngs/small.rs new file mode 100644 index 000000000..fb0e0d119 --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/small.rs @@ -0,0 +1,117 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A small fast RNG + +use rand_core::{Error, RngCore, SeedableRng}; + +#[cfg(target_pointer_width = "64")] +type Rng = super::xoshiro256plusplus::Xoshiro256PlusPlus; +#[cfg(not(target_pointer_width = "64"))] +type Rng = super::xoshiro128plusplus::Xoshiro128PlusPlus; + +/// A small-state, fast non-crypto PRNG +/// +/// `SmallRng` may be a good choice when a PRNG with small state, cheap +/// initialization, good statistical quality and good performance are required. 
+/// Note that depending on the application, [`StdRng`] may be faster on many +/// modern platforms while providing higher-quality randomness. Furthermore, +/// `SmallRng` is **not** a good choice when: +/// - Security against prediction is important. Use [`StdRng`] instead. +/// - Seeds with many zeros are provided. In such cases, it takes `SmallRng` +/// about 10 samples to produce 0 and 1 bits with equal probability. Either +/// provide seeds with an approximately equal number of 0 and 1 (for example +/// by using [`SeedableRng::from_entropy`] or [`SeedableRng::seed_from_u64`]), +/// or use [`StdRng`] instead. +/// +/// The algorithm is deterministic but should not be considered reproducible +/// due to dependence on platform and possible replacement in future +/// library versions. For a reproducible generator, use a named PRNG from an +/// external crate, e.g. [rand_xoshiro] or [rand_chacha]. +/// Refer also to [The Book](https://rust-random.github.io/book/guide-rngs.html). +/// +/// The PRNG algorithm in `SmallRng` is chosen to be efficient on the current +/// platform, without consideration for cryptography or security. The size of +/// its state is much smaller than [`StdRng`]. The current algorithm is +/// `Xoshiro256PlusPlus` on 64-bit platforms and `Xoshiro128PlusPlus` on 32-bit +/// platforms. Both are also implemented by the [rand_xoshiro] crate. +/// +/// # Examples +/// +/// Initializing `SmallRng` with a random seed can be done using [`SeedableRng::from_entropy`]: +/// +/// ``` +/// use rand::{Rng, SeedableRng}; +/// use rand::rngs::SmallRng; +/// +/// // Create small, cheap to initialize and fast RNG with a random seed. +/// // The randomness is supplied by the operating system. 
+/// let mut small_rng = SmallRng::from_entropy(); +/// # let v: u32 = small_rng.gen(); +/// ``` +/// +/// When initializing a lot of `SmallRng`'s, using [`thread_rng`] can be more +/// efficient: +/// +/// ``` +/// use rand::{SeedableRng, thread_rng}; +/// use rand::rngs::SmallRng; +/// +/// // Create a big, expensive to initialize and slower, but unpredictable RNG. +/// // This is cached and done only once per thread. +/// let mut thread_rng = thread_rng(); +/// // Create small, cheap to initialize and fast RNGs with random seeds. +/// // One can generally assume this won't fail. +/// let rngs: Vec = (0..10) +/// .map(|_| SmallRng::from_rng(&mut thread_rng).unwrap()) +/// .collect(); +/// ``` +/// +/// [`StdRng`]: crate::rngs::StdRng +/// [`thread_rng`]: crate::thread_rng +/// [rand_chacha]: https://crates.io/crates/rand_chacha +/// [rand_xoshiro]: https://crates.io/crates/rand_xoshiro +#[cfg_attr(doc_cfg, doc(cfg(feature = "small_rng")))] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct SmallRng(Rng); + +impl RngCore for SmallRng { + #[inline(always)] + fn next_u32(&mut self) -> u32 { + self.0.next_u32() + } + + #[inline(always)] + fn next_u64(&mut self) -> u64 { + self.0.next_u64() + } + + #[inline(always)] + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.0.fill_bytes(dest); + } + + #[inline(always)] + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + self.0.try_fill_bytes(dest) + } +} + +impl SeedableRng for SmallRng { + type Seed = ::Seed; + + #[inline(always)] + fn from_seed(seed: Self::Seed) -> Self { + SmallRng(Rng::from_seed(seed)) + } + + #[inline(always)] + fn from_rng(rng: R) -> Result { + Rng::from_rng(rng).map(SmallRng) + } +} diff --git a/src/rust/vendor/rand/src/rngs/std.rs b/src/rust/vendor/rand/src/rngs/std.rs new file mode 100644 index 000000000..cdae8fab0 --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/std.rs @@ -0,0 +1,98 @@ +// Copyright 2018 Developers of the Rand project. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The standard RNG + +use crate::{CryptoRng, Error, RngCore, SeedableRng}; + +pub(crate) use rand_chacha::ChaCha12Core as Core; + +use rand_chacha::ChaCha12Rng as Rng; + +/// The standard RNG. The PRNG algorithm in `StdRng` is chosen to be efficient +/// on the current platform, to be statistically strong and unpredictable +/// (meaning a cryptographically secure PRNG). +/// +/// The current algorithm used is the ChaCha block cipher with 12 rounds. Please +/// see this relevant [rand issue] for the discussion. This may change as new +/// evidence of cipher security and performance becomes available. +/// +/// The algorithm is deterministic but should not be considered reproducible +/// due to dependence on configuration and possible replacement in future +/// library versions. For a secure reproducible generator, we recommend use of +/// the [rand_chacha] crate directly. 
+/// +/// [rand_chacha]: https://crates.io/crates/rand_chacha +/// [rand issue]: https://github.com/rust-random/rand/issues/932 +#[cfg_attr(doc_cfg, doc(cfg(feature = "std_rng")))] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct StdRng(Rng); + +impl RngCore for StdRng { + #[inline(always)] + fn next_u32(&mut self) -> u32 { + self.0.next_u32() + } + + #[inline(always)] + fn next_u64(&mut self) -> u64 { + self.0.next_u64() + } + + #[inline(always)] + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.0.fill_bytes(dest); + } + + #[inline(always)] + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + self.0.try_fill_bytes(dest) + } +} + +impl SeedableRng for StdRng { + type Seed = ::Seed; + + #[inline(always)] + fn from_seed(seed: Self::Seed) -> Self { + StdRng(Rng::from_seed(seed)) + } + + #[inline(always)] + fn from_rng(rng: R) -> Result { + Rng::from_rng(rng).map(StdRng) + } +} + +impl CryptoRng for StdRng {} + + +#[cfg(test)] +mod test { + use crate::rngs::StdRng; + use crate::{RngCore, SeedableRng}; + + #[test] + fn test_stdrng_construction() { + // Test value-stability of StdRng. This is expected to break any time + // the algorithm is changed. + #[rustfmt::skip] + let seed = [1,0,0,0, 23,0,0,0, 200,1,0,0, 210,30,0,0, + 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0]; + + let target = [10719222850664546238, 14064965282130556830]; + + let mut rng0 = StdRng::from_seed(seed); + let x0 = rng0.next_u64(); + + let mut rng1 = StdRng::from_rng(rng0).unwrap(); + let x1 = rng1.next_u64(); + + assert_eq!([x0, x1], target); + } +} diff --git a/src/rust/vendor/rand/src/rngs/thread.rs b/src/rust/vendor/rand/src/rngs/thread.rs new file mode 100644 index 000000000..baebb1d99 --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/thread.rs @@ -0,0 +1,143 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Thread-local random number generator + +use core::cell::UnsafeCell; +use std::rc::Rc; +use std::thread_local; + +use super::std::Core; +use crate::rngs::adapter::ReseedingRng; +use crate::rngs::OsRng; +use crate::{CryptoRng, Error, RngCore, SeedableRng}; + +// Rationale for using `UnsafeCell` in `ThreadRng`: +// +// Previously we used a `RefCell`, with an overhead of ~15%. There will only +// ever be one mutable reference to the interior of the `UnsafeCell`, because +// we only have such a reference inside `next_u32`, `next_u64`, etc. Within a +// single thread (which is the definition of `ThreadRng`), there will only ever +// be one of these methods active at a time. +// +// A possible scenario where there could be multiple mutable references is if +// `ThreadRng` is used inside `next_u32` and co. But the implementation is +// completely under our control. We just have to ensure none of them use +// `ThreadRng` internally, which is nonsensical anyway. We should also never run +// `ThreadRng` in destructors of its implementation, which is also nonsensical. + + +// Number of generated bytes after which to reseed `ThreadRng`. +// According to benchmarks, reseeding has a noticeable impact with thresholds +// of 32 kB and less. We choose 64 kB to avoid significant overhead. +const THREAD_RNG_RESEED_THRESHOLD: u64 = 1024 * 64; + +/// A reference to the thread-local generator +/// +/// An instance can be obtained via [`thread_rng`] or via `ThreadRng::default()`. +/// This handle is safe to use everywhere (including thread-local destructors), +/// though it is recommended not to use inside a fork handler. +/// The handle cannot be passed between threads (is not `Send` or `Sync`). +/// +/// `ThreadRng` uses the same PRNG as [`StdRng`] for security and performance +/// and is automatically seeded from [`OsRng`]. 
+/// +/// Unlike `StdRng`, `ThreadRng` uses the [`ReseedingRng`] wrapper to reseed +/// the PRNG from fresh entropy every 64 kiB of random data as well as after a +/// fork on Unix (though not quite immediately; see documentation of +/// [`ReseedingRng`]). +/// Note that the reseeding is done as an extra precaution against side-channel +/// attacks and mis-use (e.g. if somehow weak entropy were supplied initially). +/// The PRNG algorithms used are assumed to be secure. +/// +/// [`ReseedingRng`]: crate::rngs::adapter::ReseedingRng +/// [`StdRng`]: crate::rngs::StdRng +#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", feature = "std_rng"))))] +#[derive(Clone, Debug)] +pub struct ThreadRng { + // Rc is explicitly !Send and !Sync + rng: Rc>>, +} + +thread_local!( + // We require Rc<..> to avoid premature freeing when thread_rng is used + // within thread-local destructors. See #968. + static THREAD_RNG_KEY: Rc>> = { + let r = Core::from_rng(OsRng).unwrap_or_else(|err| + panic!("could not initialize thread_rng: {}", err)); + let rng = ReseedingRng::new(r, + THREAD_RNG_RESEED_THRESHOLD, + OsRng); + Rc::new(UnsafeCell::new(rng)) + } +); + +/// Retrieve the lazily-initialized thread-local random number generator, +/// seeded by the system. Intended to be used in method chaining style, +/// e.g. `thread_rng().gen::()`, or cached locally, e.g. +/// `let mut rng = thread_rng();`. Invoked by the `Default` trait, making +/// `ThreadRng::default()` equivalent. +/// +/// For more information see [`ThreadRng`]. 
+#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", feature = "std_rng"))))] +pub fn thread_rng() -> ThreadRng { + let rng = THREAD_RNG_KEY.with(|t| t.clone()); + ThreadRng { rng } +} + +impl Default for ThreadRng { + fn default() -> ThreadRng { + crate::prelude::thread_rng() + } +} + +impl RngCore for ThreadRng { + #[inline(always)] + fn next_u32(&mut self) -> u32 { + // SAFETY: We must make sure to stop using `rng` before anyone else + // creates another mutable reference + let rng = unsafe { &mut *self.rng.get() }; + rng.next_u32() + } + + #[inline(always)] + fn next_u64(&mut self) -> u64 { + // SAFETY: We must make sure to stop using `rng` before anyone else + // creates another mutable reference + let rng = unsafe { &mut *self.rng.get() }; + rng.next_u64() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + // SAFETY: We must make sure to stop using `rng` before anyone else + // creates another mutable reference + let rng = unsafe { &mut *self.rng.get() }; + rng.fill_bytes(dest) + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + // SAFETY: We must make sure to stop using `rng` before anyone else + // creates another mutable reference + let rng = unsafe { &mut *self.rng.get() }; + rng.try_fill_bytes(dest) + } +} + +impl CryptoRng for ThreadRng {} + + +#[cfg(test)] +mod test { + #[test] + fn test_thread_rng() { + use crate::Rng; + let mut r = crate::thread_rng(); + r.gen::(); + assert_eq!(r.gen_range(0..1), 0); + } +} diff --git a/src/rust/vendor/rand/src/rngs/xoshiro128plusplus.rs b/src/rust/vendor/rand/src/rngs/xoshiro128plusplus.rs new file mode 100644 index 000000000..ece98fafd --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/xoshiro128plusplus.rs @@ -0,0 +1,118 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#[cfg(feature="serde1")] use serde::{Serialize, Deserialize}; +use rand_core::impls::{next_u64_via_u32, fill_bytes_via_next}; +use rand_core::le::read_u32_into; +use rand_core::{SeedableRng, RngCore, Error}; + +/// A xoshiro128++ random number generator. +/// +/// The xoshiro128++ algorithm is not suitable for cryptographic purposes, but +/// is very fast and has excellent statistical properties. +/// +/// The algorithm used here is translated from [the `xoshiro128plusplus.c` +/// reference source code](http://xoshiro.di.unimi.it/xoshiro128plusplus.c) by +/// David Blackman and Sebastiano Vigna. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))] +pub struct Xoshiro128PlusPlus { + s: [u32; 4], +} + +impl SeedableRng for Xoshiro128PlusPlus { + type Seed = [u8; 16]; + + /// Create a new `Xoshiro128PlusPlus`. If `seed` is entirely 0, it will be + /// mapped to a different seed. + #[inline] + fn from_seed(seed: [u8; 16]) -> Xoshiro128PlusPlus { + if seed.iter().all(|&x| x == 0) { + return Self::seed_from_u64(0); + } + let mut state = [0; 4]; + read_u32_into(&seed, &mut state); + Xoshiro128PlusPlus { s: state } + } + + /// Create a new `Xoshiro128PlusPlus` from a `u64` seed. + /// + /// This uses the SplitMix64 generator internally. 
+ fn seed_from_u64(mut state: u64) -> Self { + const PHI: u64 = 0x9e3779b97f4a7c15; + let mut seed = Self::Seed::default(); + for chunk in seed.as_mut().chunks_mut(8) { + state = state.wrapping_add(PHI); + let mut z = state; + z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9); + z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb); + z = z ^ (z >> 31); + chunk.copy_from_slice(&z.to_le_bytes()); + } + Self::from_seed(seed) + } +} + +impl RngCore for Xoshiro128PlusPlus { + #[inline] + fn next_u32(&mut self) -> u32 { + let result_starstar = self.s[0] + .wrapping_add(self.s[3]) + .rotate_left(7) + .wrapping_add(self.s[0]); + + let t = self.s[1] << 9; + + self.s[2] ^= self.s[0]; + self.s[3] ^= self.s[1]; + self.s[1] ^= self.s[2]; + self.s[0] ^= self.s[3]; + + self.s[2] ^= t; + + self.s[3] = self.s[3].rotate_left(11); + + result_starstar + } + + #[inline] + fn next_u64(&mut self) -> u64 { + next_u64_via_u32(self) + } + + #[inline] + fn fill_bytes(&mut self, dest: &mut [u8]) { + fill_bytes_via_next(self, dest); + } + + #[inline] + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + self.fill_bytes(dest); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn reference() { + let mut rng = Xoshiro128PlusPlus::from_seed( + [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]); + // These values were produced with the reference implementation: + // http://xoshiro.di.unimi.it/xoshiro128plusplus.c + let expected = [ + 641, 1573767, 3222811527, 3517856514, 836907274, 4247214768, + 3867114732, 1355841295, 495546011, 621204420, + ]; + for &e in &expected { + assert_eq!(rng.next_u32(), e); + } + } +} diff --git a/src/rust/vendor/rand/src/rngs/xoshiro256plusplus.rs b/src/rust/vendor/rand/src/rngs/xoshiro256plusplus.rs new file mode 100644 index 000000000..8ffb18b80 --- /dev/null +++ b/src/rust/vendor/rand/src/rngs/xoshiro256plusplus.rs @@ -0,0 +1,122 @@ +// Copyright 2018 Developers of the Rand project. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[cfg(feature="serde1")] use serde::{Serialize, Deserialize}; +use rand_core::impls::fill_bytes_via_next; +use rand_core::le::read_u64_into; +use rand_core::{SeedableRng, RngCore, Error}; + +/// A xoshiro256++ random number generator. +/// +/// The xoshiro256++ algorithm is not suitable for cryptographic purposes, but +/// is very fast and has excellent statistical properties. +/// +/// The algorithm used here is translated from [the `xoshiro256plusplus.c` +/// reference source code](http://xoshiro.di.unimi.it/xoshiro256plusplus.c) by +/// David Blackman and Sebastiano Vigna. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))] +pub struct Xoshiro256PlusPlus { + s: [u64; 4], +} + +impl SeedableRng for Xoshiro256PlusPlus { + type Seed = [u8; 32]; + + /// Create a new `Xoshiro256PlusPlus`. If `seed` is entirely 0, it will be + /// mapped to a different seed. + #[inline] + fn from_seed(seed: [u8; 32]) -> Xoshiro256PlusPlus { + if seed.iter().all(|&x| x == 0) { + return Self::seed_from_u64(0); + } + let mut state = [0; 4]; + read_u64_into(&seed, &mut state); + Xoshiro256PlusPlus { s: state } + } + + /// Create a new `Xoshiro256PlusPlus` from a `u64` seed. + /// + /// This uses the SplitMix64 generator internally. 
+ fn seed_from_u64(mut state: u64) -> Self { + const PHI: u64 = 0x9e3779b97f4a7c15; + let mut seed = Self::Seed::default(); + for chunk in seed.as_mut().chunks_mut(8) { + state = state.wrapping_add(PHI); + let mut z = state; + z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9); + z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb); + z = z ^ (z >> 31); + chunk.copy_from_slice(&z.to_le_bytes()); + } + Self::from_seed(seed) + } +} + +impl RngCore for Xoshiro256PlusPlus { + #[inline] + fn next_u32(&mut self) -> u32 { + // The lowest bits have some linear dependencies, so we use the + // upper bits instead. + (self.next_u64() >> 32) as u32 + } + + #[inline] + fn next_u64(&mut self) -> u64 { + let result_plusplus = self.s[0] + .wrapping_add(self.s[3]) + .rotate_left(23) + .wrapping_add(self.s[0]); + + let t = self.s[1] << 17; + + self.s[2] ^= self.s[0]; + self.s[3] ^= self.s[1]; + self.s[1] ^= self.s[2]; + self.s[0] ^= self.s[3]; + + self.s[2] ^= t; + + self.s[3] = self.s[3].rotate_left(45); + + result_plusplus + } + + #[inline] + fn fill_bytes(&mut self, dest: &mut [u8]) { + fill_bytes_via_next(self, dest); + } + + #[inline] + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + self.fill_bytes(dest); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn reference() { + let mut rng = Xoshiro256PlusPlus::from_seed( + [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0]); + // These values were produced with the reference implementation: + // http://xoshiro.di.unimi.it/xoshiro256plusplus.c + let expected = [ + 41943041, 58720359, 3588806011781223, 3591011842654386, + 9228616714210784205, 9973669472204895162, 14011001112246962877, + 12406186145184390807, 15849039046786891736, 10450023813501588000, + ]; + for &e in &expected { + assert_eq!(rng.next_u64(), e); + } + } +} diff --git a/src/rust/vendor/rand/src/seq/index.rs b/src/rust/vendor/rand/src/seq/index.rs new file mode 100644 
index 000000000..b38e4649d --- /dev/null +++ b/src/rust/vendor/rand/src/seq/index.rs @@ -0,0 +1,678 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Low-level API for sampling indices + +#[cfg(feature = "alloc")] use core::slice; + +#[cfg(feature = "alloc")] use alloc::vec::{self, Vec}; +// BTreeMap is not as fast in tests, but better than nothing. +#[cfg(all(feature = "alloc", not(feature = "std")))] +use alloc::collections::BTreeSet; +#[cfg(feature = "std")] use std::collections::HashSet; + +#[cfg(feature = "std")] +use crate::distributions::WeightedError; + +#[cfg(feature = "alloc")] +use crate::{Rng, distributions::{uniform::SampleUniform, Distribution, Uniform}}; + +#[cfg(feature = "serde1")] +use serde::{Serialize, Deserialize}; + +/// A vector of indices. +/// +/// Multiple internal representations are possible. +#[derive(Clone, Debug)] +#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))] +pub enum IndexVec { + #[doc(hidden)] + U32(Vec), + #[doc(hidden)] + USize(Vec), +} + +impl IndexVec { + /// Returns the number of indices + #[inline] + pub fn len(&self) -> usize { + match *self { + IndexVec::U32(ref v) => v.len(), + IndexVec::USize(ref v) => v.len(), + } + } + + /// Returns `true` if the length is 0. + #[inline] + pub fn is_empty(&self) -> bool { + match *self { + IndexVec::U32(ref v) => v.is_empty(), + IndexVec::USize(ref v) => v.is_empty(), + } + } + + /// Return the value at the given `index`. + /// + /// (Note: we cannot implement [`std::ops::Index`] because of lifetime + /// restrictions.) + #[inline] + pub fn index(&self, index: usize) -> usize { + match *self { + IndexVec::U32(ref v) => v[index] as usize, + IndexVec::USize(ref v) => v[index], + } + } + + /// Return result as a `Vec`. Conversion may or may not be trivial. 
+ #[inline] + pub fn into_vec(self) -> Vec { + match self { + IndexVec::U32(v) => v.into_iter().map(|i| i as usize).collect(), + IndexVec::USize(v) => v, + } + } + + /// Iterate over the indices as a sequence of `usize` values + #[inline] + pub fn iter(&self) -> IndexVecIter<'_> { + match *self { + IndexVec::U32(ref v) => IndexVecIter::U32(v.iter()), + IndexVec::USize(ref v) => IndexVecIter::USize(v.iter()), + } + } +} + +impl IntoIterator for IndexVec { + type Item = usize; + type IntoIter = IndexVecIntoIter; + + /// Convert into an iterator over the indices as a sequence of `usize` values + #[inline] + fn into_iter(self) -> IndexVecIntoIter { + match self { + IndexVec::U32(v) => IndexVecIntoIter::U32(v.into_iter()), + IndexVec::USize(v) => IndexVecIntoIter::USize(v.into_iter()), + } + } +} + +impl PartialEq for IndexVec { + fn eq(&self, other: &IndexVec) -> bool { + use self::IndexVec::*; + match (self, other) { + (&U32(ref v1), &U32(ref v2)) => v1 == v2, + (&USize(ref v1), &USize(ref v2)) => v1 == v2, + (&U32(ref v1), &USize(ref v2)) => { + (v1.len() == v2.len()) && (v1.iter().zip(v2.iter()).all(|(x, y)| *x as usize == *y)) + } + (&USize(ref v1), &U32(ref v2)) => { + (v1.len() == v2.len()) && (v1.iter().zip(v2.iter()).all(|(x, y)| *x == *y as usize)) + } + } + } +} + +impl From> for IndexVec { + #[inline] + fn from(v: Vec) -> Self { + IndexVec::U32(v) + } +} + +impl From> for IndexVec { + #[inline] + fn from(v: Vec) -> Self { + IndexVec::USize(v) + } +} + +/// Return type of `IndexVec::iter`. 
+#[derive(Debug)] +pub enum IndexVecIter<'a> { + #[doc(hidden)] + U32(slice::Iter<'a, u32>), + #[doc(hidden)] + USize(slice::Iter<'a, usize>), +} + +impl<'a> Iterator for IndexVecIter<'a> { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + use self::IndexVecIter::*; + match *self { + U32(ref mut iter) => iter.next().map(|i| *i as usize), + USize(ref mut iter) => iter.next().cloned(), + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + match *self { + IndexVecIter::U32(ref v) => v.size_hint(), + IndexVecIter::USize(ref v) => v.size_hint(), + } + } +} + +impl<'a> ExactSizeIterator for IndexVecIter<'a> {} + +/// Return type of `IndexVec::into_iter`. +#[derive(Clone, Debug)] +pub enum IndexVecIntoIter { + #[doc(hidden)] + U32(vec::IntoIter), + #[doc(hidden)] + USize(vec::IntoIter), +} + +impl Iterator for IndexVecIntoIter { + type Item = usize; + + #[inline] + fn next(&mut self) -> Option { + use self::IndexVecIntoIter::*; + match *self { + U32(ref mut v) => v.next().map(|i| i as usize), + USize(ref mut v) => v.next(), + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + use self::IndexVecIntoIter::*; + match *self { + U32(ref v) => v.size_hint(), + USize(ref v) => v.size_hint(), + } + } +} + +impl ExactSizeIterator for IndexVecIntoIter {} + + +/// Randomly sample exactly `amount` distinct indices from `0..length`, and +/// return them in random order (fully shuffled). +/// +/// This method is used internally by the slice sampling methods, but it can +/// sometimes be useful to have the indices themselves so this is provided as +/// an alternative. +/// +/// The implementation used is not specified; we automatically select the +/// fastest available algorithm for the `length` and `amount` parameters +/// (based on detailed profiling on an Intel Haswell CPU). 
Roughly speaking, +/// complexity is `O(amount)`, except that when `amount` is small, performance +/// is closer to `O(amount^2)`, and when `length` is close to `amount` then +/// `O(length)`. +/// +/// Note that performance is significantly better over `u32` indices than over +/// `u64` indices. Because of this we hide the underlying type behind an +/// abstraction, `IndexVec`. +/// +/// If an allocation-free `no_std` function is required, it is suggested +/// to adapt the internal `sample_floyd` implementation. +/// +/// Panics if `amount > length`. +pub fn sample(rng: &mut R, length: usize, amount: usize) -> IndexVec +where R: Rng + ?Sized { + if amount > length { + panic!("`amount` of samples must be less than or equal to `length`"); + } + if length > (::core::u32::MAX as usize) { + // We never want to use inplace here, but could use floyd's alg + // Lazy version: always use the cache alg. + return sample_rejection(rng, length, amount); + } + let amount = amount as u32; + let length = length as u32; + + // Choice of algorithm here depends on both length and amount. See: + // https://github.com/rust-random/rand/pull/479 + // We do some calculations with f32. Accuracy is not very important. 
+ + if amount < 163 { + const C: [[f32; 2]; 2] = [[1.6, 8.0 / 45.0], [10.0, 70.0 / 9.0]]; + let j = if length < 500_000 { 0 } else { 1 }; + let amount_fp = amount as f32; + let m4 = C[0][j] * amount_fp; + // Short-cut: when amount < 12, floyd's is always faster + if amount > 11 && (length as f32) < (C[1][j] + m4) * amount_fp { + sample_inplace(rng, length, amount) + } else { + sample_floyd(rng, length, amount) + } + } else { + const C: [f32; 2] = [270.0, 330.0 / 9.0]; + let j = if length < 500_000 { 0 } else { 1 }; + if (length as f32) < C[j] * (amount as f32) { + sample_inplace(rng, length, amount) + } else { + sample_rejection(rng, length, amount) + } + } +} + +/// Randomly sample exactly `amount` distinct indices from `0..length`, and +/// return them in an arbitrary order (there is no guarantee of shuffling or +/// ordering). The weights are to be provided by the input function `weights`, +/// which will be called once for each index. +/// +/// This method is used internally by the slice sampling methods, but it can +/// sometimes be useful to have the indices themselves so this is provided as +/// an alternative. +/// +/// This implementation uses `O(length + amount)` space and `O(length)` time +/// if the "nightly" feature is enabled, or `O(length)` space and +/// `O(length + amount * log length)` time otherwise. +/// +/// Panics if `amount > length`. 
+#[cfg(feature = "std")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] +pub fn sample_weighted( + rng: &mut R, length: usize, weight: F, amount: usize, +) -> Result +where + R: Rng + ?Sized, + F: Fn(usize) -> X, + X: Into, +{ + if length > (core::u32::MAX as usize) { + sample_efraimidis_spirakis(rng, length, weight, amount) + } else { + assert!(amount <= core::u32::MAX as usize); + let amount = amount as u32; + let length = length as u32; + sample_efraimidis_spirakis(rng, length, weight, amount) + } +} + + +/// Randomly sample exactly `amount` distinct indices from `0..length`, and +/// return them in an arbitrary order (there is no guarantee of shuffling or +/// ordering). The weights are to be provided by the input function `weights`, +/// which will be called once for each index. +/// +/// This implementation uses the algorithm described by Efraimidis and Spirakis +/// in this paper: https://doi.org/10.1016/j.ipl.2005.11.003 +/// It uses `O(length + amount)` space and `O(length)` time if the +/// "nightly" feature is enabled, or `O(length)` space and `O(length +/// + amount * log length)` time otherwise. +/// +/// Panics if `amount > length`. 
+#[cfg(feature = "std")] +fn sample_efraimidis_spirakis( + rng: &mut R, length: N, weight: F, amount: N, +) -> Result +where + R: Rng + ?Sized, + F: Fn(usize) -> X, + X: Into, + N: UInt, + IndexVec: From>, +{ + if amount == N::zero() { + return Ok(IndexVec::U32(Vec::new())); + } + + if amount > length { + panic!("`amount` of samples must be less than or equal to `length`"); + } + + struct Element { + index: N, + key: f64, + } + impl PartialOrd for Element { + fn partial_cmp(&self, other: &Self) -> Option { + self.key.partial_cmp(&other.key) + } + } + impl Ord for Element { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + // partial_cmp will always produce a value, + // because we check that the weights are not nan + self.partial_cmp(other).unwrap() + } + } + impl PartialEq for Element { + fn eq(&self, other: &Self) -> bool { + self.key == other.key + } + } + impl Eq for Element {} + + #[cfg(feature = "nightly")] + { + let mut candidates = Vec::with_capacity(length.as_usize()); + let mut index = N::zero(); + while index < length { + let weight = weight(index.as_usize()).into(); + if !(weight >= 0.) { + return Err(WeightedError::InvalidWeight); + } + + let key = rng.gen::().powf(1.0 / weight); + candidates.push(Element { index, key }); + + index += N::one(); + } + + // Partially sort the array to find the `amount` elements with the greatest + // keys. Do this by using `select_nth_unstable` to put the elements with + // the *smallest* keys at the beginning of the list in `O(n)` time, which + // provides equivalent information about the elements with the *greatest* keys. 
+ let (_, mid, greater) + = candidates.select_nth_unstable(length.as_usize() - amount.as_usize()); + + let mut result: Vec = Vec::with_capacity(amount.as_usize()); + result.push(mid.index); + for element in greater { + result.push(element.index); + } + Ok(IndexVec::from(result)) + } + + #[cfg(not(feature = "nightly"))] + { + use alloc::collections::BinaryHeap; + + // Partially sort the array such that the `amount` elements with the largest + // keys are first using a binary max heap. + let mut candidates = BinaryHeap::with_capacity(length.as_usize()); + let mut index = N::zero(); + while index < length { + let weight = weight(index.as_usize()).into(); + if !(weight >= 0.) { + return Err(WeightedError::InvalidWeight); + } + + let key = rng.gen::().powf(1.0 / weight); + candidates.push(Element { index, key }); + + index += N::one(); + } + + let mut result: Vec = Vec::with_capacity(amount.as_usize()); + while result.len() < amount.as_usize() { + result.push(candidates.pop().unwrap().index); + } + Ok(IndexVec::from(result)) + } +} + +/// Randomly sample exactly `amount` indices from `0..length`, using Floyd's +/// combination algorithm. +/// +/// The output values are fully shuffled. (Overhead is under 50%.) +/// +/// This implementation uses `O(amount)` memory and `O(amount^2)` time. +fn sample_floyd(rng: &mut R, length: u32, amount: u32) -> IndexVec +where R: Rng + ?Sized { + // For small amount we use Floyd's fully-shuffled variant. For larger + // amounts this is slow due to Vec::insert performance, so we shuffle + // afterwards. Benchmarks show little overhead from extra logic. 
+ let floyd_shuffle = amount < 50; + + debug_assert!(amount <= length); + let mut indices = Vec::with_capacity(amount as usize); + for j in length - amount..length { + let t = rng.gen_range(0..=j); + if floyd_shuffle { + if let Some(pos) = indices.iter().position(|&x| x == t) { + indices.insert(pos, j); + continue; + } + } else if indices.contains(&t) { + indices.push(j); + continue; + } + indices.push(t); + } + if !floyd_shuffle { + // Reimplement SliceRandom::shuffle with smaller indices + for i in (1..amount).rev() { + // invariant: elements with index > i have been locked in place. + indices.swap(i as usize, rng.gen_range(0..=i) as usize); + } + } + IndexVec::from(indices) +} + +/// Randomly sample exactly `amount` indices from `0..length`, using an inplace +/// partial Fisher-Yates method. +/// Sample an amount of indices using an inplace partial fisher yates method. +/// +/// This allocates the entire `length` of indices and randomizes only the first `amount`. +/// It then truncates to `amount` and returns. +/// +/// This method is not appropriate for large `length` and potentially uses a lot +/// of memory; because of this we only implement for `u32` index (which improves +/// performance in all cases). +/// +/// Set-up is `O(length)` time and memory and shuffling is `O(amount)` time. 
+fn sample_inplace(rng: &mut R, length: u32, amount: u32) -> IndexVec +where R: Rng + ?Sized { + debug_assert!(amount <= length); + let mut indices: Vec = Vec::with_capacity(length as usize); + indices.extend(0..length); + for i in 0..amount { + let j: u32 = rng.gen_range(i..length); + indices.swap(i as usize, j as usize); + } + indices.truncate(amount as usize); + debug_assert_eq!(indices.len(), amount as usize); + IndexVec::from(indices) +} + +trait UInt: Copy + PartialOrd + Ord + PartialEq + Eq + SampleUniform + + core::hash::Hash + core::ops::AddAssign { + fn zero() -> Self; + fn one() -> Self; + fn as_usize(self) -> usize; +} +impl UInt for u32 { + #[inline] + fn zero() -> Self { + 0 + } + + #[inline] + fn one() -> Self { + 1 + } + + #[inline] + fn as_usize(self) -> usize { + self as usize + } +} +impl UInt for usize { + #[inline] + fn zero() -> Self { + 0 + } + + #[inline] + fn one() -> Self { + 1 + } + + #[inline] + fn as_usize(self) -> usize { + self + } +} + +/// Randomly sample exactly `amount` indices from `0..length`, using rejection +/// sampling. +/// +/// Since `amount <<< length` there is a low chance of a random sample in +/// `0..length` being a duplicate. We test for duplicates and resample where +/// necessary. The algorithm is `O(amount)` time and memory. +/// +/// This function is generic over X primarily so that results are value-stable +/// over 32-bit and 64-bit platforms. 
+fn sample_rejection(rng: &mut R, length: X, amount: X) -> IndexVec +where + R: Rng + ?Sized, + IndexVec: From>, +{ + debug_assert!(amount < length); + #[cfg(feature = "std")] + let mut cache = HashSet::with_capacity(amount.as_usize()); + #[cfg(not(feature = "std"))] + let mut cache = BTreeSet::new(); + let distr = Uniform::new(X::zero(), length); + let mut indices = Vec::with_capacity(amount.as_usize()); + for _ in 0..amount.as_usize() { + let mut pos = distr.sample(rng); + while !cache.insert(pos) { + pos = distr.sample(rng); + } + indices.push(pos); + } + + debug_assert_eq!(indices.len(), amount.as_usize()); + IndexVec::from(indices) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + #[cfg(feature = "serde1")] + fn test_serialization_index_vec() { + let some_index_vec = IndexVec::from(vec![254_usize, 234, 2, 1]); + let de_some_index_vec: IndexVec = bincode::deserialize(&bincode::serialize(&some_index_vec).unwrap()).unwrap(); + match (some_index_vec, de_some_index_vec) { + (IndexVec::U32(a), IndexVec::U32(b)) => { + assert_eq!(a, b); + }, + (IndexVec::USize(a), IndexVec::USize(b)) => { + assert_eq!(a, b); + }, + _ => {panic!("failed to seralize/deserialize `IndexVec`")} + } + } + + #[cfg(feature = "alloc")] use alloc::vec; + + #[test] + fn test_sample_boundaries() { + let mut r = crate::test::rng(404); + + assert_eq!(sample_inplace(&mut r, 0, 0).len(), 0); + assert_eq!(sample_inplace(&mut r, 1, 0).len(), 0); + assert_eq!(sample_inplace(&mut r, 1, 1).into_vec(), vec![0]); + + assert_eq!(sample_rejection(&mut r, 1u32, 0).len(), 0); + + assert_eq!(sample_floyd(&mut r, 0, 0).len(), 0); + assert_eq!(sample_floyd(&mut r, 1, 0).len(), 0); + assert_eq!(sample_floyd(&mut r, 1, 1).into_vec(), vec![0]); + + // These algorithms should be fast with big numbers. Test average. 
+ let sum: usize = sample_rejection(&mut r, 1 << 25, 10u32).into_iter().sum(); + assert!(1 << 25 < sum && sum < (1 << 25) * 25); + + let sum: usize = sample_floyd(&mut r, 1 << 25, 10).into_iter().sum(); + assert!(1 << 25 < sum && sum < (1 << 25) * 25); + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_sample_alg() { + let seed_rng = crate::test::rng; + + // We can't test which algorithm is used directly, but Floyd's alg + // should produce different results from the others. (Also, `inplace` + // and `cached` currently use different sizes thus produce different results.) + + // A small length and relatively large amount should use inplace + let (length, amount): (usize, usize) = (100, 50); + let v1 = sample(&mut seed_rng(420), length, amount); + let v2 = sample_inplace(&mut seed_rng(420), length as u32, amount as u32); + assert!(v1.iter().all(|e| e < length)); + assert_eq!(v1, v2); + + // Test Floyd's alg does produce different results + let v3 = sample_floyd(&mut seed_rng(420), length as u32, amount as u32); + assert!(v1 != v3); + + // A large length and small amount should use Floyd + let (length, amount): (usize, usize) = (1 << 20, 50); + let v1 = sample(&mut seed_rng(421), length, amount); + let v2 = sample_floyd(&mut seed_rng(421), length as u32, amount as u32); + assert!(v1.iter().all(|e| e < length)); + assert_eq!(v1, v2); + + // A large length and larger amount should use cache + let (length, amount): (usize, usize) = (1 << 20, 600); + let v1 = sample(&mut seed_rng(422), length, amount); + let v2 = sample_rejection(&mut seed_rng(422), length as u32, amount as u32); + assert!(v1.iter().all(|e| e < length)); + assert_eq!(v1, v2); + } + + #[cfg(feature = "std")] + #[test] + fn test_sample_weighted() { + let seed_rng = crate::test::rng; + for &(amount, len) in &[(0, 10), (5, 10), (10, 10)] { + let v = sample_weighted(&mut seed_rng(423), len, |i| i as f64, amount).unwrap(); + match v { + IndexVec::U32(mut indices) => { + 
assert_eq!(indices.len(), amount); + indices.sort_unstable(); + indices.dedup(); + assert_eq!(indices.len(), amount); + for &i in &indices { + assert!((i as usize) < len); + } + }, + IndexVec::USize(_) => panic!("expected `IndexVec::U32`"), + } + } + } + + #[test] + fn value_stability_sample() { + let do_test = |length, amount, values: &[u32]| { + let mut buf = [0u32; 8]; + let mut rng = crate::test::rng(410); + + let res = sample(&mut rng, length, amount); + let len = res.len().min(buf.len()); + for (x, y) in res.into_iter().zip(buf.iter_mut()) { + *y = x as u32; + } + assert_eq!( + &buf[0..len], + values, + "failed sampling {}, {}", + length, + amount + ); + }; + + do_test(10, 6, &[8, 0, 3, 5, 9, 6]); // floyd + do_test(25, 10, &[18, 15, 14, 9, 0, 13, 5, 24]); // floyd + do_test(300, 8, &[30, 283, 150, 1, 73, 13, 285, 35]); // floyd + do_test(300, 80, &[31, 289, 248, 154, 5, 78, 19, 286]); // inplace + do_test(300, 180, &[31, 289, 248, 154, 5, 78, 19, 286]); // inplace + + do_test(1_000_000, 8, &[ + 103717, 963485, 826422, 509101, 736394, 807035, 5327, 632573, + ]); // floyd + do_test(1_000_000, 180, &[ + 103718, 963490, 826426, 509103, 736396, 807036, 5327, 632573, + ]); // rejection + } +} diff --git a/src/rust/vendor/rand/src/seq/mod.rs b/src/rust/vendor/rand/src/seq/mod.rs new file mode 100644 index 000000000..069e9e6b1 --- /dev/null +++ b/src/rust/vendor/rand/src/seq/mod.rs @@ -0,0 +1,1356 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Sequence-related functionality +//! +//! This module provides: +//! +//! * [`SliceRandom`] slice sampling and mutation +//! * [`IteratorRandom`] iterator sampling +//! * [`index::sample`] low-level API to choose multiple indices from +//! `0..length` +//! +//! Also see: +//! +//! 
* [`crate::distributions::WeightedIndex`] distribution which provides +//! weighted index sampling. +//! +//! In order to make results reproducible across 32-64 bit architectures, all +//! `usize` indices are sampled as a `u32` where possible (also providing a +//! small performance boost in some cases). + + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +pub mod index; + +#[cfg(feature = "alloc")] use core::ops::Index; + +#[cfg(feature = "alloc")] use alloc::vec::Vec; + +#[cfg(feature = "alloc")] +use crate::distributions::uniform::{SampleBorrow, SampleUniform}; +#[cfg(feature = "alloc")] use crate::distributions::WeightedError; +use crate::Rng; + +/// Extension trait on slices, providing random mutation and sampling methods. +/// +/// This trait is implemented on all `[T]` slice types, providing several +/// methods for choosing and shuffling elements. You must `use` this trait: +/// +/// ``` +/// use rand::seq::SliceRandom; +/// +/// let mut rng = rand::thread_rng(); +/// let mut bytes = "Hello, random!".to_string().into_bytes(); +/// bytes.shuffle(&mut rng); +/// let str = String::from_utf8(bytes).unwrap(); +/// println!("{}", str); +/// ``` +/// Example output (non-deterministic): +/// ```none +/// l,nmroHado !le +/// ``` +pub trait SliceRandom { + /// The element type. + type Item; + + /// Returns a reference to one random element of the slice, or `None` if the + /// slice is empty. + /// + /// For slices, complexity is `O(1)`. + /// + /// # Example + /// + /// ``` + /// use rand::thread_rng; + /// use rand::seq::SliceRandom; + /// + /// let choices = [1, 2, 4, 8, 16, 32]; + /// let mut rng = thread_rng(); + /// println!("{:?}", choices.choose(&mut rng)); + /// assert_eq!(choices[..0].choose(&mut rng), None); + /// ``` + fn choose(&self, rng: &mut R) -> Option<&Self::Item> + where R: Rng + ?Sized; + + /// Returns a mutable reference to one random element of the slice, or + /// `None` if the slice is empty. 
+ /// + /// For slices, complexity is `O(1)`. + fn choose_mut(&mut self, rng: &mut R) -> Option<&mut Self::Item> + where R: Rng + ?Sized; + + /// Chooses `amount` elements from the slice at random, without repetition, + /// and in random order. The returned iterator is appropriate both for + /// collection into a `Vec` and filling an existing buffer (see example). + /// + /// In case this API is not sufficiently flexible, use [`index::sample`]. + /// + /// For slices, complexity is the same as [`index::sample`]. + /// + /// # Example + /// ``` + /// use rand::seq::SliceRandom; + /// + /// let mut rng = &mut rand::thread_rng(); + /// let sample = "Hello, audience!".as_bytes(); + /// + /// // collect the results into a vector: + /// let v: Vec = sample.choose_multiple(&mut rng, 3).cloned().collect(); + /// + /// // store in a buffer: + /// let mut buf = [0u8; 5]; + /// for (b, slot) in sample.choose_multiple(&mut rng, buf.len()).zip(buf.iter_mut()) { + /// *slot = *b; + /// } + /// ``` + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + fn choose_multiple(&self, rng: &mut R, amount: usize) -> SliceChooseIter + where R: Rng + ?Sized; + + /// Similar to [`choose`], but where the likelihood of each outcome may be + /// specified. + /// + /// The specified function `weight` maps each item `x` to a relative + /// likelihood `weight(x)`. The probability of each item being selected is + /// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`. + /// + /// For slices of length `n`, complexity is `O(n)`. + /// See also [`choose_weighted_mut`], [`distributions::weighted`]. 
+ /// + /// # Example + /// + /// ``` + /// use rand::prelude::*; + /// + /// let choices = [('a', 2), ('b', 1), ('c', 1)]; + /// let mut rng = thread_rng(); + /// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c' + /// println!("{:?}", choices.choose_weighted(&mut rng, |item| item.1).unwrap().0); + /// ``` + /// [`choose`]: SliceRandom::choose + /// [`choose_weighted_mut`]: SliceRandom::choose_weighted_mut + /// [`distributions::weighted`]: crate::distributions::weighted + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + fn choose_weighted( + &self, rng: &mut R, weight: F, + ) -> Result<&Self::Item, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> B, + B: SampleBorrow, + X: SampleUniform + + for<'a> ::core::ops::AddAssign<&'a X> + + ::core::cmp::PartialOrd + + Clone + + Default; + + /// Similar to [`choose_mut`], but where the likelihood of each outcome may + /// be specified. + /// + /// The specified function `weight` maps each item `x` to a relative + /// likelihood `weight(x)`. The probability of each item being selected is + /// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`. + /// + /// For slices of length `n`, complexity is `O(n)`. + /// See also [`choose_weighted`], [`distributions::weighted`]. + /// + /// [`choose_mut`]: SliceRandom::choose_mut + /// [`choose_weighted`]: SliceRandom::choose_weighted + /// [`distributions::weighted`]: crate::distributions::weighted + #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + fn choose_weighted_mut( + &mut self, rng: &mut R, weight: F, + ) -> Result<&mut Self::Item, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> B, + B: SampleBorrow, + X: SampleUniform + + for<'a> ::core::ops::AddAssign<&'a X> + + ::core::cmp::PartialOrd + + Clone + + Default; + + /// Similar to [`choose_multiple`], but where the likelihood of each element's + /// inclusion in the output may be specified. 
The elements are returned in an + /// arbitrary, unspecified order. + /// + /// The specified function `weight` maps each item `x` to a relative + /// likelihood `weight(x)`. The probability of each item being selected is + /// therefore `weight(x) / s`, where `s` is the sum of all `weight(x)`. + /// + /// If all of the weights are equal, even if they are all zero, each element has + /// an equal likelihood of being selected. + /// + /// The complexity of this method depends on the feature `partition_at_index`. + /// If the feature is enabled, then for slices of length `n`, the complexity + /// is `O(n)` space and `O(n)` time. Otherwise, the complexity is `O(n)` space and + /// `O(n * log amount)` time. + /// + /// # Example + /// + /// ``` + /// use rand::prelude::*; + /// + /// let choices = [('a', 2), ('b', 1), ('c', 1)]; + /// let mut rng = thread_rng(); + /// // First Draw * Second Draw = total odds + /// // ----------------------- + /// // (50% * 50%) + (25% * 67%) = 41.7% chance that the output is `['a', 'b']` in some order. + /// // (50% * 50%) + (25% * 67%) = 41.7% chance that the output is `['a', 'c']` in some order. + /// // (25% * 33%) + (25% * 33%) = 16.6% chance that the output is `['b', 'c']` in some order. + /// println!("{:?}", choices.choose_multiple_weighted(&mut rng, 2, |item| item.1).unwrap().collect::>()); + /// ``` + /// [`choose_multiple`]: SliceRandom::choose_multiple + // + // Note: this is feature-gated on std due to usage of f64::powf. + // If necessary, we may use alloc+libm as an alternative (see PR #1089). + #[cfg(feature = "std")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] + fn choose_multiple_weighted( + &self, rng: &mut R, amount: usize, weight: F, + ) -> Result, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> X, + X: Into; + + /// Shuffle a mutable slice in place. + /// + /// For slices of length `n`, complexity is `O(n)`. 
+ /// + /// # Example + /// + /// ``` + /// use rand::seq::SliceRandom; + /// use rand::thread_rng; + /// + /// let mut rng = thread_rng(); + /// let mut y = [1, 2, 3, 4, 5]; + /// println!("Unshuffled: {:?}", y); + /// y.shuffle(&mut rng); + /// println!("Shuffled: {:?}", y); + /// ``` + fn shuffle(&mut self, rng: &mut R) + where R: Rng + ?Sized; + + /// Shuffle a slice in place, but exit early. + /// + /// Returns two mutable slices from the source slice. The first contains + /// `amount` elements randomly permuted. The second has the remaining + /// elements that are not fully shuffled. + /// + /// This is an efficient method to select `amount` elements at random from + /// the slice, provided the slice may be mutated. + /// + /// If you only need to choose elements randomly and `amount > self.len()/2` + /// then you may improve performance by taking + /// `amount = values.len() - amount` and using only the second slice. + /// + /// If `amount` is greater than the number of elements in the slice, this + /// will perform a full shuffle. + /// + /// For slices, complexity is `O(m)` where `m = amount`. + fn partial_shuffle( + &mut self, rng: &mut R, amount: usize, + ) -> (&mut [Self::Item], &mut [Self::Item]) + where R: Rng + ?Sized; +} + +/// Extension trait on iterators, providing random sampling methods. +/// +/// This trait is implemented on all iterators `I` where `I: Iterator + Sized` +/// and provides methods for +/// choosing one or more elements. You must `use` this trait: +/// +/// ``` +/// use rand::seq::IteratorRandom; +/// +/// let mut rng = rand::thread_rng(); +/// +/// let faces = "😀😎😐😕😠😢"; +/// println!("I am {}!", faces.chars().choose(&mut rng).unwrap()); +/// ``` +/// Example output (non-deterministic): +/// ```none +/// I am 😀! +/// ``` +pub trait IteratorRandom: Iterator + Sized { + /// Choose one element at random from the iterator. + /// + /// Returns `None` if and only if the iterator is empty. 
+ /// + /// This method uses [`Iterator::size_hint`] for optimisation. With an + /// accurate hint and where [`Iterator::nth`] is a constant-time operation + /// this method can offer `O(1)` performance. Where no size hint is + /// available, complexity is `O(n)` where `n` is the iterator length. + /// Partial hints (where `lower > 0`) also improve performance. + /// + /// Note that the output values and the number of RNG samples used + /// depends on size hints. In particular, `Iterator` combinators that don't + /// change the values yielded but change the size hints may result in + /// `choose` returning different elements. If you want consistent results + /// and RNG usage consider using [`IteratorRandom::choose_stable`]. + fn choose(mut self, rng: &mut R) -> Option + where R: Rng + ?Sized { + let (mut lower, mut upper) = self.size_hint(); + let mut consumed = 0; + let mut result = None; + + // Handling for this condition outside the loop allows the optimizer to eliminate the loop + // when the Iterator is an ExactSizeIterator. This has a large performance impact on e.g. + // seq_iter_choose_from_1000. + if upper == Some(lower) { + return if lower == 0 { + None + } else { + self.nth(gen_index(rng, lower)) + }; + } + + // Continue until the iterator is exhausted + loop { + if lower > 1 { + let ix = gen_index(rng, lower + consumed); + let skip = if ix < lower { + result = self.nth(ix); + lower - (ix + 1) + } else { + lower + }; + if upper == Some(lower) { + return result; + } + consumed += lower; + if skip > 0 { + self.nth(skip - 1); + } + } else { + let elem = self.next(); + if elem.is_none() { + return result; + } + consumed += 1; + if gen_index(rng, consumed) == 0 { + result = elem; + } + } + + let hint = self.size_hint(); + lower = hint.0; + upper = hint.1; + } + } + + /// Choose one element at random from the iterator. + /// + /// Returns `None` if and only if the iterator is empty. 
+ /// + /// This method is very similar to [`choose`] except that the result + /// only depends on the length of the iterator and the values produced by + /// `rng`. Notably for any iterator of a given length this will make the + /// same requests to `rng` and if the same sequence of values are produced + /// the same index will be selected from `self`. This may be useful if you + /// need consistent results no matter what type of iterator you are working + /// with. If you do not need this stability prefer [`choose`]. + /// + /// Note that this method still uses [`Iterator::size_hint`] to skip + /// constructing elements where possible, however the selection and `rng` + /// calls are the same in the face of this optimization. If you want to + /// force every element to be created regardless call `.inspect(|e| ())`. + /// + /// [`choose`]: IteratorRandom::choose + fn choose_stable(mut self, rng: &mut R) -> Option + where R: Rng + ?Sized { + let mut consumed = 0; + let mut result = None; + + loop { + // Currently the only way to skip elements is `nth()`. So we need to + // store what index to access next here. + // This should be replaced by `advance_by()` once it is stable: + // https://github.com/rust-lang/rust/issues/77404 + let mut next = 0; + + let (lower, _) = self.size_hint(); + if lower >= 2 { + let highest_selected = (0..lower) + .filter(|ix| gen_index(rng, consumed+ix+1) == 0) + .last(); + + consumed += lower; + next = lower; + + if let Some(ix) = highest_selected { + result = self.nth(ix); + next -= ix + 1; + debug_assert!(result.is_some(), "iterator shorter than size_hint().0"); + } + } + + let elem = self.nth(next); + if elem.is_none() { + return result + } + + if gen_index(rng, consumed+1) == 0 { + result = elem; + } + consumed += 1; + } + } + + /// Collects values at random from the iterator into a supplied buffer + /// until that buffer is filled. 
+ /// + /// Although the elements are selected randomly, the order of elements in + /// the buffer is neither stable nor fully random. If random ordering is + /// desired, shuffle the result. + /// + /// Returns the number of elements added to the buffer. This equals the length + /// of the buffer unless the iterator contains insufficient elements, in which + /// case this equals the number of elements available. + /// + /// Complexity is `O(n)` where `n` is the length of the iterator. + /// For slices, prefer [`SliceRandom::choose_multiple`]. + fn choose_multiple_fill(mut self, rng: &mut R, buf: &mut [Self::Item]) -> usize + where R: Rng + ?Sized { + let amount = buf.len(); + let mut len = 0; + while len < amount { + if let Some(elem) = self.next() { + buf[len] = elem; + len += 1; + } else { + // Iterator exhausted; stop early + return len; + } + } + + // Continue, since the iterator was not exhausted + for (i, elem) in self.enumerate() { + let k = gen_index(rng, i + 1 + amount); + if let Some(slot) = buf.get_mut(k) { + *slot = elem; + } + } + len + } + + /// Collects `amount` values at random from the iterator into a vector. + /// + /// This is equivalent to `choose_multiple_fill` except for the result type. + /// + /// Although the elements are selected randomly, the order of elements in + /// the buffer is neither stable nor fully random. If random ordering is + /// desired, shuffle the result. + /// + /// The length of the returned vector equals `amount` unless the iterator + /// contains insufficient elements, in which case it equals the number of + /// elements available. + /// + /// Complexity is `O(n)` where `n` is the length of the iterator. + /// For slices, prefer [`SliceRandom::choose_multiple`]. 
+ #[cfg(feature = "alloc")] + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + fn choose_multiple(mut self, rng: &mut R, amount: usize) -> Vec + where R: Rng + ?Sized { + let mut reservoir = Vec::with_capacity(amount); + reservoir.extend(self.by_ref().take(amount)); + + // Continue unless the iterator was exhausted + // + // note: this prevents iterators that "restart" from causing problems. + // If the iterator stops once, then so do we. + if reservoir.len() == amount { + for (i, elem) in self.enumerate() { + let k = gen_index(rng, i + 1 + amount); + if let Some(slot) = reservoir.get_mut(k) { + *slot = elem; + } + } + } else { + // Don't hang onto extra memory. There is a corner case where + // `amount` was much less than `self.len()`. + reservoir.shrink_to_fit(); + } + reservoir + } +} + + +impl SliceRandom for [T] { + type Item = T; + + fn choose(&self, rng: &mut R) -> Option<&Self::Item> + where R: Rng + ?Sized { + if self.is_empty() { + None + } else { + Some(&self[gen_index(rng, self.len())]) + } + } + + fn choose_mut(&mut self, rng: &mut R) -> Option<&mut Self::Item> + where R: Rng + ?Sized { + if self.is_empty() { + None + } else { + let len = self.len(); + Some(&mut self[gen_index(rng, len)]) + } + } + + #[cfg(feature = "alloc")] + fn choose_multiple(&self, rng: &mut R, amount: usize) -> SliceChooseIter + where R: Rng + ?Sized { + let amount = ::core::cmp::min(amount, self.len()); + SliceChooseIter { + slice: self, + _phantom: Default::default(), + indices: index::sample(rng, self.len(), amount).into_iter(), + } + } + + #[cfg(feature = "alloc")] + fn choose_weighted( + &self, rng: &mut R, weight: F, + ) -> Result<&Self::Item, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> B, + B: SampleBorrow, + X: SampleUniform + + for<'a> ::core::ops::AddAssign<&'a X> + + ::core::cmp::PartialOrd + + Clone + + Default, + { + use crate::distributions::{Distribution, WeightedIndex}; + let distr = WeightedIndex::new(self.iter().map(weight))?; + 
Ok(&self[distr.sample(rng)]) + } + + #[cfg(feature = "alloc")] + fn choose_weighted_mut( + &mut self, rng: &mut R, weight: F, + ) -> Result<&mut Self::Item, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> B, + B: SampleBorrow, + X: SampleUniform + + for<'a> ::core::ops::AddAssign<&'a X> + + ::core::cmp::PartialOrd + + Clone + + Default, + { + use crate::distributions::{Distribution, WeightedIndex}; + let distr = WeightedIndex::new(self.iter().map(weight))?; + Ok(&mut self[distr.sample(rng)]) + } + + #[cfg(feature = "std")] + fn choose_multiple_weighted( + &self, rng: &mut R, amount: usize, weight: F, + ) -> Result, WeightedError> + where + R: Rng + ?Sized, + F: Fn(&Self::Item) -> X, + X: Into, + { + let amount = ::core::cmp::min(amount, self.len()); + Ok(SliceChooseIter { + slice: self, + _phantom: Default::default(), + indices: index::sample_weighted( + rng, + self.len(), + |idx| weight(&self[idx]).into(), + amount, + )? + .into_iter(), + }) + } + + fn shuffle(&mut self, rng: &mut R) + where R: Rng + ?Sized { + for i in (1..self.len()).rev() { + // invariant: elements with index > i have been locked in place. + self.swap(i, gen_index(rng, i + 1)); + } + } + + fn partial_shuffle( + &mut self, rng: &mut R, amount: usize, + ) -> (&mut [Self::Item], &mut [Self::Item]) + where R: Rng + ?Sized { + // This applies Durstenfeld's algorithm for the + // [Fisher–Yates shuffle](https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm) + // for an unbiased permutation, but exits early after choosing `amount` + // elements. + + let len = self.len(); + let end = if amount >= len { 0 } else { len - amount }; + + for i in (end..len).rev() { + // invariant: elements with index > i have been locked in place. + self.swap(i, gen_index(rng, i + 1)); + } + let r = self.split_at_mut(end); + (r.1, r.0) + } +} + +impl IteratorRandom for I where I: Iterator + Sized {} + + +/// An iterator over multiple slice elements. 
+/// +/// This struct is created by +/// [`SliceRandom::choose_multiple`](trait.SliceRandom.html#tymethod.choose_multiple). +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +#[derive(Debug)] +pub struct SliceChooseIter<'a, S: ?Sized + 'a, T: 'a> { + slice: &'a S, + _phantom: ::core::marker::PhantomData, + indices: index::IndexVecIntoIter, +} + +#[cfg(feature = "alloc")] +impl<'a, S: Index + ?Sized + 'a, T: 'a> Iterator for SliceChooseIter<'a, S, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + // TODO: investigate using SliceIndex::get_unchecked when stable + self.indices.next().map(|i| &self.slice[i as usize]) + } + + fn size_hint(&self) -> (usize, Option) { + (self.indices.len(), Some(self.indices.len())) + } +} + +#[cfg(feature = "alloc")] +impl<'a, S: Index + ?Sized + 'a, T: 'a> ExactSizeIterator + for SliceChooseIter<'a, S, T> +{ + fn len(&self) -> usize { + self.indices.len() + } +} + + +// Sample a number uniformly between 0 and `ubound`. Uses 32-bit sampling where +// possible, primarily in order to produce the same output on 32-bit and 64-bit +// platforms. +#[inline] +fn gen_index(rng: &mut R, ubound: usize) -> usize { + if ubound <= (core::u32::MAX as usize) { + rng.gen_range(0..ubound as u32) as usize + } else { + rng.gen_range(0..ubound) + } +} + + +#[cfg(test)] +mod test { + use super::*; + #[cfg(feature = "alloc")] use crate::Rng; + #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::vec::Vec; + + #[test] + fn test_slice_choose() { + let mut r = crate::test::rng(107); + let chars = [ + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + ]; + let mut chosen = [0i32; 14]; + // The below all use a binomial distribution with n=1000, p=1/14. + // binocdf(40, 1000, 1/14) ~= 2e-5; 1-binocdf(106, ..) 
~= 2e-5 + for _ in 0..1000 { + let picked = *chars.choose(&mut r).unwrap(); + chosen[(picked as usize) - ('a' as usize)] += 1; + } + for count in chosen.iter() { + assert!(40 < *count && *count < 106); + } + + chosen.iter_mut().for_each(|x| *x = 0); + for _ in 0..1000 { + *chosen.choose_mut(&mut r).unwrap() += 1; + } + for count in chosen.iter() { + assert!(40 < *count && *count < 106); + } + + let mut v: [isize; 0] = []; + assert_eq!(v.choose(&mut r), None); + assert_eq!(v.choose_mut(&mut r), None); + } + + #[test] + fn value_stability_slice() { + let mut r = crate::test::rng(413); + let chars = [ + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + ]; + let mut nums = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + + assert_eq!(chars.choose(&mut r), Some(&'l')); + assert_eq!(nums.choose_mut(&mut r), Some(&mut 10)); + + #[cfg(feature = "alloc")] + assert_eq!( + &chars + .choose_multiple(&mut r, 8) + .cloned() + .collect::>(), + &['d', 'm', 'b', 'n', 'c', 'k', 'h', 'e'] + ); + + #[cfg(feature = "alloc")] + assert_eq!(chars.choose_weighted(&mut r, |_| 1), Ok(&'f')); + #[cfg(feature = "alloc")] + assert_eq!(nums.choose_weighted_mut(&mut r, |_| 1), Ok(&mut 5)); + + let mut r = crate::test::rng(414); + nums.shuffle(&mut r); + assert_eq!(nums, [9, 5, 3, 10, 7, 12, 8, 11, 6, 4, 0, 2, 1]); + nums = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + let res = nums.partial_shuffle(&mut r, 6); + assert_eq!(res.0, &mut [7, 4, 8, 6, 9, 3]); + assert_eq!(res.1, &mut [0, 1, 2, 12, 11, 5, 10]); + } + + #[derive(Clone)] + struct UnhintedIterator { + iter: I, + } + impl Iterator for UnhintedIterator { + type Item = I::Item; + + fn next(&mut self) -> Option { + self.iter.next() + } + } + + #[derive(Clone)] + struct ChunkHintedIterator { + iter: I, + chunk_remaining: usize, + chunk_size: usize, + hint_total_size: bool, + } + impl Iterator for ChunkHintedIterator { + type Item = I::Item; + + fn next(&mut self) -> Option { + if self.chunk_remaining == 0 { + 
self.chunk_remaining = ::core::cmp::min(self.chunk_size, self.iter.len()); + } + self.chunk_remaining = self.chunk_remaining.saturating_sub(1); + + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + ( + self.chunk_remaining, + if self.hint_total_size { + Some(self.iter.len()) + } else { + None + }, + ) + } + } + + #[derive(Clone)] + struct WindowHintedIterator { + iter: I, + window_size: usize, + hint_total_size: bool, + } + impl Iterator for WindowHintedIterator { + type Item = I::Item; + + fn next(&mut self) -> Option { + self.iter.next() + } + + fn size_hint(&self) -> (usize, Option) { + ( + ::core::cmp::min(self.iter.len(), self.window_size), + if self.hint_total_size { + Some(self.iter.len()) + } else { + None + }, + ) + } + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_iterator_choose() { + let r = &mut crate::test::rng(109); + fn test_iter + Clone>(r: &mut R, iter: Iter) { + let mut chosen = [0i32; 9]; + for _ in 0..1000 { + let picked = iter.clone().choose(r).unwrap(); + chosen[picked] += 1; + } + for count in chosen.iter() { + // Samples should follow Binomial(1000, 1/9) + // Octave: binopdf(x, 1000, 1/9) gives the prob of *count == x + // Note: have seen 153, which is unlikely but not impossible. 
+ assert!( + 72 < *count && *count < 154, + "count not close to 1000/9: {}", + count + ); + } + } + + test_iter(r, 0..9); + test_iter(r, [0, 1, 2, 3, 4, 5, 6, 7, 8].iter().cloned()); + #[cfg(feature = "alloc")] + test_iter(r, (0..9).collect::>().into_iter()); + test_iter(r, UnhintedIterator { iter: 0..9 }); + test_iter(r, ChunkHintedIterator { + iter: 0..9, + chunk_size: 4, + chunk_remaining: 4, + hint_total_size: false, + }); + test_iter(r, ChunkHintedIterator { + iter: 0..9, + chunk_size: 4, + chunk_remaining: 4, + hint_total_size: true, + }); + test_iter(r, WindowHintedIterator { + iter: 0..9, + window_size: 2, + hint_total_size: false, + }); + test_iter(r, WindowHintedIterator { + iter: 0..9, + window_size: 2, + hint_total_size: true, + }); + + assert_eq!((0..0).choose(r), None); + assert_eq!(UnhintedIterator { iter: 0..0 }.choose(r), None); + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_iterator_choose_stable() { + let r = &mut crate::test::rng(109); + fn test_iter + Clone>(r: &mut R, iter: Iter) { + let mut chosen = [0i32; 9]; + for _ in 0..1000 { + let picked = iter.clone().choose_stable(r).unwrap(); + chosen[picked] += 1; + } + for count in chosen.iter() { + // Samples should follow Binomial(1000, 1/9) + // Octave: binopdf(x, 1000, 1/9) gives the prob of *count == x + // Note: have seen 153, which is unlikely but not impossible. 
+ assert!( + 72 < *count && *count < 154, + "count not close to 1000/9: {}", + count + ); + } + } + + test_iter(r, 0..9); + test_iter(r, [0, 1, 2, 3, 4, 5, 6, 7, 8].iter().cloned()); + #[cfg(feature = "alloc")] + test_iter(r, (0..9).collect::>().into_iter()); + test_iter(r, UnhintedIterator { iter: 0..9 }); + test_iter(r, ChunkHintedIterator { + iter: 0..9, + chunk_size: 4, + chunk_remaining: 4, + hint_total_size: false, + }); + test_iter(r, ChunkHintedIterator { + iter: 0..9, + chunk_size: 4, + chunk_remaining: 4, + hint_total_size: true, + }); + test_iter(r, WindowHintedIterator { + iter: 0..9, + window_size: 2, + hint_total_size: false, + }); + test_iter(r, WindowHintedIterator { + iter: 0..9, + window_size: 2, + hint_total_size: true, + }); + + assert_eq!((0..0).choose(r), None); + assert_eq!(UnhintedIterator { iter: 0..0 }.choose(r), None); + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_iterator_choose_stable_stability() { + fn test_iter(iter: impl Iterator + Clone) -> [i32; 9] { + let r = &mut crate::test::rng(109); + let mut chosen = [0i32; 9]; + for _ in 0..1000 { + let picked = iter.clone().choose_stable(r).unwrap(); + chosen[picked] += 1; + } + chosen + } + + let reference = test_iter(0..9); + assert_eq!(test_iter([0, 1, 2, 3, 4, 5, 6, 7, 8].iter().cloned()), reference); + + #[cfg(feature = "alloc")] + assert_eq!(test_iter((0..9).collect::>().into_iter()), reference); + assert_eq!(test_iter(UnhintedIterator { iter: 0..9 }), reference); + assert_eq!(test_iter(ChunkHintedIterator { + iter: 0..9, + chunk_size: 4, + chunk_remaining: 4, + hint_total_size: false, + }), reference); + assert_eq!(test_iter(ChunkHintedIterator { + iter: 0..9, + chunk_size: 4, + chunk_remaining: 4, + hint_total_size: true, + }), reference); + assert_eq!(test_iter(WindowHintedIterator { + iter: 0..9, + window_size: 2, + hint_total_size: false, + }), reference); + assert_eq!(test_iter(WindowHintedIterator { + iter: 0..9, + window_size: 2, + hint_total_size: 
true, + }), reference); + } + + #[test] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_shuffle() { + let mut r = crate::test::rng(108); + let empty: &mut [isize] = &mut []; + empty.shuffle(&mut r); + let mut one = [1]; + one.shuffle(&mut r); + let b: &[_] = &[1]; + assert_eq!(one, b); + + let mut two = [1, 2]; + two.shuffle(&mut r); + assert!(two == [1, 2] || two == [2, 1]); + + fn move_last(slice: &mut [usize], pos: usize) { + // use slice[pos..].rotate_left(1); once we can use that + let last_val = slice[pos]; + for i in pos..slice.len() - 1 { + slice[i] = slice[i + 1]; + } + *slice.last_mut().unwrap() = last_val; + } + let mut counts = [0i32; 24]; + for _ in 0..10000 { + let mut arr: [usize; 4] = [0, 1, 2, 3]; + arr.shuffle(&mut r); + let mut permutation = 0usize; + let mut pos_value = counts.len(); + for i in 0..4 { + pos_value /= 4 - i; + let pos = arr.iter().position(|&x| x == i).unwrap(); + assert!(pos < (4 - i)); + permutation += pos * pos_value; + move_last(&mut arr, pos); + assert_eq!(arr[3], i); + } + for (i, &a) in arr.iter().enumerate() { + assert_eq!(a, i); + } + counts[permutation] += 1; + } + for count in counts.iter() { + // Binomial(10000, 1/24) with average 416.667 + // Octave: binocdf(n, 10000, 1/24) + // 99.9% chance samples lie within this range: + assert!(352 <= *count && *count <= 483, "count: {}", count); + } + } + + #[test] + fn test_partial_shuffle() { + let mut r = crate::test::rng(118); + + let mut empty: [u32; 0] = []; + let res = empty.partial_shuffle(&mut r, 10); + assert_eq!((res.0.len(), res.1.len()), (0, 0)); + + let mut v = [1, 2, 3, 4, 5]; + let res = v.partial_shuffle(&mut r, 2); + assert_eq!((res.0.len(), res.1.len()), (2, 3)); + assert!(res.0[0] != res.0[1]); + // First elements are only modified if selected, so at least one isn't modified: + assert!(res.1[0] == 1 || res.1[1] == 2 || res.1[2] == 3); + } + + #[test] + #[cfg(feature = "alloc")] + fn test_sample_iter() { + let min_val = 1; + let max_val = 100; + + let 
mut r = crate::test::rng(401); + let vals = (min_val..max_val).collect::>(); + let small_sample = vals.iter().choose_multiple(&mut r, 5); + let large_sample = vals.iter().choose_multiple(&mut r, vals.len() + 5); + + assert_eq!(small_sample.len(), 5); + assert_eq!(large_sample.len(), vals.len()); + // no randomization happens when amount >= len + assert_eq!(large_sample, vals.iter().collect::>()); + + assert!(small_sample + .iter() + .all(|e| { **e >= min_val && **e <= max_val })); + } + + #[test] + #[cfg(feature = "alloc")] + #[cfg_attr(miri, ignore)] // Miri is too slow + fn test_weighted() { + let mut r = crate::test::rng(406); + const N_REPS: u32 = 3000; + let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]; + let total_weight = weights.iter().sum::() as f32; + + let verify = |result: [i32; 14]| { + for (i, count) in result.iter().enumerate() { + let exp = (weights[i] * N_REPS) as f32 / total_weight; + let mut err = (*count as f32 - exp).abs(); + if err != 0.0 { + err /= exp; + } + assert!(err <= 0.25); + } + }; + + // choose_weighted + fn get_weight(item: &(u32, T)) -> u32 { + item.0 + } + let mut chosen = [0i32; 14]; + let mut items = [(0u32, 0usize); 14]; // (weight, index) + for (i, item) in items.iter_mut().enumerate() { + *item = (weights[i], i); + } + for _ in 0..N_REPS { + let item = items.choose_weighted(&mut r, get_weight).unwrap(); + chosen[item.1] += 1; + } + verify(chosen); + + // choose_weighted_mut + let mut items = [(0u32, 0i32); 14]; // (weight, count) + for (i, item) in items.iter_mut().enumerate() { + *item = (weights[i], 0); + } + for _ in 0..N_REPS { + items.choose_weighted_mut(&mut r, get_weight).unwrap().1 += 1; + } + for (ch, item) in chosen.iter_mut().zip(items.iter()) { + *ch = item.1; + } + verify(chosen); + + // Check error cases + let empty_slice = &mut [10][0..0]; + assert_eq!( + empty_slice.choose_weighted(&mut r, |_| 1), + Err(WeightedError::NoItem) + ); + assert_eq!( + empty_slice.choose_weighted_mut(&mut r, |_| 1), + 
Err(WeightedError::NoItem) + ); + assert_eq!( + ['x'].choose_weighted_mut(&mut r, |_| 0), + Err(WeightedError::AllWeightsZero) + ); + assert_eq!( + [0, -1].choose_weighted_mut(&mut r, |x| *x), + Err(WeightedError::InvalidWeight) + ); + assert_eq!( + [-1, 0].choose_weighted_mut(&mut r, |x| *x), + Err(WeightedError::InvalidWeight) + ); + } + + #[test] + fn value_stability_choose() { + fn choose>(iter: I) -> Option { + let mut rng = crate::test::rng(411); + iter.choose(&mut rng) + } + + assert_eq!(choose([].iter().cloned()), None); + assert_eq!(choose(0..100), Some(33)); + assert_eq!(choose(UnhintedIterator { iter: 0..100 }), Some(40)); + assert_eq!( + choose(ChunkHintedIterator { + iter: 0..100, + chunk_size: 32, + chunk_remaining: 32, + hint_total_size: false, + }), + Some(39) + ); + assert_eq!( + choose(ChunkHintedIterator { + iter: 0..100, + chunk_size: 32, + chunk_remaining: 32, + hint_total_size: true, + }), + Some(39) + ); + assert_eq!( + choose(WindowHintedIterator { + iter: 0..100, + window_size: 32, + hint_total_size: false, + }), + Some(90) + ); + assert_eq!( + choose(WindowHintedIterator { + iter: 0..100, + window_size: 32, + hint_total_size: true, + }), + Some(90) + ); + } + + #[test] + fn value_stability_choose_stable() { + fn choose>(iter: I) -> Option { + let mut rng = crate::test::rng(411); + iter.choose_stable(&mut rng) + } + + assert_eq!(choose([].iter().cloned()), None); + assert_eq!(choose(0..100), Some(40)); + assert_eq!(choose(UnhintedIterator { iter: 0..100 }), Some(40)); + assert_eq!( + choose(ChunkHintedIterator { + iter: 0..100, + chunk_size: 32, + chunk_remaining: 32, + hint_total_size: false, + }), + Some(40) + ); + assert_eq!( + choose(ChunkHintedIterator { + iter: 0..100, + chunk_size: 32, + chunk_remaining: 32, + hint_total_size: true, + }), + Some(40) + ); + assert_eq!( + choose(WindowHintedIterator { + iter: 0..100, + window_size: 32, + hint_total_size: false, + }), + Some(40) + ); + assert_eq!( + choose(WindowHintedIterator { + iter: 
0..100, + window_size: 32, + hint_total_size: true, + }), + Some(40) + ); + } + + #[test] + fn value_stability_choose_multiple() { + fn do_test>(iter: I, v: &[u32]) { + let mut rng = crate::test::rng(412); + let mut buf = [0u32; 8]; + assert_eq!(iter.choose_multiple_fill(&mut rng, &mut buf), v.len()); + assert_eq!(&buf[0..v.len()], v); + } + + do_test(0..4, &[0, 1, 2, 3]); + do_test(0..8, &[0, 1, 2, 3, 4, 5, 6, 7]); + do_test(0..100, &[58, 78, 80, 92, 43, 8, 96, 7]); + + #[cfg(feature = "alloc")] + { + fn do_test>(iter: I, v: &[u32]) { + let mut rng = crate::test::rng(412); + assert_eq!(iter.choose_multiple(&mut rng, v.len()), v); + } + + do_test(0..4, &[0, 1, 2, 3]); + do_test(0..8, &[0, 1, 2, 3, 4, 5, 6, 7]); + do_test(0..100, &[58, 78, 80, 92, 43, 8, 96, 7]); + } + } + + #[test] + #[cfg(feature = "std")] + fn test_multiple_weighted_edge_cases() { + use super::*; + + let mut rng = crate::test::rng(413); + + // Case 1: One of the weights is 0 + let choices = [('a', 2), ('b', 1), ('c', 0)]; + for _ in 0..100 { + let result = choices + .choose_multiple_weighted(&mut rng, 2, |item| item.1) + .unwrap() + .collect::>(); + + assert_eq!(result.len(), 2); + assert!(!result.iter().any(|val| val.0 == 'c')); + } + + // Case 2: All of the weights are 0 + let choices = [('a', 0), ('b', 0), ('c', 0)]; + + assert_eq!(choices + .choose_multiple_weighted(&mut rng, 2, |item| item.1) + .unwrap().count(), 2); + + // Case 3: Negative weights + let choices = [('a', -1), ('b', 1), ('c', 1)]; + assert_eq!( + choices + .choose_multiple_weighted(&mut rng, 2, |item| item.1) + .unwrap_err(), + WeightedError::InvalidWeight + ); + + // Case 4: Empty list + let choices = []; + assert_eq!(choices + .choose_multiple_weighted(&mut rng, 0, |_: &()| 0) + .unwrap().count(), 0); + + // Case 5: NaN weights + let choices = [('a', core::f64::NAN), ('b', 1.0), ('c', 1.0)]; + assert_eq!( + choices + .choose_multiple_weighted(&mut rng, 2, |item| item.1) + .unwrap_err(), + WeightedError::InvalidWeight + ); + 
+ // Case 6: +infinity weights + let choices = [('a', core::f64::INFINITY), ('b', 1.0), ('c', 1.0)]; + for _ in 0..100 { + let result = choices + .choose_multiple_weighted(&mut rng, 2, |item| item.1) + .unwrap() + .collect::>(); + assert_eq!(result.len(), 2); + assert!(result.iter().any(|val| val.0 == 'a')); + } + + // Case 7: -infinity weights + let choices = [('a', core::f64::NEG_INFINITY), ('b', 1.0), ('c', 1.0)]; + assert_eq!( + choices + .choose_multiple_weighted(&mut rng, 2, |item| item.1) + .unwrap_err(), + WeightedError::InvalidWeight + ); + + // Case 8: -0 weights + let choices = [('a', -0.0), ('b', 1.0), ('c', 1.0)]; + assert!(choices + .choose_multiple_weighted(&mut rng, 2, |item| item.1) + .is_ok()); + } + + #[test] + #[cfg(feature = "std")] + fn test_multiple_weighted_distributions() { + use super::*; + + // The theoretical probabilities of the different outcomes are: + // AB: 0.5 * 0.5 = 0.250 + // AC: 0.5 * 0.5 = 0.250 + // BA: 0.25 * 0.67 = 0.167 + // BC: 0.25 * 0.33 = 0.082 + // CA: 0.25 * 0.67 = 0.167 + // CB: 0.25 * 0.33 = 0.082 + let choices = [('a', 2), ('b', 1), ('c', 1)]; + let mut rng = crate::test::rng(414); + + let mut results = [0i32; 3]; + let expected_results = [4167, 4167, 1666]; + for _ in 0..10000 { + let result = choices + .choose_multiple_weighted(&mut rng, 2, |item| item.1) + .unwrap() + .collect::>(); + + assert_eq!(result.len(), 2); + + match (result[0].0, result[1].0) { + ('a', 'b') | ('b', 'a') => { + results[0] += 1; + } + ('a', 'c') | ('c', 'a') => { + results[1] += 1; + } + ('b', 'c') | ('c', 'b') => { + results[2] += 1; + } + (_, _) => panic!("unexpected result"), + } + } + + let mut diffs = results + .iter() + .zip(&expected_results) + .map(|(a, b)| (a - b).abs()); + assert!(!diffs.any(|deviation| deviation > 100)); + } +} diff --git a/src/rust/vendor/rand_chacha/.cargo-checksum.json b/src/rust/vendor/rand_chacha/.cargo-checksum.json new file mode 100644 index 000000000..a7100e9bc --- /dev/null +++ 
b/src/rust/vendor/rand_chacha/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"deb20cd6e8be14e767b7fdea0e503ddd8226afd1253a5221aacc28a23d45db20","COPYRIGHT":"90eb64f0279b0d9432accfa6023ff803bc4965212383697eee27a0f426d5f8d5","Cargo.toml":"e2ef45f3c9d6f013da266b76ca1e1f664ad5fa1d526b46580a77fb311c659fd8","LICENSE-APACHE":"aaff376532ea30a0cd5330b9502ad4a4c8bf769c539c87ffe78819d188a18ebf","LICENSE-MIT":"209fbbe0ad52d9235e37badf9cadfe4dbdc87203179c0899e738b39ade42177b","README.md":"f4221f35b7086649fa77807e826af020b57eb65b19cb693482d4a7e1e4d80537","src/chacha.rs":"dfd79ed4762e8267148d1776381c71b898808014a4069cfafbc78177247d5fe9","src/guts.rs":"898fd129897fb44d15053044227307ee2bf416970adb8e63b4f5eabb7431aa1e","src/lib.rs":"a27fe2bff676a764d43d604a20cf30a41dc1c5ef4053eb41129d2479f5ae83fe"},"package":"e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"} \ No newline at end of file diff --git a/src/rust/vendor/rand_chacha/CHANGELOG.md b/src/rust/vendor/rand_chacha/CHANGELOG.md new file mode 100644 index 000000000..a598bb7ee --- /dev/null +++ b/src/rust/vendor/rand_chacha/CHANGELOG.md @@ -0,0 +1,35 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [0.3.1] - 2021-06-09 +- add getters corresponding to existing setters: `get_seed`, `get_stream` (#1124) +- add serde support, gated by the `serde1` feature (#1124) +- ensure expected layout via `repr(transparent)` (#1120) + +## [0.3.0] - 2020-12-08 +- Bump `rand_core` version to 0.6.0 +- Bump MSRV to 1.36 (#1011) +- Remove usage of deprecated feature "simd" of `ppv-lite86` (#979), then revert + this change (#1023) since SIMD is only enabled by default from `ppv-lite86 v0.2.10` +- impl PartialEq+Eq for ChaChaXRng and ChaChaXCore (#979) +- Fix panic on block counter wrap that was occurring in debug builds (#980) + +## [0.2.2] - 2020-03-09 +- Integrate `c2-chacha`, reducing dependency count (#931) +- Add CryptoRng to ChaChaXCore (#944) + +## [0.2.1] - 2019-07-22 +- Force enable the `simd` feature of `c2-chacha` (#845) + +## [0.2.0] - 2019-06-06 +- Rewrite based on the much faster `c2-chacha` crate (#789) + +## [0.1.1] - 2019-01-04 +- Disable `i128` and `u128` if the `target_os` is `emscripten` (#671: work-around Emscripten limitation) +- Update readme and doc links + +## [0.1.0] - 2018-10-17 +- Pulled out of the Rand crate diff --git a/src/rust/vendor/rand_chacha/COPYRIGHT b/src/rust/vendor/rand_chacha/COPYRIGHT new file mode 100644 index 000000000..468d907ca --- /dev/null +++ b/src/rust/vendor/rand_chacha/COPYRIGHT @@ -0,0 +1,12 @@ +Copyrights in the Rand project are retained by their contributors. No +copyright assignment is required to contribute to the Rand project. + +For full authorship information, see the version control history. + +Except as otherwise noted (below and/or in individual files), Rand is +licensed under the Apache License, Version 2.0 or + or the MIT license + or , at your option. + +The Rand project includes code from the Rust project +published under these same licenses. 
diff --git a/src/rust/vendor/rand_chacha/Cargo.toml b/src/rust/vendor/rand_chacha/Cargo.toml new file mode 100644 index 000000000..c907ae974 --- /dev/null +++ b/src/rust/vendor/rand_chacha/Cargo.toml @@ -0,0 +1,45 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "rand_chacha" +version = "0.3.1" +authors = ["The Rand Project Developers", "The Rust Project Developers", "The CryptoCorrosion Contributors"] +description = "ChaCha random number generator\n" +homepage = "https://rust-random.github.io/book" +documentation = "https://docs.rs/rand_chacha" +readme = "README.md" +keywords = ["random", "rng", "chacha"] +categories = ["algorithms", "no-std"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-random/rand" +[dependencies.ppv-lite86] +version = "0.2.8" +features = ["simd"] +default-features = false + +[dependencies.rand_core] +version = "0.6.0" + +[dependencies.serde] +version = "1.0" +features = ["derive"] +optional = true +[dev-dependencies.serde_json] +version = "1.0" + +[features] +default = ["std"] +serde1 = ["serde"] +simd = [] +std = ["ppv-lite86/std"] diff --git a/src/rust/vendor/rand_chacha/LICENSE-APACHE b/src/rust/vendor/rand_chacha/LICENSE-APACHE new file mode 100644 index 000000000..17d74680f --- /dev/null +++ b/src/rust/vendor/rand_chacha/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + 
+1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/src/rust/vendor/rand_chacha/LICENSE-MIT b/src/rust/vendor/rand_chacha/LICENSE-MIT new file mode 100644 index 000000000..d93b5baf3 --- /dev/null +++ b/src/rust/vendor/rand_chacha/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright 2018 Developers of the Rand project +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/rust/vendor/rand_chacha/README.md b/src/rust/vendor/rand_chacha/README.md new file mode 100644 index 000000000..edd754d79 --- /dev/null +++ b/src/rust/vendor/rand_chacha/README.md @@ -0,0 +1,48 @@ +# rand_chacha + +[![Test Status](https://github.com/rust-random/rand/workflows/Tests/badge.svg?event=push)](https://github.com/rust-random/rand/actions) +[![Latest version](https://img.shields.io/crates/v/rand_chacha.svg)](https://crates.io/crates/rand_chacha) +[![Book](https://img.shields.io/badge/book-master-yellow.svg)](https://rust-random.github.io/book/) +[![API](https://img.shields.io/badge/api-master-yellow.svg)](https://rust-random.github.io/rand/rand_chacha) +[![API](https://docs.rs/rand_chacha/badge.svg)](https://docs.rs/rand_chacha) +[![Minimum rustc version](https://img.shields.io/badge/rustc-1.36+-lightgray.svg)](https://github.com/rust-random/rand#rust-version-requirements) + +A cryptographically secure random number generator that uses the ChaCha +algorithm. + +ChaCha is a stream cipher designed by Daniel J. Bernstein[^1], that we use +as an RNG. It is an improved variant of the Salsa20 cipher family, which was +selected as one of the "stream ciphers suitable for widespread adoption" by +eSTREAM[^2]. + +The RNGs provided by this crate are implemented via the fast stream ciphers of +the [`c2-chacha`](https://crates.io/crates/c2-chacha) crate. + +Links: + +- [API documentation (master)](https://rust-random.github.io/rand/rand_chacha) +- [API documentation (docs.rs)](https://docs.rs/rand_chacha) +- [Changelog](https://github.com/rust-random/rand/blob/master/rand_chacha/CHANGELOG.md) + +[rand]: https://crates.io/crates/rand +[^1]: D. J. 
Bernstein, [*ChaCha, a variant of Salsa20*]( + https://cr.yp.to/chacha.html) + +[^2]: [eSTREAM: the ECRYPT Stream Cipher Project]( + http://www.ecrypt.eu.org/stream/) + + +## Crate Features + +`rand_chacha` is `no_std` compatible when disabling default features; the `std` +feature can be explicitly required to re-enable `std` support. Using `std` +allows detection of CPU features and thus better optimisation. + + +# License + +`rand_chacha` is distributed under the terms of both the MIT license and the +Apache License (Version 2.0). + +See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT), and +[COPYRIGHT](COPYRIGHT) for details. diff --git a/src/rust/vendor/rand_chacha/src/chacha.rs b/src/rust/vendor/rand_chacha/src/chacha.rs new file mode 100644 index 000000000..50da81bfa --- /dev/null +++ b/src/rust/vendor/rand_chacha/src/chacha.rs @@ -0,0 +1,632 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The ChaCha random number generator. + +#[cfg(not(feature = "std"))] use core; +#[cfg(feature = "std")] use std as core; + +use self::core::fmt; +use crate::guts::ChaCha; +use rand_core::block::{BlockRng, BlockRngCore}; +use rand_core::{CryptoRng, Error, RngCore, SeedableRng}; + +#[cfg(feature = "serde1")] use serde::{Serialize, Deserialize, Serializer, Deserializer}; + +// NB. 
this must remain consistent with some currently hard-coded numbers in this module +const BUF_BLOCKS: u8 = 4; +// number of 32-bit words per ChaCha block (fixed by algorithm definition) +const BLOCK_WORDS: u8 = 16; + +#[repr(transparent)] +pub struct Array64([T; 64]); +impl Default for Array64 +where T: Default +{ + #[rustfmt::skip] + fn default() -> Self { + Self([ + T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), + T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), + T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), + T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), + T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), + T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), + T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), + T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), T::default(), + ]) + } +} +impl AsRef<[T]> for Array64 { + fn as_ref(&self) -> &[T] { + &self.0 + } +} +impl AsMut<[T]> for Array64 { + fn as_mut(&mut self) -> &mut [T] { + &mut self.0 + } +} +impl Clone for Array64 +where T: Copy + Default +{ + fn clone(&self) -> Self { + let mut new = Self::default(); + new.0.copy_from_slice(&self.0); + new + } +} +impl fmt::Debug for Array64 { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Array64 {{}}") + } +} + +macro_rules! 
chacha_impl { + ($ChaChaXCore:ident, $ChaChaXRng:ident, $rounds:expr, $doc:expr, $abst:ident) => { + #[doc=$doc] + #[derive(Clone, PartialEq, Eq)] + pub struct $ChaChaXCore { + state: ChaCha, + } + + // Custom Debug implementation that does not expose the internal state + impl fmt::Debug for $ChaChaXCore { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ChaChaXCore {{}}") + } + } + + impl BlockRngCore for $ChaChaXCore { + type Item = u32; + type Results = Array64; + #[inline] + fn generate(&mut self, r: &mut Self::Results) { + // Fill slice of words by writing to equivalent slice of bytes, then fixing endianness. + self.state.refill4($rounds, unsafe { + &mut *(&mut *r as *mut Array64 as *mut [u8; 256]) + }); + for x in r.as_mut() { + *x = x.to_le(); + } + } + } + + impl SeedableRng for $ChaChaXCore { + type Seed = [u8; 32]; + #[inline] + fn from_seed(seed: Self::Seed) -> Self { + $ChaChaXCore { state: ChaCha::new(&seed, &[0u8; 8]) } + } + } + + impl CryptoRng for $ChaChaXCore {} + + /// A cryptographically secure random number generator that uses the ChaCha algorithm. + /// + /// ChaCha is a stream cipher designed by Daniel J. Bernstein[^1], that we use as an RNG. It is + /// an improved variant of the Salsa20 cipher family, which was selected as one of the "stream + /// ciphers suitable for widespread adoption" by eSTREAM[^2]. + /// + /// ChaCha uses add-rotate-xor (ARX) operations as its basis. These are safe against timing + /// attacks, although that is mostly a concern for ciphers and not for RNGs. We provide a SIMD + /// implementation to support high throughput on a variety of common hardware platforms. + /// + /// With the ChaCha algorithm it is possible to choose the number of rounds the core algorithm + /// should run. The number of rounds is a tradeoff between performance and security, where 8 + /// rounds is the minimum potentially secure configuration, and 20 rounds is widely used as a + /// conservative choice. 
+ /// + /// We use a 64-bit counter and 64-bit stream identifier as in Bernstein's implementation[^1] + /// except that we use a stream identifier in place of a nonce. A 64-bit counter over 64-byte + /// (16 word) blocks allows 1 ZiB of output before cycling, and the stream identifier allows + /// 264 unique streams of output per seed. Both counter and stream are initialized + /// to zero but may be set via the `set_word_pos` and `set_stream` methods. + /// + /// The word layout is: + /// + /// ```text + /// constant constant constant constant + /// seed seed seed seed + /// seed seed seed seed + /// counter counter stream_id stream_id + /// ``` + /// + /// This implementation uses an output buffer of sixteen `u32` words, and uses + /// [`BlockRng`] to implement the [`RngCore`] methods. + /// + /// [^1]: D. J. Bernstein, [*ChaCha, a variant of Salsa20*]( + /// https://cr.yp.to/chacha.html) + /// + /// [^2]: [eSTREAM: the ECRYPT Stream Cipher Project]( + /// http://www.ecrypt.eu.org/stream/) + #[derive(Clone, Debug)] + pub struct $ChaChaXRng { + rng: BlockRng<$ChaChaXCore>, + } + + impl SeedableRng for $ChaChaXRng { + type Seed = [u8; 32]; + #[inline] + fn from_seed(seed: Self::Seed) -> Self { + let core = $ChaChaXCore::from_seed(seed); + Self { + rng: BlockRng::new(core), + } + } + } + + impl RngCore for $ChaChaXRng { + #[inline] + fn next_u32(&mut self) -> u32 { + self.rng.next_u32() + } + #[inline] + fn next_u64(&mut self) -> u64 { + self.rng.next_u64() + } + #[inline] + fn fill_bytes(&mut self, bytes: &mut [u8]) { + self.rng.fill_bytes(bytes) + } + #[inline] + fn try_fill_bytes(&mut self, bytes: &mut [u8]) -> Result<(), Error> { + self.rng.try_fill_bytes(bytes) + } + } + + impl $ChaChaXRng { + // The buffer is a 4-block window, i.e. it is always at a block-aligned position in the + // stream but if the stream has been seeked it may not be self-aligned. + + /// Get the offset from the start of the stream, in 32-bit words. 
+ /// + /// Since the generated blocks are 16 words (24) long and the + /// counter is 64-bits, the offset is a 68-bit number. Sub-word offsets are + /// not supported, hence the result can simply be multiplied by 4 to get a + /// byte-offset. + #[inline] + pub fn get_word_pos(&self) -> u128 { + let buf_start_block = { + let buf_end_block = self.rng.core.state.get_block_pos(); + u64::wrapping_sub(buf_end_block, BUF_BLOCKS.into()) + }; + let (buf_offset_blocks, block_offset_words) = { + let buf_offset_words = self.rng.index() as u64; + let blocks_part = buf_offset_words / u64::from(BLOCK_WORDS); + let words_part = buf_offset_words % u64::from(BLOCK_WORDS); + (blocks_part, words_part) + }; + let pos_block = u64::wrapping_add(buf_start_block, buf_offset_blocks); + let pos_block_words = u128::from(pos_block) * u128::from(BLOCK_WORDS); + pos_block_words + u128::from(block_offset_words) + } + + /// Set the offset from the start of the stream, in 32-bit words. + /// + /// As with `get_word_pos`, we use a 68-bit number. Since the generator + /// simply cycles at the end of its period (1 ZiB), we ignore the upper + /// 60 bits. + #[inline] + pub fn set_word_pos(&mut self, word_offset: u128) { + let block = (word_offset / u128::from(BLOCK_WORDS)) as u64; + self.rng + .core + .state + .set_block_pos(block); + self.rng.generate_and_set((word_offset % u128::from(BLOCK_WORDS)) as usize); + } + + /// Set the stream number. + /// + /// This is initialized to zero; 264 unique streams of output + /// are available per seed/key. + /// + /// Note that in order to reproduce ChaCha output with a specific 64-bit + /// nonce, one can convert that nonce to a `u64` in little-endian fashion + /// and pass to this function. In theory a 96-bit nonce can be used by + /// passing the last 64-bits to this function and using the first 32-bits as + /// the most significant half of the 64-bit counter (which may be set + /// indirectly via `set_word_pos`), but this is not directly supported. 
+ #[inline] + pub fn set_stream(&mut self, stream: u64) { + self.rng + .core + .state + .set_nonce(stream); + if self.rng.index() != 64 { + let wp = self.get_word_pos(); + self.set_word_pos(wp); + } + } + + /// Get the stream number. + #[inline] + pub fn get_stream(&self) -> u64 { + self.rng + .core + .state + .get_nonce() + } + + /// Get the seed. + #[inline] + pub fn get_seed(&self) -> [u8; 32] { + self.rng + .core + .state + .get_seed() + } + } + + impl CryptoRng for $ChaChaXRng {} + + impl From<$ChaChaXCore> for $ChaChaXRng { + fn from(core: $ChaChaXCore) -> Self { + $ChaChaXRng { + rng: BlockRng::new(core), + } + } + } + + impl PartialEq<$ChaChaXRng> for $ChaChaXRng { + fn eq(&self, rhs: &$ChaChaXRng) -> bool { + let a: $abst::$ChaChaXRng = self.into(); + let b: $abst::$ChaChaXRng = rhs.into(); + a == b + } + } + impl Eq for $ChaChaXRng {} + + #[cfg(feature = "serde1")] + impl Serialize for $ChaChaXRng { + fn serialize(&self, s: S) -> Result + where S: Serializer { + $abst::$ChaChaXRng::from(self).serialize(s) + } + } + #[cfg(feature = "serde1")] + impl<'de> Deserialize<'de> for $ChaChaXRng { + fn deserialize(d: D) -> Result where D: Deserializer<'de> { + $abst::$ChaChaXRng::deserialize(d).map(|x| Self::from(&x)) + } + } + + mod $abst { + #[cfg(feature = "serde1")] use serde::{Serialize, Deserialize}; + + // The abstract state of a ChaCha stream, independent of implementation choices. The + // comparison and serialization of this object is considered a semver-covered part of + // the API. + #[derive(Debug, PartialEq, Eq)] + #[cfg_attr( + feature = "serde1", + derive(Serialize, Deserialize), + )] + pub(crate) struct $ChaChaXRng { + seed: [u8; 32], + stream: u64, + word_pos: u128, + } + + impl From<&super::$ChaChaXRng> for $ChaChaXRng { + // Forget all information about the input except what is necessary to determine the + // outputs of any sequence of pub API calls. 
+ fn from(r: &super::$ChaChaXRng) -> Self { + Self { + seed: r.get_seed(), + stream: r.get_stream(), + word_pos: r.get_word_pos(), + } + } + } + + impl From<&$ChaChaXRng> for super::$ChaChaXRng { + // Construct one of the possible concrete RNGs realizing an abstract state. + fn from(a: &$ChaChaXRng) -> Self { + use rand_core::SeedableRng; + let mut r = Self::from_seed(a.seed); + r.set_stream(a.stream); + r.set_word_pos(a.word_pos); + r + } + } + } + } +} + +chacha_impl!(ChaCha20Core, ChaCha20Rng, 10, "ChaCha with 20 rounds", abstract20); +chacha_impl!(ChaCha12Core, ChaCha12Rng, 6, "ChaCha with 12 rounds", abstract12); +chacha_impl!(ChaCha8Core, ChaCha8Rng, 4, "ChaCha with 8 rounds", abstract8); + +#[cfg(test)] +mod test { + use rand_core::{RngCore, SeedableRng}; + + #[cfg(feature = "serde1")] use super::{ChaCha20Rng, ChaCha12Rng, ChaCha8Rng}; + + type ChaChaRng = super::ChaCha20Rng; + + #[cfg(feature = "serde1")] + #[test] + fn test_chacha_serde_roundtrip() { + let seed = [ + 1, 0, 52, 0, 0, 0, 0, 0, 1, 0, 10, 0, 22, 32, 0, 0, 2, 0, 55, 49, 0, 11, 0, 0, 3, 0, 0, 0, 0, + 0, 2, 92, + ]; + let mut rng1 = ChaCha20Rng::from_seed(seed); + let mut rng2 = ChaCha12Rng::from_seed(seed); + let mut rng3 = ChaCha8Rng::from_seed(seed); + + let encoded1 = serde_json::to_string(&rng1).unwrap(); + let encoded2 = serde_json::to_string(&rng2).unwrap(); + let encoded3 = serde_json::to_string(&rng3).unwrap(); + + let mut decoded1: ChaCha20Rng = serde_json::from_str(&encoded1).unwrap(); + let mut decoded2: ChaCha12Rng = serde_json::from_str(&encoded2).unwrap(); + let mut decoded3: ChaCha8Rng = serde_json::from_str(&encoded3).unwrap(); + + assert_eq!(rng1, decoded1); + assert_eq!(rng2, decoded2); + assert_eq!(rng3, decoded3); + + assert_eq!(rng1.next_u32(), decoded1.next_u32()); + assert_eq!(rng2.next_u32(), decoded2.next_u32()); + assert_eq!(rng3.next_u32(), decoded3.next_u32()); + } + + // This test validates that: + // 1. 
a hard-coded serialization demonstrating the format at time of initial release can still + // be deserialized to a ChaChaRng + // 2. re-serializing the resultant object produces exactly the original string + // + // Condition 2 is stronger than necessary: an equivalent serialization (e.g. with field order + // permuted, or whitespace differences) would also be admissible, but would fail this test. + // However testing for equivalence of serialized data is difficult, and there shouldn't be any + // reason we need to violate the stronger-than-needed condition, e.g. by changing the field + // definition order. + #[cfg(feature = "serde1")] + #[test] + fn test_chacha_serde_format_stability() { + let j = r#"{"seed":[4,8,15,16,23,42,4,8,15,16,23,42,4,8,15,16,23,42,4,8,15,16,23,42,4,8,15,16,23,42,4,8],"stream":27182818284,"word_pos":314159265359}"#; + let r: ChaChaRng = serde_json::from_str(&j).unwrap(); + let j1 = serde_json::to_string(&r).unwrap(); + assert_eq!(j, j1); + } + + #[test] + fn test_chacha_construction() { + let seed = [ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, + ]; + let mut rng1 = ChaChaRng::from_seed(seed); + assert_eq!(rng1.next_u32(), 137206642); + + let mut rng2 = ChaChaRng::from_rng(rng1).unwrap(); + assert_eq!(rng2.next_u32(), 1325750369); + } + + #[test] + fn test_chacha_true_values_a() { + // Test vectors 1 and 2 from + // https://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04 + let seed = [0u8; 32]; + let mut rng = ChaChaRng::from_seed(seed); + + let mut results = [0u32; 16]; + for i in results.iter_mut() { + *i = rng.next_u32(); + } + let expected = [ + 0xade0b876, 0x903df1a0, 0xe56a5d40, 0x28bd8653, 0xb819d2bd, 0x1aed8da0, 0xccef36a8, + 0xc70d778b, 0x7c5941da, 0x8d485751, 0x3fe02477, 0x374ad8b8, 0xf4b8436a, 0x1ca11815, + 0x69b687c3, 0x8665eeb2, + ]; + assert_eq!(results, expected); + + for i in results.iter_mut() { + *i = rng.next_u32(); + } + let expected = [ + 0xbee7079f, 
0x7a385155, 0x7c97ba98, 0x0d082d73, 0xa0290fcb, 0x6965e348, 0x3e53c612, + 0xed7aee32, 0x7621b729, 0x434ee69c, 0xb03371d5, 0xd539d874, 0x281fed31, 0x45fb0a51, + 0x1f0ae1ac, 0x6f4d794b, + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_chacha_true_values_b() { + // Test vector 3 from + // https://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04 + let seed = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, + ]; + let mut rng = ChaChaRng::from_seed(seed); + + // Skip block 0 + for _ in 0..16 { + rng.next_u32(); + } + + let mut results = [0u32; 16]; + for i in results.iter_mut() { + *i = rng.next_u32(); + } + let expected = [ + 0x2452eb3a, 0x9249f8ec, 0x8d829d9b, 0xddd4ceb1, 0xe8252083, 0x60818b01, 0xf38422b8, + 0x5aaa49c9, 0xbb00ca8e, 0xda3ba7b4, 0xc4b592d1, 0xfdf2732f, 0x4436274e, 0x2561b3c8, + 0xebdd4aa6, 0xa0136c00, + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_chacha_true_values_c() { + // Test vector 4 from + // https://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04 + let seed = [ + 0, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, + ]; + let expected = [ + 0xfb4dd572, 0x4bc42ef1, 0xdf922636, 0x327f1394, 0xa78dea8f, 0x5e269039, 0xa1bebbc1, + 0xcaf09aae, 0xa25ab213, 0x48a6b46c, 0x1b9d9bcb, 0x092c5be6, 0x546ca624, 0x1bec45d5, + 0x87f47473, 0x96f0992e, + ]; + let expected_end = 3 * 16; + let mut results = [0u32; 16]; + + // Test block 2 by skipping block 0 and 1 + let mut rng1 = ChaChaRng::from_seed(seed); + for _ in 0..32 { + rng1.next_u32(); + } + for i in results.iter_mut() { + *i = rng1.next_u32(); + } + assert_eq!(results, expected); + assert_eq!(rng1.get_word_pos(), expected_end); + + // Test block 2 by using `set_word_pos` + let mut rng2 = ChaChaRng::from_seed(seed); + rng2.set_word_pos(2 * 16); + for i in results.iter_mut() { + *i = rng2.next_u32(); + } + assert_eq!(results, expected); + 
assert_eq!(rng2.get_word_pos(), expected_end); + + // Test skipping behaviour with other types + let mut buf = [0u8; 32]; + rng2.fill_bytes(&mut buf[..]); + assert_eq!(rng2.get_word_pos(), expected_end + 8); + rng2.fill_bytes(&mut buf[0..25]); + assert_eq!(rng2.get_word_pos(), expected_end + 15); + rng2.next_u64(); + assert_eq!(rng2.get_word_pos(), expected_end + 17); + rng2.next_u32(); + rng2.next_u64(); + assert_eq!(rng2.get_word_pos(), expected_end + 20); + rng2.fill_bytes(&mut buf[0..1]); + assert_eq!(rng2.get_word_pos(), expected_end + 21); + } + + #[test] + fn test_chacha_multiple_blocks() { + let seed = [ + 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6, 0, 0, 0, 7, + 0, 0, 0, + ]; + let mut rng = ChaChaRng::from_seed(seed); + + // Store the 17*i-th 32-bit word, + // i.e., the i-th word of the i-th 16-word block + let mut results = [0u32; 16]; + for i in results.iter_mut() { + *i = rng.next_u32(); + for _ in 0..16 { + rng.next_u32(); + } + } + let expected = [ + 0xf225c81a, 0x6ab1be57, 0x04d42951, 0x70858036, 0x49884684, 0x64efec72, 0x4be2d186, + 0x3615b384, 0x11cfa18e, 0xd3c50049, 0x75c775f6, 0x434c6530, 0x2c5bad8f, 0x898881dc, + 0x5f1c86d9, 0xc1f8e7f4, + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_chacha_true_bytes() { + let seed = [0u8; 32]; + let mut rng = ChaChaRng::from_seed(seed); + let mut results = [0u8; 32]; + rng.fill_bytes(&mut results); + let expected = [ + 118, 184, 224, 173, 160, 241, 61, 144, 64, 93, 106, 229, 83, 134, 189, 40, 189, 210, + 25, 184, 160, 141, 237, 26, 168, 54, 239, 204, 139, 119, 13, 199, + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_chacha_nonce() { + // Test vector 5 from + // https://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04 + // Although we do not support setting a nonce, we try it here anyway so + // we can use this test vector. 
+ let seed = [0u8; 32]; + let mut rng = ChaChaRng::from_seed(seed); + // 96-bit nonce in LE order is: 0,0,0,0, 0,0,0,0, 0,0,0,2 + rng.set_stream(2u64 << (24 + 32)); + + let mut results = [0u32; 16]; + for i in results.iter_mut() { + *i = rng.next_u32(); + } + let expected = [ + 0x374dc6c2, 0x3736d58c, 0xb904e24a, 0xcd3f93ef, 0x88228b1a, 0x96a4dfb3, 0x5b76ab72, + 0xc727ee54, 0x0e0e978a, 0xf3145c95, 0x1b748ea8, 0xf786c297, 0x99c28f5f, 0x628314e8, + 0x398a19fa, 0x6ded1b53, + ]; + assert_eq!(results, expected); + } + + #[test] + fn test_chacha_clone_streams() { + let seed = [ + 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6, 0, 0, 0, 7, + 0, 0, 0, + ]; + let mut rng = ChaChaRng::from_seed(seed); + let mut clone = rng.clone(); + for _ in 0..16 { + assert_eq!(rng.next_u64(), clone.next_u64()); + } + + rng.set_stream(51); + for _ in 0..7 { + assert!(rng.next_u32() != clone.next_u32()); + } + clone.set_stream(51); // switch part way through block + for _ in 7..16 { + assert_eq!(rng.next_u32(), clone.next_u32()); + } + } + + #[test] + fn test_chacha_word_pos_wrap_exact() { + use super::{BUF_BLOCKS, BLOCK_WORDS}; + let mut rng = ChaChaRng::from_seed(Default::default()); + // refilling the buffer in set_word_pos will wrap the block counter to 0 + let last_block = (1 << 68) - u128::from(BUF_BLOCKS * BLOCK_WORDS); + rng.set_word_pos(last_block); + assert_eq!(rng.get_word_pos(), last_block); + } + + #[test] + fn test_chacha_word_pos_wrap_excess() { + use super::BLOCK_WORDS; + let mut rng = ChaChaRng::from_seed(Default::default()); + // refilling the buffer in set_word_pos will wrap the block counter past 0 + let last_block = (1 << 68) - u128::from(BLOCK_WORDS); + rng.set_word_pos(last_block); + assert_eq!(rng.get_word_pos(), last_block); + } + + #[test] + fn test_chacha_word_pos_zero() { + let mut rng = ChaChaRng::from_seed(Default::default()); + assert_eq!(rng.get_word_pos(), 0); + rng.set_word_pos(0); + assert_eq!(rng.get_word_pos(), 0); + } +} diff 
--git a/src/rust/vendor/rand_chacha/src/guts.rs b/src/rust/vendor/rand_chacha/src/guts.rs new file mode 100644 index 000000000..cee8cf75d --- /dev/null +++ b/src/rust/vendor/rand_chacha/src/guts.rs @@ -0,0 +1,273 @@ +// Copyright 2019 The CryptoCorrosion Contributors +// Copyright 2020 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The ChaCha random number generator. + +use ppv_lite86::{dispatch, dispatch_light128}; + +pub use ppv_lite86::Machine; +use ppv_lite86::{vec128_storage, ArithOps, BitOps32, LaneWords4, MultiLane, StoreBytes, Vec4}; + +pub(crate) const BLOCK: usize = 64; +pub(crate) const BLOCK64: u64 = BLOCK as u64; +const LOG2_BUFBLOCKS: u64 = 2; +const BUFBLOCKS: u64 = 1 << LOG2_BUFBLOCKS; +pub(crate) const BUFSZ64: u64 = BLOCK64 * BUFBLOCKS; +pub(crate) const BUFSZ: usize = BUFSZ64 as usize; + +const STREAM_PARAM_NONCE: u32 = 1; +const STREAM_PARAM_BLOCK: u32 = 0; + +#[derive(Clone, PartialEq, Eq)] +pub struct ChaCha { + pub(crate) b: vec128_storage, + pub(crate) c: vec128_storage, + pub(crate) d: vec128_storage, +} + +#[derive(Clone)] +pub struct State { + pub(crate) a: V, + pub(crate) b: V, + pub(crate) c: V, + pub(crate) d: V, +} + +#[inline(always)] +pub(crate) fn round(mut x: State) -> State { + x.a += x.b; + x.d = (x.d ^ x.a).rotate_each_word_right16(); + x.c += x.d; + x.b = (x.b ^ x.c).rotate_each_word_right20(); + x.a += x.b; + x.d = (x.d ^ x.a).rotate_each_word_right24(); + x.c += x.d; + x.b = (x.b ^ x.c).rotate_each_word_right25(); + x +} + +#[inline(always)] +pub(crate) fn diagonalize(mut x: State) -> State { + x.b = x.b.shuffle_lane_words3012(); + x.c = x.c.shuffle_lane_words2301(); + x.d = x.d.shuffle_lane_words1230(); + x +} +#[inline(always)] +pub(crate) fn undiagonalize(mut x: State) -> State { + x.b = x.b.shuffle_lane_words1230(); + x.c = 
x.c.shuffle_lane_words2301(); + x.d = x.d.shuffle_lane_words3012(); + x +} + +impl ChaCha { + #[inline(always)] + pub fn new(key: &[u8; 32], nonce: &[u8]) -> Self { + init_chacha(key, nonce) + } + + #[inline(always)] + fn pos64(&self, m: M) -> u64 { + let d: M::u32x4 = m.unpack(self.d); + ((d.extract(1) as u64) << 32) | d.extract(0) as u64 + } + + /// Produce 4 blocks of output, advancing the state + #[inline(always)] + pub fn refill4(&mut self, drounds: u32, out: &mut [u8; BUFSZ]) { + refill_wide(self, drounds, out) + } + + #[inline(always)] + pub fn set_block_pos(&mut self, value: u64) { + set_stream_param(self, STREAM_PARAM_BLOCK, value) + } + + #[inline(always)] + pub fn get_block_pos(&self) -> u64 { + get_stream_param(self, STREAM_PARAM_BLOCK) + } + + #[inline(always)] + pub fn set_nonce(&mut self, value: u64) { + set_stream_param(self, STREAM_PARAM_NONCE, value) + } + + #[inline(always)] + pub fn get_nonce(&self) -> u64 { + get_stream_param(self, STREAM_PARAM_NONCE) + } + + #[inline(always)] + pub fn get_seed(&self) -> [u8; 32] { + get_seed(self) + } +} + +#[allow(clippy::many_single_char_names)] +#[inline(always)] +fn refill_wide_impl( + m: Mach, state: &mut ChaCha, drounds: u32, out: &mut [u8; BUFSZ], +) { + let k = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); + let mut pos = state.pos64(m); + let d0: Mach::u32x4 = m.unpack(state.d); + pos = pos.wrapping_add(1); + let d1 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); + pos = pos.wrapping_add(1); + let d2 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); + pos = pos.wrapping_add(1); + let d3 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); + + let b = m.unpack(state.b); + let c = m.unpack(state.c); + let mut x = State { + a: Mach::u32x4x4::from_lanes([k, k, k, k]), + b: Mach::u32x4x4::from_lanes([b, b, b, b]), + c: Mach::u32x4x4::from_lanes([c, c, c, c]), + d: m.unpack(Mach::u32x4x4::from_lanes([d0, d1, d2, d3]).into()), + }; + for _ in 0..drounds { + x = round(x); 
+ x = undiagonalize(round(diagonalize(x))); + } + let mut pos = state.pos64(m); + let d0: Mach::u32x4 = m.unpack(state.d); + pos = pos.wrapping_add(1); + let d1 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); + pos = pos.wrapping_add(1); + let d2 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); + pos = pos.wrapping_add(1); + let d3 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); + pos = pos.wrapping_add(1); + let d4 = d0.insert((pos >> 32) as u32, 1).insert(pos as u32, 0); + + let (a, b, c, d) = ( + x.a.to_lanes(), + x.b.to_lanes(), + x.c.to_lanes(), + x.d.to_lanes(), + ); + let sb = m.unpack(state.b); + let sc = m.unpack(state.c); + let sd = [m.unpack(state.d), d1, d2, d3]; + state.d = d4.into(); + let mut words = out.chunks_exact_mut(16); + for ((((&a, &b), &c), &d), &sd) in a.iter().zip(&b).zip(&c).zip(&d).zip(&sd) { + (a + k).write_le(words.next().unwrap()); + (b + sb).write_le(words.next().unwrap()); + (c + sc).write_le(words.next().unwrap()); + (d + sd).write_le(words.next().unwrap()); + } +} + +dispatch!(m, Mach, { + fn refill_wide(state: &mut ChaCha, drounds: u32, out: &mut [u8; BUFSZ]) { + refill_wide_impl(m, state, drounds, out); + } +}); + +// Single-block, rounds-only; shared by try_apply_keystream for tails shorter than BUFSZ +// and XChaCha's setup step. 
+dispatch!(m, Mach, { + fn refill_narrow_rounds(state: &mut ChaCha, drounds: u32) -> State { + let k: Mach::u32x4 = m.vec([0x6170_7865, 0x3320_646e, 0x7962_2d32, 0x6b20_6574]); + let mut x = State { + a: k, + b: m.unpack(state.b), + c: m.unpack(state.c), + d: m.unpack(state.d), + }; + for _ in 0..drounds { + x = round(x); + x = undiagonalize(round(diagonalize(x))); + } + State { + a: x.a.into(), + b: x.b.into(), + c: x.c.into(), + d: x.d.into(), + } + } +}); + +dispatch_light128!(m, Mach, { + fn set_stream_param(state: &mut ChaCha, param: u32, value: u64) { + let d: Mach::u32x4 = m.unpack(state.d); + state.d = d + .insert((value >> 32) as u32, (param << 1) | 1) + .insert(value as u32, param << 1) + .into(); + } +}); + +dispatch_light128!(m, Mach, { + fn get_stream_param(state: &ChaCha, param: u32) -> u64 { + let d: Mach::u32x4 = m.unpack(state.d); + ((d.extract((param << 1) | 1) as u64) << 32) | d.extract(param << 1) as u64 + } +}); + +dispatch_light128!(m, Mach, { + fn get_seed(state: &ChaCha) -> [u8; 32] { + let b: Mach::u32x4 = m.unpack(state.b); + let c: Mach::u32x4 = m.unpack(state.c); + let mut key = [0u8; 32]; + b.write_le(&mut key[..16]); + c.write_le(&mut key[16..]); + key + } +}); + +fn read_u32le(xs: &[u8]) -> u32 { + assert_eq!(xs.len(), 4); + u32::from(xs[0]) | (u32::from(xs[1]) << 8) | (u32::from(xs[2]) << 16) | (u32::from(xs[3]) << 24) +} + +dispatch_light128!(m, Mach, { + fn init_chacha(key: &[u8; 32], nonce: &[u8]) -> ChaCha { + let ctr_nonce = [ + 0, + if nonce.len() == 12 { + read_u32le(&nonce[0..4]) + } else { + 0 + }, + read_u32le(&nonce[nonce.len() - 8..nonce.len() - 4]), + read_u32le(&nonce[nonce.len() - 4..]), + ]; + let key0: Mach::u32x4 = m.read_le(&key[..16]); + let key1: Mach::u32x4 = m.read_le(&key[16..]); + ChaCha { + b: key0.into(), + c: key1.into(), + d: ctr_nonce.into(), + } + } +}); + +dispatch_light128!(m, Mach, { + fn init_chacha_x(key: &[u8; 32], nonce: &[u8; 24], rounds: u32) -> ChaCha { + let key0: Mach::u32x4 = 
m.read_le(&key[..16]); + let key1: Mach::u32x4 = m.read_le(&key[16..]); + let nonce0: Mach::u32x4 = m.read_le(&nonce[..16]); + let mut state = ChaCha { + b: key0.into(), + c: key1.into(), + d: nonce0.into(), + }; + let x = refill_narrow_rounds(&mut state, rounds); + let ctr_nonce1 = [0, 0, read_u32le(&nonce[16..20]), read_u32le(&nonce[20..24])]; + state.b = x.a; + state.c = x.d; + state.d = ctr_nonce1.into(); + state + } +}); diff --git a/src/rust/vendor/rand_chacha/src/lib.rs b/src/rust/vendor/rand_chacha/src/lib.rs new file mode 100644 index 000000000..24125b45e --- /dev/null +++ b/src/rust/vendor/rand_chacha/src/lib.rs @@ -0,0 +1,33 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The ChaCha random number generator. + +#![doc( + html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png", + html_favicon_url = "https://www.rust-lang.org/favicon.ico", + html_root_url = "https://rust-random.github.io/rand/" +)] +#![deny(missing_docs)] +#![deny(missing_debug_implementations)] +#![doc(test(attr(allow(unused_variables), deny(warnings))))] +#![cfg_attr(not(feature = "std"), no_std)] + +pub use rand_core; + +mod chacha; +mod guts; + +pub use crate::chacha::{ + ChaCha12Core, ChaCha12Rng, ChaCha20Core, ChaCha20Rng, ChaCha8Core, ChaCha8Rng, +}; + +/// ChaCha with 20 rounds +pub type ChaChaRng = ChaCha20Rng; +/// ChaCha with 20 rounds, low-level interface +pub type ChaChaCore = ChaCha20Core; diff --git a/src/rust/vendor/sharks/.cargo-checksum.json b/src/rust/vendor/sharks/.cargo-checksum.json new file mode 100644 index 000000000..549e731d1 --- /dev/null +++ b/src/rust/vendor/sharks/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"CHANGELOG.md":"3d191c6814ef027226899f390dcce46e60da995c0fd011bb56f81ad614aa6eb7","COPYRIGHT":"8d224c29095bc0e2a472c630cd3e4c48f4eb9ab7189ec15f2c8b0f1f26b3d2dd","Cargo.toml":"31858297af2ad91e3f5cb573f81e00a2cc2a8bacf5a86e2d6a5b771aa8496310","LICENSE-APACHE":"820677332f335a1fbda12a4de1f938425aca9fc5f40d2dd40fd3a46fe7e24dbc","LICENSE-MIT":"f54f36015f212f34a49b65cfbc0d9749ceaac48404df165d9f23cf5268a36aa6","README.md":"c80fcb987eaf37234fd4473217137a107052cfdf5e498e8dfcb0ee88e62dc46e","benches/benchmarks.rs":"865c7ebdaac995416f3b9441b66d39df950dff2635ad67cc75226925f1b2fff5","codecov.yml":"69410c6e3a0833887a1b8e6d0cd7a51c7d95776eabe5188848fc878d433ef158","src/field.rs":"fe9690b31c6bd644fc7731d56520dcc26004cf6e0a3477f81484bd1834988a29","src/lib.rs":"4bcdcc81b66648282116c7378fa95e4fe1b0b06f1ba3755f4b2969faffafb195","src/math.rs":"a0a06af31f32aea84960c589591f7ef7feb3a8dcd88de04c46482caf080cd8e2","src/share.rs":"1ddbf62a8d292f9e167378f4a0b6e54771c1f6fa740c368fb13b37e9e75c08ca"},"package":"902b1e955f8a2e429fb1bad49f83fb952e6195d3c360ac547ff00fb826388753"} \ No newline at end of file diff --git a/src/rust/vendor/sharks/CHANGELOG.md b/src/rust/vendor/sharks/CHANGELOG.md new file mode 100644 index 000000000..44bfd4b48 --- /dev/null +++ b/src/rust/vendor/sharks/CHANGELOG.md @@ -0,0 +1,71 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [0.5.0] - 2021-03-14 +### Added +- Zeroize memory on drop for generated secret shares + +## [0.4.3] - 2021-02-04 +### Changed +- Upgraded project dependencies + +## [0.4.2] - 2020-08-03 +### Fixed +- Small fix in docs + +## [0.4.1] - 2020-04-23 +### Added +- Fuzz tests + +### Fixed +- Unexpected panic when trying to recover secret from different length shares +- Unexpected panic when trying to convert less than 2 bytes to `Share` + +## [0.4.0] - 2020-04-02 +### Added +- It is now possible to compile without `std` with `--no-default-features` + +## [0.3.3] - 2020-03-23 +### Changed +- Fix codecov badge + +## [0.3.2] - 2020-03-09 +### Changed +- Share structs now derives the `Clone` trait + +## [0.3.1] - 2020-01-23 +### Changed +- Sharks recover method now accepts any iterable collection + +## [0.3.0] - 2020-01-22 +### Added +- Share struct which allows to convert from/to byte vectors + +### Changed +- Methods use the new Share struct, instead of (GF245, Vec) tuples + +## [0.2.0] - 2020-01-21 +### Added +- Computations performed over GF256 (much faster) +- Secret can now be arbitrarily long + +### Changed +- Some method names and docs +- Maximum number of shares enforced by Rust static types instead of conditional branching + +### Removed +- Modular arithmetic around Mersenne primes + +## [0.1.1] - 2020-01-13 +### Fixed +- Typo in cargo description + +### Removed +- Maintenance badges in cargo file + +## [0.1.0] - 2020-01-13 +### Added +- Initial version diff --git a/src/rust/vendor/sharks/COPYRIGHT b/src/rust/vendor/sharks/COPYRIGHT new file mode 100644 index 000000000..38c3e0d57 --- /dev/null +++ b/src/rust/vendor/sharks/COPYRIGHT @@ -0,0 +1,12 @@ +Copyrights in the Sharks project are retained by their contributors. No +copyright assignment is required to contribute to the Sharks project. + +For full authorship information, see the version control history. 
+ +Except as otherwise noted (below and/or in individual files), Sharks is +licensed under the Apache License, Version 2.0 or + or the MIT license + or , at your option. + +The Sharks project includes code from the Rust project +published under these same licenses. diff --git a/src/rust/vendor/sharks/Cargo.toml b/src/rust/vendor/sharks/Cargo.toml new file mode 100644 index 000000000..e7ef3ce93 --- /dev/null +++ b/src/rust/vendor/sharks/Cargo.toml @@ -0,0 +1,60 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "sharks" +version = "0.5.0" +authors = ["Aitor Ruano "] +description = "Fast, small and secure Shamir's Secret Sharing library crate" +homepage = "https://github.com/c0dearm/sharks" +readme = "README.md" +keywords = ["shamir", "secret", "sharing", "share", "crypto"] +categories = ["algorithms", "cryptography", "mathematics"] +license = "MIT/Apache-2.0" +repository = "https://github.com/c0dearm/sharks" + +[[bench]] +name = "benchmarks" +harness = false +[dependencies.arbitrary] +version = "0.4.7" +features = ["derive"] +optional = true + +[dependencies.hashbrown] +version = "0.9" + +[dependencies.rand] +version = "0.8" +default-features = false + +[dependencies.zeroize] +version = "1.2.0" +features = ["zeroize_derive"] +optional = true +[dev-dependencies.criterion] +version = "0.3" + +[dev-dependencies.rand_chacha] +version = "0.3" + +[features] +default = ["std", "zeroize_memory"] +fuzzing = ["std", "arbitrary"] +std = ["rand/std", 
"rand/std_rng"] +zeroize_memory = ["zeroize"] +[badges.codecov] +repository = "c0dearm/sharks" + +[badges.maintenance] +status = "actively-developed" diff --git a/src/rust/vendor/sharks/LICENSE-APACHE b/src/rust/vendor/sharks/LICENSE-APACHE new file mode 100644 index 000000000..af1551deb --- /dev/null +++ b/src/rust/vendor/sharks/LICENSE-APACHE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2020 Aitor Ruano Miralles + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/rust/vendor/sharks/LICENSE-MIT b/src/rust/vendor/sharks/LICENSE-MIT new file mode 100644 index 000000000..c7829df5d --- /dev/null +++ b/src/rust/vendor/sharks/LICENSE-MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Aitor Ruano Miralles + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/src/rust/vendor/sharks/README.md b/src/rust/vendor/sharks/README.md new file mode 100644 index 000000000..565f0fa1c --- /dev/null +++ b/src/rust/vendor/sharks/README.md @@ -0,0 +1,77 @@ +# Sharks + +[![Rust](https://github.com/c0dearm/sharks/workflows/Rust/badge.svg?branch=master)](https://github.com/c0dearm/sharks/actions) +[![Crates](https://img.shields.io/crates/v/sharks.svg)](https://crates.io/crates/sharks) +[![Docs](https://docs.rs/sharks/badge.svg)](https://docs.rs/sharks) +[![Codecov](https://codecov.io/gh/c0dearm/sharks/branch/master/graph/badge.svg)](https://codecov.io/gh/c0dearm/sharks) +[![License](https://camo.githubusercontent.com/47069b7e06b64b608c692a8a7f40bc6915cf629c/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f6c6963656e73652d417061636865322e302532464d49542d626c75652e737667)](https://github.com/c0dearm/sharks/blob/master/COPYRIGHT) + +Fast, small and secure [Shamir's Secret Sharing](https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing) library crate + +Documentation: +- [API reference (docs.rs)](https://docs.rs/sharks) + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +sharks = "0.4" +``` + +If your environment doesn't support `std`: + +```toml +[dependencies] +sharks = { version = "0.4", default-features = false } +``` + +To get started using Sharks, see the [Rust docs](https://docs.rs/sharks) + +## Features + +### Developer friendly +The API is simple and to the point, with minimal configuration. + +### Fast and small +The code is as idiomatic and clean as possible, with minimum external dependencies. + +### Secure by design +The implementation forbids the user to choose parameters that would result in an insecure application, +like generating more shares than what's allowed by the finite field length. 
+ +## Limitations + +Because the Galois finite field it uses is [GF256](https://en.wikipedia.org/wiki/Finite_field#GF(p2)_for_an_odd_prime_p), +only up to 255 shares can be generated for a given secret. A larger number would be insecure as shares would start duplicating. +Nevertheless, the secret can be arbitrarily long as computations are performed on single byte chunks. + +## Testing + +This crate contains both unit and benchmark tests (as well as the examples included in the docs). +You can run them with `cargo test` and `cargo bench`. + +### Benchmark results [min mean max] + +| CPU | obtain_shares_dealer | step_shares_dealer | recover_secret | share_from_bytes | share_to_bytes | +| ----------------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| Intel(R) Core(TM) i7-8550U CPU @ 1.80GHz | [1.4321 us 1.4339 us 1.4357 us] | [1.3385 ns 1.3456 ns 1.3552 ns] | [228.77 us 232.17 us 236.23 us] | [24.688 ns 25.083 ns 25.551 ns] | [22.832 ns 22.910 ns 22.995 ns] | +| Intel(R) Core(TM) i7-8565U CPU @ 1.80GHz | [1.3439 us 1.3499 us 1.3562 us] | [1.5416 ns 1.5446 ns 1.5481 ns] | [197.46 us 198.37 us 199.22 us] | [20.455 ns 20.486 ns 20.518 ns] | [18.726 ns 18.850 ns 18.993 ns] | +| Apple M1 ARM (Macbook Air) | [3.3367 us 3.3629 us 3.4058 us] | [741.75 ps 742.65 ps 743.52 ps] | [210.14 us 210.23 us 210.34 us] | [27.567 ns 27.602 ns 27.650 ns] | [26.716 ns 26.735 ns 26.755 ns] | + +# Contributing + +If you find a vulnerability, bug or would like a new feature, [open a new issue](https://github.com/c0dearm/sharks/issues/new). + +To introduce your changes into the codebase, submit a Pull Request. + +Many thanks! + +# License + +Sharks is distributed under the terms of both the MIT license and the +Apache License (Version 2.0). 
+ +See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT), and +[COPYRIGHT](COPYRIGHT) for details. diff --git a/src/rust/vendor/sharks/benches/benchmarks.rs b/src/rust/vendor/sharks/benches/benchmarks.rs new file mode 100644 index 000000000..b80eb1793 --- /dev/null +++ b/src/rust/vendor/sharks/benches/benchmarks.rs @@ -0,0 +1,40 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use std::convert::TryFrom; + +use sharks::{Share, Sharks}; + +fn dealer(c: &mut Criterion) { + let sharks = Sharks(255); + let mut dealer = sharks.dealer(&[1]); + + c.bench_function("obtain_shares_dealer", |b| { + b.iter(|| sharks.dealer(black_box(&[1]))) + }); + c.bench_function("step_shares_dealer", |b| b.iter(|| dealer.next())); +} + +fn recover(c: &mut Criterion) { + let sharks = Sharks(255); + let shares: Vec = sharks.dealer(&[1]).take(255).collect(); + + c.bench_function("recover_secret", |b| { + b.iter(|| sharks.recover(black_box(shares.as_slice()))) + }); +} + +fn share(c: &mut Criterion) { + let bytes_vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let bytes = bytes_vec.as_slice(); + let share = Share::try_from(bytes).unwrap(); + + c.bench_function("share_from_bytes", |b| { + b.iter(|| Share::try_from(black_box(bytes))) + }); + + c.bench_function("share_to_bytes", |b| { + b.iter(|| Vec::from(black_box(&share))) + }); +} + +criterion_group!(benches, dealer, recover, share); +criterion_main!(benches); diff --git a/src/rust/vendor/sharks/codecov.yml b/src/rust/vendor/sharks/codecov.yml new file mode 100644 index 000000000..24b374239 --- /dev/null +++ b/src/rust/vendor/sharks/codecov.yml @@ -0,0 +1,20 @@ +codecov: + require_ci_to_pass: yes + +coverage: + precision: 2 + round: down + range: "90...100" + +parsers: + gcov: + branch_detection: + conditional: yes + loop: yes + method: no + macro: no + +comment: + layout: "reach,diff,flags,tree" + behavior: default + require_changes: no diff --git a/src/rust/vendor/sharks/src/field.rs 
b/src/rust/vendor/sharks/src/field.rs new file mode 100644 index 000000000..05b49606f --- /dev/null +++ b/src/rust/vendor/sharks/src/field.rs @@ -0,0 +1,228 @@ +// Basic operations overrided for the Galois Field 256 (2**8) +// Uses pre-calculated tables for 0x11d primitive polynomial (x**8 + x**4 + x**3 + x**2 + 1) + +use core::iter::{Product, Sum}; +use core::ops::{Add, Div, Mul, Sub}; + +#[cfg(feature = "fuzzing")] +use arbitrary::Arbitrary; + +#[cfg(feature = "zeroize_memory")] +use zeroize::Zeroize; + +const LOG_TABLE: [u8; 256] = [ + 0x00, 0x00, 0x01, 0x19, 0x02, 0x32, 0x1a, 0xc6, 0x03, 0xdf, 0x33, 0xee, 0x1b, 0x68, 0xc7, 0x4b, + 0x04, 0x64, 0xe0, 0x0e, 0x34, 0x8d, 0xef, 0x81, 0x1c, 0xc1, 0x69, 0xf8, 0xc8, 0x08, 0x4c, 0x71, + 0x05, 0x8a, 0x65, 0x2f, 0xe1, 0x24, 0x0f, 0x21, 0x35, 0x93, 0x8e, 0xda, 0xf0, 0x12, 0x82, 0x45, + 0x1d, 0xb5, 0xc2, 0x7d, 0x6a, 0x27, 0xf9, 0xb9, 0xc9, 0x9a, 0x09, 0x78, 0x4d, 0xe4, 0x72, 0xa6, + 0x06, 0xbf, 0x8b, 0x62, 0x66, 0xdd, 0x30, 0xfd, 0xe2, 0x98, 0x25, 0xb3, 0x10, 0x91, 0x22, 0x88, + 0x36, 0xd0, 0x94, 0xce, 0x8f, 0x96, 0xdb, 0xbd, 0xf1, 0xd2, 0x13, 0x5c, 0x83, 0x38, 0x46, 0x40, + 0x1e, 0x42, 0xb6, 0xa3, 0xc3, 0x48, 0x7e, 0x6e, 0x6b, 0x3a, 0x28, 0x54, 0xfa, 0x85, 0xba, 0x3d, + 0xca, 0x5e, 0x9b, 0x9f, 0x0a, 0x15, 0x79, 0x2b, 0x4e, 0xd4, 0xe5, 0xac, 0x73, 0xf3, 0xa7, 0x57, + 0x07, 0x70, 0xc0, 0xf7, 0x8c, 0x80, 0x63, 0x0d, 0x67, 0x4a, 0xde, 0xed, 0x31, 0xc5, 0xfe, 0x18, + 0xe3, 0xa5, 0x99, 0x77, 0x26, 0xb8, 0xb4, 0x7c, 0x11, 0x44, 0x92, 0xd9, 0x23, 0x20, 0x89, 0x2e, + 0x37, 0x3f, 0xd1, 0x5b, 0x95, 0xbc, 0xcf, 0xcd, 0x90, 0x87, 0x97, 0xb2, 0xdc, 0xfc, 0xbe, 0x61, + 0xf2, 0x56, 0xd3, 0xab, 0x14, 0x2a, 0x5d, 0x9e, 0x84, 0x3c, 0x39, 0x53, 0x47, 0x6d, 0x41, 0xa2, + 0x1f, 0x2d, 0x43, 0xd8, 0xb7, 0x7b, 0xa4, 0x76, 0xc4, 0x17, 0x49, 0xec, 0x7f, 0x0c, 0x6f, 0xf6, + 0x6c, 0xa1, 0x3b, 0x52, 0x29, 0x9d, 0x55, 0xaa, 0xfb, 0x60, 0x86, 0xb1, 0xbb, 0xcc, 0x3e, 0x5a, + 0xcb, 0x59, 0x5f, 0xb0, 0x9c, 0xa9, 0xa0, 0x51, 0x0b, 0xf5, 0x16, 0xeb, 0x7a, 
0x75, 0x2c, 0xd7, + 0x4f, 0xae, 0xd5, 0xe9, 0xe6, 0xe7, 0xad, 0xe8, 0x74, 0xd6, 0xf4, 0xea, 0xa8, 0x50, 0x58, 0xaf, +]; + +const EXP_TABLE: [u8; 512] = [ + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1d, 0x3a, 0x74, 0xe8, 0xcd, 0x87, 0x13, 0x26, + 0x4c, 0x98, 0x2d, 0x5a, 0xb4, 0x75, 0xea, 0xc9, 0x8f, 0x03, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, + 0x9d, 0x27, 0x4e, 0x9c, 0x25, 0x4a, 0x94, 0x35, 0x6a, 0xd4, 0xb5, 0x77, 0xee, 0xc1, 0x9f, 0x23, + 0x46, 0x8c, 0x05, 0x0a, 0x14, 0x28, 0x50, 0xa0, 0x5d, 0xba, 0x69, 0xd2, 0xb9, 0x6f, 0xde, 0xa1, + 0x5f, 0xbe, 0x61, 0xc2, 0x99, 0x2f, 0x5e, 0xbc, 0x65, 0xca, 0x89, 0x0f, 0x1e, 0x3c, 0x78, 0xf0, + 0xfd, 0xe7, 0xd3, 0xbb, 0x6b, 0xd6, 0xb1, 0x7f, 0xfe, 0xe1, 0xdf, 0xa3, 0x5b, 0xb6, 0x71, 0xe2, + 0xd9, 0xaf, 0x43, 0x86, 0x11, 0x22, 0x44, 0x88, 0x0d, 0x1a, 0x34, 0x68, 0xd0, 0xbd, 0x67, 0xce, + 0x81, 0x1f, 0x3e, 0x7c, 0xf8, 0xed, 0xc7, 0x93, 0x3b, 0x76, 0xec, 0xc5, 0x97, 0x33, 0x66, 0xcc, + 0x85, 0x17, 0x2e, 0x5c, 0xb8, 0x6d, 0xda, 0xa9, 0x4f, 0x9e, 0x21, 0x42, 0x84, 0x15, 0x2a, 0x54, + 0xa8, 0x4d, 0x9a, 0x29, 0x52, 0xa4, 0x55, 0xaa, 0x49, 0x92, 0x39, 0x72, 0xe4, 0xd5, 0xb7, 0x73, + 0xe6, 0xd1, 0xbf, 0x63, 0xc6, 0x91, 0x3f, 0x7e, 0xfc, 0xe5, 0xd7, 0xb3, 0x7b, 0xf6, 0xf1, 0xff, + 0xe3, 0xdb, 0xab, 0x4b, 0x96, 0x31, 0x62, 0xc4, 0x95, 0x37, 0x6e, 0xdc, 0xa5, 0x57, 0xae, 0x41, + 0x82, 0x19, 0x32, 0x64, 0xc8, 0x8d, 0x07, 0x0e, 0x1c, 0x38, 0x70, 0xe0, 0xdd, 0xa7, 0x53, 0xa6, + 0x51, 0xa2, 0x59, 0xb2, 0x79, 0xf2, 0xf9, 0xef, 0xc3, 0x9b, 0x2b, 0x56, 0xac, 0x45, 0x8a, 0x09, + 0x12, 0x24, 0x48, 0x90, 0x3d, 0x7a, 0xf4, 0xf5, 0xf7, 0xf3, 0xfb, 0xeb, 0xcb, 0x8b, 0x0b, 0x16, + 0x2c, 0x58, 0xb0, 0x7d, 0xfa, 0xe9, 0xcf, 0x83, 0x1b, 0x36, 0x6c, 0xd8, 0xad, 0x47, 0x8e, 0x01, + 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1d, 0x3a, 0x74, 0xe8, 0xcd, 0x87, 0x13, 0x26, 0x4c, + 0x98, 0x2d, 0x5a, 0xb4, 0x75, 0xea, 0xc9, 0x8f, 0x03, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x9d, + 0x27, 0x4e, 0x9c, 0x25, 0x4a, 0x94, 0x35, 0x6a, 0xd4, 0xb5, 0x77, 0xee, 0xc1, 
0x9f, 0x23, 0x46, + 0x8c, 0x05, 0x0a, 0x14, 0x28, 0x50, 0xa0, 0x5d, 0xba, 0x69, 0xd2, 0xb9, 0x6f, 0xde, 0xa1, 0x5f, + 0xbe, 0x61, 0xc2, 0x99, 0x2f, 0x5e, 0xbc, 0x65, 0xca, 0x89, 0x0f, 0x1e, 0x3c, 0x78, 0xf0, 0xfd, + 0xe7, 0xd3, 0xbb, 0x6b, 0xd6, 0xb1, 0x7f, 0xfe, 0xe1, 0xdf, 0xa3, 0x5b, 0xb6, 0x71, 0xe2, 0xd9, + 0xaf, 0x43, 0x86, 0x11, 0x22, 0x44, 0x88, 0x0d, 0x1a, 0x34, 0x68, 0xd0, 0xbd, 0x67, 0xce, 0x81, + 0x1f, 0x3e, 0x7c, 0xf8, 0xed, 0xc7, 0x93, 0x3b, 0x76, 0xec, 0xc5, 0x97, 0x33, 0x66, 0xcc, 0x85, + 0x17, 0x2e, 0x5c, 0xb8, 0x6d, 0xda, 0xa9, 0x4f, 0x9e, 0x21, 0x42, 0x84, 0x15, 0x2a, 0x54, 0xa8, + 0x4d, 0x9a, 0x29, 0x52, 0xa4, 0x55, 0xaa, 0x49, 0x92, 0x39, 0x72, 0xe4, 0xd5, 0xb7, 0x73, 0xe6, + 0xd1, 0xbf, 0x63, 0xc6, 0x91, 0x3f, 0x7e, 0xfc, 0xe5, 0xd7, 0xb3, 0x7b, 0xf6, 0xf1, 0xff, 0xe3, + 0xdb, 0xab, 0x4b, 0x96, 0x31, 0x62, 0xc4, 0x95, 0x37, 0x6e, 0xdc, 0xa5, 0x57, 0xae, 0x41, 0x82, + 0x19, 0x32, 0x64, 0xc8, 0x8d, 0x07, 0x0e, 0x1c, 0x38, 0x70, 0xe0, 0xdd, 0xa7, 0x53, 0xa6, 0x51, + 0xa2, 0x59, 0xb2, 0x79, 0xf2, 0xf9, 0xef, 0xc3, 0x9b, 0x2b, 0x56, 0xac, 0x45, 0x8a, 0x09, 0x12, + 0x24, 0x48, 0x90, 0x3d, 0x7a, 0xf4, 0xf5, 0xf7, 0xf3, 0xfb, 0xeb, 0xcb, 0x8b, 0x0b, 0x16, 0x2c, + 0x58, 0xb0, 0x7d, 0xfa, 0xe9, 0xcf, 0x83, 0x1b, 0x36, 0x6c, 0xd8, 0xad, 0x47, 0x8e, 0x01, 0x02, +]; + +#[derive(Debug, PartialEq, Clone)] +#[cfg_attr(feature = "fuzzing", derive(Arbitrary))] +#[cfg_attr(feature = "zeroize_memory", derive(Zeroize))] +#[cfg_attr(feature = "zeroize_memory", zeroize(drop))] +pub struct GF256(pub u8); + +#[allow(clippy::suspicious_arithmetic_impl)] +impl Add for GF256 { + type Output = GF256; + + fn add(self, other: Self) -> Self::Output { + Self(self.0 ^ other.0) + } +} + +#[allow(clippy::suspicious_arithmetic_impl)] +impl Sub for GF256 { + type Output = Self; + + fn sub(self, other: Self) -> Self::Output { + Self(self.0 ^ other.0) + } +} + +#[allow(clippy::suspicious_arithmetic_impl)] +impl Mul for GF256 { + type Output = Self; + + fn mul(self, other: Self) -> 
Self::Output { + let log_x = LOG_TABLE[self.0 as usize] as usize; + let log_y = LOG_TABLE[other.0 as usize] as usize; + + if self.0 == 0 || other.0 == 0 { + Self(0) + } else { + Self(EXP_TABLE[log_x + log_y]) + } + } +} + +#[allow(clippy::suspicious_arithmetic_impl)] +impl Div for GF256 { + type Output = Self; + + fn div(self, other: Self) -> Self::Output { + let log_x = LOG_TABLE[self.0 as usize] as usize; + let log_y = LOG_TABLE[other.0 as usize] as usize; + + if self.0 == 0 { + Self(0) + } else { + Self(EXP_TABLE[log_x + 255 - log_y]) + } + } +} + +impl Sum for GF256 { + fn sum>(iter: I) -> Self { + iter.fold(Self(0), |acc, x| acc + x) + } +} + +impl Product for GF256 { + fn product>(iter: I) -> Self { + iter.fold(Self(1), |acc, x| acc * x) + } +} + +#[cfg(test)] +mod tests { + use super::{EXP_TABLE, GF256, LOG_TABLE}; + use alloc::vec; + + #[test] + fn add_works() { + let answers: [u8; 256] = [ + 1, 2, 5, 17, 18, 18, 90, 70, 30, 229, 71, 6, 214, 239, 212, 109, 72, 252, 205, 84, 128, + 248, 5, 72, 147, 194, 111, 244, 208, 56, 44, 177, 152, 173, 43, 179, 196, 110, 155, 20, + 95, 71, 59, 173, 30, 211, 29, 102, 91, 57, 199, 119, 126, 15, 169, 25, 148, 32, 96, + 170, 244, 139, 172, 7, 89, 1, 234, 160, 255, 242, 110, 65, 135, 82, 172, 188, 14, 173, + 90, 120, 203, 55, 71, 117, 228, 64, 106, 194, 15, 51, 204, 255, 216, 142, 55, 162, 199, + 237, 245, 37, 210, 106, 58, 230, 102, 32, 28, 60, 42, 56, 221, 243, 75, 65, 165, 227, + 242, 248, 190, 184, 117, 162, 9, 105, 228, 192, 193, 155, 130, 103, 238, 171, 52, 237, + 185, 164, 40, 212, 255, 175, 181, 208, 212, 76, 75, 232, 3, 94, 116, 28, 225, 214, 88, + 214, 171, 171, 199, 245, 62, 93, 209, 238, 110, 56, 83, 45, 240, 179, 108, 98, 64, 1, + 167, 10, 79, 158, 17, 141, 120, 224, 130, 27, 63, 90, 17, 11, 87, 143, 226, 58, 239, + 227, 157, 52, 113, 188, 127, 246, 163, 120, 216, 47, 57, 12, 162, 171, 60, 80, 61, 3, + 98, 224, 80, 111, 172, 69, 56, 251, 173, 231, 23, 137, 180, 83, 217, 125, 23, 32, 161, + 211, 84, 164, 252, 6, 
237, 0, 177, 254, 39, 193, 99, 246, 101, 148, 28, 14, 98, 107, + 111, 224, 152, 50, 5, 23, 214, 174, + ]; + + for (i, a) in answers.iter().enumerate() { + assert_eq!((GF256(LOG_TABLE[i]) + GF256(EXP_TABLE[i])).0, *a); + } + } + + #[test] + fn sub_works() { + add_works(); + } + + #[test] + fn mul_works() { + let answers: [u8; 256] = [ + 0, 0, 4, 200, 32, 14, 206, 179, 39, 134, 169, 160, 32, 59, 184, 50, 45, 121, 69, 43, + 102, 43, 139, 169, 18, 94, 107, 84, 18, 157, 159, 51, 211, 1, 52, 13, 51, 128, 31, 219, + 240, 230, 212, 219, 197, 19, 11, 135, 93, 163, 237, 53, 91, 177, 135, 124, 240, 224, 6, + 158, 167, 155, 155, 38, 223, 144, 70, 54, 50, 45, 134, 170, 126, 223, 103, 207, 253, + 176, 75, 98, 137, 87, 59, 50, 208, 116, 29, 200, 128, 82, 13, 138, 107, 53, 42, 34, + 123, 203, 65, 174, 111, 101, 19, 78, 165, 62, 115, 108, 175, 139, 126, 107, 55, 196, + 30, 209, 126, 8, 15, 211, 57, 191, 37, 254, 24, 136, 30, 111, 188, 30, 209, 208, 49, + 132, 181, 22, 207, 241, 28, 2, 97, 58, 244, 179, 190, 120, 249, 174, 99, 6, 215, 232, + 173, 1, 20, 216, 224, 191, 247, 78, 223, 101, 153, 1, 182, 203, 213, 75, 132, 98, 53, + 204, 13, 177, 22, 88, 218, 21, 32, 68, 247, 153, 11, 190, 47, 128, 214, 33, 110, 194, + 102, 77, 5, 178, 74, 65, 134, 62, 91, 190, 133, 15, 134, 94, 37, 247, 205, 51, 224, + 152, 15, 13, 13, 233, 189, 206, 100, 131, 222, 5, 70, 182, 231, 176, 167, 150, 156, + 249, 29, 189, 96, 149, 239, 162, 43, 239, 89, 8, 9, 57, 118, 227, 168, 243, 164, 188, + 125, 8, 8, 240, 36, 45, 21, 20, 44, 175, + ]; + + for (i, a) in answers.iter().enumerate() { + assert_eq!((GF256(LOG_TABLE[i]) * GF256(EXP_TABLE[i])).0, *a); + } + } + + #[test] + fn div_works() { + let answers: [u8; 256] = [ + 0, 0, 71, 174, 173, 87, 134, 213, 152, 231, 124, 39, 203, 113, 13, 198, 88, 171, 55, + 150, 177, 227, 25, 225, 227, 180, 157, 225, 252, 122, 88, 161, 45, 87, 148, 78, 40, + 165, 74, 134, 142, 120, 121, 163, 156, 75, 154, 241, 239, 27, 152, 130, 125, 235, 230, + 32, 138, 225, 145, 90, 214, 226, 
182, 168, 155, 175, 179, 124, 105, 169, 249, 58, 201, + 14, 155, 217, 196, 254, 201, 143, 229, 12, 178, 24, 100, 226, 163, 234, 177, 36, 75, + 106, 114, 208, 162, 63, 235, 181, 108, 131, 248, 51, 190, 187, 235, 115, 112, 37, 79, + 90, 112, 237, 195, 121, 136, 110, 174, 143, 113, 134, 229, 255, 35, 175, 156, 208, 240, + 222, 94, 202, 228, 34, 123, 23, 48, 18, 122, 114, 75, 243, 212, 139, 56, 132, 157, 119, + 219, 170, 236, 11, 51, 86, 224, 221, 142, 200, 154, 136, 179, 72, 3, 32, 142, 149, 180, + 209, 253, 17, 210, 134, 162, 106, 38, 108, 154, 154, 74, 181, 115, 142, 204, 195, 23, + 162, 178, 41, 9, 90, 190, 14, 2, 45, 227, 253, 115, 93, 155, 244, 83, 219, 11, 196, + 167, 241, 33, 60, 103, 69, 181, 189, 145, 130, 174, 137, 65, 65, 45, 153, 79, 236, 199, + 209, 41, 10, 205, 44, 182, 38, 222, 209, 253, 247, 64, 71, 32, 1, 27, 53, 4, 110, 170, + 221, 215, 4, 179, 163, 64, 90, 152, 163, 235, 6, 41, 93, 176, 175, + ]; + + for (i, a) in answers.iter().enumerate() { + assert_eq!((GF256(LOG_TABLE[i]) / GF256(EXP_TABLE[i])).0, *a); + } + } + + #[test] + fn sum_works() { + let values = vec![GF256(0x53), GF256(0xCA), GF256(0)]; + assert_eq!(values.into_iter().sum::().0, 0x99); + } + + #[test] + fn product_works() { + let values = vec![GF256(1), GF256(1), GF256(4)]; + assert_eq!(values.into_iter().product::().0, 4); + } +} diff --git a/src/rust/vendor/sharks/src/lib.rs b/src/rust/vendor/sharks/src/lib.rs new file mode 100644 index 000000000..b76467cd4 --- /dev/null +++ b/src/rust/vendor/sharks/src/lib.rs @@ -0,0 +1,218 @@ +//! Fast, small and secure [Shamir's Secret Sharing](https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing) library crate +//! +//! Usage example (std): +//! ``` +//! use sharks::{ Sharks, Share }; +//! +//! // Set a minimum threshold of 10 shares +//! let sharks = Sharks(10); +//! // Obtain an iterator over the shares for secret [1, 2, 3, 4] +//! # #[cfg(feature = "std")] +//! # { +//! let dealer = sharks.dealer(&[1, 2, 3, 4]); +//! // Get 10 shares +//! 
let shares: Vec = dealer.take(10).collect(); +//! // Recover the original secret! +//! let secret = sharks.recover(shares.as_slice()).unwrap(); +//! assert_eq!(secret, vec![1, 2, 3, 4]); +//! # } +//! ``` +//! +//! Usage example (no std): +//! ``` +//! use sharks::{ Sharks, Share }; +//! use rand_chacha::rand_core::SeedableRng; +//! +//! // Set a minimum threshold of 10 shares +//! let sharks = Sharks(10); +//! // Obtain an iterator over the shares for secret [1, 2, 3, 4] +//! let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]); +//! let dealer = sharks.dealer_rng(&[1, 2, 3, 4], &mut rng); +//! // Get 10 shares +//! let shares: Vec = dealer.take(10).collect(); +//! // Recover the original secret! +//! let secret = sharks.recover(shares.as_slice()).unwrap(); +//! assert_eq!(secret, vec![1, 2, 3, 4]); +//! ``` +#![cfg_attr(not(feature = "std"), no_std)] + +mod field; +mod math; +mod share; + +extern crate alloc; + +use alloc::vec::Vec; +use hashbrown::HashSet; + +use field::GF256; +pub use share::Share; + +/// Tuple struct which implements methods to generate shares and recover secrets over a 256 bits Galois Field. +/// Its only parameter is the minimum shares threshold. +/// +/// Usage example: +/// ``` +/// # use sharks::{ Sharks, Share }; +/// // Set a minimum threshold of 10 shares +/// let sharks = Sharks(10); +/// // Obtain an iterator over the shares for secret [1, 2, 3, 4] +/// # #[cfg(feature = "std")] +/// # { +/// let dealer = sharks.dealer(&[1, 2, 3, 4]); +/// // Get 10 shares +/// let shares: Vec = dealer.take(10).collect(); +/// // Recover the original secret! +/// let secret = sharks.recover(shares.as_slice()).unwrap(); +/// assert_eq!(secret, vec![1, 2, 3, 4]); +/// # } +/// ``` +pub struct Sharks(pub u8); + +impl Sharks { + /// This method is useful when `std` is not available. For typical usage + /// see the `dealer` method. + /// + /// Given a `secret` byte slice, returns an `Iterator` along new shares. 
+ /// The maximum number of shares that can be generated is 256. + /// A random number generator has to be provided. + /// + /// Example: + /// ``` + /// # use sharks::{ Sharks, Share }; + /// # use rand_chacha::rand_core::SeedableRng; + /// # let sharks = Sharks(3); + /// // Obtain an iterator over the shares for secret [1, 2] + /// let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]); + /// let dealer = sharks.dealer_rng(&[1, 2], &mut rng); + /// // Get 3 shares + /// let shares: Vec = dealer.take(3).collect(); + pub fn dealer_rng( + &self, + secret: &[u8], + rng: &mut R, + ) -> impl Iterator { + let mut polys = Vec::with_capacity(secret.len()); + + for chunk in secret { + polys.push(math::random_polynomial(GF256(*chunk), self.0, rng)) + } + + math::get_evaluator(polys) + } + + /// Given a `secret` byte slice, returns an `Iterator` along new shares. + /// The maximum number of shares that can be generated is 256. + /// + /// Example: + /// ``` + /// # use sharks::{ Sharks, Share }; + /// # let sharks = Sharks(3); + /// // Obtain an iterator over the shares for secret [1, 2] + /// let dealer = sharks.dealer(&[1, 2]); + /// // Get 3 shares + /// let shares: Vec = dealer.take(3).collect(); + #[cfg(feature = "std")] + pub fn dealer(&self, secret: &[u8]) -> impl Iterator { + let mut rng = rand::thread_rng(); + self.dealer_rng(secret, &mut rng) + } + + /// Given an iterable collection of shares, recovers the original secret. + /// If the number of distinct shares is less than the minimum threshold an `Err` is returned, + /// otherwise an `Ok` containing the secret. 
+ /// + /// Example: + /// ``` + /// # use sharks::{ Sharks, Share }; + /// # use rand_chacha::rand_core::SeedableRng; + /// # let sharks = Sharks(3); + /// # let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]); + /// # let mut shares: Vec = sharks.dealer_rng(&[1], &mut rng).take(3).collect(); + /// // Recover original secret from shares + /// let mut secret = sharks.recover(&shares); + /// // Secret correctly recovered + /// assert!(secret.is_ok()); + /// // Remove shares for demonstration purposes + /// shares.clear(); + /// secret = sharks.recover(&shares); + /// // Not enough shares to recover secret + /// assert!(secret.is_err()); + pub fn recover<'a, T>(&self, shares: T) -> Result, &str> + where + T: IntoIterator, + T::IntoIter: Iterator, + { + let mut share_length: Option = None; + let mut keys: HashSet = HashSet::new(); + let mut values: Vec = Vec::new(); + + for share in shares.into_iter() { + if share_length.is_none() { + share_length = Some(share.y.len()); + } + + if Some(share.y.len()) != share_length { + return Err("All shares must have the same length"); + } else { + keys.insert(share.x.0); + values.push(share.clone()); + } + } + + if keys.is_empty() || (keys.len() < self.0 as usize) { + Err("Not enough shares to recover original secret") + } else { + Ok(math::interpolate(values.as_slice())) + } + } +} + +#[cfg(test)] +mod tests { + use super::{Share, Sharks}; + use alloc::{vec, vec::Vec}; + + impl Sharks { + #[cfg(not(feature = "std"))] + fn make_shares(&self, secret: &[u8]) -> impl Iterator { + use rand_chacha::{rand_core::SeedableRng, ChaCha8Rng}; + + let mut rng = ChaCha8Rng::from_seed([0x90; 32]); + self.dealer_rng(secret, &mut rng) + } + + #[cfg(feature = "std")] + fn make_shares(&self, secret: &[u8]) -> impl Iterator { + self.dealer(secret) + } + } + + #[test] + fn test_insufficient_shares_err() { + let sharks = Sharks(255); + let shares: Vec = sharks.make_shares(&[1]).take(254).collect(); + let secret = sharks.recover(&shares); + 
assert!(secret.is_err()); + } + + #[test] + fn test_duplicate_shares_err() { + let sharks = Sharks(255); + let mut shares: Vec = sharks.make_shares(&[1]).take(255).collect(); + shares[1] = Share { + x: shares[0].x.clone(), + y: shares[0].y.clone(), + }; + let secret = sharks.recover(&shares); + assert!(secret.is_err()); + } + + #[test] + fn test_integration_works() { + let sharks = Sharks(255); + let shares: Vec = sharks.make_shares(&[1, 2, 3, 4]).take(255).collect(); + let secret = sharks.recover(&shares).unwrap(); + assert_eq!(secret, vec![1, 2, 3, 4]); + } +} diff --git a/src/rust/vendor/sharks/src/math.rs b/src/rust/vendor/sharks/src/math.rs new file mode 100644 index 000000000..ee6925266 --- /dev/null +++ b/src/rust/vendor/sharks/src/math.rs @@ -0,0 +1,98 @@ +// A module which contains necessary algorithms to compute Shamir's shares and recover secrets + +use alloc::vec::Vec; + +use rand::distributions::{Distribution, Uniform}; + +use super::field::GF256; +use super::share::Share; + +// Finds the [root of the Lagrange polynomial](https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing#Computationally_efficient_approach). +// The expected `shares` argument format is the same as the output by the `get_evaluator´ function. +// Where each (key, value) pair corresponds to one share, where the key is the `x` and the value is a vector of `y`, +// where each element corresponds to one of the secret's byte chunks. +pub fn interpolate(shares: &[Share]) -> Vec { + (0..shares[0].y.len()) + .map(|s| { + shares + .iter() + .map(|s_i| { + shares + .iter() + .filter(|s_j| s_j.x != s_i.x) + .map(|s_j| s_j.x.clone() / (s_j.x.clone() - s_i.x.clone())) + .product::() + * s_i.y[s].clone() + }) + .sum::() + .0 + }) + .collect() +} + +// Generates `k` polynomial coefficients, being the last one `s` and the others randomly generated between `[1, 255]`. +// Coefficient degrees go from higher to lower in the returned vector order. 
+pub fn random_polynomial(s: GF256, k: u8, rng: &mut R) -> Vec { + let k = k as usize; + let mut poly = Vec::with_capacity(k); + let between = Uniform::new_inclusive(1, 255); + + for _ in 1..k { + poly.push(GF256(between.sample(rng))); + } + poly.push(s); + + poly +} + +// Returns an iterator over the points of the `polys` polynomials passed as argument. +// Each item of the iterator is a tuple `(x, [f_1(x), f_2(x)..])` where eaxh `f_i` is the result for the ith polynomial. +// Each polynomial corresponds to one byte chunk of the original secret. +// The iterator will start at `x = 1` and end at `x = 255`. +pub fn get_evaluator(polys: Vec>) -> impl Iterator { + (1..=u8::max_value()).map(GF256).map(move |x| Share { + x: x.clone(), + y: polys + .iter() + .map(|p| { + p.iter() + .fold(GF256(0), |acc, c| acc * x.clone() + c.clone()) + }) + .collect(), + }) +} + +#[cfg(test)] +mod tests { + use super::{get_evaluator, interpolate, random_polynomial, Share, GF256}; + use alloc::{vec, vec::Vec}; + use rand_chacha::rand_core::SeedableRng; + + #[test] + fn random_polynomial_works() { + let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]); + let poly = random_polynomial(GF256(1), 3, &mut rng); + assert_eq!(poly.len(), 3); + assert_eq!(poly[2], GF256(1)); + } + + #[test] + fn evaluator_works() { + let iter = get_evaluator(vec![vec![GF256(3), GF256(2), GF256(5)]]); + let values: Vec<_> = iter.take(2).map(|s| (s.x.clone(), s.y.clone())).collect(); + assert_eq!( + values, + vec![(GF256(1), vec![GF256(4)]), (GF256(2), vec![GF256(13)])] + ); + } + + #[test] + fn interpolate_works() { + let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]); + let poly = random_polynomial(GF256(185), 10, &mut rng); + let iter = get_evaluator(vec![poly]); + let shares: Vec = iter.take(10).collect(); + let root = interpolate(&shares); + assert_eq!(root, vec![185]); + } +} diff --git a/src/rust/vendor/sharks/src/share.rs b/src/rust/vendor/sharks/src/share.rs new file mode 100644 index 
000000000..c9d1592ac --- /dev/null +++ b/src/rust/vendor/sharks/src/share.rs @@ -0,0 +1,92 @@ +use alloc::vec::Vec; + +use super::field::GF256; + +#[cfg(feature = "fuzzing")] +use arbitrary::Arbitrary; + +#[cfg(feature = "zeroize_memory")] +use zeroize::Zeroize; + +/// A share used to reconstruct the secret. Can be serialized to and from a byte array. +/// +/// Usage example: +/// ``` +/// use sharks::{Sharks, Share}; +/// use core::convert::TryFrom; +/// # use rand_chacha::rand_core::SeedableRng; +/// # fn send_to_printer(_: Vec) {} +/// # fn ask_shares() -> Vec> {vec![vec![1, 2], vec![2, 3], vec![3, 4]]} +/// +/// // Transmit the share bytes to a printer +/// let sharks = Sharks(3); +/// let mut rng = rand_chacha::ChaCha8Rng::from_seed([0x90; 32]); +/// let dealer = sharks.dealer_rng(&[1, 2, 3], &mut rng); +/// +/// // Get 5 shares and print paper keys +/// for s in dealer.take(5) { +/// send_to_printer(Vec::from(&s)); +/// }; +/// +/// // Get share bytes from an external source and recover secret +/// let shares_bytes: Vec> = ask_shares(); +/// let shares: Vec = shares_bytes.iter().map(|s| Share::try_from(s.as_slice()).unwrap()).collect(); +/// let secret = sharks.recover(&shares).unwrap(); +#[derive(Clone)] +#[cfg_attr(feature = "fuzzing", derive(Arbitrary, Debug))] +#[cfg_attr(feature = "zeroize_memory", derive(Zeroize))] +#[cfg_attr(feature = "zeroize_memory", zeroize(drop))] +pub struct Share { + pub x: GF256, + pub y: Vec, +} + +/// Obtains a byte vector from a `Share` instance +impl From<&Share> for Vec { + fn from(s: &Share) -> Vec { + let mut bytes = Vec::with_capacity(s.y.len() + 1); + bytes.push(s.x.0); + bytes.extend(s.y.iter().map(|p| p.0)); + bytes + } +} + +/// Obtains a `Share` instance from a byte slice +impl core::convert::TryFrom<&[u8]> for Share { + type Error = &'static str; + + fn try_from(s: &[u8]) -> Result { + if s.len() < 2 { + Err("A Share must be at least 2 bytes long") + } else { + let x = GF256(s[0]); + let y = s[1..].iter().map(|p| 
GF256(*p)).collect(); + Ok(Share { x, y }) + } + } +} + +#[cfg(test)] +mod tests { + use super::{Share, GF256}; + use alloc::{vec, vec::Vec}; + use core::convert::TryFrom; + + #[test] + fn vec_from_share_works() { + let share = Share { + x: GF256(1), + y: vec![GF256(2), GF256(3)], + }; + let bytes = Vec::from(&share); + assert_eq!(bytes, vec![1, 2, 3]); + } + + #[test] + fn share_from_u8_slice_works() { + let bytes = [1, 2, 3]; + let share = Share::try_from(&bytes[..]).unwrap(); + assert_eq!(share.x, GF256(1)); + assert_eq!(share.y, vec![GF256(2), GF256(3)]); + } +} diff --git a/src/rust/vendor/zerocopy-derive/.cargo-checksum.json b/src/rust/vendor/zerocopy-derive/.cargo-checksum.json new file mode 100644 index 000000000..fbb75d798 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"c40aeff1053c2c2d5fb16d9e56ce6ac385d04a49e10a1a61ef58bfdcee25a140","LICENSE-APACHE":"9d185ac6703c4b0453974c0d85e9eee43e6941009296bb1f5eb0b54e2329e9f3","LICENSE-BSD":"83c1763356e822adde0a2cae748d938a73fdc263849ccff6b27776dff213bd32","LICENSE-MIT":"1a2f5c12ddc934d58956aa5dbdd3255fe55fd957633ab7d0d39e4f0daa73f7df","src/ext.rs":"d741f6132fca552676b132a38b6431362054aec0c86dcf1c281bca51d5decad2","src/lib.rs":"d9f83e05ed8444076a6df846aa609dcbbea3931a1f6da3d1f37611163f8ff903","src/repr.rs":"780f547b9d51794ec35ea9359a2c55cd40cf1d84f6e1f4080034ff337df953c1","tests/enum_as_bytes.rs":"2e11daa46c6b922d748321e9a47c3b355e2a9e3b48af95a699c39186b7783711","tests/enum_from_zeroes.rs":"32ca3d0dc9085ef8eb9597b5e057ff0b3a0e92b6da44fac3326b2a124010ba4b","tests/enum_known_layout.rs":"7bc4979b3f9cadc4784afd821ea1c0670fe987a842627f9bb4406b248c5c6ce4","tests/enum_unaligned.rs":"0c42281098047c6a106c2eae2ee792b893b4f295e8147cf56eaf826888be5fbf","tests/hygiene.rs":"24f6fb3e4f1aa313753d3f16d9285105b836392f9d68de77ea436a9b24443102","tests/paths_and_modules.rs":"4218b6ac5d1aeb2d3b199af5f64eea45ab1117fc135e9d30588ff761e98b0f10","tests/priv_in_pub.rs":"57ff0106151
dc34f2e14be90ca73c1c7e6791215683b28fc68abd2deed90fedb","tests/struct_as_bytes.rs":"334053105b4341376e11a895ceb534b1f0961100f7d04ece17745fbf7d58e0ca","tests/struct_from_bytes.rs":"90e4d0d7cd9b72f3338edff937f195614fca52b6d937cfbba5c2bc763ebc1e60","tests/struct_from_zeroes.rs":"52d6965cd7571926683e85b5b13e09a25144ad0615c7c73ac3a0abf99fa33cb8","tests/struct_known_layout.rs":"1d54c62a9f4682a1ae4174cee9c73c5f0986623f7bbb069c1bed78b82be36989","tests/struct_unaligned.rs":"a5d3377dda1ba884ec4b70ca043f599eccba3b2258de16c58a72c43633507e2e","tests/trybuild.rs":"0954299d2c50d06259a628fa828131e9f0e8953dfcc2cf0d52d6d9ff40c969d5","tests/ui-msrv/derive_transparent.rs":"9f15bf0bf22c8e47c3d645f99239462eae6a875bd469510ad41838d9ae4ed1f8","tests/ui-msrv/derive_transparent.stderr":"b8a66f15647fa8ef3ab5ab371710f36943b42af8f3d2d088509c05f029ad7b8d","tests/ui-msrv/enum.rs":"7eb4f7f912f91c9a040536882b398ac4f07153fd8dbcc49a30603c6eb8107899","tests/ui-msrv/enum.stderr":"321e41c161804d3918fd15214845862c5ca346581e88cf0260073e3c6203cc21","tests/ui-msrv/enum_from_bytes_u8_too_few.rs":"afbec6f24a4bfca472f2929abc5125d8f5b305a0a1b472a1215ad7739ed63100","tests/ui-msrv/enum_from_bytes_u8_too_few.stderr":"a5ab2757166ef9dfa51a09d3dbddd5e9e2b1a46bd3d4b8d175b33a90747878d7","tests/ui-msrv/late_compile_pass.rs":"244f7dcc9a821a400fe3c24323cf8ffe89aa28454527a85b492fc5afd5cae205","tests/ui-msrv/late_compile_pass.stderr":"a8598656086bfc855c7c69131e08b3ac0eac22c5a731346ab4fb5d06dc0dd8e6","tests/ui-msrv/mid_compile_pass.rs":"b80b01bfd383c537173f7d630ec2789a2af3e01bc6d53c807fdcf271b235d0c9","tests/ui-msrv/mid_compile_pass.stderr":"d2d8d441c7642ca266a4250e8b6a4a9aa693cfc2ec48f679600e392e7b6c6273","tests/ui-msrv/struct.rs":"882b8f0a84ac772aaec5a4f786a5216ad37a269a6d9f1f836f1b27cbe861743c","tests/ui-msrv/struct.stderr":"eb245197d856850ea4e9c6ec58fae60058dee5f7fb7ca68b113e4c9cd7826b34","tests/ui-msrv/union.rs":"0661431d493e5690653ba0ed076fba14ab03fff81471d572369269aa58bde5a0","tests/ui-msrv/union.stderr":"a75b425e50af37
59dfe4d2bf832b4cb650ddbaf6b5c8853aa904be98685f1e53","tests/ui-nightly/derive_transparent.rs":"9f15bf0bf22c8e47c3d645f99239462eae6a875bd469510ad41838d9ae4ed1f8","tests/ui-nightly/derive_transparent.stderr":"9a3f6eae91f2f4f2c6ed8d517f98e2f427ec75ba23ad27c86cbb4c587dcd3e74","tests/ui-nightly/enum.rs":"7eb4f7f912f91c9a040536882b398ac4f07153fd8dbcc49a30603c6eb8107899","tests/ui-nightly/enum.stderr":"9b4d965632beb995022209f0f4ca5d875725f33149916a963402b1901e1dbf14","tests/ui-nightly/enum_from_bytes_u8_too_few.rs":"afbec6f24a4bfca472f2929abc5125d8f5b305a0a1b472a1215ad7739ed63100","tests/ui-nightly/enum_from_bytes_u8_too_few.stderr":"28a493e1057279ea8e20df49ff0c02dfa132027a86bb6f5fe50e250c14e62572","tests/ui-nightly/late_compile_pass.rs":"244f7dcc9a821a400fe3c24323cf8ffe89aa28454527a85b492fc5afd5cae205","tests/ui-nightly/late_compile_pass.stderr":"46d7db9d292eb56be04783f01e9b54d335003ccec48689126654a36b97cc9d92","tests/ui-nightly/mid_compile_pass.rs":"b80b01bfd383c537173f7d630ec2789a2af3e01bc6d53c807fdcf271b235d0c9","tests/ui-nightly/mid_compile_pass.stderr":"359f468b1f6313f5a7d5533fa5fe09d80a759e8adf523c9a8edff8b636b5e4b3","tests/ui-nightly/struct.rs":"882b8f0a84ac772aaec5a4f786a5216ad37a269a6d9f1f836f1b27cbe861743c","tests/ui-nightly/struct.stderr":"abc60481cd303a84507d06d6ed351f58598ee1e57502b37711534930fc611ea5","tests/ui-nightly/union.rs":"0661431d493e5690653ba0ed076fba14ab03fff81471d572369269aa58bde5a0","tests/ui-nightly/union.stderr":"bbaa6c1ac4df2e263fb884b6d356a3e366e68a0cdc7d8e32489eabac594b76a5","tests/ui-stable/derive_transparent.rs":"9f15bf0bf22c8e47c3d645f99239462eae6a875bd469510ad41838d9ae4ed1f8","tests/ui-stable/derive_transparent.stderr":"0698083580c991aeeeb7b8a9f8cac803abe711b6ebf2917a95a21de46ea9ff6c","tests/ui-stable/enum.rs":"7eb4f7f912f91c9a040536882b398ac4f07153fd8dbcc49a30603c6eb8107899","tests/ui-stable/enum.stderr":"3d2f2fa112f70f7c18e6aa3400ed5f28ff39b9241de4fdecf7e786bfe85ceadc","tests/ui-stable/enum_from_bytes_u8_too_few.rs":"afbec6f24a4bfca472f
2929abc5125d8f5b305a0a1b472a1215ad7739ed63100","tests/ui-stable/enum_from_bytes_u8_too_few.stderr":"b3edb381f968f6f2ad9ab4810132df5962b0650460e07f77af818ababf124fe7","tests/ui-stable/late_compile_pass.rs":"244f7dcc9a821a400fe3c24323cf8ffe89aa28454527a85b492fc5afd5cae205","tests/ui-stable/late_compile_pass.stderr":"bd96e3da0befae5f09458e94c2e03066cf4c010ca6733ce541f8d847a5a03f42","tests/ui-stable/mid_compile_pass.rs":"b80b01bfd383c537173f7d630ec2789a2af3e01bc6d53c807fdcf271b235d0c9","tests/ui-stable/mid_compile_pass.stderr":"31eff0a802e3e5081337e30260d984c68358211ef0ba7a71fcdd409bf3740023","tests/ui-stable/struct.rs":"882b8f0a84ac772aaec5a4f786a5216ad37a269a6d9f1f836f1b27cbe861743c","tests/ui-stable/struct.stderr":"d7444f398e915d40b0cf4ab1d2600468ce1948c4bb0ac28b9f30d1c88404f90d","tests/ui-stable/union.rs":"0661431d493e5690653ba0ed076fba14ab03fff81471d572369269aa58bde5a0","tests/ui-stable/union.stderr":"a238e0295fd6b32e918fe820410322941c10662ff21f31af704c380c301fc165","tests/union_as_bytes.rs":"57e69981ed7bb8eebbb2ea2be160532074e32aa4cec6543e9e3af0f5e3767fd8","tests/union_from_bytes.rs":"7da559bbb70fb2dbbb7422ad3099d8c2504d5815bc1e87173ffa758b929382b2","tests/union_from_zeroes.rs":"448d21026955403e1f09c69e19c3542a454456aab1c13d32dad8c612b8cbc7f8","tests/union_known_layout.rs":"a94be098de0a2565d1cf3e9631b36c250ddae1c3490f18e9d8a7b6f70274ec00","tests/union_unaligned.rs":"c8a0458226645063805b9653c2148048e7b93b273b93e7959a969f15e167fa57","tests/util.rs":"8d0cfb435e4b154a3702511f3d10331d6b01bcd90f0d70d4a094778813e9e387"},"package":"fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"} \ No newline at end of file diff --git a/src/rust/vendor/zerocopy-derive/Cargo.toml b/src/rust/vendor/zerocopy-derive/Cargo.toml new file mode 100644 index 000000000..70758ac9c --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/Cargo.toml @@ -0,0 +1,43 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# 
"normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "zerocopy-derive" +version = "0.7.35" +authors = ["Joshua Liebow-Feeser "] +exclude = [ + ".*", + "tests/enum_from_bytes.rs", + "tests/ui-nightly/enum_from_bytes_u16_too_few.rs.disabled", +] +description = "Custom derive for traits from the zerocopy crate" +license = "BSD-2-Clause OR Apache-2.0 OR MIT" +repository = "https://github.com/google/zerocopy" + +[lib] +proc-macro = true + +[dependencies.proc-macro2] +version = "1.0.1" + +[dependencies.quote] +version = "1.0.10" + +[dependencies.syn] +version = "2.0.31" + +[dev-dependencies.static_assertions] +version = "1.1" + +[dev-dependencies.trybuild] +version = "=1.0.85" +features = ["diff"] diff --git a/src/rust/vendor/zerocopy-derive/LICENSE-APACHE b/src/rust/vendor/zerocopy-derive/LICENSE-APACHE new file mode 100644 index 000000000..2dc22c12f --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2023 The Fuchsia Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/rust/vendor/zerocopy-derive/LICENSE-BSD b/src/rust/vendor/zerocopy-derive/LICENSE-BSD new file mode 100644 index 000000000..7ed244f42 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/LICENSE-BSD @@ -0,0 +1,24 @@ +Copyright 2019 The Fuchsia Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/rust/vendor/zerocopy-derive/LICENSE-MIT b/src/rust/vendor/zerocopy-derive/LICENSE-MIT new file mode 100644 index 000000000..26e15216c --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright 2023 The Fuchsia Authors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+ diff --git a/src/rust/vendor/zerocopy-derive/src/ext.rs b/src/rust/vendor/zerocopy-derive/src/ext.rs new file mode 100644 index 000000000..87cf838f8 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/src/ext.rs @@ -0,0 +1,53 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use syn::{Data, DataEnum, DataStruct, DataUnion, Type}; + +pub trait DataExt { + /// Extract the types of all fields. For enums, extract the types of fields + /// from each variant. + fn field_types(&self) -> Vec<&Type>; +} + +impl DataExt for Data { + fn field_types(&self) -> Vec<&Type> { + match self { + Data::Struct(strc) => strc.field_types(), + Data::Enum(enm) => enm.field_types(), + Data::Union(un) => un.field_types(), + } + } +} + +impl DataExt for DataStruct { + fn field_types(&self) -> Vec<&Type> { + self.fields.iter().map(|f| &f.ty).collect() + } +} + +impl DataExt for DataEnum { + fn field_types(&self) -> Vec<&Type> { + self.variants.iter().flat_map(|var| &var.fields).map(|f| &f.ty).collect() + } +} + +impl DataExt for DataUnion { + fn field_types(&self) -> Vec<&Type> { + self.fields.named.iter().map(|f| &f.ty).collect() + } +} + +pub trait EnumExt { + fn is_c_like(&self) -> bool; +} + +impl EnumExt for DataEnum { + fn is_c_like(&self) -> bool { + self.field_types().is_empty() + } +} diff --git a/src/rust/vendor/zerocopy-derive/src/lib.rs b/src/rust/vendor/zerocopy-derive/src/lib.rs new file mode 100644 index 000000000..1767310e3 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/src/lib.rs @@ -0,0 +1,887 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! 
Derive macros for [zerocopy]'s traits. +//! +//! [zerocopy]: https://docs.rs/zerocopy + +// Sometimes we want to use lints which were added after our MSRV. +// `unknown_lints` is `warn` by default and we deny warnings in CI, so without +// this attribute, any unknown lint would cause a CI failure when testing with +// our MSRV. +#![allow(unknown_lints)] +#![deny(renamed_and_removed_lints)] +#![deny(clippy::all, clippy::missing_safety_doc, clippy::undocumented_unsafe_blocks)] +#![deny( + rustdoc::bare_urls, + rustdoc::broken_intra_doc_links, + rustdoc::invalid_codeblock_attributes, + rustdoc::invalid_html_tags, + rustdoc::invalid_rust_codeblocks, + rustdoc::missing_crate_level_docs, + rustdoc::private_intra_doc_links +)] +#![recursion_limit = "128"] + +mod ext; +mod repr; + +use { + proc_macro2::Span, + quote::quote, + syn::{ + parse_quote, Data, DataEnum, DataStruct, DataUnion, DeriveInput, Error, Expr, ExprLit, + GenericParam, Ident, Lit, + }, +}; + +use {crate::ext::*, crate::repr::*}; + +// Unwraps a `Result<_, Vec>`, converting any `Err` value into a +// `TokenStream` and returning it. +macro_rules! try_or_print { + ($e:expr) => { + match $e { + Ok(x) => x, + Err(errors) => return print_all_errors(errors).into(), + } + }; +} + +// TODO(https://github.com/rust-lang/rust/issues/54140): Some errors could be +// made better if we could add multiple lines of error output like this: +// +// error: unsupported representation +// --> enum.rs:28:8 +// | +// 28 | #[repr(transparent)] +// | +// help: required by the derive of FromBytes +// +// Instead, we have more verbose error messages like "unsupported representation +// for deriving FromZeroes, FromBytes, AsBytes, or Unaligned on an enum" +// +// This will probably require Span::error +// (https://doc.rust-lang.org/nightly/proc_macro/struct.Span.html#method.error), +// which is currently unstable. Revisit this once it's stable. 
+ +#[proc_macro_derive(KnownLayout)] +pub fn derive_known_layout(ts: proc_macro::TokenStream) -> proc_macro::TokenStream { + let ast = syn::parse_macro_input!(ts as DeriveInput); + + let is_repr_c_struct = match &ast.data { + Data::Struct(..) => { + let reprs = try_or_print!(repr::reprs::(&ast.attrs)); + if reprs.iter().any(|(_meta, repr)| repr == &Repr::C) { + Some(reprs) + } else { + None + } + } + Data::Enum(..) | Data::Union(..) => None, + }; + + let fields = ast.data.field_types(); + + let (require_self_sized, extras) = if let ( + Some(reprs), + Some((trailing_field, leading_fields)), + ) = (is_repr_c_struct, fields.split_last()) + { + let repr_align = reprs + .iter() + .find_map( + |(_meta, repr)| { + if let Repr::Align(repr_align) = repr { + Some(repr_align) + } else { + None + } + }, + ) + .map(|repr_align| quote!(NonZeroUsize::new(#repr_align as usize))) + .unwrap_or(quote!(None)); + + let repr_packed = reprs + .iter() + .find_map(|(_meta, repr)| match repr { + Repr::Packed => Some(1), + Repr::PackedN(repr_packed) => Some(*repr_packed), + _ => None, + }) + .map(|repr_packed| quote!(NonZeroUsize::new(#repr_packed as usize))) + .unwrap_or(quote!(None)); + + ( + false, + quote!( + // SAFETY: `LAYOUT` accurately describes the layout of `Self`. + // The layout of `Self` is reflected using a sequence of + // invocations of `DstLayout::{new_zst,extend,pad_to_align}`. + // The documentation of these items vows that invocations in + // this manner will acurately describe a type, so long as: + // + // - that type is `repr(C)`, + // - its fields are enumerated in the order they appear, + // - the presence of `repr_align` and `repr_packed` are correctly accounted for. + // + // We respect all three of these preconditions here. This + // expansion is only used if `is_repr_c_struct`, we enumerate + // the fields in order, and we extract the values of `align(N)` + // and `packed(N)`. 
+ const LAYOUT: ::zerocopy::DstLayout = { + use ::zerocopy::macro_util::core_reexport::num::NonZeroUsize; + use ::zerocopy::{DstLayout, KnownLayout}; + + let repr_align = #repr_align; + let repr_packed = #repr_packed; + + DstLayout::new_zst(repr_align) + #(.extend(DstLayout::for_type::<#leading_fields>(), repr_packed))* + .extend(<#trailing_field as KnownLayout>::LAYOUT, repr_packed) + .pad_to_align() + }; + + // SAFETY: + // - The recursive call to `raw_from_ptr_len` preserves both address and provenance. + // - The `as` cast preserves both address and provenance. + // - `NonNull::new_unchecked` preserves both address and provenance. + #[inline(always)] + fn raw_from_ptr_len( + bytes: ::zerocopy::macro_util::core_reexport::ptr::NonNull, + elems: usize, + ) -> ::zerocopy::macro_util::core_reexport::ptr::NonNull { + use ::zerocopy::{KnownLayout}; + let trailing = <#trailing_field as KnownLayout>::raw_from_ptr_len(bytes, elems); + let slf = trailing.as_ptr() as *mut Self; + // SAFETY: Constructed from `trailing`, which is non-null. + unsafe { ::zerocopy::macro_util::core_reexport::ptr::NonNull::new_unchecked(slf) } + } + ), + ) + } else { + // For enums, unions, and non-`repr(C)` structs, we require that + // `Self` is sized, and as a result don't need to reason about the + // internals of the type. + ( + true, + quote!( + // SAFETY: `LAYOUT` is guaranteed to accurately describe the + // layout of `Self`, because that is the documented safety + // contract of `DstLayout::for_type`. + const LAYOUT: ::zerocopy::DstLayout = ::zerocopy::DstLayout::for_type::(); + + // SAFETY: `.cast` preserves address and provenance. + // + // TODO(#429): Add documentation to `.cast` that promises that + // it preserves provenance. 
+ #[inline(always)] + fn raw_from_ptr_len( + bytes: ::zerocopy::macro_util::core_reexport::ptr::NonNull, + _elems: usize, + ) -> ::zerocopy::macro_util::core_reexport::ptr::NonNull { + bytes.cast::() + } + ), + ) + }; + + match &ast.data { + Data::Struct(strct) => { + let require_trait_bound_on_field_types = if require_self_sized { + RequireBoundedFields::No + } else { + RequireBoundedFields::Trailing + }; + + // A bound on the trailing field is required, since structs are + // unsized if their trailing field is unsized. Reflecting the layout + // of an usized trailing field requires that the field is + // `KnownLayout`. + impl_block( + &ast, + strct, + Trait::KnownLayout, + require_trait_bound_on_field_types, + require_self_sized, + None, + Some(extras), + ) + } + Data::Enum(enm) => { + // A bound on the trailing field is not required, since enums cannot + // currently be unsized. + impl_block( + &ast, + enm, + Trait::KnownLayout, + RequireBoundedFields::No, + true, + None, + Some(extras), + ) + } + Data::Union(unn) => { + // A bound on the trailing field is not required, since unions + // cannot currently be unsized. 
+ impl_block( + &ast, + unn, + Trait::KnownLayout, + RequireBoundedFields::No, + true, + None, + Some(extras), + ) + } + } + .into() +} + +#[proc_macro_derive(FromZeroes)] +pub fn derive_from_zeroes(ts: proc_macro::TokenStream) -> proc_macro::TokenStream { + let ast = syn::parse_macro_input!(ts as DeriveInput); + match &ast.data { + Data::Struct(strct) => derive_from_zeroes_struct(&ast, strct), + Data::Enum(enm) => derive_from_zeroes_enum(&ast, enm), + Data::Union(unn) => derive_from_zeroes_union(&ast, unn), + } + .into() +} + +#[proc_macro_derive(FromBytes)] +pub fn derive_from_bytes(ts: proc_macro::TokenStream) -> proc_macro::TokenStream { + let ast = syn::parse_macro_input!(ts as DeriveInput); + match &ast.data { + Data::Struct(strct) => derive_from_bytes_struct(&ast, strct), + Data::Enum(enm) => derive_from_bytes_enum(&ast, enm), + Data::Union(unn) => derive_from_bytes_union(&ast, unn), + } + .into() +} + +#[proc_macro_derive(AsBytes)] +pub fn derive_as_bytes(ts: proc_macro::TokenStream) -> proc_macro::TokenStream { + let ast = syn::parse_macro_input!(ts as DeriveInput); + match &ast.data { + Data::Struct(strct) => derive_as_bytes_struct(&ast, strct), + Data::Enum(enm) => derive_as_bytes_enum(&ast, enm), + Data::Union(unn) => derive_as_bytes_union(&ast, unn), + } + .into() +} + +#[proc_macro_derive(Unaligned)] +pub fn derive_unaligned(ts: proc_macro::TokenStream) -> proc_macro::TokenStream { + let ast = syn::parse_macro_input!(ts as DeriveInput); + match &ast.data { + Data::Struct(strct) => derive_unaligned_struct(&ast, strct), + Data::Enum(enm) => derive_unaligned_enum(&ast, enm), + Data::Union(unn) => derive_unaligned_union(&ast, unn), + } + .into() +} + +const STRUCT_UNION_ALLOWED_REPR_COMBINATIONS: &[&[StructRepr]] = &[ + &[StructRepr::C], + &[StructRepr::Transparent], + &[StructRepr::Packed], + &[StructRepr::C, StructRepr::Packed], +]; + +// A struct is `FromZeroes` if: +// - all fields are `FromZeroes` + +fn derive_from_zeroes_struct(ast: &DeriveInput, 
strct: &DataStruct) -> proc_macro2::TokenStream { + impl_block(ast, strct, Trait::FromZeroes, RequireBoundedFields::Yes, false, None, None) +} + +// An enum is `FromZeroes` if: +// - all of its variants are fieldless +// - one of the variants has a discriminant of `0` + +fn derive_from_zeroes_enum(ast: &DeriveInput, enm: &DataEnum) -> proc_macro2::TokenStream { + if !enm.is_c_like() { + return Error::new_spanned(ast, "only C-like enums can implement FromZeroes") + .to_compile_error(); + } + + let has_explicit_zero_discriminant = + enm.variants.iter().filter_map(|v| v.discriminant.as_ref()).any(|(_, e)| { + if let Expr::Lit(ExprLit { lit: Lit::Int(i), .. }) = e { + i.base10_parse::().ok() == Some(0) + } else { + false + } + }); + // If the first variant of an enum does not specify its discriminant, it is set to zero: + // https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations + let has_implicit_zero_discriminant = + enm.variants.iter().next().map(|v| v.discriminant.is_none()) == Some(true); + + if !has_explicit_zero_discriminant && !has_implicit_zero_discriminant { + return Error::new_spanned( + ast, + "FromZeroes only supported on enums with a variant that has a discriminant of `0`", + ) + .to_compile_error(); + } + + impl_block(ast, enm, Trait::FromZeroes, RequireBoundedFields::Yes, false, None, None) +} + +// Like structs, unions are `FromZeroes` if +// - all fields are `FromZeroes` + +fn derive_from_zeroes_union(ast: &DeriveInput, unn: &DataUnion) -> proc_macro2::TokenStream { + impl_block(ast, unn, Trait::FromZeroes, RequireBoundedFields::Yes, false, None, None) +} + +// A struct is `FromBytes` if: +// - all fields are `FromBytes` + +fn derive_from_bytes_struct(ast: &DeriveInput, strct: &DataStruct) -> proc_macro2::TokenStream { + impl_block(ast, strct, Trait::FromBytes, RequireBoundedFields::Yes, false, None, None) +} + +// An enum is `FromBytes` if: +// - Every possible bit pattern must be valid, 
which means that every bit +// pattern must correspond to a different enum variant. Thus, for an enum +// whose layout takes up N bytes, there must be 2^N variants. +// - Since we must know N, only representations which guarantee the layout's +// size are allowed. These are `repr(uN)` and `repr(iN)` (`repr(C)` implies an +// implementation-defined size). `usize` and `isize` technically guarantee the +// layout's size, but would require us to know how large those are on the +// target platform. This isn't terribly difficult - we could emit a const +// expression that could call `core::mem::size_of` in order to determine the +// size and check against the number of enum variants, but a) this would be +// platform-specific and, b) even on Rust's smallest bit width platform (32), +// this would require ~4 billion enum variants, which obviously isn't a thing. + +fn derive_from_bytes_enum(ast: &DeriveInput, enm: &DataEnum) -> proc_macro2::TokenStream { + if !enm.is_c_like() { + return Error::new_spanned(ast, "only C-like enums can implement FromBytes") + .to_compile_error(); + } + + let reprs = try_or_print!(ENUM_FROM_BYTES_CFG.validate_reprs(ast)); + + let variants_required = match reprs.as_slice() { + [EnumRepr::U8] | [EnumRepr::I8] => 1usize << 8, + [EnumRepr::U16] | [EnumRepr::I16] => 1usize << 16, + // `validate_reprs` has already validated that it's one of the preceding + // patterns. 
+ _ => unreachable!(), + }; + if enm.variants.len() != variants_required { + return Error::new_spanned( + ast, + format!( + "FromBytes only supported on {} enum with {} variants", + reprs[0], variants_required + ), + ) + .to_compile_error(); + } + + impl_block(ast, enm, Trait::FromBytes, RequireBoundedFields::Yes, false, None, None) +} + +#[rustfmt::skip] +const ENUM_FROM_BYTES_CFG: Config = { + use EnumRepr::*; + Config { + allowed_combinations_message: r#"FromBytes requires repr of "u8", "u16", "i8", or "i16""#, + derive_unaligned: false, + allowed_combinations: &[ + &[U8], + &[U16], + &[I8], + &[I16], + ], + disallowed_but_legal_combinations: &[ + &[C], + &[U32], + &[I32], + &[U64], + &[I64], + &[Usize], + &[Isize], + ], + } +}; + +// Like structs, unions are `FromBytes` if +// - all fields are `FromBytes` + +fn derive_from_bytes_union(ast: &DeriveInput, unn: &DataUnion) -> proc_macro2::TokenStream { + impl_block(ast, unn, Trait::FromBytes, RequireBoundedFields::Yes, false, None, None) +} + +// A struct is `AsBytes` if: +// - all fields are `AsBytes` +// - `repr(C)` or `repr(transparent)` and +// - no padding (size of struct equals sum of size of field types) +// - `repr(packed)` + +fn derive_as_bytes_struct(ast: &DeriveInput, strct: &DataStruct) -> proc_macro2::TokenStream { + let reprs = try_or_print!(STRUCT_UNION_AS_BYTES_CFG.validate_reprs(ast)); + let is_transparent = reprs.contains(&StructRepr::Transparent); + let is_packed = reprs.contains(&StructRepr::Packed); + + // TODO(#10): Support type parameters for non-transparent, non-packed + // structs. + if !ast.generics.params.is_empty() && !is_transparent && !is_packed { + return Error::new( + Span::call_site(), + "unsupported on generic structs that are not repr(transparent) or repr(packed)", + ) + .to_compile_error(); + } + + // We don't need a padding check if the struct is repr(transparent) or + // repr(packed). 
+ // - repr(transparent): The layout and ABI of the whole struct is the same + // as its only non-ZST field (meaning there's no padding outside of that + // field) and we require that field to be `AsBytes` (meaning there's no + // padding in that field). + // - repr(packed): Any inter-field padding bytes are removed, meaning that + // any padding bytes would need to come from the fields, all of which + // we require to be `AsBytes` (meaning they don't have any padding). + let padding_check = if is_transparent || is_packed { None } else { Some(PaddingCheck::Struct) }; + impl_block(ast, strct, Trait::AsBytes, RequireBoundedFields::Yes, false, padding_check, None) +} + +const STRUCT_UNION_AS_BYTES_CFG: Config = Config { + // Since `disallowed_but_legal_combinations` is empty, this message will + // never actually be emitted. + allowed_combinations_message: r#"AsBytes requires either a) repr "C" or "transparent" with all fields implementing AsBytes or, b) repr "packed""#, + derive_unaligned: false, + allowed_combinations: STRUCT_UNION_ALLOWED_REPR_COMBINATIONS, + disallowed_but_legal_combinations: &[], +}; + +// An enum is `AsBytes` if it is C-like and has a defined repr. + +fn derive_as_bytes_enum(ast: &DeriveInput, enm: &DataEnum) -> proc_macro2::TokenStream { + if !enm.is_c_like() { + return Error::new_spanned(ast, "only C-like enums can implement AsBytes") + .to_compile_error(); + } + + // We don't care what the repr is; we only care that it is one of the + // allowed ones. + let _: Vec = try_or_print!(ENUM_AS_BYTES_CFG.validate_reprs(ast)); + impl_block(ast, enm, Trait::AsBytes, RequireBoundedFields::No, false, None, None) +} + +#[rustfmt::skip] +const ENUM_AS_BYTES_CFG: Config = { + use EnumRepr::*; + Config { + // Since `disallowed_but_legal_combinations` is empty, this message will + // never actually be emitted. 
+ allowed_combinations_message: r#"AsBytes requires repr of "C", "u8", "u16", "u32", "u64", "usize", "i8", "i16", "i32", "i64", or "isize""#, + derive_unaligned: false, + allowed_combinations: &[ + &[C], + &[U8], + &[U16], + &[I8], + &[I16], + &[U32], + &[I32], + &[U64], + &[I64], + &[Usize], + &[Isize], + ], + disallowed_but_legal_combinations: &[], + } +}; + +// A union is `AsBytes` if: +// - all fields are `AsBytes` +// - `repr(C)`, `repr(transparent)`, or `repr(packed)` +// - no padding (size of union equals size of each field type) + +fn derive_as_bytes_union(ast: &DeriveInput, unn: &DataUnion) -> proc_macro2::TokenStream { + // TODO(#10): Support type parameters. + if !ast.generics.params.is_empty() { + return Error::new(Span::call_site(), "unsupported on types with type parameters") + .to_compile_error(); + } + + try_or_print!(STRUCT_UNION_AS_BYTES_CFG.validate_reprs(ast)); + + impl_block( + ast, + unn, + Trait::AsBytes, + RequireBoundedFields::Yes, + false, + Some(PaddingCheck::Union), + None, + ) +} + +// A struct is `Unaligned` if: +// - `repr(align)` is no more than 1 and either +// - `repr(C)` or `repr(transparent)` and +// - all fields `Unaligned` +// - `repr(packed)` + +fn derive_unaligned_struct(ast: &DeriveInput, strct: &DataStruct) -> proc_macro2::TokenStream { + let reprs = try_or_print!(STRUCT_UNION_UNALIGNED_CFG.validate_reprs(ast)); + let require_trait_bounds_on_field_types = (!reprs.contains(&StructRepr::Packed)).into(); + + impl_block(ast, strct, Trait::Unaligned, require_trait_bounds_on_field_types, false, None, None) +} + +const STRUCT_UNION_UNALIGNED_CFG: Config = Config { + // Since `disallowed_but_legal_combinations` is empty, this message will + // never actually be emitted. 
+ allowed_combinations_message: r#"Unaligned requires either a) repr "C" or "transparent" with all fields implementing Unaligned or, b) repr "packed""#, + derive_unaligned: true, + allowed_combinations: STRUCT_UNION_ALLOWED_REPR_COMBINATIONS, + disallowed_but_legal_combinations: &[], +}; + +// An enum is `Unaligned` if: +// - No `repr(align(N > 1))` +// - `repr(u8)` or `repr(i8)` + +fn derive_unaligned_enum(ast: &DeriveInput, enm: &DataEnum) -> proc_macro2::TokenStream { + if !enm.is_c_like() { + return Error::new_spanned(ast, "only C-like enums can implement Unaligned") + .to_compile_error(); + } + + // The only valid reprs are `u8` and `i8`, and optionally `align(1)`. We + // don't actually care what the reprs are so long as they satisfy that + // requirement. + let _: Vec = try_or_print!(ENUM_UNALIGNED_CFG.validate_reprs(ast)); + + // C-like enums cannot currently have type parameters, so this value of true + // for `require_trait_bound_on_field_types` doesn't really do anything. But + // it's marginally more future-proof in case that restriction is lifted in + // the future. 
+ impl_block(ast, enm, Trait::Unaligned, RequireBoundedFields::Yes, false, None, None) +} + +#[rustfmt::skip] +const ENUM_UNALIGNED_CFG: Config = { + use EnumRepr::*; + Config { + allowed_combinations_message: + r#"Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1)))"#, + derive_unaligned: true, + allowed_combinations: &[ + &[U8], + &[I8], + ], + disallowed_but_legal_combinations: &[ + &[C], + &[U16], + &[U32], + &[U64], + &[Usize], + &[I16], + &[I32], + &[I64], + &[Isize], + ], + } +}; + +// Like structs, a union is `Unaligned` if: +// - `repr(align)` is no more than 1 and either +// - `repr(C)` or `repr(transparent)` and +// - all fields `Unaligned` +// - `repr(packed)` + +fn derive_unaligned_union(ast: &DeriveInput, unn: &DataUnion) -> proc_macro2::TokenStream { + let reprs = try_or_print!(STRUCT_UNION_UNALIGNED_CFG.validate_reprs(ast)); + let require_trait_bound_on_field_types = (!reprs.contains(&StructRepr::Packed)).into(); + + impl_block(ast, unn, Trait::Unaligned, require_trait_bound_on_field_types, false, None, None) +} + +// This enum describes what kind of padding check needs to be generated for the +// associated impl. +enum PaddingCheck { + // Check that the sum of the fields' sizes exactly equals the struct's size. + Struct, + // Check that the size of each field exactly equals the union's size. + Union, +} + +impl PaddingCheck { + /// Returns the ident of the macro to call in order to validate that a type + /// passes the padding check encoded by `PaddingCheck`. 
+ fn validator_macro_ident(&self) -> Ident { + let s = match self { + PaddingCheck::Struct => "struct_has_padding", + PaddingCheck::Union => "union_has_padding", + }; + + Ident::new(s, Span::call_site()) + } +} + +#[derive(Debug, Eq, PartialEq)] +enum Trait { + KnownLayout, + FromZeroes, + FromBytes, + AsBytes, + Unaligned, +} + +impl Trait { + fn ident(&self) -> Ident { + Ident::new(format!("{:?}", self).as_str(), Span::call_site()) + } +} + +#[derive(Debug, Eq, PartialEq)] +enum RequireBoundedFields { + No, + Yes, + Trailing, +} + +impl From for RequireBoundedFields { + fn from(do_require: bool) -> Self { + match do_require { + true => Self::Yes, + false => Self::No, + } + } +} + +fn impl_block( + input: &DeriveInput, + data: &D, + trt: Trait, + require_trait_bound_on_field_types: RequireBoundedFields, + require_self_sized: bool, + padding_check: Option, + extras: Option, +) -> proc_macro2::TokenStream { + // In this documentation, we will refer to this hypothetical struct: + // + // #[derive(FromBytes)] + // struct Foo + // where + // T: Copy, + // I: Clone, + // I::Item: Clone, + // { + // a: u8, + // b: T, + // c: I::Item, + // } + // + // We extract the field types, which in this case are `u8`, `T`, and + // `I::Item`. We re-use the existing parameters and where clauses. If + // `require_trait_bound == true` (as it is for `FromBytes), we add where + // bounds for each field's type: + // + // impl FromBytes for Foo + // where + // T: Copy, + // I: Clone, + // I::Item: Clone, + // T: FromBytes, + // I::Item: FromBytes, + // { + // } + // + // NOTE: It is standard practice to only emit bounds for the type parameters + // themselves, not for field types based on those parameters (e.g., `T` vs + // `T::Foo`). For a discussion of why this is standard practice, see + // https://github.com/rust-lang/rust/issues/26925. + // + // The reason we diverge from this standard is that doing it that way for us + // would be unsound. 
E.g., consider a type, `T` where `T: FromBytes` but + // `T::Foo: !FromBytes`. It would not be sound for us to accept a type with + // a `T::Foo` field as `FromBytes` simply because `T: FromBytes`. + // + // While there's no getting around this requirement for us, it does have the + // pretty serious downside that, when lifetimes are involved, the trait + // solver ties itself in knots: + // + // #[derive(Unaligned)] + // #[repr(C)] + // struct Dup<'a, 'b> { + // a: PhantomData<&'a u8>, + // b: PhantomData<&'b u8>, + // } + // + // error[E0283]: type annotations required: cannot resolve `core::marker::PhantomData<&'a u8>: zerocopy::Unaligned` + // --> src/main.rs:6:10 + // | + // 6 | #[derive(Unaligned)] + // | ^^^^^^^^^ + // | + // = note: required by `zerocopy::Unaligned` + + let type_ident = &input.ident; + let trait_ident = trt.ident(); + let field_types = data.field_types(); + + let bound_tt = |ty| parse_quote!(#ty: ::zerocopy::#trait_ident); + let field_type_bounds: Vec<_> = match (require_trait_bound_on_field_types, &field_types[..]) { + (RequireBoundedFields::Yes, _) => field_types.iter().map(bound_tt).collect(), + (RequireBoundedFields::No, _) | (RequireBoundedFields::Trailing, []) => vec![], + (RequireBoundedFields::Trailing, [.., last]) => vec![bound_tt(last)], + }; + + // Don't bother emitting a padding check if there are no fields. 
+ #[allow( + unstable_name_collisions, // See `BoolExt` below + clippy::incompatible_msrv, // https://github.com/rust-lang/rust-clippy/issues/12280 + )] + let padding_check_bound = padding_check.and_then(|check| (!field_types.is_empty()).then_some(check)).map(|check| { + let fields = field_types.iter(); + let validator_macro = check.validator_macro_ident(); + parse_quote!( + ::zerocopy::macro_util::HasPadding<#type_ident, {::zerocopy::#validator_macro!(#type_ident, #(#fields),*)}>: + ::zerocopy::macro_util::ShouldBe + ) + }); + + let self_sized_bound = if require_self_sized { Some(parse_quote!(Self: Sized)) } else { None }; + + let bounds = input + .generics + .where_clause + .as_ref() + .map(|where_clause| where_clause.predicates.iter()) + .into_iter() + .flatten() + .chain(field_type_bounds.iter()) + .chain(padding_check_bound.iter()) + .chain(self_sized_bound.iter()); + + // The parameters with trait bounds, but without type defaults. + let params = input.generics.params.clone().into_iter().map(|mut param| { + match &mut param { + GenericParam::Type(ty) => ty.default = None, + GenericParam::Const(cnst) => cnst.default = None, + GenericParam::Lifetime(_) => {} + } + quote!(#param) + }); + + // The identifiers of the parameters without trait bounds or type defaults. + let param_idents = input.generics.params.iter().map(|param| match param { + GenericParam::Type(ty) => { + let ident = &ty.ident; + quote!(#ident) + } + GenericParam::Lifetime(l) => { + let ident = &l.lifetime; + quote!(#ident) + } + GenericParam::Const(cnst) => { + let ident = &cnst.ident; + quote!({#ident}) + } + }); + + quote! { + // TODO(#553): Add a test that generates a warning when + // `#[allow(deprecated)]` isn't present. 
+ #[allow(deprecated)] + unsafe impl < #(#params),* > ::zerocopy::#trait_ident for #type_ident < #(#param_idents),* > + where + #(#bounds,)* + { + fn only_derive_is_allowed_to_implement_this_trait() {} + + #extras + } + } +} + +fn print_all_errors(errors: Vec) -> proc_macro2::TokenStream { + errors.iter().map(Error::to_compile_error).collect() +} + +// A polyfill for `Option::then_some`, which was added after our MSRV. +// +// TODO(#67): Remove this once our MSRV is >= 1.62. +#[allow(unused)] +trait BoolExt { + fn then_some(self, t: T) -> Option; +} + +#[allow(unused)] +impl BoolExt for bool { + fn then_some(self, t: T) -> Option { + if self { + Some(t) + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_config_repr_orderings() { + // Validate that the repr lists in the various configs are in the + // canonical order. If they aren't, then our algorithm to look up in + // those lists won't work. + + // TODO(https://github.com/rust-lang/rust/issues/53485): Remove once + // `Vec::is_sorted` is stabilized. + fn is_sorted_and_deduped(ts: &[T]) -> bool { + let mut sorted = ts.to_vec(); + sorted.sort(); + sorted.dedup(); + ts == sorted.as_slice() + } + + fn elements_are_sorted_and_deduped(lists: &[&[T]]) -> bool { + lists.iter().all(|list| is_sorted_and_deduped(list)) + } + + fn config_is_sorted(config: &Config) -> bool { + elements_are_sorted_and_deduped(config.allowed_combinations) + && elements_are_sorted_and_deduped(config.disallowed_but_legal_combinations) + } + + assert!(config_is_sorted(&STRUCT_UNION_UNALIGNED_CFG)); + assert!(config_is_sorted(&ENUM_FROM_BYTES_CFG)); + assert!(config_is_sorted(&ENUM_UNALIGNED_CFG)); + } + + #[test] + fn test_config_repr_no_overlap() { + // Validate that no set of reprs appears in both the + // `allowed_combinations` and `disallowed_but_legal_combinations` lists. 
+ + fn overlap(a: &[T], b: &[T]) -> bool { + a.iter().any(|elem| b.contains(elem)) + } + + fn config_overlaps(config: &Config) -> bool { + overlap(config.allowed_combinations, config.disallowed_but_legal_combinations) + } + + assert!(!config_overlaps(&STRUCT_UNION_UNALIGNED_CFG)); + assert!(!config_overlaps(&ENUM_FROM_BYTES_CFG)); + assert!(!config_overlaps(&ENUM_UNALIGNED_CFG)); + } +} diff --git a/src/rust/vendor/zerocopy-derive/src/repr.rs b/src/rust/vendor/zerocopy-derive/src/repr.rs new file mode 100644 index 000000000..f4f278868 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/src/repr.rs @@ -0,0 +1,311 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use core::fmt::{self, Display, Formatter}; + +use { + proc_macro2::Span, + syn::punctuated::Punctuated, + syn::spanned::Spanned, + syn::token::Comma, + syn::{Attribute, DeriveInput, Error, LitInt, Meta}, +}; + +pub struct Config { + // A human-readable message describing what combinations of representations + // are allowed. This will be printed to the user if they use an invalid + // combination. + pub allowed_combinations_message: &'static str, + // Whether we're checking as part of `derive(Unaligned)`. If not, we can + // ignore `repr(align)`, which makes the code (and the list of valid repr + // combinations we have to enumerate) somewhat simpler. If we're checking + // for `Unaligned`, then in addition to checking against illegal + // combinations, we also check to see if there exists a `repr(align(N > 1))` + // attribute. + pub derive_unaligned: bool, + // Combinations which are valid for the trait. + pub allowed_combinations: &'static [&'static [Repr]], + // Combinations which are not valid for the trait, but are legal according + // to Rust. 
Any combination not in this or `allowed_combinations` is either + // illegal according to Rust or the behavior is unspecified. If the behavior + // is unspecified, it might become specified in the future, and that + // specification might not play nicely with our requirements. Thus, we + // reject combinations with unspecified behavior in addition to illegal + // combinations. + pub disallowed_but_legal_combinations: &'static [&'static [Repr]], +} + +impl Config { + /// Validate that `input`'s representation attributes conform to the + /// requirements specified by this `Config`. + /// + /// `validate_reprs` extracts the `repr` attributes, validates that they + /// conform to the requirements of `self`, and returns them. Regardless of + /// whether `align` attributes are considered during validation, they are + /// stripped out of the returned value since no callers care about them. + pub fn validate_reprs(&self, input: &DeriveInput) -> Result, Vec> { + let mut metas_reprs = reprs(&input.attrs)?; + metas_reprs.sort_by(|a: &(_, R), b| a.1.partial_cmp(&b.1).unwrap()); + + if self.derive_unaligned { + if let Some((meta, _)) = + metas_reprs.iter().find(|&repr: &&(_, R)| repr.1.is_align_gt_one()) + { + return Err(vec![Error::new_spanned( + meta, + "cannot derive Unaligned with repr(align(N > 1))", + )]); + } + } + + let mut metas = Vec::new(); + let mut reprs = Vec::new(); + metas_reprs.into_iter().filter(|(_, repr)| !repr.is_align()).for_each(|(meta, repr)| { + metas.push(meta); + reprs.push(repr) + }); + + if reprs.is_empty() { + // Use `Span::call_site` to report this error on the + // `#[derive(...)]` itself. 
+ return Err(vec![Error::new(Span::call_site(), "must have a non-align #[repr(...)] attribute in order to guarantee this type's memory layout")]); + } + + let initial_sp = metas[0].span(); + let err_span = metas.iter().skip(1).try_fold(initial_sp, |sp, meta| sp.join(meta.span())); + + if self.allowed_combinations.contains(&reprs.as_slice()) { + Ok(reprs) + } else if self.disallowed_but_legal_combinations.contains(&reprs.as_slice()) { + Err(vec![Error::new( + err_span.unwrap_or_else(|| input.span()), + self.allowed_combinations_message, + )]) + } else { + Err(vec![Error::new( + err_span.unwrap_or_else(|| input.span()), + "conflicting representation hints", + )]) + } + } +} + +// The type of valid reprs for a particular kind (enum, struct, union). +pub trait KindRepr: 'static + Sized + Ord { + fn is_align(&self) -> bool; + fn is_align_gt_one(&self) -> bool; + fn parse(meta: &Meta) -> syn::Result; +} + +// Defines an enum for reprs which are valid for a given kind (structs, enums, +// etc), and provide implementations of `KindRepr`, `Ord`, and `Display`, and +// those traits' super-traits. +macro_rules! define_kind_specific_repr { + ($type_name:expr, $repr_name:ident, [ $($repr_variant:ident),* ] , [ $($repr_variant_aligned:ident),* ]) => { + #[derive(Copy, Clone, Debug, Eq, PartialEq)] + pub enum $repr_name { + $($repr_variant,)* + $($repr_variant_aligned(u64),)* + } + + impl KindRepr for $repr_name { + fn is_align(&self) -> bool { + match self { + $($repr_name::$repr_variant_aligned(_) => true,)* + _ => false, + } + } + + fn is_align_gt_one(&self) -> bool { + match self { + // `packed(n)` only lowers alignment + $repr_name::Align(n) => n > &1, + _ => false, + } + } + + fn parse(meta: &Meta) -> syn::Result<$repr_name> { + match Repr::from_meta(meta)? 
{ + $(Repr::$repr_variant => Ok($repr_name::$repr_variant),)* + $(Repr::$repr_variant_aligned(u) => Ok($repr_name::$repr_variant_aligned(u)),)* + _ => Err(Error::new_spanned(meta, concat!("unsupported representation for deriving FromBytes, AsBytes, or Unaligned on ", $type_name))) + } + } + } + + // Define a stable ordering so we can canonicalize lists of reprs. The + // ordering itself doesn't matter so long as it's stable. + impl PartialOrd for $repr_name { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + impl Ord for $repr_name { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + format!("{:?}", self).cmp(&format!("{:?}", other)) + } + } + + impl core::fmt::Display for $repr_name { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + $($repr_name::$repr_variant => Repr::$repr_variant,)* + $($repr_name::$repr_variant_aligned(u) => Repr::$repr_variant_aligned(*u),)* + }.fmt(f) + } + } + } +} + +define_kind_specific_repr!("a struct", StructRepr, [C, Transparent, Packed], [Align, PackedN]); +define_kind_specific_repr!( + "an enum", + EnumRepr, + [C, U8, U16, U32, U64, Usize, I8, I16, I32, I64, Isize], + [Align] +); + +// All representations known to Rust. 
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] +pub enum Repr { + U8, + U16, + U32, + U64, + Usize, + I8, + I16, + I32, + I64, + Isize, + C, + Transparent, + Packed, + PackedN(u64), + Align(u64), +} + +impl Repr { + fn from_meta(meta: &Meta) -> Result { + let (path, list) = match meta { + Meta::Path(path) => (path, None), + Meta::List(list) => (&list.path, Some(list)), + _ => return Err(Error::new_spanned(meta, "unrecognized representation hint")), + }; + + let ident = path + .get_ident() + .ok_or_else(|| Error::new_spanned(meta, "unrecognized representation hint"))?; + + Ok(match (ident.to_string().as_str(), list) { + ("u8", None) => Repr::U8, + ("u16", None) => Repr::U16, + ("u32", None) => Repr::U32, + ("u64", None) => Repr::U64, + ("usize", None) => Repr::Usize, + ("i8", None) => Repr::I8, + ("i16", None) => Repr::I16, + ("i32", None) => Repr::I32, + ("i64", None) => Repr::I64, + ("isize", None) => Repr::Isize, + ("C", None) => Repr::C, + ("transparent", None) => Repr::Transparent, + ("packed", None) => Repr::Packed, + ("packed", Some(list)) => { + Repr::PackedN(list.parse_args::()?.base10_parse::()?) + } + ("align", Some(list)) => { + Repr::Align(list.parse_args::()?.base10_parse::()?) 
+ } + _ => return Err(Error::new_spanned(meta, "unrecognized representation hint")), + }) + } +} + +impl KindRepr for Repr { + fn is_align(&self) -> bool { + false + } + + fn is_align_gt_one(&self) -> bool { + false + } + + fn parse(meta: &Meta) -> syn::Result { + Self::from_meta(meta) + } +} + +impl Display for Repr { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + if let Repr::Align(n) = self { + return write!(f, "repr(align({}))", n); + } + if let Repr::PackedN(n) = self { + return write!(f, "repr(packed({}))", n); + } + write!( + f, + "repr({})", + match self { + Repr::U8 => "u8", + Repr::U16 => "u16", + Repr::U32 => "u32", + Repr::U64 => "u64", + Repr::Usize => "usize", + Repr::I8 => "i8", + Repr::I16 => "i16", + Repr::I32 => "i32", + Repr::I64 => "i64", + Repr::Isize => "isize", + Repr::C => "C", + Repr::Transparent => "transparent", + Repr::Packed => "packed", + _ => unreachable!(), + } + ) + } +} + +pub(crate) fn reprs(attrs: &[Attribute]) -> Result, Vec> { + let mut reprs = Vec::new(); + let mut errors = Vec::new(); + for attr in attrs { + // Ignore documentation attributes. 
+ if attr.path().is_ident("doc") { + continue; + } + if let Meta::List(ref meta_list) = attr.meta { + if meta_list.path.is_ident("repr") { + let parsed: Punctuated = + match meta_list.parse_args_with(Punctuated::parse_terminated) { + Ok(parsed) => parsed, + Err(_) => { + errors.push(Error::new_spanned( + &meta_list.tokens, + "unrecognized representation hint", + )); + continue; + } + }; + for meta in parsed { + match R::parse(&meta) { + Ok(repr) => reprs.push((meta, repr)), + Err(err) => errors.push(err), + } + } + } + } + } + + if !errors.is_empty() { + return Err(errors); + } + Ok(reprs) +} diff --git a/src/rust/vendor/zerocopy-derive/tests/enum_as_bytes.rs b/src/rust/vendor/zerocopy-derive/tests/enum_as_bytes.rs new file mode 100644 index 000000000..e305bc4ce --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/enum_as_bytes.rs @@ -0,0 +1,101 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +use {static_assertions::assert_impl_all, zerocopy::AsBytes}; + +// An enum is `AsBytes` if if has a defined repr. 
+ +#[derive(AsBytes)] +#[repr(C)] +enum C { + A, +} + +assert_impl_all!(C: AsBytes); + +#[derive(AsBytes)] +#[repr(u8)] +enum U8 { + A, +} + +assert_impl_all!(U8: AsBytes); + +#[derive(AsBytes)] +#[repr(u16)] +enum U16 { + A, +} + +assert_impl_all!(U16: AsBytes); + +#[derive(AsBytes)] +#[repr(u32)] +enum U32 { + A, +} + +assert_impl_all!(U32: AsBytes); + +#[derive(AsBytes)] +#[repr(u64)] +enum U64 { + A, +} + +assert_impl_all!(U64: AsBytes); + +#[derive(AsBytes)] +#[repr(usize)] +enum Usize { + A, +} + +assert_impl_all!(Usize: AsBytes); + +#[derive(AsBytes)] +#[repr(i8)] +enum I8 { + A, +} + +assert_impl_all!(I8: AsBytes); + +#[derive(AsBytes)] +#[repr(i16)] +enum I16 { + A, +} + +assert_impl_all!(I16: AsBytes); + +#[derive(AsBytes)] +#[repr(i32)] +enum I32 { + A, +} + +assert_impl_all!(I32: AsBytes); + +#[derive(AsBytes)] +#[repr(i64)] +enum I64 { + A, +} + +assert_impl_all!(I64: AsBytes); + +#[derive(AsBytes)] +#[repr(isize)] +enum Isize { + A, +} + +assert_impl_all!(Isize: AsBytes); diff --git a/src/rust/vendor/zerocopy-derive/tests/enum_from_zeroes.rs b/src/rust/vendor/zerocopy-derive/tests/enum_from_zeroes.rs new file mode 100644 index 000000000..c6bb675f5 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/enum_from_zeroes.rs @@ -0,0 +1,35 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +#![allow(warnings)] + +mod util; + +use {static_assertions::assert_impl_all, zerocopy::FromZeroes}; + +#[derive(FromZeroes)] +enum Foo { + A, +} + +assert_impl_all!(Foo: FromZeroes); + +#[derive(FromZeroes)] +enum Bar { + A = 0, +} + +assert_impl_all!(Bar: FromZeroes); + +#[derive(FromZeroes)] +enum Baz { + A = 1, + B = 0, +} + +assert_impl_all!(Baz: FromZeroes); diff --git a/src/rust/vendor/zerocopy-derive/tests/enum_known_layout.rs b/src/rust/vendor/zerocopy-derive/tests/enum_known_layout.rs new file mode 100644 index 000000000..49a6765e5 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/enum_known_layout.rs @@ -0,0 +1,46 @@ +// Copyright 2022 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#![allow(warnings)] + +mod util; + +use {core::marker::PhantomData, static_assertions::assert_impl_all, zerocopy::KnownLayout}; + +#[derive(KnownLayout)] +enum Foo { + A, +} + +assert_impl_all!(Foo: KnownLayout); + +#[derive(KnownLayout)] +enum Bar { + A = 0, +} + +assert_impl_all!(Bar: KnownLayout); + +#[derive(KnownLayout)] +enum Baz { + A = 1, + B = 0, +} + +assert_impl_all!(Baz: KnownLayout); + +// Deriving `KnownLayout` should work if the enum has bounded parameters. 
+ +#[derive(KnownLayout)] +#[repr(C)] +enum WithParams<'a: 'b, 'b: 'a, const N: usize, T: 'a + 'b + KnownLayout> +where + 'a: 'b, + 'b: 'a, + T: 'a + 'b + KnownLayout, +{ + Variant([T; N], PhantomData<&'a &'b ()>), +} + +assert_impl_all!(WithParams<'static, 'static, 42, u8>: KnownLayout); diff --git a/src/rust/vendor/zerocopy-derive/tests/enum_unaligned.rs b/src/rust/vendor/zerocopy-derive/tests/enum_unaligned.rs new file mode 100644 index 000000000..152ce276b --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/enum_unaligned.rs @@ -0,0 +1,47 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +use {static_assertions::assert_impl_all, zerocopy::Unaligned}; + +// An enum is `Unaligned` if: +// - No `repr(align(N > 1))` +// - `repr(u8)` or `repr(i8)` + +#[derive(Unaligned)] +#[repr(u8)] +enum Foo { + A, +} + +assert_impl_all!(Foo: Unaligned); + +#[derive(Unaligned)] +#[repr(i8)] +enum Bar { + A, +} + +assert_impl_all!(Bar: Unaligned); + +#[derive(Unaligned)] +#[repr(u8, align(1))] +enum Baz { + A, +} + +assert_impl_all!(Baz: Unaligned); + +#[derive(Unaligned)] +#[repr(i8, align(1))] +enum Blah { + B, +} + +assert_impl_all!(Blah: Unaligned); diff --git a/src/rust/vendor/zerocopy-derive/tests/hygiene.rs b/src/rust/vendor/zerocopy-derive/tests/hygiene.rs new file mode 100644 index 000000000..b7b838d6c --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/hygiene.rs @@ -0,0 +1,43 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +// Make sure that macro hygiene will ensure that when we reference "zerocopy", +// that will work properly even if they've renamed the crate and have not +// imported its traits. + +#![allow(warnings)] + +extern crate zerocopy as _zerocopy; + +#[macro_use] +mod util; + +use std::{marker::PhantomData, option::IntoIter}; + +use static_assertions::assert_impl_all; + +#[derive( + _zerocopy::KnownLayout, _zerocopy::FromZeroes, _zerocopy::FromBytes, _zerocopy::Unaligned, +)] +#[repr(C)] +struct TypeParams<'a, T, I: Iterator> { + a: T, + c: I::Item, + d: u8, + e: PhantomData<&'a [u8]>, + f: PhantomData<&'static str>, + g: PhantomData, +} + +assert_impl_all!( + TypeParams<'static, (), IntoIter<()>>: + _zerocopy::KnownLayout, + _zerocopy::FromZeroes, + _zerocopy::FromBytes, + _zerocopy::Unaligned +); diff --git a/src/rust/vendor/zerocopy-derive/tests/paths_and_modules.rs b/src/rust/vendor/zerocopy-derive/tests/paths_and_modules.rs new file mode 100644 index 000000000..a01983b09 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/paths_and_modules.rs @@ -0,0 +1,38 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +use zerocopy::{AsBytes, FromBytes, FromZeroes, Unaligned}; + +// Ensure that types that are use'd and types that are referenced by path work. 
+ +mod foo { + use zerocopy::{AsBytes, FromBytes, FromZeroes, Unaligned}; + + #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + #[repr(C)] + pub struct Foo { + foo: u8, + } + + #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + #[repr(C)] + pub struct Bar { + bar: u8, + } +} + +use foo::Foo; + +#[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] +#[repr(C)] +struct Baz { + foo: Foo, + bar: foo::Bar, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/priv_in_pub.rs b/src/rust/vendor/zerocopy-derive/tests/priv_in_pub.rs new file mode 100644 index 000000000..5f7d8749d --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/priv_in_pub.rs @@ -0,0 +1,24 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::{AsBytes, FromBytes, FromZeroes, KnownLayout, Unaligned}; + +// These derives do not result in E0446 as of Rust 1.59.0, because of +// https://github.com/rust-lang/rust/pull/90586. +// +// This change eliminates one of the major downsides of emitting `where` +// bounds for field types (i.e., the emission of E0446 for private field +// types). + +#[derive(KnownLayout, AsBytes, FromZeroes, FromBytes, Unaligned)] +#[repr(C)] +pub struct Public(Private); + +#[derive(KnownLayout, AsBytes, FromZeroes, FromBytes, Unaligned)] +#[repr(C)] +struct Private(()); diff --git a/src/rust/vendor/zerocopy-derive/tests/struct_as_bytes.rs b/src/rust/vendor/zerocopy-derive/tests/struct_as_bytes.rs new file mode 100644 index 000000000..3c71bf07b --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/struct_as_bytes.rs @@ -0,0 +1,161 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +mod util; + +use std::{marker::PhantomData, mem::ManuallyDrop, option::IntoIter}; + +use {static_assertions::assert_impl_all, zerocopy::AsBytes}; + +use self::util::AU16; + +// A struct is `AsBytes` if: +// - all fields are `AsBytes` +// - `repr(C)` or `repr(transparent)` and +// - no padding (size of struct equals sum of size of field types) +// - `repr(packed)` + +#[derive(AsBytes)] +#[repr(C)] +struct CZst; + +assert_impl_all!(CZst: AsBytes); + +#[derive(AsBytes)] +#[repr(C)] +struct C { + a: u8, + b: u8, + c: AU16, +} + +assert_impl_all!(C: AsBytes); + +#[derive(AsBytes)] +#[repr(transparent)] +struct Transparent { + a: u8, + b: CZst, +} + +assert_impl_all!(Transparent: AsBytes); + +#[derive(AsBytes)] +#[repr(transparent)] +struct TransparentGeneric { + a: CZst, + b: T, +} + +assert_impl_all!(TransparentGeneric: AsBytes); +assert_impl_all!(TransparentGeneric<[u64]>: AsBytes); + +#[derive(AsBytes)] +#[repr(C, packed)] +struct CZstPacked; + +assert_impl_all!(CZstPacked: AsBytes); + +#[derive(AsBytes)] +#[repr(C, packed)] +struct CPacked { + a: u8, + // NOTE: The `u16` type is not guaranteed to have alignment 2, although it + // does on many platforms. However, to fix this would require a custom type + // with a `#[repr(align(2))]` attribute, and `#[repr(packed)]` types are not + // allowed to transitively contain `#[repr(align(...))]` types. Thus, we + // have no choice but to use `u16` here. Luckily, these tests run in CI on + // platforms on which `u16` has alignment 2, so this isn't that big of a + // deal. + b: u16, +} + +assert_impl_all!(CPacked: AsBytes); + +#[derive(AsBytes)] +#[repr(C, packed(2))] +// The same caveats as for CPacked apply - we're assuming u64 is at least +// 4-byte aligned by default. Without packed(2), this should fail, as there +// would be padding between a/b assuming u64 is 4+ byte aligned. 
+struct CPacked2 { + a: u16, + b: u64, +} + +assert_impl_all!(CPacked2: AsBytes); + +#[derive(AsBytes)] +#[repr(C, packed)] +struct CPackedGeneric { + t: T, + // Unsized types stored in `repr(packed)` structs must not be dropped + // because dropping them in-place might be unsound depending on the + // alignment of the outer struct. Sized types can be dropped by first being + // moved to an aligned stack variable, but this isn't possible with unsized + // types. + u: ManuallyDrop, +} + +assert_impl_all!(CPackedGeneric: AsBytes); +assert_impl_all!(CPackedGeneric: AsBytes); + +#[derive(AsBytes)] +#[repr(packed)] +struct Packed { + a: u8, + // NOTE: The `u16` type is not guaranteed to have alignment 2, although it + // does on many platforms. However, to fix this would require a custom type + // with a `#[repr(align(2))]` attribute, and `#[repr(packed)]` types are not + // allowed to transitively contain `#[repr(align(...))]` types. Thus, we + // have no choice but to use `u16` here. Luckily, these tests run in CI on + // platforms on which `u16` has alignment 2, so this isn't that big of a + // deal. + b: u16, +} + +assert_impl_all!(Packed: AsBytes); + +#[derive(AsBytes)] +#[repr(packed)] +struct PackedGeneric { + t: T, + // Unsized types stored in `repr(packed)` structs must not be dropped + // because dropping them in-place might be unsound depending on the + // alignment of the outer struct. Sized types can be dropped by first being + // moved to an aligned stack variable, but this isn't possible with unsized + // types. + u: ManuallyDrop, +} + +assert_impl_all!(PackedGeneric: AsBytes); +assert_impl_all!(PackedGeneric: AsBytes); + +#[derive(AsBytes)] +#[repr(transparent)] +struct Unsized { + a: [u8], +} + +assert_impl_all!(Unsized: AsBytes); + +// Deriving `AsBytes` should work if the struct has bounded parameters. 
+ +#[derive(AsBytes)] +#[repr(transparent)] +struct WithParams<'a: 'b, 'b: 'a, const N: usize, T: 'a + 'b + AsBytes>( + [T; N], + PhantomData<&'a &'b ()>, +) +where + 'a: 'b, + 'b: 'a, + T: 'a + 'b + AsBytes; + +assert_impl_all!(WithParams<'static, 'static, 42, u8>: AsBytes); diff --git a/src/rust/vendor/zerocopy-derive/tests/struct_from_bytes.rs b/src/rust/vendor/zerocopy-derive/tests/struct_from_bytes.rs new file mode 100644 index 000000000..98f03d164 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/struct_from_bytes.rs @@ -0,0 +1,79 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +mod util; + +use std::{marker::PhantomData, option::IntoIter}; + +use { + static_assertions::assert_impl_all, + zerocopy::{FromBytes, FromZeroes}, +}; + +use crate::util::AU16; + +// A struct is `FromBytes` if: +// - all fields are `FromBytes` + +#[derive(FromZeroes, FromBytes)] +struct Zst; + +assert_impl_all!(Zst: FromBytes); + +#[derive(FromZeroes, FromBytes)] +struct One { + a: u8, +} + +assert_impl_all!(One: FromBytes); + +#[derive(FromZeroes, FromBytes)] +struct Two { + a: u8, + b: Zst, +} + +assert_impl_all!(Two: FromBytes); + +#[derive(FromZeroes, FromBytes)] +struct Unsized { + a: [u8], +} + +assert_impl_all!(Unsized: FromBytes); + +#[derive(FromZeroes, FromBytes)] +struct TypeParams<'a, T: ?Sized, I: Iterator> { + a: I::Item, + b: u8, + c: PhantomData<&'a [u8]>, + d: PhantomData<&'static str>, + e: PhantomData, + f: T, +} + +assert_impl_all!(TypeParams<'static, (), IntoIter<()>>: FromBytes); +assert_impl_all!(TypeParams<'static, AU16, IntoIter<()>>: FromBytes); +assert_impl_all!(TypeParams<'static, [AU16], IntoIter<()>>: FromBytes); + +// Deriving `FromBytes` should work if the struct has bounded parameters. 
+ +#[derive(FromZeroes, FromBytes)] +#[repr(transparent)] +struct WithParams<'a: 'b, 'b: 'a, const N: usize, T: 'a + 'b + FromBytes>( + [T; N], + PhantomData<&'a &'b ()>, +) +where + 'a: 'b, + 'b: 'a, + T: 'a + 'b + FromBytes; + +assert_impl_all!(WithParams<'static, 'static, 42, u8>: FromBytes); diff --git a/src/rust/vendor/zerocopy-derive/tests/struct_from_zeroes.rs b/src/rust/vendor/zerocopy-derive/tests/struct_from_zeroes.rs new file mode 100644 index 000000000..75d824594 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/struct_from_zeroes.rs @@ -0,0 +1,77 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +#[macro_use] +mod util; + +use std::{marker::PhantomData, option::IntoIter}; + +use {static_assertions::assert_impl_all, zerocopy::FromZeroes}; + +use crate::util::AU16; + +// A struct is `FromZeroes` if: +// - all fields are `FromZeroes` + +#[derive(FromZeroes)] +struct Zst; + +assert_impl_all!(Zst: FromZeroes); + +#[derive(FromZeroes)] +struct One { + a: bool, +} + +assert_impl_all!(One: FromZeroes); + +#[derive(FromZeroes)] +struct Two { + a: bool, + b: Zst, +} + +assert_impl_all!(Two: FromZeroes); + +#[derive(FromZeroes)] +struct Unsized { + a: [u8], +} + +assert_impl_all!(Unsized: FromZeroes); + +#[derive(FromZeroes)] +struct TypeParams<'a, T: ?Sized, I: Iterator> { + a: I::Item, + b: u8, + c: PhantomData<&'a [u8]>, + d: PhantomData<&'static str>, + e: PhantomData, + f: T, +} + +assert_impl_all!(TypeParams<'static, (), IntoIter<()>>: FromZeroes); +assert_impl_all!(TypeParams<'static, AU16, IntoIter<()>>: FromZeroes); +assert_impl_all!(TypeParams<'static, [AU16], IntoIter<()>>: FromZeroes); + +// Deriving `FromZeroes` should work if the struct has bounded parameters. 
+ +#[derive(FromZeroes)] +#[repr(transparent)] +struct WithParams<'a: 'b, 'b: 'a, const N: usize, T: 'a + 'b + FromZeroes>( + [T; N], + PhantomData<&'a &'b ()>, +) +where + 'a: 'b, + 'b: 'a, + T: 'a + 'b + FromZeroes; + +assert_impl_all!(WithParams<'static, 'static, 42, u8>: FromZeroes); diff --git a/src/rust/vendor/zerocopy-derive/tests/struct_known_layout.rs b/src/rust/vendor/zerocopy-derive/tests/struct_known_layout.rs new file mode 100644 index 000000000..68d1284de --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/struct_known_layout.rs @@ -0,0 +1,65 @@ +// Copyright 2022 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#![allow(warnings)] + +#[macro_use] +mod util; + +use std::{marker::PhantomData, option::IntoIter}; + +use { + static_assertions::assert_impl_all, + zerocopy::{DstLayout, KnownLayout}, +}; + +use crate::util::AU16; + +#[derive(KnownLayout)] +struct Zst; + +assert_impl_all!(Zst: KnownLayout); + +#[derive(KnownLayout)] +struct One { + a: bool, +} + +assert_impl_all!(One: KnownLayout); + +#[derive(KnownLayout)] +struct Two { + a: bool, + b: Zst, +} + +assert_impl_all!(Two: KnownLayout); + +#[derive(KnownLayout)] +struct TypeParams<'a, T, I: Iterator> { + a: I::Item, + b: u8, + c: PhantomData<&'a [u8]>, + d: PhantomData<&'static str>, + e: PhantomData, + f: T, +} + +assert_impl_all!(TypeParams<'static, (), IntoIter<()>>: KnownLayout); +assert_impl_all!(TypeParams<'static, AU16, IntoIter<()>>: KnownLayout); + +// Deriving `KnownLayout` should work if the struct has bounded parameters. 
+ +#[derive(KnownLayout)] +#[repr(C)] +struct WithParams<'a: 'b, 'b: 'a, const N: usize, T: 'a + 'b + KnownLayout>( + [T; N], + PhantomData<&'a &'b ()>, +) +where + 'a: 'b, + 'b: 'a, + T: 'a + 'b + KnownLayout; + +assert_impl_all!(WithParams<'static, 'static, 42, u8>: KnownLayout); diff --git a/src/rust/vendor/zerocopy-derive/tests/struct_unaligned.rs b/src/rust/vendor/zerocopy-derive/tests/struct_unaligned.rs new file mode 100644 index 000000000..a7db4322a --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/struct_unaligned.rs @@ -0,0 +1,100 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +mod util; + +use std::{marker::PhantomData, option::IntoIter}; + +use {static_assertions::assert_impl_all, zerocopy::Unaligned}; + +use crate::util::AU16; + +// A struct is `Unaligned` if: +// - `repr(align)` is no more than 1 and either +// - `repr(C)` or `repr(transparent)` and +// - all fields Unaligned +// - `repr(packed)` + +#[derive(Unaligned)] +#[repr(C)] +struct Foo { + a: u8, +} + +assert_impl_all!(Foo: Unaligned); + +#[derive(Unaligned)] +#[repr(transparent)] +struct Bar { + a: u8, +} + +assert_impl_all!(Bar: Unaligned); + +#[derive(Unaligned)] +#[repr(packed)] +struct Baz { + // NOTE: The `u16` type is not guaranteed to have alignment 2, although it + // does on many platforms. However, to fix this would require a custom type + // with a `#[repr(align(2))]` attribute, and `#[repr(packed)]` types are not + // allowed to transitively contain `#[repr(align(...))]` types. Thus, we + // have no choice but to use `u16` here. Luckily, these tests run in CI on + // platforms on which `u16` has alignment 2, so this isn't that big of a + // deal. 
+ a: u16, +} + +assert_impl_all!(Baz: Unaligned); + +#[derive(Unaligned)] +#[repr(C, align(1))] +struct FooAlign { + a: u8, +} + +assert_impl_all!(FooAlign: Unaligned); + +#[derive(Unaligned)] +#[repr(transparent)] +struct Unsized { + a: [u8], +} + +assert_impl_all!(Unsized: Unaligned); + +#[derive(Unaligned)] +#[repr(C)] +struct TypeParams<'a, T: ?Sized, I: Iterator> { + a: I::Item, + b: u8, + c: PhantomData<&'a [u8]>, + d: PhantomData<&'static str>, + e: PhantomData, + f: T, +} + +assert_impl_all!(TypeParams<'static, (), IntoIter<()>>: Unaligned); +assert_impl_all!(TypeParams<'static, u8, IntoIter<()>>: Unaligned); +assert_impl_all!(TypeParams<'static, [u8], IntoIter<()>>: Unaligned); + +// Deriving `Unaligned` should work if the struct has bounded parameters. + +#[derive(Unaligned)] +#[repr(transparent)] +struct WithParams<'a: 'b, 'b: 'a, const N: usize, T: 'a + 'b + Unaligned>( + [T; N], + PhantomData<&'a &'b ()>, +) +where + 'a: 'b, + 'b: 'a, + T: 'a + 'b + Unaligned; + +assert_impl_all!(WithParams<'static, 'static, 42, u8>: Unaligned); diff --git a/src/rust/vendor/zerocopy-derive/tests/trybuild.rs b/src/rust/vendor/zerocopy-derive/tests/trybuild.rs new file mode 100644 index 000000000..3ea1c3bfb --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/trybuild.rs @@ -0,0 +1,19 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[test] +#[cfg_attr(miri, ignore)] +fn ui() { + let version = testutil::ToolchainVersion::extract_from_pwd().unwrap(); + // See the doc comment on this method for an explanation of what this does + // and why we store source files in different directories. 
+ let source_files_dirname = version.get_ui_source_files_dirname_and_maybe_print_warning(); + + let t = trybuild::TestCases::new(); + t.compile_fail(format!("tests/{source_files_dirname}/*.rs")); +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/derive_transparent.rs b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/derive_transparent.rs new file mode 100644 index 000000000..2084d921b --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/derive_transparent.rs @@ -0,0 +1,40 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use core::marker::PhantomData; + +use { + static_assertions::assert_impl_all, + zerocopy::{AsBytes, FromBytes, FromZeroes, Unaligned}, +}; + +use self::util::NotZerocopy; + +fn main() {} + +// Test generic transparent structs + +#[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] +#[repr(transparent)] +struct TransparentStruct { + inner: T, + _phantom: PhantomData<()>, +} + +// It should be legal to derive these traits on a transparent struct, but it +// must also ensure the traits are only implemented when the inner type +// implements them. 
+assert_impl_all!(TransparentStruct: FromZeroes); +assert_impl_all!(TransparentStruct: FromBytes); +assert_impl_all!(TransparentStruct: AsBytes); +assert_impl_all!(TransparentStruct: Unaligned); diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/derive_transparent.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/derive_transparent.stderr new file mode 100644 index 000000000..3b228b155 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/derive_transparent.stderr @@ -0,0 +1,71 @@ +error[E0277]: the trait bound `NotZerocopy: FromZeroes` is not satisfied + --> tests/ui-msrv/derive_transparent.rs:37:1 + | +37 | assert_impl_all!(TransparentStruct: FromZeroes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromZeroes` is not implemented for `NotZerocopy` + | +note: required because of the requirements on the impl of `FromZeroes` for `TransparentStruct` + --> tests/ui-msrv/derive_transparent.rs:27:19 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^^^^ +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-msrv/derive_transparent.rs:37:1 + | +37 | assert_impl_all!(TransparentStruct: FromZeroes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `_::{closure#0}::assert_impl_all` + = note: this error originates in the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-msrv/derive_transparent.rs:38:1 + | +38 | assert_impl_all!(TransparentStruct: FromBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `NotZerocopy` + | +note: required because of the requirements on the impl of `FromBytes` for `TransparentStruct` + --> tests/ui-msrv/derive_transparent.rs:27:31 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^^^ +note: 
required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-msrv/derive_transparent.rs:38:1 + | +38 | assert_impl_all!(TransparentStruct: FromBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `_::{closure#0}::assert_impl_all` + = note: this error originates in the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-msrv/derive_transparent.rs:39:1 + | +39 | assert_impl_all!(TransparentStruct: AsBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | +note: required because of the requirements on the impl of `AsBytes` for `TransparentStruct` + --> tests/ui-msrv/derive_transparent.rs:27:10 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^ +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-msrv/derive_transparent.rs:39:1 + | +39 | assert_impl_all!(TransparentStruct: AsBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `_::{closure#0}::assert_impl_all` + = note: this error originates in the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: Unaligned` is not satisfied + --> tests/ui-msrv/derive_transparent.rs:40:1 + | +40 | assert_impl_all!(TransparentStruct: Unaligned); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Unaligned` is not implemented for `NotZerocopy` + | +note: required because of the requirements on the impl of `Unaligned` for `TransparentStruct` + --> tests/ui-msrv/derive_transparent.rs:27:42 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^^^ +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-msrv/derive_transparent.rs:40:1 + | +40 | 
assert_impl_all!(TransparentStruct: Unaligned); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `_::{closure#0}::assert_impl_all` + = note: this error originates in the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum.rs b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum.rs new file mode 100644 index 000000000..31d5679d1 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum.rs @@ -0,0 +1,194 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +// +// Generic errors +// + +#[derive(FromZeroes, FromBytes)] +#[repr("foo")] +enum Generic1 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(foo)] +enum Generic2 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(transparent)] +enum Generic3 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(u8, u16)] +enum Generic4 { + A, +} + +#[derive(FromZeroes, FromBytes)] +enum Generic5 { + A, +} + +// +// FromZeroes errors +// + +#[derive(FromZeroes)] +enum FromZeroes1 { + A(u8), +} + +#[derive(FromZeroes)] +enum FromZeroes2 { + A, + B(u8), +} + +#[derive(FromZeroes)] +enum FromZeroes3 { + A = 1, + B, +} + +// +// FromBytes errors +// + +#[derive(FromZeroes, FromBytes)] +#[repr(C)] +enum FromBytes1 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(usize)] +enum FromBytes2 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(isize)] +enum FromBytes3 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(u32)] +enum FromBytes4 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(i32)] +enum FromBytes5 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(u64)] +enum FromBytes6 { + A, +} + 
+#[derive(FromZeroes, FromBytes)] +#[repr(i64)] +enum FromBytes7 { + A, +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C)] +enum Unaligned1 { + A, +} + +#[derive(Unaligned)] +#[repr(u16)] +enum Unaligned2 { + A, +} + +#[derive(Unaligned)] +#[repr(i16)] +enum Unaligned3 { + A, +} + +#[derive(Unaligned)] +#[repr(u32)] +enum Unaligned4 { + A, +} + +#[derive(Unaligned)] +#[repr(i32)] +enum Unaligned5 { + A, +} + +#[derive(Unaligned)] +#[repr(u64)] +enum Unaligned6 { + A, +} + +#[derive(Unaligned)] +#[repr(i64)] +enum Unaligned7 { + A, +} + +#[derive(Unaligned)] +#[repr(usize)] +enum Unaligned8 { + A, +} + +#[derive(Unaligned)] +#[repr(isize)] +enum Unaligned9 { + A, +} + +#[derive(Unaligned)] +#[repr(u8, align(2))] +enum Unaligned10 { + A, +} + +#[derive(Unaligned)] +#[repr(i8, align(2))] +enum Unaligned11 { + A, +} + +#[derive(Unaligned)] +#[repr(align(1), align(2))] +enum Unaligned12 { + A, +} + +#[derive(Unaligned)] +#[repr(align(2), align(4))] +enum Unaligned13 { + A, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum.stderr new file mode 100644 index 000000000..39bde3f9e --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum.stderr @@ -0,0 +1,199 @@ +error: unrecognized representation hint + --> tests/ui-msrv/enum.rs:19:8 + | +19 | #[repr("foo")] + | ^^^^^ + +error: unrecognized representation hint + --> tests/ui-msrv/enum.rs:25:8 + | +25 | #[repr(foo)] + | ^^^ + +error: unsupported representation for deriving FromBytes, AsBytes, or Unaligned on an enum + --> tests/ui-msrv/enum.rs:31:8 + | +31 | #[repr(transparent)] + | ^^^^^^^^^^^ + +error: conflicting representation hints + --> tests/ui-msrv/enum.rs:37:1 + | +37 | #[repr(u8, u16)] + | ^ + +error: must have a non-align #[repr(...)] attribute in order to guarantee this type's memory layout + --> tests/ui-msrv/enum.rs:42:22 + | +42 | #[derive(FromZeroes, FromBytes)] + | ^^^^^^^^^ + | + = note: this error 
originates in the derive macro `FromBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: only C-like enums can implement FromZeroes + --> tests/ui-msrv/enum.rs:52:1 + | +52 | / enum FromZeroes1 { +53 | | A(u8), +54 | | } + | |_^ + +error: only C-like enums can implement FromZeroes + --> tests/ui-msrv/enum.rs:57:1 + | +57 | / enum FromZeroes2 { +58 | | A, +59 | | B(u8), +60 | | } + | |_^ + +error: FromZeroes only supported on enums with a variant that has a discriminant of `0` + --> tests/ui-msrv/enum.rs:63:1 + | +63 | / enum FromZeroes3 { +64 | | A = 1, +65 | | B, +66 | | } + | |_^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-msrv/enum.rs:73:8 + | +73 | #[repr(C)] + | ^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-msrv/enum.rs:79:8 + | +79 | #[repr(usize)] + | ^^^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-msrv/enum.rs:85:8 + | +85 | #[repr(isize)] + | ^^^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-msrv/enum.rs:91:8 + | +91 | #[repr(u32)] + | ^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-msrv/enum.rs:97:8 + | +97 | #[repr(i32)] + | ^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-msrv/enum.rs:103:8 + | +103 | #[repr(u64)] + | ^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-msrv/enum.rs:109:8 + | +109 | #[repr(i64)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-msrv/enum.rs:119:8 + | +119 | #[repr(C)] + | ^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-msrv/enum.rs:125:8 + | +125 | #[repr(u16)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-msrv/enum.rs:131:8 + | +131 | #[repr(i16)] + 
| ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-msrv/enum.rs:137:8 + | +137 | #[repr(u32)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-msrv/enum.rs:143:8 + | +143 | #[repr(i32)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-msrv/enum.rs:149:8 + | +149 | #[repr(u64)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-msrv/enum.rs:155:8 + | +155 | #[repr(i64)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-msrv/enum.rs:161:8 + | +161 | #[repr(usize)] + | ^^^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-msrv/enum.rs:167:8 + | +167 | #[repr(isize)] + | ^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/enum.rs:173:12 + | +173 | #[repr(u8, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/enum.rs:179:12 + | +179 | #[repr(i8, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/enum.rs:185:18 + | +185 | #[repr(align(1), align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/enum.rs:191:8 + | +191 | #[repr(align(2), align(4))] + | ^^^^^^^^ + +error[E0565]: meta item in `repr` must be an identifier + --> tests/ui-msrv/enum.rs:19:8 + | +19 | #[repr("foo")] + | ^^^^^ + +error[E0552]: unrecognized representation hint + --> tests/ui-msrv/enum.rs:25:8 + | +25 | #[repr(foo)] + | ^^^ + +error[E0566]: conflicting representation hints + --> tests/ui-msrv/enum.rs:37:8 + | +37 | #[repr(u8, u16)] + | ^^ ^^^ + | + = note: `#[deny(conflicting_repr_hints)]` on by default + = warning: this was 
previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #68585 diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum_from_bytes_u8_too_few.rs b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum_from_bytes_u8_too_few.rs new file mode 100644 index 000000000..1b1bed31f --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum_from_bytes_u8_too_few.rs @@ -0,0 +1,272 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +#[derive(FromBytes)] +#[repr(u8)] +enum Foo { + Variant0, + Variant1, + Variant2, + Variant3, + Variant4, + Variant5, + Variant6, + Variant7, + Variant8, + Variant9, + Variant10, + Variant11, + Variant12, + Variant13, + Variant14, + Variant15, + Variant16, + Variant17, + Variant18, + Variant19, + Variant20, + Variant21, + Variant22, + Variant23, + Variant24, + Variant25, + Variant26, + Variant27, + Variant28, + Variant29, + Variant30, + Variant31, + Variant32, + Variant33, + Variant34, + Variant35, + Variant36, + Variant37, + Variant38, + Variant39, + Variant40, + Variant41, + Variant42, + Variant43, + Variant44, + Variant45, + Variant46, + Variant47, + Variant48, + Variant49, + Variant50, + Variant51, + Variant52, + Variant53, + Variant54, + Variant55, + Variant56, + Variant57, + Variant58, + Variant59, + Variant60, + Variant61, + Variant62, + Variant63, + Variant64, + Variant65, + Variant66, + Variant67, + Variant68, + Variant69, + Variant70, + Variant71, + Variant72, + Variant73, + Variant74, + Variant75, + Variant76, + Variant77, + Variant78, + Variant79, + Variant80, + Variant81, + Variant82, + Variant83, + Variant84, + Variant85, + Variant86, + Variant87, + Variant88, + 
Variant89, + Variant90, + Variant91, + Variant92, + Variant93, + Variant94, + Variant95, + Variant96, + Variant97, + Variant98, + Variant99, + Variant100, + Variant101, + Variant102, + Variant103, + Variant104, + Variant105, + Variant106, + Variant107, + Variant108, + Variant109, + Variant110, + Variant111, + Variant112, + Variant113, + Variant114, + Variant115, + Variant116, + Variant117, + Variant118, + Variant119, + Variant120, + Variant121, + Variant122, + Variant123, + Variant124, + Variant125, + Variant126, + Variant127, + Variant128, + Variant129, + Variant130, + Variant131, + Variant132, + Variant133, + Variant134, + Variant135, + Variant136, + Variant137, + Variant138, + Variant139, + Variant140, + Variant141, + Variant142, + Variant143, + Variant144, + Variant145, + Variant146, + Variant147, + Variant148, + Variant149, + Variant150, + Variant151, + Variant152, + Variant153, + Variant154, + Variant155, + Variant156, + Variant157, + Variant158, + Variant159, + Variant160, + Variant161, + Variant162, + Variant163, + Variant164, + Variant165, + Variant166, + Variant167, + Variant168, + Variant169, + Variant170, + Variant171, + Variant172, + Variant173, + Variant174, + Variant175, + Variant176, + Variant177, + Variant178, + Variant179, + Variant180, + Variant181, + Variant182, + Variant183, + Variant184, + Variant185, + Variant186, + Variant187, + Variant188, + Variant189, + Variant190, + Variant191, + Variant192, + Variant193, + Variant194, + Variant195, + Variant196, + Variant197, + Variant198, + Variant199, + Variant200, + Variant201, + Variant202, + Variant203, + Variant204, + Variant205, + Variant206, + Variant207, + Variant208, + Variant209, + Variant210, + Variant211, + Variant212, + Variant213, + Variant214, + Variant215, + Variant216, + Variant217, + Variant218, + Variant219, + Variant220, + Variant221, + Variant222, + Variant223, + Variant224, + Variant225, + Variant226, + Variant227, + Variant228, + Variant229, + Variant230, + Variant231, + 
Variant232, + Variant233, + Variant234, + Variant235, + Variant236, + Variant237, + Variant238, + Variant239, + Variant240, + Variant241, + Variant242, + Variant243, + Variant244, + Variant245, + Variant246, + Variant247, + Variant248, + Variant249, + Variant250, + Variant251, + Variant252, + Variant253, + Variant254, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum_from_bytes_u8_too_few.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum_from_bytes_u8_too_few.stderr new file mode 100644 index 000000000..ff828dccb --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/enum_from_bytes_u8_too_few.stderr @@ -0,0 +1,11 @@ +error: FromBytes only supported on repr(u8) enum with 256 variants + --> tests/ui-msrv/enum_from_bytes_u8_too_few.rs:15:1 + | +15 | / #[repr(u8)] +16 | | enum Foo { +17 | | Variant0, +18 | | Variant1, +... | +271 | | Variant254, +272 | | } + | |_^ diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/late_compile_pass.rs b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/late_compile_pass.rs new file mode 100644 index 000000000..cd65a6ed2 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/late_compile_pass.rs @@ -0,0 +1,75 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use self::util::{NotZerocopy, AU16}; +use zerocopy::KnownLayout; + +fn main() {} + +// These tests cause errors which are generated by a later compilation pass than +// the other errors we generate, and so if they're compiled in the same file, +// the compiler will never get to that pass, and so we won't get the errors. 
+ +// +// FromZeroes errors +// + +#[derive(FromZeroes)] +struct FromZeroes1 { + value: NotZerocopy, +} + +// +// FromBytes errors +// + +#[derive(FromBytes)] +struct FromBytes1 { + value: NotZerocopy, +} + +// +// AsBytes errors +// + +#[derive(AsBytes)] +#[repr(C)] +struct AsBytes1 { + value: NotZerocopy, +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C)] +struct Unaligned1 { + aligned: AU16, +} + +// This specifically tests a bug we had in an old version of the code in which +// the trait bound would only be enforced for the first field's type. +#[derive(Unaligned)] +#[repr(C)] +struct Unaligned2 { + unaligned: u8, + aligned: AU16, +} + +#[derive(Unaligned)] +#[repr(transparent)] +struct Unaligned3 { + aligned: AU16, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/late_compile_pass.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/late_compile_pass.stderr new file mode 100644 index 000000000..39dbcd186 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/late_compile_pass.stderr @@ -0,0 +1,74 @@ +warning: unused import: `zerocopy::KnownLayout` + --> tests/ui-msrv/late_compile_pass.rs:16:5 + | +16 | use zerocopy::KnownLayout; + | ^^^^^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(unused_imports)]` on by default + +error[E0277]: the trait bound `NotZerocopy: FromZeroes` is not satisfied + --> tests/ui-msrv/late_compile_pass.rs:28:10 + | +28 | #[derive(FromZeroes)] + | ^^^^^^^^^^ the trait `FromZeroes` is not implemented for `NotZerocopy` + | + = help: see issue #48214 + = note: this error originates in the derive macro `FromZeroes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-msrv/late_compile_pass.rs:37:10 + | +37 | #[derive(FromBytes)] + | ^^^^^^^^^ the trait `FromBytes` is not implemented for `NotZerocopy` + | + = help: see issue #48214 + = note: this error originates in the derive macro `FromBytes` (in Nightly 
builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `FromBytes1: FromZeroes` is not satisfied + --> tests/ui-msrv/late_compile_pass.rs:37:10 + | +37 | #[derive(FromBytes)] + | ^^^^^^^^^ the trait `FromZeroes` is not implemented for `FromBytes1` + | +note: required by a bound in `FromBytes` + --> $WORKSPACE/src/lib.rs + | + | pub unsafe trait FromBytes: FromZeroes { + | ^^^^^^^^^^ required by this bound in `FromBytes` + = note: this error originates in the derive macro `FromBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-msrv/late_compile_pass.rs:46:10 + | +46 | #[derive(AsBytes)] + | ^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `AU16: Unaligned` is not satisfied + --> tests/ui-msrv/late_compile_pass.rs:56:10 + | +56 | #[derive(Unaligned)] + | ^^^^^^^^^ the trait `Unaligned` is not implemented for `AU16` + | + = help: see issue #48214 + = note: this error originates in the derive macro `Unaligned` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `AU16: Unaligned` is not satisfied + --> tests/ui-msrv/late_compile_pass.rs:64:10 + | +64 | #[derive(Unaligned)] + | ^^^^^^^^^ the trait `Unaligned` is not implemented for `AU16` + | + = help: see issue #48214 + = note: this error originates in the derive macro `Unaligned` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `AU16: Unaligned` is not satisfied + --> tests/ui-msrv/late_compile_pass.rs:71:10 + | +71 | #[derive(Unaligned)] + | ^^^^^^^^^ the trait `Unaligned` is not implemented for `AU16` + | + = help: see issue #48214 + = note: this error originates in the derive macro 
`Unaligned` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/mid_compile_pass.rs b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/mid_compile_pass.rs new file mode 100644 index 000000000..e0c4bc578 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/mid_compile_pass.rs @@ -0,0 +1,61 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::KnownLayout; + +fn main() {} + +// These tests cause errors which are generated by a later compilation pass than +// the other errors we generate, and so if they're compiled in the same file, +// the compiler will never get to that pass, and so we won't get the errors. + +// +// KnownLayout errors +// + +fn assert_kl(_: &T) {} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | Y | N | N | KL04 | +#[derive(KnownLayout)] +struct KL04(u8, T); + +fn test_kl04(kl: &KL04) { + assert_kl(kl); +} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | Y | Y | N | KL06 | +#[derive(KnownLayout)] +struct KL06(u8, T); + +fn test_kl06(kl: &KL06) { + assert_kl(kl); +} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | Y | Y | N | N | KL12 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL12(u8, T); + +fn test_kl12(kl: &KL12) { + assert_kl(kl) +} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | +// | Y | Y | N | Y | KL13 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL13(u8, T); + +fn test_kl13(t: T) -> impl KnownLayout { + KL13(0u8, t) +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/mid_compile_pass.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/mid_compile_pass.stderr new file mode 100644 index 000000000..5aa2cde0a --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/mid_compile_pass.stderr @@ -0,0 +1,104 @@ +error[E0277]: the trait bound `T: KnownLayout` is not satisfied + --> tests/ui-msrv/mid_compile_pass.rs:59:26 + | +59 | fn test_kl13(t: T) -> impl KnownLayout { + | ^^^^^^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `T` + | +note: required because of the requirements on the impl of `KnownLayout` for `KL13` + --> tests/ui-msrv/mid_compile_pass.rs:55:10 + | +55 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +59 | fn test_kl13(t: T) -> impl KnownLayout { + | +++++++++++++++++++++++ + +error[E0277]: the size for values of type `T` cannot be known at compilation time + --> tests/ui-msrv/mid_compile_pass.rs:31:15 + | +30 | fn test_kl04(kl: &KL04) { + | - this type parameter needs to be `std::marker::Sized` +31 | assert_kl(kl); + | --------- ^^ doesn't have a size known at compile-time + | | + | required by a bound introduced by this call + | +note: required because it appears within the type `KL04` + --> tests/ui-msrv/mid_compile_pass.rs:28:8 + | +28 | struct KL04(u8, T); + | ^^^^ +note: required because of the requirements on the impl of `KnownLayout` for `KL04` + --> tests/ui-msrv/mid_compile_pass.rs:27:10 + | +27 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ +note: required by a bound in `assert_kl` + --> tests/ui-msrv/mid_compile_pass.rs:23:26 + | +23 | fn assert_kl(_: &T) {} + | ^^^^^^^^^^^ required by this bound in 
`assert_kl` + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider removing the `?Sized` bound to make the type parameter `Sized` + | +30 - fn test_kl04(kl: &KL04) { +30 + fn test_kl04(kl: &KL04) { + | + +error[E0277]: the size for values of type `T` cannot be known at compilation time + --> tests/ui-msrv/mid_compile_pass.rs:40:15 + | +39 | fn test_kl06(kl: &KL06) { + | - this type parameter needs to be `std::marker::Sized` +40 | assert_kl(kl); + | --------- ^^ doesn't have a size known at compile-time + | | + | required by a bound introduced by this call + | +note: required because it appears within the type `KL06` + --> tests/ui-msrv/mid_compile_pass.rs:37:8 + | +37 | struct KL06(u8, T); + | ^^^^ +note: required because of the requirements on the impl of `KnownLayout` for `KL06` + --> tests/ui-msrv/mid_compile_pass.rs:36:10 + | +36 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ +note: required by a bound in `assert_kl` + --> tests/ui-msrv/mid_compile_pass.rs:23:26 + | +23 | fn assert_kl(_: &T) {} + | ^^^^^^^^^^^ required by this bound in `assert_kl` + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider removing the `?Sized` bound to make the type parameter `Sized` + | +39 - fn test_kl06(kl: &KL06) { +39 + fn test_kl06(kl: &KL06) { + | + +error[E0277]: the trait bound `T: KnownLayout` is not satisfied + --> tests/ui-msrv/mid_compile_pass.rs:50:15 + | +50 | assert_kl(kl) + | --------- ^^ the trait `KnownLayout` is not implemented for `T` + | | + | required by a bound introduced by this call + | +note: required because of the requirements on the impl of `KnownLayout` for `KL12` + --> tests/ui-msrv/mid_compile_pass.rs:45:10 + | +45 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ +note: required by a bound in `assert_kl` + --> tests/ui-msrv/mid_compile_pass.rs:23:26 + | +23 | fn assert_kl(_: &T) {} + | 
^^^^^^^^^^^ required by this bound in `assert_kl` + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider further restricting this bound + | +49 | fn test_kl12(kl: &KL12) { + | +++++++++++++++++++++++ diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/struct.rs b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/struct.rs new file mode 100644 index 000000000..c76dc7f95 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/struct.rs @@ -0,0 +1,99 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use zerocopy::KnownLayout; + +use self::util::AU16; + +fn main() {} + +// +// KnownLayout errors +// + +struct NotKnownLayout; + +struct NotKnownLayoutDst([u8]); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | N | N | N | KL00 | +#[derive(KnownLayout)] +struct KL00(u8, NotKnownLayoutDst); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | N | Y | N | KL02 | +#[derive(KnownLayout)] +struct KL02(u8, [u8]); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | Y | N | N | N | KL08 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL08(u8, NotKnownLayoutDst); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | +// | Y | N | N | Y | KL09 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL09(NotKnownLayout, NotKnownLayout); + +// +// AsBytes errors +// + +#[derive(AsBytes)] +#[repr(C)] +struct AsBytes1(T); + +#[derive(AsBytes)] +#[repr(C)] +struct AsBytes2 { + foo: u8, + bar: AU16, +} + +#[derive(AsBytes)] +#[repr(C, packed(2))] +struct AsBytes3 { + foo: u8, + // We'd prefer to use AU64 here, but you can't use aligned types in + // packed structs. + bar: u64, +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C, align(2))] +struct Unaligned1; + +#[derive(Unaligned)] +#[repr(transparent, align(2))] +struct Unaligned2 { + foo: u8, +} + +#[derive(Unaligned)] +#[repr(packed, align(2))] +struct Unaligned3; + +#[derive(Unaligned)] +#[repr(align(1), align(2))] +struct Unaligned4; + +#[derive(Unaligned)] +#[repr(align(2), align(4))] +struct Unaligned5; diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/struct.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/struct.stderr new file mode 100644 index 000000000..f4a435d5f --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/struct.stderr @@ -0,0 +1,113 @@ +error: unsupported on generic structs that are not repr(transparent) or repr(packed) + --> tests/ui-msrv/struct.rs:55:10 + | +55 | #[derive(AsBytes)] + | ^^^^^^^ + | + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/struct.rs:80:11 + | +80 | #[repr(C, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/struct.rs:84:21 + | +84 | #[repr(transparent, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/struct.rs:90:16 + | +90 | #[repr(packed, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/struct.rs:94:18 + | +94 | #[repr(align(1), 
align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/struct.rs:98:8 + | +98 | #[repr(align(2), align(4))] + | ^^^^^^^^ + +error[E0692]: transparent struct cannot have other repr hints + --> tests/ui-msrv/struct.rs:84:8 + | +84 | #[repr(transparent, align(2))] + | ^^^^^^^^^^^ ^^^^^^^^ + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/struct.rs:31:10 + | +31 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: within `KL00`, the trait `Sized` is not implemented for `[u8]` +note: required because it appears within the type `KL00` + --> tests/ui-msrv/struct.rs:32:8 + | +32 | struct KL00(u8, NotKnownLayoutDst); + | ^^^^ + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/struct.rs:36:10 + | +36 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: within `KL02`, the trait `Sized` is not implemented for `[u8]` +note: required because it appears within the type `KL02` + --> tests/ui-msrv/struct.rs:37:8 + | +37 | struct KL02(u8, [u8]); + | ^^^^ + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotKnownLayoutDst: KnownLayout` is not satisfied + --> tests/ui-msrv/struct.rs:41:10 + | +41 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `NotKnownLayoutDst` + | + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotKnownLayout: KnownLayout` is not satisfied + --> 
tests/ui-msrv/struct.rs:47:10 + | +47 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `NotKnownLayout` + | + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied + --> tests/ui-msrv/struct.rs:59:10 + | +59 | #[derive(AsBytes)] + | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` + | + = help: the following implementations were found: + as ShouldBe> + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied + --> tests/ui-msrv/struct.rs:66:10 + | +66 | #[derive(AsBytes)] + | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` + | + = help: the following implementations were found: + as ShouldBe> + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/union.rs b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/union.rs new file mode 100644 index 000000000..8938e7847 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/union.rs @@ -0,0 +1,73 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +#[macro_use] +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use self::util::AU16; +use std::mem::ManuallyDrop; + +fn main() {} + +// +// AsBytes errors +// + +#[derive(AsBytes)] +#[repr(C)] +union AsBytes1 { + foo: ManuallyDrop, +} + +#[derive(AsBytes)] +#[repr(C)] +union AsBytes2 { + foo: u8, + bar: [u8; 2], +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C, align(2))] +union Unaligned1 { + foo: i16, + bar: AU16, +} + +// Transparent unions are unstable; see issue #60405 +// for more information. + +// #[derive(Unaligned)] +// #[repr(transparent, align(2))] +// union Unaligned2 { +// foo: u8, +// } + +#[derive(Unaligned)] +#[repr(packed, align(2))] +union Unaligned3 { + foo: u8, +} + +#[derive(Unaligned)] +#[repr(align(1), align(2))] +struct Unaligned4 { + foo: u8, +} + +#[derive(Unaligned)] +#[repr(align(2), align(4))] +struct Unaligned5 { + foo: u8, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-msrv/union.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/union.stderr new file mode 100644 index 000000000..3e1305921 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-msrv/union.stderr @@ -0,0 +1,42 @@ +error: unsupported on types with type parameters + --> tests/ui-msrv/union.rs:24:10 + | +24 | #[derive(AsBytes)] + | ^^^^^^^ + | + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/union.rs:42:11 + | +42 | #[repr(C, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/union.rs:58:16 + | +58 | #[repr(packed, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/union.rs:64:18 + | +64 | #[repr(align(1), align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-msrv/union.rs:70:8 + | +70 | #[repr(align(2), align(4))] + | 
^^^^^^^^ + +error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied + --> tests/ui-msrv/union.rs:30:10 + | +30 | #[derive(AsBytes)] + | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` + | + = help: the following implementations were found: + as ShouldBe> + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/derive_transparent.rs b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/derive_transparent.rs new file mode 100644 index 000000000..2084d921b --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/derive_transparent.rs @@ -0,0 +1,40 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use core::marker::PhantomData; + +use { + static_assertions::assert_impl_all, + zerocopy::{AsBytes, FromBytes, FromZeroes, Unaligned}, +}; + +use self::util::NotZerocopy; + +fn main() {} + +// Test generic transparent structs + +#[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] +#[repr(transparent)] +struct TransparentStruct { + inner: T, + _phantom: PhantomData<()>, +} + +// It should be legal to derive these traits on a transparent struct, but it +// must also ensure the traits are only implemented when the inner type +// implements them. 
+assert_impl_all!(TransparentStruct: FromZeroes); +assert_impl_all!(TransparentStruct: FromBytes); +assert_impl_all!(TransparentStruct: AsBytes); +assert_impl_all!(TransparentStruct: Unaligned); diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/derive_transparent.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/derive_transparent.stderr new file mode 100644 index 000000000..86533b235 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/derive_transparent.stderr @@ -0,0 +1,111 @@ +error[E0277]: the trait bound `NotZerocopy: FromZeroes` is not satisfied + --> tests/ui-nightly/derive_transparent.rs:37:18 + | +37 | assert_impl_all!(TransparentStruct: FromZeroes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromZeroes` is not implemented for `NotZerocopy`, which is required by `TransparentStruct: FromZeroes` + | + = help: the following other types implement trait `FromZeroes`: + () + *const T + *mut T + AU16 + F32 + F64 + I128 + I16 + and $N others +note: required for `TransparentStruct` to implement `FromZeroes` + --> tests/ui-nightly/derive_transparent.rs:27:19 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-nightly/derive_transparent.rs:37:1 + | +37 | assert_impl_all!(TransparentStruct: FromZeroes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `assert_impl_all` + = note: this error originates in the derive macro `FromZeroes` which comes from the expansion of the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-nightly/derive_transparent.rs:38:18 + | +38 | assert_impl_all!(TransparentStruct: FromBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for 
`NotZerocopy`, which is required by `TransparentStruct: FromBytes` + | + = help: the following other types implement trait `FromBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required for `TransparentStruct` to implement `FromBytes` + --> tests/ui-nightly/derive_transparent.rs:27:31 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-nightly/derive_transparent.rs:38:1 + | +38 | assert_impl_all!(TransparentStruct: FromBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `assert_impl_all` + = note: this error originates in the derive macro `FromBytes` which comes from the expansion of the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-nightly/derive_transparent.rs:39:18 + | +39 | assert_impl_all!(TransparentStruct: AsBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy`, which is required by `TransparentStruct: AsBytes` + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required for `TransparentStruct` to implement `AsBytes` + --> tests/ui-nightly/derive_transparent.rs:27:10 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-nightly/derive_transparent.rs:39:1 + | +39 | assert_impl_all!(TransparentStruct: AsBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `assert_impl_all` + = note: this error originates in the derive macro `AsBytes` which comes from the expansion of the 
macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: Unaligned` is not satisfied + --> tests/ui-nightly/derive_transparent.rs:40:18 + | +40 | assert_impl_all!(TransparentStruct: Unaligned); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Unaligned` is not implemented for `NotZerocopy`, which is required by `TransparentStruct: Unaligned` + | + = help: the following other types implement trait `Unaligned`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others +note: required for `TransparentStruct` to implement `Unaligned` + --> tests/ui-nightly/derive_transparent.rs:27:42 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-nightly/derive_transparent.rs:40:1 + | +40 | assert_impl_all!(TransparentStruct: Unaligned); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `assert_impl_all` + = note: this error originates in the derive macro `Unaligned` which comes from the expansion of the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum.rs b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum.rs new file mode 100644 index 000000000..31d5679d1 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum.rs @@ -0,0 +1,194 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +#[macro_use] +extern crate zerocopy; + +fn main() {} + +// +// Generic errors +// + +#[derive(FromZeroes, FromBytes)] +#[repr("foo")] +enum Generic1 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(foo)] +enum Generic2 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(transparent)] +enum Generic3 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(u8, u16)] +enum Generic4 { + A, +} + +#[derive(FromZeroes, FromBytes)] +enum Generic5 { + A, +} + +// +// FromZeroes errors +// + +#[derive(FromZeroes)] +enum FromZeroes1 { + A(u8), +} + +#[derive(FromZeroes)] +enum FromZeroes2 { + A, + B(u8), +} + +#[derive(FromZeroes)] +enum FromZeroes3 { + A = 1, + B, +} + +// +// FromBytes errors +// + +#[derive(FromZeroes, FromBytes)] +#[repr(C)] +enum FromBytes1 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(usize)] +enum FromBytes2 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(isize)] +enum FromBytes3 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(u32)] +enum FromBytes4 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(i32)] +enum FromBytes5 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(u64)] +enum FromBytes6 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(i64)] +enum FromBytes7 { + A, +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C)] +enum Unaligned1 { + A, +} + +#[derive(Unaligned)] +#[repr(u16)] +enum Unaligned2 { + A, +} + +#[derive(Unaligned)] +#[repr(i16)] +enum Unaligned3 { + A, +} + +#[derive(Unaligned)] +#[repr(u32)] +enum Unaligned4 { + A, +} + +#[derive(Unaligned)] +#[repr(i32)] +enum Unaligned5 { + A, +} + +#[derive(Unaligned)] +#[repr(u64)] +enum Unaligned6 { + A, +} + +#[derive(Unaligned)] +#[repr(i64)] +enum Unaligned7 { + A, +} + +#[derive(Unaligned)] +#[repr(usize)] +enum Unaligned8 { + A, +} + +#[derive(Unaligned)] +#[repr(isize)] +enum Unaligned9 { + A, +} + +#[derive(Unaligned)] +#[repr(u8, align(2))] +enum Unaligned10 { + A, +} + +#[derive(Unaligned)] +#[repr(i8, align(2))] +enum 
Unaligned11 { + A, +} + +#[derive(Unaligned)] +#[repr(align(1), align(2))] +enum Unaligned12 { + A, +} + +#[derive(Unaligned)] +#[repr(align(2), align(4))] +enum Unaligned13 { + A, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum.stderr new file mode 100644 index 000000000..a4d5edf35 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum.stderr @@ -0,0 +1,201 @@ +error: unrecognized representation hint + --> tests/ui-nightly/enum.rs:19:8 + | +19 | #[repr("foo")] + | ^^^^^ + +error: unrecognized representation hint + --> tests/ui-nightly/enum.rs:25:8 + | +25 | #[repr(foo)] + | ^^^ + +error: unsupported representation for deriving FromBytes, AsBytes, or Unaligned on an enum + --> tests/ui-nightly/enum.rs:31:8 + | +31 | #[repr(transparent)] + | ^^^^^^^^^^^ + +error: conflicting representation hints + --> tests/ui-nightly/enum.rs:37:8 + | +37 | #[repr(u8, u16)] + | ^^^^^^^ + +error: must have a non-align #[repr(...)] attribute in order to guarantee this type's memory layout + --> tests/ui-nightly/enum.rs:42:22 + | +42 | #[derive(FromZeroes, FromBytes)] + | ^^^^^^^^^ + | + = note: this error originates in the derive macro `FromBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: only C-like enums can implement FromZeroes + --> tests/ui-nightly/enum.rs:52:1 + | +52 | / enum FromZeroes1 { +53 | | A(u8), +54 | | } + | |_^ + +error: only C-like enums can implement FromZeroes + --> tests/ui-nightly/enum.rs:57:1 + | +57 | / enum FromZeroes2 { +58 | | A, +59 | | B(u8), +60 | | } + | |_^ + +error: FromZeroes only supported on enums with a variant that has a discriminant of `0` + --> tests/ui-nightly/enum.rs:63:1 + | +63 | / enum FromZeroes3 { +64 | | A = 1, +65 | | B, +66 | | } + | |_^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-nightly/enum.rs:73:8 + | +73 | #[repr(C)] + | ^ + +error: FromBytes requires repr of 
"u8", "u16", "i8", or "i16" + --> tests/ui-nightly/enum.rs:79:8 + | +79 | #[repr(usize)] + | ^^^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-nightly/enum.rs:85:8 + | +85 | #[repr(isize)] + | ^^^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-nightly/enum.rs:91:8 + | +91 | #[repr(u32)] + | ^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-nightly/enum.rs:97:8 + | +97 | #[repr(i32)] + | ^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-nightly/enum.rs:103:8 + | +103 | #[repr(u64)] + | ^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-nightly/enum.rs:109:8 + | +109 | #[repr(i64)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-nightly/enum.rs:119:8 + | +119 | #[repr(C)] + | ^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-nightly/enum.rs:125:8 + | +125 | #[repr(u16)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-nightly/enum.rs:131:8 + | +131 | #[repr(i16)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-nightly/enum.rs:137:8 + | +137 | #[repr(u32)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-nightly/enum.rs:143:8 + | +143 | #[repr(i32)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-nightly/enum.rs:149:8 + | +149 | #[repr(u64)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-nightly/enum.rs:155:8 + | +155 | #[repr(i64)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N 
> 1))) + --> tests/ui-nightly/enum.rs:161:8 + | +161 | #[repr(usize)] + | ^^^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-nightly/enum.rs:167:8 + | +167 | #[repr(isize)] + | ^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/enum.rs:173:12 + | +173 | #[repr(u8, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/enum.rs:179:12 + | +179 | #[repr(i8, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/enum.rs:185:18 + | +185 | #[repr(align(1), align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/enum.rs:191:8 + | +191 | #[repr(align(2), align(4))] + | ^^^^^^^^ + +error[E0565]: meta item in `repr` must be an identifier + --> tests/ui-nightly/enum.rs:19:8 + | +19 | #[repr("foo")] + | ^^^^^ + +error[E0552]: unrecognized representation hint + --> tests/ui-nightly/enum.rs:25:8 + | +25 | #[repr(foo)] + | ^^^ + | + = help: valid reprs are `Rust` (default), `C`, `align`, `packed`, `transparent`, `simd`, `i8`, `u8`, `i16`, `u16`, `i32`, `u32`, `i64`, `u64`, `i128`, `u128`, `isize`, `usize` + +error[E0566]: conflicting representation hints + --> tests/ui-nightly/enum.rs:37:8 + | +37 | #[repr(u8, u16)] + | ^^ ^^^ + | + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
+ = note: for more information, see issue #68585 + = note: `#[deny(conflicting_repr_hints)]` on by default diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum_from_bytes_u8_too_few.rs b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum_from_bytes_u8_too_few.rs new file mode 100644 index 000000000..1b1bed31f --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum_from_bytes_u8_too_few.rs @@ -0,0 +1,272 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +#[derive(FromBytes)] +#[repr(u8)] +enum Foo { + Variant0, + Variant1, + Variant2, + Variant3, + Variant4, + Variant5, + Variant6, + Variant7, + Variant8, + Variant9, + Variant10, + Variant11, + Variant12, + Variant13, + Variant14, + Variant15, + Variant16, + Variant17, + Variant18, + Variant19, + Variant20, + Variant21, + Variant22, + Variant23, + Variant24, + Variant25, + Variant26, + Variant27, + Variant28, + Variant29, + Variant30, + Variant31, + Variant32, + Variant33, + Variant34, + Variant35, + Variant36, + Variant37, + Variant38, + Variant39, + Variant40, + Variant41, + Variant42, + Variant43, + Variant44, + Variant45, + Variant46, + Variant47, + Variant48, + Variant49, + Variant50, + Variant51, + Variant52, + Variant53, + Variant54, + Variant55, + Variant56, + Variant57, + Variant58, + Variant59, + Variant60, + Variant61, + Variant62, + Variant63, + Variant64, + Variant65, + Variant66, + Variant67, + Variant68, + Variant69, + Variant70, + Variant71, + Variant72, + Variant73, + Variant74, + Variant75, + Variant76, + Variant77, + Variant78, + Variant79, + Variant80, + Variant81, + Variant82, + Variant83, + Variant84, + Variant85, + Variant86, + Variant87, + Variant88, + Variant89, + Variant90, + Variant91, + 
Variant92, + Variant93, + Variant94, + Variant95, + Variant96, + Variant97, + Variant98, + Variant99, + Variant100, + Variant101, + Variant102, + Variant103, + Variant104, + Variant105, + Variant106, + Variant107, + Variant108, + Variant109, + Variant110, + Variant111, + Variant112, + Variant113, + Variant114, + Variant115, + Variant116, + Variant117, + Variant118, + Variant119, + Variant120, + Variant121, + Variant122, + Variant123, + Variant124, + Variant125, + Variant126, + Variant127, + Variant128, + Variant129, + Variant130, + Variant131, + Variant132, + Variant133, + Variant134, + Variant135, + Variant136, + Variant137, + Variant138, + Variant139, + Variant140, + Variant141, + Variant142, + Variant143, + Variant144, + Variant145, + Variant146, + Variant147, + Variant148, + Variant149, + Variant150, + Variant151, + Variant152, + Variant153, + Variant154, + Variant155, + Variant156, + Variant157, + Variant158, + Variant159, + Variant160, + Variant161, + Variant162, + Variant163, + Variant164, + Variant165, + Variant166, + Variant167, + Variant168, + Variant169, + Variant170, + Variant171, + Variant172, + Variant173, + Variant174, + Variant175, + Variant176, + Variant177, + Variant178, + Variant179, + Variant180, + Variant181, + Variant182, + Variant183, + Variant184, + Variant185, + Variant186, + Variant187, + Variant188, + Variant189, + Variant190, + Variant191, + Variant192, + Variant193, + Variant194, + Variant195, + Variant196, + Variant197, + Variant198, + Variant199, + Variant200, + Variant201, + Variant202, + Variant203, + Variant204, + Variant205, + Variant206, + Variant207, + Variant208, + Variant209, + Variant210, + Variant211, + Variant212, + Variant213, + Variant214, + Variant215, + Variant216, + Variant217, + Variant218, + Variant219, + Variant220, + Variant221, + Variant222, + Variant223, + Variant224, + Variant225, + Variant226, + Variant227, + Variant228, + Variant229, + Variant230, + Variant231, + Variant232, + Variant233, + Variant234, + 
Variant235, + Variant236, + Variant237, + Variant238, + Variant239, + Variant240, + Variant241, + Variant242, + Variant243, + Variant244, + Variant245, + Variant246, + Variant247, + Variant248, + Variant249, + Variant250, + Variant251, + Variant252, + Variant253, + Variant254, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum_from_bytes_u8_too_few.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum_from_bytes_u8_too_few.stderr new file mode 100644 index 000000000..50cf0e7cb --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/enum_from_bytes_u8_too_few.stderr @@ -0,0 +1,11 @@ +error: FromBytes only supported on repr(u8) enum with 256 variants + --> tests/ui-nightly/enum_from_bytes_u8_too_few.rs:15:1 + | +15 | / #[repr(u8)] +16 | | enum Foo { +17 | | Variant0, +18 | | Variant1, +... | +271 | | Variant254, +272 | | } + | |_^ diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/late_compile_pass.rs b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/late_compile_pass.rs new file mode 100644 index 000000000..cd65a6ed2 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/late_compile_pass.rs @@ -0,0 +1,75 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use self::util::{NotZerocopy, AU16}; +use zerocopy::KnownLayout; + +fn main() {} + +// These tests cause errors which are generated by a later compilation pass than +// the other errors we generate, and so if they're compiled in the same file, +// the compiler will never get to that pass, and so we won't get the errors. 
+ +// +// FromZeroes errors +// + +#[derive(FromZeroes)] +struct FromZeroes1 { + value: NotZerocopy, +} + +// +// FromBytes errors +// + +#[derive(FromBytes)] +struct FromBytes1 { + value: NotZerocopy, +} + +// +// AsBytes errors +// + +#[derive(AsBytes)] +#[repr(C)] +struct AsBytes1 { + value: NotZerocopy, +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C)] +struct Unaligned1 { + aligned: AU16, +} + +// This specifically tests a bug we had in an old version of the code in which +// the trait bound would only be enforced for the first field's type. +#[derive(Unaligned)] +#[repr(C)] +struct Unaligned2 { + unaligned: u8, + aligned: AU16, +} + +#[derive(Unaligned)] +#[repr(transparent)] +struct Unaligned3 { + aligned: AU16, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/late_compile_pass.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/late_compile_pass.stderr new file mode 100644 index 000000000..8d4e33896 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/late_compile_pass.stderr @@ -0,0 +1,168 @@ +warning: unused import: `zerocopy::KnownLayout` + --> tests/ui-nightly/late_compile_pass.rs:16:5 + | +16 | use zerocopy::KnownLayout; + | ^^^^^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(unused_imports)]` on by default + +error[E0277]: the trait bound `NotZerocopy: FromZeroes` is not satisfied + --> tests/ui-nightly/late_compile_pass.rs:28:10 + | +28 | #[derive(FromZeroes)] + | ^^^^^^^^^^ the trait `FromZeroes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `FromZeroes`: + () + *const T + *mut T + AU16 + F32 + F64 + FromZeroes1 + I128 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `FromZeroes` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the trait bound `NotZerocopy: 
FromBytes` is not satisfied + --> tests/ui-nightly/late_compile_pass.rs:37:10 + | +37 | #[derive(FromBytes)] + | ^^^^^^^^^ the trait `FromBytes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `FromBytes`: + () + AU16 + F32 + F64 + FromBytes1 + I128 + I16 + I32 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `FromBytes` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the trait bound `FromBytes1: FromZeroes` is not satisfied + --> tests/ui-nightly/late_compile_pass.rs:37:10 + | +37 | #[derive(FromBytes)] + | ^^^^^^^^^ the trait `FromZeroes` is not implemented for `FromBytes1` + | + = help: the following other types implement trait `FromZeroes`: + () + *const T + *mut T + AU16 + F32 + F64 + FromZeroes1 + I128 + and $N others +note: required by a bound in `FromBytes` + --> $WORKSPACE/src/lib.rs + | + | pub unsafe trait FromBytes: FromZeroes { + | ^^^^^^^^^^ required by this bound in `FromBytes` + = note: this error originates in the derive macro `FromBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-nightly/late_compile_pass.rs:46:10 + | +46 | #[derive(AsBytes)] + | ^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + AsBytes1 + F32 + F64 + I128 + I16 + I32 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the trait bound `AU16: Unaligned` is not satisfied + --> 
tests/ui-nightly/late_compile_pass.rs:56:10 + | +56 | #[derive(Unaligned)] + | ^^^^^^^^^ the trait `Unaligned` is not implemented for `AU16` + | + = help: the following other types implement trait `Unaligned`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `Unaligned` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the trait bound `AU16: Unaligned` is not satisfied + --> tests/ui-nightly/late_compile_pass.rs:64:10 + | +64 | #[derive(Unaligned)] + | ^^^^^^^^^ the trait `Unaligned` is not implemented for `AU16` + | + = help: the following other types implement trait `Unaligned`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `Unaligned` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the trait bound `AU16: Unaligned` is not satisfied + --> tests/ui-nightly/late_compile_pass.rs:71:10 + | +71 | #[derive(Unaligned)] + | ^^^^^^^^^ the trait `Unaligned` is not implemented for `AU16` + | + = help: the following other types implement trait `Unaligned`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `Unaligned` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/mid_compile_pass.rs b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/mid_compile_pass.rs new file mode 100644 index 
000000000..e0c4bc578 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/mid_compile_pass.rs @@ -0,0 +1,61 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::KnownLayout; + +fn main() {} + +// These tests cause errors which are generated by a later compilation pass than +// the other errors we generate, and so if they're compiled in the same file, +// the compiler will never get to that pass, and so we won't get the errors. + +// +// KnownLayout errors +// + +fn assert_kl(_: &T) {} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | Y | N | N | KL04 | +#[derive(KnownLayout)] +struct KL04(u8, T); + +fn test_kl04(kl: &KL04) { + assert_kl(kl); +} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | Y | Y | N | KL06 | +#[derive(KnownLayout)] +struct KL06(u8, T); + +fn test_kl06(kl: &KL06) { + assert_kl(kl); +} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | Y | Y | N | N | KL12 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL12(u8, T); + +fn test_kl12(kl: &KL12) { + assert_kl(kl) +} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | +// | Y | Y | N | Y | KL13 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL13(u8, T); + +fn test_kl13(t: T) -> impl KnownLayout { + KL13(0u8, t) +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/mid_compile_pass.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/mid_compile_pass.stderr new file mode 100644 index 000000000..591727502 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/mid_compile_pass.stderr @@ -0,0 +1,104 @@ +error[E0277]: the trait bound `T: KnownLayout` is not satisfied + --> tests/ui-nightly/mid_compile_pass.rs:59:26 + | +59 | fn test_kl13(t: T) -> impl KnownLayout { + | ^^^^^^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `T`, which is required by `KL13: KnownLayout` + | +note: required for `KL13` to implement `KnownLayout` + --> tests/ui-nightly/mid_compile_pass.rs:55:10 + | +55 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +59 | fn test_kl13(t: T) -> impl KnownLayout { + | +++++++++++++++++++++++ + +error[E0277]: the size for values of type `T` cannot be known at compilation time + --> tests/ui-nightly/mid_compile_pass.rs:31:15 + | +30 | fn test_kl04(kl: &KL04) { + | - this type parameter needs to be `Sized` +31 | assert_kl(kl); + | --------- ^^ doesn't have a size known at compile-time + | | + | required by a bound introduced by this call + | +note: required because it appears within the type `KL04` + --> tests/ui-nightly/mid_compile_pass.rs:28:8 + | +28 | struct KL04(u8, T); + | ^^^^ +note: required for `KL04` to implement `KnownLayout` + --> tests/ui-nightly/mid_compile_pass.rs:27:10 + | +27 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `assert_kl` + --> 
tests/ui-nightly/mid_compile_pass.rs:23:26 + | +23 | fn assert_kl(_: &T) {} + | ^^^^^^^^^^^ required by this bound in `assert_kl` + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider removing the `?Sized` bound to make the type parameter `Sized` + | +30 - fn test_kl04(kl: &KL04) { +30 + fn test_kl04(kl: &KL04) { + | + +error[E0277]: the size for values of type `T` cannot be known at compilation time + --> tests/ui-nightly/mid_compile_pass.rs:40:15 + | +39 | fn test_kl06(kl: &KL06) { + | - this type parameter needs to be `Sized` +40 | assert_kl(kl); + | --------- ^^ doesn't have a size known at compile-time + | | + | required by a bound introduced by this call + | +note: required because it appears within the type `KL06` + --> tests/ui-nightly/mid_compile_pass.rs:37:8 + | +37 | struct KL06(u8, T); + | ^^^^ +note: required for `KL06` to implement `KnownLayout` + --> tests/ui-nightly/mid_compile_pass.rs:36:10 + | +36 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `assert_kl` + --> tests/ui-nightly/mid_compile_pass.rs:23:26 + | +23 | fn assert_kl(_: &T) {} + | ^^^^^^^^^^^ required by this bound in `assert_kl` + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider removing the `?Sized` bound to make the type parameter `Sized` + | +39 - fn test_kl06(kl: &KL06) { +39 + fn test_kl06(kl: &KL06) { + | + +error[E0277]: the trait bound `T: KnownLayout` is not satisfied + --> tests/ui-nightly/mid_compile_pass.rs:50:15 + | +50 | assert_kl(kl) + | --------- ^^ the trait `KnownLayout` is not implemented for `T`, which is required by `KL12: KnownLayout` + | | + | required by a bound introduced by this call + | +note: required for `KL12` to implement `KnownLayout` + --> tests/ui-nightly/mid_compile_pass.rs:45:10 + | +45 | 
#[derive(KnownLayout)] + | ^^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `assert_kl` + --> tests/ui-nightly/mid_compile_pass.rs:23:26 + | +23 | fn assert_kl(_: &T) {} + | ^^^^^^^^^^^ required by this bound in `assert_kl` + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider further restricting this bound + | +49 | fn test_kl12(kl: &KL12) { + | +++++++++++++++++++++++ diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/struct.rs b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/struct.rs new file mode 100644 index 000000000..c76dc7f95 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/struct.rs @@ -0,0 +1,99 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use zerocopy::KnownLayout; + +use self::util::AU16; + +fn main() {} + +// +// KnownLayout errors +// + +struct NotKnownLayout; + +struct NotKnownLayoutDst([u8]); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | N | N | N | KL00 | +#[derive(KnownLayout)] +struct KL00(u8, NotKnownLayoutDst); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | N | Y | N | KL02 | +#[derive(KnownLayout)] +struct KL02(u8, [u8]); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | Y | N | N | N | KL08 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL08(u8, NotKnownLayoutDst); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | +// | Y | N | N | Y | KL09 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL09(NotKnownLayout, NotKnownLayout); + +// +// AsBytes errors +// + +#[derive(AsBytes)] +#[repr(C)] +struct AsBytes1(T); + +#[derive(AsBytes)] +#[repr(C)] +struct AsBytes2 { + foo: u8, + bar: AU16, +} + +#[derive(AsBytes)] +#[repr(C, packed(2))] +struct AsBytes3 { + foo: u8, + // We'd prefer to use AU64 here, but you can't use aligned types in + // packed structs. + bar: u64, +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C, align(2))] +struct Unaligned1; + +#[derive(Unaligned)] +#[repr(transparent, align(2))] +struct Unaligned2 { + foo: u8, +} + +#[derive(Unaligned)] +#[repr(packed, align(2))] +struct Unaligned3; + +#[derive(Unaligned)] +#[repr(align(1), align(2))] +struct Unaligned4; + +#[derive(Unaligned)] +#[repr(align(2), align(4))] +struct Unaligned5; diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/struct.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/struct.stderr new file mode 100644 index 000000000..77e0d9e67 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/struct.stderr @@ -0,0 +1,161 @@ +error: unsupported on generic structs that are not repr(transparent) or repr(packed) + --> tests/ui-nightly/struct.rs:55:10 + | +55 | #[derive(AsBytes)] + | ^^^^^^^ + | + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/struct.rs:80:11 + | +80 | #[repr(C, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/struct.rs:84:21 + | +84 | #[repr(transparent, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/struct.rs:90:16 + | +90 | #[repr(packed, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/struct.rs:94:18 + | 
+94 | #[repr(align(1), align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/struct.rs:98:8 + | +98 | #[repr(align(2), align(4))] + | ^^^^^^^^ + +error[E0692]: transparent struct cannot have other repr hints + --> tests/ui-nightly/struct.rs:84:8 + | +84 | #[repr(transparent, align(2))] + | ^^^^^^^^^^^ ^^^^^^^^ + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/struct.rs:31:10 + | +31 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: within `KL00`, the trait `Sized` is not implemented for `[u8]`, which is required by `KL00: Sized` +note: required because it appears within the type `KL00` + --> tests/ui-nightly/struct.rs:32:8 + | +32 | struct KL00(u8, NotKnownLayoutDst); + | ^^^^ + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/struct.rs:36:10 + | +36 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: within `KL02`, the trait `Sized` is not implemented for `[u8]`, which is required by `KL02: Sized` +note: required because it appears within the type `KL02` + --> tests/ui-nightly/struct.rs:37:8 + | +37 | struct KL02(u8, [u8]); + | ^^^^ + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the trait bound `NotKnownLayoutDst: KnownLayout` is not satisfied + --> tests/ui-nightly/struct.rs:41:10 + | +41 | 
#[derive(KnownLayout)] + | ^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `NotKnownLayoutDst` + | + = help: the following other types implement trait `KnownLayout`: + () + *const T + *mut T + AU16 + F32 + F64 + I128 + I16 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the trait bound `NotKnownLayout: KnownLayout` is not satisfied + --> tests/ui-nightly/struct.rs:47:10 + | +47 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `NotKnownLayout` + | + = help: the following other types implement trait `KnownLayout`: + () + *const T + *mut T + AU16 + F32 + F64 + I128 + I16 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied + --> tests/ui-nightly/struct.rs:59:10 + | +59 | #[derive(AsBytes)] + | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` + | + = help: the trait `ShouldBe` is implemented for `HasPadding` + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied + --> tests/ui-nightly/struct.rs:66:10 + | +66 | #[derive(AsBytes)] + | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` + | + = help: the trait `ShouldBe` is implemented for `HasPadding` + = 
help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + #![feature(trivial_bounds)] + | + +error[E0587]: type has conflicting packed and align representation hints + --> tests/ui-nightly/struct.rs:91:1 + | +91 | struct Unaligned3; + | ^^^^^^^^^^^^^^^^^ diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/union.rs b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/union.rs new file mode 100644 index 000000000..8938e7847 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/union.rs @@ -0,0 +1,73 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use self::util::AU16; +use std::mem::ManuallyDrop; + +fn main() {} + +// +// AsBytes errors +// + +#[derive(AsBytes)] +#[repr(C)] +union AsBytes1 { + foo: ManuallyDrop, +} + +#[derive(AsBytes)] +#[repr(C)] +union AsBytes2 { + foo: u8, + bar: [u8; 2], +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C, align(2))] +union Unaligned1 { + foo: i16, + bar: AU16, +} + +// Transparent unions are unstable; see issue #60405 +// for more information. 
+ +// #[derive(Unaligned)] +// #[repr(transparent, align(2))] +// union Unaligned2 { +// foo: u8, +// } + +#[derive(Unaligned)] +#[repr(packed, align(2))] +union Unaligned3 { + foo: u8, +} + +#[derive(Unaligned)] +#[repr(align(1), align(2))] +struct Unaligned4 { + foo: u8, +} + +#[derive(Unaligned)] +#[repr(align(2), align(4))] +struct Unaligned5 { + foo: u8, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-nightly/union.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/union.stderr new file mode 100644 index 000000000..ae510e835 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-nightly/union.stderr @@ -0,0 +1,51 @@ +error: unsupported on types with type parameters + --> tests/ui-nightly/union.rs:24:10 + | +24 | #[derive(AsBytes)] + | ^^^^^^^ + | + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/union.rs:42:11 + | +42 | #[repr(C, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/union.rs:58:16 + | +58 | #[repr(packed, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/union.rs:64:18 + | +64 | #[repr(align(1), align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-nightly/union.rs:70:8 + | +70 | #[repr(align(2), align(4))] + | ^^^^^^^^ + +error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied + --> tests/ui-nightly/union.rs:30:10 + | +30 | #[derive(AsBytes)] + | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` + | + = help: the trait `ShouldBe` is implemented for `HasPadding` + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) +help: add `#![feature(trivial_bounds)]` to the crate attributes to enable + | +9 + 
#![feature(trivial_bounds)] + | + +error[E0587]: type has conflicting packed and align representation hints + --> tests/ui-nightly/union.rs:59:1 + | +59 | union Unaligned3 { + | ^^^^^^^^^^^^^^^^ diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/derive_transparent.rs b/src/rust/vendor/zerocopy-derive/tests/ui-stable/derive_transparent.rs new file mode 100644 index 000000000..2084d921b --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/derive_transparent.rs @@ -0,0 +1,40 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use core::marker::PhantomData; + +use { + static_assertions::assert_impl_all, + zerocopy::{AsBytes, FromBytes, FromZeroes, Unaligned}, +}; + +use self::util::NotZerocopy; + +fn main() {} + +// Test generic transparent structs + +#[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] +#[repr(transparent)] +struct TransparentStruct { + inner: T, + _phantom: PhantomData<()>, +} + +// It should be legal to derive these traits on a transparent struct, but it +// must also ensure the traits are only implemented when the inner type +// implements them. 
+assert_impl_all!(TransparentStruct: FromZeroes); +assert_impl_all!(TransparentStruct: FromBytes); +assert_impl_all!(TransparentStruct: AsBytes); +assert_impl_all!(TransparentStruct: Unaligned); diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/derive_transparent.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-stable/derive_transparent.stderr new file mode 100644 index 000000000..57d34cbf8 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/derive_transparent.stderr @@ -0,0 +1,111 @@ +error[E0277]: the trait bound `NotZerocopy: FromZeroes` is not satisfied + --> tests/ui-stable/derive_transparent.rs:37:18 + | +37 | assert_impl_all!(TransparentStruct: FromZeroes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromZeroes` is not implemented for `NotZerocopy`, which is required by `TransparentStruct: FromZeroes` + | + = help: the following other types implement trait `FromZeroes`: + () + *const T + *mut T + AU16 + F32 + F64 + I128 + I16 + and $N others +note: required for `TransparentStruct` to implement `FromZeroes` + --> tests/ui-stable/derive_transparent.rs:27:19 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-stable/derive_transparent.rs:37:1 + | +37 | assert_impl_all!(TransparentStruct: FromZeroes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `assert_impl_all` + = note: this error originates in the derive macro `FromZeroes` which comes from the expansion of the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-stable/derive_transparent.rs:38:18 + | +38 | assert_impl_all!(TransparentStruct: FromBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `NotZerocopy`, which 
is required by `TransparentStruct: FromBytes` + | + = help: the following other types implement trait `FromBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required for `TransparentStruct` to implement `FromBytes` + --> tests/ui-stable/derive_transparent.rs:27:31 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-stable/derive_transparent.rs:38:1 + | +38 | assert_impl_all!(TransparentStruct: FromBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `assert_impl_all` + = note: this error originates in the derive macro `FromBytes` which comes from the expansion of the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-stable/derive_transparent.rs:39:18 + | +39 | assert_impl_all!(TransparentStruct: AsBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy`, which is required by `TransparentStruct: AsBytes` + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required for `TransparentStruct` to implement `AsBytes` + --> tests/ui-stable/derive_transparent.rs:27:10 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-stable/derive_transparent.rs:39:1 + | +39 | assert_impl_all!(TransparentStruct: AsBytes); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `assert_impl_all` + = note: this error originates in the derive macro `AsBytes` which comes from the expansion of the macro `assert_impl_all` 
(in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: Unaligned` is not satisfied + --> tests/ui-stable/derive_transparent.rs:40:18 + | +40 | assert_impl_all!(TransparentStruct: Unaligned); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Unaligned` is not implemented for `NotZerocopy`, which is required by `TransparentStruct: Unaligned` + | + = help: the following other types implement trait `Unaligned`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others +note: required for `TransparentStruct` to implement `Unaligned` + --> tests/ui-stable/derive_transparent.rs:27:42 + | +27 | #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::{closure#0}::assert_impl_all` + --> tests/ui-stable/derive_transparent.rs:40:1 + | +40 | assert_impl_all!(TransparentStruct: Unaligned); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `assert_impl_all` + = note: this error originates in the derive macro `Unaligned` which comes from the expansion of the macro `assert_impl_all` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum.rs b/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum.rs new file mode 100644 index 000000000..31d5679d1 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum.rs @@ -0,0 +1,194 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +#[macro_use] +extern crate zerocopy; + +fn main() {} + +// +// Generic errors +// + +#[derive(FromZeroes, FromBytes)] +#[repr("foo")] +enum Generic1 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(foo)] +enum Generic2 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(transparent)] +enum Generic3 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(u8, u16)] +enum Generic4 { + A, +} + +#[derive(FromZeroes, FromBytes)] +enum Generic5 { + A, +} + +// +// FromZeroes errors +// + +#[derive(FromZeroes)] +enum FromZeroes1 { + A(u8), +} + +#[derive(FromZeroes)] +enum FromZeroes2 { + A, + B(u8), +} + +#[derive(FromZeroes)] +enum FromZeroes3 { + A = 1, + B, +} + +// +// FromBytes errors +// + +#[derive(FromZeroes, FromBytes)] +#[repr(C)] +enum FromBytes1 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(usize)] +enum FromBytes2 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(isize)] +enum FromBytes3 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(u32)] +enum FromBytes4 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(i32)] +enum FromBytes5 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(u64)] +enum FromBytes6 { + A, +} + +#[derive(FromZeroes, FromBytes)] +#[repr(i64)] +enum FromBytes7 { + A, +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C)] +enum Unaligned1 { + A, +} + +#[derive(Unaligned)] +#[repr(u16)] +enum Unaligned2 { + A, +} + +#[derive(Unaligned)] +#[repr(i16)] +enum Unaligned3 { + A, +} + +#[derive(Unaligned)] +#[repr(u32)] +enum Unaligned4 { + A, +} + +#[derive(Unaligned)] +#[repr(i32)] +enum Unaligned5 { + A, +} + +#[derive(Unaligned)] +#[repr(u64)] +enum Unaligned6 { + A, +} + +#[derive(Unaligned)] +#[repr(i64)] +enum Unaligned7 { + A, +} + +#[derive(Unaligned)] +#[repr(usize)] +enum Unaligned8 { + A, +} + +#[derive(Unaligned)] +#[repr(isize)] +enum Unaligned9 { + A, +} + +#[derive(Unaligned)] +#[repr(u8, align(2))] +enum Unaligned10 { + A, +} + +#[derive(Unaligned)] +#[repr(i8, align(2))] +enum 
Unaligned11 { + A, +} + +#[derive(Unaligned)] +#[repr(align(1), align(2))] +enum Unaligned12 { + A, +} + +#[derive(Unaligned)] +#[repr(align(2), align(4))] +enum Unaligned13 { + A, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum.stderr new file mode 100644 index 000000000..a47ce9c4b --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum.stderr @@ -0,0 +1,201 @@ +error: unrecognized representation hint + --> tests/ui-stable/enum.rs:19:8 + | +19 | #[repr("foo")] + | ^^^^^ + +error: unrecognized representation hint + --> tests/ui-stable/enum.rs:25:8 + | +25 | #[repr(foo)] + | ^^^ + +error: unsupported representation for deriving FromBytes, AsBytes, or Unaligned on an enum + --> tests/ui-stable/enum.rs:31:8 + | +31 | #[repr(transparent)] + | ^^^^^^^^^^^ + +error: conflicting representation hints + --> tests/ui-stable/enum.rs:37:1 + | +37 | #[repr(u8, u16)] + | ^ + +error: must have a non-align #[repr(...)] attribute in order to guarantee this type's memory layout + --> tests/ui-stable/enum.rs:42:22 + | +42 | #[derive(FromZeroes, FromBytes)] + | ^^^^^^^^^ + | + = note: this error originates in the derive macro `FromBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: only C-like enums can implement FromZeroes + --> tests/ui-stable/enum.rs:52:1 + | +52 | / enum FromZeroes1 { +53 | | A(u8), +54 | | } + | |_^ + +error: only C-like enums can implement FromZeroes + --> tests/ui-stable/enum.rs:57:1 + | +57 | / enum FromZeroes2 { +58 | | A, +59 | | B(u8), +60 | | } + | |_^ + +error: FromZeroes only supported on enums with a variant that has a discriminant of `0` + --> tests/ui-stable/enum.rs:63:1 + | +63 | / enum FromZeroes3 { +64 | | A = 1, +65 | | B, +66 | | } + | |_^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-stable/enum.rs:73:8 + | +73 | #[repr(C)] + | ^ + +error: FromBytes requires repr of "u8", "u16", "i8", 
or "i16" + --> tests/ui-stable/enum.rs:79:8 + | +79 | #[repr(usize)] + | ^^^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-stable/enum.rs:85:8 + | +85 | #[repr(isize)] + | ^^^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-stable/enum.rs:91:8 + | +91 | #[repr(u32)] + | ^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-stable/enum.rs:97:8 + | +97 | #[repr(i32)] + | ^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-stable/enum.rs:103:8 + | +103 | #[repr(u64)] + | ^^^ + +error: FromBytes requires repr of "u8", "u16", "i8", or "i16" + --> tests/ui-stable/enum.rs:109:8 + | +109 | #[repr(i64)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-stable/enum.rs:119:8 + | +119 | #[repr(C)] + | ^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-stable/enum.rs:125:8 + | +125 | #[repr(u16)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-stable/enum.rs:131:8 + | +131 | #[repr(i16)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-stable/enum.rs:137:8 + | +137 | #[repr(u32)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-stable/enum.rs:143:8 + | +143 | #[repr(i32)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-stable/enum.rs:149:8 + | +149 | #[repr(u64)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-stable/enum.rs:155:8 + | +155 | #[repr(i64)] + | ^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> 
tests/ui-stable/enum.rs:161:8 + | +161 | #[repr(usize)] + | ^^^^^ + +error: Unaligned requires repr of "u8" or "i8", and no alignment (i.e., repr(align(N > 1))) + --> tests/ui-stable/enum.rs:167:8 + | +167 | #[repr(isize)] + | ^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/enum.rs:173:12 + | +173 | #[repr(u8, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/enum.rs:179:12 + | +179 | #[repr(i8, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/enum.rs:185:18 + | +185 | #[repr(align(1), align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/enum.rs:191:8 + | +191 | #[repr(align(2), align(4))] + | ^^^^^^^^ + +error[E0565]: meta item in `repr` must be an identifier + --> tests/ui-stable/enum.rs:19:8 + | +19 | #[repr("foo")] + | ^^^^^ + +error[E0552]: unrecognized representation hint + --> tests/ui-stable/enum.rs:25:8 + | +25 | #[repr(foo)] + | ^^^ + | + = help: valid reprs are `Rust` (default), `C`, `align`, `packed`, `transparent`, `simd`, `i8`, `u8`, `i16`, `u16`, `i32`, `u32`, `i64`, `u64`, `i128`, `u128`, `isize`, `usize` + +error[E0566]: conflicting representation hints + --> tests/ui-stable/enum.rs:37:8 + | +37 | #[repr(u8, u16)] + | ^^ ^^^ + | + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
+ = note: for more information, see issue #68585 + = note: `#[deny(conflicting_repr_hints)]` on by default diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum_from_bytes_u8_too_few.rs b/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum_from_bytes_u8_too_few.rs new file mode 100644 index 000000000..1b1bed31f --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum_from_bytes_u8_too_few.rs @@ -0,0 +1,272 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +#[derive(FromBytes)] +#[repr(u8)] +enum Foo { + Variant0, + Variant1, + Variant2, + Variant3, + Variant4, + Variant5, + Variant6, + Variant7, + Variant8, + Variant9, + Variant10, + Variant11, + Variant12, + Variant13, + Variant14, + Variant15, + Variant16, + Variant17, + Variant18, + Variant19, + Variant20, + Variant21, + Variant22, + Variant23, + Variant24, + Variant25, + Variant26, + Variant27, + Variant28, + Variant29, + Variant30, + Variant31, + Variant32, + Variant33, + Variant34, + Variant35, + Variant36, + Variant37, + Variant38, + Variant39, + Variant40, + Variant41, + Variant42, + Variant43, + Variant44, + Variant45, + Variant46, + Variant47, + Variant48, + Variant49, + Variant50, + Variant51, + Variant52, + Variant53, + Variant54, + Variant55, + Variant56, + Variant57, + Variant58, + Variant59, + Variant60, + Variant61, + Variant62, + Variant63, + Variant64, + Variant65, + Variant66, + Variant67, + Variant68, + Variant69, + Variant70, + Variant71, + Variant72, + Variant73, + Variant74, + Variant75, + Variant76, + Variant77, + Variant78, + Variant79, + Variant80, + Variant81, + Variant82, + Variant83, + Variant84, + Variant85, + Variant86, + Variant87, + Variant88, + Variant89, + Variant90, + Variant91, + Variant92, 
+ Variant93, + Variant94, + Variant95, + Variant96, + Variant97, + Variant98, + Variant99, + Variant100, + Variant101, + Variant102, + Variant103, + Variant104, + Variant105, + Variant106, + Variant107, + Variant108, + Variant109, + Variant110, + Variant111, + Variant112, + Variant113, + Variant114, + Variant115, + Variant116, + Variant117, + Variant118, + Variant119, + Variant120, + Variant121, + Variant122, + Variant123, + Variant124, + Variant125, + Variant126, + Variant127, + Variant128, + Variant129, + Variant130, + Variant131, + Variant132, + Variant133, + Variant134, + Variant135, + Variant136, + Variant137, + Variant138, + Variant139, + Variant140, + Variant141, + Variant142, + Variant143, + Variant144, + Variant145, + Variant146, + Variant147, + Variant148, + Variant149, + Variant150, + Variant151, + Variant152, + Variant153, + Variant154, + Variant155, + Variant156, + Variant157, + Variant158, + Variant159, + Variant160, + Variant161, + Variant162, + Variant163, + Variant164, + Variant165, + Variant166, + Variant167, + Variant168, + Variant169, + Variant170, + Variant171, + Variant172, + Variant173, + Variant174, + Variant175, + Variant176, + Variant177, + Variant178, + Variant179, + Variant180, + Variant181, + Variant182, + Variant183, + Variant184, + Variant185, + Variant186, + Variant187, + Variant188, + Variant189, + Variant190, + Variant191, + Variant192, + Variant193, + Variant194, + Variant195, + Variant196, + Variant197, + Variant198, + Variant199, + Variant200, + Variant201, + Variant202, + Variant203, + Variant204, + Variant205, + Variant206, + Variant207, + Variant208, + Variant209, + Variant210, + Variant211, + Variant212, + Variant213, + Variant214, + Variant215, + Variant216, + Variant217, + Variant218, + Variant219, + Variant220, + Variant221, + Variant222, + Variant223, + Variant224, + Variant225, + Variant226, + Variant227, + Variant228, + Variant229, + Variant230, + Variant231, + Variant232, + Variant233, + Variant234, + Variant235, + 
Variant236, + Variant237, + Variant238, + Variant239, + Variant240, + Variant241, + Variant242, + Variant243, + Variant244, + Variant245, + Variant246, + Variant247, + Variant248, + Variant249, + Variant250, + Variant251, + Variant252, + Variant253, + Variant254, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum_from_bytes_u8_too_few.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum_from_bytes_u8_too_few.stderr new file mode 100644 index 000000000..5edbabc47 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/enum_from_bytes_u8_too_few.stderr @@ -0,0 +1,11 @@ +error: FromBytes only supported on repr(u8) enum with 256 variants + --> tests/ui-stable/enum_from_bytes_u8_too_few.rs:15:1 + | +15 | / #[repr(u8)] +16 | | enum Foo { +17 | | Variant0, +18 | | Variant1, +... | +271 | | Variant254, +272 | | } + | |_^ diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/late_compile_pass.rs b/src/rust/vendor/zerocopy-derive/tests/ui-stable/late_compile_pass.rs new file mode 100644 index 000000000..cd65a6ed2 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/late_compile_pass.rs @@ -0,0 +1,75 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use self::util::{NotZerocopy, AU16}; +use zerocopy::KnownLayout; + +fn main() {} + +// These tests cause errors which are generated by a later compilation pass than +// the other errors we generate, and so if they're compiled in the same file, +// the compiler will never get to that pass, and so we won't get the errors. 
+ +// +// FromZeroes errors +// + +#[derive(FromZeroes)] +struct FromZeroes1 { + value: NotZerocopy, +} + +// +// FromBytes errors +// + +#[derive(FromBytes)] +struct FromBytes1 { + value: NotZerocopy, +} + +// +// AsBytes errors +// + +#[derive(AsBytes)] +#[repr(C)] +struct AsBytes1 { + value: NotZerocopy, +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C)] +struct Unaligned1 { + aligned: AU16, +} + +// This specifically tests a bug we had in an old version of the code in which +// the trait bound would only be enforced for the first field's type. +#[derive(Unaligned)] +#[repr(C)] +struct Unaligned2 { + unaligned: u8, + aligned: AU16, +} + +#[derive(Unaligned)] +#[repr(transparent)] +struct Unaligned3 { + aligned: AU16, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/late_compile_pass.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-stable/late_compile_pass.stderr new file mode 100644 index 000000000..0c66ae57b --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/late_compile_pass.stderr @@ -0,0 +1,144 @@ +warning: unused import: `zerocopy::KnownLayout` + --> tests/ui-stable/late_compile_pass.rs:16:5 + | +16 | use zerocopy::KnownLayout; + | ^^^^^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(unused_imports)]` on by default + +error[E0277]: the trait bound `NotZerocopy: FromZeroes` is not satisfied + --> tests/ui-stable/late_compile_pass.rs:28:10 + | +28 | #[derive(FromZeroes)] + | ^^^^^^^^^^ the trait `FromZeroes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `FromZeroes`: + () + *const T + *mut T + AU16 + F32 + F64 + FromZeroes1 + I128 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `FromZeroes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-stable/late_compile_pass.rs:37:10 + | +37 | #[derive(FromBytes)] + | ^^^^^^^^^ the 
trait `FromBytes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `FromBytes`: + () + AU16 + F32 + F64 + FromBytes1 + I128 + I16 + I32 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `FromBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `FromBytes1: FromZeroes` is not satisfied + --> tests/ui-stable/late_compile_pass.rs:37:10 + | +37 | #[derive(FromBytes)] + | ^^^^^^^^^ the trait `FromZeroes` is not implemented for `FromBytes1` + | + = help: the following other types implement trait `FromZeroes`: + () + *const T + *mut T + AU16 + F32 + F64 + FromZeroes1 + I128 + and $N others +note: required by a bound in `FromBytes` + --> $WORKSPACE/src/lib.rs + | + | pub unsafe trait FromBytes: FromZeroes { + | ^^^^^^^^^^ required by this bound in `FromBytes` + = note: this error originates in the derive macro `FromBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-stable/late_compile_pass.rs:46:10 + | +46 | #[derive(AsBytes)] + | ^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + AsBytes1 + F32 + F64 + I128 + I16 + I32 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `AU16: Unaligned` is not satisfied + --> tests/ui-stable/late_compile_pass.rs:56:10 + | +56 | #[derive(Unaligned)] + | ^^^^^^^^^ the trait `Unaligned` is not implemented for `AU16` + | + = help: the following other types implement trait `Unaligned`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `Unaligned` (in Nightly 
builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `AU16: Unaligned` is not satisfied + --> tests/ui-stable/late_compile_pass.rs:64:10 + | +64 | #[derive(Unaligned)] + | ^^^^^^^^^ the trait `Unaligned` is not implemented for `AU16` + | + = help: the following other types implement trait `Unaligned`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `Unaligned` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `AU16: Unaligned` is not satisfied + --> tests/ui-stable/late_compile_pass.rs:71:10 + | +71 | #[derive(Unaligned)] + | ^^^^^^^^^ the trait `Unaligned` is not implemented for `AU16` + | + = help: the following other types implement trait `Unaligned`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `Unaligned` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/mid_compile_pass.rs b/src/rust/vendor/zerocopy-derive/tests/ui-stable/mid_compile_pass.rs new file mode 100644 index 000000000..e0c4bc578 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/mid_compile_pass.rs @@ -0,0 +1,61 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::KnownLayout; + +fn main() {} + +// These tests cause errors which are generated by a later compilation pass than +// the other errors we generate, and so if they're compiled in the same file, +// the compiler will never get to that pass, and so we won't get the errors. 
+ +// +// KnownLayout errors +// + +fn assert_kl(_: &T) {} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | Y | N | N | KL04 | +#[derive(KnownLayout)] +struct KL04(u8, T); + +fn test_kl04(kl: &KL04) { + assert_kl(kl); +} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | Y | Y | N | KL06 | +#[derive(KnownLayout)] +struct KL06(u8, T); + +fn test_kl06(kl: &KL06) { + assert_kl(kl); +} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | Y | Y | N | N | KL12 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL12(u8, T); + +fn test_kl12(kl: &KL12) { + assert_kl(kl) +} + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | Y | Y | N | Y | KL13 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL13(u8, T); + +fn test_kl13(t: T) -> impl KnownLayout { + KL13(0u8, t) +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/mid_compile_pass.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-stable/mid_compile_pass.stderr new file mode 100644 index 000000000..ee7dcb968 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/mid_compile_pass.stderr @@ -0,0 +1,104 @@ +error[E0277]: the trait bound `T: KnownLayout` is not satisfied + --> tests/ui-stable/mid_compile_pass.rs:59:26 + | +59 | fn test_kl13(t: T) -> impl KnownLayout { + | ^^^^^^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `T`, which is required by `KL13: KnownLayout` + | +note: required for `KL13` to implement `KnownLayout` + --> tests/ui-stable/mid_compile_pass.rs:55:10 + | +55 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +59 | fn test_kl13(t: T) -> impl KnownLayout { + | +++++++++++++++++++++++ + +error[E0277]: the size for values of type `T` cannot be 
known at compilation time + --> tests/ui-stable/mid_compile_pass.rs:31:15 + | +30 | fn test_kl04(kl: &KL04) { + | - this type parameter needs to be `Sized` +31 | assert_kl(kl); + | --------- ^^ doesn't have a size known at compile-time + | | + | required by a bound introduced by this call + | +note: required because it appears within the type `KL04` + --> tests/ui-stable/mid_compile_pass.rs:28:8 + | +28 | struct KL04(u8, T); + | ^^^^ +note: required for `KL04` to implement `KnownLayout` + --> tests/ui-stable/mid_compile_pass.rs:27:10 + | +27 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `assert_kl` + --> tests/ui-stable/mid_compile_pass.rs:23:26 + | +23 | fn assert_kl(_: &T) {} + | ^^^^^^^^^^^ required by this bound in `assert_kl` + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider removing the `?Sized` bound to make the type parameter `Sized` + | +30 - fn test_kl04(kl: &KL04) { +30 + fn test_kl04(kl: &KL04) { + | + +error[E0277]: the size for values of type `T` cannot be known at compilation time + --> tests/ui-stable/mid_compile_pass.rs:40:15 + | +39 | fn test_kl06(kl: &KL06) { + | - this type parameter needs to be `Sized` +40 | assert_kl(kl); + | --------- ^^ doesn't have a size known at compile-time + | | + | required by a bound introduced by this call + | +note: required because it appears within the type `KL06` + --> tests/ui-stable/mid_compile_pass.rs:37:8 + | +37 | struct KL06(u8, T); + | ^^^^ +note: required for `KL06` to implement `KnownLayout` + --> tests/ui-stable/mid_compile_pass.rs:36:10 + | +36 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `assert_kl` + --> tests/ui-stable/mid_compile_pass.rs:23:26 + | +23 | fn assert_kl(_: &T) {} + | ^^^^^^^^^^^ required by this bound in `assert_kl` + = note: 
this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider removing the `?Sized` bound to make the type parameter `Sized` + | +39 - fn test_kl06(kl: &KL06) { +39 + fn test_kl06(kl: &KL06) { + | + +error[E0277]: the trait bound `T: KnownLayout` is not satisfied + --> tests/ui-stable/mid_compile_pass.rs:50:15 + | +50 | assert_kl(kl) + | --------- ^^ the trait `KnownLayout` is not implemented for `T`, which is required by `KL12: KnownLayout` + | | + | required by a bound introduced by this call + | +note: required for `KL12` to implement `KnownLayout` + --> tests/ui-stable/mid_compile_pass.rs:45:10 + | +45 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `assert_kl` + --> tests/ui-stable/mid_compile_pass.rs:23:26 + | +23 | fn assert_kl(_: &T) {} + | ^^^^^^^^^^^ required by this bound in `assert_kl` + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider further restricting this bound + | +49 | fn test_kl12(kl: &KL12) { + | +++++++++++++++++++++++ diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/struct.rs b/src/rust/vendor/zerocopy-derive/tests/ui-stable/struct.rs new file mode 100644 index 000000000..c76dc7f95 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/struct.rs @@ -0,0 +1,99 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use zerocopy::KnownLayout; + +use self::util::AU16; + +fn main() {} + +// +// KnownLayout errors +// + +struct NotKnownLayout; + +struct NotKnownLayoutDst([u8]); + +// | `repr(C)`? 
| generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | N | N | N | KL00 | +#[derive(KnownLayout)] +struct KL00(u8, NotKnownLayoutDst); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | N | N | Y | N | KL02 | +#[derive(KnownLayout)] +struct KL02(u8, [u8]); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | Y | N | N | N | KL08 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL08(u8, NotKnownLayoutDst); + +// | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | +// | Y | N | N | Y | KL09 | +#[derive(KnownLayout)] +#[repr(C)] +struct KL09(NotKnownLayout, NotKnownLayout); + +// +// AsBytes errors +// + +#[derive(AsBytes)] +#[repr(C)] +struct AsBytes1(T); + +#[derive(AsBytes)] +#[repr(C)] +struct AsBytes2 { + foo: u8, + bar: AU16, +} + +#[derive(AsBytes)] +#[repr(C, packed(2))] +struct AsBytes3 { + foo: u8, + // We'd prefer to use AU64 here, but you can't use aligned types in + // packed structs. + bar: u64, +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C, align(2))] +struct Unaligned1; + +#[derive(Unaligned)] +#[repr(transparent, align(2))] +struct Unaligned2 { + foo: u8, +} + +#[derive(Unaligned)] +#[repr(packed, align(2))] +struct Unaligned3; + +#[derive(Unaligned)] +#[repr(align(1), align(2))] +struct Unaligned4; + +#[derive(Unaligned)] +#[repr(align(2), align(4))] +struct Unaligned5; diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/struct.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-stable/struct.stderr new file mode 100644 index 000000000..c1e95af0b --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/struct.stderr @@ -0,0 +1,137 @@ +error: unsupported on generic structs that are not repr(transparent) or repr(packed) + --> tests/ui-stable/struct.rs:55:10 + | +55 | #[derive(AsBytes)] + | ^^^^^^^ + | + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: cannot 
derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/struct.rs:80:11 + | +80 | #[repr(C, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/struct.rs:84:21 + | +84 | #[repr(transparent, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/struct.rs:90:16 + | +90 | #[repr(packed, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/struct.rs:94:18 + | +94 | #[repr(align(1), align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/struct.rs:98:8 + | +98 | #[repr(align(2), align(4))] + | ^^^^^^^^ + +error[E0692]: transparent struct cannot have other repr hints + --> tests/ui-stable/struct.rs:84:8 + | +84 | #[repr(transparent, align(2))] + | ^^^^^^^^^^^ ^^^^^^^^ + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/struct.rs:31:10 + | +31 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: within `KL00`, the trait `Sized` is not implemented for `[u8]`, which is required by `KL00: Sized` +note: required because it appears within the type `KL00` + --> tests/ui-stable/struct.rs:32:8 + | +32 | struct KL00(u8, NotKnownLayoutDst); + | ^^^^ + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/struct.rs:36:10 + | +36 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: within `KL02`, the trait `Sized` is not implemented for `[u8]`, which is required by `KL02: Sized` +note: required because it appears within the type `KL02` + --> tests/ui-stable/struct.rs:37:8 + | +37 | struct KL02(u8, [u8]); + | ^^^^ + = help: see 
issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotKnownLayoutDst: KnownLayout` is not satisfied + --> tests/ui-stable/struct.rs:41:10 + | +41 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `NotKnownLayoutDst` + | + = help: the following other types implement trait `KnownLayout`: + () + *const T + *mut T + AU16 + F32 + F64 + I128 + I16 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotKnownLayout: KnownLayout` is not satisfied + --> tests/ui-stable/struct.rs:47:10 + | +47 | #[derive(KnownLayout)] + | ^^^^^^^^^^^ the trait `KnownLayout` is not implemented for `NotKnownLayout` + | + = help: the following other types implement trait `KnownLayout`: + () + *const T + *mut T + AU16 + F32 + F64 + I128 + I16 + and $N others + = help: see issue #48214 + = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied + --> tests/ui-stable/struct.rs:59:10 + | +59 | #[derive(AsBytes)] + | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` + | + = help: the trait `ShouldBe` is implemented for `HasPadding` + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied + --> tests/ui-stable/struct.rs:66:10 + | +66 | #[derive(AsBytes)] + | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` + | + = help: the trait `ShouldBe` is implemented for `HasPadding` + = help: see issue #48214 + = note: this error originates in the derive macro 
`AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0587]: type has conflicting packed and align representation hints + --> tests/ui-stable/struct.rs:91:1 + | +91 | struct Unaligned3; + | ^^^^^^^^^^^^^^^^^ diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/union.rs b/src/rust/vendor/zerocopy-derive/tests/ui-stable/union.rs new file mode 100644 index 000000000..8938e7847 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/union.rs @@ -0,0 +1,73 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[macro_use] +extern crate zerocopy; + +#[path = "../util.rs"] +mod util; + +use self::util::AU16; +use std::mem::ManuallyDrop; + +fn main() {} + +// +// AsBytes errors +// + +#[derive(AsBytes)] +#[repr(C)] +union AsBytes1 { + foo: ManuallyDrop, +} + +#[derive(AsBytes)] +#[repr(C)] +union AsBytes2 { + foo: u8, + bar: [u8; 2], +} + +// +// Unaligned errors +// + +#[derive(Unaligned)] +#[repr(C, align(2))] +union Unaligned1 { + foo: i16, + bar: AU16, +} + +// Transparent unions are unstable; see issue #60405 +// for more information. 
+ +// #[derive(Unaligned)] +// #[repr(transparent, align(2))] +// union Unaligned2 { +// foo: u8, +// } + +#[derive(Unaligned)] +#[repr(packed, align(2))] +union Unaligned3 { + foo: u8, +} + +#[derive(Unaligned)] +#[repr(align(1), align(2))] +struct Unaligned4 { + foo: u8, +} + +#[derive(Unaligned)] +#[repr(align(2), align(4))] +struct Unaligned5 { + foo: u8, +} diff --git a/src/rust/vendor/zerocopy-derive/tests/ui-stable/union.stderr b/src/rust/vendor/zerocopy-derive/tests/ui-stable/union.stderr new file mode 100644 index 000000000..f7d6953be --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/ui-stable/union.stderr @@ -0,0 +1,47 @@ +error: unsupported on types with type parameters + --> tests/ui-stable/union.rs:24:10 + | +24 | #[derive(AsBytes)] + | ^^^^^^^ + | + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/union.rs:42:11 + | +42 | #[repr(C, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/union.rs:58:16 + | +58 | #[repr(packed, align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/union.rs:64:18 + | +64 | #[repr(align(1), align(2))] + | ^^^^^^^^ + +error: cannot derive Unaligned with repr(align(N > 1)) + --> tests/ui-stable/union.rs:70:8 + | +70 | #[repr(align(2), align(4))] + | ^^^^^^^^ + +error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied + --> tests/ui-stable/union.rs:30:10 + | +30 | #[derive(AsBytes)] + | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` + | + = help: the trait `ShouldBe` is implemented for `HasPadding` + = help: see issue #48214 + = note: this error originates in the derive macro `AsBytes` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0587]: type has conflicting packed and align representation hints + --> 
tests/ui-stable/union.rs:59:1 + | +59 | union Unaligned3 { + | ^^^^^^^^^^^^^^^^ diff --git a/src/rust/vendor/zerocopy-derive/tests/union_as_bytes.rs b/src/rust/vendor/zerocopy-derive/tests/union_as_bytes.rs new file mode 100644 index 000000000..84f51817f --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/union_as_bytes.rs @@ -0,0 +1,75 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +use std::{marker::PhantomData, option::IntoIter}; + +use {static_assertions::assert_impl_all, zerocopy::AsBytes}; + +// A union is `AsBytes` if: +// - all fields are `AsBytes` +// - `repr(C)` or `repr(transparent)` and +// - no padding (size of union equals size of each field type) +// - `repr(packed)` + +#[derive(AsBytes, Clone, Copy)] +#[repr(C)] +union CZst { + a: (), +} + +assert_impl_all!(CZst: AsBytes); + +#[derive(AsBytes)] +#[repr(C)] +union C { + a: u8, + b: u8, +} + +assert_impl_all!(C: AsBytes); + +// Transparent unions are unstable; see issue #60405 +// for more information. 
+ +// #[derive(AsBytes)] +// #[repr(transparent)] +// union Transparent { +// a: u8, +// b: CZst, +// } + +// is_as_bytes!(Transparent); + +#[derive(AsBytes)] +#[repr(C, packed)] +union CZstPacked { + a: (), +} + +assert_impl_all!(CZstPacked: AsBytes); + +#[derive(AsBytes)] +#[repr(C, packed)] +union CPacked { + a: u8, + b: i8, +} + +assert_impl_all!(CPacked: AsBytes); + +#[derive(AsBytes)] +#[repr(C, packed)] +union CMultibytePacked { + a: i32, + b: u32, + c: f32, +} + +assert_impl_all!(CMultibytePacked: AsBytes); diff --git a/src/rust/vendor/zerocopy-derive/tests/union_from_bytes.rs b/src/rust/vendor/zerocopy-derive/tests/union_from_bytes.rs new file mode 100644 index 000000000..4635735ef --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/union_from_bytes.rs @@ -0,0 +1,72 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +use std::{marker::PhantomData, option::IntoIter}; + +use { + static_assertions::assert_impl_all, + zerocopy::{FromBytes, FromZeroes}, +}; + +// A union is `FromBytes` if: +// - all fields are `FromBytes` + +#[derive(Clone, Copy, FromZeroes, FromBytes)] +union Zst { + a: (), +} + +assert_impl_all!(Zst: FromBytes); + +#[derive(FromZeroes, FromBytes)] +union One { + a: u8, +} + +assert_impl_all!(One: FromBytes); + +#[derive(FromZeroes, FromBytes)] +union Two { + a: u8, + b: Zst, +} + +assert_impl_all!(Two: FromBytes); + +#[derive(FromZeroes, FromBytes)] +union TypeParams<'a, T: Copy, I: Iterator> +where + I::Item: Copy, +{ + a: T, + c: I::Item, + d: u8, + e: PhantomData<&'a [u8]>, + f: PhantomData<&'static str>, + g: PhantomData, +} + +assert_impl_all!(TypeParams<'static, (), IntoIter<()>>: FromBytes); + +// Deriving `FromBytes` should work if the union has bounded parameters. 
+ +#[derive(FromZeroes, FromBytes)] +#[repr(C)] +union WithParams<'a: 'b, 'b: 'a, const N: usize, T: 'a + 'b + FromBytes> +where + 'a: 'b, + 'b: 'a, + T: 'a + 'b + Copy + FromBytes, +{ + a: [T; N], + b: PhantomData<&'a &'b ()>, +} + +assert_impl_all!(WithParams<'static, 'static, 42, u8>: FromBytes); diff --git a/src/rust/vendor/zerocopy-derive/tests/union_from_zeroes.rs b/src/rust/vendor/zerocopy-derive/tests/union_from_zeroes.rs new file mode 100644 index 000000000..935fc1563 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/union_from_zeroes.rs @@ -0,0 +1,72 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +#[macro_use] +mod util; + +use std::{marker::PhantomData, option::IntoIter}; + +use {static_assertions::assert_impl_all, zerocopy::FromZeroes}; + +// A union is `FromZeroes` if: +// - all fields are `FromZeroes` + +#[derive(Clone, Copy, FromZeroes)] +union Zst { + a: (), +} + +assert_impl_all!(Zst: FromZeroes); + +#[derive(FromZeroes)] +union One { + a: bool, +} + +assert_impl_all!(One: FromZeroes); + +#[derive(FromZeroes)] +union Two { + a: bool, + b: Zst, +} + +assert_impl_all!(Two: FromZeroes); + +#[derive(FromZeroes)] +union TypeParams<'a, T: Copy, I: Iterator> +where + I::Item: Copy, +{ + a: T, + c: I::Item, + d: u8, + e: PhantomData<&'a [u8]>, + f: PhantomData<&'static str>, + g: PhantomData, +} + +assert_impl_all!(TypeParams<'static, (), IntoIter<()>>: FromZeroes); + +// Deriving `FromZeroes` should work if the union has bounded parameters. 
+ +#[derive(FromZeroes)] +#[repr(C)] +union WithParams<'a: 'b, 'b: 'a, const N: usize, T: 'a + 'b + FromZeroes> +where + 'a: 'b, + 'b: 'a, + T: 'a + 'b + Copy + FromZeroes, +{ + a: [T; N], + b: PhantomData<&'a &'b ()>, +} + +assert_impl_all!(WithParams<'static, 'static, 42, u8>: FromZeroes); diff --git a/src/rust/vendor/zerocopy-derive/tests/union_known_layout.rs b/src/rust/vendor/zerocopy-derive/tests/union_known_layout.rs new file mode 100644 index 000000000..337ab4afe --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/union_known_layout.rs @@ -0,0 +1,65 @@ +// Copyright 2022 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#![allow(warnings)] + +#[macro_use] +mod util; + +use std::{marker::PhantomData, option::IntoIter}; + +use {static_assertions::assert_impl_all, zerocopy::KnownLayout}; + +#[derive(Clone, Copy, KnownLayout)] +union Zst { + a: (), +} + +assert_impl_all!(Zst: KnownLayout); + +#[derive(KnownLayout)] +union One { + a: bool, +} + +assert_impl_all!(One: KnownLayout); + +#[derive(KnownLayout)] +union Two { + a: bool, + b: Zst, +} + +assert_impl_all!(Two: KnownLayout); + +#[derive(KnownLayout)] +union TypeParams<'a, T: Copy, I: Iterator> +where + I::Item: Copy, +{ + a: T, + c: I::Item, + d: u8, + e: PhantomData<&'a [u8]>, + f: PhantomData<&'static str>, + g: PhantomData, +} + +assert_impl_all!(TypeParams<'static, (), IntoIter<()>>: KnownLayout); + +// Deriving `KnownLayout` should work if the union has bounded parameters. 
+ +#[derive(KnownLayout)] +#[repr(C)] +union WithParams<'a: 'b, 'b: 'a, const N: usize, T: 'a + 'b + KnownLayout> +where + 'a: 'b, + 'b: 'a, + T: 'a + 'b + Copy + KnownLayout, +{ + a: [T; N], + b: PhantomData<&'a &'b ()>, +} + +assert_impl_all!(WithParams<'static, 'static, 42, u8>: KnownLayout); diff --git a/src/rust/vendor/zerocopy-derive/tests/union_unaligned.rs b/src/rust/vendor/zerocopy-derive/tests/union_unaligned.rs new file mode 100644 index 000000000..5ba3ac76a --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/union_unaligned.rs @@ -0,0 +1,77 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#![allow(warnings)] + +use std::{marker::PhantomData, option::IntoIter}; + +use {static_assertions::assert_impl_all, zerocopy::Unaligned}; + +// A union is `Unaligned` if: +// - `repr(align)` is no more than 1 and either +// - `repr(C)` or `repr(transparent)` and +// - all fields `Unaligned` +// - `repr(packed)` + +#[derive(Unaligned)] +#[repr(C)] +union Foo { + a: u8, +} + +assert_impl_all!(Foo: Unaligned); + +// Transparent unions are unstable; see issue #60405 +// for more information. + +// #[derive(Unaligned)] +// #[repr(transparent)] +// union Bar { +// a: u8, +// } + +// is_unaligned!(Bar); + +#[derive(Unaligned)] +#[repr(packed)] +union Baz { + // NOTE: The `u16` type is not guaranteed to have alignment 2, although it + // does on many platforms. However, to fix this would require a custom type + // with a `#[repr(align(2))]` attribute, and `#[repr(packed)]` types are not + // allowed to transitively contain `#[repr(align(...))]` types. Thus, we + // have no choice but to use `u16` here. Luckily, these tests run in CI on + // platforms on which `u16` has alignment 2, so this isn't that big of a + // deal. 
+ a: u16, +} + +assert_impl_all!(Baz: Unaligned); + +#[derive(Unaligned)] +#[repr(C, align(1))] +union FooAlign { + a: u8, +} + +assert_impl_all!(FooAlign: Unaligned); + +#[derive(Unaligned)] +#[repr(C)] +union TypeParams<'a, T: Copy, I: Iterator> +where + I::Item: Copy, +{ + a: T, + c: I::Item, + d: u8, + e: PhantomData<&'a [u8]>, + f: PhantomData<&'static str>, + g: PhantomData, +} + +assert_impl_all!(TypeParams<'static, (), IntoIter<()>>: Unaligned); diff --git a/src/rust/vendor/zerocopy-derive/tests/util.rs b/src/rust/vendor/zerocopy-derive/tests/util.rs new file mode 100644 index 000000000..a8656fb20 --- /dev/null +++ b/src/rust/vendor/zerocopy-derive/tests/util.rs @@ -0,0 +1,20 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use zerocopy::{AsBytes, FromBytes, FromZeroes, KnownLayout}; + +/// A type that doesn't implement any zerocopy traits. +pub struct NotZerocopy(T); + +/// A `u16` with alignment 2. +/// +/// Though `u16` has alignment 2 on some platforms, it's not guaranteed. By +/// contrast, `AU16` is guaranteed to have alignment 2. 
+#[derive(KnownLayout, FromZeroes, FromBytes, AsBytes, Copy, Clone)] +#[repr(C, align(2))] +pub struct AU16(u16); diff --git a/src/rust/vendor/zerocopy/.cargo-checksum.json b/src/rust/vendor/zerocopy/.cargo-checksum.json new file mode 100644 index 000000000..0ce7b1ad9 --- /dev/null +++ b/src/rust/vendor/zerocopy/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CONTRIBUTING.md":"47968395d4bee21ffd1f4e665625ba6c89b841a71dc92d0cac432aafbcbfe027","Cargo.toml":"47af3f175210cf5fc335912d94cbbb0ecf38219ba02524e1941fab092cb84a12","INTERNAL.md":"d6f3929776cd6b195d926dda36b7db287f92fa17ed8dabb0c9c12eb6f945bd61","LICENSE-APACHE":"9d185ac6703c4b0453974c0d85e9eee43e6941009296bb1f5eb0b54e2329e9f3","LICENSE-BSD":"83c1763356e822adde0a2cae748d938a73fdc263849ccff6b27776dff213bd32","LICENSE-MIT":"1a2f5c12ddc934d58956aa5dbdd3255fe55fd957633ab7d0d39e4f0daa73f7df","POLICIES.md":"49c64d06d94b4f1d339f518d25a4fd379fc0851189f1e0b2413725708a61a409","README.md":"a2a01b58f7344a1a03b314d545d221e1a22642071d2067ebefbeb0d1c63a27d0","cargo.sh":"ea53cc247d35243fbe161024890f7c2f855688b8fd26b7244df5ae999ba99bd2","clippy.toml":"df67a6131fff2fe52309e797d2dfad080fc8cbdfcb1baa7f14415b3e397c291c","generate-readme.sh":"0b86377c6ca87971154b8f461e71d72727ecb65e550d2f96729d8949e5264155","rustfmt.toml":"33a21d11175200d203fcdb803c61a24fc461661bf8a2de7c9189af7ecee123c2","src/byteorder.rs":"b84a7ff52d88fdc51c7c660a8e8afb66490551c67aa6f46bf0afcbeb392d2e92","src/lib.rs":"eb5033d1f2f3d5314625bc63353b1d105cb414d4bc68edf8700b401e4055d669","src/macro_util.rs":"de8d86a49ae79f6995a13d735f09421e5e0676494d00a5532fcae91f357c2294","src/macros.rs":"cfb3970b51d6d2895a16ca716bace7497a723923bc540e987ba911aaed94fa86","src/post_monomorphization_compile_fail_tests.rs":"6f20b9ddb9d8c2573f2f0c18f58998b9207e588190586891f48b00570f7d4623","src/third_party/rust/LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","src/third_party/rust/LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3"
,"src/third_party/rust/README.fuchsia":"5dc26ec369c273eb99679b43c5de4c41c82797e8800c3926c4c76912e9596ecf","src/third_party/rust/layout.rs":"bf602961483f1ed0266602c00bc31345da38f4601954ed4a673f26d7ae8199b9","src/util.rs":"6ddd878212c7c3bd924f506fce4a713ebd75c5828182c1b5eff0d94c36895134","src/wrappers.rs":"53648beaef9926efb8348a1b3956526b9f9dad725dca7051f2033a833957b3b3","testdata/include_value/data":"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589","tests/trybuild.rs":"8b77ed684725d2e99fd7806d8f361cd2495b388cc463be3ff2fae25bcbe34c56","tests/ui-msrv/include_value_not_from_bytes.rs":"ea2a419e0c7ce12b4febe6139523184cba2b2c54c879177e0c58a5f78f0ec340","tests/ui-msrv/include_value_not_from_bytes.stderr":"57d634cea8f0906b08e7eea056d09b02364f2a656623116c29fdc706b5393926","tests/ui-msrv/include_value_wrong_size.rs":"418e8c86ebf5a28ee50bd6ae00550f62a7a0ef3a7e7fda965b3d2337b64f2c66","tests/ui-msrv/include_value_wrong_size.stderr":"40bcc6c0172b530cda18bf60d35550e219254a71c0a1e4b16417b17db6d18829","tests/ui-msrv/invalid-impls/invalid-impls.rs":"474d843ad40f3936adcd3ff592d815d8169813962ab9d99a68348b4b91aef10e","tests/ui-msrv/invalid-impls/invalid-impls.stderr":"ddc7a15d675f91b8b838c5c1b8e0d3973d981b11ce956e0f50d4880f0ff0e408","tests/ui-msrv/max-align.rs":"ffcb6687c98e5629d01b17cbd0845ec195007cc39aa244b26a77d17688c8f13d","tests/ui-msrv/max-align.stderr":"38775aa2a8bc035eedbc57ab0081c865b804d9a7fc5200ec425210fdea6a69d1","tests/ui-msrv/transmute-dst-not-frombytes.rs":"e00251eae67cdf8267a4963f212857a2a51de640a6f856c4b8df2a953caad25a","tests/ui-msrv/transmute-dst-not-frombytes.stderr":"537111d0377c9a255bb9cd43fa12646a901f0b8cf6b1fb5842fb5f0d41ea86e8","tests/ui-msrv/transmute-mut-alignment-increase.rs":"ba83c9cf01acf11352f7ee5b54cd73a451394fd78b8ddeb0637931c87adfd6ae","tests/ui-msrv/transmute-mut-alignment-increase.stderr":"9e879881032ab5ba28f8cc6a240bf96150c4a7fb3e55b1de0c808dc5e0b1179d","tests/ui-msrv/transmute-mut-const.rs":"4227f4c0dda6d6128f41b209ecc2bf941c7659c8de
84cc0e418862d279baa78f","tests/ui-msrv/transmute-mut-const.stderr":"3c8dcb20b8cffd73f3b330b0199c5912ff015b51fce6d3acf684e388abb70a9c","tests/ui-msrv/transmute-mut-dst-generic.rs":"aa015679b75dac0c37d5c43782b5e9522257f6ba34a10a89d0c1eba524a7af5c","tests/ui-msrv/transmute-mut-dst-generic.stderr":"d19ae09a138d21aa64da708da85fd09b9b98a70c76cf397f5cbe7866ccbddbed","tests/ui-msrv/transmute-mut-dst-not-a-reference.rs":"5d784ab588f081bfc304501f811a85ea2662f88fff8274ccbd53172ec255212c","tests/ui-msrv/transmute-mut-dst-not-a-reference.stderr":"1cca08728f4b93b78757224420090f4ec0a2014043e9b1d86ffafe9fcc8f1faa","tests/ui-msrv/transmute-mut-dst-not-asbytes.rs":"b1f986b3433980d7572a80511ca5a758c91e0c761d01c50bc73ed025d45698a6","tests/ui-msrv/transmute-mut-dst-not-asbytes.stderr":"fd4a28b880eebd3d4f4b1f0388a26b372b07fd8186979970e2ea881379bf007b","tests/ui-msrv/transmute-mut-dst-not-frombytes.rs":"a4353eeb67b4701908e694738c5c4ce965afe4432f14e00e740684352f5ddd30","tests/ui-msrv/transmute-mut-dst-not-frombytes.stderr":"fd9e4c396c995be82b4bda2a28565f5d427d9733ae85f56cfb3922b1130fa06a","tests/ui-msrv/transmute-mut-dst-unsized.rs":"58c3423c07dd06ca98e61439f318ba5f3f7fc68ca9cb59371ebc482ad54709db","tests/ui-msrv/transmute-mut-dst-unsized.stderr":"b0c443b692859195ade80fb3650d51b4a01c2dd8b523322db84acfc3845b154d","tests/ui-msrv/transmute-mut-illegal-lifetime.rs":"ec18bf7b3d9bd2674b43d0e04fc0545227473d43b07e2bbccc19c2068df33673","tests/ui-msrv/transmute-mut-illegal-lifetime.stderr":"ff5965b190242ce05735d7c072c11565c5bd8609261c83dd06396ae8416dfbaa","tests/ui-msrv/transmute-mut-size-decrease.rs":"51aa423ec51a3c5579bbd7bac33adac8040629adc94eec3fb84825ef4f84f7bb","tests/ui-msrv/transmute-mut-size-decrease.stderr":"ae0c86cfbd85107ea908218c5389218a64a46ccf53a0bc553b9c17b48f475e0f","tests/ui-msrv/transmute-mut-size-increase.rs":"ecc34f87b2ec668338672be6bac82b4056ebe35d98fd5d9a210f43f7e866b8e1","tests/ui-msrv/transmute-mut-size-increase.stderr":"d8f4c9f85c59cf24f88e08b3e67796d1218a512e0082100bb63fe3
8a69186484","tests/ui-msrv/transmute-mut-src-dst-generic.rs":"613e00a353d1b359b57450bb408da585528f84b7eaf039a0c8d86bde1803395f","tests/ui-msrv/transmute-mut-src-dst-generic.stderr":"ec064415b76e341316de3886f3222ab826c2621ea641eb62285b1814369f48c2","tests/ui-msrv/transmute-mut-src-dst-not-references.rs":"0b73d42fbcecba3483e24d4e9296d24d551de18822b45120e225356c5ccefad8","tests/ui-msrv/transmute-mut-src-dst-not-references.stderr":"fc2740d55afdb07bdde457ac259f48ef5b3e13503968299e51791576328b207d","tests/ui-msrv/transmute-mut-src-dst-unsized.rs":"8ccf11a1990dbfd7ed7180c5e73e3a278f072f0a86eb2810f1b2c737ece76c57","tests/ui-msrv/transmute-mut-src-dst-unsized.stderr":"a47a39be560a9a80a31ebd6ee30178f2e375e9450c61a86effb3611f654e302c","tests/ui-msrv/transmute-mut-src-generic.rs":"2cfe526643436df07247cc2583e1d097b247411185952132433127a159527669","tests/ui-msrv/transmute-mut-src-generic.stderr":"a7588c104a34936839fdef78029fdc3929f08f6befac2a94ef5fce5364cd89ca","tests/ui-msrv/transmute-mut-src-immutable.rs":"606aba0c01726255c9be7e67a032ce854209c62dffec16d5dd2c8f484e19979a","tests/ui-msrv/transmute-mut-src-immutable.stderr":"6854b18881116cecf0c716eac01aac312bfe43a295a797c4ad01ac8b7ea7d81c","tests/ui-msrv/transmute-mut-src-not-a-reference.rs":"e627a60c6f6d1b398bdcfc9307dbc57b268cc784b4967d1afaceed7eebd5db47","tests/ui-msrv/transmute-mut-src-not-a-reference.stderr":"538af460b18f588b6075307de50ba1307f98189d2f2aea74346a77ad8b64710c","tests/ui-msrv/transmute-mut-src-not-asbytes.rs":"d0a6ddcfe31ac34ccc550090b80a67a010202bee12a39c230dd4374ef81a520c","tests/ui-msrv/transmute-mut-src-not-asbytes.stderr":"446ab2326cedeae89bd951561206dddcb546684629b12e46e3de1025caa7c894","tests/ui-msrv/transmute-mut-src-not-frombytes.rs":"5866e7d74baf3efb500338ba91a76f221e4a2479376e6921ec831fa284c9b3db","tests/ui-msrv/transmute-mut-src-not-frombytes.stderr":"659915278b39092444f82347fbd62d4bd0c12cecb1d5976159b3fd90c8b995f2","tests/ui-msrv/transmute-mut-src-unsized.rs":"6676d8f29f0a32418f86d4423c464f4e0fdb8fe9
ee8aa87f86c5fcdf8bd5e197","tests/ui-msrv/transmute-mut-src-unsized.stderr":"7f9a60f0bafa5d59403e49f2a6b68a56fa2be6c2a62d785fe4cb51bc056159cc","tests/ui-msrv/transmute-ptr-to-usize.rs":"ea33dc39115509988d9abd6ac6536d88d82082417b21da9f9bc8cf8369c69618","tests/ui-msrv/transmute-ptr-to-usize.stderr":"e8713417a977f07158a58aec6690c3a79b49cf5edb9e66f6c1d218a1a55f47eb","tests/ui-msrv/transmute-ref-alignment-increase.rs":"a5028469f90ca572ec1c73131f9a8a0a1cbca47de0dcb9003ba98de378def783","tests/ui-msrv/transmute-ref-alignment-increase.stderr":"2c56277ab280ac4477ccd3ca4c48ac60e096a95579bfea58da81d9082d8ab499","tests/ui-msrv/transmute-ref-dst-generic.rs":"4a6b56491fd59646d1d1d8edbcc9d7de0dc69a9e6e4779f3cfd90e287f11557c","tests/ui-msrv/transmute-ref-dst-generic.stderr":"8f47f9eabb44e8d5c561359237e79d42a998b615b526666e16db325b9cea8a09","tests/ui-msrv/transmute-ref-dst-mutable.rs":"1c48caae9912f70dec5f5a99a0c880fe6a3022f11fd412438b8a1576803e5f73","tests/ui-msrv/transmute-ref-dst-mutable.stderr":"289e040b3e725546081dfd07640e499a5622915954f12c871708d3f46ff43d7a","tests/ui-msrv/transmute-ref-dst-not-a-reference.rs":"c4b8a6c1970e30390d0a301e2dbe718b9eeef743299f7e91cd12c582ec203af7","tests/ui-msrv/transmute-ref-dst-not-a-reference.stderr":"b6c1f2aede85cce47f5ca379b9ae5a77c777e7c60de6590578c47432ebacae88","tests/ui-msrv/transmute-ref-dst-not-frombytes.rs":"42aab9630fbab93f400713a1730d6dd6a89f821b0fa4dd5347aabe5e78b13aff","tests/ui-msrv/transmute-ref-dst-not-frombytes.stderr":"00b6c3472c0f84f4e32217c1c839c0eab1bf449abbc8bb8f60878ce62c360c8b","tests/ui-msrv/transmute-ref-dst-unsized.rs":"c374df8d00541fd34fff37e231e341501a427961f60d88ad3e3c375085cc060d","tests/ui-msrv/transmute-ref-dst-unsized.stderr":"73636b1d142730f1330753c3fa14c88a32a23bf1c0741503b99610a506a8f66b","tests/ui-msrv/transmute-ref-illegal-lifetime.rs":"6812bbf7ec851a8591464f10864dbd1f225e65ed5793b6f6375cbe8a9db50b14","tests/ui-msrv/transmute-ref-illegal-lifetime.stderr":"4f2a3e71cda94564f2343ca9ff23de3eca0d2ff465cedacab18715
1183813092","tests/ui-msrv/transmute-ref-size-decrease.rs":"939fb562e4678368e59fdafb3a597fd54a661fd09d9ecb23c6e626ff59b45384","tests/ui-msrv/transmute-ref-size-decrease.stderr":"686597597e9f87e717b702bf6b8b6a52d14c5612ec267d48a01b442ab14648e1","tests/ui-msrv/transmute-ref-size-increase.rs":"f66ab294f7618abfac5c503570137759afceb0dd26c8802bb1786b8873fe5670","tests/ui-msrv/transmute-ref-size-increase.stderr":"f1ad62609362a24b5cf47761e30e2cf0a35db82682e041faf251b2a1f822da7c","tests/ui-msrv/transmute-ref-src-dst-generic.rs":"96a6f6580307e6a397af8ca688a8a65144dff5240372203bd9f02bad6a41fd1e","tests/ui-msrv/transmute-ref-src-dst-generic.stderr":"ca3c1493cbab64b5af7c3c4ea88ca16f6bb2478865b0dbe9d4a28d3b11d5fad1","tests/ui-msrv/transmute-ref-src-dst-not-references.rs":"7311602a0153b260d819e9608e8e66ef5904919a2349a95187919d8211e48e23","tests/ui-msrv/transmute-ref-src-dst-not-references.stderr":"003bb1ccb5cf8322416e00e0fa5645f94d76aad875e60d281daae9625eb583a4","tests/ui-msrv/transmute-ref-src-dst-unsized.rs":"f83e0225e824b7526d7732ef5d759b32358e5db50c3c6a318d2b5dcc2eb3c707","tests/ui-msrv/transmute-ref-src-dst-unsized.stderr":"558be2a5b90f3b3a304d5ae94ed3f7cd369e1e0ad03991ff57500913232ea8de","tests/ui-msrv/transmute-ref-src-generic.rs":"ac1699aeca61c82aff5dac51d387a4ef7522faf2b2dfc56af398a2dc9d53745b","tests/ui-msrv/transmute-ref-src-generic.stderr":"2ba4f5f66b2a2eae90f2cb4b28bb92b066fcaf17412ca777e7d9823697d64736","tests/ui-msrv/transmute-ref-src-not-a-reference.rs":"a921f168fa6cb3c6a19894cecdb118bc3164275746672a916aa5194b92f2fb57","tests/ui-msrv/transmute-ref-src-not-a-reference.stderr":"5a8d829089820ec79d9cd8d9ffac7dbde430914fdad691d46edcd96414d5cad0","tests/ui-msrv/transmute-ref-src-not-asbytes.rs":"09aabae9e4634a5432bf7225240954d7b0592994c97a927e0469e27854588232","tests/ui-msrv/transmute-ref-src-not-asbytes.stderr":"bbd65ef7225a7a39f8c53362a1f137a6b294227b0d2b658fa8082742cda4a8bf","tests/ui-msrv/transmute-ref-src-unsized.rs":"d7797488f0ab5db89944ac7db25625c63aef72e6e4ed481d
00a083449050b813","tests/ui-msrv/transmute-ref-src-unsized.stderr":"68537a0c14f72addd12d9e2a75f1a965e730a7ee8da04303402ecd69fe6de95e","tests/ui-msrv/transmute-size-decrease.rs":"c63dd10ddab58e282b033132d79fd21e80edb0c654f856679237977f62ced1ed","tests/ui-msrv/transmute-size-decrease.stderr":"978a9600a42a75fb33e46d10ac1485ef7c0a26054d15e52ec7e13023780d919e","tests/ui-msrv/transmute-size-increase.rs":"9413442e6e3c574bd7e36e8d4242000c1513624a4edc97567695a81b5851c491","tests/ui-msrv/transmute-size-increase.stderr":"168c9bb1045d125b069859d88132b7855a161e1353e1ff3d3f0bfcb70a831128","tests/ui-msrv/transmute-src-not-asbytes.rs":"8e2a76d99734c0502ba9daa8c7c2e34ca830ffd6024d3f7f29363d4263e89f74","tests/ui-msrv/transmute-src-not-asbytes.stderr":"e5913ff39e19e7f38b7aebe19f1930810c898d34fb7e7815c1404eff715f0414","tests/ui-nightly/include_value_not_from_bytes.rs":"ea2a419e0c7ce12b4febe6139523184cba2b2c54c879177e0c58a5f78f0ec340","tests/ui-nightly/include_value_not_from_bytes.stderr":"f8b8bbf3be0b9f6f8a019aa268901c8a49fd78240c55cbb66d104443607673d6","tests/ui-nightly/include_value_wrong_size.rs":"418e8c86ebf5a28ee50bd6ae00550f62a7a0ef3a7e7fda965b3d2337b64f2c66","tests/ui-nightly/include_value_wrong_size.stderr":"2b340b79ab4de286609aa5bf561c550ac3f30818df34bc659b54a58f4565501b","tests/ui-nightly/invalid-impls/invalid-impls.rs":"474d843ad40f3936adcd3ff592d815d8169813962ab9d99a68348b4b91aef10e","tests/ui-nightly/invalid-impls/invalid-impls.stderr":"f65bf5ae0342daf2f7b50c297fc395f1ffefd508d26be17cdf41e70a53e6901e","tests/ui-nightly/max-align.rs":"ffcb6687c98e5629d01b17cbd0845ec195007cc39aa244b26a77d17688c8f13d","tests/ui-nightly/max-align.stderr":"e6a1e261b02aa0fded5a3f3e3cdda6afe067f0d1430d684e3d7bd24af2e8635a","tests/ui-nightly/transmute-dst-not-frombytes.rs":"e00251eae67cdf8267a4963f212857a2a51de640a6f856c4b8df2a953caad25a","tests/ui-nightly/transmute-dst-not-frombytes.stderr":"2a10d7770af3f90c2f63d836f7c96e20f7d7e5a02b3a6e0606630e5de688896f","tests/ui-nightly/transmute-mut-alignmen
t-increase.rs":"ba83c9cf01acf11352f7ee5b54cd73a451394fd78b8ddeb0637931c87adfd6ae","tests/ui-nightly/transmute-mut-alignment-increase.stderr":"db521ff9c180434136b0e8421823435be8ed23c7ac85d9a83c479ad1b8153281","tests/ui-nightly/transmute-mut-const.rs":"4227f4c0dda6d6128f41b209ecc2bf941c7659c8de84cc0e418862d279baa78f","tests/ui-nightly/transmute-mut-const.stderr":"4ea82abf58a538b4524d7835b4ceaaa4c783997928c0d0297f386aba2079a5ef","tests/ui-nightly/transmute-mut-dst-generic.rs":"aa015679b75dac0c37d5c43782b5e9522257f6ba34a10a89d0c1eba524a7af5c","tests/ui-nightly/transmute-mut-dst-generic.stderr":"d012039fa54f3d7cc8ee7275637964e7d83f8067545260676326b571bca46617","tests/ui-nightly/transmute-mut-dst-not-a-reference.rs":"5d784ab588f081bfc304501f811a85ea2662f88fff8274ccbd53172ec255212c","tests/ui-nightly/transmute-mut-dst-not-a-reference.stderr":"9d21ae45aff909bf6e6feca6c60fae8db1e4318935aede558bee1e243ede59f8","tests/ui-nightly/transmute-mut-dst-not-asbytes.rs":"b1f986b3433980d7572a80511ca5a758c91e0c761d01c50bc73ed025d45698a6","tests/ui-nightly/transmute-mut-dst-not-asbytes.stderr":"b943aed513868c0e03703a8e980a1b8d2aef5ec3c0d915fef89fd5c993d6f38e","tests/ui-nightly/transmute-mut-dst-not-frombytes.rs":"a4353eeb67b4701908e694738c5c4ce965afe4432f14e00e740684352f5ddd30","tests/ui-nightly/transmute-mut-dst-not-frombytes.stderr":"911637625861f88bfc314d6903e2fae9e4eee663d085f7cdf8c96112a8e8a1b6","tests/ui-nightly/transmute-mut-dst-unsized.rs":"58c3423c07dd06ca98e61439f318ba5f3f7fc68ca9cb59371ebc482ad54709db","tests/ui-nightly/transmute-mut-dst-unsized.stderr":"185f7d73070819f2079f526d26ee8b67a782e267c6c18ff99bcba79faa8e5017","tests/ui-nightly/transmute-mut-illegal-lifetime.rs":"ec18bf7b3d9bd2674b43d0e04fc0545227473d43b07e2bbccc19c2068df33673","tests/ui-nightly/transmute-mut-illegal-lifetime.stderr":"b0379252732ca51314077fa20d3fb4bfcbee61f486229547c807ed0d7dede9c8","tests/ui-nightly/transmute-mut-size-decrease.rs":"51aa423ec51a3c5579bbd7bac33adac8040629adc94eec3fb84825ef4f84f7bb","te
sts/ui-nightly/transmute-mut-size-decrease.stderr":"9294c2562503924704673967f93afbfd4b1d84abbf76318636105acdc3f37a63","tests/ui-nightly/transmute-mut-size-increase.rs":"ecc34f87b2ec668338672be6bac82b4056ebe35d98fd5d9a210f43f7e866b8e1","tests/ui-nightly/transmute-mut-size-increase.stderr":"6858e39d6238843faa0ec4bf199f88d5013f1b50a811f5e882837f01eea00f93","tests/ui-nightly/transmute-mut-src-dst-generic.rs":"613e00a353d1b359b57450bb408da585528f84b7eaf039a0c8d86bde1803395f","tests/ui-nightly/transmute-mut-src-dst-generic.stderr":"f6a7bb45e58bf80a25a4e694e881f9c38f2a5d33817d9337d41a6d2c2aef93e8","tests/ui-nightly/transmute-mut-src-dst-not-references.rs":"0b73d42fbcecba3483e24d4e9296d24d551de18822b45120e225356c5ccefad8","tests/ui-nightly/transmute-mut-src-dst-not-references.stderr":"c871cb8a8e41cfe7bc04ecdd0196820370ba879c905bb200dd0310e63445b1ac","tests/ui-nightly/transmute-mut-src-dst-unsized.rs":"8ccf11a1990dbfd7ed7180c5e73e3a278f072f0a86eb2810f1b2c737ece76c57","tests/ui-nightly/transmute-mut-src-dst-unsized.stderr":"1e525ac43daf9c0352139ba3414217590684e97902c65fd31a39b47a9ff97cab","tests/ui-nightly/transmute-mut-src-generic.rs":"2cfe526643436df07247cc2583e1d097b247411185952132433127a159527669","tests/ui-nightly/transmute-mut-src-generic.stderr":"3c54bad3b3ab88b5c046bfb6ef79e0162ec7228447a1ba8321d9da754d536f20","tests/ui-nightly/transmute-mut-src-immutable.rs":"606aba0c01726255c9be7e67a032ce854209c62dffec16d5dd2c8f484e19979a","tests/ui-nightly/transmute-mut-src-immutable.stderr":"8babe44bc71011b849c8496008449a7f9109e8e1121fd835a85029bb4c21afb5","tests/ui-nightly/transmute-mut-src-not-a-reference.rs":"e627a60c6f6d1b398bdcfc9307dbc57b268cc784b4967d1afaceed7eebd5db47","tests/ui-nightly/transmute-mut-src-not-a-reference.stderr":"9fbd2270ad872bea0482068b37a0ee489ebf6acd3e0a68b0235da9c94b386407","tests/ui-nightly/transmute-mut-src-not-asbytes.rs":"d0a6ddcfe31ac34ccc550090b80a67a010202bee12a39c230dd4374ef81a520c","tests/ui-nightly/transmute-mut-src-not-asbytes.stderr":"bcf055
b807ef3da02755371c6473839be9fea88e848e92e0069a004fdea07fd1","tests/ui-nightly/transmute-mut-src-not-frombytes.rs":"5866e7d74baf3efb500338ba91a76f221e4a2479376e6921ec831fa284c9b3db","tests/ui-nightly/transmute-mut-src-not-frombytes.stderr":"3a96311db2b3618019a0380244ae960f8642adb4b2c3a76b401c0df85ffd7e1f","tests/ui-nightly/transmute-mut-src-unsized.rs":"6676d8f29f0a32418f86d4423c464f4e0fdb8fe9ee8aa87f86c5fcdf8bd5e197","tests/ui-nightly/transmute-mut-src-unsized.stderr":"cf973ad25465824647d50230c6965e5161f535b8336e37c9d2271749d89246da","tests/ui-nightly/transmute-ptr-to-usize.rs":"ea33dc39115509988d9abd6ac6536d88d82082417b21da9f9bc8cf8369c69618","tests/ui-nightly/transmute-ptr-to-usize.stderr":"f05ba5ad01e235eed456686a1ee5b7a668495c38054155965846d2bd613bd7d8","tests/ui-nightly/transmute-ref-alignment-increase.rs":"a5028469f90ca572ec1c73131f9a8a0a1cbca47de0dcb9003ba98de378def783","tests/ui-nightly/transmute-ref-alignment-increase.stderr":"aef92964ba843b890ce6c6b0924726dd89e1b9d6513f2148c269fe8fa203adac","tests/ui-nightly/transmute-ref-dst-generic.rs":"4a6b56491fd59646d1d1d8edbcc9d7de0dc69a9e6e4779f3cfd90e287f11557c","tests/ui-nightly/transmute-ref-dst-generic.stderr":"06b9fcf8e0443f997c0ef5f8e2659afcb65f095b11162ea69488f89788b337a7","tests/ui-nightly/transmute-ref-dst-mutable.rs":"1c48caae9912f70dec5f5a99a0c880fe6a3022f11fd412438b8a1576803e5f73","tests/ui-nightly/transmute-ref-dst-mutable.stderr":"96d38ce9a807ad7b60a846a8f5558c447da0d6cbe9225a077df4997712424d9a","tests/ui-nightly/transmute-ref-dst-not-a-reference.rs":"c4b8a6c1970e30390d0a301e2dbe718b9eeef743299f7e91cd12c582ec203af7","tests/ui-nightly/transmute-ref-dst-not-a-reference.stderr":"8ed2540877865fcdfca6e150465996a8f2872eb122ed5d647825e9181ae64754","tests/ui-nightly/transmute-ref-dst-not-frombytes.rs":"42aab9630fbab93f400713a1730d6dd6a89f821b0fa4dd5347aabe5e78b13aff","tests/ui-nightly/transmute-ref-dst-not-frombytes.stderr":"65d82eb523eeed4babc456616374b71cea0aaa21ab019281cd3ec3bb6ada05e4","tests/ui-nightly/tr
ansmute-ref-dst-unsized.rs":"c374df8d00541fd34fff37e231e341501a427961f60d88ad3e3c375085cc060d","tests/ui-nightly/transmute-ref-dst-unsized.stderr":"022fb4352cc105c4a358c5f7b903a55aac8e881ab623b6f5d527832e492c9a2f","tests/ui-nightly/transmute-ref-illegal-lifetime.rs":"6812bbf7ec851a8591464f10864dbd1f225e65ed5793b6f6375cbe8a9db50b14","tests/ui-nightly/transmute-ref-illegal-lifetime.stderr":"cb98c1b304334e58fc61be1c4b7782e68ab92d90a44c9627326d94d14a44cc38","tests/ui-nightly/transmute-ref-size-decrease.rs":"939fb562e4678368e59fdafb3a597fd54a661fd09d9ecb23c6e626ff59b45384","tests/ui-nightly/transmute-ref-size-decrease.stderr":"14f6ea48e66c484e94f47c3af0983de06869a884cda19b2201548aadc2378758","tests/ui-nightly/transmute-ref-size-increase.rs":"f66ab294f7618abfac5c503570137759afceb0dd26c8802bb1786b8873fe5670","tests/ui-nightly/transmute-ref-size-increase.stderr":"d5777c69b0ee36b6dcaf7699abb3ea03e1a8bac17bb5a1d4059ae28ff5f4357f","tests/ui-nightly/transmute-ref-src-dst-generic.rs":"96a6f6580307e6a397af8ca688a8a65144dff5240372203bd9f02bad6a41fd1e","tests/ui-nightly/transmute-ref-src-dst-generic.stderr":"ebffb5c5318798ff84f1da69c3ba732b9af2ad3688ebd7b4b2770e2b201afccb","tests/ui-nightly/transmute-ref-src-dst-not-references.rs":"7311602a0153b260d819e9608e8e66ef5904919a2349a95187919d8211e48e23","tests/ui-nightly/transmute-ref-src-dst-not-references.stderr":"0782e1b3e3137fe1137108d2d0aa685db107ac43af2192ff1e7ffef2e4a6453b","tests/ui-nightly/transmute-ref-src-dst-unsized.rs":"f83e0225e824b7526d7732ef5d759b32358e5db50c3c6a318d2b5dcc2eb3c707","tests/ui-nightly/transmute-ref-src-dst-unsized.stderr":"d8717d6a9d644e7f9ffd3545235b2b9e7b828e4a66d9a0de030931f83096827e","tests/ui-nightly/transmute-ref-src-generic.rs":"ac1699aeca61c82aff5dac51d387a4ef7522faf2b2dfc56af398a2dc9d53745b","tests/ui-nightly/transmute-ref-src-generic.stderr":"b53a09eca6226647cf53ee9bd0388e558def3bd1f8009b6ec74cc26e4db13d1c","tests/ui-nightly/transmute-ref-src-not-a-reference.rs":"a921f168fa6cb3c6a19894cecdb118bc316
4275746672a916aa5194b92f2fb57","tests/ui-nightly/transmute-ref-src-not-a-reference.stderr":"e4ef563eedda176adc05995d03ae328b8b8182bb682ffc323cf58211b467dff2","tests/ui-nightly/transmute-ref-src-not-asbytes.rs":"09aabae9e4634a5432bf7225240954d7b0592994c97a927e0469e27854588232","tests/ui-nightly/transmute-ref-src-not-asbytes.stderr":"00485cbb2eaaa8631e1d3ca4cbf77369490e34411c847c4122c15be85227ef98","tests/ui-nightly/transmute-ref-src-unsized.rs":"d7797488f0ab5db89944ac7db25625c63aef72e6e4ed481d00a083449050b813","tests/ui-nightly/transmute-ref-src-unsized.stderr":"657cd1fec23d8dab06f354dde93ac6989d049301edf3b2cd55b1e2e869095613","tests/ui-nightly/transmute-size-decrease.rs":"c63dd10ddab58e282b033132d79fd21e80edb0c654f856679237977f62ced1ed","tests/ui-nightly/transmute-size-decrease.stderr":"4e014a129866804cf91cc3ff7a8ad1044ae1e3a6aad3b6ff8839605ab1b1df77","tests/ui-nightly/transmute-size-increase.rs":"9413442e6e3c574bd7e36e8d4242000c1513624a4edc97567695a81b5851c491","tests/ui-nightly/transmute-size-increase.stderr":"c307d7a2ae3d18e016be5d77e720bcf7023d03b10bb3ff3190e4d934eb9fc6a7","tests/ui-nightly/transmute-src-not-asbytes.rs":"8e2a76d99734c0502ba9daa8c7c2e34ca830ffd6024d3f7f29363d4263e89f74","tests/ui-nightly/transmute-src-not-asbytes.stderr":"bb59ccb405a0737e51fff7d24aa15c65747eae0f6e0dcedb5557c185fe7e4667","tests/ui-stable/include_value_not_from_bytes.rs":"ea2a419e0c7ce12b4febe6139523184cba2b2c54c879177e0c58a5f78f0ec340","tests/ui-stable/include_value_not_from_bytes.stderr":"703b57e2287d1f2ec16511d4a12101d7e0bf357246b97fb2b9735f174380ef1d","tests/ui-stable/include_value_wrong_size.rs":"418e8c86ebf5a28ee50bd6ae00550f62a7a0ef3a7e7fda965b3d2337b64f2c66","tests/ui-stable/include_value_wrong_size.stderr":"b4fdeefd36bb2343f4e6cfae39c821fcfefd0671ea59205ffeea48318ce4fac7","tests/ui-stable/invalid-impls/invalid-impls.rs":"474d843ad40f3936adcd3ff592d815d8169813962ab9d99a68348b4b91aef10e","tests/ui-stable/invalid-impls/invalid-impls.stderr":"6458dc7e13558e98b459053a78b7bad24e
2ad2a4444a8b26ed5efed10287972a","tests/ui-stable/max-align.rs":"ffcb6687c98e5629d01b17cbd0845ec195007cc39aa244b26a77d17688c8f13d","tests/ui-stable/max-align.stderr":"a8bd50e80cd0ae680a52ea71d06d259a43300dcfbf6b336a12cb371fe84e119b","tests/ui-stable/transmute-dst-not-frombytes.rs":"e00251eae67cdf8267a4963f212857a2a51de640a6f856c4b8df2a953caad25a","tests/ui-stable/transmute-dst-not-frombytes.stderr":"6e6b46fabef706ce760e498d34a728cb5a21791177722daa03de22daa41fee1c","tests/ui-stable/transmute-mut-alignment-increase.rs":"ba83c9cf01acf11352f7ee5b54cd73a451394fd78b8ddeb0637931c87adfd6ae","tests/ui-stable/transmute-mut-alignment-increase.stderr":"92f1cda35d0c41a93f93152ad5c77fcd2c9ae17a7f2b4d54a311d434aa586400","tests/ui-stable/transmute-mut-const.rs":"4227f4c0dda6d6128f41b209ecc2bf941c7659c8de84cc0e418862d279baa78f","tests/ui-stable/transmute-mut-const.stderr":"41ababb65f8bccee041dbb3edf43896a1473fc106c14ca02ccc553452c8658eb","tests/ui-stable/transmute-mut-dst-generic.rs":"aa015679b75dac0c37d5c43782b5e9522257f6ba34a10a89d0c1eba524a7af5c","tests/ui-stable/transmute-mut-dst-generic.stderr":"f2c60a1aae05ad780802b0290989c546abe35adcbcacf83a2264446a40ceb5dd","tests/ui-stable/transmute-mut-dst-not-a-reference.rs":"5d784ab588f081bfc304501f811a85ea2662f88fff8274ccbd53172ec255212c","tests/ui-stable/transmute-mut-dst-not-a-reference.stderr":"16a9cf4e0f90772d19c132f50dd0a85e60ecd929a6aa0820fbf568c7f6183d74","tests/ui-stable/transmute-mut-dst-not-asbytes.rs":"b1f986b3433980d7572a80511ca5a758c91e0c761d01c50bc73ed025d45698a6","tests/ui-stable/transmute-mut-dst-not-asbytes.stderr":"594b28711e85b0b246d9271e3575c62c32a01f36b8917c8e66b288031da753bc","tests/ui-stable/transmute-mut-dst-not-frombytes.rs":"a4353eeb67b4701908e694738c5c4ce965afe4432f14e00e740684352f5ddd30","tests/ui-stable/transmute-mut-dst-not-frombytes.stderr":"3fbf5b32de9e5b6818299196d0393d707cf018fa9773cd2446483e320a8caadd","tests/ui-stable/transmute-mut-dst-unsized.rs":"58c3423c07dd06ca98e61439f318ba5f3f7fc68ca9cb59371ebc48
2ad54709db","tests/ui-stable/transmute-mut-dst-unsized.stderr":"757959b30d40bbfe90218e3dadb0aa9f2933f970fcc45de999e5ece508926abf","tests/ui-stable/transmute-mut-illegal-lifetime.rs":"ec18bf7b3d9bd2674b43d0e04fc0545227473d43b07e2bbccc19c2068df33673","tests/ui-stable/transmute-mut-illegal-lifetime.stderr":"3a43e0be32ef3589fe3fa713d387bd3976bd8c75813f9641bbf7c539e10bed41","tests/ui-stable/transmute-mut-size-decrease.rs":"51aa423ec51a3c5579bbd7bac33adac8040629adc94eec3fb84825ef4f84f7bb","tests/ui-stable/transmute-mut-size-decrease.stderr":"b63870c4361917d4cd19fbaba433a9389b806135c9576ae8997c86f3b763fe3c","tests/ui-stable/transmute-mut-size-increase.rs":"ecc34f87b2ec668338672be6bac82b4056ebe35d98fd5d9a210f43f7e866b8e1","tests/ui-stable/transmute-mut-size-increase.stderr":"cb086ebcc60c4e17f8897c62c5b36b110b259c6e970825953798daf37144af47","tests/ui-stable/transmute-mut-src-dst-generic.rs":"613e00a353d1b359b57450bb408da585528f84b7eaf039a0c8d86bde1803395f","tests/ui-stable/transmute-mut-src-dst-generic.stderr":"ff7758361ba41d2bc3a49e9942e9f1f1b76d245f19a5391e45b9a066b8d0f6f4","tests/ui-stable/transmute-mut-src-dst-not-references.rs":"0b73d42fbcecba3483e24d4e9296d24d551de18822b45120e225356c5ccefad8","tests/ui-stable/transmute-mut-src-dst-not-references.stderr":"830581700736527e224bd923da3cd9c215e68556d2379c678174c08eff1501d6","tests/ui-stable/transmute-mut-src-dst-unsized.rs":"8ccf11a1990dbfd7ed7180c5e73e3a278f072f0a86eb2810f1b2c737ece76c57","tests/ui-stable/transmute-mut-src-dst-unsized.stderr":"daf408c8b529c0000fb4422b63ca0e98b29cdcc8c49c33ed305418cbaf430cca","tests/ui-stable/transmute-mut-src-generic.rs":"2cfe526643436df07247cc2583e1d097b247411185952132433127a159527669","tests/ui-stable/transmute-mut-src-generic.stderr":"de709f4435bf09ce98a6a9b19ac69560f85c43b665277ef60c9e62169e4a001f","tests/ui-stable/transmute-mut-src-immutable.rs":"606aba0c01726255c9be7e67a032ce854209c62dffec16d5dd2c8f484e19979a","tests/ui-stable/transmute-mut-src-immutable.stderr":"7c24d82d943695955b3e
c1f0a53a349645fd3de1d549f3be989532e3774279bf","tests/ui-stable/transmute-mut-src-not-a-reference.rs":"e627a60c6f6d1b398bdcfc9307dbc57b268cc784b4967d1afaceed7eebd5db47","tests/ui-stable/transmute-mut-src-not-a-reference.stderr":"29b09aea59cfdb4b6535c5d33ec803539f28e53cce81938767ea0c22a1b1ce7d","tests/ui-stable/transmute-mut-src-not-asbytes.rs":"d0a6ddcfe31ac34ccc550090b80a67a010202bee12a39c230dd4374ef81a520c","tests/ui-stable/transmute-mut-src-not-asbytes.stderr":"91eba713b0f4e446f51910220686351187e55d43873ea49cc8a3c00312fe49cf","tests/ui-stable/transmute-mut-src-not-frombytes.rs":"5866e7d74baf3efb500338ba91a76f221e4a2479376e6921ec831fa284c9b3db","tests/ui-stable/transmute-mut-src-not-frombytes.stderr":"89351ba67ebc7fb3fe4e2da713d6b93deee1f2a3f81eaeb2ebceb5b469cae8cf","tests/ui-stable/transmute-mut-src-unsized.rs":"6676d8f29f0a32418f86d4423c464f4e0fdb8fe9ee8aa87f86c5fcdf8bd5e197","tests/ui-stable/transmute-mut-src-unsized.stderr":"28377ad3195fcffebb8c50980af4f7b5c5eb8c673b3ebf21e308a9c84f4cfa58","tests/ui-stable/transmute-ptr-to-usize.rs":"ea33dc39115509988d9abd6ac6536d88d82082417b21da9f9bc8cf8369c69618","tests/ui-stable/transmute-ptr-to-usize.stderr":"cba0e2d85a961b56d8fc2566bc555082b52f762ac36b9745e319bb5d1e726514","tests/ui-stable/transmute-ref-alignment-increase.rs":"a5028469f90ca572ec1c73131f9a8a0a1cbca47de0dcb9003ba98de378def783","tests/ui-stable/transmute-ref-alignment-increase.stderr":"514c5254a0e84051cb34bd700c08163a98195730b87e67acda8907d401311b6c","tests/ui-stable/transmute-ref-dst-generic.rs":"4a6b56491fd59646d1d1d8edbcc9d7de0dc69a9e6e4779f3cfd90e287f11557c","tests/ui-stable/transmute-ref-dst-generic.stderr":"0fa2e50dd2f259260511ae3534334420e4384d542daa8532c7d3a625652c2ada","tests/ui-stable/transmute-ref-dst-mutable.rs":"1c48caae9912f70dec5f5a99a0c880fe6a3022f11fd412438b8a1576803e5f73","tests/ui-stable/transmute-ref-dst-mutable.stderr":"fc83b5283cb5319fd7a2b79f94ed0a49f16bce5b222f7e1cc5ce5a879f3de650","tests/ui-stable/transmute-ref-dst-not-a-reference.rs"
:"c4b8a6c1970e30390d0a301e2dbe718b9eeef743299f7e91cd12c582ec203af7","tests/ui-stable/transmute-ref-dst-not-a-reference.stderr":"e8a126f4832344b8a69591fcc25e22bbbb29f2078b809a47f8afa40ac1087a1f","tests/ui-stable/transmute-ref-dst-not-frombytes.rs":"42aab9630fbab93f400713a1730d6dd6a89f821b0fa4dd5347aabe5e78b13aff","tests/ui-stable/transmute-ref-dst-not-frombytes.stderr":"dfb9cf0089d8040befa1413ad558a73b1b3d688c887c5712d5e0645a6c715b8c","tests/ui-stable/transmute-ref-dst-unsized.rs":"c374df8d00541fd34fff37e231e341501a427961f60d88ad3e3c375085cc060d","tests/ui-stable/transmute-ref-dst-unsized.stderr":"8a5410232b38b232921f6ae6d9b4ec6f5d3296aa21f8ebeda76faeeabb189941","tests/ui-stable/transmute-ref-illegal-lifetime.rs":"6812bbf7ec851a8591464f10864dbd1f225e65ed5793b6f6375cbe8a9db50b14","tests/ui-stable/transmute-ref-illegal-lifetime.stderr":"45ab741d710dc5a01a21ab64f99927e7da5593328b2037b9bc82a87bc0969136","tests/ui-stable/transmute-ref-size-decrease.rs":"939fb562e4678368e59fdafb3a597fd54a661fd09d9ecb23c6e626ff59b45384","tests/ui-stable/transmute-ref-size-decrease.stderr":"fec5ab0e3d885bbb8e7ab82d6d58b9b4ee35a1802502fbc494bafa086d4132cf","tests/ui-stable/transmute-ref-size-increase.rs":"f66ab294f7618abfac5c503570137759afceb0dd26c8802bb1786b8873fe5670","tests/ui-stable/transmute-ref-size-increase.stderr":"720e2150c9ed538cf00d7525124ab0cee6ac53e91582470e09c140db783fc2be","tests/ui-stable/transmute-ref-src-dst-generic.rs":"96a6f6580307e6a397af8ca688a8a65144dff5240372203bd9f02bad6a41fd1e","tests/ui-stable/transmute-ref-src-dst-generic.stderr":"25f15e5316df34cd4a438548090c287228f86062f7e2ef59ea17fb727b868a19","tests/ui-stable/transmute-ref-src-dst-not-references.rs":"7311602a0153b260d819e9608e8e66ef5904919a2349a95187919d8211e48e23","tests/ui-stable/transmute-ref-src-dst-not-references.stderr":"2bff9f290ec40458939a1633f850853b3486220cfd40bc24c4e52635b7455742","tests/ui-stable/transmute-ref-src-dst-unsized.rs":"f83e0225e824b7526d7732ef5d759b32358e5db50c3c6a318d2b5dcc2eb3c707","tes
ts/ui-stable/transmute-ref-src-dst-unsized.stderr":"4331d74113a83df3e7077a50b7ee6ed6868834808b9ebb6982b1475f4e6afece","tests/ui-stable/transmute-ref-src-generic.rs":"ac1699aeca61c82aff5dac51d387a4ef7522faf2b2dfc56af398a2dc9d53745b","tests/ui-stable/transmute-ref-src-generic.stderr":"f3f8a7ee67ebec21169e1284c9eeaedcfa7b93c05f4e42c504cbd06508f34f9f","tests/ui-stable/transmute-ref-src-not-a-reference.rs":"a921f168fa6cb3c6a19894cecdb118bc3164275746672a916aa5194b92f2fb57","tests/ui-stable/transmute-ref-src-not-a-reference.stderr":"52efb101d85126138395fbed84c7cb911f86ea4457b991d91b2b6ec66521bcff","tests/ui-stable/transmute-ref-src-not-asbytes.rs":"09aabae9e4634a5432bf7225240954d7b0592994c97a927e0469e27854588232","tests/ui-stable/transmute-ref-src-not-asbytes.stderr":"7f2cc024e80cfc10e8ac21d016d940fce149014ad2664d3b9ef380b1bb69c14c","tests/ui-stable/transmute-ref-src-unsized.rs":"d7797488f0ab5db89944ac7db25625c63aef72e6e4ed481d00a083449050b813","tests/ui-stable/transmute-ref-src-unsized.stderr":"db7f3c025885a2f64559e63901cad5acb6baae5c20973fb6470edad6ba0cacc9","tests/ui-stable/transmute-size-decrease.rs":"c63dd10ddab58e282b033132d79fd21e80edb0c654f856679237977f62ced1ed","tests/ui-stable/transmute-size-decrease.stderr":"685acfb1b758f9854a5b36565f0b26cc1ef35322ee25387f05733187de1864d1","tests/ui-stable/transmute-size-increase.rs":"9413442e6e3c574bd7e36e8d4242000c1513624a4edc97567695a81b5851c491","tests/ui-stable/transmute-size-increase.stderr":"54cf03066a5d10ab7caa4741fe9d40df491a9a3fb81b6425a40bf04e21a6910e","tests/ui-stable/transmute-src-not-asbytes.rs":"8e2a76d99734c0502ba9daa8c7c2e34ca830ffd6024d3f7f29363d4263e89f74","tests/ui-stable/transmute-src-not-asbytes.stderr":"6a8f100e6da7f425c532370cd273b8af7f5f44450d0160ee73dbda8fe5f20a59"},"package":"1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"} \ No newline at end of file diff --git a/src/rust/vendor/zerocopy/CONTRIBUTING.md b/src/rust/vendor/zerocopy/CONTRIBUTING.md new file mode 100644 index 
000000000..929e80996 --- /dev/null +++ b/src/rust/vendor/zerocopy/CONTRIBUTING.md @@ -0,0 +1,215 @@ + + +# How to Contribute + +We'd love to accept your patches and contributions to zerocopy. There are just a +few small guidelines you need to follow. + +Once you've read the rest of this doc, check out our [good-first-issue +label][good-first-issue] for some good issues you can use to get your toes wet! + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code Reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult [GitHub +Help][about_pull_requests] for more information on using pull requests. + +## Code Guidelines + +### Philosophy + +This section is inspired by [Flutter's style guide][flutter_philosophy], which +contains many general principles that you should apply to all your programming +work. Read it. The below calls out specific aspects that we feel are +particularly important. + +#### Dogfood Your Features + +In non-library code, it's often advised to only implement features you need. +After all, it's hard to correctly design code without a concrete use case to +guide its design. Since zerocopy is a library, this advice is not as applicable; +we want our API surface to be featureful and complete even if not every feature +or method has a known use case. However, the observation that unused code is +hard to design still holds. 
+ +Thus, when designing external-facing features, try to make use of them somehow. +This could be by using them to implement other features, or it could be by +writing prototype code which won't actually be checked in anywhere. If you're +feeling ambitious, you could even add (and check in) a [Cargo +example][cargo_example] that exercises the new feature. + +#### Go Down the Rabbit Hole + +You will occasionally encounter behavior that surprises you or seems wrong. It +probably is! Invest the time to find the root cause - you will either learn +something, or fix something, and both are worth your time. Do not work around +behavior you don't understand. + +### Avoid Duplication + +Avoid duplicating code whenever possible. In cases where existing code is not +exposed in a manner suitable to your needs, prefer to extract the necessary +parts into a common dependency. + +### Comments + +When writing comments, take a moment to consider the future reader of your +comment. Ensure that your comments are complete sentences with proper grammar +and punctuation. Note that adding more comments or more verbose comments is not +always better; for example, avoid comments that repeat the code they're anchored +on. + +Documentation comments should be self-contained; in other words, do not assume +that the reader is aware of documentation in adjacent files or on adjacent +structures. Avoid documentation comments on types which describe _instances_ of +the type; for example, `AddressSet is a set of client addresses.` is a comment +that describes a field of type `AddressSet`, but the type may be used to hold +any kind of `Address`, not just a client's. + +Phrase your comments to avoid references that might become stale; for example: +do not mention a variable or type by name when possible (certain doc comments +are necessary exceptions). 
Also avoid references to past or future versions of +or past or future work surrounding the item being documented; explain things +from first principles rather than making external references (including past +revisions). + +When writing TODOs: + +1. Include an issue reference using the format `TODO(#123):` +1. Phrase the text as an action that is to be taken; it should be possible for + another contributor to pick up the TODO without consulting any external + sources, including the referenced issue. + +### Tests + +Much of the code in zerocopy has the property that, if it is buggy, those bugs +may not cause user code to fail. This makes it extra important to write thorough +tests, but it also makes it harder to write those tests correctly. Here are some +guidelines on how to test code in zerocopy: +1. All code added to zerocopy must include tests that exercise it completely. +1. Tests must be deterministic. Threaded or time-dependent code, random number + generators (RNGs), and communication with external processes are common + sources of nondeterminism. See [Write reproducible, deterministic + tests][determinism] for tips. +1. Avoid [change detector tests][change_detector_tests]; tests that are + unnecessarily sensitive to changes, especially ones external to the code + under test, can hamper feature development and refactoring. +1. Since we run tests in [Miri][miri], make sure that tests exist which exercise + any potential [undefined behavior][undefined_behavior] so that Miri can catch + it. +1. If there's some user code that should be impossible to compile, add a + [trybuild test][trybuild] to ensure that it's properly rejected. + +### Source Control Best Practices + +Commits should be arranged for ease of reading; that is, incidental changes +such as code movement or formatting changes should be committed separately from +actual code changes. + +Commits should always be focused. 
For example, a commit could add a feature, +fix a bug, or refactor code, but not a mixture. + +Commits should be thoughtfully sized; avoid overly large or complex commits +which can be logically separated, but also avoid overly separated commits that +require code reviews to load multiple commits into their mental working memory +in order to properly understand how the various pieces fit together. + +#### Commit Messages + +Commit messages should be _concise_ but self-contained (avoid relying on issue +references as explanations for changes) and written such that they are helpful +to people reading in the future (include rationale and any necessary context). + +Avoid superfluous details or narrative. + +Commit messages should consist of a brief subject line and a separate +explanatory paragraph in accordance with the following: + +1. [Separate subject from body with a blank line](https://chris.beams.io/posts/git-commit/#separate) +1. [Limit the subject line to 50 characters](https://chris.beams.io/posts/git-commit/#limit-50) +1. [Capitalize the subject line](https://chris.beams.io/posts/git-commit/#capitalize) +1. [Do not end the subject line with a period](https://chris.beams.io/posts/git-commit/#end) +1. [Use the imperative mood in the subject line](https://chris.beams.io/posts/git-commit/#imperative) +1. [Wrap the body at 72 characters](https://chris.beams.io/posts/git-commit/#wrap-72) +1. [Use the body to explain what and why vs. how](https://chris.beams.io/posts/git-commit/#why-not-how) + +If the code affects a particular subsystem, prefix the subject line with the +name of that subsystem in square brackets, omitting any "zerocopy" prefix +(that's implicit). For example, for a commit adding a feature to the +zerocopy-derive crate: + +```text +[derive] Support AsBytes on types with parameters +``` + +The body may be omitted if the subject is self-explanatory; e.g. when fixing a +typo. 
The git book contains a [Commit Guidelines][commit_guidelines] section +with much of the same advice, and the list above is part of a [blog +post][beams_git_commit] by [Chris Beams][chris_beams]. + +Commit messages should make use of issue integration. Including an issue +reference like `#123` will cause the GitHub UI to link the text of that +reference to the referenced issue, and will also make it so that the referenced +issue back-links to the commit. Use "Closes", "Fixes", or "Resolves" on its own +line to automatically close an issue when your commit is merged: + +```text +Closes #123 +Fixes #123 +Resolves #123 +``` + +When using issue integration, don't omit necessary context that may also be +included in the relevant issue (see "Commit messages should be _concise_ but +self-contained" above). Git history is more likely to be retained indefinitely +than issue history (for example, if this repository is migrated away from GitHub +at some point in the future). + +Commit messages should never contain references to any of: + +1. Relative moments in time +1. Non-public URLs +1. Individuals +1. Hosted code reviews (such as on https://github.com/google/zerocopy/pulls) + + Refer to commits in this repository by their SHA-1 hash + + Refer to commits in other repositories by public web address (such as + https://github.com/google/zerocopy/commit/789b3deb) +1. Other entities which may not make sense to arbitrary future readers + +## Community Guidelines + +This project follows [Google's Open Source Community +Guidelines][google_open_source_guidelines]. 
+ +[about_pull_requests]: https://help.github.com/articles/about-pull-requests/ +[beams_git_commit]: https://chris.beams.io/posts/git-commit/ +[cargo_example]: http://xion.io/post/code/rust-examples.html +[change_detector_tests]: https://testing.googleblog.com/2015/01/testing-on-toilet-change-detector-tests.html +[chris_beams]: https://chris.beams.io/ +[commit_guidelines]: https://www.git-scm.com/book/en/v2/Distributed-Git-Contributing-to-a-Project#_commit_guidelines +[determinism]: https://fuchsia.dev/fuchsia-src/contribute/testing/best-practices#write_reproducible_deterministic_tests +[flutter_philosophy]: https://github.com/flutter/flutter/wiki/Style-guide-for-Flutter-repo#philosophy +[good-first-issue]: https://github.com/google/zerocopy/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 +[google_open_source_guidelines]: https://opensource.google/conduct/ +[magic_number]: https://en.wikipedia.org/wiki/Magic_number_(programming) +[miri]: https://github.com/rust-lang/miri +[trybuild]: https://crates.io/crates/trybuild +[undefined_behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html diff --git a/src/rust/vendor/zerocopy/Cargo.toml b/src/rust/vendor/zerocopy/Cargo.toml new file mode 100644 index 000000000..3d3907cb8 --- /dev/null +++ b/src/rust/vendor/zerocopy/Cargo.toml @@ -0,0 +1,101 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2018" +rust-version = "1.60.0" +name = "zerocopy" +version = "0.7.35" +authors = ["Joshua Liebow-Feeser "] +exclude = [".*"] +description = "Utilities for zero-copy parsing and serialization" +readme = "README.md" +keywords = [ + "cast", + "convert", + "transmute", + "transmutation", + "type-punning", +] +categories = [ + "embedded", + "encoding", + "no-std::no-alloc", + "parsing", + "rust-patterns", +] +license = "BSD-2-Clause OR Apache-2.0 OR MIT" +repository = "https://github.com/google/zerocopy" + +[package.metadata.ci] +pinned-nightly = "nightly-2024-06-19" +pinned-stable = "1.79.0" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "doc_cfg", + "--generate-link-to-definition", +] + +[package.metadata.playground] +features = ["__internal_use_only_features_that_work_on_stable"] + +[dependencies.byteorder] +version = "1.3" +optional = true +default-features = false + +[dependencies.zerocopy-derive] +version = "=0.7.35" +optional = true + +[dev-dependencies.assert_matches] +version = "1.5" + +[dev-dependencies.elain] +version = "0.3.0" + +[dev-dependencies.itertools] +version = "0.11" + +[dev-dependencies.rand] +version = "0.8.5" +features = ["small_rng"] + +[dev-dependencies.rustversion] +version = "1.0" + +[dev-dependencies.static_assertions] +version = "1.1" + +[dev-dependencies.trybuild] +version = "=1.0.85" +features = ["diff"] + +[dev-dependencies.zerocopy-derive] +version = "=0.7.35" + +[features] +__internal_use_only_features_that_work_on_stable = [ + "alloc", + "derive", + "simd", +] +alloc = [] +default = ["byteorder"] +derive = ["zerocopy-derive"] +simd = [] +simd-nightly = ["simd"] + +[target."cfg(any())".dependencies.zerocopy-derive] +version = "=0.7.35" diff --git a/src/rust/vendor/zerocopy/INTERNAL.md b/src/rust/vendor/zerocopy/INTERNAL.md new file mode 100644 index 000000000..4e7f44073 --- /dev/null +++ b/src/rust/vendor/zerocopy/INTERNAL.md @@ -0,0 +1,44 @@ + + +# Internal details + +This 
file documents various internal details of zerocopy and its infrastructure +that consumers don't need to be concerned about. It focuses on details that +affect multiple files, and allows each affected code location to reference this +document rather than requiring us to repeat the same explanation in multiple +locations. + +## CI and toolchain versions + +In CI (`.github/workflows/ci.yml`), we pin to specific versions or dates of the +stable and nightly toolchains. The reason is twofold: First, our UI tests (see +`tests/trybuild.rs` and `zerocopy-derive/tests/trybuild.rs`) depend on the +format of rustc's error messages, and that format can change between toolchain +versions (we also maintain multiple copies of our UI tests - one for each +toolchain version pinned in CI - for this reason). Second, not all nightlies +have a working Miri, so we need to pin to one that does (see +https://rust-lang.github.io/rustup-components-history/). + +Updating the versions pinned in CI may cause the UI tests to break. In order to +fix UI tests after a version update, run: + +``` +$ TRYBUILD=overwrite ./cargo.sh +all test +``` + +## Crate versions + +We ensure that the crate versions of zerocopy and zerocopy-derive are always the +same in-tree, and that zerocopy depends upon zerocopy-derive using an exact +version match to the current version in-tree. This has the result that, even +when published on crates.io, both crates effectively constitute a single atomic +version. So long as the code in zerocopy is compatible with the code in +zerocopy-derive in the same Git commit, then publishing them both is fine. This +frees us from the normal task of reasoning about compatibility with a range of +semver-compatible versions of different crates. 
diff --git a/src/rust/vendor/zerocopy/LICENSE-APACHE b/src/rust/vendor/zerocopy/LICENSE-APACHE new file mode 100644 index 000000000..2dc22c12f --- /dev/null +++ b/src/rust/vendor/zerocopy/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 The Fuchsia Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/rust/vendor/zerocopy/LICENSE-BSD b/src/rust/vendor/zerocopy/LICENSE-BSD new file mode 100644 index 000000000..7ed244f42 --- /dev/null +++ b/src/rust/vendor/zerocopy/LICENSE-BSD @@ -0,0 +1,24 @@ +Copyright 2019 The Fuchsia Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/rust/vendor/zerocopy/LICENSE-MIT b/src/rust/vendor/zerocopy/LICENSE-MIT new file mode 100644 index 000000000..26e15216c --- /dev/null +++ b/src/rust/vendor/zerocopy/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright 2023 The Fuchsia Authors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/src/rust/vendor/zerocopy/POLICIES.md b/src/rust/vendor/zerocopy/POLICIES.md new file mode 100644 index 000000000..7f6e148b5 --- /dev/null +++ b/src/rust/vendor/zerocopy/POLICIES.md @@ -0,0 +1,114 @@ + + +# Zerocopy's Policies + +## Soundness + +Zerocopy is expressly designed for use in security-critical contexts. It is used +in hardware security firmware, cryptographic implementations, hypervisors, and +more. We understand that software in these contexts has a very high bar for +correctness, and we take our responsibility to meet that bar very seriously. + +This section describes policies which are designed to ensure the correctness and +soundness of our code and prevent regressions. + +### Forwards-compatibility + +Rust does not currently have a formal memory model. As such, while Rust provides +guarantees about the semantics of some operations, the semantics of many +operations is up in the air and subject to change. + +Zerocopy strives to ensure that our code - and code emitted by our custom +derives - is sound under any version of Rust as early as our MSRV, and will +continue to be sound under any future version of Rust. The policies in this +section are designed to help ensure that we live up to this goal. + +### Safety comments + +Each non-test `unsafe` block must be annotated with a "safety comment" which +provides a rationale for its soundness. In order to ensure that our soundness is +forwards-compatible, safety comments must satisfy the following criteria: +- Safety comments must constitute a (possibly informal) proof that all of Rust's + soundness rules are upheld. 
+- Safety comments must only rely for their correctness on statements which + appear in the stable versions of the [Rust Reference] or standard library + documentation (ie, the docs for [core], [alloc], and [std]); arguments which + rely on text from the beta or nightly versions of these documents are not + considered complete. +- All statements from the Reference or standard library documentation which are + relied upon for soundness must be quoted in the safety comment. This ensures + that there is no ambiguity as to what aspect of the text is being cited. This + is especially important in cases where the text of these documents changes in + the future. Such changes are of course required to be backwards-compatible, + but may change the manner in which a particular guarantee is explained. + +We use the [`clippy::undocumented_unsafe_blocks`] lint to ensure that `unsafe` +blocks cannot be added without a safety comment. Note that there are a few +outstanding uncommented `unsafe` blocks which are tracked in [#429]. Our goal is +to reach 100% safety comment coverage and not regress once we've reached it. + +[Rust Reference]: https://doc.rust-lang.org/reference/ +[core]: https://doc.rust-lang.org/stable/core/ +[alloc]: https://doc.rust-lang.org/stable/alloc/ +[std]: https://doc.rust-lang.org/stable/std/ +[`clippy::undocumented_unsafe_blocks`]: https://rust-lang.github.io/rust-clippy/master/index.html#/undocumented_unsafe_blocks +[#429]: https://github.com/google/zerocopy/issues/429 + +#### Exceptions to our safety comment policy + +In rare circumstances, the soundness of an `unsafe` block may depend upon +semantics which are widely agreed upon but not formally guaranteed. 
In order to +avoid slowing down zerocopy's development to an unreasonable degree, a safety +comment may violate our safety comment policy so long as all of the following +hold: +- The safety comment's correctness may rely on semantics which are not + guaranteed in official Rust documentation *so long as* a member of the Rust + team has articulated in an official communication (e.g. a comment on a Rust + GitHub repo) that Rust intends to guarantee particular semantics. +- There exists an active effort to formalize the guarantee in Rust's official + documentation. + +### Target architecture support + +Zerocopy bases its soundness on guarantees made about the semantics of Rust +which appear in the Rust Reference or standard library documentation; zerocopy +is sound so long as these guarantees hold. There are known cases in which these +guarantees do not hold on certain target architectures (see +[rust-lang/unsafe-code-guidelines#461]); on such target architectures, zerocopy +may be unsound. We consider it outside of zerocopy's scope to reason about these +cases. Zerocopy makes no effort to maintain soundness in cases where Rust's +documented guarantees do not hold. + +[rust-lang/unsafe-code-guidelines#461]: https://github.com/rust-lang/unsafe-code-guidelines/issues/461 + +## MSRV + + + +Without the `derive` feature enabled, zerocopy's minimum supported Rust version +(MSRV) is encoded in the `package.rust-version` field in its `Cargo.toml` file. For +zerocopy, we consider an increase in MSRV to be a semver-breaking change, and +will only increase our MSRV during semver-breaking version changes (e.g., 0.1 -> +0.2, 1.0 -> 2.0, etc). + +For zerocopy with the `derive` feature enabled, and for the zerocopy-derive +crate, we inherit the MSRV of our sole external dependency, syn. As of this +writing (2024-07-02), syn does *not* consider MSRV increases to be +semver-breaking changes. 
Thus, using the `derive` feature may result in the +effective MSRV increasing within a semver version train. + +## Yanking + +Whenever a bug or regression is identified, we will yank any affected versions +which are part of the current version train. For example, if the most recent +version is 0.10.20 and a bug is uncovered, we will release a fix in 0.10.21 and +yank all 0.10.X versions which are affected. We *may* also yank versions in previous +version trains on a case-by-case basis, but we don't guarantee it. diff --git a/src/rust/vendor/zerocopy/README.md b/src/rust/vendor/zerocopy/README.md new file mode 100644 index 000000000..ec09c4569 --- /dev/null +++ b/src/rust/vendor/zerocopy/README.md @@ -0,0 +1,154 @@ + + +# zerocopy + +*Want to help improve zerocopy? +Fill out our [user survey][user-survey]!* + +***Fast, safe, compile error. Pick two.*** + +Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe` +so you don't have to. + +## Overview + +Zerocopy provides four core marker traits, each of which can be derived +(e.g., `#[derive(FromZeroes)]`): +- `FromZeroes` indicates that a sequence of zero bytes represents a valid + instance of a type +- `FromBytes` indicates that a type may safely be converted from an + arbitrary byte sequence +- `AsBytes` indicates that a type may safely be converted *to* a byte + sequence +- `Unaligned` indicates that a type's alignment requirement is 1 + +Types which implement a subset of these traits can then be converted to/from +byte sequences with little to no runtime overhead. + +Zerocopy also provides byte-order aware integer types that support these +conversions; see the `byteorder` module. These types are especially useful +for network parsing. + +[user-survey]: https://docs.google.com/forms/d/e/1FAIpQLSdzBNTN9tzwsmtyZxRFNL02K36IWCdHWW2ZBckyQS2xiO3i8Q/viewform?usp=published_options + +## Cargo Features + +- **`alloc`** + By default, `zerocopy` is `no_std`. 
When the `alloc` feature is enabled, + the `alloc` crate is added as a dependency, and some allocation-related + functionality is added. + +- **`byteorder`** (enabled by default) + Adds the `byteorder` module and a dependency on the `byteorder` crate. + The `byteorder` module provides byte order-aware equivalents of the + multi-byte primitive numerical types. Unlike their primitive equivalents, + the types in this module have no alignment requirement and support byte + order conversions. This can be useful in handling file formats, network + packet layouts, etc which don't provide alignment guarantees and which may + use a byte order different from that of the execution platform. + +- **`derive`** + Provides derives for the core marker traits via the `zerocopy-derive` + crate. These derives are re-exported from `zerocopy`, so it is not + necessary to depend on `zerocopy-derive` directly. + + However, you may experience better compile times if you instead directly + depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`, + since doing so will allow Rust to compile these crates in parallel. To do + so, do *not* enable the `derive` feature, and list both dependencies in + your `Cargo.toml` with the same leading non-zero version number; e.g: + + ```toml + [dependencies] + zerocopy = "0.X" + zerocopy-derive = "0.X" + ``` + +- **`simd`** + When the `simd` feature is enabled, `FromZeroes`, `FromBytes`, and + `AsBytes` impls are emitted for all stable SIMD types which exist on the + target platform. Note that the layout of SIMD types is not yet stabilized, + so these impls may be removed in the future if layout changes make them + invalid. For more information, see the Unsafe Code Guidelines Reference + page on the [layout of packed SIMD vectors][simd-layout]. + +- **`simd-nightly`** + Enables the `simd` feature and adds support for SIMD types which are only + available on nightly. 
Since these types are unstable, support for any type + may be removed at any point in the future. + +[simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html + +## Security Ethos + +Zerocopy is expressly designed for use in security-critical contexts. We +strive to ensure that zerocopy code is sound under Rust's current +memory model, and *any future memory model*. We ensure this by: +- **...not 'guessing' about Rust's semantics.** + We annotate `unsafe` code with a precise rationale for its soundness that + cites a relevant section of Rust's official documentation. When Rust's + documented semantics are unclear, we work with the Rust Operational + Semantics Team to clarify Rust's documentation. +- **...rigorously testing our implementation.** + We run tests using [Miri], ensuring that zerocopy is sound across a wide + array of supported target platforms of varying endianness and pointer + width, and across both current and experimental memory models of Rust. +- **...formally proving the correctness of our implementation.** + We apply formal verification tools like [Kani][kani] to prove zerocopy's + correctness. + +For more information, see our full [soundness policy]. + +[Miri]: https://github.com/rust-lang/miri +[Kani]: https://github.com/model-checking/kani +[soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness + +## Relationship to Project Safe Transmute + +[Project Safe Transmute] is an official initiative of the Rust Project to +develop language-level support for safer transmutation. The Project consults +with crates like zerocopy to identify aspects of safer transmutation that +would benefit from compiler support, and has developed an [experimental, +compiler-supported analysis][mcp-transmutability] which determines whether, +for a given type, any value of that type may be soundly transmuted into +another type. 
Once this functionality is sufficiently mature, zerocopy +intends to replace its internal transmutability analysis (implemented by our +custom derives) with the compiler-supported one. This change will likely be +an implementation detail that is invisible to zerocopy's users. + +Project Safe Transmute will not replace the need for most of zerocopy's +higher-level abstractions. The experimental compiler analysis is a tool for +checking the soundness of `unsafe` code, not a tool to avoid writing +`unsafe` code altogether. For the foreseeable future, crates like zerocopy +will still be required in order to provide higher-level abstractions on top +of the building block provided by Project Safe Transmute. + +[Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html +[mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411 + +## MSRV + +See our [MSRV policy]. + +[MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv + +## Changelog + +Zerocopy uses [GitHub Releases]. + +[GitHub Releases]: https://github.com/google/zerocopy/releases + +## Disclaimer + +Disclaimer: Zerocopy is not an officially supported Google product. diff --git a/src/rust/vendor/zerocopy/cargo.sh b/src/rust/vendor/zerocopy/cargo.sh new file mode 100755 index 000000000..f72e898db --- /dev/null +++ b/src/rust/vendor/zerocopy/cargo.sh @@ -0,0 +1,120 @@ +#!/bin/bash +# +# Copyright 2023 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +# This script is a thin wrapper around Cargo that provides human-friendly +# toolchain names which are automatically translated to the toolchain versions +# we have pinned in CI. +# +# cargo.sh --version # looks up the version for the named toolchain +# cargo.sh + [...] 
# runs cargo commands with the named toolchain +# cargo.sh +all [...] # runs cargo commands with each toolchain +# +# The meta-toolchain "all" instructs this script to run the provided command +# once for each toolchain (msrv, stable, nightly). +# +# A common task that is especially annoying to perform by hand is to update +# trybuild's stderr files. Using this script: +# +# TRYBUILD=overwrite ./cargo.sh +all test --workspace + +set -eo pipefail + +function print-usage-and-exit { + echo "Usage:" >&2 + echo " $0 --version " >&2 + echo " $0 + [...]" >&2 + echo " $0 +all [...]" >&2 + exit 1 +} + +[[ $# -gt 0 ]] || print-usage-and-exit + +function pkg-meta { + # NOTE(#547): We set `CARGO_TARGET_DIR` here because `cargo metadata` + # sometimes causes the `cargo-metadata` crate to be rebuilt from source using + # the default toolchain. This has the effect of clobbering any existing build + # artifacts from whatever toolchain the user has specified (e.g., `+nightly`), + # causing the subsequent `cargo` invocation to rebuild unnecessarily. By + # specifying a separate build directory here, we ensure that this never + # clobbers the build artifacts used by the later `cargo` invocation. 
+ CARGO_TARGET_DIR=target/cargo-sh cargo metadata --format-version 1 | jq -r ".packages[] | select(.name == \"zerocopy\").$1" +} + +function lookup-version { + VERSION="$1" + case "$VERSION" in + msrv) + pkg-meta rust_version + ;; + stable) + pkg-meta 'metadata.ci."pinned-stable"' + ;; + nightly) + pkg-meta 'metadata.ci."pinned-nightly"' + ;; + *) + echo "Unrecognized toolchain name: '$VERSION' (options are 'msrv', 'stable', 'nightly')" >&2 + return 1 + ;; + esac +} + +function get-rustflags { + [ "$1" == nightly ] && echo "--cfg __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS" +} + +function prompt { + PROMPT="$1" + YES="$2" + while true; do + read -p "$PROMPT " yn + case "$yn" in + [Yy]) $YES; return $?; ;; + [Nn]) return 1; ;; + *) break; ;; + esac + done +} + +case "$1" in + # cargo.sh --version + --version) + [[ $# -eq 2 ]] || print-usage-and-exit + lookup-version "$2" + ;; + # cargo.sh +all [...] + +all) + echo "[cargo.sh] warning: running the same command for each toolchain (msrv, stable, nightly)" >&2 + for toolchain in msrv stable nightly; do + echo "[cargo.sh] running with toolchain: $toolchain" >&2 + $0 "+$toolchain" ${@:2} + done + exit 0 + ;; + # cargo.sh + [...] + +*) + TOOLCHAIN="$(lookup-version ${1:1})" + + cargo "+$TOOLCHAIN" version &>/dev/null && \ + rustup "+$TOOLCHAIN" component list | grep '^rust-src (installed)$' >/dev/null || { + echo "[cargo.sh] missing either toolchain '$TOOLCHAIN' or component 'rust-src'" >&2 + # If we're running in a GitHub action, then it's better to bail than to + # hang waiting for input we're never going to get. + [ -z ${GITHUB_RUN_ID+x} ] || exit 1 + prompt "[cargo.sh] would you like to install toolchain '$TOOLCHAIN' and component 'rust-src' via 'rustup'?" 
\ + "rustup toolchain install $TOOLCHAIN -c rust-src" + } || exit 1 + + RUSTFLAGS="$(get-rustflags ${1:1}) $RUSTFLAGS" cargo "+$TOOLCHAIN" ${@:2} + ;; + *) + print-usage-and-exit + ;; +esac diff --git a/src/rust/vendor/zerocopy/clippy.toml b/src/rust/vendor/zerocopy/clippy.toml new file mode 100644 index 000000000..9c1140643 --- /dev/null +++ b/src/rust/vendor/zerocopy/clippy.toml @@ -0,0 +1,10 @@ +# Copyright 2023 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +accept-comment-above-statement = true +accept-comment-above-attributes = true diff --git a/src/rust/vendor/zerocopy/generate-readme.sh b/src/rust/vendor/zerocopy/generate-readme.sh new file mode 100755 index 000000000..be0dc929a --- /dev/null +++ b/src/rust/vendor/zerocopy/generate-readme.sh @@ -0,0 +1,50 @@ +#!/bin/bash +# +# Copyright 2022 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +set -eo pipefail + +COPYRIGHT_HEADER=$(mktemp) +BODY=$(mktemp) +DISCLAIMER_FOOTER=$(mktemp) + +cat > $COPYRIGHT_HEADER <<'EOF' + + +EOF + +# This uses the `cargo readme` tool, which you can install via `cargo install +# cargo-readme --version 3.2.0`. +# +# The `sed` command is used to strip code links like: +# +# /// Here is a link to [`Vec`]. +# +# These links don't work in a Markdown file, and so we remove the `[` and `]` +# characters to convert them to non-link code snippets. +cargo readme --no-license | sed 's/\[\(`[^`]*`\)]/\1/g' > $BODY + +cat > $DISCLAIMER_FOOTER <<'EOF' + +## Disclaimer + +Disclaimer: Zerocopy is not an officially supported Google product. 
+EOF + +cat $COPYRIGHT_HEADER $BODY $DISCLAIMER_FOOTER diff --git a/src/rust/vendor/zerocopy/rustfmt.toml b/src/rust/vendor/zerocopy/rustfmt.toml new file mode 100644 index 000000000..c967afe9c --- /dev/null +++ b/src/rust/vendor/zerocopy/rustfmt.toml @@ -0,0 +1,19 @@ +# Copyright 2022 The Fuchsia Authors +# +# Licensed under a BSD-style license , Apache License, Version 2.0 +# , or the MIT +# license , at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +edition = "2021" + +# The "Default" setting has a heuristic which splits lines too aggresively. +# We are willing to revisit this setting in future versions of rustfmt. +# Bugs: +# * https://github.com/rust-lang/rustfmt/issues/3119 +# * https://github.com/rust-lang/rustfmt/issues/3120 +use_small_heuristics = "Max" + +# Prevent carriage returns +newline_style = "Unix" diff --git a/src/rust/vendor/zerocopy/src/byteorder.rs b/src/rust/vendor/zerocopy/src/byteorder.rs new file mode 100644 index 000000000..376c98104 --- /dev/null +++ b/src/rust/vendor/zerocopy/src/byteorder.rs @@ -0,0 +1,1071 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! Byte order-aware numeric primitives. +//! +//! This module contains equivalents of the native multi-byte integer types with +//! no alignment requirement and supporting byte order conversions. +//! +//! For each native multi-byte integer type - `u16`, `i16`, `u32`, etc - and +//! floating point type - `f32` and `f64` - an equivalent type is defined by +//! this module - [`U16`], [`I16`], [`U32`], [`F64`], etc. Unlike their native +//! counterparts, these types have alignment 1, and take a type parameter +//! specifying the byte order in which the bytes are stored in memory. Each type +//! 
implements the [`FromBytes`], [`AsBytes`], and [`Unaligned`] traits. +//! +//! These two properties, taken together, make these types useful for defining +//! data structures whose memory layout matches a wire format such as that of a +//! network protocol or a file format. Such formats often have multi-byte values +//! at offsets that do not respect the alignment requirements of the equivalent +//! native types, and stored in a byte order not necessarily the same as that of +//! the target platform. +//! +//! Type aliases are provided for common byte orders in the [`big_endian`], +//! [`little_endian`], [`network_endian`], and [`native_endian`] submodules. +//! +//! # Example +//! +//! One use of these types is for representing network packet formats, such as +//! UDP: +//! +//! ```rust,edition2021 +//! # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them +//! use zerocopy::{AsBytes, ByteSlice, FromBytes, FromZeroes, Ref, Unaligned}; +//! use zerocopy::byteorder::network_endian::U16; +//! +//! #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] +//! #[repr(C)] +//! struct UdpHeader { +//! src_port: U16, +//! dst_port: U16, +//! length: U16, +//! checksum: U16, +//! } +//! +//! struct UdpPacket { +//! header: Ref, +//! body: B, +//! } +//! +//! impl UdpPacket { +//! fn parse(bytes: B) -> Option> { +//! let (header, body) = Ref::new_from_prefix(bytes)?; +//! Some(UdpPacket { header, body }) +//! } +//! +//! fn src_port(&self) -> u16 { +//! self.header.src_port.get() +//! } +//! +//! // more getters... +//! } +//! # } +//! ``` + +use core::{ + convert::{TryFrom, TryInto}, + fmt::{self, Binary, Debug, Display, Formatter, LowerHex, Octal, UpperHex}, + marker::PhantomData, + num::TryFromIntError, +}; + +// We don't reexport `WriteBytesExt` or `ReadBytesExt` because those are only +// available with the `std` feature enabled, and zerocopy is `no_std` by +// default. 
+pub use ::byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian, NetworkEndian, BE, LE}; + +use super::*; + +macro_rules! impl_fmt_trait { + ($name:ident, $native:ident, $trait:ident) => { + impl $trait for $name { + #[inline(always)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + $trait::fmt(&self.get(), f) + } + } + }; +} + +macro_rules! impl_fmt_traits { + ($name:ident, $native:ident, "floating point number") => { + impl_fmt_trait!($name, $native, Display); + }; + ($name:ident, $native:ident, "unsigned integer") => { + impl_fmt_traits!($name, $native, @all_types); + }; + ($name:ident, $native:ident, "signed integer") => { + impl_fmt_traits!($name, $native, @all_types); + }; + ($name:ident, $native:ident, @all_types) => { + impl_fmt_trait!($name, $native, Display); + impl_fmt_trait!($name, $native, Octal); + impl_fmt_trait!($name, $native, LowerHex); + impl_fmt_trait!($name, $native, UpperHex); + impl_fmt_trait!($name, $native, Binary); + }; +} + +macro_rules! impl_ops_traits { + ($name:ident, $native:ident, "floating point number") => { + impl_ops_traits!($name, $native, @all_types); + impl_ops_traits!($name, $native, @signed_integer_floating_point); + }; + ($name:ident, $native:ident, "unsigned integer") => { + impl_ops_traits!($name, $native, @signed_unsigned_integer); + impl_ops_traits!($name, $native, @all_types); + }; + ($name:ident, $native:ident, "signed integer") => { + impl_ops_traits!($name, $native, @signed_unsigned_integer); + impl_ops_traits!($name, $native, @signed_integer_floating_point); + impl_ops_traits!($name, $native, @all_types); + }; + ($name:ident, $native:ident, @signed_unsigned_integer) => { + impl_ops_traits!(@without_byteorder_swap $name, $native, BitAnd, bitand, BitAndAssign, bitand_assign); + impl_ops_traits!(@without_byteorder_swap $name, $native, BitOr, bitor, BitOrAssign, bitor_assign); + impl_ops_traits!(@without_byteorder_swap $name, $native, BitXor, bitxor, BitXorAssign, bitxor_assign); + 
impl_ops_traits!(@with_byteorder_swap $name, $native, Shl, shl, ShlAssign, shl_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Shr, shr, ShrAssign, shr_assign); + + impl core::ops::Not for $name { + type Output = $name; + + #[inline(always)] + fn not(self) -> $name { + let self_native = $native::from_ne_bytes(self.0); + $name((!self_native).to_ne_bytes(), PhantomData) + } + } + }; + ($name:ident, $native:ident, @signed_integer_floating_point) => { + impl core::ops::Neg for $name { + type Output = $name; + + #[inline(always)] + fn neg(self) -> $name { + let self_native: $native = self.get(); + #[allow(clippy::arithmetic_side_effects)] + $name::::new(-self_native) + } + } + }; + ($name:ident, $native:ident, @all_types) => { + impl_ops_traits!(@with_byteorder_swap $name, $native, Add, add, AddAssign, add_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Div, div, DivAssign, div_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Mul, mul, MulAssign, mul_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Rem, rem, RemAssign, rem_assign); + impl_ops_traits!(@with_byteorder_swap $name, $native, Sub, sub, SubAssign, sub_assign); + }; + (@with_byteorder_swap $name:ident, $native:ident, $trait:ident, $method:ident, $trait_assign:ident, $method_assign:ident) => { + impl core::ops::$trait for $name { + type Output = $name; + + #[inline(always)] + fn $method(self, rhs: $name) -> $name { + let self_native: $native = self.get(); + let rhs_native: $native = rhs.get(); + let result_native = core::ops::$trait::$method(self_native, rhs_native); + $name::::new(result_native) + } + } + + impl core::ops::$trait_assign for $name { + #[inline(always)] + fn $method_assign(&mut self, rhs: $name) { + *self = core::ops::$trait::$method(*self, rhs); + } + } + }; + // Implement traits in terms of the same trait on the native type, but + // without performing a byte order swap. 
This only works for bitwise + // operations like `&`, `|`, etc. + (@without_byteorder_swap $name:ident, $native:ident, $trait:ident, $method:ident, $trait_assign:ident, $method_assign:ident) => { + impl core::ops::$trait for $name { + type Output = $name; + + #[inline(always)] + fn $method(self, rhs: $name) -> $name { + let self_native = $native::from_ne_bytes(self.0); + let rhs_native = $native::from_ne_bytes(rhs.0); + let result_native = core::ops::$trait::$method(self_native, rhs_native); + $name(result_native.to_ne_bytes(), PhantomData) + } + } + + impl core::ops::$trait_assign for $name { + #[inline(always)] + fn $method_assign(&mut self, rhs: $name) { + *self = core::ops::$trait::$method(*self, rhs); + } + } + }; +} + +macro_rules! doc_comment { + ($x:expr, $($tt:tt)*) => { + #[doc = $x] + $($tt)* + }; +} + +macro_rules! define_max_value_constant { + ($name:ident, $bytes:expr, "unsigned integer") => { + /// The maximum value. + /// + /// This constant should be preferred to constructing a new value using + /// `new`, as `new` may perform an endianness swap depending on the + /// endianness `O` and the endianness of the platform. + pub const MAX_VALUE: $name = $name([0xFFu8; $bytes], PhantomData); + }; + // We don't provide maximum and minimum value constants for signed values + // and floats because there's no way to do it generically - it would require + // a different value depending on the value of the `ByteOrder` type + // parameter. Currently, one workaround would be to provide implementations + // for concrete implementations of that trait. In the long term, if we are + // ever able to make the `new` constructor a const fn, we could use that + // instead. + ($name:ident, $bytes:expr, "signed integer") => {}; + ($name:ident, $bytes:expr, "floating point number") => {}; +} + +macro_rules! 
define_type { + ($article:ident, + $name:ident, + $native:ident, + $bits:expr, + $bytes:expr, + $read_method:ident, + $write_method:ident, + $number_kind:tt, + [$($larger_native:ty),*], + [$($larger_native_try:ty),*], + [$($larger_byteorder:ident),*], + [$($larger_byteorder_try:ident),*]) => { + doc_comment! { + concat!("A ", stringify!($bits), "-bit ", $number_kind, + " stored in a given byte order. + +`", stringify!($name), "` is like the native `", stringify!($native), "` type with +two major differences: First, it has no alignment requirement (its alignment is 1). +Second, the endianness of its memory layout is given by the type parameter `O`, +which can be any type which implements [`ByteOrder`]. In particular, this refers +to [`BigEndian`], [`LittleEndian`], [`NativeEndian`], and [`NetworkEndian`]. + +", stringify!($article), " `", stringify!($name), "` can be constructed using +the [`new`] method, and its contained value can be obtained as a native +`",stringify!($native), "` using the [`get`] method, or updated in place with +the [`set`] method. In all cases, if the endianness `O` is not the same as the +endianness of the current platform, an endianness swap will be performed in +order to uphold the invariants that a) the layout of `", stringify!($name), "` +has endianness `O` and that, b) the layout of `", stringify!($native), "` has +the platform's native endianness. + +`", stringify!($name), "` implements [`FromBytes`], [`AsBytes`], and [`Unaligned`], +making it useful for parsing and serialization. See the module documentation for an +example of how it can be used for parsing UDP packets. 
+ +[`new`]: crate::byteorder::", stringify!($name), "::new +[`get`]: crate::byteorder::", stringify!($name), "::get +[`set`]: crate::byteorder::", stringify!($name), "::set +[`FromBytes`]: crate::FromBytes +[`AsBytes`]: crate::AsBytes +[`Unaligned`]: crate::Unaligned"), + #[derive(Copy, Clone, Eq, PartialEq, Hash)] + #[cfg_attr(any(feature = "derive", test), derive(KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned))] + #[repr(transparent)] + pub struct $name([u8; $bytes], PhantomData); + } + + #[cfg(not(any(feature = "derive", test)))] + impl_known_layout!(O => $name); + + safety_comment! { + /// SAFETY: + /// `$name` is `repr(transparent)`, and so it has the same layout + /// as its only non-zero field, which is a `u8` array. `u8` arrays + /// are `FromZeroes`, `FromBytes`, `AsBytes`, and `Unaligned`. + impl_or_verify!(O => FromZeroes for $name); + impl_or_verify!(O => FromBytes for $name); + impl_or_verify!(O => AsBytes for $name); + impl_or_verify!(O => Unaligned for $name); + } + + impl Default for $name { + #[inline(always)] + fn default() -> $name { + $name::ZERO + } + } + + impl $name { + /// The value zero. + /// + /// This constant should be preferred to constructing a new value + /// using `new`, as `new` may perform an endianness swap depending + /// on the endianness and platform. + pub const ZERO: $name = $name([0u8; $bytes], PhantomData); + + define_max_value_constant!($name, $bytes, $number_kind); + + /// Constructs a new value from bytes which are already in the + /// endianness `O`. + #[inline(always)] + pub const fn from_bytes(bytes: [u8; $bytes]) -> $name { + $name(bytes, PhantomData) + } + } + + impl $name { + // TODO(joshlf): Make these const fns if the `ByteOrder` methods + // ever become const fns. + + /// Constructs a new value, possibly performing an endianness swap + /// to guarantee that the returned value has endianness `O`. 
+ #[inline(always)] + pub fn new(n: $native) -> $name { + let mut out = $name::default(); + O::$write_method(&mut out.0[..], n); + out + } + + /// Returns the value as a primitive type, possibly performing an + /// endianness swap to guarantee that the return value has the + /// endianness of the native platform. + #[inline(always)] + pub fn get(self) -> $native { + O::$read_method(&self.0[..]) + } + + /// Updates the value in place as a primitive type, possibly + /// performing an endianness swap to guarantee that the stored value + /// has the endianness `O`. + #[inline(always)] + pub fn set(&mut self, n: $native) { + O::$write_method(&mut self.0[..], n); + } + } + + // The reasoning behind which traits to implement here is to only + // implement traits which won't cause inference issues. Notably, + // comparison traits like PartialEq and PartialOrd tend to cause + // inference issues. + + impl From<$name> for [u8; $bytes] { + #[inline(always)] + fn from(x: $name) -> [u8; $bytes] { + x.0 + } + } + + impl From<[u8; $bytes]> for $name { + #[inline(always)] + fn from(bytes: [u8; $bytes]) -> $name { + $name(bytes, PhantomData) + } + } + + impl From<$name> for $native { + #[inline(always)] + fn from(x: $name) -> $native { + x.get() + } + } + + impl From<$native> for $name { + #[inline(always)] + fn from(x: $native) -> $name { + $name::new(x) + } + } + + $( + impl From<$name> for $larger_native { + #[inline(always)] + fn from(x: $name) -> $larger_native { + x.get().into() + } + } + )* + + $( + impl TryFrom<$larger_native_try> for $name { + type Error = TryFromIntError; + #[inline(always)] + fn try_from(x: $larger_native_try) -> Result<$name, TryFromIntError> { + $native::try_from(x).map($name::new) + } + } + )* + + $( + impl From<$name> for $larger_byteorder

{ + #[inline(always)] + fn from(x: $name) -> $larger_byteorder

{ + $larger_byteorder::new(x.get().into()) + } + } + )* + + $( + impl TryFrom<$larger_byteorder_try

> for $name { + type Error = TryFromIntError; + #[inline(always)] + fn try_from(x: $larger_byteorder_try

) -> Result<$name, TryFromIntError> { + x.get().try_into().map($name::new) + } + } + )* + + impl AsRef<[u8; $bytes]> for $name { + #[inline(always)] + fn as_ref(&self) -> &[u8; $bytes] { + &self.0 + } + } + + impl AsMut<[u8; $bytes]> for $name { + #[inline(always)] + fn as_mut(&mut self) -> &mut [u8; $bytes] { + &mut self.0 + } + } + + impl PartialEq<$name> for [u8; $bytes] { + #[inline(always)] + fn eq(&self, other: &$name) -> bool { + self.eq(&other.0) + } + } + + impl PartialEq<[u8; $bytes]> for $name { + #[inline(always)] + fn eq(&self, other: &[u8; $bytes]) -> bool { + self.0.eq(other) + } + } + + impl_fmt_traits!($name, $native, $number_kind); + impl_ops_traits!($name, $native, $number_kind); + + impl Debug for $name { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + // This results in a format like "U16(42)". + f.debug_tuple(stringify!($name)).field(&self.get()).finish() + } + } + }; +} + +define_type!( + A, + U16, + u16, + 16, + 2, + read_u16, + write_u16, + "unsigned integer", + [u32, u64, u128, usize], + [u32, u64, u128, usize], + [U32, U64, U128], + [U32, U64, U128] +); +define_type!( + A, + U32, + u32, + 32, + 4, + read_u32, + write_u32, + "unsigned integer", + [u64, u128], + [u64, u128], + [U64, U128], + [U64, U128] +); +define_type!( + A, + U64, + u64, + 64, + 8, + read_u64, + write_u64, + "unsigned integer", + [u128], + [u128], + [U128], + [U128] +); +define_type!(A, U128, u128, 128, 16, read_u128, write_u128, "unsigned integer", [], [], [], []); +define_type!( + An, + I16, + i16, + 16, + 2, + read_i16, + write_i16, + "signed integer", + [i32, i64, i128, isize], + [i32, i64, i128, isize], + [I32, I64, I128], + [I32, I64, I128] +); +define_type!( + An, + I32, + i32, + 32, + 4, + read_i32, + write_i32, + "signed integer", + [i64, i128], + [i64, i128], + [I64, I128], + [I64, I128] +); +define_type!( + An, + I64, + i64, + 64, + 8, + read_i64, + write_i64, + "signed integer", + [i128], + [i128], + [I128], + [I128] +); 
+define_type!(An, I128, i128, 128, 16, read_i128, write_i128, "signed integer", [], [], [], []); +define_type!( + An, + F32, + f32, + 32, + 4, + read_f32, + write_f32, + "floating point number", + [f64], + [], + [F64], + [] +); +define_type!(An, F64, f64, 64, 8, read_f64, write_f64, "floating point number", [], [], [], []); + +macro_rules! module { + ($name:ident, $trait:ident, $endianness_str:expr) => { + /// Numeric primitives stored in + #[doc = $endianness_str] + /// byte order. + pub mod $name { + use byteorder::$trait; + + module!(@ty U16, $trait, "16-bit unsigned integer", $endianness_str); + module!(@ty U32, $trait, "32-bit unsigned integer", $endianness_str); + module!(@ty U64, $trait, "64-bit unsigned integer", $endianness_str); + module!(@ty U128, $trait, "128-bit unsigned integer", $endianness_str); + module!(@ty I16, $trait, "16-bit signed integer", $endianness_str); + module!(@ty I32, $trait, "32-bit signed integer", $endianness_str); + module!(@ty I64, $trait, "64-bit signed integer", $endianness_str); + module!(@ty I128, $trait, "128-bit signed integer", $endianness_str); + module!(@ty F32, $trait, "32-bit floating point number", $endianness_str); + module!(@ty F64, $trait, "64-bit floating point number", $endianness_str); + } + }; + (@ty $ty:ident, $trait:ident, $desc_str:expr, $endianness_str:expr) => { + /// A + #[doc = $desc_str] + /// stored in + #[doc = $endianness_str] + /// byte order. 
+ pub type $ty = crate::byteorder::$ty<$trait>; + }; +} + +module!(big_endian, BigEndian, "big-endian"); +module!(little_endian, LittleEndian, "little-endian"); +module!(network_endian, NetworkEndian, "network-endian"); +module!(native_endian, NativeEndian, "native-endian"); + +#[cfg(any(test, kani))] +mod tests { + use ::byteorder::NativeEndian; + + use { + super::*, + crate::{AsBytes, FromBytes, Unaligned}, + }; + + #[cfg(not(kani))] + mod compatibility { + pub(super) use rand::{ + distributions::{Distribution, Standard}, + rngs::SmallRng, + Rng, SeedableRng, + }; + + pub(crate) trait Arbitrary {} + + impl Arbitrary for T {} + } + + #[cfg(kani)] + mod compatibility { + pub(crate) use kani::Arbitrary; + + pub(crate) struct SmallRng; + + impl SmallRng { + pub(crate) fn seed_from_u64(_state: u64) -> Self { + Self + } + } + + pub(crate) trait Rng { + fn sample>(&mut self, _distr: D) -> T + where + T: Arbitrary, + { + kani::any() + } + } + + impl Rng for SmallRng {} + + pub(crate) trait Distribution {} + impl Distribution for U {} + + pub(crate) struct Standard; + } + + use compatibility::*; + + // A native integer type (u16, i32, etc). + #[cfg_attr(kani, allow(dead_code))] + trait Native: Arbitrary + FromBytes + AsBytes + Copy + PartialEq + Debug { + const ZERO: Self; + const MAX_VALUE: Self; + + type Distribution: Distribution; + const DIST: Self::Distribution; + + fn rand(rng: &mut R) -> Self { + rng.sample(Self::DIST) + } + + fn checked_add(self, rhs: Self) -> Option; + fn checked_div(self, rhs: Self) -> Option; + fn checked_mul(self, rhs: Self) -> Option; + fn checked_rem(self, rhs: Self) -> Option; + fn checked_sub(self, rhs: Self) -> Option; + fn checked_shl(self, rhs: Self) -> Option; + fn checked_shr(self, rhs: Self) -> Option; + + fn is_nan(self) -> bool; + + /// For `f32` and `f64`, NaN values are not considered equal to + /// themselves. This method is like `assert_eq!`, but it treats NaN + /// values as equal. 
+ fn assert_eq_or_nan(self, other: Self) { + let slf = (!self.is_nan()).then(|| self); + let other = (!other.is_nan()).then(|| other); + assert_eq!(slf, other); + } + } + + trait ByteArray: + FromBytes + AsBytes + Copy + AsRef<[u8]> + AsMut<[u8]> + Debug + Default + Eq + { + /// Invert the order of the bytes in the array. + fn invert(self) -> Self; + } + + trait ByteOrderType: FromBytes + AsBytes + Unaligned + Copy + Eq + Debug { + type Native: Native; + type ByteArray: ByteArray; + + const ZERO: Self; + + fn new(native: Self::Native) -> Self; + fn get(self) -> Self::Native; + fn set(&mut self, native: Self::Native); + fn from_bytes(bytes: Self::ByteArray) -> Self; + fn into_bytes(self) -> Self::ByteArray; + + /// For `f32` and `f64`, NaN values are not considered equal to + /// themselves. This method is like `assert_eq!`, but it treats NaN + /// values as equal. + fn assert_eq_or_nan(self, other: Self) { + let slf = (!self.get().is_nan()).then(|| self); + let other = (!other.get().is_nan()).then(|| other); + assert_eq!(slf, other); + } + } + + trait ByteOrderTypeUnsigned: ByteOrderType { + const MAX_VALUE: Self; + } + + macro_rules! impl_byte_array { + ($bytes:expr) => { + impl ByteArray for [u8; $bytes] { + fn invert(mut self) -> [u8; $bytes] { + self.reverse(); + self + } + } + }; + } + + impl_byte_array!(2); + impl_byte_array!(4); + impl_byte_array!(8); + impl_byte_array!(16); + + macro_rules! impl_byte_order_type_unsigned { + ($name:ident, unsigned) => { + impl ByteOrderTypeUnsigned for $name { + const MAX_VALUE: $name = $name::MAX_VALUE; + } + }; + ($name:ident, signed) => {}; + } + + macro_rules! impl_traits { + ($name:ident, $native:ident, $bytes:expr, $sign:ident $(, @$float:ident)?) => { + impl Native for $native { + // For some types, `0 as $native` is required (for example, when + // `$native` is a floating-point type; `0` is an integer), but + // for other types, it's a trivial cast. In all cases, Clippy + // thinks it's dangerous. 
+ #[allow(trivial_numeric_casts, clippy::as_conversions)] + const ZERO: $native = 0 as $native; + const MAX_VALUE: $native = $native::MAX; + + type Distribution = Standard; + const DIST: Standard = Standard; + + impl_traits!(@float_dependent_methods $(@$float)?); + } + + impl ByteOrderType for $name { + type Native = $native; + type ByteArray = [u8; $bytes]; + + const ZERO: $name = $name::ZERO; + + fn new(native: $native) -> $name { + $name::new(native) + } + + fn get(self) -> $native { + $name::get(self) + } + + fn set(&mut self, native: $native) { + $name::set(self, native) + } + + fn from_bytes(bytes: [u8; $bytes]) -> $name { + $name::from(bytes) + } + + fn into_bytes(self) -> [u8; $bytes] { + <[u8; $bytes]>::from(self) + } + } + + impl_byte_order_type_unsigned!($name, $sign); + }; + (@float_dependent_methods) => { + fn checked_add(self, rhs: Self) -> Option { self.checked_add(rhs) } + fn checked_div(self, rhs: Self) -> Option { self.checked_div(rhs) } + fn checked_mul(self, rhs: Self) -> Option { self.checked_mul(rhs) } + fn checked_rem(self, rhs: Self) -> Option { self.checked_rem(rhs) } + fn checked_sub(self, rhs: Self) -> Option { self.checked_sub(rhs) } + fn checked_shl(self, rhs: Self) -> Option { self.checked_shl(rhs.try_into().unwrap_or(u32::MAX)) } + fn checked_shr(self, rhs: Self) -> Option { self.checked_shr(rhs.try_into().unwrap_or(u32::MAX)) } + fn is_nan(self) -> bool { false } + }; + (@float_dependent_methods @float) => { + fn checked_add(self, rhs: Self) -> Option { Some(self + rhs) } + fn checked_div(self, rhs: Self) -> Option { Some(self / rhs) } + fn checked_mul(self, rhs: Self) -> Option { Some(self * rhs) } + fn checked_rem(self, rhs: Self) -> Option { Some(self % rhs) } + fn checked_sub(self, rhs: Self) -> Option { Some(self - rhs) } + fn checked_shl(self, _rhs: Self) -> Option { unimplemented!() } + fn checked_shr(self, _rhs: Self) -> Option { unimplemented!() } + fn is_nan(self) -> bool { self.is_nan() } + }; + } + + impl_traits!(U16, 
u16, 2, unsigned); + impl_traits!(U32, u32, 4, unsigned); + impl_traits!(U64, u64, 8, unsigned); + impl_traits!(U128, u128, 16, unsigned); + impl_traits!(I16, i16, 2, signed); + impl_traits!(I32, i32, 4, signed); + impl_traits!(I64, i64, 8, signed); + impl_traits!(I128, i128, 16, signed); + impl_traits!(F32, f32, 4, signed, @float); + impl_traits!(F64, f64, 8, signed, @float); + + macro_rules! call_for_unsigned_types { + ($fn:ident, $byteorder:ident) => { + $fn::>(); + $fn::>(); + $fn::>(); + $fn::>(); + }; + } + + macro_rules! call_for_signed_types { + ($fn:ident, $byteorder:ident) => { + $fn::>(); + $fn::>(); + $fn::>(); + $fn::>(); + }; + } + + macro_rules! call_for_float_types { + ($fn:ident, $byteorder:ident) => { + $fn::>(); + $fn::>(); + }; + } + + macro_rules! call_for_all_types { + ($fn:ident, $byteorder:ident) => { + call_for_unsigned_types!($fn, $byteorder); + call_for_signed_types!($fn, $byteorder); + call_for_float_types!($fn, $byteorder); + }; + } + + #[cfg(target_endian = "big")] + type NonNativeEndian = LittleEndian; + #[cfg(target_endian = "little")] + type NonNativeEndian = BigEndian; + + // We use a `u64` seed so that we can use `SeedableRng::seed_from_u64`. + // `SmallRng`'s `SeedableRng::Seed` differs by platform, so if we wanted to + // call `SeedableRng::from_seed`, which takes a `Seed`, we would need + // conditional compilation by `target_pointer_width`. + const RNG_SEED: u64 = 0x7A03CAE2F32B5B8F; + + const RAND_ITERS: usize = if cfg!(any(miri, kani)) { + // The tests below which use this constant used to take a very long time + // on Miri, which slows down local development and CI jobs. We're not + // using Miri to check for the correctness of our code, but rather its + // soundness, and at least in the context of these particular tests, a + // single loop iteration is just as good for surfacing UB as multiple + // iterations are. 
+ // + // As of the writing of this comment, here's one set of measurements: + // + // $ # RAND_ITERS == 1 + // $ cargo miri test -- -Z unstable-options --report-time endian + // test byteorder::tests::test_native_endian ... ok <0.049s> + // test byteorder::tests::test_non_native_endian ... ok <0.061s> + // + // $ # RAND_ITERS == 1024 + // $ cargo miri test -- -Z unstable-options --report-time endian + // test byteorder::tests::test_native_endian ... ok <25.716s> + // test byteorder::tests::test_non_native_endian ... ok <38.127s> + 1 + } else { + 1024 + }; + + #[cfg_attr(test, test)] + #[cfg_attr(kani, kani::proof)] + fn test_zero() { + fn test_zero() { + assert_eq!(T::ZERO.get(), T::Native::ZERO); + } + + call_for_all_types!(test_zero, NativeEndian); + call_for_all_types!(test_zero, NonNativeEndian); + } + + #[cfg_attr(test, test)] + #[cfg_attr(kani, kani::proof)] + fn test_max_value() { + fn test_max_value() { + assert_eq!(T::MAX_VALUE.get(), T::Native::MAX_VALUE); + } + + call_for_unsigned_types!(test_max_value, NativeEndian); + call_for_unsigned_types!(test_max_value, NonNativeEndian); + } + + #[cfg_attr(test, test)] + #[cfg_attr(kani, kani::proof)] + fn test_endian() { + fn test(invert: bool) { + let mut r = SmallRng::seed_from_u64(RNG_SEED); + for _ in 0..RAND_ITERS { + let native = T::Native::rand(&mut r); + let mut bytes = T::ByteArray::default(); + bytes.as_bytes_mut().copy_from_slice(native.as_bytes()); + if invert { + bytes = bytes.invert(); + } + let mut from_native = T::new(native); + let from_bytes = T::from_bytes(bytes); + + from_native.assert_eq_or_nan(from_bytes); + from_native.get().assert_eq_or_nan(native); + from_bytes.get().assert_eq_or_nan(native); + + assert_eq!(from_native.into_bytes(), bytes); + assert_eq!(from_bytes.into_bytes(), bytes); + + let updated = T::Native::rand(&mut r); + from_native.set(updated); + from_native.get().assert_eq_or_nan(updated); + } + } + + fn test_native() { + test::(false); + } + + fn test_non_native() { + 
test::(true); + } + + call_for_all_types!(test_native, NativeEndian); + call_for_all_types!(test_non_native, NonNativeEndian); + } + + #[test] + fn test_ops_impls() { + // Test implementations of traits in `core::ops`. Some of these are + // fairly banal, but some are optimized to perform the operation without + // swapping byte order (namely, bit-wise operations which are identical + // regardless of byte order). These are important to test, and while + // we're testing those anyway, it's trivial to test all of the impls. + + fn test(op: F, op_native: G, op_native_checked: Option) + where + T: ByteOrderType, + F: Fn(T, T) -> T, + G: Fn(T::Native, T::Native) -> T::Native, + H: Fn(T::Native, T::Native) -> Option, + { + let mut r = SmallRng::seed_from_u64(RNG_SEED); + for _ in 0..RAND_ITERS { + let n0 = T::Native::rand(&mut r); + let n1 = T::Native::rand(&mut r); + let t0 = T::new(n0); + let t1 = T::new(n1); + + // If this operation would overflow/underflow, skip it rather + // than attempt to catch and recover from panics. + if matches!(&op_native_checked, Some(checked) if checked(n0, n1).is_none()) { + continue; + } + + let n_res = op_native(n0, n1); + let t_res = op(t0, t1); + + // For `f32` and `f64`, NaN values are not considered equal to + // themselves. We store `Option`/`Option` and store + // NaN as `None` so they can still be compared. + let n_res = (!T::Native::is_nan(n_res)).then(|| n_res); + let t_res = (!T::Native::is_nan(t_res.get())).then(|| t_res.get()); + assert_eq!(n_res, t_res); + } + } + + macro_rules! test { + (@binary $trait:ident, $method:ident $([$checked_method:ident])?, $($call_for_macros:ident),*) => {{ + test!( + @inner $trait, + core::ops::$trait::$method, + core::ops::$trait::$method, + { + #[allow(unused_mut, unused_assignments)] + let mut op_native_checked = None:: Option>; + $( + op_native_checked = Some(T::Native::$checked_method); + )? 
+ op_native_checked + }, + $($call_for_macros),* + ); + }}; + (@unary $trait:ident, $method:ident $([$checked_method:ident])?, $($call_for_macros:ident),*) => {{ + test!( + @inner $trait, + |slf, _rhs| core::ops::$trait::$method(slf), + |slf, _rhs| core::ops::$trait::$method(slf), + { + #[allow(unused_mut, unused_assignments)] + let mut op_native_checked = None:: Option>; + $( + op_native_checked = Some(|slf, _rhs| T::Native::$checked_method(slf)); + )? + op_native_checked + }, + $($call_for_macros),* + ); + }}; + (@inner $trait:ident, $op:expr, $op_native:expr, $op_native_checked:expr, $($call_for_macros:ident),*) => {{ + fn t>() + where + T::Native: core::ops::$trait, + { + test::( + $op, + $op_native, + $op_native_checked, + ); + } + + $( + $call_for_macros!(t, NativeEndian); + $call_for_macros!(t, NonNativeEndian); + )* + }}; + } + + test!(@binary Add, add[checked_add], call_for_all_types); + test!(@binary Div, div[checked_div], call_for_all_types); + test!(@binary Mul, mul[checked_mul], call_for_all_types); + test!(@binary Rem, rem[checked_rem], call_for_all_types); + test!(@binary Sub, sub[checked_sub], call_for_all_types); + + test!(@binary BitAnd, bitand, call_for_unsigned_types, call_for_signed_types); + test!(@binary BitOr, bitor, call_for_unsigned_types, call_for_signed_types); + test!(@binary BitXor, bitxor, call_for_unsigned_types, call_for_signed_types); + test!(@binary Shl, shl[checked_shl], call_for_unsigned_types, call_for_signed_types); + test!(@binary Shr, shr[checked_shr], call_for_unsigned_types, call_for_signed_types); + + test!(@unary Not, not, call_for_signed_types, call_for_unsigned_types); + test!(@unary Neg, neg, call_for_signed_types, call_for_float_types); + } + + #[test] + fn test_debug_impl() { + // Ensure that Debug applies format options to the inner value. 
+ let val = U16::::new(10); + assert_eq!(format!("{:?}", val), "U16(10)"); + assert_eq!(format!("{:03?}", val), "U16(010)"); + assert_eq!(format!("{:x?}", val), "U16(a)"); + } +} diff --git a/src/rust/vendor/zerocopy/src/lib.rs b/src/rust/vendor/zerocopy/src/lib.rs new file mode 100644 index 000000000..c221de201 --- /dev/null +++ b/src/rust/vendor/zerocopy/src/lib.rs @@ -0,0 +1,8284 @@ +// Copyright 2018 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +// After updating the following doc comment, make sure to run the following +// command to update `README.md` based on its contents: +// +// ./generate-readme.sh > README.md + +//! *Want to help improve zerocopy? +//! Fill out our [user survey][user-survey]!* +//! +//! ***Fast, safe, compile error. Pick two.*** +//! +//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe` +//! so you don't have to. +//! +//! # Overview +//! +//! Zerocopy provides four core marker traits, each of which can be derived +//! (e.g., `#[derive(FromZeroes)]`): +//! - [`FromZeroes`] indicates that a sequence of zero bytes represents a valid +//! instance of a type +//! - [`FromBytes`] indicates that a type may safely be converted from an +//! arbitrary byte sequence +//! - [`AsBytes`] indicates that a type may safely be converted *to* a byte +//! sequence +//! - [`Unaligned`] indicates that a type's alignment requirement is 1 +//! +//! Types which implement a subset of these traits can then be converted to/from +//! byte sequences with little to no runtime overhead. +//! +//! Zerocopy also provides byte-order aware integer types that support these +//! conversions; see the [`byteorder`] module. These types are especially useful +//! for network parsing. +//! +//! 
[user-survey]: https://docs.google.com/forms/d/e/1FAIpQLSdzBNTN9tzwsmtyZxRFNL02K36IWCdHWW2ZBckyQS2xiO3i8Q/viewform?usp=published_options +//! +//! # Cargo Features +//! +//! - **`alloc`** +//! By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled, +//! the `alloc` crate is added as a dependency, and some allocation-related +//! functionality is added. +//! +//! - **`byteorder`** (enabled by default) +//! Adds the [`byteorder`] module and a dependency on the `byteorder` crate. +//! The `byteorder` module provides byte order-aware equivalents of the +//! multi-byte primitive numerical types. Unlike their primitive equivalents, +//! the types in this module have no alignment requirement and support byte +//! order conversions. This can be useful in handling file formats, network +//! packet layouts, etc which don't provide alignment guarantees and which may +//! use a byte order different from that of the execution platform. +//! +//! - **`derive`** +//! Provides derives for the core marker traits via the `zerocopy-derive` +//! crate. These derives are re-exported from `zerocopy`, so it is not +//! necessary to depend on `zerocopy-derive` directly. +//! +//! However, you may experience better compile times if you instead directly +//! depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`, +//! since doing so will allow Rust to compile these crates in parallel. To do +//! so, do *not* enable the `derive` feature, and list both dependencies in +//! your `Cargo.toml` with the same leading non-zero version number; e.g: +//! +//! ```toml +//! [dependencies] +//! zerocopy = "0.X" +//! zerocopy-derive = "0.X" +//! ``` +//! +//! - **`simd`** +//! When the `simd` feature is enabled, `FromZeroes`, `FromBytes`, and +//! `AsBytes` impls are emitted for all stable SIMD types which exist on the +//! target platform. Note that the layout of SIMD types is not yet stabilized, +//! 
so these impls may be removed in the future if layout changes make them +//! invalid. For more information, see the Unsafe Code Guidelines Reference +//! page on the [layout of packed SIMD vectors][simd-layout]. +//! +//! - **`simd-nightly`** +//! Enables the `simd` feature and adds support for SIMD types which are only +//! available on nightly. Since these types are unstable, support for any type +//! may be removed at any point in the future. +//! +//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html +//! +//! # Security Ethos +//! +//! Zerocopy is expressly designed for use in security-critical contexts. We +//! strive to ensure that that zerocopy code is sound under Rust's current +//! memory model, and *any future memory model*. We ensure this by: +//! - **...not 'guessing' about Rust's semantics.** +//! We annotate `unsafe` code with a precise rationale for its soundness that +//! cites a relevant section of Rust's official documentation. When Rust's +//! documented semantics are unclear, we work with the Rust Operational +//! Semantics Team to clarify Rust's documentation. +//! - **...rigorously testing our implementation.** +//! We run tests using [Miri], ensuring that zerocopy is sound across a wide +//! array of supported target platforms of varying endianness and pointer +//! width, and across both current and experimental memory models of Rust. +//! - **...formally proving the correctness of our implementation.** +//! We apply formal verification tools like [Kani][kani] to prove zerocopy's +//! correctness. +//! +//! For more information, see our full [soundness policy]. +//! +//! [Miri]: https://github.com/rust-lang/miri +//! [Kani]: https://github.com/model-checking/kani +//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness +//! +//! # Relationship to Project Safe Transmute +//! +//! [Project Safe Transmute] is an official initiative of the Rust Project to +//! 
develop language-level support for safer transmutation. The Project consults +//! with crates like zerocopy to identify aspects of safer transmutation that +//! would benefit from compiler support, and has developed an [experimental, +//! compiler-supported analysis][mcp-transmutability] which determines whether, +//! for a given type, any value of that type may be soundly transmuted into +//! another type. Once this functionality is sufficiently mature, zerocopy +//! intends to replace its internal transmutability analysis (implemented by our +//! custom derives) with the compiler-supported one. This change will likely be +//! an implementation detail that is invisible to zerocopy's users. +//! +//! Project Safe Transmute will not replace the need for most of zerocopy's +//! higher-level abstractions. The experimental compiler analysis is a tool for +//! checking the soundness of `unsafe` code, not a tool to avoid writing +//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy +//! will still be required in order to provide higher-level abstractions on top +//! of the building block provided by Project Safe Transmute. +//! +//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html +//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411 +//! +//! # MSRV +//! +//! See our [MSRV policy]. +//! +//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv +//! +//! # Changelog +//! +//! Zerocopy uses [GitHub Releases]. +//! +//! [GitHub Releases]: https://github.com/google/zerocopy/releases + +// Sometimes we want to use lints which were added after our MSRV. +// `unknown_lints` is `warn` by default and we deny warnings in CI, so without +// this attribute, any unknown lint would cause a CI failure when testing with +// our MSRV. 
+// +// TODO(#1201): Remove `unexpected_cfgs` +#![allow(unknown_lints, non_local_definitions, unexpected_cfgs)] +#![deny(renamed_and_removed_lints)] +#![deny( + anonymous_parameters, + deprecated_in_future, + late_bound_lifetime_arguments, + missing_copy_implementations, + missing_debug_implementations, + missing_docs, + path_statements, + patterns_in_fns_without_body, + rust_2018_idioms, + trivial_numeric_casts, + unreachable_pub, + unsafe_op_in_unsafe_fn, + unused_extern_crates, + // We intentionally choose not to deny `unused_qualifications`. When items + // are added to the prelude (e.g., `core::mem::size_of`), this has the + // consequence of making some uses trigger this lint on the latest toolchain + // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`) + // does not work on older toolchains. + // + // We tested a more complicated fix in #1413, but ultimately decided that, + // since this lint is just a minor style lint, the complexity isn't worth it + // - it's fine to occasionally have unused qualifications slip through, + // especially since these do not affect our user-facing API in any way. 
+ variant_size_differences +)] +#![cfg_attr( + __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS, + deny(fuzzy_provenance_casts, lossy_provenance_casts) +)] +#![deny( + clippy::all, + clippy::alloc_instead_of_core, + clippy::arithmetic_side_effects, + clippy::as_underscore, + clippy::assertions_on_result_states, + clippy::as_conversions, + clippy::correctness, + clippy::dbg_macro, + clippy::decimal_literal_representation, + clippy::get_unwrap, + clippy::indexing_slicing, + clippy::missing_inline_in_public_items, + clippy::missing_safety_doc, + clippy::obfuscated_if_else, + clippy::perf, + clippy::print_stdout, + clippy::std_instead_of_core, + clippy::style, + clippy::suspicious, + clippy::todo, + clippy::undocumented_unsafe_blocks, + clippy::unimplemented, + clippy::unnested_or_patterns, + clippy::unwrap_used, + clippy::use_debug +)] +#![deny( + rustdoc::bare_urls, + rustdoc::broken_intra_doc_links, + rustdoc::invalid_codeblock_attributes, + rustdoc::invalid_html_tags, + rustdoc::invalid_rust_codeblocks, + rustdoc::missing_crate_level_docs, + rustdoc::private_intra_doc_links +)] +// In test code, it makes sense to weight more heavily towards concise, readable +// code over correct or debuggable code. +#![cfg_attr(any(test, kani), allow( + // In tests, you get line numbers and have access to source code, so panic + // messages are less important. You also often unwrap a lot, which would + // make expect'ing instead very verbose. + clippy::unwrap_used, + // In tests, there's no harm to "panic risks" - the worst that can happen is + // that your test will fail, and you'll fix it. By contrast, panic risks in + // production code introduce the possibly of code panicking unexpectedly "in + // the field". 
+ clippy::arithmetic_side_effects, + clippy::indexing_slicing, +))] +#![cfg_attr(not(test), no_std)] +#![cfg_attr( + all(feature = "simd-nightly", any(target_arch = "x86", target_arch = "x86_64")), + feature(stdarch_x86_avx512) +)] +#![cfg_attr( + all(feature = "simd-nightly", target_arch = "arm"), + feature(stdarch_arm_dsp, stdarch_arm_neon_intrinsics) +)] +#![cfg_attr( + all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")), + feature(stdarch_powerpc) +)] +#![cfg_attr(doc_cfg, feature(doc_cfg))] +#![cfg_attr( + __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS, + feature(layout_for_ptr, strict_provenance) +)] + +// This is a hack to allow zerocopy-derive derives to work in this crate. They +// assume that zerocopy is linked as an extern crate, so they access items from +// it as `zerocopy::Xxx`. This makes that still work. +#[cfg(any(feature = "derive", test))] +extern crate self as zerocopy; + +#[macro_use] +mod macros; + +#[cfg(feature = "byteorder")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))] +pub mod byteorder; +#[doc(hidden)] +pub mod macro_util; +mod post_monomorphization_compile_fail_tests; +mod util; +// TODO(#252): If we make this pub, come up with a better name. +mod wrappers; + +#[cfg(feature = "byteorder")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))] +pub use crate::byteorder::*; +pub use crate::wrappers::*; + +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::Unaligned; + +// `pub use` separately here so that we can mark it `#[doc(hidden)]`. +// +// TODO(#29): Remove this or add a doc comment. 
+#[cfg(any(feature = "derive", test))]
+#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
+#[doc(hidden)]
+pub use zerocopy_derive::KnownLayout;
+
+use core::{
+    cell::{self, RefMut},
+    cmp::Ordering,
+    fmt::{self, Debug, Display, Formatter},
+    hash::Hasher,
+    marker::PhantomData,
+    mem::{self, ManuallyDrop, MaybeUninit},
+    num::{
+        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
+        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
+    },
+    ops::{Deref, DerefMut},
+    ptr::{self, NonNull},
+    slice,
+};
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+#[cfg(feature = "alloc")]
+use alloc::{boxed::Box, vec::Vec};
+
+#[cfg(any(feature = "alloc", kani))]
+use core::alloc::Layout;
+
+// Used by `TryFromBytes::is_bit_valid`.
+#[doc(hidden)]
+pub use crate::util::ptr::Ptr;
+
+// For each polyfill, as soon as the corresponding feature is stable, the
+// polyfill import will be unused because method/function resolution will prefer
+// the inherent method/function over a trait method/function. Thus, we suppress
+// the `unused_imports` warning.
+//
+// See the documentation on `util::polyfills` for more information.
+#[allow(unused_imports)]
+use crate::util::polyfills::NonNullExt as _;
+
+#[rustversion::nightly]
+#[cfg(all(test, not(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)))]
+const _: () = {
+    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS\""]
+    const _WARNING: () = ();
+    #[warn(deprecated)]
+    _WARNING
+};
+
+/// The target pointer width, counted in bits.
+const POINTER_WIDTH_BITS: usize = mem::size_of::<usize>() * 8;
+
+/// The layout of a type which might be dynamically-sized.
+///
+/// `DstLayout` describes the layout of sized types, slice types, and "slice
+/// DSTs" - ie, those that are known by the type system to have a trailing slice
+/// (as distinguished from `dyn Trait` types - such types *might* have a
+/// trailing slice type, but the type system isn't aware of it).
+///
+/// # Safety
+///
+/// Unlike [`core::alloc::Layout`], `DstLayout` is only used to describe full
+/// Rust types - ie, those that satisfy the layout requirements outlined by
+/// [the reference]. Callers may assume that an instance of `DstLayout`
+/// satisfies any conditions imposed on Rust types by the reference.
+///
+/// If `layout: DstLayout` describes a type, `T`, then it is guaranteed that:
+/// - `layout.align` is equal to `T`'s alignment
+/// - If `layout.size_info` is `SizeInfo::Sized { size }`, then `T: Sized` and
+///   `size_of::<T>() == size`
+/// - If `layout.size_info` is `SizeInfo::SliceDst(slice_layout)`, then
+///   - `T` is a slice DST
+/// - The `size` of an instance of `T` with `elems` trailing slice elements is
+///   equal to `slice_layout.offset + slice_layout.elem_size * elems` rounded up
+///   to the nearest multiple of `layout.align`. Any bytes in the range
+///   `[slice_layout.offset + slice_layout.elem_size * elems, size)` are padding
+///   and must not be assumed to be initialized.
+///
+/// [the reference]: https://doc.rust-lang.org/reference/type-layout.html
+#[doc(hidden)]
+#[allow(missing_debug_implementations, missing_copy_implementations)]
+#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
+pub struct DstLayout {
+    align: NonZeroUsize,
+    size_info: SizeInfo,
+}
+
+#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
+enum SizeInfo<E = usize> {
+    Sized { _size: usize },
+    SliceDst(TrailingSliceLayout<E>),
+}
+
+#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
+struct TrailingSliceLayout<E = usize> {
+    // The offset of the first byte of the trailing slice field.
Note that this
+    // is NOT the same as the minimum size of the type. For example, consider
+    // the following type:
+    //
+    //   struct Foo {
+    //       a: u16,
+    //       b: u8,
+    //       c: [u8],
+    //   }
+    //
+    // In `Foo`, `c` is at byte offset 3. When `c.len() == 0`, `c` is followed
+    // by a padding byte.
+    _offset: usize,
+    // The size of the element type of the trailing slice field.
+    _elem_size: E,
+}
+
+impl SizeInfo {
+    /// Attempts to create a `SizeInfo` from `Self` in which `elem_size` is a
+    /// `NonZeroUsize`. If `elem_size` is 0, returns `None`.
+    #[allow(unused)]
+    const fn try_to_nonzero_elem_size(&self) -> Option<SizeInfo<NonZeroUsize>> {
+        Some(match *self {
+            SizeInfo::Sized { _size } => SizeInfo::Sized { _size },
+            SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
+                if let Some(_elem_size) = NonZeroUsize::new(_elem_size) {
+                    SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
+                } else {
+                    return None;
+                }
+            }
+        })
+    }
+}
+
+#[doc(hidden)]
+#[derive(Copy, Clone)]
+#[cfg_attr(test, derive(Debug))]
+#[allow(missing_debug_implementations)]
+pub enum _CastType {
+    _Prefix,
+    _Suffix,
+}
+
+impl DstLayout {
+    /// The minimum possible alignment of a type.
+    const MIN_ALIGN: NonZeroUsize = match NonZeroUsize::new(1) {
+        Some(min_align) => min_align,
+        None => unreachable!(),
+    };
+
+    /// The maximum theoretic possible alignment of a type.
+    ///
+    /// For compatibility with future Rust versions, this is defined as the
+    /// maximum power-of-two that fits into a `usize`. See also
+    /// [`DstLayout::CURRENT_MAX_ALIGN`].
+    const THEORETICAL_MAX_ALIGN: NonZeroUsize =
+        match NonZeroUsize::new(1 << (POINTER_WIDTH_BITS - 1)) {
+            Some(max_align) => max_align,
+            None => unreachable!(),
+        };
+
+    /// The current, documented max alignment of a type \[1\].
+    ///
+    /// \[1\] Per <https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers>:
+    ///
+    ///   The alignment value must be a power of two from 1 up to
+    ///   2<sup>29</sup>.
+    #[cfg(not(kani))]
+    const CURRENT_MAX_ALIGN: NonZeroUsize = match NonZeroUsize::new(1 << 28) {
+        Some(max_align) => max_align,
+        None => unreachable!(),
+    };
+
+    /// Constructs a `DstLayout` for a zero-sized type with `repr_align`
+    /// alignment (or 1). If `repr_align` is provided, then it must be a power
+    /// of two.
+    ///
+    /// # Panics
+    ///
+    /// This function panics if the supplied `repr_align` is not a power of two.
+    ///
+    /// # Safety
+    ///
+    /// Unsafe code may assume that the contract of this function is satisfied.
+    #[doc(hidden)]
+    #[inline]
+    pub const fn new_zst(repr_align: Option<NonZeroUsize>) -> DstLayout {
+        let align = match repr_align {
+            Some(align) => align,
+            None => Self::MIN_ALIGN,
+        };
+
+        assert!(align.is_power_of_two());
+
+        DstLayout { align, size_info: SizeInfo::Sized { _size: 0 } }
+    }
+
+    /// Constructs a `DstLayout` which describes `T`.
+    ///
+    /// # Safety
+    ///
+    /// Unsafe code may assume that `DstLayout` is the correct layout for `T`.
+    #[doc(hidden)]
+    #[inline]
+    pub const fn for_type<T>() -> DstLayout {
+        // SAFETY: `align` is correct by construction. `T: Sized`, and so it is
+        // sound to initialize `size_info` to `SizeInfo::Sized { size }`; the
+        // `size` field is also correct by construction.
+        DstLayout {
+            align: match NonZeroUsize::new(mem::align_of::<T>()) {
+                Some(align) => align,
+                None => unreachable!(),
+            },
+            size_info: SizeInfo::Sized { _size: mem::size_of::<T>() },
+        }
+    }
+
+    /// Constructs a `DstLayout` which describes `[T]`.
+    ///
+    /// # Safety
+    ///
+    /// Unsafe code may assume that `DstLayout` is the correct layout for `[T]`.
+    const fn for_slice<T>() -> DstLayout {
+        // SAFETY: The alignment of a slice is equal to the alignment of its
+        // element type, and so `align` is initialized correctly.
+        //
+        // Since this is just a slice type, there is no offset between the
+        // beginning of the type and the beginning of the slice, so it is
+        // correct to set `offset: 0`. The `elem_size` is correct by
+        // construction.
Since `[T]` is a (degenerate case of a) slice DST, it
+        // is correct to initialize `size_info` to `SizeInfo::SliceDst`.
+        DstLayout {
+            align: match NonZeroUsize::new(mem::align_of::<T>()) {
+                Some(align) => align,
+                None => unreachable!(),
+            },
+            size_info: SizeInfo::SliceDst(TrailingSliceLayout {
+                _offset: 0,
+                _elem_size: mem::size_of::<T>(),
+            }),
+        }
+    }
+
+    /// Like `Layout::extend`, this creates a layout that describes a record
+    /// whose layout consists of `self` followed by `next` that includes the
+    /// necessary inter-field padding, but not any trailing padding.
+    ///
+    /// In order to match the layout of a `#[repr(C)]` struct, this method
+    /// should be invoked for each field in declaration order. To add trailing
+    /// padding, call `DstLayout::pad_to_align` after extending the layout for
+    /// all fields. If `self` corresponds to a type marked with
+    /// `repr(packed(N))`, then `repr_packed` should be set to `Some(N)`,
+    /// otherwise `None`.
+    ///
+    /// This method cannot be used to match the layout of a record with the
+    /// default representation, as that representation is mostly unspecified.
+    ///
+    /// # Safety
+    ///
+    /// If a (potentially hypothetical) valid `repr(C)` Rust type begins with
+    /// fields whose layout are `self`, and those fields are immediately
+    /// followed by a field whose layout is `field`, then unsafe code may rely
+    /// on `self.extend(field, repr_packed)` producing a layout that correctly
+    /// encompasses those two components.
+    ///
+    /// We make no guarantees to the behavior of this method if these fragments
+    /// cannot appear in a valid Rust type (e.g., the concatenation of the
+    /// layouts would lead to a size larger than `isize::MAX`).
+    #[doc(hidden)]
+    #[inline]
+    pub const fn extend(self, field: DstLayout, repr_packed: Option<NonZeroUsize>) -> Self {
+        use util::{core_layout::padding_needed_for, max, min};
+
+        // If `repr_packed` is `None`, there are no alignment constraints, and
+        // the value can be defaulted to `THEORETICAL_MAX_ALIGN`.
+        let max_align = match repr_packed {
+            Some(max_align) => max_align,
+            None => Self::THEORETICAL_MAX_ALIGN,
+        };
+
+        assert!(max_align.is_power_of_two());
+
+        // We use Kani to prove that this method is robust to future increases
+        // in Rust's maximum allowed alignment. However, if such a change ever
+        // actually occurs, we'd like to be notified via assertion failures.
+        #[cfg(not(kani))]
+        {
+            debug_assert!(self.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
+            debug_assert!(field.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
+            if let Some(repr_packed) = repr_packed {
+                debug_assert!(repr_packed.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
+            }
+        }
+
+        // The field's alignment is clamped by `repr_packed` (i.e., the
+        // `repr(packed(N))` attribute, if any) [1].
+        //
+        // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
+        //
+        //   The alignments of each field, for the purpose of positioning
+        //   fields, is the smaller of the specified alignment and the alignment
+        //   of the field's type.
+        let field_align = min(field.align, max_align);
+
+        // The struct's alignment is the maximum of its previous alignment and
+        // `field_align`.
+        let align = max(self.align, field_align);
+
+        let size_info = match self.size_info {
+            // If the layout is already a DST, we panic; DSTs cannot be extended
+            // with additional fields.
+            SizeInfo::SliceDst(..) => panic!("Cannot extend a DST with additional fields."),
+
+            SizeInfo::Sized { _size: preceding_size } => {
+                // Compute the minimum amount of inter-field padding needed to
+                // satisfy the field's alignment, and offset of the trailing
+                // field.
[1] + // + // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: + // + // Inter-field padding is guaranteed to be the minimum + // required in order to satisfy each field's (possibly + // altered) alignment. + let padding = padding_needed_for(preceding_size, field_align); + + // This will not panic (and is proven to not panic, with Kani) + // if the layout components can correspond to a leading layout + // fragment of a valid Rust type, but may panic otherwise (e.g., + // combining or aligning the components would create a size + // exceeding `isize::MAX`). + let offset = match preceding_size.checked_add(padding) { + Some(offset) => offset, + None => panic!("Adding padding to `self`'s size overflows `usize`."), + }; + + match field.size_info { + SizeInfo::Sized { _size: field_size } => { + // If the trailing field is sized, the resulting layout + // will be sized. Its size will be the sum of the + // preceeding layout, the size of the new field, and the + // size of inter-field padding between the two. + // + // This will not panic (and is proven with Kani to not + // panic) if the layout components can correspond to a + // leading layout fragment of a valid Rust type, but may + // panic otherwise (e.g., combining or aligning the + // components would create a size exceeding + // `usize::MAX`). + let size = match offset.checked_add(field_size) { + Some(size) => size, + None => panic!("`field` cannot be appended without the total size overflowing `usize`"), + }; + SizeInfo::Sized { _size: size } + } + SizeInfo::SliceDst(TrailingSliceLayout { + _offset: trailing_offset, + _elem_size, + }) => { + // If the trailing field is dynamically sized, so too + // will the resulting layout. The offset of the trailing + // slice component is the sum of the offset of the + // trailing field and the trailing slice offset within + // that field. 
+ // + // This will not panic (and is proven with Kani to not + // panic) if the layout components can correspond to a + // leading layout fragment of a valid Rust type, but may + // panic otherwise (e.g., combining or aligning the + // components would create a size exceeding + // `usize::MAX`). + let offset = match offset.checked_add(trailing_offset) { + Some(offset) => offset, + None => panic!("`field` cannot be appended without the total size overflowing `usize`"), + }; + SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size }) + } + } + } + }; + + DstLayout { align, size_info } + } + + /// Like `Layout::pad_to_align`, this routine rounds the size of this layout + /// up to the nearest multiple of this type's alignment or `repr_packed` + /// (whichever is less). This method leaves DST layouts unchanged, since the + /// trailing padding of DSTs is computed at runtime. + /// + /// In order to match the layout of a `#[repr(C)]` struct, this method + /// should be invoked after the invocations of [`DstLayout::extend`]. If + /// `self` corresponds to a type marked with `repr(packed(N))`, then + /// `repr_packed` should be set to `Some(N)`, otherwise `None`. + /// + /// This method cannot be used to match the layout of a record with the + /// default representation, as that representation is mostly unspecified. + /// + /// # Safety + /// + /// If a (potentially hypothetical) valid `repr(C)` type begins with fields + /// whose layout are `self` followed only by zero or more bytes of trailing + /// padding (not included in `self`), then unsafe code may rely on + /// `self.pad_to_align(repr_packed)` producing a layout that correctly + /// encapsulates the layout of that type. + /// + /// We make no guarantees to the behavior of this method if `self` cannot + /// appear in a valid Rust type (e.g., because the addition of trailing + /// padding would lead to a size larger than `isize::MAX`). 
+ #[doc(hidden)] + #[inline] + pub const fn pad_to_align(self) -> Self { + use util::core_layout::padding_needed_for; + + let size_info = match self.size_info { + // For sized layouts, we add the minimum amount of trailing padding + // needed to satisfy alignment. + SizeInfo::Sized { _size: unpadded_size } => { + let padding = padding_needed_for(unpadded_size, self.align); + let size = match unpadded_size.checked_add(padding) { + Some(size) => size, + None => panic!("Adding padding caused size to overflow `usize`."), + }; + SizeInfo::Sized { _size: size } + } + // For DST layouts, trailing padding depends on the length of the + // trailing DST and is computed at runtime. This does not alter the + // offset or element size of the layout, so we leave `size_info` + // unchanged. + size_info @ SizeInfo::SliceDst(_) => size_info, + }; + + DstLayout { align: self.align, size_info } + } + + /// Validates that a cast is sound from a layout perspective. + /// + /// Validates that the size and alignment requirements of a type with the + /// layout described in `self` would not be violated by performing a + /// `cast_type` cast from a pointer with address `addr` which refers to a + /// memory region of size `bytes_len`. + /// + /// If the cast is valid, `validate_cast_and_convert_metadata` returns + /// `(elems, split_at)`. If `self` describes a dynamically-sized type, then + /// `elems` is the maximum number of trailing slice elements for which a + /// cast would be valid (for sized types, `elem` is meaningless and should + /// be ignored). `split_at` is the index at which to split the memory region + /// in order for the prefix (suffix) to contain the result of the cast, and + /// in order for the remaining suffix (prefix) to contain the leftover + /// bytes. 
+ /// + /// There are three conditions under which a cast can fail: + /// - The smallest possible value for the type is larger than the provided + /// memory region + /// - A prefix cast is requested, and `addr` does not satisfy `self`'s + /// alignment requirement + /// - A suffix cast is requested, and `addr + bytes_len` does not satisfy + /// `self`'s alignment requirement (as a consequence, since all instances + /// of the type are a multiple of its alignment, no size for the type will + /// result in a starting address which is properly aligned) + /// + /// # Safety + /// + /// The caller may assume that this implementation is correct, and may rely + /// on that assumption for the soundness of their code. In particular, the + /// caller may assume that, if `validate_cast_and_convert_metadata` returns + /// `Some((elems, split_at))`, then: + /// - A pointer to the type (for dynamically sized types, this includes + /// `elems` as its pointer metadata) describes an object of size `size <= + /// bytes_len` + /// - If this is a prefix cast: + /// - `addr` satisfies `self`'s alignment + /// - `size == split_at` + /// - If this is a suffix cast: + /// - `split_at == bytes_len - size` + /// - `addr + split_at` satisfies `self`'s alignment + /// + /// Note that this method does *not* ensure that a pointer constructed from + /// its return values will be a valid pointer. In particular, this method + /// does not reason about `isize` overflow, which is a requirement of many + /// Rust pointer APIs, and may at some point be determined to be a validity + /// invariant of pointer types themselves. 
This should never be a problem so + /// long as the arguments to this method are derived from a known-valid + /// pointer (e.g., one derived from a safe Rust reference), but it is + /// nonetheless the caller's responsibility to justify that pointer + /// arithmetic will not overflow based on a safety argument *other than* the + /// mere fact that this method returned successfully. + /// + /// # Panics + /// + /// `validate_cast_and_convert_metadata` will panic if `self` describes a + /// DST whose trailing slice element is zero-sized. + /// + /// If `addr + bytes_len` overflows `usize`, + /// `validate_cast_and_convert_metadata` may panic, or it may return + /// incorrect results. No guarantees are made about when + /// `validate_cast_and_convert_metadata` will panic. The caller should not + /// rely on `validate_cast_and_convert_metadata` panicking in any particular + /// condition, even if `debug_assertions` are enabled. + #[allow(unused)] + const fn validate_cast_and_convert_metadata( + &self, + addr: usize, + bytes_len: usize, + cast_type: _CastType, + ) -> Option<(usize, usize)> { + // `debug_assert!`, but with `#[allow(clippy::arithmetic_side_effects)]`. + macro_rules! __debug_assert { + ($e:expr $(, $msg:expr)?) => { + debug_assert!({ + #[allow(clippy::arithmetic_side_effects)] + let e = $e; + e + } $(, $msg)?); + }; + } + + // Note that, in practice, `self` is always a compile-time constant. We + // do this check earlier than needed to ensure that we always panic as a + // result of bugs in the program (such as calling this function on an + // invalid type) instead of allowing this panic to be hidden if the cast + // would have failed anyway for runtime reasons (such as a too-small + // memory region). 
+ // + // TODO(#67): Once our MSRV is 1.65, use let-else: + // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements + let size_info = match self.size_info.try_to_nonzero_elem_size() { + Some(size_info) => size_info, + None => panic!("attempted to cast to slice type with zero-sized element"), + }; + + // Precondition + __debug_assert!(addr.checked_add(bytes_len).is_some(), "`addr` + `bytes_len` > usize::MAX"); + + // Alignment checks go in their own block to avoid introducing variables + // into the top-level scope. + { + // We check alignment for `addr` (for prefix casts) or `addr + + // bytes_len` (for suffix casts). For a prefix cast, the correctness + // of this check is trivial - `addr` is the address the object will + // live at. + // + // For a suffix cast, we know that all valid sizes for the type are + // a multiple of the alignment (and by safety precondition, we know + // `DstLayout` may only describe valid Rust types). Thus, a + // validly-sized instance which lives at a validly-aligned address + // must also end at a validly-aligned address. Thus, if the end + // address for a suffix cast (`addr + bytes_len`) is not aligned, + // then no valid start address will be aligned either. + let offset = match cast_type { + _CastType::_Prefix => 0, + _CastType::_Suffix => bytes_len, + }; + + // Addition is guaranteed not to overflow because `offset <= + // bytes_len`, and `addr + bytes_len <= usize::MAX` is a + // precondition of this method. Modulus is guaranteed not to divide + // by 0 because `align` is non-zero. 
+ #[allow(clippy::arithmetic_side_effects)] + if (addr + offset) % self.align.get() != 0 { + return None; + } + } + + let (elems, self_bytes) = match size_info { + SizeInfo::Sized { _size: size } => { + if size > bytes_len { + return None; + } + (0, size) + } + SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size: elem_size }) => { + // Calculate the maximum number of bytes that could be consumed + // - any number of bytes larger than this will either not be a + // multiple of the alignment, or will be larger than + // `bytes_len`. + let max_total_bytes = + util::round_down_to_next_multiple_of_alignment(bytes_len, self.align); + // Calculate the maximum number of bytes that could be consumed + // by the trailing slice. + // + // TODO(#67): Once our MSRV is 1.65, use let-else: + // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements + let max_slice_and_padding_bytes = match max_total_bytes.checked_sub(offset) { + Some(max) => max, + // `bytes_len` too small even for 0 trailing slice elements. + None => return None, + }; + + // Calculate the number of elements that fit in + // `max_slice_and_padding_bytes`; any remaining bytes will be + // considered padding. + // + // Guaranteed not to divide by zero: `elem_size` is non-zero. + #[allow(clippy::arithmetic_side_effects)] + let elems = max_slice_and_padding_bytes / elem_size.get(); + // Guaranteed not to overflow on multiplication: `usize::MAX >= + // max_slice_and_padding_bytes >= (max_slice_and_padding_bytes / + // elem_size) * elem_size`. 
+ // + // Guaranteed not to overflow on addition: + // - max_slice_and_padding_bytes == max_total_bytes - offset + // - elems * elem_size <= max_slice_and_padding_bytes == max_total_bytes - offset + // - elems * elem_size + offset <= max_total_bytes <= usize::MAX + #[allow(clippy::arithmetic_side_effects)] + let without_padding = offset + elems * elem_size.get(); + // `self_bytes` is equal to the offset bytes plus the bytes + // consumed by the trailing slice plus any padding bytes + // required to satisfy the alignment. Note that we have computed + // the maximum number of trailing slice elements that could fit + // in `self_bytes`, so any padding is guaranteed to be less than + // the size of an extra element. + // + // Guaranteed not to overflow: + // - By previous comment: without_padding == elems * elem_size + + // offset <= max_total_bytes + // - By construction, `max_total_bytes` is a multiple of + // `self.align`. + // - At most, adding padding needed to round `without_padding` + // up to the next multiple of the alignment will bring + // `self_bytes` up to `max_total_bytes`. + #[allow(clippy::arithmetic_side_effects)] + let self_bytes = without_padding + + util::core_layout::padding_needed_for(without_padding, self.align); + (elems, self_bytes) + } + }; + + __debug_assert!(self_bytes <= bytes_len); + + let split_at = match cast_type { + _CastType::_Prefix => self_bytes, + // Guaranteed not to underflow: + // - In the `Sized` branch, only returns `size` if `size <= + // bytes_len`. + // - In the `SliceDst` branch, calculates `self_bytes <= + // max_toatl_bytes`, which is upper-bounded by `bytes_len`. + #[allow(clippy::arithmetic_side_effects)] + _CastType::_Suffix => bytes_len - self_bytes, + }; + + Some((elems, split_at)) + } +} + +/// A trait which carries information about a type's layout that is used by the +/// internals of this crate. +/// +/// This trait is not meant for consumption by code outside of this crate. 
While
+/// the normal semver stability guarantees apply with respect to which types
+/// implement this trait and which trait implementations are implied by this
+/// trait, no semver stability guarantees are made regarding its internals; they
+/// may change at any time, and code which makes use of them may break.
+///
+/// # Safety
+///
+/// This trait does not convey any safety guarantees to code outside this crate.
+#[doc(hidden)] // TODO: Remove this once KnownLayout is used by other APIs
+pub unsafe trait KnownLayout {
+    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
+    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
+    // it likely won't be in the future, but there's no reason not to be
+    // forwards-compatible with object safety.
+    #[doc(hidden)]
+    fn only_derive_is_allowed_to_implement_this_trait()
+    where
+        Self: Sized;
+
+    #[doc(hidden)]
+    const LAYOUT: DstLayout;
+
+    /// SAFETY: The returned pointer has the same address and provenance as
+    /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
+    /// elements in its trailing slice. If `Self` is sized, `elems` is ignored.
+    #[doc(hidden)]
+    fn raw_from_ptr_len(bytes: NonNull<u8>, elems: usize) -> NonNull<Self>;
+}
+
+// SAFETY: Delegates safety to `DstLayout::for_slice`.
+unsafe impl<T> KnownLayout for [T] {
+    #[allow(clippy::missing_inline_in_public_items)]
+    fn only_derive_is_allowed_to_implement_this_trait()
+    where
+        Self: Sized,
+    {
+    }
+    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();
+
+    // SAFETY: `.cast` preserves address and provenance. The returned pointer
+    // refers to an object with `elems` elements by construction.
+    #[inline(always)]
+    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
+        // TODO(#67): Remove this allow. See NonNullExt for more details.
+        #[allow(unstable_name_collisions)]
+        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
+    }
+}
+
+#[rustfmt::skip]
+impl_known_layout!(
+    (),
+    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
+    bool, char,
+    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
+    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
+);
+#[rustfmt::skip]
+impl_known_layout!(
+    T => Option<T>,
+    T: ?Sized => PhantomData<T>,
+    T => Wrapping<T>,
+    T => MaybeUninit<T>,
+    T: ?Sized => *const T,
+    T: ?Sized => *mut T,
+);
+impl_known_layout!(const N: usize, T => [T; N]);
+
+safety_comment! {
+    /// SAFETY:
+    /// `str` and `ManuallyDrop<[T]>` [1] have the same representations as
+    /// `[u8]` and `[T]` repsectively. `str` has different bit validity than
+    /// `[u8]`, but that doesn't affect the soundness of this impl.
+    ///
+    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
+    ///
+    ///   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
+    ///   validity as `T`
+    ///
+    /// TODO(#429):
+    /// - Add quotes from docs.
+    /// - Once [1] (added in
+    ///   https://github.com/rust-lang/rust/pull/115522) is available on stable,
+    ///   quote the stable docs instead of the nightly docs.
+    unsafe_impl_known_layout!(#[repr([u8])] str);
+    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
+}
+
+/// Analyzes whether a type is [`FromZeroes`].
+///
+/// This derive analyzes, at compile time, whether the annotated type satisfies
+/// the [safety conditions] of `FromZeroes` and implements `FromZeroes` if it is
+/// sound to do so. This derive can be applied to structs, enums, and unions;
+/// e.g.:
+///
+/// ```
+/// # use zerocopy_derive::FromZeroes;
+/// #[derive(FromZeroes)]
+/// struct MyStruct {
+/// # /*
+///     ...
+/// # */
+/// }
+///
+/// #[derive(FromZeroes)]
+/// #[repr(u8)]
+/// enum MyEnum {
+/// #   Variant0,
+/// # /*
+///     ...
+/// # */ +/// } +/// +/// #[derive(FromZeroes)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// [safety conditions]: trait@FromZeroes#safety +/// +/// # Analysis +/// +/// *This section describes, roughly, the analysis performed by this derive to +/// determine whether it is sound to implement `FromZeroes` for a given type. +/// Unless you are modifying the implementation of this derive, or attempting to +/// manually implement `FromZeroes` for a type yourself, you don't need to read +/// this section.* +/// +/// If a type has the following properties, then this derive can implement +/// `FromZeroes` for that type: +/// +/// - If the type is a struct, all of its fields must be `FromZeroes`. +/// - If the type is an enum, it must be C-like (meaning that all variants have +/// no fields) and it must have a variant with a discriminant of `0`. See [the +/// reference] for a description of how discriminant values are chosen. +/// - The type must not contain any [`UnsafeCell`]s (this is required in order +/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of +/// memory). The type may contain references or pointers to `UnsafeCell`s so +/// long as those values can themselves be initialized from zeroes +/// (`FromZeroes` is not currently implemented for, e.g., +/// `Option<&UnsafeCell<_>>`, but it could be one day). +/// +/// This analysis is subject to change. Unsafe code may *only* rely on the +/// documented [safety conditions] of `FromZeroes`, and must *not* rely on the +/// implementation details of this derive. +/// +/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations +/// [`UnsafeCell`]: core::cell::UnsafeCell +/// +/// ## Why isn't an explicit representation required for structs? +/// +/// Neither this derive, nor the [safety conditions] of `FromZeroes`, requires +/// that structs are marked with `#[repr(C)]`. 
+/// +/// Per the [Rust reference](reference), +/// +/// > The representation of a type can change the padding between fields, but +/// > does not change the layout of the fields themselves. +/// +/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations +/// +/// Since the layout of structs only consists of padding bytes and field bytes, +/// a struct is soundly `FromZeroes` if: +/// 1. its padding is soundly `FromZeroes`, and +/// 2. its fields are soundly `FromZeroes`. +/// +/// The answer to the first question is always yes: padding bytes do not have +/// any validity constraints. A [discussion] of this question in the Unsafe Code +/// Guidelines Working Group concluded that it would be virtually unimaginable +/// for future versions of rustc to add validity constraints to padding bytes. +/// +/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174 +/// +/// Whether a struct is soundly `FromZeroes` therefore solely depends on whether +/// its fields are `FromZeroes`. +// TODO(#146): Document why we don't require an enum to have an explicit `repr` +// attribute. +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::FromZeroes; + +/// Types whose validity can be checked at runtime, allowing them to be +/// conditionally converted from byte slices. +/// +/// WARNING: Do not implement this trait yourself! Instead, use +/// `#[derive(TryFromBytes)]`. +/// +/// `TryFromBytes` types can safely be deserialized from an untrusted sequence +/// of bytes by performing a runtime check that the byte sequence contains a +/// valid instance of `Self`. +/// +/// `TryFromBytes` is ignorant of byte order. For byte order-aware types, see +/// the [`byteorder`] module. +/// +/// # What is a "valid instance"? +/// +/// In Rust, each type has *bit validity*, which refers to the set of bit +/// patterns which may appear in an instance of that type. 
It is impossible for
+/// safe Rust code to produce values which violate bit validity (ie, values
+/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
+/// invalid value, this is considered [undefined behavior].
+///
+/// Rust's bit validity rules are currently being decided, which means that some
+/// types have three classes of bit patterns: those which are definitely valid,
+/// and whose validity is documented in the language; those which may or may not
+/// be considered valid at some point in the future; and those which are
+/// definitely invalid.
+///
+/// Zerocopy takes a conservative approach, and only considers a bit pattern to
+/// be valid if its validity is a documented guarantee provided by the
+/// language.
+///
+/// For most use cases, Rust's current guarantees align with programmers'
+/// intuitions about what ought to be valid. As a result, zerocopy's
+/// conservatism should not affect most users. One notable exception is unions,
+/// whose bit validity is very up in the air; zerocopy does not permit
+/// implementing `TryFromBytes` for any union type.
+///
+/// If you are negatively affected by lack of support for a particular type,
+/// we encourage you to let us know by [filing an issue][github-repo].
+///
+/// # Safety
+///
+/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
+/// or representation of `T`. It merely provides the ability to perform a
+/// validity check at runtime via methods like [`try_from_ref`].
+///
+/// Currently, it is not possible to stably implement `TryFromBytes` other than
+/// by using `#[derive(TryFromBytes)]`. While there are `#[doc(hidden)]` items
+/// on this trait that provide well-defined safety invariants, no stability
+/// guarantees are made with respect to these items.
In particular, future
+/// releases of zerocopy may make backwards-breaking changes to these items,
+/// including changes that only affect soundness, which may cause code which
+/// uses those items to silently become unsound.
+///
+/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
+/// [github-repo]: https://github.com/google/zerocopy
+/// [`try_from_ref`]: TryFromBytes::try_from_ref
+// TODO(#5): Update `try_from_ref` doc link once it exists
+#[doc(hidden)]
+pub unsafe trait TryFromBytes {
+ /// Does a given memory range contain a valid instance of `Self`?
+ ///
+ /// # Safety
+ ///
+ /// ## Preconditions
+ ///
+ /// The memory referenced by `candidate` may only be accessed via reads for
+ /// the duration of this method call. This prohibits writes through mutable
+ /// references and through [`UnsafeCell`]s. There may exist immutable
+ /// references to the same memory which contain `UnsafeCell`s so long as:
+ /// - Those `UnsafeCell`s exist at the same byte ranges as `UnsafeCell`s in
+ /// `Self`. This is a bidirectional property: `Self` may not contain
+ /// `UnsafeCell`s where other references to the same memory do not, and
+ /// vice-versa.
+ /// - Those `UnsafeCell`s are never used to perform mutation for the
+ /// duration of this method call.
+ ///
+ /// The memory referenced by `candidate` may not be referenced by any
+ /// mutable references even if these references are not used to perform
+ /// mutation.
+ ///
+ /// `candidate` is not required to refer to a valid `Self`. However, it must
+ /// satisfy the requirement that uninitialized bytes may only be present
+ /// where it is possible for them to be present in `Self`. This is a dynamic
+ /// property: if, at a particular byte offset, a valid enum discriminant is
+ /// set, the subsequent bytes may only have uninitialized bytes as
+ /// specified by the corresponding enum.
+ /// + /// Formally, given `len = size_of_val_raw(candidate)`, at every byte + /// offset, `b`, in the range `[0, len)`: + /// - If, in all instances `s: Self` of length `len`, the byte at offset `b` + /// in `s` is initialized, then the byte at offset `b` within `*candidate` + /// must be initialized. + /// - Let `c` be the contents of the byte range `[0, b)` in `*candidate`. + /// Let `S` be the subset of valid instances of `Self` of length `len` + /// which contain `c` in the offset range `[0, b)`. If, for all instances + /// of `s: Self` in `S`, the byte at offset `b` in `s` is initialized, + /// then the byte at offset `b` in `*candidate` must be initialized. + /// + /// Pragmatically, this means that if `*candidate` is guaranteed to + /// contain an enum type at a particular offset, and the enum discriminant + /// stored in `*candidate` corresponds to a valid variant of that enum + /// type, then it is guaranteed that the appropriate bytes of `*candidate` + /// are initialized as defined by that variant's bit validity (although + /// note that the variant may contain another enum type, in which case the + /// same rules apply depending on the state of its discriminant, and so on + /// recursively). + /// + /// ## Postconditions + /// + /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true, + /// `*candidate` contains a valid `Self`. + /// + /// # Panics + /// + /// `is_bit_valid` may panic. Callers are responsible for ensuring that any + /// `unsafe` code remains sound even in the face of `is_bit_valid` + /// panicking. (We support user-defined validation routines; so long as + /// these routines are not required to be `unsafe`, there is no way to + /// ensure that these do not generate panics.) + /// + /// [`UnsafeCell`]: core::cell::UnsafeCell + #[doc(hidden)] + unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool; + + /// Attempts to interpret a byte slice as a `Self`. 
+ ///
+ /// `try_from_ref` validates that `bytes` contains a valid `Self`, and that
+ /// it satisfies `Self`'s alignment requirement. If it does, then `bytes` is
+ /// reinterpreted as a `Self`.
+ ///
+ /// Note that Rust's bit validity rules are still being decided. As such,
+ /// there exist types whose bit validity is ambiguous. See the
+ /// `TryFromBytes` docs for a discussion of how these cases are handled.
+ // TODO(#251): In a future in which we distinguish between `FromBytes` and
+ // `RefFromBytes`, this requires `where Self: RefFromBytes` to disallow
+ // interior mutability.
+ #[inline]
+ #[doc(hidden)] // TODO(#5): Finalize name before remove this attribute.
+ fn try_from_ref(bytes: &[u8]) -> Option<&Self>
+ where
+ Self: KnownLayout,
+ {
+ let maybe_self = Ptr::from(bytes).try_cast_into_no_leftover::<Self>()?;
+
+ // SAFETY:
+ // - Since `bytes` is an immutable reference, we know that no mutable
+ // references exist to this memory region.
+ // - Since `[u8]` contains no `UnsafeCell`s, we know there are no
+ // `&UnsafeCell` references to this memory region.
+ // - Since we don't permit implementing `TryFromBytes` for types which
+ // contain `UnsafeCell`s, there are no `UnsafeCell`s in `Self`, and so
+ // the requirement that all references contain `UnsafeCell`s at the
+ // same offsets is trivially satisfied.
+ // - All bytes of `bytes` are initialized.
+ //
+ // This call may panic. If that happens, it doesn't cause any soundness
+ // issues, as we have not generated any invalid state which we need to
+ // fix before returning.
+ if unsafe { !Self::is_bit_valid(maybe_self) } {
+ return None;
+ }
+
+ // SAFETY:
+ // - Preconditions for `as_ref`:
+ // - `is_bit_valid` guarantees that `*maybe_self` contains a valid
+ // `Self`. Since `&[u8]` does not permit interior mutation, this
+ // cannot be invalidated after this method returns.
+ // - Since the argument and return types are immutable references, + // Rust will prevent the caller from producing any mutable + // references to the same memory region. + // - Since `Self` is not allowed to contain any `UnsafeCell`s and the + // same is true of `[u8]`, interior mutation is not possible. Thus, + // no mutation is possible. For the same reason, there is no + // mismatch between the two types in terms of which byte ranges are + // referenced as `UnsafeCell`s. + // - Since interior mutation isn't possible within `Self`, there's no + // way for the returned reference to be used to modify the byte range, + // and thus there's no way for the returned reference to be used to + // write an invalid `[u8]` which would be observable via the original + // `&[u8]`. + Some(unsafe { maybe_self.as_ref() }) + } +} + +/// Types for which a sequence of bytes all set to zero represents a valid +/// instance of the type. +/// +/// Any memory region of the appropriate length which is guaranteed to contain +/// only zero bytes can be viewed as any `FromZeroes` type with no runtime +/// overhead. This is useful whenever memory is known to be in a zeroed state, +/// such memory returned from some allocation routines. +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(FromZeroes)]`][derive] (requires the `derive` Cargo feature); +/// e.g.: +/// +/// ``` +/// # use zerocopy_derive::FromZeroes; +/// #[derive(FromZeroes)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromZeroes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant0, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromZeroes)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// This derive performs a sophisticated, compile-time safety analysis to +/// determine whether a type is `FromZeroes`. 
+///
+/// # Safety
+///
+/// *This section describes what is required in order for `T: FromZeroes`, and
+/// what unsafe code may assume of such types. If you don't plan on implementing
+/// `FromZeroes` manually, and you don't plan on writing unsafe code that
+/// operates on `FromZeroes` types, then you don't need to read this section.*
+///
+/// If `T: FromZeroes`, then unsafe code may assume that:
+/// - It is sound to treat any initialized sequence of zero bytes of length
+/// `size_of::<T>()` as a `T`.
+/// - Given `b: &[u8]` where `b.len() == size_of::<T>()`, `b` is aligned to
+/// `align_of::<T>()`, and `b` contains only zero bytes, it is sound to
+/// construct a `t: &T` at the same address as `b`, and it is sound for both
+/// `b` and `t` to be live at the same time.
+///
+/// If a type is marked as `FromZeroes` which violates this contract, it may
+/// cause undefined behavior.
+///
+/// `#[derive(FromZeroes)]` only permits [types which satisfy these
+/// requirements][derive-analysis].
+///
+#[cfg_attr(
+ feature = "derive",
+ doc = "[derive]: zerocopy_derive::FromZeroes",
+ doc = "[derive-analysis]: zerocopy_derive::FromZeroes#analysis"
+)]
+#[cfg_attr(
+ not(feature = "derive"),
+ doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html"),
+ doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeroes.html#analysis"),
+)]
+pub unsafe trait FromZeroes {
+ // The `Self: Sized` bound makes it so that `FromZeroes` is still object
+ // safe.
+ #[doc(hidden)]
+ fn only_derive_is_allowed_to_implement_this_trait()
+ where
+ Self: Sized;
+
+ /// Overwrites `self` with zeroes.
+ ///
+ /// Sets every byte in `self` to 0. While this is similar to doing `*self =
+ /// Self::new_zeroed()`, it differs in that `zero` does not semantically
+ /// drop the current value and replace it with a new one - it simply
+ /// modifies the bytes of the existing value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use zerocopy::FromZeroes;
+ /// # use zerocopy_derive::*;
+ /// #
+ /// #[derive(FromZeroes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// let mut header = PacketHeader {
+ /// src_port: 100u16.to_be_bytes(),
+ /// dst_port: 200u16.to_be_bytes(),
+ /// length: 300u16.to_be_bytes(),
+ /// checksum: 400u16.to_be_bytes(),
+ /// };
+ ///
+ /// header.zero();
+ ///
+ /// assert_eq!(header.src_port, [0, 0]);
+ /// assert_eq!(header.dst_port, [0, 0]);
+ /// assert_eq!(header.length, [0, 0]);
+ /// assert_eq!(header.checksum, [0, 0]);
+ /// ```
+ #[inline(always)]
+ fn zero(&mut self) {
+ let slf: *mut Self = self;
+ let len = mem::size_of_val(self);
+ // SAFETY:
+ // - `self` is guaranteed by the type system to be valid for writes of
+ // size `size_of_val(self)`.
+ // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
+ // as required by `u8`.
+ // - Since `Self: FromZeroes`, the all-zeroes instance is a valid
+ // instance of `Self.`
+ //
+ // TODO(#429): Add references to docs and quotes.
+ unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
+ }
+
+ /// Creates an instance of `Self` from zeroed bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use zerocopy::FromZeroes;
+ /// # use zerocopy_derive::*;
+ /// #
+ /// #[derive(FromZeroes)]
+ /// #[repr(C)]
+ /// struct PacketHeader {
+ /// src_port: [u8; 2],
+ /// dst_port: [u8; 2],
+ /// length: [u8; 2],
+ /// checksum: [u8; 2],
+ /// }
+ ///
+ /// let header: PacketHeader = FromZeroes::new_zeroed();
+ ///
+ /// assert_eq!(header.src_port, [0, 0]);
+ /// assert_eq!(header.dst_port, [0, 0]);
+ /// assert_eq!(header.length, [0, 0]);
+ /// assert_eq!(header.checksum, [0, 0]);
+ /// ```
+ #[inline(always)]
+ fn new_zeroed() -> Self
+ where
+ Self: Sized,
+ {
+ // SAFETY: `FromZeroes` says that the all-zeroes bit pattern is legal.
+ unsafe { mem::zeroed() }
+ }
+
+ /// Creates a `Box<Self>` from zeroed bytes.
+ ///
+ /// This function is useful for allocating large values on the heap and
+ /// zero-initializing them, without ever creating a temporary instance of
+ /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
+ /// will allocate `[u8; 1048576]` directly on the heap; it does not require
+ /// storing `[u8; 1048576]` in a temporary variable on the stack.
+ ///
+ /// On systems that use a heap implementation that supports allocating from
+ /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
+ /// have performance benefits.
+ ///
+ /// Note that `Box<Self>` can be converted to `Arc<Self>` and other
+ /// container types without reallocation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if allocation of `size_of::<Self>()` bytes fails.
+ #[cfg(feature = "alloc")]
+ #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+ #[inline]
+ fn new_box_zeroed() -> Box<Self>
+ where
+ Self: Sized,
+ {
+ // If `T` is a ZST, then return a proper boxed instance of it. There is
+ // no allocation, but `Box` does require a correct dangling pointer.
+ let layout = Layout::new::<Self>();
+ if layout.size() == 0 {
+ return Box::new(Self::new_zeroed());
+ }
+
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
+ if ptr.is_null() {
+ alloc::alloc::handle_alloc_error(layout);
+ }
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ Box::from_raw(ptr)
+ }
+ }
+
+ /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
+ ///
+ /// This function is useful for allocating large values of `[Self]` on the
+ /// heap and zero-initializing them, without ever creating a temporary
+ /// instance of `[Self; _]` on the stack.
For example,
+ /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
+ /// the heap; it does not require storing the slice on the stack.
+ ///
+ /// On systems that use a heap implementation that supports allocating from
+ /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
+ /// benefits.
+ ///
+ /// If `Self` is a zero-sized type, then this function will return a
+ /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
+ /// actual information, but its `len()` property will report the correct
+ /// value.
+ ///
+ /// # Panics
+ ///
+ /// * Panics if `size_of::<Self>() * len` overflows.
+ /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
+ #[cfg(feature = "alloc")]
+ #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+ #[inline]
+ fn new_box_slice_zeroed(len: usize) -> Box<[Self]>
+ where
+ Self: Sized,
+ {
+ let size = mem::size_of::<Self>()
+ .checked_mul(len)
+ .expect("mem::size_of::<Self>() * len overflows `usize`");
+ let align = mem::align_of::<Self>();
+ // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a
+ // bug in which sufficiently-large allocations (those which, when
+ // rounded up to the alignment, overflow `isize`) are not rejected,
+ // which can cause undefined behavior. See #64 for details.
+ //
+ // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
+ #[allow(clippy::as_conversions)]
+ let max_alloc = (isize::MAX as usize).saturating_sub(align);
+ assert!(size <= max_alloc);
+ // TODO(https://github.com/rust-lang/rust/issues/55724): Use
+ // `Layout::repeat` once it's stabilized.
+ let layout =
+ Layout::from_size_align(size, align).expect("total allocation size overflows `isize`");
+
+ let ptr = if layout.size() != 0 {
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
+ if ptr.is_null() {
+ alloc::alloc::handle_alloc_error(layout);
+ }
+ ptr
+ } else {
+ // `Box<[T]>` does not allocate when `T` is zero-sized or when `len`
+ // is zero, but it does require a non-null dangling pointer for its
+ // allocation.
+ NonNull::<Self>::dangling().as_ptr()
+ };
+
+ // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
+ #[allow(clippy::undocumented_unsafe_blocks)]
+ unsafe {
+ Box::from_raw(slice::from_raw_parts_mut(ptr, len))
+ }
+ }
+
+ /// Creates a `Vec<Self>` from zeroed bytes.
+ ///
+ /// This function is useful for allocating large values of `Vec`s and
+ /// zero-initializing them, without ever creating a temporary instance of
+ /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
+ /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
+ /// heap; it does not require storing intermediate values on the stack.
+ ///
+ /// On systems that use a heap implementation that supports allocating from
+ /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
+ ///
+ /// If `Self` is a zero-sized type, then this function will return a
+ /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
+ /// actual information, but its `len()` property will report the correct
+ /// value.
+ ///
+ /// # Panics
+ ///
+ /// * Panics if `size_of::<Self>() * len` overflows.
+ /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
+ #[cfg(feature = "alloc")]
+ #[cfg_attr(doc_cfg, doc(cfg(feature = "new_vec_zeroed")))]
+ #[inline(always)]
+ fn new_vec_zeroed(len: usize) -> Vec<Self>
+ where
+ Self: Sized,
+ {
+ Self::new_box_slice_zeroed(len).into()
+ }
+}
+
+/// Analyzes whether a type is [`FromBytes`].
+/// +/// This derive analyzes, at compile time, whether the annotated type satisfies +/// the [safety conditions] of `FromBytes` and implements `FromBytes` if it is +/// sound to do so. This derive can be applied to structs, enums, and unions; +/// e.g.: +/// +/// ``` +/// # use zerocopy_derive::{FromBytes, FromZeroes}; +/// #[derive(FromZeroes, FromBytes)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromZeroes, FromBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E, +/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D, +/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C, +/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B, +/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A, +/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59, +/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68, +/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77, +/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86, +/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95, +/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4, +/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3, +/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2, +/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1, +/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0, +/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF, +/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE, +/// # VFF, +/// # /* +/// ... 
+/// # */ +/// } +/// +/// #[derive(FromZeroes, FromBytes)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// [safety conditions]: trait@FromBytes#safety +/// +/// # Analysis +/// +/// *This section describes, roughly, the analysis performed by this derive to +/// determine whether it is sound to implement `FromBytes` for a given type. +/// Unless you are modifying the implementation of this derive, or attempting to +/// manually implement `FromBytes` for a type yourself, you don't need to read +/// this section.* +/// +/// If a type has the following properties, then this derive can implement +/// `FromBytes` for that type: +/// +/// - If the type is a struct, all of its fields must be `FromBytes`. +/// - If the type is an enum: +/// - It must be a C-like enum (meaning that all variants have no fields). +/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`, +/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`). +/// - The maximum number of discriminants must be used (so that every possible +/// bit pattern is a valid one). Be very careful when using the `C`, +/// `usize`, or `isize` representations, as their size is +/// platform-dependent. +/// - The type must not contain any [`UnsafeCell`]s (this is required in order +/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of +/// memory). The type may contain references or pointers to `UnsafeCell`s so +/// long as those values can themselves be initialized from zeroes +/// (`FromBytes` is not currently implemented for, e.g., `Option<*const +/// UnsafeCell<_>>`, but it could be one day). +/// +/// [`UnsafeCell`]: core::cell::UnsafeCell +/// +/// This analysis is subject to change. Unsafe code may *only* rely on the +/// documented [safety conditions] of `FromBytes`, and must *not* rely on the +/// implementation details of this derive. +/// +/// ## Why isn't an explicit representation required for structs? 
+/// +/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires +/// that structs are marked with `#[repr(C)]`. +/// +/// Per the [Rust reference](reference), +/// +/// > The representation of a type can change the padding between fields, but +/// > does not change the layout of the fields themselves. +/// +/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations +/// +/// Since the layout of structs only consists of padding bytes and field bytes, +/// a struct is soundly `FromBytes` if: +/// 1. its padding is soundly `FromBytes`, and +/// 2. its fields are soundly `FromBytes`. +/// +/// The answer to the first question is always yes: padding bytes do not have +/// any validity constraints. A [discussion] of this question in the Unsafe Code +/// Guidelines Working Group concluded that it would be virtually unimaginable +/// for future versions of rustc to add validity constraints to padding bytes. +/// +/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174 +/// +/// Whether a struct is soundly `FromBytes` therefore solely depends on whether +/// its fields are `FromBytes`. +// TODO(#146): Document why we don't require an enum to have an explicit `repr` +// attribute. +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::FromBytes; + +/// Types for which any bit pattern is valid. +/// +/// Any memory region of the appropriate length which contains initialized bytes +/// can be viewed as any `FromBytes` type with no runtime overhead. This is +/// useful for efficiently parsing bytes as structured data. +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(FromBytes)]`][derive] (requires the `derive` Cargo feature); +/// e.g.: +/// +/// ``` +/// # use zerocopy_derive::{FromBytes, FromZeroes}; +/// #[derive(FromZeroes, FromBytes)] +/// struct MyStruct { +/// # /* +/// ... 
+/// # */ +/// } +/// +/// #[derive(FromZeroes, FromBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E, +/// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D, +/// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C, +/// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B, +/// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A, +/// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59, +/// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68, +/// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77, +/// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86, +/// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95, +/// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4, +/// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3, +/// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2, +/// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1, +/// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0, +/// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF, +/// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE, +/// # VFF, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(FromZeroes, FromBytes)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// This derive performs a sophisticated, compile-time safety analysis to +/// determine whether a type is `FromBytes`. +/// +/// # Safety +/// +/// *This section describes what is required in order for `T: FromBytes`, and +/// what unsafe code may assume of such types. 
If you don't plan on implementing +/// `FromBytes` manually, and you don't plan on writing unsafe code that +/// operates on `FromBytes` types, then you don't need to read this section.* +/// +/// If `T: FromBytes`, then unsafe code may assume that: +/// - It is sound to treat any initialized sequence of bytes of length +/// `size_of::()` as a `T`. +/// - Given `b: &[u8]` where `b.len() == size_of::()`, `b` is aligned to +/// `align_of::()` it is sound to construct a `t: &T` at the same address +/// as `b`, and it is sound for both `b` and `t` to be live at the same time. +/// +/// If a type is marked as `FromBytes` which violates this contract, it may +/// cause undefined behavior. +/// +/// `#[derive(FromBytes)]` only permits [types which satisfy these +/// requirements][derive-analysis]. +/// +#[cfg_attr( + feature = "derive", + doc = "[derive]: zerocopy_derive::FromBytes", + doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis" +)] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"), + doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"), +)] +pub unsafe trait FromBytes: FromZeroes { + // The `Self: Sized` bound makes it so that `FromBytes` is still object + // safe. + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// Interprets the given `bytes` as a `&Self` without copying. + /// + /// If `bytes.len() != size_of::()` or `bytes` is not aligned to + /// `align_of::()`, this returns `None`. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These bytes encode a `PacketHeader`. + /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice(); + /// + /// let header = PacketHeader::ref_from(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// ``` + #[inline] + fn ref_from(bytes: &[u8]) -> Option<&Self> + where + Self: Sized, + { + Ref::<&[u8], Self>::new(bytes).map(Ref::into_ref) + } + + /// Interprets the prefix of the given `bytes` as a `&Self` without copying. + /// + /// `ref_from_prefix` returns a reference to the first `size_of::()` + /// bytes of `bytes`. If `bytes.len() < size_of::()` or `bytes` is not + /// aligned to `align_of::()`, this returns `None`. + /// + /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use + /// [`Ref::into_ref`] to get a `&Self` with the same lifetime. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketHeader`. 
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); + /// + /// let header = PacketHeader::ref_from_prefix(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// ``` + #[inline] + fn ref_from_prefix(bytes: &[u8]) -> Option<&Self> + where + Self: Sized, + { + Ref::<&[u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_ref()) + } + + /// Interprets the suffix of the given `bytes` as a `&Self` without copying. + /// + /// `ref_from_suffix` returns a reference to the last `size_of::()` + /// bytes of `bytes`. If `bytes.len() < size_of::()` or the suffix of + /// `bytes` is not aligned to `align_of::()`, this returns `None`. + /// + /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then, use + /// [`Ref::into_ref`] to get a `&Self` with the same lifetime. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketTrailer { + /// frame_check_sequence: [u8; 4], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketTrailer`. + /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); + /// + /// let trailer = PacketTrailer::ref_from_suffix(bytes).unwrap(); + /// + /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); + /// ``` + #[inline] + fn ref_from_suffix(bytes: &[u8]) -> Option<&Self> + where + Self: Sized, + { + Ref::<&[u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_ref()) + } + + /// Interprets the given `bytes` as a `&mut Self` without copying. + /// + /// If `bytes.len() != size_of::()` or `bytes` is not aligned to + /// `align_of::()`, this returns `None`. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(AsBytes, FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These bytes encode a `PacketHeader`. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..]; + /// + /// let header = PacketHeader::mut_from(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// + /// header.checksum = [0, 0]; + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]); + /// ``` + #[inline] + fn mut_from(bytes: &mut [u8]) -> Option<&mut Self> + where + Self: Sized + AsBytes, + { + Ref::<&mut [u8], Self>::new(bytes).map(Ref::into_mut) + } + + /// Interprets the prefix of the given `bytes` as a `&mut Self` without + /// copying. + /// + /// `mut_from_prefix` returns a reference to the first `size_of::()` + /// bytes of `bytes`. If `bytes.len() < size_of::()` or `bytes` is not + /// aligned to `align_of::()`, this returns `None`. + /// + /// To also access the prefix bytes, use [`Ref::new_from_prefix`]. Then, use + /// [`Ref::into_mut`] to get a `&mut Self` with the same lifetime. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(AsBytes, FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketHeader`. 
+ /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let header = PacketHeader::mut_from_prefix(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// + /// header.checksum = [0, 0]; + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 8, 9]); + /// ``` + #[inline] + fn mut_from_prefix(bytes: &mut [u8]) -> Option<&mut Self> + where + Self: Sized + AsBytes, + { + Ref::<&mut [u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_mut()) + } + + /// Interprets the suffix of the given `bytes` as a `&mut Self` without copying. + /// + /// `mut_from_suffix` returns a reference to the last `size_of::()` + /// bytes of `bytes`. If `bytes.len() < size_of::()` or the suffix of + /// `bytes` is not aligned to `align_of::()`, this returns `None`. + /// + /// To also access the suffix bytes, use [`Ref::new_from_suffix`]. Then, + /// use [`Ref::into_mut`] to get a `&mut Self` with the same lifetime. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(AsBytes, FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketTrailer { + /// frame_check_sequence: [u8; 4], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketTrailer`. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let trailer = PacketTrailer::mut_from_suffix(bytes).unwrap(); + /// + /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); + /// + /// trailer.frame_check_sequence = [0, 0, 0, 0]; + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]); + /// ``` + #[inline] + fn mut_from_suffix(bytes: &mut [u8]) -> Option<&mut Self> + where + Self: Sized + AsBytes, + { + Ref::<&mut [u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_mut()) + } + + /// Interprets the given `bytes` as a `&[Self]` without copying. 
+ /// + /// If `bytes.len() % size_of::() != 0` or `bytes` is not aligned to + /// `align_of::()`, this returns `None`. + /// + /// If you need to convert a specific number of slice elements, see + /// [`slice_from_prefix`](FromBytes::slice_from_prefix) or + /// [`slice_from_suffix`](FromBytes::slice_from_suffix). + /// + /// # Panics + /// + /// If `Self` is a zero-sized type. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These bytes encode two `Pixel`s. + /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice(); + /// + /// let pixels = Pixel::slice_from(bytes).unwrap(); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 0, g: 1, b: 2, a: 3 }, + /// Pixel { r: 4, g: 5, b: 6, a: 7 }, + /// ]); + /// ``` + #[inline] + fn slice_from(bytes: &[u8]) -> Option<&[Self]> + where + Self: Sized, + { + Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_slice()) + } + + /// Interprets the prefix of the given `bytes` as a `&[Self]` with length + /// equal to `count` without copying. + /// + /// This method verifies that `bytes.len() >= size_of::() * count` + /// and that `bytes` is aligned to `align_of::()`. It consumes the + /// first `size_of::() * count` bytes from `bytes` to construct a + /// `&[Self]`, and returns the remaining bytes to the caller. It also + /// ensures that `sizeof::() * count` does not overflow a `usize`. + /// If any of the length, alignment, or overflow checks fail, it returns + /// `None`. + /// + /// # Panics + /// + /// If `T` is a zero-sized type. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These are more bytes than are needed to encode two `Pixel`s. + /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); + /// + /// let (pixels, rest) = Pixel::slice_from_prefix(bytes, 2).unwrap(); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 0, g: 1, b: 2, a: 3 }, + /// Pixel { r: 4, g: 5, b: 6, a: 7 }, + /// ]); + /// + /// assert_eq!(rest, &[8, 9]); + /// ``` + #[inline] + fn slice_from_prefix(bytes: &[u8], count: usize) -> Option<(&[Self], &[u8])> + where + Self: Sized, + { + Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_slice(), b)) + } + + /// Interprets the suffix of the given `bytes` as a `&[Self]` with length + /// equal to `count` without copying. + /// + /// This method verifies that `bytes.len() >= size_of::() * count` + /// and that `bytes` is aligned to `align_of::()`. It consumes the + /// last `size_of::() * count` bytes from `bytes` to construct a + /// `&[Self]`, and returns the preceding bytes to the caller. It also + /// ensures that `sizeof::() * count` does not overflow a `usize`. + /// If any of the length, alignment, or overflow checks fail, it returns + /// `None`. + /// + /// # Panics + /// + /// If `T` is a zero-sized type. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These are more bytes than are needed to encode two `Pixel`s. 
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); + /// + /// let (rest, pixels) = Pixel::slice_from_suffix(bytes, 2).unwrap(); + /// + /// assert_eq!(rest, &[0, 1]); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 2, g: 3, b: 4, a: 5 }, + /// Pixel { r: 6, g: 7, b: 8, a: 9 }, + /// ]); + /// ``` + #[inline] + fn slice_from_suffix(bytes: &[u8], count: usize) -> Option<(&[u8], &[Self])> + where + Self: Sized, + { + Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_slice())) + } + + /// Interprets the given `bytes` as a `&mut [Self]` without copying. + /// + /// If `bytes.len() % size_of::() != 0` or `bytes` is not aligned to + /// `align_of::()`, this returns `None`. + /// + /// If you need to convert a specific number of slice elements, see + /// [`mut_slice_from_prefix`](FromBytes::mut_slice_from_prefix) or + /// [`mut_slice_from_suffix`](FromBytes::mut_slice_from_suffix). + /// + /// # Panics + /// + /// If `T` is a zero-sized type. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(AsBytes, FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These bytes encode two `Pixel`s. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..]; + /// + /// let pixels = Pixel::mut_slice_from(bytes).unwrap(); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 0, g: 1, b: 2, a: 3 }, + /// Pixel { r: 4, g: 5, b: 6, a: 7 }, + /// ]); + /// + /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]); + /// ``` + #[inline] + fn mut_slice_from(bytes: &mut [u8]) -> Option<&mut [Self]> + where + Self: Sized + AsBytes, + { + Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_mut_slice()) + } + + /// Interprets the prefix of the given `bytes` as a `&mut [Self]` with length + /// equal to `count` without copying. 
+ /// + /// This method verifies that `bytes.len() >= size_of::() * count` + /// and that `bytes` is aligned to `align_of::()`. It consumes the + /// first `size_of::() * count` bytes from `bytes` to construct a + /// `&[Self]`, and returns the remaining bytes to the caller. It also + /// ensures that `sizeof::() * count` does not overflow a `usize`. + /// If any of the length, alignment, or overflow checks fail, it returns + /// `None`. + /// + /// # Panics + /// + /// If `T` is a zero-sized type. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(AsBytes, FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These are more bytes than are needed to encode two `Pixel`s. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (pixels, rest) = Pixel::mut_slice_from_prefix(bytes, 2).unwrap(); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 0, g: 1, b: 2, a: 3 }, + /// Pixel { r: 4, g: 5, b: 6, a: 7 }, + /// ]); + /// + /// assert_eq!(rest, &[8, 9]); + /// + /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 8, 9]); + /// ``` + #[inline] + fn mut_slice_from_prefix(bytes: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])> + where + Self: Sized + AsBytes, + { + Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_mut_slice(), b)) + } + + /// Interprets the suffix of the given `bytes` as a `&mut [Self]` with length + /// equal to `count` without copying. + /// + /// This method verifies that `bytes.len() >= size_of::() * count` + /// and that `bytes` is aligned to `align_of::()`. It consumes the + /// last `size_of::() * count` bytes from `bytes` to construct a + /// `&[Self]`, and returns the preceding bytes to the caller. 
It also + /// ensures that `sizeof::() * count` does not overflow a `usize`. + /// If any of the length, alignment, or overflow checks fail, it returns + /// `None`. + /// + /// # Panics + /// + /// If `T` is a zero-sized type. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Debug, PartialEq, Eq)] + /// #[derive(AsBytes, FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct Pixel { + /// r: u8, + /// g: u8, + /// b: u8, + /// a: u8, + /// } + /// + /// // These are more bytes than are needed to encode two `Pixel`s. + /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..]; + /// + /// let (rest, pixels) = Pixel::mut_slice_from_suffix(bytes, 2).unwrap(); + /// + /// assert_eq!(rest, &[0, 1]); + /// + /// assert_eq!(pixels, &[ + /// Pixel { r: 2, g: 3, b: 4, a: 5 }, + /// Pixel { r: 6, g: 7, b: 8, a: 9 }, + /// ]); + /// + /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 }; + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]); + /// ``` + #[inline] + fn mut_slice_from_suffix(bytes: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])> + where + Self: Sized + AsBytes, + { + Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_mut_slice())) + } + + /// Reads a copy of `Self` from `bytes`. + /// + /// If `bytes.len() != size_of::()`, `read_from` returns `None`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These bytes encode a `PacketHeader`. 
+ /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice(); + /// + /// let header = PacketHeader::read_from(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// ``` + #[inline] + fn read_from(bytes: &[u8]) -> Option + where + Self: Sized, + { + Ref::<_, Unalign>::new_unaligned(bytes).map(|r| r.read().into_inner()) + } + + /// Reads a copy of `Self` from the prefix of `bytes`. + /// + /// `read_from_prefix` reads a `Self` from the first `size_of::()` + /// bytes of `bytes`. If `bytes.len() < size_of::()`, it returns + /// `None`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketHeader`. + /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); + /// + /// let header = PacketHeader::read_from_prefix(bytes).unwrap(); + /// + /// assert_eq!(header.src_port, [0, 1]); + /// assert_eq!(header.dst_port, [2, 3]); + /// assert_eq!(header.length, [4, 5]); + /// assert_eq!(header.checksum, [6, 7]); + /// ``` + #[inline] + fn read_from_prefix(bytes: &[u8]) -> Option + where + Self: Sized, + { + Ref::<_, Unalign>::new_unaligned_from_prefix(bytes) + .map(|(r, _)| r.read().into_inner()) + } + + /// Reads a copy of `Self` from the suffix of `bytes`. + /// + /// `read_from_suffix` reads a `Self` from the last `size_of::()` + /// bytes of `bytes`. If `bytes.len() < size_of::()`, it returns + /// `None`. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::FromBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketTrailer { + /// frame_check_sequence: [u8; 4], + /// } + /// + /// // These are more bytes than are needed to encode a `PacketTrailer`. + /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice(); + /// + /// let trailer = PacketTrailer::read_from_suffix(bytes).unwrap(); + /// + /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]); + /// ``` + #[inline] + fn read_from_suffix(bytes: &[u8]) -> Option + where + Self: Sized, + { + Ref::<_, Unalign>::new_unaligned_from_suffix(bytes) + .map(|(_, r)| r.read().into_inner()) + } +} + +/// Analyzes whether a type is [`AsBytes`]. +/// +/// This derive analyzes, at compile time, whether the annotated type satisfies +/// the [safety conditions] of `AsBytes` and implements `AsBytes` if it is +/// sound to do so. This derive can be applied to structs, enums, and unions; +/// e.g.: +/// +/// ``` +/// # use zerocopy_derive::{AsBytes}; +/// #[derive(AsBytes)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(AsBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(AsBytes)] +/// #[repr(C)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... 
+/// # */ +/// } +/// ``` +/// +/// [safety conditions]: trait@AsBytes#safety +/// +/// # Error Messages +/// +/// Due to the way that the custom derive for `AsBytes` is implemented, you may +/// get an error like this: +/// +/// ```text +/// error[E0277]: the trait bound `HasPadding: ShouldBe` is not satisfied +/// --> lib.rs:23:10 +/// | +/// 1 | #[derive(AsBytes)] +/// | ^^^^^^^ the trait `ShouldBe` is not implemented for `HasPadding` +/// | +/// = help: the trait `ShouldBe` is implemented for `HasPadding` +/// ``` +/// +/// This error indicates that the type being annotated has padding bytes, which +/// is illegal for `AsBytes` types. Consider reducing the alignment of some +/// fields by using types in the [`byteorder`] module, adding explicit struct +/// fields where those padding bytes would be, or using `#[repr(packed)]`. See +/// the Rust Reference's page on [type layout] for more information +/// about type layout and padding. +/// +/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html +/// +/// # Analysis +/// +/// *This section describes, roughly, the analysis performed by this derive to +/// determine whether it is sound to implement `AsBytes` for a given type. +/// Unless you are modifying the implementation of this derive, or attempting to +/// manually implement `AsBytes` for a type yourself, you don't need to read +/// this section.* +/// +/// If a type has the following properties, then this derive can implement +/// `AsBytes` for that type: +/// +/// - If the type is a struct: +/// - It must have a defined representation (`repr(C)`, `repr(transparent)`, +/// or `repr(packed)`). +/// - All of its fields must be `AsBytes`. +/// - Its layout must have no padding. This is always true for +/// `repr(transparent)` and `repr(packed)`. For `repr(C)`, see the layout +/// algorithm described in the [Rust Reference]. +/// - If the type is an enum: +/// - It must be a C-like enum (meaning that all variants have no fields). 
+/// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`, +/// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`). +/// - The type must not contain any [`UnsafeCell`]s (this is required in order +/// for it to be sound to construct a `&[u8]` and a `&T` to the same region of +/// memory). The type may contain references or pointers to `UnsafeCell`s so +/// long as those values can themselves be initialized from zeroes (`AsBytes` +/// is not currently implemented for, e.g., `Option<&UnsafeCell<_>>`, but it +/// could be one day). +/// +/// [`UnsafeCell`]: core::cell::UnsafeCell +/// +/// This analysis is subject to change. Unsafe code may *only* rely on the +/// documented [safety conditions] of `FromBytes`, and must *not* rely on the +/// implementation details of this derive. +/// +/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html +#[cfg(any(feature = "derive", test))] +#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))] +pub use zerocopy_derive::AsBytes; + +/// Types that can be viewed as an immutable slice of initialized bytes. +/// +/// Any `AsBytes` type can be viewed as a slice of initialized bytes of the same +/// size. This is useful for efficiently serializing structured data as raw +/// bytes. +/// +/// # Implementation +/// +/// **Do not implement this trait yourself!** Instead, use +/// [`#[derive(AsBytes)]`][derive] (requires the `derive` Cargo feature); e.g.: +/// +/// ``` +/// # use zerocopy_derive::AsBytes; +/// #[derive(AsBytes)] +/// #[repr(C)] +/// struct MyStruct { +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(AsBytes)] +/// #[repr(u8)] +/// enum MyEnum { +/// # Variant0, +/// # /* +/// ... +/// # */ +/// } +/// +/// #[derive(AsBytes)] +/// #[repr(C)] +/// union MyUnion { +/// # variant: u8, +/// # /* +/// ... +/// # */ +/// } +/// ``` +/// +/// This derive performs a sophisticated, compile-time safety analysis to +/// determine whether a type is `AsBytes`. 
See the [derive +/// documentation][derive] for guidance on how to interpret error messages +/// produced by the derive's analysis. +/// +/// # Safety +/// +/// *This section describes what is required in order for `T: AsBytes`, and +/// what unsafe code may assume of such types. If you don't plan on implementing +/// `AsBytes` manually, and you don't plan on writing unsafe code that +/// operates on `AsBytes` types, then you don't need to read this section.* +/// +/// If `T: AsBytes`, then unsafe code may assume that: +/// - It is sound to treat any `t: T` as an immutable `[u8]` of length +/// `size_of_val(t)`. +/// - Given `t: &T`, it is sound to construct a `b: &[u8]` where `b.len() == +/// size_of_val(t)` at the same address as `t`, and it is sound for both `b` +/// and `t` to be live at the same time. +/// +/// If a type is marked as `AsBytes` which violates this contract, it may cause +/// undefined behavior. +/// +/// `#[derive(AsBytes)]` only permits [types which satisfy these +/// requirements][derive-analysis]. +/// +#[cfg_attr( + feature = "derive", + doc = "[derive]: zerocopy_derive::AsBytes", + doc = "[derive-analysis]: zerocopy_derive::AsBytes#analysis" +)] +#[cfg_attr( + not(feature = "derive"), + doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html"), + doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.AsBytes.html#analysis"), +)] +pub unsafe trait AsBytes { + // The `Self: Sized` bound makes it so that this function doesn't prevent + // `AsBytes` from being object safe. Note that other `AsBytes` methods + // prevent object safety, but those provide a benefit in exchange for object + // safety. If at some point we remove those methods, change their type + // signatures, or move them out of this trait so that `AsBytes` is object + // safe again, it's important that this function not prevent object safety. 
+ #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; + + /// Gets the bytes of this value. + /// + /// `as_bytes` provides access to the bytes of this value as an immutable + /// byte slice. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::AsBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(AsBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let bytes = header.as_bytes(); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); + /// ``` + #[inline(always)] + fn as_bytes(&self) -> &[u8] { + // Note that this method does not have a `Self: Sized` bound; + // `size_of_val` works for unsized values too. + let len = mem::size_of_val(self); + let slf: *const Self = self; + + // SAFETY: + // - `slf.cast::()` is valid for reads for `len * + // mem::size_of::()` many bytes because... + // - `slf` is the same pointer as `self`, and `self` is a reference + // which points to an object whose size is `len`. Thus... + // - The entire region of `len` bytes starting at `slf` is contained + // within a single allocation. + // - `slf` is non-null. + // - `slf` is trivially aligned to `align_of::() == 1`. + // - `Self: AsBytes` ensures that all of the bytes of `slf` are + // initialized. + // - Since `slf` is derived from `self`, and `self` is an immutable + // reference, the only other references to this memory region that + // could exist are other immutable references, and those don't allow + // mutation. `AsBytes` prohibits types which contain `UnsafeCell`s, + // which are the only types for which this rule wouldn't be sufficient. 
+ // - The total size of the resulting slice is no larger than + // `isize::MAX` because no allocation produced by safe code can be + // larger than `isize::MAX`. + // + // TODO(#429): Add references to docs and quotes. + unsafe { slice::from_raw_parts(slf.cast::(), len) } + } + + /// Gets the bytes of this value mutably. + /// + /// `as_bytes_mut` provides access to the bytes of this value as a mutable + /// byte slice. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::AsBytes; + /// # use zerocopy_derive::*; + /// + /// # #[derive(Eq, PartialEq, Debug)] + /// #[derive(AsBytes, FromZeroes, FromBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let mut header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let bytes = header.as_bytes_mut(); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); + /// + /// bytes.reverse(); + /// + /// assert_eq!(header, PacketHeader { + /// src_port: [7, 6], + /// dst_port: [5, 4], + /// length: [3, 2], + /// checksum: [1, 0], + /// }); + /// ``` + #[inline(always)] + fn as_bytes_mut(&mut self) -> &mut [u8] + where + Self: FromBytes, + { + // Note that this method does not have a `Self: Sized` bound; + // `size_of_val` works for unsized values too. + let len = mem::size_of_val(self); + let slf: *mut Self = self; + + // SAFETY: + // - `slf.cast::()` is valid for reads and writes for `len * + // mem::size_of::()` many bytes because... + // - `slf` is the same pointer as `self`, and `self` is a reference + // which points to an object whose size is `len`. Thus... + // - The entire region of `len` bytes starting at `slf` is contained + // within a single allocation. + // - `slf` is non-null. + // - `slf` is trivially aligned to `align_of::() == 1`. 
+ // - `Self: AsBytes` ensures that all of the bytes of `slf` are + // initialized. + // - `Self: FromBytes` ensures that no write to this memory region + // could result in it containing an invalid `Self`. + // - Since `slf` is derived from `self`, and `self` is a mutable + // reference, no other references to this memory region can exist. + // - The total size of the resulting slice is no larger than + // `isize::MAX` because no allocation produced by safe code can be + // larger than `isize::MAX`. + // + // TODO(#429): Add references to docs and quotes. + unsafe { slice::from_raw_parts_mut(slf.cast::(), len) } + } + + /// Writes a copy of `self` to `bytes`. + /// + /// If `bytes.len() != size_of_val(self)`, `write_to` returns `None`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::AsBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(AsBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0]; + /// + /// header.write_to(&mut bytes[..]); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]); + /// ``` + /// + /// If too many or too few target bytes are provided, `write_to` returns + /// `None` and leaves the target bytes unmodified: + /// + /// ``` + /// # use zerocopy::AsBytes; + /// # let header = u128::MAX; + /// let mut excessive_bytes = &mut [0u8; 128][..]; + /// + /// let write_result = header.write_to(excessive_bytes); + /// + /// assert!(write_result.is_none()); + /// assert_eq!(excessive_bytes, [0u8; 128]); + /// ``` + #[inline] + fn write_to(&self, bytes: &mut [u8]) -> Option<()> { + if bytes.len() != mem::size_of_val(self) { + return None; + } + + bytes.copy_from_slice(self.as_bytes()); + Some(()) + } + + /// Writes a copy of `self` to the 
prefix of `bytes`. + /// + /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes + /// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`. + /// + /// # Examples + /// + /// ``` + /// use zerocopy::AsBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(AsBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + /// + /// header.write_to_prefix(&mut bytes[..]); + /// + /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]); + /// ``` + /// + /// If insufficient target bytes are provided, `write_to_prefix` returns + /// `None` and leaves the target bytes unmodified: + /// + /// ``` + /// # use zerocopy::AsBytes; + /// # let header = u128::MAX; + /// let mut insufficent_bytes = &mut [0, 0][..]; + /// + /// let write_result = header.write_to_suffix(insufficent_bytes); + /// + /// assert!(write_result.is_none()); + /// assert_eq!(insufficent_bytes, [0, 0]); + /// ``` + #[inline] + fn write_to_prefix(&self, bytes: &mut [u8]) -> Option<()> { + let size = mem::size_of_val(self); + bytes.get_mut(..size)?.copy_from_slice(self.as_bytes()); + Some(()) + } + + /// Writes a copy of `self` to the suffix of `bytes`. + /// + /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of + /// `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`. 
+ /// + /// # Examples + /// + /// ``` + /// use zerocopy::AsBytes; + /// # use zerocopy_derive::*; + /// + /// #[derive(AsBytes)] + /// #[repr(C)] + /// struct PacketHeader { + /// src_port: [u8; 2], + /// dst_port: [u8; 2], + /// length: [u8; 2], + /// checksum: [u8; 2], + /// } + /// + /// let header = PacketHeader { + /// src_port: [0, 1], + /// dst_port: [2, 3], + /// length: [4, 5], + /// checksum: [6, 7], + /// }; + /// + /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; + /// + /// header.write_to_suffix(&mut bytes[..]); + /// + /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); + /// + /// let mut insufficent_bytes = &mut [0, 0][..]; + /// + /// let write_result = header.write_to_suffix(insufficent_bytes); + /// + /// assert!(write_result.is_none()); + /// assert_eq!(insufficent_bytes, [0, 0]); + /// ``` + /// + /// If insufficient target bytes are provided, `write_to_suffix` returns + /// `None` and leaves the target bytes unmodified: + /// + /// ``` + /// # use zerocopy::AsBytes; + /// # let header = u128::MAX; + /// let mut insufficent_bytes = &mut [0, 0][..]; + /// + /// let write_result = header.write_to_suffix(insufficent_bytes); + /// + /// assert!(write_result.is_none()); + /// assert_eq!(insufficent_bytes, [0, 0]); + /// ``` + #[inline] + fn write_to_suffix(&self, bytes: &mut [u8]) -> Option<()> { + let start = bytes.len().checked_sub(mem::size_of_val(self))?; + bytes + .get_mut(start..) + .expect("`start` should be in-bounds of `bytes`") + .copy_from_slice(self.as_bytes()); + Some(()) + } +} + +/// Types with no alignment requirement. +/// +/// WARNING: Do not implement this trait yourself! Instead, use +/// `#[derive(Unaligned)]` (requires the `derive` Cargo feature). +/// +/// If `T: Unaligned`, then `align_of::() == 1`. +/// +/// # Safety +/// +/// *This section describes what is required in order for `T: Unaligned`, and +/// what unsafe code may assume of such types. 
`#[derive(Unaligned)]` only +/// permits types which satisfy these requirements. If you don't plan on +/// implementing `Unaligned` manually, and you don't plan on writing unsafe code +/// that operates on `Unaligned` types, then you don't need to read this +/// section.* +/// +/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a +/// reference to `T` at any memory location regardless of alignment. If a type +/// is marked as `Unaligned` which violates this contract, it may cause +/// undefined behavior. +pub unsafe trait Unaligned { + // The `Self: Sized` bound makes it so that `Unaligned` is still object + // safe. + #[doc(hidden)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized; +} + +safety_comment! { + /// SAFETY: + /// Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a + /// zero-sized type to have a size of 0 and an alignment of 1." + /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There + /// is only one possible sequence of 0 bytes, and `()` is inhabited. + /// - `AsBytes`: Since `()` has size 0, it contains no padding bytes. + /// - `Unaligned`: `()` has alignment 1. + /// + /// [1] https://doc.rust-lang.org/reference/type-layout.html#tuple-layout + unsafe_impl!((): TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_unaligned!(()); +} + +safety_comment! { + /// SAFETY: + /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: all bit + /// patterns are valid for numeric types [1] + /// - `AsBytes`: numeric types have no padding bytes [1] + /// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size + /// of `u8` and `i8` as 1 byte. We also know that: + /// - Alignment is >= 1 [3] + /// - Size is an integer multiple of alignment [4] + /// - The only value >= 1 for which 1 is an integer multiple is 1 + /// Therefore, the only possible alignment for `u8` and `i8` is 1. 
+ /// + /// [1] Per https://doc.rust-lang.org/beta/reference/types/numeric.html#bit-validity: + /// + /// For every numeric type, `T`, the bit validity of `T` is equivalent to + /// the bit validity of `[u8; size_of::()]`. An uninitialized byte is + /// not a valid `u8`. + /// + /// TODO(https://github.com/rust-lang/reference/pull/1392): Once this text + /// is available on the Stable docs, cite those instead. + /// + /// [2] https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout + /// + /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: + /// + /// Alignment is measured in bytes, and must be at least 1. + /// + /// [4] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: + /// + /// The size of a value is always a multiple of its alignment. + /// + /// TODO(#278): Once we've updated the trait docs to refer to `u8`s rather + /// than bits or bytes, update this comment, especially the reference to + /// [1]. + unsafe_impl!(u8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + unsafe_impl!(i8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_unaligned!(u8, i8); + unsafe_impl!(u16: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(i16: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(u32: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(i32: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(u64: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(i64: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(u128: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(i128: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(usize: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(isize: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(f32: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(f64: TryFromBytes, FromZeroes, FromBytes, AsBytes); +} + 
+safety_comment! { + /// SAFETY: + /// - `FromZeroes`: Valid since "[t]he value false has the bit pattern + /// 0x00" [1]. + /// - `AsBytes`: Since "the boolean type has a size and alignment of 1 each" + /// and "The value false has the bit pattern 0x00 and the value true has + /// the bit pattern 0x01" [1]. Thus, the only byte of the bool is always + /// initialized. + /// - `Unaligned`: Per the reference [1], "[a]n object with the boolean type + /// has a size and alignment of 1 each." + /// + /// [1] https://doc.rust-lang.org/reference/types/boolean.html + unsafe_impl!(bool: FromZeroes, AsBytes, Unaligned); + assert_unaligned!(bool); + /// SAFETY: + /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` + /// closure: + /// - Given `t: *mut bool` and `let r = *mut u8`, `r` refers to an object + /// of the same size as that referred to by `t`. This is true because + /// `bool` and `u8` have the same size (1 byte) [1]. + /// - Since the closure takes a `&u8` argument, given a `Ptr<'a, bool>` + /// which satisfies the preconditions of + /// `TryFromBytes::::is_bit_valid`, it must be guaranteed that the + /// memory referenced by that `Ptr` always contains a valid `u8`. Since + /// `bool`'s single byte is always initialized, `is_bit_valid`'s + /// precondition requires that the same is true of its argument. Since + /// `u8`'s only bit validity invariant is that its single byte must be + /// initialized, this memory is guaranteed to contain a valid `u8`. + /// - The alignment of `bool` is equal to the alignment of `u8`. [1] [2] + /// - The impl must only return `true` for its argument if the original + /// `Ptr` refers to a valid `bool`. We only return true if the + /// `u8` value is 0 or 1, and both of these are valid values for `bool`. + /// [3] + /// + /// [1] Per https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout: + /// + /// The size of most primitives is given in this table. 
+ /// + /// | Type | `size_of::() ` | + /// |-----------|----------------------| + /// | `bool` | 1 | + /// | `u8`/`i8` | 1 | + /// + /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: + /// + /// The size of a value is always a multiple of its alignment. + /// + /// [3] Per https://doc.rust-lang.org/reference/types/boolean.html: + /// + /// The value false has the bit pattern 0x00 and the value true has the + /// bit pattern 0x01. + unsafe_impl!(bool: TryFromBytes; |byte: &u8| *byte < 2); +} +safety_comment! { + /// SAFETY: + /// - `FromZeroes`: Per reference [1], "[a] value of type char is a Unicode + /// scalar value (i.e. a code point that is not a surrogate), represented + /// as a 32-bit unsigned word in the 0x0000 to 0xD7FF or 0xE000 to + /// 0x10FFFF range" which contains 0x0000. + /// - `AsBytes`: `char` is per reference [1] "represented as a 32-bit + /// unsigned word" (`u32`) which is `AsBytes`. Note that unlike `u32`, not + /// all bit patterns are valid for `char`. + /// + /// [1] https://doc.rust-lang.org/reference/types/textual.html + unsafe_impl!(char: FromZeroes, AsBytes); + /// SAFETY: + /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` + /// closure: + /// - Given `t: *mut char` and `let r = *mut u32`, `r` refers to an object + /// of the same size as that referred to by `t`. This is true because + /// `char` and `u32` have the same size [1]. + /// - Since the closure takes a `&u32` argument, given a `Ptr<'a, char>` + /// which satisfies the preconditions of + /// `TryFromBytes::::is_bit_valid`, it must be guaranteed that the + /// memory referenced by that `Ptr` always contains a valid `u32`. Since + /// `char`'s bytes are always initialized [2], `is_bit_valid`'s + /// precondition requires that the same is true of its argument. Since + /// `u32`'s only bit validity invariant is that its bytes must be + /// initialized, this memory is guaranteed to contain a valid `u32`. 
+ /// - The alignment of `char` is equal to the alignment of `u32`. [1] + /// - The impl must only return `true` for its argument if the original + /// `Ptr` refers to a valid `char`. `char::from_u32` guarantees + /// that it returns `None` if its input is not a valid `char`. [3] + /// + /// [1] Per https://doc.rust-lang.org/nightly/reference/types/textual.html#layout-and-bit-validity: + /// + /// `char` is guaranteed to have the same size and alignment as `u32` on + /// all platforms. + /// + /// [2] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32: + /// + /// Every byte of a `char` is guaranteed to be initialized. + /// + /// [3] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32: + /// + /// `from_u32()` will return `None` if the input is not a valid value for + /// a `char`. + unsafe_impl!(char: TryFromBytes; |candidate: &u32| char::from_u32(*candidate).is_some()); +} +safety_comment! { + /// SAFETY: + /// - `FromZeroes`, `AsBytes`, `Unaligned`: Per the reference [1], `str` + /// has the same layout as `[u8]`, and `[u8]` is `FromZeroes`, `AsBytes`, + /// and `Unaligned`. + /// + /// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!` + /// uses `align_of`, which only works for `Sized` types. + /// + /// TODO(#429): Add quotes from documentation. + /// + /// [1] https://doc.rust-lang.org/reference/type-layout.html#str-layout + unsafe_impl!(str: FromZeroes, AsBytes, Unaligned); + /// SAFETY: + /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` + /// closure: + /// - Given `t: *mut str` and `let r = *mut [u8]`, `r` refers to an object + /// of the same size as that referred to by `t`. This is true because + /// `str` and `[u8]` have the same representation. 
[1] + /// - Since the closure takes a `&[u8]` argument, given a `Ptr<'a, str>` + /// which satisfies the preconditions of + /// `TryFromBytes::::is_bit_valid`, it must be guaranteed that the + /// memory referenced by that `Ptr` always contains a valid `[u8]`. + /// Since `str`'s bytes are always initialized [1], `is_bit_valid`'s + /// precondition requires that the same is true of its argument. Since + /// `[u8]`'s only bit validity invariant is that its bytes must be + /// initialized, this memory is guaranteed to contain a valid `[u8]`. + /// - The alignment of `str` is equal to the alignment of `[u8]`. [1] + /// - The impl must only return `true` for its argument if the original + /// `Ptr` refers to a valid `str`. `str::from_utf8` guarantees that + /// it returns `Err` if its input is not a valid `str`. [2] + /// + /// [1] Per https://doc.rust-lang.org/reference/types/textual.html: + /// + /// A value of type `str` is represented the same was as `[u8]`. + /// + /// [2] Per https://doc.rust-lang.org/core/str/fn.from_utf8.html#errors: + /// + /// Returns `Err` if the slice is not UTF-8. + unsafe_impl!(str: TryFromBytes; |candidate: &[u8]| core::str::from_utf8(candidate).is_ok()); +} + +safety_comment! { + // `NonZeroXxx` is `AsBytes`, but not `FromZeroes` or `FromBytes`. + // + /// SAFETY: + /// - `AsBytes`: `NonZeroXxx` has the same layout as its associated + /// primitive. Since it is the same size, this guarantees it has no + /// padding - integers have no padding, and there's no room for padding + /// if it can represent all of the same values except 0. + /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that + /// `Option` and `Option` both have size 1. [1] [2] + /// This is worded in a way that makes it unclear whether it's meant as a + /// guarantee, but given the purpose of those types, it's virtually + /// unthinkable that that would ever change. 
`Option` cannot be smaller + /// than its contained type, which implies that, and `NonZeroX8` are of + /// size 1 or 0. `NonZeroX8` can represent multiple states, so they cannot + /// be 0 bytes, which means that they must be 1 byte. The only valid + /// alignment for a 1-byte type is 1. + /// + /// TODO(#429): Add quotes from documentation. + /// + /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html + /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html + /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation + /// that layout is the same as primitive layout. + unsafe_impl!(NonZeroU8: AsBytes, Unaligned); + unsafe_impl!(NonZeroI8: AsBytes, Unaligned); + assert_unaligned!(NonZeroU8, NonZeroI8); + unsafe_impl!(NonZeroU16: AsBytes); + unsafe_impl!(NonZeroI16: AsBytes); + unsafe_impl!(NonZeroU32: AsBytes); + unsafe_impl!(NonZeroI32: AsBytes); + unsafe_impl!(NonZeroU64: AsBytes); + unsafe_impl!(NonZeroI64: AsBytes); + unsafe_impl!(NonZeroU128: AsBytes); + unsafe_impl!(NonZeroI128: AsBytes); + unsafe_impl!(NonZeroUsize: AsBytes); + unsafe_impl!(NonZeroIsize: AsBytes); + /// SAFETY: + /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` + /// closure: + /// - Given `t: *mut NonZeroXxx` and `let r = *mut xxx`, `r` refers to an + /// object of the same size as that referred to by `t`. This is true + /// because `NonZeroXxx` and `xxx` have the same size. [1] + /// - Since the closure takes a `&xxx` argument, given a `Ptr<'a, + /// NonZeroXxx>` which satisfies the preconditions of + /// `TryFromBytes::::is_bit_valid`, it must be guaranteed + /// that the memory referenced by that `Ptr` always contains a valid + /// `xxx`. Since `NonZeroXxx`'s bytes are always initialized [1], + /// `is_bit_valid`'s precondition requires that the same is true of its + /// argument. 
Since `xxx`'s only bit validity invariant is that its + /// bytes must be initialized, this memory is guaranteed to contain a + /// valid `xxx`. + /// - The alignment of `NonZeroXxx` is equal to the alignment of `xxx`. + /// [1] + /// - The impl must only return `true` for its argument if the original + /// `Ptr` refers to a valid `NonZeroXxx`. The only `xxx` + /// which is not also a valid `NonZeroXxx` is 0. [1] + /// + /// [1] Per https://doc.rust-lang.org/core/num/struct.NonZeroU16.html: + /// + /// `NonZeroU16` is guaranteed to have the same layout and bit validity as + /// `u16` with the exception that `0` is not a valid instance. + unsafe_impl!(NonZeroU8: TryFromBytes; |n: &u8| *n != 0); + unsafe_impl!(NonZeroI8: TryFromBytes; |n: &i8| *n != 0); + unsafe_impl!(NonZeroU16: TryFromBytes; |n: &u16| *n != 0); + unsafe_impl!(NonZeroI16: TryFromBytes; |n: &i16| *n != 0); + unsafe_impl!(NonZeroU32: TryFromBytes; |n: &u32| *n != 0); + unsafe_impl!(NonZeroI32: TryFromBytes; |n: &i32| *n != 0); + unsafe_impl!(NonZeroU64: TryFromBytes; |n: &u64| *n != 0); + unsafe_impl!(NonZeroI64: TryFromBytes; |n: &i64| *n != 0); + unsafe_impl!(NonZeroU128: TryFromBytes; |n: &u128| *n != 0); + unsafe_impl!(NonZeroI128: TryFromBytes; |n: &i128| *n != 0); + unsafe_impl!(NonZeroUsize: TryFromBytes; |n: &usize| *n != 0); + unsafe_impl!(NonZeroIsize: TryFromBytes; |n: &isize| *n != 0); +} +safety_comment! { + /// SAFETY: + /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`, + /// `AsBytes`: The Rust compiler reuses `0` value to represent `None`, so + /// `size_of::>() == size_of::()`; see + /// `NonZeroXxx` documentation. + /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that + /// `Option` and `Option` both have size 1. [1] [2] + /// This is worded in a way that makes it unclear whether it's meant as a + /// guarantee, but given the purpose of those types, it's virtually + /// unthinkable that that would ever change. 
The only valid alignment for + /// a 1-byte type is 1. + /// + /// TODO(#429): Add quotes from documentation. + /// + /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html + /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html + /// + /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation + /// for layout guarantees. + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_unaligned!(Option, Option); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); + unsafe_impl!(Option: TryFromBytes, FromZeroes, FromBytes, AsBytes); +} + +safety_comment! { + /// SAFETY: + /// The following types can be transmuted from `[0u8; size_of::()]`. [1] + /// None of them contain `UnsafeCell`s, and so they all soundly implement + /// `FromZeroes`. + /// + /// [1] Per + /// https://doc.rust-lang.org/nightly/core/option/index.html#representation: + /// + /// Rust guarantees to optimize the following types `T` such that + /// [`Option`] has the same size and alignment as `T`. In some of these + /// cases, Rust further guarantees that `transmute::<_, Option>([0u8; + /// size_of::()])` is sound and produces `Option::::None`. These + /// cases are identified by the second column: + /// + /// | `T` | `transmute::<_, Option>([0u8; size_of::()])` sound? 
| + /// |-----------------------|-----------------------------------------------------------| + /// | [`Box`] | when `U: Sized` | + /// | `&U` | when `U: Sized` | + /// | `&mut U` | when `U: Sized` | + /// | [`ptr::NonNull`] | when `U: Sized` | + /// | `fn`, `extern "C" fn` | always | + /// + /// TODO(#429), TODO(https://github.com/rust-lang/rust/pull/115333): Cite + /// the Stable docs once they're available. + #[cfg(feature = "alloc")] + unsafe_impl!( + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + T => FromZeroes for Option> + ); + unsafe_impl!(T => FromZeroes for Option<&'_ T>); + unsafe_impl!(T => FromZeroes for Option<&'_ mut T>); + unsafe_impl!(T => FromZeroes for Option>); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_fn!(...)); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_extern_c_fn!(...)); +} + +safety_comment! { + /// SAFETY: + /// Per reference [1]: + /// "For all T, the following are guaranteed: + /// size_of::>() == 0 + /// align_of::>() == 1". + /// This gives: + /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There + /// is only one possible sequence of 0 bytes, and `PhantomData` is + /// inhabited. + /// - `AsBytes`: Since `PhantomData` has size 0, it contains no padding + /// bytes. + /// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment + /// 1. + /// + /// [1] https://doc.rust-lang.org/std/marker/struct.PhantomData.html#layout-1 + unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData); + unsafe_impl!(T: ?Sized => FromZeroes for PhantomData); + unsafe_impl!(T: ?Sized => FromBytes for PhantomData); + unsafe_impl!(T: ?Sized => AsBytes for PhantomData); + unsafe_impl!(T: ?Sized => Unaligned for PhantomData); + assert_unaligned!(PhantomData<()>, PhantomData, PhantomData); +} +safety_comment! { + /// SAFETY: + /// `Wrapping` is guaranteed by its docs [1] to have the same layout and + /// bit validity as `T`. 
Also, `Wrapping` is `#[repr(transparent)]`, and + /// has a single field, which is `pub`. Per the reference [2], this means + /// that the `#[repr(transparent)]` attribute is "considered part of the + /// public ABI". + /// + /// - `TryFromBytes`: The safety requirements for `unsafe_impl!` with an + /// `is_bit_valid` closure: + /// - Given `t: *mut Wrapping` and `let r = *mut T`, `r` refers to an + /// object of the same size as that referred to by `t`. This is true + /// because `Wrapping` and `T` have the same layout + /// - The alignment of `Wrapping` is equal to the alignment of `T`. + /// - The impl must only return `true` for its argument if the original + /// `Ptr>` refers to a valid `Wrapping`. Since + /// `Wrapping` has the same bit validity as `T`, and since our impl + /// just calls `T::is_bit_valid`, our impl returns `true` exactly when + /// its argument contains a valid `Wrapping`. + /// - `FromBytes`: Since `Wrapping` has the same bit validity as `T`, if + /// `T: FromBytes`, then all initialized byte sequences are valid + /// instances of `Wrapping`. Similarly, if `T: FromBytes`, then + /// `Wrapping` doesn't contain any `UnsafeCell`s. Thus, `impl FromBytes + /// for Wrapping where T: FromBytes` is a sound impl. + /// - `AsBytes`: Since `Wrapping` has the same bit validity as `T`, if + /// `T: AsBytes`, then all valid instances of `Wrapping` have all of + /// their bytes initialized. Similarly, if `T: AsBytes`, then + /// `Wrapping` doesn't contain any `UnsafeCell`s. Thus, `impl AsBytes + /// for Wrapping where T: AsBytes` is a valid impl. + /// - `Unaligned`: Since `Wrapping` has the same layout as `T`, + /// `Wrapping` has alignment 1 exactly when `T` does. + /// + /// [1] Per https://doc.rust-lang.org/core/num/struct.NonZeroU16.html: + /// + /// `NonZeroU16` is guaranteed to have the same layout and bit validity as + /// `u16` with the exception that `0` is not a valid instance. + /// + /// TODO(#429): Add quotes from documentation. 
+ /// + /// [1] TODO(https://doc.rust-lang.org/nightly/core/num/struct.Wrapping.html#layout-1): + /// Reference this documentation once it's available on stable. + /// + /// [2] https://doc.rust-lang.org/nomicon/other-reprs.html#reprtransparent + unsafe_impl!(T: TryFromBytes => TryFromBytes for Wrapping; |candidate: Ptr| { + // SAFETY: + // - Since `T` and `Wrapping` have the same layout and bit validity + // and contain the same fields, `T` contains `UnsafeCell`s exactly + // where `Wrapping` does. Thus, all memory and `UnsafeCell` + // preconditions of `T::is_bit_valid` hold exactly when the same + // preconditions for `Wrapping::is_bit_valid` hold. + // - By the same token, since `candidate` is guaranteed to have its + // bytes initialized where there are always initialized bytes in + // `Wrapping`, the same is true for `T`. + unsafe { T::is_bit_valid(candidate) } + }); + unsafe_impl!(T: FromZeroes => FromZeroes for Wrapping); + unsafe_impl!(T: FromBytes => FromBytes for Wrapping); + unsafe_impl!(T: AsBytes => AsBytes for Wrapping); + unsafe_impl!(T: Unaligned => Unaligned for Wrapping); + assert_unaligned!(Wrapping<()>, Wrapping); +} +safety_comment! { + // `MaybeUninit` is `FromZeroes` and `FromBytes`, but never `AsBytes` + // since it may contain uninitialized bytes. + // + /// SAFETY: + /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: + /// `MaybeUninit` has no restrictions on its contents. Unfortunately, + /// in addition to bit validity, `TryFromBytes`, `FromZeroes` and + /// `FromBytes` also require that implementers contain no `UnsafeCell`s. + /// Thus, we require `T: Trait` in order to ensure that `T` - and thus + /// `MaybeUninit` - contains to `UnsafeCell`s. Thus, requiring that `T` + /// implement each of these traits is sufficient. 
+ /// - `Unaligned`: "MaybeUninit is guaranteed to have the same size, + /// alignment, and ABI as T" [1] + /// + /// [1] https://doc.rust-lang.org/stable/core/mem/union.MaybeUninit.html#layout-1 + /// + /// TODO(https://github.com/google/zerocopy/issues/251): If we split + /// `FromBytes` and `RefFromBytes`, or if we introduce a separate + /// `NoCell`/`Freeze` trait, we can relax the trait bounds for `FromZeroes` + /// and `FromBytes`. + unsafe_impl!(T: TryFromBytes => TryFromBytes for MaybeUninit); + unsafe_impl!(T: FromZeroes => FromZeroes for MaybeUninit); + unsafe_impl!(T: FromBytes => FromBytes for MaybeUninit); + unsafe_impl!(T: Unaligned => Unaligned for MaybeUninit); + assert_unaligned!(MaybeUninit<()>, MaybeUninit); +} +safety_comment! { + /// SAFETY: + /// `ManuallyDrop` has the same layout and bit validity as `T` [1], and + /// accessing the inner value is safe (meaning that it's unsound to leave + /// the inner value uninitialized while exposing the `ManuallyDrop` to safe + /// code). + /// - `FromZeroes`, `FromBytes`: Since it has the same layout as `T`, any + /// valid `T` is a valid `ManuallyDrop`. If `T: FromZeroes`, a sequence + /// of zero bytes is a valid `T`, and thus a valid `ManuallyDrop`. If + /// `T: FromBytes`, any sequence of bytes is a valid `T`, and thus a valid + /// `ManuallyDrop`. + /// - `AsBytes`: Since it has the same layout as `T`, and since it's unsound + /// to let safe code access a `ManuallyDrop` whose inner value is + /// uninitialized, safe code can only ever access a `ManuallyDrop` whose + /// contents are a valid `T`. Since `T: AsBytes`, this means that safe + /// code can only ever access a `ManuallyDrop` with all initialized bytes. + /// - `Unaligned`: `ManuallyDrop` has the same layout (and thus alignment) + /// as `T`, and `T: Unaligned` guarantees that that alignment is 1. 
+ /// + /// `ManuallyDrop` is guaranteed to have the same layout and bit + /// validity as `T` + /// + /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html: + /// + /// TODO(#429): + /// - Add quotes from docs. + /// - Once [1] (added in + /// https://github.com/rust-lang/rust/pull/115522) is available on stable, + /// quote the stable docs instead of the nightly docs. + unsafe_impl!(T: ?Sized + FromZeroes => FromZeroes for ManuallyDrop); + unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop); + unsafe_impl!(T: ?Sized + AsBytes => AsBytes for ManuallyDrop); + unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop); + assert_unaligned!(ManuallyDrop<()>, ManuallyDrop); +} +safety_comment! { + /// SAFETY: + /// Per the reference [1]: + /// + /// An array of `[T; N]` has a size of `size_of::() * N` and the same + /// alignment of `T`. Arrays are laid out so that the zero-based `nth` + /// element of the array is offset from the start of the array by `n * + /// size_of::()` bytes. + /// + /// ... + /// + /// Slices have the same layout as the section of the array they slice. + /// + /// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s + /// laid out back-to-back with no bytes in between. Therefore, `[T]` or `[T; + /// N]` are `TryFromBytes`, `FromZeroes`, `FromBytes`, and `AsBytes` if `T` + /// is (respectively). Furthermore, since an array/slice has "the same + /// alignment of `T`", `[T]` and `[T; N]` are `Unaligned` if `T` is. + /// + /// Note that we don't `assert_unaligned!` for slice types because + /// `assert_unaligned!` uses `align_of`, which only works for `Sized` types. 
+ /// + /// [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout + unsafe_impl!(const N: usize, T: FromZeroes => FromZeroes for [T; N]); + unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]); + unsafe_impl!(const N: usize, T: AsBytes => AsBytes for [T; N]); + unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]); + assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]); + unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c: Ptr<[T]>| { + // SAFETY: Assuming the preconditions of `is_bit_valid` are satisfied, + // so too will the postcondition: that, if `is_bit_valid(candidate)` + // returns true, `*candidate` contains a valid `Self`. Per the reference + // [1]: + // + // An array of `[T; N]` has a size of `size_of::() * N` and the + // same alignment of `T`. Arrays are laid out so that the zero-based + // `nth` element of the array is offset from the start of the array by + // `n * size_of::()` bytes. + // + // ... + // + // Slices have the same layout as the section of the array they slice. + // + // In other words, the layout of a `[T] is a sequence of `T`s laid out + // back-to-back with no bytes in between. If all elements in `candidate` + // are `is_bit_valid`, so too is `candidate`. + // + // Note that any of the below calls may panic, but it would still be + // sound even if it did. `is_bit_valid` does not promise that it will + // not panic (in fact, it explicitly warns that it's a possibility), and + // we have not violated any safety invariants that we must fix before + // returning. + c.iter().all(|elem| + // SAFETY: We uphold the safety contract of `is_bit_valid(elem)`, by + // precondition on the surrounding call to `is_bit_valid`. The + // memory referenced by `elem` is contained entirely within `c`, and + // satisfies the preconditions satisfied by `c`. By axiom, we assume + // that `Iterator:all` does not invalidate these preconditions + // (e.g., by writing to `elem`.) 
Since `elem` is derived from `c`, + // it is only possible for uninitialized bytes to occur in `elem` at + // the same bytes they occur within `c`. + unsafe { ::is_bit_valid(elem) } + ) + }); + unsafe_impl!(T: FromZeroes => FromZeroes for [T]); + unsafe_impl!(T: FromBytes => FromBytes for [T]); + unsafe_impl!(T: AsBytes => AsBytes for [T]); + unsafe_impl!(T: Unaligned => Unaligned for [T]); +} +safety_comment! { + /// SAFETY: + /// - `FromZeroes`: For thin pointers (note that `T: Sized`), the zero + /// pointer is considered "null". [1] No operations which require + /// provenance are legal on null pointers, so this is not a footgun. + /// + /// NOTE(#170): Implementing `FromBytes` and `AsBytes` for raw pointers + /// would be sound, but carries provenance footguns. We want to support + /// `FromBytes` and `AsBytes` for raw pointers eventually, but we are + /// holding off until we can figure out how to address those footguns. + /// + /// [1] TODO(https://github.com/rust-lang/rust/pull/116988): Cite the + /// documentation once this PR lands. + unsafe_impl!(T => FromZeroes for *const T); + unsafe_impl!(T => FromZeroes for *mut T); +} + +// SIMD support +// +// Per the Unsafe Code Guidelines Reference [1]: +// +// Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs +// containing `N` elements of type `T` where `N` is a power-of-two and the +// size and alignment requirements of `T` are equal: +// +// ```rust +// #[repr(simd)] +// struct Vector(T_0, ..., T_(N - 1)); +// ``` +// +// ... +// +// The size of `Vector` is `N * size_of::()` and its alignment is an +// implementation-defined function of `T` and `N` greater than or equal to +// `align_of::()`. +// +// ... 
+// +// Vector elements are laid out in source field order, enabling random access +// to vector elements by reinterpreting the vector as an array: +// +// ```rust +// union U { +// vec: Vector, +// arr: [T; N] +// } +// +// assert_eq!(size_of::>(), size_of::<[T; N]>()); +// assert!(align_of::>() >= align_of::<[T; N]>()); +// +// unsafe { +// let u = U { vec: Vector(t_0, ..., t_(N - 1)) }; +// +// assert_eq!(u.vec.0, u.arr[0]); +// // ... +// assert_eq!(u.vec.(N - 1), u.arr[N - 1]); +// } +// ``` +// +// Given this background, we can observe that: +// - The size and bit pattern requirements of a SIMD type are equivalent to the +// equivalent array type. Thus, for any SIMD type whose primitive `T` is +// `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes`, that SIMD type is +// also `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` respectively. +// - Since no upper bound is placed on the alignment, no SIMD type can be +// guaranteed to be `Unaligned`. +// +// Also per [1]: +// +// This chapter represents the consensus from issue #38. The statements in +// here are not (yet) "guaranteed" not to change until an RFC ratifies them. +// +// See issue #38 [2]. While this behavior is not technically guaranteed, the +// likelihood that the behavior will change such that SIMD types are no longer +// `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` is next to zero, as +// that would defeat the entire purpose of SIMD types. Nonetheless, we put this +// behavior behind the `simd` Cargo feature, which requires consumers to opt +// into this stability hazard. 
+// +// [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html +// [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38 +#[cfg(feature = "simd")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))] +mod simd { + /// Defines a module which implements `TryFromBytes`, `FromZeroes`, + /// `FromBytes`, and `AsBytes` for a set of types from a module in + /// `core::arch`. + /// + /// `$arch` is both the name of the defined module and the name of the + /// module in `core::arch`, and `$typ` is the list of items from that module + /// to implement `FromZeroes`, `FromBytes`, and `AsBytes` for. + #[allow(unused_macros)] // `allow(unused_macros)` is needed because some + // target/feature combinations don't emit any impls + // and thus don't use this macro. + macro_rules! simd_arch_mod { + (#[cfg $cfg:tt] $arch:ident, $mod:ident, $($typ:ident),*) => { + #[cfg $cfg] + #[cfg_attr(doc_cfg, doc(cfg $cfg))] + mod $mod { + use core::arch::$arch::{$($typ),*}; + + use crate::*; + impl_known_layout!($($typ),*); + safety_comment! { + /// SAFETY: + /// See comment on module definition for justification. 
+ $( unsafe_impl!($typ: TryFromBytes, FromZeroes, FromBytes, AsBytes); )* + } + } + }; + } + + #[rustfmt::skip] + const _: () = { + simd_arch_mod!( + #[cfg(target_arch = "x86")] + x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "x86"))] + x86, x86_nightly, __m512bh, __m512, __m512d, __m512i + ); + simd_arch_mod!( + #[cfg(target_arch = "x86_64")] + x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))] + x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i + ); + simd_arch_mod!( + #[cfg(target_arch = "wasm32")] + wasm32, wasm32, v128 + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] + powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] + powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long + ); + simd_arch_mod!( + #[cfg(target_arch = "aarch64")] + aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, + int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t, + int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, + poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, + poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, + uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, + uint64x1_t, uint64x2_t + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "arm"))] + arm, arm, int8x4_t, uint8x4_t + ); + }; +} + +/// Safely transmutes a value of one type to a value of another type of the same +/// size. 
+/// +/// The expression `$e` must have a concrete type, `T`, which implements +/// `AsBytes`. The `transmute!` expression must also have a concrete type, `U` +/// (`U` is inferred from the calling context), and `U` must implement +/// `FromBytes`. +/// +/// Note that the `T` produced by the expression `$e` will *not* be dropped. +/// Semantically, its bits will be copied into a new value of type `U`, the +/// original `T` will be forgotten, and the value of type `U` will be returned. +/// +/// # Examples +/// +/// ``` +/// # use zerocopy::transmute; +/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; +/// +/// let two_dimensional: [[u8; 4]; 2] = transmute!(one_dimensional); +/// +/// assert_eq!(two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]); +/// ``` +#[macro_export] +macro_rules! transmute { + ($e:expr) => {{ + // NOTE: This must be a macro (rather than a function with trait bounds) + // because there's no way, in a generic context, to enforce that two + // types have the same size. `core::mem::transmute` uses compiler magic + // to enforce this so long as the types are concrete. + + let e = $e; + if false { + // This branch, though never taken, ensures that the type of `e` is + // `AsBytes` and that the type of this macro invocation expression + // is `FromBytes`. + + struct AssertIsAsBytes(T); + let _ = AssertIsAsBytes(e); + + struct AssertIsFromBytes(U); + #[allow(unused, unreachable_code)] + let u = AssertIsFromBytes(loop {}); + u.0 + } else { + // SAFETY: `core::mem::transmute` ensures that the type of `e` and + // the type of this macro invocation expression have the same size. + // We know this transmute is safe thanks to the `AsBytes` and + // `FromBytes` bounds enforced by the `false` branch. + // + // We use this reexport of `core::mem::transmute` because we know it + // will always be available for crates which are using the 2015 + // edition of Rust. 
By contrast, if we were to use + // `std::mem::transmute`, this macro would not work for such crates + // in `no_std` contexts, and if we were to use + // `core::mem::transmute`, this macro would not work in `std` + // contexts in which `core` was not manually imported. This is not a + // problem for 2018 edition crates. + unsafe { + // Clippy: It's okay to transmute a type to itself. + #[allow(clippy::useless_transmute, clippy::missing_transmute_annotations)] + $crate::macro_util::core_reexport::mem::transmute(e) + } + } + }} +} + +/// Safely transmutes a mutable or immutable reference of one type to an +/// immutable reference of another type of the same size. +/// +/// The expression `$e` must have a concrete type, `&T` or `&mut T`, where `T: +/// Sized + AsBytes`. The `transmute_ref!` expression must also have a concrete +/// type, `&U` (`U` is inferred from the calling context), where `U: Sized + +/// FromBytes`. It must be the case that `align_of::() >= align_of::()`. +/// +/// The lifetime of the input type, `&T` or `&mut T`, must be the same as or +/// outlive the lifetime of the output type, `&U`. +/// +/// # Examples +/// +/// ``` +/// # use zerocopy::transmute_ref; +/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; +/// +/// let two_dimensional: &[[u8; 4]; 2] = transmute_ref!(&one_dimensional); +/// +/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]); +/// ``` +/// +/// # Alignment increase error message +/// +/// Because of limitations on macros, the error message generated when +/// `transmute_ref!` is used to transmute from a type of lower alignment to a +/// type of higher alignment is somewhat confusing. 
For example, the following +/// code: +/// +/// ```compile_fail +/// const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]); +/// ``` +/// +/// ...generates the following error: +/// +/// ```text +/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types +/// --> src/lib.rs:1524:34 +/// | +/// 5 | const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]); +/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +/// | +/// = note: source type: `AlignOf<[u8; 2]>` (8 bits) +/// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits) +/// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) +/// ``` +/// +/// This is saying that `max(align_of::(), align_of::()) != +/// align_of::()`, which is equivalent to `align_of::() < +/// align_of::()`. +#[macro_export] +macro_rules! transmute_ref { + ($e:expr) => {{ + // NOTE: This must be a macro (rather than a function with trait bounds) + // because there's no way, in a generic context, to enforce that two + // types have the same size or alignment. + + // Ensure that the source type is a reference or a mutable reference + // (note that mutable references are implicitly reborrowed here). + let e: &_ = $e; + + #[allow(unused, clippy::diverging_sub_expression)] + if false { + // This branch, though never taken, ensures that the type of `e` is + // `&T` where `T: 't + Sized + AsBytes`, that the type of this macro + // expression is `&U` where `U: 'u + Sized + FromBytes`, and that + // `'t` outlives `'u`. 
+ + struct AssertIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T); + let _ = AssertIsAsBytes(e); + + struct AssertIsFromBytes<'a, U: ::core::marker::Sized + $crate::FromBytes>(&'a U); + #[allow(unused, unreachable_code)] + let u = AssertIsFromBytes(loop {}); + u.0 + } else if false { + // This branch, though never taken, ensures that `size_of::() == + // size_of::()` and that that `align_of::() >= + // align_of::()`. + + // `t` is inferred to have type `T` because it's assigned to `e` (of + // type `&T`) as `&t`. + let mut t = unreachable!(); + e = &t; + + // `u` is inferred to have type `U` because it's used as `&u` as the + // value returned from this branch. + let u; + + $crate::assert_size_eq!(t, u); + $crate::assert_align_gt_eq!(t, u); + + &u + } else { + // SAFETY: For source type `Src` and destination type `Dst`: + // - We know that `Src: AsBytes` and `Dst: FromBytes` thanks to the + // uses of `AssertIsAsBytes` and `AssertIsFromBytes` above. + // - We know that `size_of::() == size_of::()` thanks to + // the use of `assert_size_eq!` above. + // - We know that `align_of::() >= align_of::()` thanks to + // the use of `assert_align_gt_eq!` above. + unsafe { $crate::macro_util::transmute_ref(e) } + } + }} +} + +/// Safely transmutes a mutable reference of one type to an mutable reference of +/// another type of the same size. +/// +/// The expression `$e` must have a concrete type, `&mut T`, where `T: Sized + +/// AsBytes`. The `transmute_mut!` expression must also have a concrete type, +/// `&mut U` (`U` is inferred from the calling context), where `U: Sized + +/// FromBytes`. It must be the case that `align_of::() >= align_of::()`. +/// +/// The lifetime of the input type, `&mut T`, must be the same as or outlive the +/// lifetime of the output type, `&mut U`. 
+/// +/// # Examples +/// +/// ``` +/// # use zerocopy::transmute_mut; +/// let mut one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7]; +/// +/// let two_dimensional: &mut [[u8; 4]; 2] = transmute_mut!(&mut one_dimensional); +/// +/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]); +/// +/// two_dimensional.reverse(); +/// +/// assert_eq!(one_dimensional, [4, 5, 6, 7, 0, 1, 2, 3]); +/// ``` +/// +/// # Alignment increase error message +/// +/// Because of limitations on macros, the error message generated when +/// `transmute_mut!` is used to transmute from a type of lower alignment to a +/// type of higher alignment is somewhat confusing. For example, the following +/// code: +/// +/// ```compile_fail +/// const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]); +/// ``` +/// +/// ...generates the following error: +/// +/// ```text +/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types +/// --> src/lib.rs:1524:34 +/// | +/// 5 | const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]); +/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +/// | +/// = note: source type: `AlignOf<[u8; 2]>` (8 bits) +/// = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits) +/// = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) +/// ``` +/// +/// This is saying that `max(align_of::(), align_of::()) != +/// align_of::()`, which is equivalent to `align_of::() < +/// align_of::()`. +#[macro_export] +macro_rules! transmute_mut { + ($e:expr) => {{ + // NOTE: This must be a macro (rather than a function with trait bounds) + // because there's no way, in a generic context, to enforce that two + // types have the same size or alignment. + + // Ensure that the source type is a mutable reference. 
+ let e: &mut _ = $e; + + #[allow(unused, clippy::diverging_sub_expression)] + if false { + // This branch, though never taken, ensures that the type of `e` is + // `&mut T` where `T: 't + Sized + FromBytes + AsBytes`, that the + // type of this macro expression is `&mut U` where `U: 'u + Sized + + // FromBytes + AsBytes`. + + // We use immutable references here rather than mutable so that, if + // this macro is used in a const context (in which, as of this + // writing, mutable references are banned), the error message + // appears to originate in the user's code rather than in the + // internals of this macro. + struct AssertSrcIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T); + struct AssertSrcIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T); + struct AssertDstIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T); + struct AssertDstIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T); + + if true { + let _ = AssertSrcIsFromBytes(&*e); + } else { + let _ = AssertSrcIsAsBytes(&*e); + } + + if true { + #[allow(unused, unreachable_code)] + let u = AssertDstIsFromBytes(loop {}); + &mut *u.0 + } else { + #[allow(unused, unreachable_code)] + let u = AssertDstIsAsBytes(loop {}); + &mut *u.0 + } + } else if false { + // This branch, though never taken, ensures that `size_of::() == + // size_of::()` and that that `align_of::() >= + // align_of::()`. + + // `t` is inferred to have type `T` because it's assigned to `e` (of + // type `&mut T`) as `&mut t`. + let mut t = unreachable!(); + e = &mut t; + + // `u` is inferred to have type `U` because it's used as `&mut u` as + // the value returned from this branch. 
+ let u; + + $crate::assert_size_eq!(t, u); + $crate::assert_align_gt_eq!(t, u); + + &mut u + } else { + // SAFETY: For source type `Src` and destination type `Dst`: + // - We know that `Src: FromBytes + AsBytes` and `Dst: FromBytes + + // AsBytes` thanks to the uses of `AssertSrcIsFromBytes`, + // `AssertSrcIsAsBytes`, `AssertDstIsFromBytes`, and + // `AssertDstIsAsBytes` above. + // - We know that `size_of::() == size_of::()` thanks to + // the use of `assert_size_eq!` above. + // - We know that `align_of::() >= align_of::()` thanks to + // the use of `assert_align_gt_eq!` above. + unsafe { $crate::macro_util::transmute_mut(e) } + } + }} +} + +/// Includes a file and safely transmutes it to a value of an arbitrary type. +/// +/// The file will be included as a byte array, `[u8; N]`, which will be +/// transmuted to another type, `T`. `T` is inferred from the calling context, +/// and must implement [`FromBytes`]. +/// +/// The file is located relative to the current file (similarly to how modules +/// are found). The provided path is interpreted in a platform-specific way at +/// compile time. So, for instance, an invocation with a Windows path containing +/// backslashes `\` would not compile correctly on Unix. +/// +/// `include_value!` is ignorant of byte order. For byte order-aware types, see +/// the [`byteorder`] module. +/// +/// # Examples +/// +/// Assume there are two files in the same directory with the following +/// contents: +/// +/// File `data` (no trailing newline): +/// +/// ```text +/// abcd +/// ``` +/// +/// File `main.rs`: +/// +/// ```rust +/// use zerocopy::include_value; +/// # macro_rules! 
include_value { +/// # ($file:expr) => { zerocopy::include_value!(concat!("../testdata/include_value/", $file)) }; +/// # } +/// +/// fn main() { +/// let as_u32: u32 = include_value!("data"); +/// assert_eq!(as_u32, u32::from_ne_bytes([b'a', b'b', b'c', b'd'])); +/// let as_i32: i32 = include_value!("data"); +/// assert_eq!(as_i32, i32::from_ne_bytes([b'a', b'b', b'c', b'd'])); +/// } +/// ``` +#[doc(alias("include_bytes", "include_data", "include_type"))] +#[macro_export] +macro_rules! include_value { + ($file:expr $(,)?) => { + $crate::transmute!(*::core::include_bytes!($file)) + }; +} + +/// A typed reference derived from a byte slice. +/// +/// A `Ref` is a reference to a `T` which is stored in a byte slice, `B`. +/// Unlike a native reference (`&T` or `&mut T`), `Ref` has the same +/// mutability as the byte slice it was constructed from (`B`). +/// +/// # Examples +/// +/// `Ref` can be used to treat a sequence of bytes as a structured type, and to +/// read and write the fields of that type as if the byte slice reference were +/// simply a reference to that type. 
+/// +/// ```rust +/// # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them +/// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, FromZeroes, Ref, Unaligned}; +/// +/// #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] +/// #[repr(C)] +/// struct UdpHeader { +/// src_port: [u8; 2], +/// dst_port: [u8; 2], +/// length: [u8; 2], +/// checksum: [u8; 2], +/// } +/// +/// struct UdpPacket { +/// header: Ref, +/// body: B, +/// } +/// +/// impl UdpPacket { +/// pub fn parse(bytes: B) -> Option> { +/// let (header, body) = Ref::new_unaligned_from_prefix(bytes)?; +/// Some(UdpPacket { header, body }) +/// } +/// +/// pub fn get_src_port(&self) -> [u8; 2] { +/// self.header.src_port +/// } +/// } +/// +/// impl UdpPacket { +/// pub fn set_src_port(&mut self, src_port: [u8; 2]) { +/// self.header.src_port = src_port; +/// } +/// } +/// # } +/// ``` +pub struct Ref(B, PhantomData); + +/// Deprecated: prefer [`Ref`] instead. +#[deprecated(since = "0.7.0", note = "LayoutVerified has been renamed to Ref")] +#[doc(hidden)] +pub type LayoutVerified = Ref; + +impl Ref +where + B: ByteSlice, +{ + /// Constructs a new `Ref`. + /// + /// `new` verifies that `bytes.len() == size_of::()` and that `bytes` is + /// aligned to `align_of::()`, and constructs a new `Ref`. If either of + /// these checks fail, it returns `None`. + #[inline] + pub fn new(bytes: B) -> Option> { + if bytes.len() != mem::size_of::() || !util::aligned_to::<_, T>(bytes.deref()) { + return None; + } + Some(Ref(bytes, PhantomData)) + } + + /// Constructs a new `Ref` from the prefix of a byte slice. + /// + /// `new_from_prefix` verifies that `bytes.len() >= size_of::()` and that + /// `bytes` is aligned to `align_of::()`. It consumes the first + /// `size_of::()` bytes from `bytes` to construct a `Ref`, and returns + /// the remaining bytes to the caller. If either the length or alignment + /// checks fail, it returns `None`. 
+ #[inline] + pub fn new_from_prefix(bytes: B) -> Option<(Ref, B)> { + if bytes.len() < mem::size_of::() || !util::aligned_to::<_, T>(bytes.deref()) { + return None; + } + let (bytes, suffix) = bytes.split_at(mem::size_of::()); + Some((Ref(bytes, PhantomData), suffix)) + } + + /// Constructs a new `Ref` from the suffix of a byte slice. + /// + /// `new_from_suffix` verifies that `bytes.len() >= size_of::()` and that + /// the last `size_of::()` bytes of `bytes` are aligned to + /// `align_of::()`. It consumes the last `size_of::()` bytes from + /// `bytes` to construct a `Ref`, and returns the preceding bytes to the + /// caller. If either the length or alignment checks fail, it returns + /// `None`. + #[inline] + pub fn new_from_suffix(bytes: B) -> Option<(B, Ref)> { + let bytes_len = bytes.len(); + let split_at = bytes_len.checked_sub(mem::size_of::())?; + let (prefix, bytes) = bytes.split_at(split_at); + if !util::aligned_to::<_, T>(bytes.deref()) { + return None; + } + Some((prefix, Ref(bytes, PhantomData))) + } +} + +impl Ref +where + B: ByteSlice, +{ + /// Constructs a new `Ref` of a slice type. + /// + /// `new_slice` verifies that `bytes.len()` is a multiple of + /// `size_of::()` and that `bytes` is aligned to `align_of::()`, and + /// constructs a new `Ref`. If either of these checks fail, it returns + /// `None`. + /// + /// # Panics + /// + /// `new_slice` panics if `T` is a zero-sized type. + #[inline] + pub fn new_slice(bytes: B) -> Option> { + let remainder = bytes + .len() + .checked_rem(mem::size_of::()) + .expect("Ref::new_slice called on a zero-sized type"); + if remainder != 0 || !util::aligned_to::<_, T>(bytes.deref()) { + return None; + } + Some(Ref(bytes, PhantomData)) + } + + /// Constructs a new `Ref` of a slice type from the prefix of a byte slice. + /// + /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::() * + /// count` and that `bytes` is aligned to `align_of::()`. 
It consumes the + /// first `size_of::() * count` bytes from `bytes` to construct a `Ref`, + /// and returns the remaining bytes to the caller. It also ensures that + /// `sizeof::() * count` does not overflow a `usize`. If any of the + /// length, alignment, or overflow checks fail, it returns `None`. + /// + /// # Panics + /// + /// `new_slice_from_prefix` panics if `T` is a zero-sized type. + #[inline] + pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref, B)> { + let expected_len = match mem::size_of::().checked_mul(count) { + Some(len) => len, + None => return None, + }; + if bytes.len() < expected_len { + return None; + } + let (prefix, bytes) = bytes.split_at(expected_len); + Self::new_slice(prefix).map(move |l| (l, bytes)) + } + + /// Constructs a new `Ref` of a slice type from the suffix of a byte slice. + /// + /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::() * + /// count` and that `bytes` is aligned to `align_of::()`. It consumes the + /// last `size_of::() * count` bytes from `bytes` to construct a `Ref`, + /// and returns the preceding bytes to the caller. It also ensures that + /// `sizeof::() * count` does not overflow a `usize`. If any of the + /// length, alignment, or overflow checks fail, it returns `None`. + /// + /// # Panics + /// + /// `new_slice_from_suffix` panics if `T` is a zero-sized type. 
+ #[inline] + pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref)> { + let expected_len = match mem::size_of::().checked_mul(count) { + Some(len) => len, + None => return None, + }; + let split_at = bytes.len().checked_sub(expected_len)?; + let (bytes, suffix) = bytes.split_at(split_at); + Self::new_slice(suffix).map(move |l| (bytes, l)) + } +} + +fn map_zeroed(opt: Option>) -> Option> { + match opt { + Some(mut r) => { + r.0.fill(0); + Some(r) + } + None => None, + } +} + +fn map_prefix_tuple_zeroed( + opt: Option<(Ref, B)>, +) -> Option<(Ref, B)> { + match opt { + Some((mut r, rest)) => { + r.0.fill(0); + Some((r, rest)) + } + None => None, + } +} + +fn map_suffix_tuple_zeroed( + opt: Option<(B, Ref)>, +) -> Option<(B, Ref)> { + map_prefix_tuple_zeroed(opt.map(|(a, b)| (b, a))).map(|(a, b)| (b, a)) +} + +impl Ref +where + B: ByteSliceMut, +{ + /// Constructs a new `Ref` after zeroing the bytes. + /// + /// `new_zeroed` verifies that `bytes.len() == size_of::()` and that + /// `bytes` is aligned to `align_of::()`, and constructs a new `Ref`. If + /// either of these checks fail, it returns `None`. + /// + /// If the checks succeed, then `bytes` will be initialized to zero. This + /// can be useful when re-using buffers to ensure that sensitive data + /// previously stored in the buffer is not leaked. + #[inline(always)] + pub fn new_zeroed(bytes: B) -> Option> { + map_zeroed(Self::new(bytes)) + } + + /// Constructs a new `Ref` from the prefix of a byte slice, zeroing the + /// prefix. + /// + /// `new_from_prefix_zeroed` verifies that `bytes.len() >= size_of::()` + /// and that `bytes` is aligned to `align_of::()`. It consumes the first + /// `size_of::()` bytes from `bytes` to construct a `Ref`, and returns + /// the remaining bytes to the caller. If either the length or alignment + /// checks fail, it returns `None`. + /// + /// If the checks succeed, then the prefix which is consumed will be + /// initialized to zero. 
This can be useful when re-using buffers to ensure + /// that sensitive data previously stored in the buffer is not leaked. + #[inline(always)] + pub fn new_from_prefix_zeroed(bytes: B) -> Option<(Ref, B)> { + map_prefix_tuple_zeroed(Self::new_from_prefix(bytes)) + } + + /// Constructs a new `Ref` from the suffix of a byte slice, zeroing the + /// suffix. + /// + /// `new_from_suffix_zeroed` verifies that `bytes.len() >= size_of::()` + /// and that the last `size_of::()` bytes of `bytes` are aligned to + /// `align_of::()`. It consumes the last `size_of::()` bytes from + /// `bytes` to construct a `Ref`, and returns the preceding bytes to the + /// caller. If either the length or alignment checks fail, it returns + /// `None`. + /// + /// If the checks succeed, then the suffix which is consumed will be + /// initialized to zero. This can be useful when re-using buffers to ensure + /// that sensitive data previously stored in the buffer is not leaked. + #[inline(always)] + pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, Ref)> { + map_suffix_tuple_zeroed(Self::new_from_suffix(bytes)) + } +} + +impl Ref +where + B: ByteSliceMut, +{ + /// Constructs a new `Ref` of a slice type after zeroing the bytes. + /// + /// `new_slice_zeroed` verifies that `bytes.len()` is a multiple of + /// `size_of::()` and that `bytes` is aligned to `align_of::()`, and + /// constructs a new `Ref`. If either of these checks fail, it returns + /// `None`. + /// + /// If the checks succeed, then `bytes` will be initialized to zero. This + /// can be useful when re-using buffers to ensure that sensitive data + /// previously stored in the buffer is not leaked. + /// + /// # Panics + /// + /// `new_slice` panics if `T` is a zero-sized type. + #[inline(always)] + pub fn new_slice_zeroed(bytes: B) -> Option> { + map_zeroed(Self::new_slice(bytes)) + } + + /// Constructs a new `Ref` of a slice type from the prefix of a byte slice, + /// after zeroing the bytes. 
+ /// + /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::() * + /// count` and that `bytes` is aligned to `align_of::()`. It consumes the + /// first `size_of::() * count` bytes from `bytes` to construct a `Ref`, + /// and returns the remaining bytes to the caller. It also ensures that + /// `sizeof::() * count` does not overflow a `usize`. If any of the + /// length, alignment, or overflow checks fail, it returns `None`. + /// + /// If the checks succeed, then the suffix which is consumed will be + /// initialized to zero. This can be useful when re-using buffers to ensure + /// that sensitive data previously stored in the buffer is not leaked. + /// + /// # Panics + /// + /// `new_slice_from_prefix_zeroed` panics if `T` is a zero-sized type. + #[inline(always)] + pub fn new_slice_from_prefix_zeroed(bytes: B, count: usize) -> Option<(Ref, B)> { + map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count)) + } + + /// Constructs a new `Ref` of a slice type from the prefix of a byte slice, + /// after zeroing the bytes. + /// + /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::() * + /// count` and that `bytes` is aligned to `align_of::()`. It consumes the + /// last `size_of::() * count` bytes from `bytes` to construct a `Ref`, + /// and returns the preceding bytes to the caller. It also ensures that + /// `sizeof::() * count` does not overflow a `usize`. If any of the + /// length, alignment, or overflow checks fail, it returns `None`. + /// + /// If the checks succeed, then the consumed suffix will be initialized to + /// zero. This can be useful when re-using buffers to ensure that sensitive + /// data previously stored in the buffer is not leaked. + /// + /// # Panics + /// + /// `new_slice_from_suffix_zeroed` panics if `T` is a zero-sized type. 
+ #[inline(always)] + pub fn new_slice_from_suffix_zeroed(bytes: B, count: usize) -> Option<(B, Ref)> { + map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count)) + } +} + +impl Ref +where + B: ByteSlice, + T: Unaligned, +{ + /// Constructs a new `Ref` for a type with no alignment requirement. + /// + /// `new_unaligned` verifies that `bytes.len() == size_of::()` and + /// constructs a new `Ref`. If the check fails, it returns `None`. + #[inline(always)] + pub fn new_unaligned(bytes: B) -> Option> { + Ref::new(bytes) + } + + /// Constructs a new `Ref` from the prefix of a byte slice for a type with + /// no alignment requirement. + /// + /// `new_unaligned_from_prefix` verifies that `bytes.len() >= + /// size_of::()`. It consumes the first `size_of::()` bytes from + /// `bytes` to construct a `Ref`, and returns the remaining bytes to the + /// caller. If the length check fails, it returns `None`. + #[inline(always)] + pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref, B)> { + Ref::new_from_prefix(bytes) + } + + /// Constructs a new `Ref` from the suffix of a byte slice for a type with + /// no alignment requirement. + /// + /// `new_unaligned_from_suffix` verifies that `bytes.len() >= + /// size_of::()`. It consumes the last `size_of::()` bytes from + /// `bytes` to construct a `Ref`, and returns the preceding bytes to the + /// caller. If the length check fails, it returns `None`. + #[inline(always)] + pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref)> { + Ref::new_from_suffix(bytes) + } +} + +impl Ref +where + B: ByteSlice, + T: Unaligned, +{ + /// Constructs a new `Ref` of a slice type with no alignment requirement. + /// + /// `new_slice_unaligned` verifies that `bytes.len()` is a multiple of + /// `size_of::()` and constructs a new `Ref`. If the check fails, it + /// returns `None`. + /// + /// # Panics + /// + /// `new_slice` panics if `T` is a zero-sized type. 
+ #[inline(always)] + pub fn new_slice_unaligned(bytes: B) -> Option> { + Ref::new_slice(bytes) + } + + /// Constructs a new `Ref` of a slice type with no alignment requirement + /// from the prefix of a byte slice. + /// + /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::() * + /// count`. It consumes the first `size_of::() * count` bytes from + /// `bytes` to construct a `Ref`, and returns the remaining bytes to the + /// caller. It also ensures that `sizeof::() * count` does not overflow a + /// `usize`. If either the length, or overflow checks fail, it returns + /// `None`. + /// + /// # Panics + /// + /// `new_slice_unaligned_from_prefix` panics if `T` is a zero-sized type. + #[inline(always)] + pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref, B)> { + Ref::new_slice_from_prefix(bytes, count) + } + + /// Constructs a new `Ref` of a slice type with no alignment requirement + /// from the suffix of a byte slice. + /// + /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::() * + /// count`. It consumes the last `size_of::() * count` bytes from `bytes` + /// to construct a `Ref`, and returns the remaining bytes to the caller. It + /// also ensures that `sizeof::() * count` does not overflow a `usize`. + /// If either the length, or overflow checks fail, it returns `None`. + /// + /// # Panics + /// + /// `new_slice_unaligned_from_suffix` panics if `T` is a zero-sized type. + #[inline(always)] + pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref)> { + Ref::new_slice_from_suffix(bytes, count) + } +} + +impl Ref +where + B: ByteSliceMut, + T: Unaligned, +{ + /// Constructs a new `Ref` for a type with no alignment requirement, zeroing + /// the bytes. + /// + /// `new_unaligned_zeroed` verifies that `bytes.len() == size_of::()` and + /// constructs a new `Ref`. If the check fails, it returns `None`. + /// + /// If the check succeeds, then `bytes` will be initialized to zero. 
This + /// can be useful when re-using buffers to ensure that sensitive data + /// previously stored in the buffer is not leaked. + #[inline(always)] + pub fn new_unaligned_zeroed(bytes: B) -> Option> { + map_zeroed(Self::new_unaligned(bytes)) + } + + /// Constructs a new `Ref` from the prefix of a byte slice for a type with + /// no alignment requirement, zeroing the prefix. + /// + /// `new_unaligned_from_prefix_zeroed` verifies that `bytes.len() >= + /// size_of::()`. It consumes the first `size_of::()` bytes from + /// `bytes` to construct a `Ref`, and returns the remaining bytes to the + /// caller. If the length check fails, it returns `None`. + /// + /// If the check succeeds, then the prefix which is consumed will be + /// initialized to zero. This can be useful when re-using buffers to ensure + /// that sensitive data previously stored in the buffer is not leaked. + #[inline(always)] + pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(Ref, B)> { + map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes)) + } + + /// Constructs a new `Ref` from the suffix of a byte slice for a type with + /// no alignment requirement, zeroing the suffix. + /// + /// `new_unaligned_from_suffix_zeroed` verifies that `bytes.len() >= + /// size_of::()`. It consumes the last `size_of::()` bytes from + /// `bytes` to construct a `Ref`, and returns the preceding bytes to the + /// caller. If the length check fails, it returns `None`. + /// + /// If the check succeeds, then the suffix which is consumed will be + /// initialized to zero. This can be useful when re-using buffers to ensure + /// that sensitive data previously stored in the buffer is not leaked. 
+ #[inline(always)] + pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, Ref)> { + map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes)) + } +} + +impl Ref +where + B: ByteSliceMut, + T: Unaligned, +{ + /// Constructs a new `Ref` for a slice type with no alignment requirement, + /// zeroing the bytes. + /// + /// `new_slice_unaligned_zeroed` verifies that `bytes.len()` is a multiple + /// of `size_of::()` and constructs a new `Ref`. If the check fails, it + /// returns `None`. + /// + /// If the check succeeds, then `bytes` will be initialized to zero. This + /// can be useful when re-using buffers to ensure that sensitive data + /// previously stored in the buffer is not leaked. + /// + /// # Panics + /// + /// `new_slice` panics if `T` is a zero-sized type. + #[inline(always)] + pub fn new_slice_unaligned_zeroed(bytes: B) -> Option> { + map_zeroed(Self::new_slice_unaligned(bytes)) + } + + /// Constructs a new `Ref` of a slice type with no alignment requirement + /// from the prefix of a byte slice, after zeroing the bytes. + /// + /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::() * + /// count`. It consumes the first `size_of::() * count` bytes from + /// `bytes` to construct a `Ref`, and returns the remaining bytes to the + /// caller. It also ensures that `sizeof::() * count` does not overflow a + /// `usize`. If either the length, or overflow checks fail, it returns + /// `None`. + /// + /// If the checks succeed, then the prefix will be initialized to zero. This + /// can be useful when re-using buffers to ensure that sensitive data + /// previously stored in the buffer is not leaked. + /// + /// # Panics + /// + /// `new_slice_unaligned_from_prefix_zeroed` panics if `T` is a zero-sized + /// type. 
+ #[inline(always)] + pub fn new_slice_unaligned_from_prefix_zeroed( + bytes: B, + count: usize, + ) -> Option<(Ref, B)> { + map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count)) + } + + /// Constructs a new `Ref` of a slice type with no alignment requirement + /// from the suffix of a byte slice, after zeroing the bytes. + /// + /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::() * + /// count`. It consumes the last `size_of::() * count` bytes from `bytes` + /// to construct a `Ref`, and returns the remaining bytes to the caller. It + /// also ensures that `sizeof::() * count` does not overflow a `usize`. + /// If either the length, or overflow checks fail, it returns `None`. + /// + /// If the checks succeed, then the suffix will be initialized to zero. This + /// can be useful when re-using buffers to ensure that sensitive data + /// previously stored in the buffer is not leaked. + /// + /// # Panics + /// + /// `new_slice_unaligned_from_suffix_zeroed` panics if `T` is a zero-sized + /// type. + #[inline(always)] + pub fn new_slice_unaligned_from_suffix_zeroed( + bytes: B, + count: usize, + ) -> Option<(B, Ref)> { + map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count)) + } +} + +impl<'a, B, T> Ref +where + B: 'a + ByteSlice, + T: FromBytes, +{ + /// Converts this `Ref` into a reference. + /// + /// `into_ref` consumes the `Ref`, and returns a reference to `T`. + #[inline(always)] + pub fn into_ref(self) -> &'a T { + assert!(B::INTO_REF_INTO_MUT_ARE_SOUND); + + // SAFETY: According to the safety preconditions on + // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert + // ensures that, given `B: 'a`, it is sound to drop `self` and still + // access the underlying memory using reads for `'a`. + unsafe { self.deref_helper() } + } +} + +impl<'a, B, T> Ref +where + B: 'a + ByteSliceMut, + T: FromBytes + AsBytes, +{ + /// Converts this `Ref` into a mutable reference. 
+ /// + /// `into_mut` consumes the `Ref`, and returns a mutable reference to `T`. + #[inline(always)] + pub fn into_mut(mut self) -> &'a mut T { + assert!(B::INTO_REF_INTO_MUT_ARE_SOUND); + + // SAFETY: According to the safety preconditions on + // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert + // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop + // `self` and still access the underlying memory using both reads and + // writes for `'a`. + unsafe { self.deref_mut_helper() } + } +} + +impl<'a, B, T> Ref +where + B: 'a + ByteSlice, + T: FromBytes, +{ + /// Converts this `Ref` into a slice reference. + /// + /// `into_slice` consumes the `Ref`, and returns a reference to `[T]`. + #[inline(always)] + pub fn into_slice(self) -> &'a [T] { + assert!(B::INTO_REF_INTO_MUT_ARE_SOUND); + + // SAFETY: According to the safety preconditions on + // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert + // ensures that, given `B: 'a`, it is sound to drop `self` and still + // access the underlying memory using reads for `'a`. + unsafe { self.deref_slice_helper() } + } +} + +impl<'a, B, T> Ref +where + B: 'a + ByteSliceMut, + T: FromBytes + AsBytes, +{ + /// Converts this `Ref` into a mutable slice reference. + /// + /// `into_mut_slice` consumes the `Ref`, and returns a mutable reference to + /// `[T]`. + #[inline(always)] + pub fn into_mut_slice(mut self) -> &'a mut [T] { + assert!(B::INTO_REF_INTO_MUT_ARE_SOUND); + + // SAFETY: According to the safety preconditions on + // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert + // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop + // `self` and still access the underlying memory using both reads and + // writes for `'a`. + unsafe { self.deref_mut_slice_helper() } + } +} + +impl Ref +where + B: ByteSlice, + T: FromBytes, +{ + /// Creates an immutable reference to `T` with a specific lifetime. 
+ /// + /// # Safety + /// + /// The type bounds on this method guarantee that it is safe to create an + /// immutable reference to `T` from `self`. However, since the lifetime `'a` + /// is not required to be shorter than the lifetime of the reference to + /// `self`, the caller must guarantee that the lifetime `'a` is valid for + /// this reference. In particular, the referent must exist for all of `'a`, + /// and no mutable references to the same memory may be constructed during + /// `'a`. + unsafe fn deref_helper<'a>(&self) -> &'a T { + // TODO(#429): Add a "SAFETY" comment and remove this `allow`. + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + &*self.0.as_ptr().cast::() + } + } +} + +impl Ref +where + B: ByteSliceMut, + T: FromBytes + AsBytes, +{ + /// Creates a mutable reference to `T` with a specific lifetime. + /// + /// # Safety + /// + /// The type bounds on this method guarantee that it is safe to create a + /// mutable reference to `T` from `self`. However, since the lifetime `'a` + /// is not required to be shorter than the lifetime of the reference to + /// `self`, the caller must guarantee that the lifetime `'a` is valid for + /// this reference. In particular, the referent must exist for all of `'a`, + /// and no other references - mutable or immutable - to the same memory may + /// be constructed during `'a`. + unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T { + // TODO(#429): Add a "SAFETY" comment and remove this `allow`. + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + &mut *self.0.as_mut_ptr().cast::() + } + } +} + +impl Ref +where + B: ByteSlice, + T: FromBytes, +{ + /// Creates an immutable reference to `[T]` with a specific lifetime. + /// + /// # Safety + /// + /// `deref_slice_helper` has the same safety requirements as `deref_helper`. 
+ unsafe fn deref_slice_helper<'a>(&self) -> &'a [T] { + let len = self.0.len(); + let elem_size = mem::size_of::(); + debug_assert_ne!(elem_size, 0); + // `Ref<_, [T]>` maintains the invariant that `size_of::() > 0`. + // Thus, neither the mod nor division operations here can panic. + #[allow(clippy::arithmetic_side_effects)] + let elems = { + debug_assert_eq!(len % elem_size, 0); + len / elem_size + }; + // TODO(#429): Add a "SAFETY" comment and remove this `allow`. + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + slice::from_raw_parts(self.0.as_ptr().cast::(), elems) + } + } +} + +impl Ref +where + B: ByteSliceMut, + T: FromBytes + AsBytes, +{ + /// Creates a mutable reference to `[T]` with a specific lifetime. + /// + /// # Safety + /// + /// `deref_mut_slice_helper` has the same safety requirements as + /// `deref_mut_helper`. + unsafe fn deref_mut_slice_helper<'a>(&mut self) -> &'a mut [T] { + let len = self.0.len(); + let elem_size = mem::size_of::(); + debug_assert_ne!(elem_size, 0); + // `Ref<_, [T]>` maintains the invariant that `size_of::() > 0`. + // Thus, neither the mod nor division operations here can panic. + #[allow(clippy::arithmetic_side_effects)] + let elems = { + debug_assert_eq!(len % elem_size, 0); + len / elem_size + }; + // TODO(#429): Add a "SAFETY" comment and remove this `allow`. + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + slice::from_raw_parts_mut(self.0.as_mut_ptr().cast::(), elems) + } + } +} + +impl Ref +where + B: ByteSlice, + T: ?Sized, +{ + /// Gets the underlying bytes. + #[inline] + pub fn bytes(&self) -> &[u8] { + &self.0 + } +} + +impl Ref +where + B: ByteSliceMut, + T: ?Sized, +{ + /// Gets the underlying bytes mutably. + #[inline] + pub fn bytes_mut(&mut self) -> &mut [u8] { + &mut self.0 + } +} + +impl Ref +where + B: ByteSlice, + T: FromBytes, +{ + /// Reads a copy of `T`. 
+ #[inline] + pub fn read(&self) -> T { + // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is + // at least `size_of::()` bytes long, and that it is at least as + // aligned as `align_of::()`. Because `T: FromBytes`, it is sound to + // interpret these bytes as a `T`. + unsafe { ptr::read(self.0.as_ptr().cast::()) } + } +} + +impl Ref +where + B: ByteSliceMut, + T: AsBytes, +{ + /// Writes the bytes of `t` and then forgets `t`. + #[inline] + pub fn write(&mut self, t: T) { + // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is + // at least `size_of::()` bytes long, and that it is at least as + // aligned as `align_of::()`. Writing `t` to the buffer will allow + // all of the bytes of `t` to be accessed as a `[u8]`, but because `T: + // AsBytes`, we know this is sound. + unsafe { ptr::write(self.0.as_mut_ptr().cast::(), t) } + } +} + +impl Deref for Ref +where + B: ByteSlice, + T: FromBytes, +{ + type Target = T; + #[inline] + fn deref(&self) -> &T { + // SAFETY: This is sound because the lifetime of `self` is the same as + // the lifetime of the return value, meaning that a) the returned + // reference cannot outlive `self` and, b) no mutable methods on `self` + // can be called during the lifetime of the returned reference. See the + // documentation on `deref_helper` for what invariants we are required + // to uphold. + unsafe { self.deref_helper() } + } +} + +impl DerefMut for Ref +where + B: ByteSliceMut, + T: FromBytes + AsBytes, +{ + #[inline] + fn deref_mut(&mut self) -> &mut T { + // SAFETY: This is sound because the lifetime of `self` is the same as + // the lifetime of the return value, meaning that a) the returned + // reference cannot outlive `self` and, b) no other methods on `self` + // can be called during the lifetime of the returned reference. See the + // documentation on `deref_mut_helper` for what invariants we are + // required to uphold. 
+ unsafe { self.deref_mut_helper() } + } +} + +impl Deref for Ref +where + B: ByteSlice, + T: FromBytes, +{ + type Target = [T]; + #[inline] + fn deref(&self) -> &[T] { + // SAFETY: This is sound because the lifetime of `self` is the same as + // the lifetime of the return value, meaning that a) the returned + // reference cannot outlive `self` and, b) no mutable methods on `self` + // can be called during the lifetime of the returned reference. See the + // documentation on `deref_slice_helper` for what invariants we are + // required to uphold. + unsafe { self.deref_slice_helper() } + } +} + +impl DerefMut for Ref +where + B: ByteSliceMut, + T: FromBytes + AsBytes, +{ + #[inline] + fn deref_mut(&mut self) -> &mut [T] { + // SAFETY: This is sound because the lifetime of `self` is the same as + // the lifetime of the return value, meaning that a) the returned + // reference cannot outlive `self` and, b) no other methods on `self` + // can be called during the lifetime of the returned reference. See the + // documentation on `deref_mut_slice_helper` for what invariants we are + // required to uphold. 
+ unsafe { self.deref_mut_slice_helper() } + } +} + +impl Display for Ref +where + B: ByteSlice, + T: FromBytes + Display, +{ + #[inline] + fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { + let inner: &T = self; + inner.fmt(fmt) + } +} + +impl Display for Ref +where + B: ByteSlice, + T: FromBytes, + [T]: Display, +{ + #[inline] + fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { + let inner: &[T] = self; + inner.fmt(fmt) + } +} + +impl Debug for Ref +where + B: ByteSlice, + T: FromBytes + Debug, +{ + #[inline] + fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { + let inner: &T = self; + fmt.debug_tuple("Ref").field(&inner).finish() + } +} + +impl Debug for Ref +where + B: ByteSlice, + T: FromBytes + Debug, +{ + #[inline] + fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { + let inner: &[T] = self; + fmt.debug_tuple("Ref").field(&inner).finish() + } +} + +impl Eq for Ref +where + B: ByteSlice, + T: FromBytes + Eq, +{ +} + +impl Eq for Ref +where + B: ByteSlice, + T: FromBytes + Eq, +{ +} + +impl PartialEq for Ref +where + B: ByteSlice, + T: FromBytes + PartialEq, +{ + #[inline] + fn eq(&self, other: &Self) -> bool { + self.deref().eq(other.deref()) + } +} + +impl PartialEq for Ref +where + B: ByteSlice, + T: FromBytes + PartialEq, +{ + #[inline] + fn eq(&self, other: &Self) -> bool { + self.deref().eq(other.deref()) + } +} + +impl Ord for Ref +where + B: ByteSlice, + T: FromBytes + Ord, +{ + #[inline] + fn cmp(&self, other: &Self) -> Ordering { + let inner: &T = self; + let other_inner: &T = other; + inner.cmp(other_inner) + } +} + +impl Ord for Ref +where + B: ByteSlice, + T: FromBytes + Ord, +{ + #[inline] + fn cmp(&self, other: &Self) -> Ordering { + let inner: &[T] = self; + let other_inner: &[T] = other; + inner.cmp(other_inner) + } +} + +impl PartialOrd for Ref +where + B: ByteSlice, + T: FromBytes + PartialOrd, +{ + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + let inner: &T = self; + let other_inner: &T = other; + 
inner.partial_cmp(other_inner) + } +} + +impl PartialOrd for Ref +where + B: ByteSlice, + T: FromBytes + PartialOrd, +{ + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + let inner: &[T] = self; + let other_inner: &[T] = other; + inner.partial_cmp(other_inner) + } +} + +mod sealed { + pub trait ByteSliceSealed {} +} + +// ByteSlice and ByteSliceMut abstract over [u8] references (&[u8], &mut [u8], +// Ref<[u8]>, RefMut<[u8]>, etc). We rely on various behaviors of these +// references such as that a given reference will never changes its length +// between calls to deref() or deref_mut(), and that split_at() works as +// expected. If ByteSlice or ByteSliceMut were not sealed, consumers could +// implement them in a way that violated these behaviors, and would break our +// unsafe code. Thus, we seal them and implement it only for known-good +// reference types. For the same reason, they're unsafe traits. + +#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068) +/// A mutable or immutable reference to a byte slice. +/// +/// `ByteSlice` abstracts over the mutability of a byte slice reference, and is +/// implemented for various special reference types such as `Ref<[u8]>` and +/// `RefMut<[u8]>`. +/// +/// Note that, while it would be technically possible, `ByteSlice` is not +/// implemented for [`Vec`], as the only way to implement the [`split_at`] +/// method would involve reallocation, and `split_at` must be a very cheap +/// operation in order for the utilities in this crate to perform as designed. +/// +/// [`split_at`]: crate::ByteSlice::split_at +// It may seem overkill to go to this length to ensure that this doc link never +// breaks. We do this because it simplifies CI - it means that generating docs +// always succeeds, so we don't need special logic to only generate docs under +// certain features. 
+#[cfg_attr(feature = "alloc", doc = "[`Vec`]: alloc::vec::Vec")] +#[cfg_attr( + not(feature = "alloc"), + doc = "[`Vec`]: https://doc.rust-lang.org/std/vec/struct.Vec.html" +)] +pub unsafe trait ByteSlice: Deref + Sized + sealed::ByteSliceSealed { + /// Are the [`Ref::into_ref`] and [`Ref::into_mut`] methods sound when used + /// with `Self`? If not, evaluating this constant must panic at compile + /// time. + /// + /// This exists to work around #716 on versions of zerocopy prior to 0.8. + /// + /// # Safety + /// + /// This may only be set to true if the following holds: Given the + /// following: + /// - `Self: 'a` + /// - `bytes: Self` + /// - `let ptr = bytes.as_ptr()` + /// + /// ...then: + /// - Using `ptr` to read the memory previously addressed by `bytes` is + /// sound for `'a` even after `bytes` has been dropped. + /// - If `Self: ByteSliceMut`, using `ptr` to write the memory previously + /// addressed by `bytes` is sound for `'a` even after `bytes` has been + /// dropped. + #[doc(hidden)] + const INTO_REF_INTO_MUT_ARE_SOUND: bool; + + /// Gets a raw pointer to the first byte in the slice. + #[inline] + fn as_ptr(&self) -> *const u8 { + <[u8]>::as_ptr(self) + } + + /// Splits the slice at the midpoint. + /// + /// `x.split_at(mid)` returns `x[..mid]` and `x[mid..]`. + /// + /// # Panics + /// + /// `x.split_at(mid)` panics if `mid > x.len()`. + fn split_at(self, mid: usize) -> (Self, Self); +} + +#[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068) +/// A mutable reference to a byte slice. +/// +/// `ByteSliceMut` abstracts over various ways of storing a mutable reference to +/// a byte slice, and is implemented for various special reference types such as +/// `RefMut<[u8]>`. +pub unsafe trait ByteSliceMut: ByteSlice + DerefMut { + /// Gets a mutable raw pointer to the first byte in the slice. 
+ #[inline] + fn as_mut_ptr(&mut self) -> *mut u8 { + <[u8]>::as_mut_ptr(self) + } +} + +impl<'a> sealed::ByteSliceSealed for &'a [u8] {} +// TODO(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl<'a> ByteSlice for &'a [u8] { + // SAFETY: If `&'b [u8]: 'a`, then the underlying memory is treated as + // borrowed immutably for `'a` even if the slice itself is dropped. + const INTO_REF_INTO_MUT_ARE_SOUND: bool = true; + + #[inline] + fn split_at(self, mid: usize) -> (Self, Self) { + <[u8]>::split_at(self, mid) + } +} + +impl<'a> sealed::ByteSliceSealed for &'a mut [u8] {} +// TODO(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl<'a> ByteSlice for &'a mut [u8] { + // SAFETY: If `&'b mut [u8]: 'a`, then the underlying memory is treated as + // borrowed mutably for `'a` even if the slice itself is dropped. + const INTO_REF_INTO_MUT_ARE_SOUND: bool = true; + + #[inline] + fn split_at(self, mid: usize) -> (Self, Self) { + <[u8]>::split_at_mut(self, mid) + } +} + +impl<'a> sealed::ByteSliceSealed for cell::Ref<'a, [u8]> {} +// TODO(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl<'a> ByteSlice for cell::Ref<'a, [u8]> { + const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) { + panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::Ref; see https://github.com/google/zerocopy/issues/716") + } else { + // When compiling documentation, allow the evaluation of this constant + // to succeed. This doesn't represent a soundness hole - it just delays + // any error to runtime. The reason we need this is that, otherwise, + // `rustdoc` will fail when trying to document this item. 
+ false + }; + + #[inline] + fn split_at(self, mid: usize) -> (Self, Self) { + cell::Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid)) + } +} + +impl<'a> sealed::ByteSliceSealed for RefMut<'a, [u8]> {} +// TODO(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> { + const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) { + panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::RefMut; see https://github.com/google/zerocopy/issues/716") + } else { + // When compiling documentation, allow the evaluation of this constant + // to succeed. This doesn't represent a soundness hole - it just delays + // any error to runtime. The reason we need this is that, otherwise, + // `rustdoc` will fail when trying to document this item. + false + }; + + #[inline] + fn split_at(self, mid: usize) -> (Self, Self) { + RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid)) + } +} + +// TODO(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl<'a> ByteSliceMut for &'a mut [u8] {} + +// TODO(#429): Add a "SAFETY" comment and remove this `allow`. +#[allow(clippy::undocumented_unsafe_blocks)] +unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {} + +#[cfg(feature = "alloc")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] +mod alloc_support { + use alloc::vec::Vec; + + use super::*; + + /// Extends a `Vec` by pushing `additional` new items onto the end of the + /// vector. The new items are initialized with zeroes. + /// + /// # Panics + /// + /// Panics if `Vec::reserve(additional)` fails to reserve enough memory. + #[inline(always)] + pub fn extend_vec_zeroed(v: &mut Vec, additional: usize) { + insert_vec_zeroed(v, v.len(), additional); + } + + /// Inserts `additional` new items into `Vec` at `position`. + /// The new items are initialized with zeroes. 
+ /// + /// # Panics + /// + /// * Panics if `position > v.len()`. + /// * Panics if `Vec::reserve(additional)` fails to reserve enough memory. + #[inline] + pub fn insert_vec_zeroed(v: &mut Vec, position: usize, additional: usize) { + assert!(position <= v.len()); + v.reserve(additional); + // SAFETY: The `reserve` call guarantees that these cannot overflow: + // * `ptr.add(position)` + // * `position + additional` + // * `v.len() + additional` + // + // `v.len() - position` cannot overflow because we asserted that + // `position <= v.len()`. + unsafe { + // This is a potentially overlapping copy. + let ptr = v.as_mut_ptr(); + #[allow(clippy::arithmetic_side_effects)] + ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position); + ptr.add(position).write_bytes(0, additional); + #[allow(clippy::arithmetic_side_effects)] + v.set_len(v.len() + additional); + } + } + + #[cfg(test)] + mod tests { + use core::convert::TryFrom as _; + + use super::*; + + #[test] + fn test_extend_vec_zeroed() { + // Test extending when there is an existing allocation. + let mut v = vec![100u64, 200, 300]; + extend_vec_zeroed(&mut v, 3); + assert_eq!(v.len(), 6); + assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]); + drop(v); + + // Test extending when there is no existing allocation. + let mut v: Vec = Vec::new(); + extend_vec_zeroed(&mut v, 3); + assert_eq!(v.len(), 3); + assert_eq!(&*v, &[0, 0, 0]); + drop(v); + } + + #[test] + fn test_extend_vec_zeroed_zst() { + // Test extending when there is an existing (fake) allocation. + let mut v = vec![(), (), ()]; + extend_vec_zeroed(&mut v, 3); + assert_eq!(v.len(), 6); + assert_eq!(&*v, &[(), (), (), (), (), ()]); + drop(v); + + // Test extending when there is no existing (fake) allocation. + let mut v: Vec<()> = Vec::new(); + extend_vec_zeroed(&mut v, 3); + assert_eq!(&*v, &[(), (), ()]); + drop(v); + } + + #[test] + fn test_insert_vec_zeroed() { + // Insert at start (no existing allocation). 
+ let mut v: Vec = Vec::new(); + insert_vec_zeroed(&mut v, 0, 2); + assert_eq!(v.len(), 2); + assert_eq!(&*v, &[0, 0]); + drop(v); + + // Insert at start. + let mut v = vec![100u64, 200, 300]; + insert_vec_zeroed(&mut v, 0, 2); + assert_eq!(v.len(), 5); + assert_eq!(&*v, &[0, 0, 100, 200, 300]); + drop(v); + + // Insert at middle. + let mut v = vec![100u64, 200, 300]; + insert_vec_zeroed(&mut v, 1, 1); + assert_eq!(v.len(), 4); + assert_eq!(&*v, &[100, 0, 200, 300]); + drop(v); + + // Insert at end. + let mut v = vec![100u64, 200, 300]; + insert_vec_zeroed(&mut v, 3, 1); + assert_eq!(v.len(), 4); + assert_eq!(&*v, &[100, 200, 300, 0]); + drop(v); + } + + #[test] + fn test_insert_vec_zeroed_zst() { + // Insert at start (no existing fake allocation). + let mut v: Vec<()> = Vec::new(); + insert_vec_zeroed(&mut v, 0, 2); + assert_eq!(v.len(), 2); + assert_eq!(&*v, &[(), ()]); + drop(v); + + // Insert at start. + let mut v = vec![(), (), ()]; + insert_vec_zeroed(&mut v, 0, 2); + assert_eq!(v.len(), 5); + assert_eq!(&*v, &[(), (), (), (), ()]); + drop(v); + + // Insert at middle. + let mut v = vec![(), (), ()]; + insert_vec_zeroed(&mut v, 1, 1); + assert_eq!(v.len(), 4); + assert_eq!(&*v, &[(), (), (), ()]); + drop(v); + + // Insert at end. + let mut v = vec![(), (), ()]; + insert_vec_zeroed(&mut v, 3, 1); + assert_eq!(v.len(), 4); + assert_eq!(&*v, &[(), (), (), ()]); + drop(v); + } + + #[test] + fn test_new_box_zeroed() { + assert_eq!(*u64::new_box_zeroed(), 0); + } + + #[test] + fn test_new_box_zeroed_array() { + drop(<[u32; 0x1000]>::new_box_zeroed()); + } + + #[test] + fn test_new_box_zeroed_zst() { + // This test exists in order to exercise unsafe code, especially + // when running under Miri. 
+ #[allow(clippy::unit_cmp)] + { + assert_eq!(*<()>::new_box_zeroed(), ()); + } + } + + #[test] + fn test_new_box_slice_zeroed() { + let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3); + assert_eq!(s.len(), 3); + assert_eq!(&*s, &[0, 0, 0]); + s[1] = 3; + assert_eq!(&*s, &[0, 3, 0]); + } + + #[test] + fn test_new_box_slice_zeroed_empty() { + let s: Box<[u64]> = u64::new_box_slice_zeroed(0); + assert_eq!(s.len(), 0); + } + + #[test] + fn test_new_box_slice_zeroed_zst() { + let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3); + assert_eq!(s.len(), 3); + assert!(s.get(10).is_none()); + // This test exists in order to exercise unsafe code, especially + // when running under Miri. + #[allow(clippy::unit_cmp)] + { + assert_eq!(s[1], ()); + } + s[2] = (); + } + + #[test] + fn test_new_box_slice_zeroed_zst_empty() { + let s: Box<[()]> = <()>::new_box_slice_zeroed(0); + assert_eq!(s.len(), 0); + } + + #[test] + #[should_panic(expected = "mem::size_of::() * len overflows `usize`")] + fn test_new_box_slice_zeroed_panics_mul_overflow() { + let _ = u16::new_box_slice_zeroed(usize::MAX); + } + + #[test] + #[should_panic(expected = "assertion failed: size <= max_alloc")] + fn test_new_box_slice_zeroed_panics_isize_overflow() { + let max = usize::try_from(isize::MAX).unwrap(); + let _ = u16::new_box_slice_zeroed((max / mem::size_of::()) + 1); + } + } +} + +#[cfg(feature = "alloc")] +#[doc(inline)] +pub use alloc_support::*; + +#[cfg(test)] +mod tests { + #![allow(clippy::unreadable_literal)] + + use core::{cell::UnsafeCell, convert::TryInto as _, ops::Deref}; + + use static_assertions::assert_impl_all; + + use super::*; + use crate::util::testutil::*; + + // An unsized type. + // + // This is used to test the custom derives of our traits. The `[u8]` type + // gets a hand-rolled impl, so it doesn't exercise our custom derives. 
+ #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes, Unaligned)] + #[repr(transparent)] + struct Unsized([u8]); + + impl Unsized { + fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized { + // SAFETY: This *probably* sound - since the layouts of `[u8]` and + // `Unsized` are the same, so are the layouts of `&mut [u8]` and + // `&mut Unsized`. [1] Even if it turns out that this isn't actually + // guaranteed by the language spec, we can just change this since + // it's in test code. + // + // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375 + unsafe { mem::transmute(slc) } + } + } + + /// Tests of when a sized `DstLayout` is extended with a sized field. + #[allow(clippy::decimal_literal_representation)] + #[test] + fn test_dst_layout_extend_sized_with_sized() { + // This macro constructs a layout corresponding to a `u8` and extends it + // with a zero-sized trailing field of given alignment `n`. The macro + // tests that the resulting layout has both size and alignment `min(n, + // P)` for all valid values of `repr(packed(P))`. + macro_rules! 
test_align_is_size { + ($n:expr) => { + let base = DstLayout::for_type::(); + let trailing_field = DstLayout::for_type::>(); + + let packs = + core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p)))); + + for pack in packs { + let composite = base.extend(trailing_field, pack); + let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN); + let align = $n.min(max_align.get()); + assert_eq!( + composite, + DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::Sized { _size: align } + } + ) + } + }; + } + + test_align_is_size!(1); + test_align_is_size!(2); + test_align_is_size!(4); + test_align_is_size!(8); + test_align_is_size!(16); + test_align_is_size!(32); + test_align_is_size!(64); + test_align_is_size!(128); + test_align_is_size!(256); + test_align_is_size!(512); + test_align_is_size!(1024); + test_align_is_size!(2048); + test_align_is_size!(4096); + test_align_is_size!(8192); + test_align_is_size!(16384); + test_align_is_size!(32768); + test_align_is_size!(65536); + test_align_is_size!(131072); + test_align_is_size!(262144); + test_align_is_size!(524288); + test_align_is_size!(1048576); + test_align_is_size!(2097152); + test_align_is_size!(4194304); + test_align_is_size!(8388608); + test_align_is_size!(16777216); + test_align_is_size!(33554432); + test_align_is_size!(67108864); + test_align_is_size!(33554432); + test_align_is_size!(134217728); + test_align_is_size!(268435456); + } + + /// Tests of when a sized `DstLayout` is extended with a DST field. + #[test] + fn test_dst_layout_extend_sized_with_dst() { + // Test that for all combinations of real-world alignments and + // `repr_packed` values, that the extension of a sized `DstLayout`` with + // a DST field correctly computes the trailing offset in the composite + // layout. 
+ + let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()); + let packs = core::iter::once(None).chain(aligns.clone().map(Some)); + + for align in aligns { + for pack in packs.clone() { + let base = DstLayout::for_type::(); + let elem_size = 42; + let trailing_field_offset = 11; + + let trailing_field = DstLayout { + align, + size_info: SizeInfo::SliceDst(TrailingSliceLayout { + _elem_size: elem_size, + _offset: 11, + }), + }; + + let composite = base.extend(trailing_field, pack); + + let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get(); + + let align = align.get().min(max_align); + + assert_eq!( + composite, + DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::SliceDst(TrailingSliceLayout { + _elem_size: elem_size, + _offset: align + trailing_field_offset, + }), + } + ) + } + } + } + + /// Tests that calling `pad_to_align` on a sized `DstLayout` adds the + /// expected amount of trailing padding. + #[test] + fn test_dst_layout_pad_to_align_with_sized() { + // For all valid alignments `align`, construct a one-byte layout aligned + // to `align`, call `pad_to_align`, and assert that the size of the + // resulting layout is equal to `align`. + for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { + let layout = DstLayout { align, size_info: SizeInfo::Sized { _size: 1 } }; + + assert_eq!( + layout.pad_to_align(), + DstLayout { align, size_info: SizeInfo::Sized { _size: align.get() } } + ); + } + + // Test explicitly-provided combinations of unpadded and padded + // counterparts. + + macro_rules! 
test { + (unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr } + => padded { size: $padded_size:expr, align: $padded_align:expr }) => { + let unpadded = DstLayout { + align: NonZeroUsize::new($unpadded_align).unwrap(), + size_info: SizeInfo::Sized { _size: $unpadded_size }, + }; + let padded = unpadded.pad_to_align(); + + assert_eq!( + padded, + DstLayout { + align: NonZeroUsize::new($padded_align).unwrap(), + size_info: SizeInfo::Sized { _size: $padded_size }, + } + ); + }; + } + + test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 }); + test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 }); + test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 }); + test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 }); + test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 }); + + let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get(); + + test!(unpadded { size: 1, align: current_max_align } + => padded { size: current_max_align, align: current_max_align }); + + test!(unpadded { size: current_max_align + 1, align: current_max_align } + => padded { size: current_max_align * 2, align: current_max_align }); + } + + /// Tests that calling `pad_to_align` on a DST `DstLayout` is a no-op. 
+ #[test] + fn test_dst_layout_pad_to_align_with_dst() { + for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { + for offset in 0..10 { + for elem_size in 0..10 { + let layout = DstLayout { + align, + size_info: SizeInfo::SliceDst(TrailingSliceLayout { + _offset: offset, + _elem_size: elem_size, + }), + }; + assert_eq!(layout.pad_to_align(), layout); + } + } + } + } + + // This test takes a long time when running under Miri, so we skip it in + // that case. This is acceptable because this is a logic test that doesn't + // attempt to expose UB. + #[test] + #[cfg_attr(miri, ignore)] + fn testvalidate_cast_and_convert_metadata() { + impl From for SizeInfo { + fn from(_size: usize) -> SizeInfo { + SizeInfo::Sized { _size } + } + } + + impl From<(usize, usize)> for SizeInfo { + fn from((_offset, _elem_size): (usize, usize)) -> SizeInfo { + SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) + } + } + + fn layout>(s: S, align: usize) -> DstLayout { + DstLayout { size_info: s.into(), align: NonZeroUsize::new(align).unwrap() } + } + + /// This macro accepts arguments in the form of: + /// + /// layout(_, _, _).validate(_, _, _), Ok(Some((_, _))) + /// | | | | | | | | + /// base_size ----+ | | | | | | | + /// align -----------+ | | | | | | + /// trailing_size ------+ | | | | | + /// addr ---------------------------+ | | | | + /// bytes_len -------------------------+ | | | + /// cast_type ----------------------------+ | | + /// elems ---------------------------------------------+ | + /// split_at ---------------------------------------------+ + /// + /// `.validate` is shorthand for `.validate_cast_and_convert_metadata` + /// for brevity. + /// + /// Each argument can either be an iterator or a wildcard. Each + /// wildcarded variable is implicitly replaced by an iterator over a + /// representative sample of values for that variable. 
Each `test!` + /// invocation iterates over every combination of values provided by + /// each variable's iterator (ie, the cartesian product) and validates + /// that the results are expected. + /// + /// The final argument uses the same syntax, but it has a different + /// meaning: + /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to + /// `assert_matches!` to validate the computed result for each + /// combination of input values. + /// - If it is `Err(msg)`, then `test!` validates that the call to + /// `validate_cast_and_convert_metadata` panics with the given panic + /// message. + /// + /// Note that the meta-variables that match these variables have the + /// `tt` type, and some valid expressions are not valid `tt`s (such as + /// `a..b`). In this case, wrap the expression in parentheses, and it + /// will become valid `tt`. + macro_rules! test { + ($(:$sizes:expr =>)? + layout($size:tt, $align:tt) + .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)? + ) => { + itertools::iproduct!( + test!(@generate_size $size), + test!(@generate_align $align), + test!(@generate_usize $addr), + test!(@generate_usize $bytes_len), + test!(@generate_cast_type $cast_type) + ).for_each(|(size_info, align, addr, bytes_len, cast_type)| { + // Temporarily disable the panic hook installed by the test + // harness. If we don't do this, all panic messages will be + // kept in an internal log. On its own, this isn't a + // problem, but if a non-caught panic ever happens (ie, in + // code later in this test not in this macro), all of the + // previously-buffered messages will be dumped, hiding the + // real culprit. + let previous_hook = std::panic::take_hook(); + // I don't understand why, but this seems to be required in + // addition to the previous line. 
+ std::panic::set_hook(Box::new(|_| {})); + let actual = std::panic::catch_unwind(|| { + layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type) + }).map_err(|d| { + *d.downcast::<&'static str>().expect("expected string panic message").as_ref() + }); + std::panic::set_hook(previous_hook); + + assert_matches::assert_matches!( + actual, $expect, + "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?})", + ); + }); + }; + (@generate_usize _) => { 0..8 }; + // Generate sizes for both Sized and !Sized types. + (@generate_size _) => { + test!(@generate_size (_)).chain(test!(@generate_size (_, _))) + }; + // Generate sizes for both Sized and !Sized types by chaining + // specified iterators for each. + (@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => { + test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes)) + }; + // Generate sizes for Sized types. + (@generate_size (_)) => { test!(@generate_size (0..8)) }; + (@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::::into) }; + // Generate sizes for !Sized types. + (@generate_size ($min_sizes:tt, $elem_sizes:tt)) => { + itertools::iproduct!( + test!(@generate_min_size $min_sizes), + test!(@generate_elem_size $elem_sizes) + ).map(Into::::into) + }; + (@generate_fixed_size _) => { (0..8).into_iter().map(Into::::into) }; + (@generate_min_size _) => { 0..8 }; + (@generate_elem_size _) => { 1..8 }; + (@generate_align _) => { [1, 2, 4, 8, 16] }; + (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some).into_iter()) }; + (@generate_cast_type _) => { [_CastType::_Prefix, _CastType::_Suffix] }; + (@generate_cast_type $variant:ident) => { [_CastType::$variant] }; + // Some expressions need to be wrapped in parentheses in order to be + // valid `tt`s (required by the top match pattern). See the comment + // below for more details. 
This arm removes these parentheses to + // avoid generating an `unused_parens` warning. + (@$_:ident ($vals:expr)) => { $vals }; + (@$_:ident $vals:expr) => { $vals }; + } + + const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14]; + const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15]; + + // base_size is too big for the memory region. + test!(layout(((1..8) | ((1..8), (1..8))), _).validate(_, [0], _), Ok(None)); + test!(layout(((2..8) | ((2..8), (2..8))), _).validate(_, [1], _), Ok(None)); + + // addr is unaligned for prefix cast + test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None)); + test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None)); + + // addr is aligned, but end of buffer is unaligned for suffix cast + test!(layout(_, [2]).validate(EVENS, ODDS, _Suffix), Ok(None)); + test!(layout(_, [2]).validate(EVENS, ODDS, _Suffix), Ok(None)); + + // Unfortunately, these constants cannot easily be used in the + // implementation of `validate_cast_and_convert_metadata`, since + // `panic!` consumes a string literal, not an expression. + // + // It's important that these messages be in a separate module. If they + // were at the function's top level, we'd pass them to `test!` as, e.g., + // `Err(TRAILING)`, which would run into a subtle Rust footgun - the + // `TRAILING` identifier would be treated as a pattern to match rather + // than a value to check for equality. 
+ mod msgs { + pub(super) const TRAILING: &str = + "attempted to cast to slice type with zero-sized element"; + pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX"; + } + + // casts with ZST trailing element types are unsupported + test!(layout((_, [0]), _).validate(_, _, _), Err(msgs::TRAILING),); + + // addr + bytes_len must not overflow usize + test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(msgs::OVERFLOW)); + test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(msgs::OVERFLOW)); + test!( + layout(_, _).validate( + [usize::MAX / 2 + 1, usize::MAX], + [usize::MAX / 2 + 1, usize::MAX], + _ + ), + Err(msgs::OVERFLOW) + ); + + // Validates that `validate_cast_and_convert_metadata` satisfies its own + // documented safety postconditions, and also a few other properties + // that aren't documented but we want to guarantee anyway. + fn validate_behavior( + (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, _CastType), + ) { + if let Some((elems, split_at)) = + layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type) + { + let (size_info, align) = (layout.size_info, layout.align); + let debug_str = format!( + "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?}) => ({elems}, {split_at})", + ); + + // If this is a sized type (no trailing slice), then `elems` is + // meaningless, but in practice we set it to 0. Callers are not + // allowed to rely on this, but a lot of math is nicer if + // they're able to, and some callers might accidentally do that. + let sized = matches!(layout.size_info, SizeInfo::Sized { .. 
}); + assert!(!(sized && elems != 0), "{}", debug_str); + + let resulting_size = match layout.size_info { + SizeInfo::Sized { _size } => _size, + SizeInfo::SliceDst(TrailingSliceLayout { + _offset: offset, + _elem_size: elem_size, + }) => { + let padded_size = |elems| { + let without_padding = offset + elems * elem_size; + without_padding + + util::core_layout::padding_needed_for(without_padding, align) + }; + + let resulting_size = padded_size(elems); + // Test that `validate_cast_and_convert_metadata` + // computed the largest possible value that fits in the + // given range. + assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str); + resulting_size + } + }; + + // Test safety postconditions guaranteed by + // `validate_cast_and_convert_metadata`. + assert!(resulting_size <= bytes_len, "{}", debug_str); + match cast_type { + _CastType::_Prefix => { + assert_eq!(addr % align, 0, "{}", debug_str); + assert_eq!(resulting_size, split_at, "{}", debug_str); + } + _CastType::_Suffix => { + assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str); + assert_eq!((addr + split_at) % align, 0, "{}", debug_str); + } + } + } else { + let min_size = match layout.size_info { + SizeInfo::Sized { _size } => _size, + SizeInfo::SliceDst(TrailingSliceLayout { _offset, .. }) => { + _offset + util::core_layout::padding_needed_for(_offset, layout.align) + } + }; + + // If a cast is invalid, it is either because... + // 1. there are insufficent bytes at the given region for type: + let insufficient_bytes = bytes_len < min_size; + // 2. 
performing the cast would misalign type: + let base = match cast_type { + _CastType::_Prefix => 0, + _CastType::_Suffix => bytes_len, + }; + let misaligned = (base + addr) % layout.align != 0; + + assert!(insufficient_bytes || misaligned); + } + } + + let sizes = 0..8; + let elem_sizes = 1..8; + let size_infos = sizes + .clone() + .map(Into::::into) + .chain(itertools::iproduct!(sizes, elem_sizes).map(Into::::into)); + let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32]) + .filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { _size } if _size % align != 0)) + .map(|(size_info, align)| layout(size_info, align)); + itertools::iproduct!(layouts, 0..8, 0..8, [_CastType::_Prefix, _CastType::_Suffix]) + .for_each(validate_behavior); + } + + #[test] + #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] + fn test_validate_rust_layout() { + use core::ptr::NonNull; + + // This test synthesizes pointers with various metadata and uses Rust's + // built-in APIs to confirm that Rust makes decisions about type layout + // which are consistent with what we believe is guaranteed by the + // language. If this test fails, it doesn't just mean our code is wrong + // - it means we're misunderstanding the language's guarantees. + + #[derive(Debug)] + struct MacroArgs { + offset: usize, + align: NonZeroUsize, + elem_size: Option, + } + + /// # Safety + /// + /// `test` promises to only call `addr_of_slice_field` on a `NonNull` + /// which points to a valid `T`. + /// + /// `with_elems` must produce a pointer which points to a valid `T`. 
+ fn test NonNull>( + args: MacroArgs, + with_elems: W, + addr_of_slice_field: Option) -> NonNull>, + ) { + let dst = args.elem_size.is_some(); + let layout = { + let size_info = match args.elem_size { + Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout { + _offset: args.offset, + _elem_size: elem_size, + }), + None => SizeInfo::Sized { + // Rust only supports types whose sizes are a multiple + // of their alignment. If the macro created a type like + // this: + // + // #[repr(C, align(2))] + // struct Foo([u8; 1]); + // + // ...then Rust will automatically round the type's size + // up to 2. + _size: args.offset + + util::core_layout::padding_needed_for(args.offset, args.align), + }, + }; + DstLayout { size_info, align: args.align } + }; + + for elems in 0..128 { + let ptr = with_elems(elems); + + if let Some(addr_of_slice_field) = addr_of_slice_field { + let slc_field_ptr = addr_of_slice_field(ptr).as_ptr(); + // SAFETY: Both `slc_field_ptr` and `ptr` are pointers to + // the same valid Rust object. + let offset: usize = + unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() }; + assert_eq!(offset, args.offset); + } + + // SAFETY: `ptr` points to a valid `T`. + let (size, align) = unsafe { + (mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr())) + }; + + // Avoid expensive allocation when running under Miri. + let assert_msg = if !cfg!(miri) { + format!("\n{args:?}\nsize:{size}, align:{align}") + } else { + String::new() + }; + + let without_padding = + args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0); + assert!(size >= without_padding, "{}", assert_msg); + assert_eq!(align, args.align.get(), "{}", assert_msg); + + // This encodes the most important part of the test: our + // understanding of how Rust determines the layout of repr(C) + // types. Sized repr(C) types are trivial, but DST types have + // some subtlety. 
Note that: + // - For sized types, `without_padding` is just the size of the + // type that we constructed for `Foo`. Since we may have + // requested a larger alignment, `Foo` may actually be larger + // than this, hence `padding_needed_for`. + // - For unsized types, `without_padding` is dynamically + // computed from the offset, the element size, and element + // count. We expect that the size of the object should be + // `offset + elem_size * elems` rounded up to the next + // alignment. + let expected_size = without_padding + + util::core_layout::padding_needed_for(without_padding, args.align); + assert_eq!(expected_size, size, "{}", assert_msg); + + // For zero-sized element types, + // `validate_cast_and_convert_metadata` just panics, so we skip + // testing those types. + if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) { + let addr = ptr.addr().get(); + let (got_elems, got_split_at) = layout + .validate_cast_and_convert_metadata(addr, size, _CastType::_Prefix) + .unwrap(); + // Avoid expensive allocation when running under Miri. + let assert_msg = if !cfg!(miri) { + format!( + "{}\nvalidate_cast_and_convert_metadata({addr}, {size})", + assert_msg + ) + } else { + String::new() + }; + assert_eq!(got_split_at, size, "{}", assert_msg); + if dst { + assert!(got_elems >= elems, "{}", assert_msg); + if got_elems != elems { + // If `validate_cast_and_convert_metadata` + // returned more elements than `elems`, that + // means that `elems` is not the maximum number + // of elements that can fit in `size` - in other + // words, there is enough padding at the end of + // the value to fit at least one more element. + // If we use this metadata to synthesize a + // pointer, despite having a different element + // count, we still expect it to have the same + // size. + let got_ptr = with_elems(got_elems); + // SAFETY: `got_ptr` is a pointer to a valid `T`. 
+ let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) }; + assert_eq!(size_of_got_ptr, size, "{}", assert_msg); + } + } else { + // For sized casts, the returned element value is + // technically meaningless, and we don't guarantee any + // particular value. In practice, it's always zero. + assert_eq!(got_elems, 0, "{}", assert_msg) + } + } + } + } + + macro_rules! validate_against_rust { + ($offset:literal, $align:literal $(, $elem_size:literal)?) => {{ + #[repr(C, align($align))] + struct Foo([u8; $offset]$(, [[u8; $elem_size]])?); + + let args = MacroArgs { + offset: $offset, + align: $align.try_into().unwrap(), + elem_size: { + #[allow(unused)] + let ret = None::; + $(let ret = Some($elem_size);)? + ret + } + }; + + #[repr(C, align($align))] + struct FooAlign; + // Create an aligned buffer to use in order to synthesize + // pointers to `Foo`. We don't ever load values from these + // pointers - we just do arithmetic on them - so having a "real" + // block of memory as opposed to a validly-aligned-but-dangling + // pointer is only necessary to make Miri happy since we run it + // with "strict provenance" checking enabled. + let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]); + let with_elems = |elems| { + let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems); + #[allow(clippy::as_conversions)] + NonNull::new(slc.as_ptr() as *mut Foo).unwrap() + }; + let addr_of_slice_field = { + #[allow(unused)] + let f = None::) -> NonNull>; + $( + // SAFETY: `test` promises to only call `f` with a `ptr` + // to a valid `Foo`. + let f: Option) -> NonNull> = Some(|ptr: NonNull| unsafe { + NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::() + }); + let _ = $elem_size; + )? 
+ f + }; + + test::(args, with_elems, addr_of_slice_field); + }}; + } + + // Every permutation of: + // - offset in [0, 4] + // - align in [1, 16] + // - elem_size in [0, 4] (plus no elem_size) + validate_against_rust!(0, 1); + validate_against_rust!(0, 1, 0); + validate_against_rust!(0, 1, 1); + validate_against_rust!(0, 1, 2); + validate_against_rust!(0, 1, 3); + validate_against_rust!(0, 1, 4); + validate_against_rust!(0, 2); + validate_against_rust!(0, 2, 0); + validate_against_rust!(0, 2, 1); + validate_against_rust!(0, 2, 2); + validate_against_rust!(0, 2, 3); + validate_against_rust!(0, 2, 4); + validate_against_rust!(0, 4); + validate_against_rust!(0, 4, 0); + validate_against_rust!(0, 4, 1); + validate_against_rust!(0, 4, 2); + validate_against_rust!(0, 4, 3); + validate_against_rust!(0, 4, 4); + validate_against_rust!(0, 8); + validate_against_rust!(0, 8, 0); + validate_against_rust!(0, 8, 1); + validate_against_rust!(0, 8, 2); + validate_against_rust!(0, 8, 3); + validate_against_rust!(0, 8, 4); + validate_against_rust!(0, 16); + validate_against_rust!(0, 16, 0); + validate_against_rust!(0, 16, 1); + validate_against_rust!(0, 16, 2); + validate_against_rust!(0, 16, 3); + validate_against_rust!(0, 16, 4); + validate_against_rust!(1, 1); + validate_against_rust!(1, 1, 0); + validate_against_rust!(1, 1, 1); + validate_against_rust!(1, 1, 2); + validate_against_rust!(1, 1, 3); + validate_against_rust!(1, 1, 4); + validate_against_rust!(1, 2); + validate_against_rust!(1, 2, 0); + validate_against_rust!(1, 2, 1); + validate_against_rust!(1, 2, 2); + validate_against_rust!(1, 2, 3); + validate_against_rust!(1, 2, 4); + validate_against_rust!(1, 4); + validate_against_rust!(1, 4, 0); + validate_against_rust!(1, 4, 1); + validate_against_rust!(1, 4, 2); + validate_against_rust!(1, 4, 3); + validate_against_rust!(1, 4, 4); + validate_against_rust!(1, 8); + validate_against_rust!(1, 8, 0); + validate_against_rust!(1, 8, 1); + validate_against_rust!(1, 8, 2); + 
validate_against_rust!(1, 8, 3); + validate_against_rust!(1, 8, 4); + validate_against_rust!(1, 16); + validate_against_rust!(1, 16, 0); + validate_against_rust!(1, 16, 1); + validate_against_rust!(1, 16, 2); + validate_against_rust!(1, 16, 3); + validate_against_rust!(1, 16, 4); + validate_against_rust!(2, 1); + validate_against_rust!(2, 1, 0); + validate_against_rust!(2, 1, 1); + validate_against_rust!(2, 1, 2); + validate_against_rust!(2, 1, 3); + validate_against_rust!(2, 1, 4); + validate_against_rust!(2, 2); + validate_against_rust!(2, 2, 0); + validate_against_rust!(2, 2, 1); + validate_against_rust!(2, 2, 2); + validate_against_rust!(2, 2, 3); + validate_against_rust!(2, 2, 4); + validate_against_rust!(2, 4); + validate_against_rust!(2, 4, 0); + validate_against_rust!(2, 4, 1); + validate_against_rust!(2, 4, 2); + validate_against_rust!(2, 4, 3); + validate_against_rust!(2, 4, 4); + validate_against_rust!(2, 8); + validate_against_rust!(2, 8, 0); + validate_against_rust!(2, 8, 1); + validate_against_rust!(2, 8, 2); + validate_against_rust!(2, 8, 3); + validate_against_rust!(2, 8, 4); + validate_against_rust!(2, 16); + validate_against_rust!(2, 16, 0); + validate_against_rust!(2, 16, 1); + validate_against_rust!(2, 16, 2); + validate_against_rust!(2, 16, 3); + validate_against_rust!(2, 16, 4); + validate_against_rust!(3, 1); + validate_against_rust!(3, 1, 0); + validate_against_rust!(3, 1, 1); + validate_against_rust!(3, 1, 2); + validate_against_rust!(3, 1, 3); + validate_against_rust!(3, 1, 4); + validate_against_rust!(3, 2); + validate_against_rust!(3, 2, 0); + validate_against_rust!(3, 2, 1); + validate_against_rust!(3, 2, 2); + validate_against_rust!(3, 2, 3); + validate_against_rust!(3, 2, 4); + validate_against_rust!(3, 4); + validate_against_rust!(3, 4, 0); + validate_against_rust!(3, 4, 1); + validate_against_rust!(3, 4, 2); + validate_against_rust!(3, 4, 3); + validate_against_rust!(3, 4, 4); + validate_against_rust!(3, 8); + 
validate_against_rust!(3, 8, 0); + validate_against_rust!(3, 8, 1); + validate_against_rust!(3, 8, 2); + validate_against_rust!(3, 8, 3); + validate_against_rust!(3, 8, 4); + validate_against_rust!(3, 16); + validate_against_rust!(3, 16, 0); + validate_against_rust!(3, 16, 1); + validate_against_rust!(3, 16, 2); + validate_against_rust!(3, 16, 3); + validate_against_rust!(3, 16, 4); + validate_against_rust!(4, 1); + validate_against_rust!(4, 1, 0); + validate_against_rust!(4, 1, 1); + validate_against_rust!(4, 1, 2); + validate_against_rust!(4, 1, 3); + validate_against_rust!(4, 1, 4); + validate_against_rust!(4, 2); + validate_against_rust!(4, 2, 0); + validate_against_rust!(4, 2, 1); + validate_against_rust!(4, 2, 2); + validate_against_rust!(4, 2, 3); + validate_against_rust!(4, 2, 4); + validate_against_rust!(4, 4); + validate_against_rust!(4, 4, 0); + validate_against_rust!(4, 4, 1); + validate_against_rust!(4, 4, 2); + validate_against_rust!(4, 4, 3); + validate_against_rust!(4, 4, 4); + validate_against_rust!(4, 8); + validate_against_rust!(4, 8, 0); + validate_against_rust!(4, 8, 1); + validate_against_rust!(4, 8, 2); + validate_against_rust!(4, 8, 3); + validate_against_rust!(4, 8, 4); + validate_against_rust!(4, 16); + validate_against_rust!(4, 16, 0); + validate_against_rust!(4, 16, 1); + validate_against_rust!(4, 16, 2); + validate_against_rust!(4, 16, 3); + validate_against_rust!(4, 16, 4); + } + + #[test] + fn test_known_layout() { + // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout. + // Test that `PhantomData<$ty>` has the same layout as `()` regardless + // of `$ty`. + macro_rules! 
test { + ($ty:ty, $expect:expr) => { + let expect = $expect; + assert_eq!(<$ty as KnownLayout>::LAYOUT, expect); + assert_eq!( as KnownLayout>::LAYOUT, expect); + assert_eq!( as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT); + }; + } + + let layout = |offset, align, _trailing_slice_elem_size| DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: match _trailing_slice_elem_size { + None => SizeInfo::Sized { _size: offset }, + Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout { + _offset: offset, + _elem_size: elem_size, + }), + }, + }; + + test!((), layout(0, 1, None)); + test!(u8, layout(1, 1, None)); + // Use `align_of` because `u64` alignment may be smaller than 8 on some + // platforms. + test!(u64, layout(8, mem::align_of::(), None)); + test!(AU64, layout(8, 8, None)); + + test!(Option<&'static ()>, usize::LAYOUT); + + test!([()], layout(0, 1, Some(0))); + test!([u8], layout(0, 1, Some(1))); + test!(str, layout(0, 1, Some(1))); + } + + #[cfg(feature = "derive")] + #[test] + fn test_known_layout_derive() { + // In this and other files (`late_compile_pass.rs`, + // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure + // modes of `derive(KnownLayout)` for the following combination of + // properties: + // + // +------------+--------------------------------------+-----------+ + // | | trailing field properties | | + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | + // |------------+----------+----------------+----------+-----------| + // | N | N | N | N | KL00 | + // | N | N | N | Y | KL01 | + // | N | N | Y | N | KL02 | + // | N | N | Y | Y | KL03 | + // | N | Y | N | N | KL04 | + // | N | Y | N | Y | KL05 | + // | N | Y | Y | N | KL06 | + // | N | Y | Y | Y | KL07 | + // | Y | N | N | N | KL08 | + // | Y | N | N | Y | KL09 | + // | Y | N | Y | N | KL10 | + // | Y | N | Y | Y | KL11 | + // | Y | Y | N | N | KL12 | + // | Y | Y | N | Y | KL13 | + // | Y | Y | Y | N | KL14 | + // | Y | Y | Y | Y | KL15 | + // +------------+----------+----------------+----------+-----------+ + + struct NotKnownLayout { + _t: T, + } + + #[derive(KnownLayout)] + #[repr(C)] + struct AlignSize + where + elain::Align: elain::Alignment, + { + _align: elain::Align, + _size: [u8; SIZE], + } + + type AU16 = AlignSize<2, 2>; + type AU32 = AlignSize<4, 4>; + + fn _assert_kl(_: &T) {} + + let sized_layout = |align, size| DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::Sized { _size: size }, + }; + + let unsized_layout = |align, elem_size, offset| DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::SliceDst(TrailingSliceLayout { + _offset: offset, + _elem_size: elem_size, + }), + }; + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | + // | N | N | N | Y | KL01 | + #[derive(KnownLayout)] + #[allow(dead_code)] // fields are never read + struct KL01(NotKnownLayout, NotKnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(4, 8)); + + // ...with `align(N)`: + #[derive(KnownLayout)] + #[repr(align(64))] + #[allow(dead_code)] // fields are never read + struct KL01Align(NotKnownLayout, NotKnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(64, 64)); + + // ...with `packed`: + #[derive(KnownLayout)] + #[repr(packed)] + #[allow(dead_code)] // fields are never read + struct KL01Packed(NotKnownLayout, NotKnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(1, 6)); + + // ...with `packed(N)`: + #[derive(KnownLayout)] + #[repr(packed(2))] + #[allow(dead_code)] // fields are never read + struct KL01PackedN(NotKnownLayout, NotKnownLayout); + + assert_impl_all!(KL01PackedN: KnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(2, 6)); + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | N | N | Y | Y | KL03 | + #[derive(KnownLayout)] + #[allow(dead_code)] // fields are never read + struct KL03(NotKnownLayout, u8); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(1, 1)); + + // ... with `align(N)` + #[derive(KnownLayout)] + #[repr(align(64))] + #[allow(dead_code)] // fields are never read + struct KL03Align(NotKnownLayout, u8); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(64, 64)); + + // ... 
with `packed`: + #[derive(KnownLayout)] + #[repr(packed)] + #[allow(dead_code)] // fields are never read + struct KL03Packed(NotKnownLayout, u8); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(1, 5)); + + // ... with `packed(N)` + #[derive(KnownLayout)] + #[repr(packed(2))] + #[allow(dead_code)] // fields are never read + struct KL03PackedN(NotKnownLayout, u8); + + assert_impl_all!(KL03PackedN: KnownLayout); + + let expected = DstLayout::for_type::(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(2, 6)); + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | N | Y | N | Y | KL05 | + #[derive(KnownLayout)] + #[allow(dead_code)] // fields are never read + struct KL05(u8, T); + + fn _test_kl05(t: T) -> impl KnownLayout { + KL05(0u8, t) + } + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | N | Y | Y | Y | KL07 | + #[derive(KnownLayout)] + #[allow(dead_code)] // fields are never read + struct KL07(u8, T); + + fn _test_kl07(t: T) -> impl KnownLayout { + let _ = KL07(0u8, t); + } + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | + // | Y | N | Y | N | KL10 | + #[derive(KnownLayout)] + #[repr(C)] + struct KL10(NotKnownLayout, [u8]); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), None) + .extend(<[u8] as KnownLayout>::LAYOUT, None) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, unsized_layout(4, 1, 4)); + + // ...with `align(N)`: + #[derive(KnownLayout)] + #[repr(C, align(64))] + struct KL10Align(NotKnownLayout, [u8]); + + let repr_align = NonZeroUsize::new(64); + + let expected = DstLayout::new_zst(repr_align) + .extend(DstLayout::for_type::>(), None) + .extend(<[u8] as KnownLayout>::LAYOUT, None) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, unsized_layout(64, 1, 4)); + + // ...with `packed`: + #[derive(KnownLayout)] + #[repr(C, packed)] + struct KL10Packed(NotKnownLayout, [u8]); + + let repr_packed = NonZeroUsize::new(1); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), repr_packed) + .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, unsized_layout(1, 1, 4)); + + // ...with `packed(N)`: + #[derive(KnownLayout)] + #[repr(C, packed(2))] + struct KL10PackedN(NotKnownLayout, [u8]); + + let repr_packed = NonZeroUsize::new(2); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), repr_packed) + .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, unsized_layout(2, 1, 4)); + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | + // | Y | N | Y | Y | KL11 | + #[derive(KnownLayout)] + #[repr(C)] + struct KL11(NotKnownLayout, u8); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), None) + .extend(::LAYOUT, None) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(8, 16)); + + // ...with `align(N)`: + #[derive(KnownLayout)] + #[repr(C, align(64))] + struct KL11Align(NotKnownLayout, u8); + + let repr_align = NonZeroUsize::new(64); + + let expected = DstLayout::new_zst(repr_align) + .extend(DstLayout::for_type::>(), None) + .extend(::LAYOUT, None) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(64, 64)); + + // ...with `packed`: + #[derive(KnownLayout)] + #[repr(C, packed)] + struct KL11Packed(NotKnownLayout, u8); + + let repr_packed = NonZeroUsize::new(1); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), repr_packed) + .extend(::LAYOUT, repr_packed) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(1, 9)); + + // ...with `packed(N)`: + #[derive(KnownLayout)] + #[repr(C, packed(2))] + struct KL11PackedN(NotKnownLayout, u8); + + let repr_packed = NonZeroUsize::new(2); + + let expected = DstLayout::new_zst(None) + .extend(DstLayout::for_type::>(), repr_packed) + .extend(::LAYOUT, repr_packed) + .pad_to_align(); + + assert_eq!(::LAYOUT, expected); + assert_eq!(::LAYOUT, sized_layout(2, 10)); + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name | + // | Y | Y | Y | N | KL14 | + #[derive(KnownLayout)] + #[repr(C)] + struct KL14(u8, T); + + fn _test_kl14(kl: &KL14) { + _assert_kl(kl) + } + + // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? 
| Type Name | + // | Y | Y | Y | Y | KL15 | + #[derive(KnownLayout)] + #[repr(C)] + struct KL15(u8, T); + + fn _test_kl15(t: T) -> impl KnownLayout { + let _ = KL15(0u8, t); + } + + // Test a variety of combinations of field types: + // - () + // - u8 + // - AU16 + // - [()] + // - [u8] + // - [AU16] + + #[allow(clippy::upper_case_acronyms)] + #[derive(KnownLayout)] + #[repr(C)] + struct KLTU(T, U); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(1, 0)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(1, 1)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 2)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(1, 1)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(1, 2)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 4)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 2)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 4)); + + assert_eq!( as KnownLayout>::LAYOUT, sized_layout(2, 4)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2)); + + assert_eq!( as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2)); + + // Test a variety of field counts. 
+ + #[derive(KnownLayout)] + #[repr(C)] + struct KLF0; + + assert_eq!(::LAYOUT, sized_layout(1, 0)); + + #[derive(KnownLayout)] + #[repr(C)] + struct KLF1([u8]); + + assert_eq!(::LAYOUT, unsized_layout(1, 1, 0)); + + #[derive(KnownLayout)] + #[repr(C)] + struct KLF2(NotKnownLayout, [u8]); + + assert_eq!(::LAYOUT, unsized_layout(1, 1, 1)); + + #[derive(KnownLayout)] + #[repr(C)] + struct KLF3(NotKnownLayout, NotKnownLayout, [u8]); + + assert_eq!(::LAYOUT, unsized_layout(2, 1, 4)); + + #[derive(KnownLayout)] + #[repr(C)] + struct KLF4(NotKnownLayout, NotKnownLayout, NotKnownLayout, [u8]); + + assert_eq!(::LAYOUT, unsized_layout(4, 1, 8)); + } + + #[test] + fn test_object_safety() { + fn _takes_from_zeroes(_: &dyn FromZeroes) {} + fn _takes_from_bytes(_: &dyn FromBytes) {} + fn _takes_unaligned(_: &dyn Unaligned) {} + } + + #[test] + fn test_from_zeroes_only() { + // Test types that implement `FromZeroes` but not `FromBytes`. + + assert!(!bool::new_zeroed()); + assert_eq!(char::new_zeroed(), '\0'); + + #[cfg(feature = "alloc")] + { + assert_eq!(bool::new_box_zeroed(), Box::new(false)); + assert_eq!(char::new_box_zeroed(), Box::new('\0')); + + assert_eq!(bool::new_box_slice_zeroed(3).as_ref(), [false, false, false]); + assert_eq!(char::new_box_slice_zeroed(3).as_ref(), ['\0', '\0', '\0']); + + assert_eq!(bool::new_vec_zeroed(3).as_ref(), [false, false, false]); + assert_eq!(char::new_vec_zeroed(3).as_ref(), ['\0', '\0', '\0']); + } + + let mut string = "hello".to_string(); + let s: &mut str = string.as_mut(); + assert_eq!(s, "hello"); + s.zero(); + assert_eq!(s, "\0\0\0\0\0"); + } + + #[test] + fn test_read_write() { + const VAL: u64 = 0x12345678; + #[cfg(target_endian = "big")] + const VAL_BYTES: [u8; 8] = VAL.to_be_bytes(); + #[cfg(target_endian = "little")] + const VAL_BYTES: [u8; 8] = VAL.to_le_bytes(); + + // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`. 
+ + assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL)); + // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all + // zeroes. + let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]); + assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Some(VAL)); + assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Some(0)); + // The first 8 bytes are all zeroes and the second 8 bytes are from + // `VAL_BYTES` + let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]); + assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0)); + assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL)); + + // Test `AsBytes::{write_to, write_to_prefix, write_to_suffix}`. + + let mut bytes = [0u8; 8]; + assert_eq!(VAL.write_to(&mut bytes[..]), Some(())); + assert_eq!(bytes, VAL_BYTES); + let mut bytes = [0u8; 16]; + assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Some(())); + let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]); + assert_eq!(bytes, want); + let mut bytes = [0u8; 16]; + assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Some(())); + let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]); + assert_eq!(bytes, want); + } + + #[test] + fn test_transmute() { + // Test that memory is transmuted as expected. + let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let x: [[u8; 2]; 4] = transmute!(array_of_u8s); + assert_eq!(x, array_of_arrays); + let x: [u8; 8] = transmute!(array_of_arrays); + assert_eq!(x, array_of_u8s); + + // Test that the source expression's value is forgotten rather than + // dropped. + #[derive(AsBytes)] + #[repr(transparent)] + struct PanicOnDrop(()); + impl Drop for PanicOnDrop { + fn drop(&mut self) { + panic!("PanicOnDrop::drop"); + } + } + #[allow(clippy::let_unit_value)] + let _: () = transmute!(PanicOnDrop(())); + + // Test that `transmute!` is legal in a const context. 
+ const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7]; + const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]]; + const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S); + assert_eq!(X, ARRAY_OF_ARRAYS); + } + + #[test] + fn test_transmute_ref() { + // Test that memory is transmuted as expected. + let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let x: &[[u8; 2]; 4] = transmute_ref!(&array_of_u8s); + assert_eq!(*x, array_of_arrays); + let x: &[u8; 8] = transmute_ref!(&array_of_arrays); + assert_eq!(*x, array_of_u8s); + + // Test that `transmute_ref!` is legal in a const context. + const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7]; + const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]]; + #[allow(clippy::redundant_static_lifetimes)] + const X: &'static [[u8; 2]; 4] = transmute_ref!(&ARRAY_OF_U8S); + assert_eq!(*X, ARRAY_OF_ARRAYS); + + // Test that it's legal to transmute a reference while shrinking the + // lifetime (note that `X` has the lifetime `'static`). + let x: &[u8; 8] = transmute_ref!(X); + assert_eq!(*x, ARRAY_OF_U8S); + + // Test that `transmute_ref!` supports decreasing alignment. + let u = AU64(0); + let array = [0, 0, 0, 0, 0, 0, 0, 0]; + let x: &[u8; 8] = transmute_ref!(&u); + assert_eq!(*x, array); + + // Test that a mutable reference can be turned into an immutable one. + let mut x = 0u8; + #[allow(clippy::useless_transmute)] + let y: &u8 = transmute_ref!(&mut x); + assert_eq!(*y, 0); + } + + #[test] + fn test_transmute_mut() { + // Test that memory is transmuted as expected. 
+ let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7]; + let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]]; + let x: &mut [[u8; 2]; 4] = transmute_mut!(&mut array_of_u8s); + assert_eq!(*x, array_of_arrays); + let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays); + assert_eq!(*x, array_of_u8s); + + { + // Test that it's legal to transmute a reference while shrinking the + // lifetime. + let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays); + assert_eq!(*x, array_of_u8s); + } + // Test that `transmute_mut!` supports decreasing alignment. + let mut u = AU64(0); + let array = [0, 0, 0, 0, 0, 0, 0, 0]; + let x: &[u8; 8] = transmute_mut!(&mut u); + assert_eq!(*x, array); + + // Test that a mutable reference can be turned into an immutable one. + let mut x = 0u8; + #[allow(clippy::useless_transmute)] + let y: &u8 = transmute_mut!(&mut x); + assert_eq!(*y, 0); + } + + #[test] + fn test_macros_evaluate_args_once() { + let mut ctr = 0; + let _: usize = transmute!({ + ctr += 1; + 0usize + }); + assert_eq!(ctr, 1); + + let mut ctr = 0; + let _: &usize = transmute_ref!({ + ctr += 1; + &0usize + }); + assert_eq!(ctr, 1); + } + + #[test] + fn test_include_value() { + const AS_U32: u32 = include_value!("../testdata/include_value/data"); + assert_eq!(AS_U32, u32::from_ne_bytes([b'a', b'b', b'c', b'd'])); + const AS_I32: i32 = include_value!("../testdata/include_value/data"); + assert_eq!(AS_I32, i32::from_ne_bytes([b'a', b'b', b'c', b'd'])); + } + + #[test] + fn test_address() { + // Test that the `Deref` and `DerefMut` implementations return a + // reference which points to the right region of memory. 
+ + let buf = [0]; + let r = Ref::<_, u8>::new(&buf[..]).unwrap(); + let buf_ptr = buf.as_ptr(); + let deref_ptr: *const u8 = r.deref(); + assert_eq!(buf_ptr, deref_ptr); + + let buf = [0]; + let r = Ref::<_, [u8]>::new_slice(&buf[..]).unwrap(); + let buf_ptr = buf.as_ptr(); + let deref_ptr = r.deref().as_ptr(); + assert_eq!(buf_ptr, deref_ptr); + } + + // Verify that values written to a `Ref` are properly shared between the + // typed and untyped representations, that reads via `deref` and `read` + // behave the same, and that writes via `deref_mut` and `write` behave the + // same. + fn test_new_helper(mut r: Ref<&mut [u8], AU64>) { + // assert that the value starts at 0 + assert_eq!(*r, AU64(0)); + assert_eq!(r.read(), AU64(0)); + + // Assert that values written to the typed value are reflected in the + // byte slice. + const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); + *r = VAL1; + assert_eq!(r.bytes(), &VAL1.to_bytes()); + *r = AU64(0); + r.write(VAL1); + assert_eq!(r.bytes(), &VAL1.to_bytes()); + + // Assert that values written to the byte slice are reflected in the + // typed value. + const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1` + r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]); + assert_eq!(*r, VAL2); + assert_eq!(r.read(), VAL2); + } + + // Verify that values written to a `Ref` are properly shared between the + // typed and untyped representations; pass a value with `typed_len` `AU64`s + // backed by an array of `typed_len * 8` bytes. + fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) { + // Assert that the value starts out zeroed. + assert_eq!(&*r, vec![AU64(0); typed_len].as_slice()); + + // Check the backing storage is the exact same slice. + let untyped_len = typed_len * 8; + assert_eq!(r.bytes().len(), untyped_len); + assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::()); + + // Assert that values written to the typed value are reflected in the + // byte slice. 
+ const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); + for typed in &mut *r { + *typed = VAL1; + } + assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice()); + + // Assert that values written to the byte slice are reflected in the + // typed value. + const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1 + r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len)); + assert!(r.iter().copied().all(|x| x == VAL2)); + } + + // Verify that values written to a `Ref` are properly shared between the + // typed and untyped representations, that reads via `deref` and `read` + // behave the same, and that writes via `deref_mut` and `write` behave the + // same. + fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) { + // assert that the value starts at 0 + assert_eq!(*r, [0; 8]); + assert_eq!(r.read(), [0; 8]); + + // Assert that values written to the typed value are reflected in the + // byte slice. + const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00]; + *r = VAL1; + assert_eq!(r.bytes(), &VAL1); + *r = [0; 8]; + r.write(VAL1); + assert_eq!(r.bytes(), &VAL1); + + // Assert that values written to the byte slice are reflected in the + // typed value. + const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1 + r.bytes_mut().copy_from_slice(&VAL2[..]); + assert_eq!(*r, VAL2); + assert_eq!(r.read(), VAL2); + } + + // Verify that values written to a `Ref` are properly shared between the + // typed and untyped representations; pass a value with `len` `u8`s backed + // by an array of `len` bytes. + fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) { + // Assert that the value starts out zeroed. + assert_eq!(&*r, vec![0u8; len].as_slice()); + + // Check the backing storage is the exact same slice. 
+ assert_eq!(r.bytes().len(), len); + assert_eq!(r.bytes().as_ptr(), r.as_ptr()); + + // Assert that values written to the typed value are reflected in the + // byte slice. + let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::>(); + r.copy_from_slice(&expected_bytes); + assert_eq!(r.bytes(), expected_bytes.as_slice()); + + // Assert that values written to the byte slice are reflected in the + // typed value. + for byte in &mut expected_bytes { + *byte = !*byte; // different from `expected_len` + } + r.bytes_mut().copy_from_slice(&expected_bytes); + assert_eq!(&*r, expected_bytes.as_slice()); + } + + #[test] + fn test_new_aligned_sized() { + // Test that a properly-aligned, properly-sized buffer works for new, + // new_from_prefix, and new_from_suffix, and that new_from_prefix and + // new_from_suffix return empty slices. Test that a properly-aligned + // buffer whose length is a multiple of the element size works for + // new_slice. Test that xxx_zeroed behaves the same, and zeroes the + // memory. + + // A buffer with an alignment of 8. + let mut buf = Align::<[u8; 8], AU64>::default(); + // `buf.t` should be aligned to 8, so this should always succeed. + test_new_helper(Ref::<_, AU64>::new(&mut buf.t[..]).unwrap()); + let ascending: [u8; 8] = (0..8).collect::>().try_into().unwrap(); + buf.t = ascending; + test_new_helper(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).unwrap()); + { + // In a block so that `r` and `suffix` don't live too long. 
+ buf.set_default(); + let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap(); + assert!(suffix.is_empty()); + test_new_helper(r); + } + { + buf.t = ascending; + let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap(); + assert!(suffix.is_empty()); + test_new_helper(r); + } + { + buf.set_default(); + let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap(); + assert!(prefix.is_empty()); + test_new_helper(r); + } + { + buf.t = ascending; + let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap(); + assert!(prefix.is_empty()); + test_new_helper(r); + } + + // A buffer with alignment 8 and length 24. We choose this length very + // intentionally: if we instead used length 16, then the prefix and + // suffix lengths would be identical. In the past, we used length 16, + // which resulted in this test failing to discover the bug uncovered in + // #506. + let mut buf = Align::<[u8; 24], AU64>::default(); + // `buf.t` should be aligned to 8 and have a length which is a multiple + // of `size_of::()`, so this should always succeed. + test_new_helper_slice(Ref::<_, [AU64]>::new_slice(&mut buf.t[..]).unwrap(), 3); + let ascending: [u8; 24] = (0..24).collect::>().try_into().unwrap(); + // 16 ascending bytes followed by 8 zeros. + let mut ascending_prefix = ascending; + ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]); + // 8 zeros followed by 16 ascending bytes. 
+ let mut ascending_suffix = ascending; + ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]); + test_new_helper_slice(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).unwrap(), 3); + + { + buf.t = ascending_suffix; + let (r, suffix) = Ref::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap(); + assert_eq!(suffix, &ascending[8..]); + test_new_helper_slice(r, 1); + } + { + buf.t = ascending_suffix; + let (r, suffix) = + Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 1).unwrap(); + assert_eq!(suffix, &ascending[8..]); + test_new_helper_slice(r, 1); + } + { + buf.t = ascending_prefix; + let (prefix, r) = Ref::<_, [AU64]>::new_slice_from_suffix(&mut buf.t[..], 1).unwrap(); + assert_eq!(prefix, &ascending[..16]); + test_new_helper_slice(r, 1); + } + { + buf.t = ascending_prefix; + let (prefix, r) = + Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 1).unwrap(); + assert_eq!(prefix, &ascending[..16]); + test_new_helper_slice(r, 1); + } + } + + #[test] + fn test_new_unaligned_sized() { + // Test that an unaligned, properly-sized buffer works for + // `new_unaligned`, `new_unaligned_from_prefix`, and + // `new_unaligned_from_suffix`, and that `new_unaligned_from_prefix` + // `new_unaligned_from_suffix` return empty slices. Test that an + // unaligned buffer whose length is a multiple of the element size works + // for `new_slice`. Test that `xxx_zeroed` behaves the same, and zeroes + // the memory. + + let mut buf = [0u8; 8]; + test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap()); + buf = [0xFFu8; 8]; + test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap()); + { + // In a block so that `r` and `suffix` don't live too long. 
+ buf = [0u8; 8]; + let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap(); + assert!(suffix.is_empty()); + test_new_helper_unaligned(r); + } + { + buf = [0xFFu8; 8]; + let (r, suffix) = + Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap(); + assert!(suffix.is_empty()); + test_new_helper_unaligned(r); + } + { + buf = [0u8; 8]; + let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap(); + assert!(prefix.is_empty()); + test_new_helper_unaligned(r); + } + { + buf = [0xFFu8; 8]; + let (prefix, r) = + Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap(); + assert!(prefix.is_empty()); + test_new_helper_unaligned(r); + } + + let mut buf = [0u8; 16]; + // `buf.t` should be aligned to 8 and have a length which is a multiple + // of `size_of::AU64>()`, so this should always succeed. + test_new_helper_slice_unaligned( + Ref::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(), + 16, + ); + buf = [0xFFu8; 16]; + test_new_helper_slice_unaligned( + Ref::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(), + 16, + ); + + { + buf = [0u8; 16]; + let (r, suffix) = + Ref::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8).unwrap(); + assert_eq!(suffix, [0; 8]); + test_new_helper_slice_unaligned(r, 8); + } + { + buf = [0xFFu8; 16]; + let (r, suffix) = + Ref::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8).unwrap(); + assert_eq!(suffix, [0xFF; 8]); + test_new_helper_slice_unaligned(r, 8); + } + { + buf = [0u8; 16]; + let (prefix, r) = + Ref::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8).unwrap(); + assert_eq!(prefix, [0; 8]); + test_new_helper_slice_unaligned(r, 8); + } + { + buf = [0xFFu8; 16]; + let (prefix, r) = + Ref::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8).unwrap(); + assert_eq!(prefix, [0xFF; 8]); + test_new_helper_slice_unaligned(r, 8); + } + } + + #[test] + fn test_new_oversized() { + // Test 
that a properly-aligned, overly-sized buffer works for + // `new_from_prefix` and `new_from_suffix`, and that they return the + // remainder and prefix of the slice respectively. Test that + // `xxx_zeroed` behaves the same, and zeroes the memory. + + let mut buf = Align::<[u8; 16], AU64>::default(); + { + // In a block so that `r` and `suffix` don't live too long. `buf.t` + // should be aligned to 8, so this should always succeed. + let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap(); + assert_eq!(suffix.len(), 8); + test_new_helper(r); + } + { + buf.t = [0xFFu8; 16]; + // `buf.t` should be aligned to 8, so this should always succeed. + let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap(); + // Assert that the suffix wasn't zeroed. + assert_eq!(suffix, &[0xFFu8; 8]); + test_new_helper(r); + } + { + buf.set_default(); + // `buf.t` should be aligned to 8, so this should always succeed. + let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap(); + assert_eq!(prefix.len(), 8); + test_new_helper(r); + } + { + buf.t = [0xFFu8; 16]; + // `buf.t` should be aligned to 8, so this should always succeed. + let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap(); + // Assert that the prefix wasn't zeroed. + assert_eq!(prefix, &[0xFFu8; 8]); + test_new_helper(r); + } + } + + #[test] + fn test_new_unaligned_oversized() { + // Test than an unaligned, overly-sized buffer works for + // `new_unaligned_from_prefix` and `new_unaligned_from_suffix`, and that + // they return the remainder and prefix of the slice respectively. Test + // that `xxx_zeroed` behaves the same, and zeroes the memory. + + let mut buf = [0u8; 16]; + { + // In a block so that `r` and `suffix` don't live too long. 
+ let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap(); + assert_eq!(suffix.len(), 8); + test_new_helper_unaligned(r); + } + { + buf = [0xFFu8; 16]; + let (r, suffix) = + Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap(); + // Assert that the suffix wasn't zeroed. + assert_eq!(suffix, &[0xFF; 8]); + test_new_helper_unaligned(r); + } + { + buf = [0u8; 16]; + let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap(); + assert_eq!(prefix.len(), 8); + test_new_helper_unaligned(r); + } + { + buf = [0xFFu8; 16]; + let (prefix, r) = + Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap(); + // Assert that the prefix wasn't zeroed. + assert_eq!(prefix, &[0xFF; 8]); + test_new_helper_unaligned(r); + } + } + + #[test] + fn test_ref_from_mut_from() { + // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success cases + // Exhaustive coverage for these methods is covered by the `Ref` tests above, + // which these helper methods defer to. + + let mut buf = + Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); + + assert_eq!( + AU64::ref_from(&buf.t[8..]).unwrap().0.to_ne_bytes(), + [8, 9, 10, 11, 12, 13, 14, 15] + ); + let suffix = AU64::mut_from(&mut buf.t[8..]).unwrap(); + suffix.0 = 0x0101010101010101; + // The `[u8:9]` is a non-half size of the full buffer, which would catch + // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511). 
+ assert_eq!(<[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]); + let suffix = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap(); + suffix.0 = 0x0202020202020202; + <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap()[0] = 42; + assert_eq!(<[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), &[0, 1, 2, 3, 4, 5, 42, 7, 2]); + <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap()[1] = 30; + assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]); + } + + #[test] + fn test_ref_from_mut_from_error() { + // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` error cases. + + // Fail because the buffer is too large. + let mut buf = Align::<[u8; 16], AU64>::default(); + // `buf.t` should be aligned to 8, so only the length check should fail. + assert!(AU64::ref_from(&buf.t[..]).is_none()); + assert!(AU64::mut_from(&mut buf.t[..]).is_none()); + assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none()); + assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none()); + + // Fail because the buffer is too small. + let mut buf = Align::<[u8; 4], AU64>::default(); + assert!(AU64::ref_from(&buf.t[..]).is_none()); + assert!(AU64::mut_from(&mut buf.t[..]).is_none()); + assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none()); + assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none()); + assert!(AU64::ref_from_prefix(&buf.t[..]).is_none()); + assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_none()); + assert!(AU64::ref_from_suffix(&buf.t[..]).is_none()); + assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none()); + assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_none()); + assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_none()); + assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_none()); + assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_none()); + + // Fail because the alignment is insufficient. 
+ let mut buf = Align::<[u8; 13], AU64>::default(); + assert!(AU64::ref_from(&buf.t[1..]).is_none()); + assert!(AU64::mut_from(&mut buf.t[1..]).is_none()); + assert!(AU64::ref_from(&buf.t[1..]).is_none()); + assert!(AU64::mut_from(&mut buf.t[1..]).is_none()); + assert!(AU64::ref_from_prefix(&buf.t[1..]).is_none()); + assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_none()); + assert!(AU64::ref_from_suffix(&buf.t[..]).is_none()); + assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none()); + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn test_new_error() { + // Fail because the buffer is too large. + + // A buffer with an alignment of 8. + let mut buf = Align::<[u8; 16], AU64>::default(); + // `buf.t` should be aligned to 8, so only the length check should fail. + assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none()); + assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none()); + assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none()); + assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none()); + + // Fail because the buffer is too small. + + // A buffer with an alignment of 8. + let mut buf = Align::<[u8; 4], AU64>::default(); + // `buf.t` should be aligned to 8, so only the length check should fail. 
+ assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none()); + assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none()); + assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none()); + assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none()); + assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[..]).is_none()); + assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).is_none()); + assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none()); + assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none()); + assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.t[..]).is_none()); + assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.t[..]).is_none()); + assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.t[..]).is_none()); + assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.t[..]).is_none()); + + // Fail because the length is not a multiple of the element size. + + let mut buf = Align::<[u8; 12], AU64>::default(); + // `buf.t` has length 12, but element size is 8. + assert!(Ref::<_, [AU64]>::new_slice(&buf.t[..]).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned(&buf.t[..]).is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.t[..]).is_none()); + + // Fail because the buffer is too short. + let mut buf = Align::<[u8; 12], AU64>::default(); + // `buf.t` has length 12, but the element size is 8 (and we're expecting + // two of them). 
+ assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], 2).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 2).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], 2).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 2).is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], 2).is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(&mut buf.t[..], 2) + .is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], 2).is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(&mut buf.t[..], 2) + .is_none()); + + // Fail because the alignment is insufficient. + + // A buffer with an alignment of 8. An odd buffer size is chosen so that + // the last byte of the buffer has odd alignment. + let mut buf = Align::<[u8; 13], AU64>::default(); + // Slicing from 1, we get a buffer with size 12 (so the length check + // should succeed) but an alignment of only 1, which is insufficient. + assert!(Ref::<_, AU64>::new(&buf.t[1..]).is_none()); + assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[1..]).is_none()); + assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[1..]).is_none()); + assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[1..]).is_none()); + assert!(Ref::<_, [AU64]>::new_slice(&buf.t[1..]).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[1..]).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[1..], 1).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[1..], 1).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[1..], 1).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[1..], 1).is_none()); + // Slicing is unnecessary here because `new_from_suffix[_zeroed]` use + // the suffix of the slice, which has odd alignment. 
+ assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none()); + assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none()); + + // Fail due to arithmetic overflow. + + let mut buf = Align::<[u8; 16], AU64>::default(); + let unreasonable_len = usize::MAX / mem::size_of::() + 1; + assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], unreasonable_len).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], unreasonable_len) + .is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], unreasonable_len).is_none()); + assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], unreasonable_len) + .is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], unreasonable_len) + .is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed( + &mut buf.t[..], + unreasonable_len + ) + .is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], unreasonable_len) + .is_none()); + assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed( + &mut buf.t[..], + unreasonable_len + ) + .is_none()); + } + + // Tests for ensuring that, if a ZST is passed into a slice-like function, + // we always panic. Since these tests need to be separate per-function, and + // they tend to take up a lot of space, we generate them using a macro in a + // submodule instead. The submodule ensures that we can just re-use the name + // of the function under test for the name of the test itself. + mod test_zst_panics { + macro_rules! 
zst_test { + ($name:ident($($tt:tt)*), $constructor_in_panic_msg:tt) => { + #[test] + #[should_panic = concat!("Ref::", $constructor_in_panic_msg, " called on a zero-sized type")] + fn $name() { + let mut buffer = [0u8]; + let r = $crate::Ref::<_, [()]>::$name(&mut buffer[..], $($tt)*); + unreachable!("should have panicked, got {:?}", r); + } + } + } + zst_test!(new_slice(), "new_slice"); + zst_test!(new_slice_zeroed(), "new_slice"); + zst_test!(new_slice_from_prefix(1), "new_slice"); + zst_test!(new_slice_from_prefix_zeroed(1), "new_slice"); + zst_test!(new_slice_from_suffix(1), "new_slice"); + zst_test!(new_slice_from_suffix_zeroed(1), "new_slice"); + zst_test!(new_slice_unaligned(), "new_slice_unaligned"); + zst_test!(new_slice_unaligned_zeroed(), "new_slice_unaligned"); + zst_test!(new_slice_unaligned_from_prefix(1), "new_slice_unaligned"); + zst_test!(new_slice_unaligned_from_prefix_zeroed(1), "new_slice_unaligned"); + zst_test!(new_slice_unaligned_from_suffix(1), "new_slice_unaligned"); + zst_test!(new_slice_unaligned_from_suffix_zeroed(1), "new_slice_unaligned"); + } + + #[test] + fn test_as_bytes_methods() { + /// Run a series of tests by calling `AsBytes` methods on `t`. + /// + /// `bytes` is the expected byte sequence returned from `t.as_bytes()` + /// before `t` has been modified. `post_mutation` is the expected + /// sequence returned from `t.as_bytes()` after `t.as_bytes_mut()[0]` + /// has had its bits flipped (by applying `^= 0xFF`). + /// + /// `N` is the size of `t` in bytes. + fn test( + t: &mut T, + bytes: &[u8], + post_mutation: &T, + ) { + // Test that we can access the underlying bytes, and that we get the + // right bytes and the right number of bytes. + assert_eq!(t.as_bytes(), bytes); + + // Test that changes to the underlying byte slices are reflected in + // the original object. + t.as_bytes_mut()[0] ^= 0xFF; + assert_eq!(t, post_mutation); + t.as_bytes_mut()[0] ^= 0xFF; + + // `write_to` rejects slices that are too small or too large. 
+ assert_eq!(t.write_to(&mut vec![0; N - 1][..]), None); + assert_eq!(t.write_to(&mut vec![0; N + 1][..]), None); + + // `write_to` works as expected. + let mut bytes = [0; N]; + assert_eq!(t.write_to(&mut bytes[..]), Some(())); + assert_eq!(bytes, t.as_bytes()); + + // `write_to_prefix` rejects slices that are too small. + assert_eq!(t.write_to_prefix(&mut vec![0; N - 1][..]), None); + + // `write_to_prefix` works with exact-sized slices. + let mut bytes = [0; N]; + assert_eq!(t.write_to_prefix(&mut bytes[..]), Some(())); + assert_eq!(bytes, t.as_bytes()); + + // `write_to_prefix` works with too-large slices, and any bytes past + // the prefix aren't modified. + let mut too_many_bytes = vec![0; N + 1]; + too_many_bytes[N] = 123; + assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Some(())); + assert_eq!(&too_many_bytes[..N], t.as_bytes()); + assert_eq!(too_many_bytes[N], 123); + + // `write_to_suffix` rejects slices that are too small. + assert_eq!(t.write_to_suffix(&mut vec![0; N - 1][..]), None); + + // `write_to_suffix` works with exact-sized slices. + let mut bytes = [0; N]; + assert_eq!(t.write_to_suffix(&mut bytes[..]), Some(())); + assert_eq!(bytes, t.as_bytes()); + + // `write_to_suffix` works with too-large slices, and any bytes + // before the suffix aren't modified. 
+ let mut too_many_bytes = vec![0; N + 1]; + too_many_bytes[0] = 123; + assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Some(())); + assert_eq!(&too_many_bytes[1..], t.as_bytes()); + assert_eq!(too_many_bytes[0], 123); + } + + #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes)] + #[repr(C)] + struct Foo { + a: u32, + b: Wrapping, + c: Option, + } + + let expected_bytes: Vec = if cfg!(target_endian = "little") { + vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0] + } else { + vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0] + }; + let post_mutation_expected_a = + if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 }; + test::<_, 12>( + &mut Foo { a: 1, b: Wrapping(2), c: None }, + expected_bytes.as_bytes(), + &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None }, + ); + test::<_, 3>( + Unsized::from_mut_slice(&mut [1, 2, 3]), + &[1, 2, 3], + Unsized::from_mut_slice(&mut [0xFE, 2, 3]), + ); + } + + #[test] + fn test_array() { + #[derive(FromZeroes, FromBytes, AsBytes)] + #[repr(C)] + struct Foo { + a: [u16; 33], + } + + let foo = Foo { a: [0xFFFF; 33] }; + let expected = [0xFFu8; 66]; + assert_eq!(foo.as_bytes(), &expected[..]); + } + + #[test] + fn test_display_debug() { + let buf = Align::<[u8; 8], u64>::default(); + let r = Ref::<_, u64>::new(&buf.t[..]).unwrap(); + assert_eq!(format!("{}", r), "0"); + assert_eq!(format!("{:?}", r), "Ref(0)"); + + let buf = Align::<[u8; 8], u64>::default(); + let r = Ref::<_, [u64]>::new_slice(&buf.t[..]).unwrap(); + assert_eq!(format!("{:?}", r), "Ref([0])"); + } + + #[test] + fn test_eq() { + let buf1 = 0_u64; + let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap(); + let buf2 = 0_u64; + let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap(); + assert_eq!(r1, r2); + } + + #[test] + fn test_ne() { + let buf1 = 0_u64; + let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap(); + let buf2 = 1_u64; + let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap(); + assert_ne!(r1, r2); + } + + #[test] + fn 
test_ord() { + let buf1 = 0_u64; + let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap(); + let buf2 = 1_u64; + let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap(); + assert!(r1 < r2); + } + + #[test] + fn test_new_zeroed() { + assert!(!bool::new_zeroed()); + assert_eq!(u64::new_zeroed(), 0); + // This test exists in order to exercise unsafe code, especially when + // running under Miri. + #[allow(clippy::unit_cmp)] + { + assert_eq!(<()>::new_zeroed(), ()); + } + } + + #[test] + fn test_transparent_packed_generic_struct() { + #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + #[repr(transparent)] + #[allow(dead_code)] // for the unused fields + struct Foo { + _t: T, + _phantom: PhantomData<()>, + } + + assert_impl_all!(Foo: FromZeroes, FromBytes, AsBytes); + assert_impl_all!(Foo: Unaligned); + + #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)] + #[repr(packed)] + #[allow(dead_code)] // for the unused fields + struct Bar { + _t: T, + _u: U, + } + + assert_impl_all!(Bar: FromZeroes, FromBytes, AsBytes, Unaligned); + } + + #[test] + fn test_impls() { + use core::borrow::Borrow; + + // A type that can supply test cases for testing + // `TryFromBytes::is_bit_valid`. All types passed to `assert_impls!` + // must implement this trait; that macro uses it to generate runtime + // tests for `TryFromBytes` impls. + // + // All `T: FromBytes` types are provided with a blanket impl. Other + // types must implement `TryFromBytesTestable` directly (ie using + // `impl_try_from_bytes_testable!`). + trait TryFromBytesTestable { + fn with_passing_test_cases(f: F); + fn with_failing_test_cases(f: F); + } + + impl TryFromBytesTestable for T { + fn with_passing_test_cases(f: F) { + // Test with a zeroed value. + f(&Self::new_zeroed()); + + let ffs = { + let mut t = Self::new_zeroed(); + let ptr: *mut T = &mut t; + // SAFETY: `T: FromBytes` + unsafe { ptr::write_bytes(ptr.cast::(), 0xFF, mem::size_of::()) }; + t + }; + + // Test with a value initialized with 0xFF. 
+ f(&ffs); + } + + fn with_failing_test_cases(_f: F) {} + } + + // Implements `TryFromBytesTestable`. + macro_rules! impl_try_from_bytes_testable { + // Base case for recursion (when the list of types has run out). + (=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {}; + // Implements for type(s) with no type parameters. + ($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => { + impl TryFromBytesTestable for $ty { + impl_try_from_bytes_testable!( + @methods @success $($success_case),* + $(, @failure $($failure_case),*)? + ); + } + impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?); + }; + // Implements for multiple types with no type parameters. + ($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => { + $( + impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*); + )* + }; + // Implements only the methods; caller must invoke this from inside + // an impl block. + (@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => { + fn with_passing_test_cases(_f: F) { + $( + _f($success_case.borrow()); + )* + } + + fn with_failing_test_cases(_f: F) { + $($( + let case = $failure_case.as_bytes(); + _f(case.as_bytes()); + )*)? + } + }; + } + + // Note that these impls are only for types which are not `FromBytes`. + // `FromBytes` types are covered by a preceding blanket impl. 
+ impl_try_from_bytes_testable!( + bool => @success true, false, + @failure 2u8, 3u8, 0xFFu8; + char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}', + @failure 0xD800u32, 0xDFFFu32, 0x110000u32; + str => @success "", "hello", "❤️🧡💛💚💙💜", + @failure [0, 159, 146, 150]; + [u8] => @success [], [0, 1, 2]; + NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, + NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, + NonZeroUsize, NonZeroIsize + => @success Self::new(1).unwrap(), + // Doing this instead of `0` ensures that we always satisfy + // the size and alignment requirements of `Self` (whereas + // `0` may be any integer type with a different size or + // alignment than some `NonZeroXxx` types). + @failure Option::::None; + [bool] + => @success [true, false], [false, true], + @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; + ); + + // Asserts that `$ty` implements any `$trait` and doesn't implement any + // `!$trait`. Note that all `$trait`s must come before any `!$trait`s. + // + // For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success + // and failure cases for `TryFromBytes::is_bit_valid`. + macro_rules! assert_impls { + ($ty:ty: TryFromBytes) => { + <$ty as TryFromBytesTestable>::with_passing_test_cases(|val| { + let c = Ptr::from(val); + // SAFETY: + // - Since `val` is a normal reference, `c` is guranteed to + // be aligned, to point to a single allocation, and to + // have a size which doesn't overflow `isize`. + // - Since `val` is a valid `$ty`, `c`'s referent satisfies + // the bit validity constraints of `is_bit_valid`, which + // are a superset of the bit validity constraints of + // `$ty`. + let res = unsafe { <$ty as TryFromBytes>::is_bit_valid(c) }; + assert!(res, "{}::is_bit_valid({:?}): got false, expected true", stringify!($ty), val); + + // TODO(#5): In addition to testing `is_bit_valid`, test the + // methods built on top of it. 
This would both allow us to + // test their implementations and actually convert the bytes + // to `$ty`, giving Miri a chance to catch if this is + // unsound (ie, if our `is_bit_valid` impl is buggy). + // + // The following code was tried, but it doesn't work because + // a) some types are not `AsBytes` and, b) some types are + // not `Sized`. + // + // let r = <$ty as TryFromBytes>::try_from_ref(val.as_bytes()).unwrap(); + // assert_eq!(r, &val); + // let r = <$ty as TryFromBytes>::try_from_mut(val.as_bytes_mut()).unwrap(); + // assert_eq!(r, &mut val); + // let v = <$ty as TryFromBytes>::try_read_from(val.as_bytes()).unwrap(); + // assert_eq!(v, val); + }); + #[allow(clippy::as_conversions)] + <$ty as TryFromBytesTestable>::with_failing_test_cases(|c| { + let res = <$ty as TryFromBytes>::try_from_ref(c); + assert!(res.is_none(), "{}::is_bit_valid({:?}): got true, expected false", stringify!($ty), c); + }); + + #[allow(dead_code)] + const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); }; + }; + ($ty:ty: $trait:ident) => { + #[allow(dead_code)] + const _: () = { static_assertions::assert_impl_all!($ty: $trait); }; + }; + ($ty:ty: !$trait:ident) => { + #[allow(dead_code)] + const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); }; + }; + ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => { + $( + assert_impls!($ty: $trait); + )* + + $( + assert_impls!($ty: !$negative_trait); + )* + }; + } + + // NOTE: The negative impl assertions here are not necessarily + // prescriptive. They merely serve as change detectors to make sure + // we're aware of what trait impls are getting added with a given + // change. Of course, some impls would be invalid (e.g., `bool: + // FromBytes`), and so this change detection is very important. 
+ + assert_impls!((): KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_impls!(u8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_impls!(i8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_impls!(u16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(i16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(u32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(i32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(u64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(i64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(u128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(i128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(usize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(isize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(f32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(f64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + + assert_impls!(bool: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes); + assert_impls!(char: KnownLayout, TryFromBytes, FromZeroes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(str: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes); + + assert_impls!(NonZeroU8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes); + assert_impls!(NonZeroI8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes); + assert_impls!(NonZeroU16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(NonZeroI16: 
KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(NonZeroU32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(NonZeroI32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(NonZeroU64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(NonZeroI64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(NonZeroU128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(NonZeroI128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(NonZeroUsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + assert_impls!(NonZeroIsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned); + + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); + + // Implements none of the ZC traits. 
+ struct NotZerocopy; + + #[rustfmt::skip] + type FnManyArgs = fn( + NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, + ) -> (NotZerocopy, NotZerocopy); + + // Allowed, because we're not actually using this type for FFI. + #[allow(improper_ctypes_definitions)] + #[rustfmt::skip] + type ECFnManyArgs = extern "C" fn( + NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, + ) -> (NotZerocopy, NotZerocopy); + + #[cfg(feature = "alloc")] + assert_impls!(Option>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option<&'static UnsafeCell>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option<&'static [UnsafeCell]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option<&'static mut UnsafeCell>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option<&'static mut [UnsafeCell]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(Option: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + + assert_impls!(PhantomData: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_impls!(PhantomData<[u8]>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + + assert_impls!(ManuallyDrop: KnownLayout, 
FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); + assert_impls!(ManuallyDrop<[u8]>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); + assert_impls!(ManuallyDrop: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(ManuallyDrop<[NotZerocopy]>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + + assert_impls!(MaybeUninit: KnownLayout, TryFromBytes, FromZeroes, FromBytes, Unaligned, !AsBytes); + assert_impls!(MaybeUninit: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + + assert_impls!(Wrapping: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_impls!(Wrapping: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + + assert_impls!(Unalign: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); + assert_impls!(Unalign: Unaligned, !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes); + + assert_impls!([u8]: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned); + assert_impls!([bool]: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes); + assert_impls!([NotZerocopy]: !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!([u8; 0]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); + assert_impls!([NotZerocopy; 0]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!([u8; 1]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes); + assert_impls!([NotZerocopy; 1]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + + assert_impls!(*const NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(*mut NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(*const [NotZerocopy]: KnownLayout, 
!TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(*mut [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(*const dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + assert_impls!(*mut dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned); + + #[cfg(feature = "simd")] + { + #[allow(unused_macros)] + macro_rules! test_simd_arch_mod { + ($arch:ident, $($typ:ident),*) => { + { + use core::arch::$arch::{$($typ),*}; + use crate::*; + $( assert_impls!($typ: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); )* + } + }; + } + #[cfg(target_arch = "x86")] + test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i); + + #[cfg(all(feature = "simd-nightly", target_arch = "x86"))] + test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i); + + #[cfg(target_arch = "x86_64")] + test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i); + + #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))] + test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i); + + #[cfg(target_arch = "wasm32")] + test_simd_arch_mod!(wasm32, v128); + + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] + test_simd_arch_mod!( + powerpc, + vector_bool_long, + vector_double, + vector_signed_long, + vector_unsigned_long + ); + + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] + test_simd_arch_mod!( + powerpc64, + vector_bool_long, + vector_double, + vector_signed_long, + vector_unsigned_long + ); + #[cfg(target_arch = "aarch64")] + #[rustfmt::skip] + test_simd_arch_mod!( + aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, + int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t, + int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, + poly8x8x4_t, 
poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, + poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, + uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, + uint64x1_t, uint64x2_t + ); + #[cfg(all(feature = "simd-nightly", target_arch = "arm"))] + #[rustfmt::skip] + test_simd_arch_mod!(arm, int8x4_t, uint8x4_t); + } + } +} + +#[cfg(kani)] +mod proofs { + use super::*; + + impl kani::Arbitrary for DstLayout { + fn any() -> Self { + let align: NonZeroUsize = kani::any(); + let size_info: SizeInfo = kani::any(); + + kani::assume(align.is_power_of_two()); + kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN); + + // For testing purposes, we most care about instantiations of + // `DstLayout` that can correspond to actual Rust types. We use + // `Layout` to verify that our `DstLayout` satisfies the validity + // conditions of Rust layouts. + kani::assume( + match size_info { + SizeInfo::Sized { _size } => Layout::from_size_align(_size, align.get()), + SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => { + // `SliceDst`` cannot encode an exact size, but we know + // it is at least `_offset` bytes. 
+ Layout::from_size_align(_offset, align.get()) + } + } + .is_ok(), + ); + + Self { align: align, size_info: size_info } + } + } + + impl kani::Arbitrary for SizeInfo { + fn any() -> Self { + let is_sized: bool = kani::any(); + + match is_sized { + true => { + let size: usize = kani::any(); + + kani::assume(size <= isize::MAX as _); + + SizeInfo::Sized { _size: size } + } + false => SizeInfo::SliceDst(kani::any()), + } + } + } + + impl kani::Arbitrary for TrailingSliceLayout { + fn any() -> Self { + let elem_size: usize = kani::any(); + let offset: usize = kani::any(); + + kani::assume(elem_size < isize::MAX as _); + kani::assume(offset < isize::MAX as _); + + TrailingSliceLayout { _elem_size: elem_size, _offset: offset } + } + } + + #[kani::proof] + fn prove_dst_layout_extend() { + use crate::util::{core_layout::padding_needed_for, max, min}; + + let base: DstLayout = kani::any(); + let field: DstLayout = kani::any(); + let packed: Option = kani::any(); + + if let Some(max_align) = packed { + kani::assume(max_align.is_power_of_two()); + kani::assume(base.align <= max_align); + } + + // The base can only be extended if it's sized. + kani::assume(matches!(base.size_info, SizeInfo::Sized { .. })); + let base_size = if let SizeInfo::Sized { _size: size } = base.size_info { + size + } else { + unreachable!(); + }; + + // Under the above conditions, `DstLayout::extend` will not panic. + let composite = base.extend(field, packed); + + // The field's alignment is clamped by `max_align` (i.e., the + // `packed` attribute, if any) [1]. + // + // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: + // + // The alignments of each field, for the purpose of positioning + // fields, is the smaller of the specified alignment and the + // alignment of the field's type. 
+ let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN)); + + // The struct's alignment is the maximum of its previous alignment and + // `field_align`. + assert_eq!(composite.align, max(base.align, field_align)); + + // Compute the minimum amount of inter-field padding needed to + // satisfy the field's alignment, and offset of the trailing field. + // [1] + // + // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers: + // + // Inter-field padding is guaranteed to be the minimum required in + // order to satisfy each field's (possibly altered) alignment. + let padding = padding_needed_for(base_size, field_align); + let offset = base_size + padding; + + // For testing purposes, we'll also construct `alloc::Layout` + // stand-ins for `DstLayout`, and show that `extend` behaves + // comparably on both types. + let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap(); + + match field.size_info { + SizeInfo::Sized { _size: field_size } => { + if let SizeInfo::Sized { _size: composite_size } = composite.size_info { + // If the trailing field is sized, the resulting layout + // will be sized. Its size will be the sum of the + // preceeding layout, the size of the new field, and the + // size of inter-field padding between the two. + assert_eq!(composite_size, offset + field_size); + + let field_analog = + Layout::from_size_align(field_size, field_align.get()).unwrap(); + + if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog) + { + assert_eq!(actual_offset, offset); + assert_eq!(actual_composite.size(), composite_size); + assert_eq!(actual_composite.align(), composite.align.get()); + } else { + // An error here reflects that composite of `base` + // and `field` cannot correspond to a real Rust type + // fragment, because such a fragment would violate + // the basic invariants of a valid Rust layout. 
At + // the time of writing, `DstLayout` is a little more + // permissive than `Layout`, so we don't assert + // anything in this branch (e.g., unreachability). + } + } else { + panic!("The composite of two sized layouts must be sized.") + } + } + SizeInfo::SliceDst(TrailingSliceLayout { + _offset: field_offset, + _elem_size: field_elem_size, + }) => { + if let SizeInfo::SliceDst(TrailingSliceLayout { + _offset: composite_offset, + _elem_size: composite_elem_size, + }) = composite.size_info + { + // The offset of the trailing slice component is the sum + // of the offset of the trailing field and the trailing + // slice offset within that field. + assert_eq!(composite_offset, offset + field_offset); + // The elem size is unchanged. + assert_eq!(composite_elem_size, field_elem_size); + + let field_analog = + Layout::from_size_align(field_offset, field_align.get()).unwrap(); + + if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog) + { + assert_eq!(actual_offset, offset); + assert_eq!(actual_composite.size(), composite_offset); + assert_eq!(actual_composite.align(), composite.align.get()); + } else { + // An error here reflects that composite of `base` + // and `field` cannot correspond to a real Rust type + // fragment, because such a fragment would violate + // the basic invariants of a valid Rust layout. At + // the time of writing, `DstLayout` is a little more + // permissive than `Layout`, so we don't assert + // anything in this branch (e.g., unreachability). 
+ } + } else { + panic!("The extension of a layout with a DST must result in a DST.") + } + } + } + } + + #[kani::proof] + #[kani::should_panic] + fn prove_dst_layout_extend_dst_panics() { + let base: DstLayout = kani::any(); + let field: DstLayout = kani::any(); + let packed: Option = kani::any(); + + if let Some(max_align) = packed { + kani::assume(max_align.is_power_of_two()); + kani::assume(base.align <= max_align); + } + + kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..))); + + let _ = base.extend(field, packed); + } + + #[kani::proof] + fn prove_dst_layout_pad_to_align() { + use crate::util::core_layout::padding_needed_for; + + let layout: DstLayout = kani::any(); + + let padded: DstLayout = layout.pad_to_align(); + + // Calling `pad_to_align` does not alter the `DstLayout`'s alignment. + assert_eq!(padded.align, layout.align); + + if let SizeInfo::Sized { _size: unpadded_size } = layout.size_info { + if let SizeInfo::Sized { _size: padded_size } = padded.size_info { + // If the layout is sized, it will remain sized after padding is + // added. Its sum will be its unpadded size and the size of the + // trailing padding needed to satisfy its alignment + // requirements. + let padding = padding_needed_for(unpadded_size, layout.align); + assert_eq!(padded_size, unpadded_size + padding); + + // Prove that calling `DstLayout::pad_to_align` behaves + // identically to `Layout::pad_to_align`. + let layout_analog = + Layout::from_size_align(unpadded_size, layout.align.get()).unwrap(); + let padded_analog = layout_analog.pad_to_align(); + assert_eq!(padded_analog.align(), layout.align.get()); + assert_eq!(padded_analog.size(), padded_size); + } else { + panic!("The padding of a sized layout must result in a sized layout.") + } + } else { + // If the layout is a DST, padding cannot be statically added. 
+ assert_eq!(padded.size_info, layout.size_info); + } + } +} diff --git a/src/rust/vendor/zerocopy/src/macro_util.rs b/src/rust/vendor/zerocopy/src/macro_util.rs new file mode 100644 index 000000000..d2c61ca00 --- /dev/null +++ b/src/rust/vendor/zerocopy/src/macro_util.rs @@ -0,0 +1,673 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! Utilities used by macros and by `zerocopy-derive`. +//! +//! These are defined here `zerocopy` rather than in code generated by macros or +//! by `zerocopy-derive` so that they can be compiled once rather than +//! recompiled for every invocation (e.g., if they were defined in generated +//! code, then deriving `AsBytes` and `FromBytes` on three different types would +//! result in the code in question being emitted and compiled six different +//! times). + +#![allow(missing_debug_implementations)] + +use core::{marker::PhantomData, mem::ManuallyDrop}; + +// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this +// `cfg` when `size_of_val_raw` is stabilized. +#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] +use core::ptr::{self, NonNull}; + +/// A compile-time check that should be one particular value. +pub trait ShouldBe {} + +/// A struct for checking whether `T` contains padding. +pub struct HasPadding(PhantomData); + +impl ShouldBe for HasPadding {} + +/// A type whose size is equal to `align_of::()`. +#[repr(C)] +pub struct AlignOf { + // This field ensures that: + // - The size is always at least 1 (the minimum possible alignment). + // - If the alignment is greater than 1, Rust has to round up to the next + // multiple of it in order to make sure that `Align`'s size is a multiple + // of that alignment. 
Without this field, its size could be 0, which is a + // valid multiple of any alignment. + _u: u8, + _a: [T; 0], +} + +impl AlignOf { + #[inline(never)] // Make `missing_inline_in_public_items` happy. + pub fn into_t(self) -> T { + unreachable!() + } +} + +/// A type whose size is equal to `max(align_of::(), align_of::())`. +#[repr(C)] +pub union MaxAlignsOf { + _t: ManuallyDrop>, + _u: ManuallyDrop>, +} + +impl MaxAlignsOf { + #[inline(never)] // Make `missing_inline_in_public_items` happy. + pub fn new(_t: T, _u: U) -> MaxAlignsOf { + unreachable!() + } +} + +const _64K: usize = 1 << 16; + +// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this +// `cfg` when `size_of_val_raw` is stabilized. +#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] +#[repr(C, align(65536))] +struct Aligned64kAllocation([u8; _64K]); + +/// A pointer to an aligned allocation of size 2^16. +/// +/// # Safety +/// +/// `ALIGNED_64K_ALLOCATION` is guaranteed to point to the entirety of an +/// allocation with size and alignment 2^16, and to have valid provenance. +// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this +// `cfg` when `size_of_val_raw` is stabilized. +#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] +pub const ALIGNED_64K_ALLOCATION: NonNull<[u8]> = { + const REF: &Aligned64kAllocation = &Aligned64kAllocation([0; _64K]); + let ptr: *const Aligned64kAllocation = REF; + let ptr: *const [u8] = ptr::slice_from_raw_parts(ptr.cast(), _64K); + // SAFETY: + // - `ptr` is derived from a Rust reference, which is guaranteed to be + // non-null. + // - `ptr` is derived from an `&Aligned64kAllocation`, which has size and + // alignment `_64K` as promised. Its length is initialized to `_64K`, + // which means that it refers to the entire allocation. + // - `ptr` is derived from a Rust reference, which is guaranteed to have + // valid provenance. 
+ // + // TODO(#429): Once `NonNull::new_unchecked` docs document that it preserves + // provenance, cite those docs. + // TODO: Replace this `as` with `ptr.cast_mut()` once our MSRV >= 1.65 + #[allow(clippy::as_conversions)] + unsafe { + NonNull::new_unchecked(ptr as *mut _) + } +}; + +/// Computes the offset of the base of the field `$trailing_field_name` within +/// the type `$ty`. +/// +/// `trailing_field_offset!` produces code which is valid in a `const` context. +// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this +// `cfg` when `size_of_val_raw` is stabilized. +#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! trailing_field_offset { + ($ty:ty, $trailing_field_name:tt) => {{ + let min_size = { + let zero_elems: *const [()] = + $crate::macro_util::core_reexport::ptr::slice_from_raw_parts( + $crate::macro_util::core_reexport::ptr::NonNull::<()>::dangling() + .as_ptr() + .cast_const(), + 0, + ); + // SAFETY: + // - If `$ty` is `Sized`, `size_of_val_raw` is always safe to call. + // - Otherwise: + // - If `$ty` is not a slice DST, this pointer conversion will + // fail due to "mismatched vtable kinds", and compilation will + // fail. + // - If `$ty` is a slice DST, the safety requirement is that "the + // length of the slice tail must be an initialized integer, and + // the size of the entire value (dynamic tail length + + // statically sized prefix) must fit in isize." The length is + // initialized to 0 above, and Rust guarantees that no type's + // minimum size may overflow `isize`. [1] + // + // [1] TODO(#429), + // TODO(https://github.com/rust-lang/unsafe-code-guidelines/issues/465#issuecomment-1782206516): + // Citation for this? 
+ unsafe { + #[allow(clippy::as_conversions)] + $crate::macro_util::core_reexport::mem::size_of_val_raw(zero_elems as *const $ty) + } + }; + + assert!(min_size <= _64K); + + #[allow(clippy::as_conversions)] + let ptr = ALIGNED_64K_ALLOCATION.as_ptr() as *const $ty; + + // SAFETY: + // - Thanks to the preceding `assert!`, we know that the value with zero + // elements fits in `_64K` bytes, and thus in the allocation addressed + // by `ALIGNED_64K_ALLOCATION`. The offset of the trailing field is + // guaranteed to be no larger than this size, so this field projection + // is guaranteed to remain in-bounds of its allocation. + // - Because the minimum size is no larger than `_64K` bytes, and + // because an object's size must always be a multiple of its alignment + // [1], we know that `$ty`'s alignment is no larger than `_64K`. The + // allocation addressed by `ALIGNED_64K_ALLOCATION` is guaranteed to + // be aligned to `_64K`, so `ptr` is guaranteed to satisfy `$ty`'s + // alignment. + // + // Note that, as of [2], this requirement is technically unnecessary + // for Rust versions >= 1.75.0, but no harm in guaranteeing it anyway + // until we bump our MSRV. + // + // [1] Per https://doc.rust-lang.org/reference/type-layout.html: + // + // The size of a value is always a multiple of its alignment. + // + // [2] https://github.com/rust-lang/reference/pull/1387 + let field = unsafe { + $crate::macro_util::core_reexport::ptr::addr_of!((*ptr).$trailing_field_name) + }; + // SAFETY: + // - Both `ptr` and `field` are derived from the same allocated object. + // - By the preceding safety comment, `field` is in bounds of that + // allocated object. + // - The distance, in bytes, between `ptr` and `field` is required to be + // a multiple of the size of `u8`, which is trivially true because + // `u8`'s size is 1. + // - The distance, in bytes, cannot overflow `isize`. This is guaranteed + // because no allocated object can have a size larger than can fit in + // `isize`. 
[1] + // - The distance being in-bounds cannot rely on wrapping around the + // address space. This is guaranteed because the same is guaranteed of + // allocated objects. [1] + // + // [1] TODO(#429), TODO(https://github.com/rust-lang/rust/pull/116675): + // Once these are guaranteed in the Reference, cite it. + let offset = unsafe { field.cast::().offset_from(ptr.cast::()) }; + // Guaranteed not to be lossy: `field` comes after `ptr`, so the offset + // from `ptr` to `field` is guaranteed to be positive. + assert!(offset >= 0); + Some( + #[allow(clippy::as_conversions)] + { + offset as usize + }, + ) + }}; +} + +/// Computes alignment of `$ty: ?Sized`. +/// +/// `align_of!` produces code which is valid in a `const` context. +// TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove this +// `cfg` when `size_of_val_raw` is stabilized. +#[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! align_of { + ($ty:ty) => {{ + // SAFETY: `OffsetOfTrailingIsAlignment` is `repr(C)`, and its layout is + // guaranteed [1] to begin with the single-byte layout for `_byte`, + // followed by the padding needed to align `_trailing`, then the layout + // for `_trailing`, and finally any trailing padding bytes needed to + // correctly-align the entire struct. + // + // This macro computes the alignment of `$ty` by counting the number of + // bytes preceeding `_trailing`. For instance, if the alignment of `$ty` + // is `1`, then no padding is required align `_trailing` and it will be + // located immediately after `_byte` at offset 1. If the alignment of + // `$ty` is 2, then a single padding byte is required before + // `_trailing`, and `_trailing` will be located at offset 2. 
+ + // This correspondence between offset and alignment holds for all valid + // Rust alignments, and we confirm this exhaustively (or, at least up to + // the maximum alignment supported by `trailing_field_offset!`) in + // `test_align_of_dst`. + // + // [1]: https://doc.rust-lang.org/nomicon/other-reprs.html#reprc + + #[repr(C)] + struct OffsetOfTrailingIsAlignment { + _byte: u8, + _trailing: $ty, + } + + trailing_field_offset!(OffsetOfTrailingIsAlignment, _trailing) + }}; +} + +/// Does the struct type `$t` have padding? +/// +/// `$ts` is the list of the type of every field in `$t`. `$t` must be a +/// struct type, or else `struct_has_padding!`'s result may be meaningless. +/// +/// Note that `struct_has_padding!`'s results are independent of `repr` since +/// they only consider the size of the type and the sizes of the fields. +/// Whatever the repr, the size of the type already takes into account any +/// padding that the compiler has decided to add. Structs with well-defined +/// representations (such as `repr(C)`) can use this macro to check for padding. +/// Note that while this may yield some consistent value for some `repr(Rust)` +/// structs, it is not guaranteed across platforms or compilations. +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! struct_has_padding { + ($t:ty, $($ts:ty),*) => { + core::mem::size_of::<$t>() > 0 $(+ core::mem::size_of::<$ts>())* + }; +} + +/// Does the union type `$t` have padding? +/// +/// `$ts` is the list of the type of every field in `$t`. `$t` must be a +/// union type, or else `union_has_padding!`'s result may be meaningless. +/// +/// Note that `union_has_padding!`'s results are independent of `repr` since +/// they only consider the size of the type and the sizes of the fields. +/// Whatever the repr, the size of the type already takes into account any +/// padding that the compiler has decided to add. 
Unions with well-defined +/// representations (such as `repr(C)`) can use this macro to check for padding. +/// Note that while this may yield some consistent value for some `repr(Rust)` +/// unions, it is not guaranteed across platforms or compilations. +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! union_has_padding { + ($t:ty, $($ts:ty),*) => { + false $(|| core::mem::size_of::<$t>() != core::mem::size_of::<$ts>())* + }; +} + +/// Does `t` have alignment greater than or equal to `u`? If not, this macro +/// produces a compile error. It must be invoked in a dead codepath. This is +/// used in `transmute_ref!` and `transmute_mut!`. +#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! assert_align_gt_eq { + ($t:ident, $u: ident) => {{ + // The comments here should be read in the context of this macro's + // invocations in `transmute_ref!` and `transmute_mut!`. + #[allow(clippy::missing_transmute_annotations)] + if false { + // The type wildcard in this bound is inferred to be `T` because + // `align_of.into_t()` is assigned to `t` (which has type `T`). + let align_of: $crate::macro_util::AlignOf<_> = unreachable!(); + $t = align_of.into_t(); + // `max_aligns` is inferred to have type `MaxAlignsOf` because + // of the inferred types of `t` and `u`. + let mut max_aligns = $crate::macro_util::MaxAlignsOf::new($t, $u); + + // This transmute will only compile successfully if + // `align_of::() == max(align_of::(), align_of::())` - in + // other words, if `align_of::() >= align_of::()`. + // + // SAFETY: This code is never run. + max_aligns = unsafe { $crate::macro_util::core_reexport::mem::transmute(align_of) }; + } else { + loop {} + } + }}; +} + +/// Do `t` and `u` have the same size? If not, this macro produces a compile +/// error. It must be invoked in a dead codepath. This is used in +/// `transmute_ref!` and `transmute_mut!`. 
+#[doc(hidden)] // `#[macro_export]` bypasses this module's `#[doc(hidden)]`. +#[macro_export] +macro_rules! assert_size_eq { + ($t:ident, $u: ident) => {{ + // The comments here should be read in the context of this macro's + // invocations in `transmute_ref!` and `transmute_mut!`. + if false { + // SAFETY: This code is never run. + $u = unsafe { + // Clippy: It's okay to transmute a type to itself. + #[allow(clippy::useless_transmute, clippy::missing_transmute_annotations)] + $crate::macro_util::core_reexport::mem::transmute($t) + }; + } else { + loop {} + } + }}; +} + +/// Transmutes a reference of one type to a reference of another type. +/// +/// # Safety +/// +/// The caller must guarantee that: +/// - `Src: AsBytes` +/// - `Dst: FromBytes` +/// - `size_of::() == size_of::()` +/// - `align_of::() >= align_of::()` +#[inline(always)] +pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + src: &'src Src, +) -> &'dst Dst { + let src: *const Src = src; + let dst = src.cast::(); + // SAFETY: + // - We know that it is sound to view the target type of the input reference + // (`Src`) as the target type of the output reference (`Dst`) because the + // caller has guaranteed that `Src: AsBytes`, `Dst: FromBytes`, and + // `size_of::() == size_of::()`. + // - We know that there are no `UnsafeCell`s, and thus we don't have to + // worry about `UnsafeCell` overlap, because `Src: AsBytes` and `Dst: + // FromBytes` both forbid `UnsafeCell`s. + // - The caller has guaranteed that alignment is not increased. + // - We know that the returned lifetime will not outlive the input lifetime + // thanks to the lifetime bounds on this function. + unsafe { &*dst } +} + +/// Transmutes a mutable reference of one type to a mutable reference of another +/// type. 
+/// +/// # Safety +/// +/// The caller must guarantee that: +/// - `Src: FromBytes + AsBytes` +/// - `Dst: FromBytes + AsBytes` +/// - `size_of::() == size_of::()` +/// - `align_of::() >= align_of::()` +#[inline(always)] +pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + src: &'src mut Src, +) -> &'dst mut Dst { + let src: *mut Src = src; + let dst = src.cast::(); + // SAFETY: + // - We know that it is sound to view the target type of the input reference + // (`Src`) as the target type of the output reference (`Dst`) and + // vice-versa because the caller has guaranteed that `Src: FromBytes + + // AsBytes`, `Dst: FromBytes + AsBytes`, and `size_of::() == + // size_of::()`. + // - We know that there are no `UnsafeCell`s, and thus we don't have to + // worry about `UnsafeCell` overlap, because `Src: FromBytes + AsBytes` + // and `Dst: FromBytes + AsBytes` forbid `UnsafeCell`s. + // - The caller has guaranteed that alignment is not increased. + // - We know that the returned lifetime will not outlive the input lifetime + // thanks to the lifetime bounds on this function. + unsafe { &mut *dst } +} + +// NOTE: We can't change this to a `pub use core as core_reexport` until [1] is +// fixed or we update to a semver-breaking version (as of this writing, 0.8.0) +// on the `main` branch. +// +// [1] https://github.com/obi1kenobi/cargo-semver-checks/issues/573 +pub mod core_reexport { + pub use core::*; + + pub mod mem { + pub use core::mem::*; + } +} + +#[cfg(test)] +mod tests { + use core::mem; + + use super::*; + use crate::util::testutil::*; + + #[test] + fn test_align_of() { + macro_rules! test { + ($ty:ty) => { + assert_eq!(mem::size_of::>(), mem::align_of::<$ty>()); + }; + } + + test!(()); + test!(u8); + test!(AU64); + test!([AU64; 2]); + } + + #[test] + fn test_max_aligns_of() { + macro_rules! 
test { + ($t:ty, $u:ty) => { + assert_eq!( + mem::size_of::>(), + core::cmp::max(mem::align_of::<$t>(), mem::align_of::<$u>()) + ); + }; + } + + test!(u8, u8); + test!(u8, AU64); + test!(AU64, u8); + } + + #[test] + fn test_typed_align_check() { + // Test that the type-based alignment check used in + // `assert_align_gt_eq!` behaves as expected. + + macro_rules! assert_t_align_gteq_u_align { + ($t:ty, $u:ty, $gteq:expr) => { + assert_eq!( + mem::size_of::>() == mem::size_of::>(), + $gteq + ); + }; + } + + assert_t_align_gteq_u_align!(u8, u8, true); + assert_t_align_gteq_u_align!(AU64, AU64, true); + assert_t_align_gteq_u_align!(AU64, u8, true); + assert_t_align_gteq_u_align!(u8, AU64, false); + } + + // TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove + // this `cfg` when `size_of_val_raw` is stabilized. + #[allow(clippy::decimal_literal_representation)] + #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] + #[test] + fn test_trailing_field_offset() { + assert_eq!(mem::align_of::(), _64K); + + macro_rules! 
test { + (#[$cfg:meta] ($($ts:ty),* ; $trailing_field_ty:ty) => $expect:expr) => {{ + #[$cfg] + #[allow(dead_code)] // fields are never read + struct Test($($ts,)* $trailing_field_ty); + assert_eq!(test!(@offset $($ts),* ; $trailing_field_ty), $expect); + }}; + (#[$cfg:meta] $(#[$cfgs:meta])* ($($ts:ty),* ; $trailing_field_ty:ty) => $expect:expr) => { + test!(#[$cfg] ($($ts),* ; $trailing_field_ty) => $expect); + test!($(#[$cfgs])* ($($ts),* ; $trailing_field_ty) => $expect); + }; + (@offset ; $_trailing:ty) => { trailing_field_offset!(Test, 0) }; + (@offset $_t:ty ; $_trailing:ty) => { trailing_field_offset!(Test, 1) }; + } + + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)](; u8) => Some(0)); + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)](; [u8]) => Some(0)); + test!(#[repr(C)] #[repr(C, packed)] (u8; u8) => Some(1)); + test!(#[repr(C)] (; AU64) => Some(0)); + test!(#[repr(C)] (; [AU64]) => Some(0)); + test!(#[repr(C)] (u8; AU64) => Some(8)); + test!(#[repr(C)] (u8; [AU64]) => Some(8)); + test!(#[repr(C)] (; Nested) => Some(0)); + test!(#[repr(C)] (; Nested) => Some(0)); + test!(#[repr(C)] (u8; Nested) => Some(8)); + test!(#[repr(C)] (u8; Nested) => Some(8)); + + // Test that `packed(N)` limits the offset of the trailing field. 
+ test!(#[repr(C, packed( 1))] (u8; elain::Align< 2>) => Some( 1)); + test!(#[repr(C, packed( 2))] (u8; elain::Align< 4>) => Some( 2)); + test!(#[repr(C, packed( 4))] (u8; elain::Align< 8>) => Some( 4)); + test!(#[repr(C, packed( 8))] (u8; elain::Align< 16>) => Some( 8)); + test!(#[repr(C, packed( 16))] (u8; elain::Align< 32>) => Some( 16)); + test!(#[repr(C, packed( 32))] (u8; elain::Align< 64>) => Some( 32)); + test!(#[repr(C, packed( 64))] (u8; elain::Align< 128>) => Some( 64)); + test!(#[repr(C, packed( 128))] (u8; elain::Align< 256>) => Some( 128)); + test!(#[repr(C, packed( 256))] (u8; elain::Align< 512>) => Some( 256)); + test!(#[repr(C, packed( 512))] (u8; elain::Align< 1024>) => Some( 512)); + test!(#[repr(C, packed( 1024))] (u8; elain::Align< 2048>) => Some( 1024)); + test!(#[repr(C, packed( 2048))] (u8; elain::Align< 4096>) => Some( 2048)); + test!(#[repr(C, packed( 4096))] (u8; elain::Align< 8192>) => Some( 4096)); + test!(#[repr(C, packed( 8192))] (u8; elain::Align< 16384>) => Some( 8192)); + test!(#[repr(C, packed( 16384))] (u8; elain::Align< 32768>) => Some( 16384)); + test!(#[repr(C, packed( 32768))] (u8; elain::Align< 65536>) => Some( 32768)); + test!(#[repr(C, packed( 65536))] (u8; elain::Align< 131072>) => Some( 65536)); + /* Alignments above 65536 are not yet supported. 
+ test!(#[repr(C, packed( 131072))] (u8; elain::Align< 262144>) => Some( 131072)); + test!(#[repr(C, packed( 262144))] (u8; elain::Align< 524288>) => Some( 262144)); + test!(#[repr(C, packed( 524288))] (u8; elain::Align< 1048576>) => Some( 524288)); + test!(#[repr(C, packed( 1048576))] (u8; elain::Align< 2097152>) => Some( 1048576)); + test!(#[repr(C, packed( 2097152))] (u8; elain::Align< 4194304>) => Some( 2097152)); + test!(#[repr(C, packed( 4194304))] (u8; elain::Align< 8388608>) => Some( 4194304)); + test!(#[repr(C, packed( 8388608))] (u8; elain::Align< 16777216>) => Some( 8388608)); + test!(#[repr(C, packed( 16777216))] (u8; elain::Align< 33554432>) => Some( 16777216)); + test!(#[repr(C, packed( 33554432))] (u8; elain::Align< 67108864>) => Some( 33554432)); + test!(#[repr(C, packed( 67108864))] (u8; elain::Align< 33554432>) => Some( 67108864)); + test!(#[repr(C, packed( 33554432))] (u8; elain::Align<134217728>) => Some( 33554432)); + test!(#[repr(C, packed(134217728))] (u8; elain::Align<268435456>) => Some(134217728)); + test!(#[repr(C, packed(268435456))] (u8; elain::Align<268435456>) => Some(268435456)); + */ + + // Test that `align(N)` does not limit the offset of the trailing field. 
+ test!(#[repr(C, align( 1))] (u8; elain::Align< 2>) => Some( 2)); + test!(#[repr(C, align( 2))] (u8; elain::Align< 4>) => Some( 4)); + test!(#[repr(C, align( 4))] (u8; elain::Align< 8>) => Some( 8)); + test!(#[repr(C, align( 8))] (u8; elain::Align< 16>) => Some( 16)); + test!(#[repr(C, align( 16))] (u8; elain::Align< 32>) => Some( 32)); + test!(#[repr(C, align( 32))] (u8; elain::Align< 64>) => Some( 64)); + test!(#[repr(C, align( 64))] (u8; elain::Align< 128>) => Some( 128)); + test!(#[repr(C, align( 128))] (u8; elain::Align< 256>) => Some( 256)); + test!(#[repr(C, align( 256))] (u8; elain::Align< 512>) => Some( 512)); + test!(#[repr(C, align( 512))] (u8; elain::Align< 1024>) => Some( 1024)); + test!(#[repr(C, align( 1024))] (u8; elain::Align< 2048>) => Some( 2048)); + test!(#[repr(C, align( 2048))] (u8; elain::Align< 4096>) => Some( 4096)); + test!(#[repr(C, align( 4096))] (u8; elain::Align< 8192>) => Some( 8192)); + test!(#[repr(C, align( 8192))] (u8; elain::Align< 16384>) => Some( 16384)); + test!(#[repr(C, align( 16384))] (u8; elain::Align< 32768>) => Some( 32768)); + test!(#[repr(C, align( 32768))] (u8; elain::Align< 65536>) => Some( 65536)); + /* Alignments above 65536 are not yet supported. 
+ test!(#[repr(C, align( 65536))] (u8; elain::Align< 131072>) => Some( 131072)); + test!(#[repr(C, align( 131072))] (u8; elain::Align< 262144>) => Some( 262144)); + test!(#[repr(C, align( 262144))] (u8; elain::Align< 524288>) => Some( 524288)); + test!(#[repr(C, align( 524288))] (u8; elain::Align< 1048576>) => Some( 1048576)); + test!(#[repr(C, align( 1048576))] (u8; elain::Align< 2097152>) => Some( 2097152)); + test!(#[repr(C, align( 2097152))] (u8; elain::Align< 4194304>) => Some( 4194304)); + test!(#[repr(C, align( 4194304))] (u8; elain::Align< 8388608>) => Some( 8388608)); + test!(#[repr(C, align( 8388608))] (u8; elain::Align< 16777216>) => Some( 16777216)); + test!(#[repr(C, align( 16777216))] (u8; elain::Align< 33554432>) => Some( 33554432)); + test!(#[repr(C, align( 33554432))] (u8; elain::Align< 67108864>) => Some( 67108864)); + test!(#[repr(C, align( 67108864))] (u8; elain::Align< 33554432>) => Some( 33554432)); + test!(#[repr(C, align( 33554432))] (u8; elain::Align<134217728>) => Some(134217728)); + test!(#[repr(C, align(134217728))] (u8; elain::Align<268435456>) => Some(268435456)); + */ + } + + // TODO(#29), TODO(https://github.com/rust-lang/rust/issues/69835): Remove + // this `cfg` when `size_of_val_raw` is stabilized. + #[allow(clippy::decimal_literal_representation)] + #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] + #[test] + fn test_align_of_dst() { + // Test that `align_of!` correctly computes the alignment of DSTs. 
+ assert_eq!(align_of!([elain::Align<1>]), Some(1)); + assert_eq!(align_of!([elain::Align<2>]), Some(2)); + assert_eq!(align_of!([elain::Align<4>]), Some(4)); + assert_eq!(align_of!([elain::Align<8>]), Some(8)); + assert_eq!(align_of!([elain::Align<16>]), Some(16)); + assert_eq!(align_of!([elain::Align<32>]), Some(32)); + assert_eq!(align_of!([elain::Align<64>]), Some(64)); + assert_eq!(align_of!([elain::Align<128>]), Some(128)); + assert_eq!(align_of!([elain::Align<256>]), Some(256)); + assert_eq!(align_of!([elain::Align<512>]), Some(512)); + assert_eq!(align_of!([elain::Align<1024>]), Some(1024)); + assert_eq!(align_of!([elain::Align<2048>]), Some(2048)); + assert_eq!(align_of!([elain::Align<4096>]), Some(4096)); + assert_eq!(align_of!([elain::Align<8192>]), Some(8192)); + assert_eq!(align_of!([elain::Align<16384>]), Some(16384)); + assert_eq!(align_of!([elain::Align<32768>]), Some(32768)); + assert_eq!(align_of!([elain::Align<65536>]), Some(65536)); + /* Alignments above 65536 are not yet supported. 
+ assert_eq!(align_of!([elain::Align<131072>]), Some(131072)); + assert_eq!(align_of!([elain::Align<262144>]), Some(262144)); + assert_eq!(align_of!([elain::Align<524288>]), Some(524288)); + assert_eq!(align_of!([elain::Align<1048576>]), Some(1048576)); + assert_eq!(align_of!([elain::Align<2097152>]), Some(2097152)); + assert_eq!(align_of!([elain::Align<4194304>]), Some(4194304)); + assert_eq!(align_of!([elain::Align<8388608>]), Some(8388608)); + assert_eq!(align_of!([elain::Align<16777216>]), Some(16777216)); + assert_eq!(align_of!([elain::Align<33554432>]), Some(33554432)); + assert_eq!(align_of!([elain::Align<67108864>]), Some(67108864)); + assert_eq!(align_of!([elain::Align<33554432>]), Some(33554432)); + assert_eq!(align_of!([elain::Align<134217728>]), Some(134217728)); + assert_eq!(align_of!([elain::Align<268435456>]), Some(268435456)); + */ + } + + #[test] + fn test_struct_has_padding() { + // Test that, for each provided repr, `struct_has_padding!` reports the + // expected value. + macro_rules! test { + (#[$cfg:meta] ($($ts:ty),*) => $expect:expr) => {{ + #[$cfg] + #[allow(dead_code)] // fields are never read + struct Test($($ts),*); + assert_eq!(struct_has_padding!(Test, $($ts),*), $expect); + }}; + (#[$cfg:meta] $(#[$cfgs:meta])* ($($ts:ty),*) => $expect:expr) => { + test!(#[$cfg] ($($ts),*) => $expect); + test!($(#[$cfgs])* ($($ts),*) => $expect); + }; + } + + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] () => false); + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] (u8) => false); + test!(#[repr(C)] #[repr(transparent)] #[repr(packed)] (u8, ()) => false); + test!(#[repr(C)] #[repr(packed)] (u8, u8) => false); + + test!(#[repr(C)] (u8, AU64) => true); + // Rust won't let you put `#[repr(packed)]` on a type which contains a + // `#[repr(align(n > 1))]` type (`AU64`), so we have to use `u64` here. 
+ // It's not ideal, but it definitely has align > 1 on /some/ of our CI + // targets, and this isn't a particularly complex macro we're testing + // anyway. + test!(#[repr(packed)] (u8, u64) => false); + } + + #[test] + fn test_union_has_padding() { + // Test that, for each provided repr, `union_has_padding!` reports the + // expected value. + macro_rules! test { + (#[$cfg:meta] {$($fs:ident: $ts:ty),*} => $expect:expr) => {{ + #[$cfg] + #[allow(unused)] // fields are never read + union Test{ $($fs: $ts),* } + assert_eq!(union_has_padding!(Test, $($ts),*), $expect); + }}; + (#[$cfg:meta] $(#[$cfgs:meta])* {$($fs:ident: $ts:ty),*} => $expect:expr) => { + test!(#[$cfg] {$($fs: $ts),*} => $expect); + test!($(#[$cfgs])* {$($fs: $ts),*} => $expect); + }; + } + + test!(#[repr(C)] #[repr(packed)] {a: u8} => false); + test!(#[repr(C)] #[repr(packed)] {a: u8, b: u8} => false); + + // Rust won't let you put `#[repr(packed)]` on a type which contains a + // `#[repr(align(n > 1))]` type (`AU64`), so we have to use `u64` here. + // It's not ideal, but it definitely has align > 1 on /some/ of our CI + // targets, and this isn't a particularly complex macro we're testing + // anyway. + test!(#[repr(C)] #[repr(packed)] {a: u8, b: u64} => true); + } +} diff --git a/src/rust/vendor/zerocopy/src/macros.rs b/src/rust/vendor/zerocopy/src/macros.rs new file mode 100644 index 000000000..a3d799684 --- /dev/null +++ b/src/rust/vendor/zerocopy/src/macros.rs @@ -0,0 +1,416 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +/// Documents multiple unsafe blocks with a single safety comment. +/// +/// Invoked as: +/// +/// ```rust,ignore +/// safety_comment! { +/// // Non-doc comments come first. +/// /// SAFETY: +/// /// Safety comment starts on its own line. 
+/// macro_1!(args); +/// macro_2! { args }; +/// /// SAFETY: +/// /// Subsequent safety comments are allowed but not required. +/// macro_3! { args }; +/// } +/// ``` +/// +/// The macro invocations are emitted, each decorated with the following +/// attribute: `#[allow(clippy::undocumented_unsafe_blocks)]`. +macro_rules! safety_comment { + (#[doc = r" SAFETY:"] $($(#[$attr:meta])* $macro:ident!$args:tt;)*) => { + #[allow(clippy::undocumented_unsafe_blocks, unused_attributes)] + const _: () = { $($(#[$attr])* $macro!$args;)* }; + } +} + +/// Unsafely implements trait(s) for a type. +/// +/// # Safety +/// +/// The trait impl must be sound. +/// +/// When implementing `TryFromBytes`: +/// - If no `is_bit_valid` impl is provided, then it must be valid for +/// `is_bit_valid` to unconditionally return `true`. In other words, it must +/// be the case that any initialized sequence of bytes constitutes a valid +/// instance of `$ty`. +/// - If an `is_bit_valid` impl is provided, then: +/// - Regardless of whether the provided closure takes a `Ptr<$repr>` or +/// `&$repr` argument, it must be the case that, given `t: *mut $ty` and +/// `let r = t as *mut $repr`, `r` refers to an object of equal or lesser +/// size than the object referred to by `t`. +/// - If the provided closure takes a `&$repr` argument, then given a `Ptr<'a, +/// $ty>` which satisfies the preconditions of +/// `TryFromBytes::<$ty>::is_bit_valid`, it must be guaranteed that the +/// memory referenced by that `Ptr` always contains a valid `$repr`. +/// - The alignment of `$repr` is less than or equal to the alignment of +/// `$ty`. +/// - The impl of `is_bit_valid` must only return `true` for its argument +/// `Ptr<$repr>` if the original `Ptr<$ty>` refers to a valid `$ty`. +macro_rules! unsafe_impl { + // Implement `$trait` for `$ty` with no bounds. + ($(#[$attr:meta])* $ty:ty: $trait:ident $(; |$candidate:ident: &$repr:ty| $is_bit_valid:expr)?) 
=> { + $(#[$attr])* + unsafe impl $trait for $ty { + unsafe_impl!(@method $trait $(; |$candidate: &$repr| $is_bit_valid)?); + } + }; + // Implement all `$traits` for `$ty` with no bounds. + ($ty:ty: $($traits:ident),*) => { + $( unsafe_impl!($ty: $traits); )* + }; + // This arm is identical to the following one, except it contains a + // preceding `const`. If we attempt to handle these with a single arm, there + // is an inherent ambiguity between `const` (the keyword) and `const` (the + // ident match for `$tyvar:ident`). + // + // To explain how this works, consider the following invocation: + // + // unsafe_impl!(const N: usize, T: ?Sized + Copy => Clone for Foo); + // + // In this invocation, here are the assignments to meta-variables: + // + // |---------------|------------| + // | Meta-variable | Assignment | + // |---------------|------------| + // | $constname | N | + // | $constty | usize | + // | $tyvar | T | + // | $optbound | Sized | + // | $bound | Copy | + // | $trait | Clone | + // | $ty | Foo | + // |---------------|------------| + // + // The following arm has the same behavior with the exception of the lack of + // support for a leading `const` parameter. + ( + $(#[$attr:meta])* + const $constname:ident : $constty:ident $(,)? + $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* + => $trait:ident for $ty:ty $(; |$candidate:ident $(: &$ref_repr:ty)? $(: Ptr<$ptr_repr:ty>)?| $is_bit_valid:expr)? + ) => { + unsafe_impl!( + @inner + $(#[$attr])* + @const $constname: $constty, + $($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)* + => $trait for $ty $(; |$candidate $(: &$ref_repr)? $(: Ptr<$ptr_repr>)?| $is_bit_valid)? + ); + }; + ( + $(#[$attr:meta])* + $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* + => $trait:ident for $ty:ty $(; |$candidate:ident $(: &$ref_repr:ty)? $(: Ptr<$ptr_repr:ty>)?| $is_bit_valid:expr)? + ) => { + unsafe_impl!( + @inner + $(#[$attr])* + $($tyvar $(: $(? 
$optbound +)* + $($bound +)*)?,)* + => $trait for $ty $(; |$candidate $(: &$ref_repr)? $(: Ptr<$ptr_repr>)?| $is_bit_valid)? + ); + }; + ( + @inner + $(#[$attr:meta])* + $(@const $constname:ident : $constty:ident,)* + $($tyvar:ident $(: $(? $optbound:ident +)* + $($bound:ident +)* )?,)* + => $trait:ident for $ty:ty $(; |$candidate:ident $(: &$ref_repr:ty)? $(: Ptr<$ptr_repr:ty>)?| $is_bit_valid:expr)? + ) => { + $(#[$attr])* + unsafe impl<$(const $constname: $constty,)* $($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> $trait for $ty { + unsafe_impl!(@method $trait $(; |$candidate: $(&$ref_repr)? $(Ptr<$ptr_repr>)?| $is_bit_valid)?); + } + }; + + (@method TryFromBytes ; |$candidate:ident: &$repr:ty| $is_bit_valid:expr) => { + #[inline] + unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool { + // SAFETY: + // - The argument to `cast_unsized` is `|p| p as *mut _` as required + // by that method's safety precondition. + // - The caller has promised that the cast results in an object of + // equal or lesser size. + // - The caller has promised that `$repr`'s alignment is less than + // or equal to `Self`'s alignment. + #[allow(clippy::as_conversions)] + let candidate = unsafe { candidate.cast_unsized::<$repr, _>(|p| p as *mut _) }; + // SAFETY: + // - The caller has promised that the referenced memory region will + // contain a valid `$repr` for `'a`. + // - The memory may not be referenced by any mutable references. + // This is a precondition of `is_bit_valid`. + // - The memory may not be mutated even via `UnsafeCell`s. This is a + // precondition of `is_bit_valid`. + // - There must not exist any references to the same memory region + // which contain `UnsafeCell`s at byte ranges which are not + // identical to the byte ranges at which `T` contains + // `UnsafeCell`s. This is a precondition of `is_bit_valid`. 
+ let $candidate: &$repr = unsafe { candidate.as_ref() }; + $is_bit_valid + } + }; + (@method TryFromBytes ; |$candidate:ident: Ptr<$repr:ty>| $is_bit_valid:expr) => { + #[inline] + unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool { + // SAFETY: + // - The argument to `cast_unsized` is `|p| p as *mut _` as required + // by that method's safety precondition. + // - The caller has promised that the cast results in an object of + // equal or lesser size. + // - The caller has promised that `$repr`'s alignment is less than + // or equal to `Self`'s alignment. + #[allow(clippy::as_conversions)] + let $candidate = unsafe { candidate.cast_unsized::<$repr, _>(|p| p as *mut _) }; + $is_bit_valid + } + }; + (@method TryFromBytes) => { #[inline(always)] unsafe fn is_bit_valid(_: Ptr<'_, Self>) -> bool { true } }; + (@method $trait:ident) => { + #[allow(clippy::missing_inline_in_public_items)] + fn only_derive_is_allowed_to_implement_this_trait() {} + }; + (@method $trait:ident; |$_candidate:ident $(: &$_ref_repr:ty)? $(: NonNull<$_ptr_repr:ty>)?| $_is_bit_valid:expr) => { + compile_error!("Can't provide `is_bit_valid` impl for trait other than `TryFromBytes`"); + }; +} + +/// Implements a trait for a type, bounding on each memeber of the power set of +/// a set of type variables. This is useful for implementing traits for tuples +/// or `fn` types. +/// +/// The last argument is the name of a macro which will be called in every +/// `impl` block, and is expected to expand to the name of the type for which to +/// implement the trait. +/// +/// For example, the invocation: +/// ```ignore +/// unsafe_impl_for_power_set!(A, B => Foo for type!(...)) +/// ``` +/// ...expands to: +/// ```ignore +/// unsafe impl Foo for type!() { ... } +/// unsafe impl Foo for type!(B) { ... } +/// unsafe impl Foo for type!(A, B) { ... } +/// ``` +macro_rules! unsafe_impl_for_power_set { + ($first:ident $(, $rest:ident)* $(-> $ret:ident)? 
=> $trait:ident for $macro:ident!(...)) => { + unsafe_impl_for_power_set!($($rest),* $(-> $ret)? => $trait for $macro!(...)); + unsafe_impl_for_power_set!(@impl $first $(, $rest)* $(-> $ret)? => $trait for $macro!(...)); + }; + ($(-> $ret:ident)? => $trait:ident for $macro:ident!(...)) => { + unsafe_impl_for_power_set!(@impl $(-> $ret)? => $trait for $macro!(...)); + }; + (@impl $($vars:ident),* $(-> $ret:ident)? => $trait:ident for $macro:ident!(...)) => { + unsafe impl<$($vars,)* $($ret)?> $trait for $macro!($($vars),* $(-> $ret)?) { + #[allow(clippy::missing_inline_in_public_items)] + fn only_derive_is_allowed_to_implement_this_trait() {} + } + }; +} + +/// Expands to an `Option` type with the given argument types and +/// return type. Designed for use with `unsafe_impl_for_power_set`. +macro_rules! opt_extern_c_fn { + ($($args:ident),* -> $ret:ident) => { Option $ret> }; +} + +/// Expands to a `Option` type with the given argument types and return +/// type. Designed for use with `unsafe_impl_for_power_set`. +macro_rules! opt_fn { + ($($args:ident),* -> $ret:ident) => { Option $ret> }; +} + +/// Implements trait(s) for a type or verifies the given implementation by +/// referencing an existing (derived) implementation. +/// +/// This macro exists so that we can provide zerocopy-derive as an optional +/// dependency and still get the benefit of using its derives to validate that +/// our trait impls are sound. +/// +/// When compiling without `--cfg 'feature = "derive"` and without `--cfg test`, +/// `impl_or_verify!` emits the provided trait impl. When compiling with either +/// of those cfgs, it is expected that the type in question is deriving the +/// traits instead. In this case, `impl_or_verify!` emits code which validates +/// that the given trait impl is at least as restrictive as the the impl emitted +/// by the custom derive. 
This has the effect of confirming that the impl which +/// is emitted when the `derive` feature is disabled is actually sound (on the +/// assumption that the impl emitted by the custom derive is sound). +/// +/// The caller is still required to provide a safety comment (e.g. using the +/// `safety_comment!` macro) . The reason for this restriction is that, while +/// `impl_or_verify!` can guarantee that the provided impl is sound when it is +/// compiled with the appropriate cfgs, there is no way to guarantee that it is +/// ever compiled with those cfgs. In particular, it would be possible to +/// accidentally place an `impl_or_verify!` call in a context that is only ever +/// compiled when the `derive` feature is disabled. If that were to happen, +/// there would be nothing to prevent an unsound trait impl from being emitted. +/// Requiring a safety comment reduces the likelihood of emitting an unsound +/// impl in this case, and also provides useful documentation for readers of the +/// code. +/// +/// ## Example +/// +/// ```rust,ignore +/// // Note that these derives are gated by `feature = "derive"` +/// #[cfg_attr(any(feature = "derive", test), derive(FromZeroes, FromBytes, AsBytes, Unaligned))] +/// #[repr(transparent)] +/// struct Wrapper(T); +/// +/// safety_comment! { +/// /// SAFETY: +/// /// `Wrapper` is `repr(transparent)`, so it is sound to implement any +/// /// zerocopy trait if `T` implements that trait. +/// impl_or_verify!(T: FromZeroes => FromZeroes for Wrapper); +/// impl_or_verify!(T: FromBytes => FromBytes for Wrapper); +/// impl_or_verify!(T: AsBytes => AsBytes for Wrapper); +/// impl_or_verify!(T: Unaligned => Unaligned for Wrapper); +/// } +/// ``` +macro_rules! impl_or_verify { + // The following two match arms follow the same pattern as their + // counterparts in `unsafe_impl!`; see the documentation on those arms for + // more details. + ( + const $constname:ident : $constty:ident $(,)? + $($tyvar:ident $(: $(? 
$optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* + => $trait:ident for $ty:ty + ) => { + impl_or_verify!(@impl { unsafe_impl!( + const $constname: $constty, $($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty + ); }); + impl_or_verify!(@verify $trait, { + impl Subtrait for $ty {} + }); + }; + ( + $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* + => $trait:ident for $ty:ty + ) => { + impl_or_verify!(@impl { unsafe_impl!( + $($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty + ); }); + impl_or_verify!(@verify $trait, { + impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {} + }); + }; + ( + $($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* + => $trait:ident for $ty:ty + ) => { + unsafe_impl!( + @inner + $($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)* + => $trait for $ty + ); + }; + (@impl $impl_block:tt) => { + #[cfg(not(any(feature = "derive", test)))] + const _: () = { $impl_block }; + }; + (@verify $trait:ident, $impl_block:tt) => { + #[cfg(any(feature = "derive", test))] + const _: () = { + trait Subtrait: $trait {} + $impl_block + }; + }; +} + +/// Implements `KnownLayout` for a sized type. +macro_rules! impl_known_layout { + ($(const $constvar:ident : $constty:ty, $tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => { + $(impl_known_layout!(@inner const $constvar: $constty, $tyvar $(: ?$optbound)? => $ty);)* + }; + ($($tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => { + $(impl_known_layout!(@inner , $tyvar $(: ?$optbound)? => $ty);)* + }; + ($($ty:ty),*) => { $(impl_known_layout!(@inner , => $ty);)* }; + (@inner $(const $constvar:ident : $constty:ty)? , $($tyvar:ident $(: ?$optbound:ident)?)? => $ty:ty) => { + const _: () = { + use core::ptr::NonNull; + + // SAFETY: Delegates safety to `DstLayout::for_type`. + unsafe impl<$(const $constvar : $constty,)? 
$($tyvar $(: ?$optbound)?)?> KnownLayout for $ty { + #[allow(clippy::missing_inline_in_public_items)] + fn only_derive_is_allowed_to_implement_this_trait() where Self: Sized {} + + const LAYOUT: DstLayout = DstLayout::for_type::<$ty>(); + + // SAFETY: `.cast` preserves address and provenance. + // + // TODO(#429): Add documentation to `.cast` that promises that + // it preserves provenance. + #[inline(always)] + fn raw_from_ptr_len(bytes: NonNull, _elems: usize) -> NonNull { + bytes.cast::() + } + } + }; + }; +} + +/// Implements `KnownLayout` for a type in terms of the implementation of +/// another type with the same representation. +/// +/// # Safety +/// +/// - `$ty` and `$repr` must have the same: +/// - Fixed prefix size +/// - Alignment +/// - (For DSTs) trailing slice element size +/// - It must be valid to perform an `as` cast from `*mut $repr` to `*mut $ty`, +/// and this operation must preserve referent size (ie, `size_of_val_raw`). +macro_rules! unsafe_impl_known_layout { + ($($tyvar:ident: ?Sized + KnownLayout =>)? #[repr($repr:ty)] $ty:ty) => { + const _: () = { + use core::ptr::NonNull; + + unsafe impl<$($tyvar: ?Sized + KnownLayout)?> KnownLayout for $ty { + #[allow(clippy::missing_inline_in_public_items)] + fn only_derive_is_allowed_to_implement_this_trait() {} + + const LAYOUT: DstLayout = <$repr as KnownLayout>::LAYOUT; + + // SAFETY: All operations preserve address and provenance. + // Caller has promised that the `as` cast preserves size. + // + // TODO(#429): Add documentation to `NonNull::new_unchecked` + // that it preserves provenance. + #[inline(always)] + fn raw_from_ptr_len(bytes: NonNull, elems: usize) -> NonNull { + #[allow(clippy::as_conversions)] + let ptr = <$repr>::raw_from_ptr_len(bytes, elems).as_ptr() as *mut Self; + // SAFETY: `ptr` was converted from `bytes`, which is non-null. + unsafe { NonNull::new_unchecked(ptr) } + } + } + }; + }; +} + +/// Uses `align_of` to confirm that a type or set of types have alignment 1. 
+/// +/// Note that `align_of` requires `T: Sized`, so this macro doesn't work for +/// unsized types. +macro_rules! assert_unaligned { + ($ty:ty) => { + // We only compile this assertion under `cfg(test)` to avoid taking an + // extra non-dev dependency (and making this crate more expensive to + // compile for our dependents). + #[cfg(test)] + static_assertions::const_assert_eq!(core::mem::align_of::<$ty>(), 1); + }; + ($($ty:ty),*) => { + $(assert_unaligned!($ty);)* + }; +} diff --git a/src/rust/vendor/zerocopy/src/post_monomorphization_compile_fail_tests.rs b/src/rust/vendor/zerocopy/src/post_monomorphization_compile_fail_tests.rs new file mode 100644 index 000000000..32505b669 --- /dev/null +++ b/src/rust/vendor/zerocopy/src/post_monomorphization_compile_fail_tests.rs @@ -0,0 +1,118 @@ +// Copyright 2018 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +//! Code that should fail to compile during the post-monomorphization compiler +//! pass. +//! +//! Due to [a limitation with the `trybuild` crate][trybuild-issue], we cannot +//! use our UI testing framework to test compilation failures that are +//! encountered after monomorphization has complated. This module has one item +//! for each such test we would prefer to have as a UI test, with the code in +//! question appearing as a rustdoc example which is marked with `compile_fail`. +//! This has the effect of causing doctests to fail if any of these examples +//! compile successfully. +//! +//! This is very much a hack and not a complete replacement for UI tests - most +//! notably because this only provides a single "compile vs fail" bit of +//! information, but does not allow us to depend upon the specific error that +//! causes compilation to fail. +//! +//! 
[trybuild-issue]: https://github.com/dtolnay/trybuild/issues/241 + +// Miri doesn't detect post-monimorphization failures as compile-time failures, +// but instead as runtime failures. +#![cfg(not(miri))] + +/// ```compile_fail +/// use core::cell::{Ref, RefCell}; +/// +/// let refcell = RefCell::new([0u8, 1, 2, 3]); +/// let core_ref = refcell.borrow(); +/// let core_ref = Ref::map(core_ref, |bytes| &bytes[..]); +/// +/// // `zc_ref` now stores `core_ref` internally. +/// let zc_ref = zerocopy::Ref::<_, u32>::new(core_ref).unwrap(); +/// +/// // This causes `core_ref` to get dropped and synthesizes a Rust +/// // reference to the memory `core_ref` was pointing at. +/// let rust_ref = zc_ref.into_ref(); +/// +/// // UB!!! This mutates `rust_ref`'s referent while it's alive. +/// *refcell.borrow_mut() = [0, 0, 0, 0]; +/// +/// println!("{}", rust_ref); +/// ``` +#[allow(unused)] +const REFCELL_REF_INTO_REF: () = (); + +/// ```compile_fail +/// use core::cell::{RefCell, RefMut}; +/// +/// let refcell = RefCell::new([0u8, 1, 2, 3]); +/// let core_ref_mut = refcell.borrow_mut(); +/// let core_ref_mut = RefMut::map(core_ref_mut, |bytes| &mut bytes[..]); +/// +/// // `zc_ref` now stores `core_ref_mut` internally. +/// let zc_ref = zerocopy::Ref::<_, u32>::new(core_ref_mut).unwrap(); +/// +/// // This causes `core_ref_mut` to get dropped and synthesizes a Rust +/// // reference to the memory `core_ref` was pointing at. +/// let rust_ref_mut = zc_ref.into_mut(); +/// +/// // UB!!! This mutates `rust_ref_mut`'s referent while it's alive. +/// *refcell.borrow_mut() = [0, 0, 0, 0]; +/// +/// println!("{}", rust_ref_mut); +/// ``` +#[allow(unused)] +const REFCELL_REFMUT_INTO_MUT: () = (); + +/// ```compile_fail +/// use core::cell::{Ref, RefCell}; +/// +/// let refcell = RefCell::new([0u8, 1, 2, 3]); +/// let core_ref = refcell.borrow(); +/// let core_ref = Ref::map(core_ref, |bytes| &bytes[..]); +/// +/// // `zc_ref` now stores `core_ref` internally. 
+/// let zc_ref = zerocopy::Ref::<_, [u16]>::new_slice(core_ref).unwrap(); +/// +/// // This causes `core_ref` to get dropped and synthesizes a Rust +/// // reference to the memory `core_ref` was pointing at. +/// let rust_ref = zc_ref.into_slice(); +/// +/// // UB!!! This mutates `rust_ref`'s referent while it's alive. +/// *refcell.borrow_mut() = [0, 0, 0, 0]; +/// +/// println!("{:?}", rust_ref); +/// ``` +#[allow(unused)] +const REFCELL_REFMUT_INTO_SLICE: () = (); + +/// ```compile_fail +/// use core::cell::{RefCell, RefMut}; +/// +/// let refcell = RefCell::new([0u8, 1, 2, 3]); +/// let core_ref_mut = refcell.borrow_mut(); +/// let core_ref_mut = RefMut::map(core_ref_mut, |bytes| &mut bytes[..]); +/// +/// // `zc_ref` now stores `core_ref_mut` internally. +/// let zc_ref = zerocopy::Ref::<_, [u16]>::new_slice(core_ref_mut).unwrap(); +/// +/// // This causes `core_ref_mut` to get dropped and synthesizes a Rust +/// // reference to the memory `core_ref` was pointing at. +/// let rust_ref_mut = zc_ref.into_mut_slice(); +/// +/// // UB!!! This mutates `rust_ref_mut`'s referent while it's alive. +/// *refcell.borrow_mut() = [0, 0, 0, 0]; +/// +/// println!("{:?}", rust_ref_mut); +/// ``` +#[allow(unused)] +const REFCELL_REFMUT_INTO_MUT_SLICE: () = (); diff --git a/src/rust/vendor/zerocopy/src/third_party/rust/LICENSE-APACHE b/src/rust/vendor/zerocopy/src/third_party/rust/LICENSE-APACHE new file mode 100644 index 000000000..1b5ec8b78 --- /dev/null +++ b/src/rust/vendor/zerocopy/src/third_party/rust/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS diff --git a/src/rust/vendor/zerocopy/src/third_party/rust/LICENSE-MIT b/src/rust/vendor/zerocopy/src/third_party/rust/LICENSE-MIT new file mode 100644 index 000000000..31aa79387 --- /dev/null +++ b/src/rust/vendor/zerocopy/src/third_party/rust/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/src/rust/vendor/zerocopy/src/third_party/rust/README.fuchsia b/src/rust/vendor/zerocopy/src/third_party/rust/README.fuchsia new file mode 100644 index 000000000..e0a23dd8e --- /dev/null +++ b/src/rust/vendor/zerocopy/src/third_party/rust/README.fuchsia @@ -0,0 +1,7 @@ +Name: rust +License File: LICENSE-APACHE +License File: LICENSE-MIT +Description: + +See https://github.com/google/zerocopy/pull/492 for an explanation of why this +file exists. 
diff --git a/src/rust/vendor/zerocopy/src/third_party/rust/layout.rs b/src/rust/vendor/zerocopy/src/third_party/rust/layout.rs new file mode 100644 index 000000000..19ef7c698 --- /dev/null +++ b/src/rust/vendor/zerocopy/src/third_party/rust/layout.rs @@ -0,0 +1,45 @@ +use core::num::NonZeroUsize; + +/// Returns the amount of padding we must insert after `len` bytes to ensure +/// that the following address will satisfy `align` (measured in bytes). +/// +/// e.g., if `len` is 9, then `padding_needed_for(len, 4)` returns 3, because +/// that is the minimum number of bytes of padding required to get a 4-aligned +/// address (assuming that the corresponding memory block starts at a 4-aligned +/// address). +/// +/// The return value of this function has no meaning if `align` is not a +/// power-of-two. +/// +/// # Panics +/// +/// May panic if `align` is not a power of two. +// +// TODO(#419): Replace `len` with a witness type for region size. +#[allow(unused)] +#[inline(always)] +pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize { + // Rounded up value is: + // len_rounded_up = (len + align - 1) & !(align - 1); + // and then we return the padding difference: `len_rounded_up - len`. + // + // We use modular arithmetic throughout: + // + // 1. align is guaranteed to be > 0, so align - 1 is always + // valid. + // + // 2. `len + align - 1` can overflow by at most `align - 1`, + // so the &-mask with `!(align - 1)` will ensure that in the + // case of overflow, `len_rounded_up` will itself be 0. + // Thus the returned padding, when added to `len`, yields 0, + // which trivially satisfies the alignment `align`. + // + // (Of course, attempts to allocate blocks of memory whose + // size and padding overflow in the above manner should cause + // the allocator to yield an error anyway.) 
+ + let align = align.get(); + debug_assert!(align.is_power_of_two()); + let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); + len_rounded_up.wrapping_sub(len) +} diff --git a/src/rust/vendor/zerocopy/src/util.rs b/src/rust/vendor/zerocopy/src/util.rs new file mode 100644 index 000000000..50cad1f6d --- /dev/null +++ b/src/rust/vendor/zerocopy/src/util.rs @@ -0,0 +1,810 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[path = "third_party/rust/layout.rs"] +pub(crate) mod core_layout; + +use core::{mem, num::NonZeroUsize}; + +pub(crate) mod ptr { + use core::{ + fmt::{Debug, Formatter}, + marker::PhantomData, + ptr::NonNull, + }; + + use crate::{util::AsAddress, KnownLayout, _CastType}; + + /// A raw pointer with more restrictions. + /// + /// `Ptr` is similar to `NonNull`, but it is more restrictive in the + /// following ways: + /// - It must derive from a valid allocation + /// - It must reference a byte range which is contained inside the + /// allocation from which it derives + /// - As a consequence, the byte range it references must have a size + /// which does not overflow `isize` + /// - It must satisfy `T`'s alignment requirement + /// + /// Thanks to these restrictions, it is easier to prove the soundness of + /// some operations using `Ptr`s. + /// + /// `Ptr<'a, T>` is [covariant] in `'a` and `T`. + /// + /// [covariant]: https://doc.rust-lang.org/reference/subtyping.html + pub struct Ptr<'a, T: 'a + ?Sized> { + // INVARIANTS: + // 1. `ptr` is derived from some valid Rust allocation, `A` + // 2. `ptr` has the same provenance as `A` + // 3. `ptr` addresses a byte range which is entirely contained in `A` + // 4. `ptr` addresses a byte range whose length fits in an `isize` + // 5. 
`ptr` addresses a byte range which does not wrap around the address + // space + // 6. `ptr` is validly-aligned for `T` + // 7. `A` is guaranteed to live for at least `'a` + // 8. `T: 'a` + ptr: NonNull, + _lifetime: PhantomData<&'a ()>, + } + + impl<'a, T: ?Sized> Copy for Ptr<'a, T> {} + impl<'a, T: ?Sized> Clone for Ptr<'a, T> { + #[inline] + fn clone(&self) -> Self { + *self + } + } + + impl<'a, T: ?Sized> Ptr<'a, T> { + /// Returns a shared reference to the value. + /// + /// # Safety + /// + /// For the duration of `'a`: + /// - The referenced memory must contain a validly-initialized `T` for + /// the duration of `'a`. + /// - The referenced memory must not also be referenced by any mutable + /// references. + /// - The referenced memory must not be mutated, even via an + /// [`UnsafeCell`]. + /// - There must not exist any references to the same memory region + /// which contain `UnsafeCell`s at byte ranges which are not identical + /// to the byte ranges at which `T` contains `UnsafeCell`s. + /// + /// [`UnsafeCell`]: core::cell::UnsafeCell + // TODO(#429): The safety requirements are likely overly-restrictive. + // Notably, mutation via `UnsafeCell`s is probably fine. Once the rules + // are more clearly defined, we should relax the safety requirements. + // For an example of why this is subtle, see: + // https://github.com/rust-lang/unsafe-code-guidelines/issues/463#issuecomment-1736771593 + #[allow(unused)] + pub(crate) unsafe fn as_ref(&self) -> &'a T { + // SAFETY: + // - By invariant, `self.ptr` is properly-aligned for `T`. + // - By invariant, `self.ptr` is "dereferenceable" in that it points + // to a single allocation. + // - By invariant, the allocation is live for `'a`. + // - The caller promises that no mutable references exist to this + // region during `'a`. + // - The caller promises that `UnsafeCell`s match exactly. + // - The caller promises that no mutation will happen during `'a`, + // even via `UnsafeCell`s. 
+ // - The caller promises that the memory region contains a + // validly-intialized `T`. + unsafe { self.ptr.as_ref() } + } + + /// Casts to a different (unsized) target type. + /// + /// # Safety + /// + /// The caller promises that + /// - `cast(p)` is implemented exactly as follows: `|p: *mut T| p as + /// *mut U`. + /// - The size of the object referenced by the resulting pointer is less + /// than or equal to the size of the object referenced by `self`. + /// - The alignment of `U` is less than or equal to the alignment of + /// `T`. + pub(crate) unsafe fn cast_unsized *mut U>( + self, + cast: F, + ) -> Ptr<'a, U> { + let ptr = cast(self.ptr.as_ptr()); + // SAFETY: Caller promises that `cast` is just an `as` cast. We call + // `cast` on `self.ptr.as_ptr()`, which is non-null by construction. + let ptr = unsafe { NonNull::new_unchecked(ptr) }; + // SAFETY: + // - By invariant, `self.ptr` is derived from some valid Rust + // allocation, and since `ptr` is just `self.ptr as *mut U`, so is + // `ptr`. + // - By invariant, `self.ptr` has the same provenance as `A`, and so + // the same is true of `ptr`. + // - By invariant, `self.ptr` addresses a byte range which is + // entirely contained in `A`, and so the same is true of `ptr`. + // - By invariant, `self.ptr` addresses a byte range whose length + // fits in an `isize`, and so the same is true of `ptr`. + // - By invariant, `self.ptr` addresses a byte range which does not + // wrap around the address space, and so the same is true of + // `ptr`. + // - By invariant, `self.ptr` is validly-aligned for `T`. Since + // `ptr` has the same address, and since the caller promises that + // the alignment of `U` is less than or equal to the alignment of + // `T`, `ptr` is validly-aligned for `U`. + // - By invariant, `A` is guaranteed to live for at least `'a`. + // - `U: 'a` + Ptr { ptr, _lifetime: PhantomData } + } + } + + impl<'a> Ptr<'a, [u8]> { + /// Attempts to cast `self` to a `U` using the given cast type. 
+ /// + /// Returns `None` if the resulting `U` would be invalidly-aligned or if + /// no `U` can fit in `self`. On success, returns a pointer to the + /// largest-possible `U` which fits in `self`. + /// + /// # Safety + /// + /// The caller may assume that this implementation is correct, and may + /// rely on that assumption for the soundness of their code. In + /// particular, the caller may assume that, if `try_cast_into` returns + /// `Some((ptr, split_at))`, then: + /// - If this is a prefix cast, `ptr` refers to the byte range `[0, + /// split_at)` in `self`. + /// - If this is a suffix cast, `ptr` refers to the byte range + /// `[split_at, self.len())` in `self`. + /// + /// # Panics + /// + /// Panics if `U` is a DST whose trailing slice element is zero-sized. + pub(crate) fn try_cast_into( + &self, + cast_type: _CastType, + ) -> Option<(Ptr<'a, U>, usize)> { + // PANICS: By invariant, the byte range addressed by `self.ptr` does + // not wrap around the address space. This implies that the sum of + // the address (represented as a `usize`) and length do not overflow + // `usize`, as required by `validate_cast_and_convert_metadata`. + // Thus, this call to `validate_cast_and_convert_metadata` won't + // panic. + let (elems, split_at) = U::LAYOUT.validate_cast_and_convert_metadata( + AsAddress::addr(self.ptr.as_ptr()), + self.len(), + cast_type, + )?; + let offset = match cast_type { + _CastType::_Prefix => 0, + _CastType::_Suffix => split_at, + }; + + let ptr = self.ptr.cast::().as_ptr(); + // SAFETY: `offset` is either `0` or `split_at`. + // `validate_cast_and_convert_metadata` promises that `split_at` is + // in the range `[0, self.len()]`. Thus, in both cases, `offset` is + // in `[0, self.len()]`. Thus: + // - The resulting pointer is in or one byte past the end of the + // same byte range as `self.ptr`. 
Since, by invariant, `self.ptr` + // addresses a byte range entirely contained within a single + // allocation, the pointer resulting from this operation is within + // or one byte past the end of that same allocation. + // - By invariant, `self.len() <= isize::MAX`. Since `offset <= + // self.len()`, `offset <= isize::MAX`. + // - By invariant, `self.ptr` addresses a byte range which does not + // wrap around the address space. This means that the base pointer + // plus the `self.len()` does not overflow `usize`. Since `offset + // <= self.len()`, this addition does not overflow `usize`. + let base = unsafe { ptr.add(offset) }; + // SAFETY: Since `add` is not allowed to wrap around, the preceding line + // produces a pointer whose address is greater than or equal to that of + // `ptr`. Since `ptr` is a `NonNull`, `base` is also non-null. + let base = unsafe { NonNull::new_unchecked(base) }; + let ptr = U::raw_from_ptr_len(base, elems); + // SAFETY: + // - By invariant, `self.ptr` is derived from some valid Rust + // allocation, `A`, and has the same provenance as `A`. All + // operations performed on `self.ptr` and values derived from it + // in this method preserve provenance, so: + // - `ptr` is derived from a valid Rust allocation, `A`. + // - `ptr` has the same provenance as `A`. + // - `validate_cast_and_convert_metadata` promises that the object + // described by `elems` and `split_at` lives at a byte range which + // is a subset of the input byte range. Thus: + // - Since, by invariant, `self.ptr` addresses a byte range + // entirely contained in `A`, so does `ptr`. + // - Since, by invariant, `self.ptr` addresses a range whose + // length is not longer than `isize::MAX` bytes, so does `ptr`. + // - Since, by invariant, `self.ptr` addresses a range which does + // not wrap around the address space, so does `ptr`. + // - `validate_cast_and_convert_metadata` promises that the object + // described by `split_at` is validly-aligned for `U`. 
+ // - By invariant on `self`, `A` is guaranteed to live for at least + // `'a`. + // - `U: 'a` by trait bound. + Some((Ptr { ptr, _lifetime: PhantomData }, split_at)) + } + + /// Attempts to cast `self` into a `U`, failing if all of the bytes of + /// `self` cannot be treated as a `U`. + /// + /// In particular, this method fails if `self` is not validly-aligned + /// for `U` or if `self`'s size is not a valid size for `U`. + /// + /// # Safety + /// + /// On success, the caller may assume that the returned pointer + /// references the same byte range as `self`. + #[allow(unused)] + #[inline(always)] + pub(crate) fn try_cast_into_no_leftover( + &self, + ) -> Option> { + // TODO(#67): Remove this allow. See NonNulSlicelExt for more + // details. + #[allow(unstable_name_collisions)] + match self.try_cast_into(_CastType::_Prefix) { + Some((slf, split_at)) if split_at == self.len() => Some(slf), + Some(_) | None => None, + } + } + } + + impl<'a, T> Ptr<'a, [T]> { + /// The number of slice elements referenced by `self`. + /// + /// # Safety + /// + /// Unsafe code my rely on `len` satisfying the above contract. + fn len(&self) -> usize { + #[allow(clippy::as_conversions)] + let slc = self.ptr.as_ptr() as *const [()]; + // SAFETY: + // - `()` has alignment 1, so `slc` is trivially aligned. + // - `slc` was derived from a non-null pointer. + // - The size is 0 regardless of the length, so it is sound to + // materialize a reference regardless of location. + // - By invariant, `self.ptr` has valid provenance. + let slc = unsafe { &*slc }; + // This is correct because the preceding `as` cast preserves the + // number of slice elements. Per + // https://doc.rust-lang.org/nightly/reference/expressions/operator-expr.html#slice-dst-pointer-to-pointer-cast: + // + // For slice types like `[T]` and `[U]`, the raw pointer types + // `*const [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode + // the number of elements in this slice. 
Casts between these raw + // pointer types preserve the number of elements. Note that, as a + // consequence, such casts do *not* necessarily preserve the size + // of the pointer's referent (e.g., casting `*const [u16]` to + // `*const [u8]` will result in a raw pointer which refers to an + // object of half the size of the original). The same holds for + // `str` and any compound type whose unsized tail is a slice type, + // such as struct `Foo(i32, [u8])` or `(u64, Foo)`. + // + // TODO(#429), + // TODO(https://github.com/rust-lang/reference/pull/1417): Once this + // text is available on the Stable docs, cite those instead of the + // Nightly docs. + slc.len() + } + + pub(crate) fn iter(&self) -> impl Iterator> { + // TODO(#429): Once `NonNull::cast` documents that it preserves + // provenance, cite those docs. + let base = self.ptr.cast::().as_ptr(); + (0..self.len()).map(move |i| { + // TODO(https://github.com/rust-lang/rust/issues/74265): Use + // `NonNull::get_unchecked_mut`. + + // SAFETY: If the following conditions are not satisfied + // `pointer::cast` may induce Undefined Behavior [1]: + // > 1. Both the starting and resulting pointer must be either + // > in bounds or one byte past the end of the same allocated + // > object. + // > 2. The computed offset, in bytes, cannot overflow an + // > `isize`. + // > 3. The offset being in bounds cannot rely on “wrapping + // > around” the address space. That is, the + // > infinite-precision sum must fit in a `usize`. + // + // [1] https://doc.rust-lang.org/std/primitive.pointer.html#method.add + // + // We satisfy all three of these conditions here: + // 1. `base` (by invariant on `self`) points to an allocated + // object. By contract, `self.len()` accurately reflects the + // number of elements in the slice. `i` is in bounds of + // `c.len()` by construction, and so the result of this + // addition cannot overflow past the end of the allocation + // referred to by `c`. + // 2. 
By invariant on `Ptr`, `self` addresses a byte range whose + // length fits in an `isize`. Since `elem` is contained in + // `self`, the computed offset of `elem` must fit within + // `isize.` + // 3. By invariant on `Ptr`, `self` addresses a byte range which + // does not wrap around the address space. Since `elem` is + // contained in `self`, the computed offset of `elem` must + // wrap around the address space. + // + // TODO(#429): Once `pointer::add` documents that it preserves + // provenance, cite those docs. + let elem = unsafe { base.add(i) }; + + // SAFETY: + // - `elem` must not be null. `base` is constructed from a + // `NonNull` pointer, and the addition that produces `elem` + // must not overflow or wrap around, so `elem >= base > 0`. + // + // TODO(#429): Once `NonNull::new_unchecked` documents that it + // preserves provenance, cite those docs. + let elem = unsafe { NonNull::new_unchecked(elem) }; + + // SAFETY: The safety invariants of `Ptr` (see definition) are + // satisfied: + // 1. `elem` is derived from a valid Rust allocation, because + // `self` is derived from a valid Rust allocation, by + // invariant on `Ptr` + // 2. `elem` has the same provenance as `self`, because it + // derived from `self` using a series of + // provenance-preserving operations + // 3. `elem` is entirely contained in the allocation of `self` + // (see above) + // 4. `elem` addresses a byte range whose length fits in an + // `isize` (see above) + // 5. `elem` addresses a byte range which does not wrap around + // the address space (see above) + // 6. `elem` is validly-aligned for `T`. `self`, which + // represents a `[T]` is validly aligned for `T`, and `elem` + // is an element within that `[T]` + // 7. The allocation of `elem` is guaranteed to live for at + // least `'a`, because `elem` is entirely contained in + // `self`, which lives for at least `'a` by invariant on + // `Ptr`. + // 8. 
`T: 'a`, because `elem` is an element within `[T]`, and + // `[T]: 'a` by invariant on `Ptr` + Ptr { ptr: elem, _lifetime: PhantomData } + }) + } + } + + impl<'a, T: 'a + ?Sized> From<&'a T> for Ptr<'a, T> { + #[inline(always)] + fn from(t: &'a T) -> Ptr<'a, T> { + // SAFETY: `t` points to a valid Rust allocation, `A`, by + // construction. Thus: + // - `ptr` is derived from `A` + // - Since we use `NonNull::from`, which preserves provenance, `ptr` + // has the same provenance as `A` + // - Since `NonNull::from` creates a pointer which addresses the + // same bytes as `t`, `ptr` addresses a byte range entirely + // contained in (in this case, identical to) `A` + // - Since `t: &T`, it addresses no more than `isize::MAX` bytes [1] + // - Since `t: &T`, it addresses a byte range which does not wrap + // around the address space [2] + // - Since it is constructed from a valid `&T`, `ptr` is + // validly-aligned for `T` + // - Since `t: &'a T`, the allocation `A` is guaranteed to live for + // at least `'a` + // - `T: 'a` by trait bound + // + // TODO(#429), + // TODO(https://github.com/rust-lang/rust/issues/116181): Once it's + // documented, reference the guarantee that `NonNull::from` + // preserves provenance. + // + // TODO(#429), + // TODO(https://github.com/rust-lang/unsafe-code-guidelines/issues/465): + // - [1] Where does the reference document that allocations fit in + // `isize`? + // - [2] Where does the reference document that allocations don't + // wrap around the address space? 
+ Ptr { ptr: NonNull::from(t), _lifetime: PhantomData } + } + } + + impl<'a, T: 'a + ?Sized> Debug for Ptr<'a, T> { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + self.ptr.fmt(f) + } + } + + #[cfg(test)] + mod tests { + use core::mem::{self, MaybeUninit}; + + use super::*; + use crate::{util::testutil::AU64, FromBytes}; + + #[test] + fn test_ptrtry_cast_into_soundness() { + // This test is designed so that if `Ptr::try_cast_into_xxx` are + // buggy, it will manifest as unsoundness that Miri can detect. + + // - If `size_of::() == 0`, `N == 4` + // - Else, `N == 4 * size_of::()` + fn test() { + let mut bytes = [MaybeUninit::::uninit(); N]; + let initialized = [MaybeUninit::new(0u8); N]; + for start in 0..=bytes.len() { + for end in start..=bytes.len() { + // Set all bytes to uninitialized other than those in + // the range we're going to pass to `try_cast_from`. + // This allows Miri to detect out-of-bounds reads + // because they read uninitialized memory. Without this, + // some out-of-bounds reads would still be in-bounds of + // `bytes`, and so might spuriously be accepted. + bytes = [MaybeUninit::::uninit(); N]; + let bytes = &mut bytes[start..end]; + // Initialize only the byte range we're going to pass to + // `try_cast_from`. + bytes.copy_from_slice(&initialized[start..end]); + + let bytes = { + let bytes: *const [MaybeUninit] = bytes; + #[allow(clippy::as_conversions)] + let bytes = bytes as *const [u8]; + // SAFETY: We just initialized these bytes to valid + // `u8`s. + unsafe { &*bytes } + }; + + /// # Safety + /// + /// - `slf` must reference a byte range which is + /// entirely initialized. + /// - `slf` must reference a byte range which is only + /// referenced by shared references which do not + /// contain `UnsafeCell`s during its lifetime. + unsafe fn validate_and_get_len( + slf: Ptr<'_, T>, + ) -> usize { + // SAFETY: + // - Since all bytes in `slf` are initialized and + // `T: FromBytes`, `slf` contains a valid `T`. 
+ // - The caller promises that the referenced memory + // is not also referenced by any mutable + // references. + // - The caller promises that the referenced memory + // is not also referenced as a type which contains + // `UnsafeCell`s. + let t = unsafe { slf.as_ref() }; + + let bytes = { + let len = mem::size_of_val(t); + let t: *const T = t; + // SAFETY: + // - We know `t`'s bytes are all initialized + // because we just read it from `slf`, which + // points to an initialized range of bytes. If + // there's a bug and this doesn't hold, then + // that's exactly what we're hoping Miri will + // catch! + // - Since `T: FromBytes`, `T` doesn't contain + // any `UnsafeCell`s, so it's okay for `t: T` + // and a `&[u8]` to the same memory to be + // alive concurrently. + unsafe { core::slice::from_raw_parts(t.cast::(), len) } + }; + + // This assertion ensures that `t`'s bytes are read + // and compared to another value, which in turn + // ensures that Miri gets a chance to notice if any + // of `t`'s bytes are uninitialized, which they + // shouldn't be (see the comment above). + assert_eq!(bytes, vec![0u8; bytes.len()]); + + mem::size_of_val(t) + } + + for cast_type in [_CastType::_Prefix, _CastType::_Suffix] { + if let Some((slf, split_at)) = + Ptr::from(bytes).try_cast_into::(cast_type) + { + // SAFETY: All bytes in `bytes` have been + // initialized. + let len = unsafe { validate_and_get_len(slf) }; + match cast_type { + _CastType::_Prefix => assert_eq!(split_at, len), + _CastType::_Suffix => assert_eq!(split_at, bytes.len() - len), + } + } + } + + if let Some(slf) = Ptr::from(bytes).try_cast_into_no_leftover::() { + // SAFETY: All bytes in `bytes` have been + // initialized. + let len = unsafe { validate_and_get_len(slf) }; + assert_eq!(len, bytes.len()); + } + } + } + } + + macro_rules! 
test { + ($($ty:ty),*) => { + $({ + const S: usize = core::mem::size_of::<$ty>(); + const N: usize = if S == 0 { 4 } else { S * 4 }; + test::(); + // We don't support casting into DSTs whose trailing slice + // element is a ZST. + if S > 0 { + test::(); + } + // TODO: Test with a slice DST once we have any that + // implement `KnownLayout + FromBytes`. + })* + }; + } + + test!(()); + test!(u8, u16, u32, u64, u128, usize, AU64); + test!(i8, i16, i32, i64, i128, isize); + test!(f32, f64); + } + } +} + +pub(crate) trait AsAddress { + fn addr(self) -> usize; +} + +impl<'a, T: ?Sized> AsAddress for &'a T { + #[inline(always)] + fn addr(self) -> usize { + let ptr: *const T = self; + AsAddress::addr(ptr) + } +} + +impl<'a, T: ?Sized> AsAddress for &'a mut T { + #[inline(always)] + fn addr(self) -> usize { + let ptr: *const T = self; + AsAddress::addr(ptr) + } +} + +impl AsAddress for *const T { + #[inline(always)] + fn addr(self) -> usize { + // TODO(#181), TODO(https://github.com/rust-lang/rust/issues/95228): Use + // `.addr()` instead of `as usize` once it's stable, and get rid of this + // `allow`. Currently, `as usize` is the only way to accomplish this. + #[allow(clippy::as_conversions)] + #[cfg_attr(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS, allow(lossy_provenance_casts))] + return self.cast::<()>() as usize; + } +} + +impl AsAddress for *mut T { + #[inline(always)] + fn addr(self) -> usize { + let ptr: *const T = self; + AsAddress::addr(ptr) + } +} + +/// Is `t` aligned to `mem::align_of::()`? +#[inline(always)] +pub(crate) fn aligned_to(t: T) -> bool { + // `mem::align_of::()` is guaranteed to return a non-zero value, which in + // turn guarantees that this mod operation will not panic. + #[allow(clippy::arithmetic_side_effects)] + let remainder = t.addr() % mem::align_of::(); + remainder == 0 +} + +/// Round `n` down to the largest value `m` such that `m <= n` and `m % align == +/// 0`. +/// +/// # Panics +/// +/// May panic if `align` is not a power of two. 
Even if it doesn't panic in this +/// case, it will produce nonsense results. +#[inline(always)] +pub(crate) const fn round_down_to_next_multiple_of_alignment( + n: usize, + align: NonZeroUsize, +) -> usize { + let align = align.get(); + debug_assert!(align.is_power_of_two()); + + // Subtraction can't underflow because `align.get() >= 1`. + #[allow(clippy::arithmetic_side_effects)] + let mask = !(align - 1); + n & mask +} + +pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize { + if a.get() < b.get() { + b + } else { + a + } +} + +pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize { + if a.get() > b.get() { + b + } else { + a + } +} + +/// Since we support multiple versions of Rust, there are often features which +/// have been stabilized in the most recent stable release which do not yet +/// exist (stably) on our MSRV. This module provides polyfills for those +/// features so that we can write more "modern" code, and just remove the +/// polyfill once our MSRV supports the corresponding feature. Without this, +/// we'd have to write worse/more verbose code and leave TODO comments sprinkled +/// throughout the codebase to update to the new pattern once it's stabilized. +/// +/// Each trait is imported as `_` at the crate root; each polyfill should "just +/// work" at usage sites. +pub(crate) mod polyfills { + use core::ptr::{self, NonNull}; + + // A polyfill for `NonNull::slice_from_raw_parts` that we can use before our + // MSRV is 1.70, when that function was stabilized. + // + // TODO(#67): Once our MSRV is 1.70, remove this. + #[allow(unused)] + pub(crate) trait NonNullExt { + fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>; + } + + #[allow(unused)] + impl NonNullExt for NonNull { + #[inline(always)] + fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> { + let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len); + // SAFETY: `ptr` is converted from `data`, which is non-null. 
+ unsafe { NonNull::new_unchecked(ptr) } + } + } +} + +#[cfg(test)] +pub(crate) mod testutil { + use core::fmt::{self, Display, Formatter}; + + use crate::*; + + /// A `T` which is aligned to at least `align_of::()`. + #[derive(Default)] + pub(crate) struct Align { + pub(crate) t: T, + _a: [A; 0], + } + + impl Align { + pub(crate) fn set_default(&mut self) { + self.t = T::default(); + } + } + + impl Align { + pub(crate) const fn new(t: T) -> Align { + Align { t, _a: [] } + } + } + + // A `u64` with alignment 8. + // + // Though `u64` has alignment 8 on some platforms, it's not guaranteed. + // By contrast, `AU64` is guaranteed to have alignment 8. + #[derive( + KnownLayout, + FromZeroes, + FromBytes, + AsBytes, + Eq, + PartialEq, + Ord, + PartialOrd, + Default, + Debug, + Copy, + Clone, + )] + #[repr(C, align(8))] + pub(crate) struct AU64(pub(crate) u64); + + impl AU64 { + // Converts this `AU64` to bytes using this platform's endianness. + pub(crate) fn to_bytes(self) -> [u8; 8] { + crate::transmute!(self) + } + } + + impl Display for AU64 { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } + } + + #[derive( + FromZeroes, FromBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone, + )] + #[repr(C)] + pub(crate) struct Nested { + _t: T, + _u: U, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_round_down_to_next_multiple_of_alignment() { + fn alt_impl(n: usize, align: NonZeroUsize) -> usize { + let mul = n / align.get(); + mul * align.get() + } + + for align in [1, 2, 4, 8, 16] { + for n in 0..256 { + let align = NonZeroUsize::new(align).unwrap(); + let want = alt_impl(n, align); + let got = round_down_to_next_multiple_of_alignment(n, align); + assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({n}, {align})"); + } + } + } +} + +#[cfg(kani)] +mod proofs { + use super::*; + + #[kani::proof] + fn prove_round_down_to_next_multiple_of_alignment() { + fn model_impl(n: usize, align: 
NonZeroUsize) -> usize { + assert!(align.get().is_power_of_two()); + let mul = n / align.get(); + mul * align.get() + } + + let align: NonZeroUsize = kani::any(); + kani::assume(align.get().is_power_of_two()); + let n: usize = kani::any(); + + let expected = model_impl(n, align); + let actual = round_down_to_next_multiple_of_alignment(n, align); + assert_eq!(expected, actual, "round_down_to_next_multiple_of_alignment({n}, {align})"); + } + + // Restricted to nightly since we use the unstable `usize::next_multiple_of` + // in our model implementation. + #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)] + #[kani::proof] + fn prove_padding_needed_for() { + fn model_impl(len: usize, align: NonZeroUsize) -> usize { + let padded = len.next_multiple_of(align.get()); + let padding = padded - len; + padding + } + + let align: NonZeroUsize = kani::any(); + kani::assume(align.get().is_power_of_two()); + let len: usize = kani::any(); + // Constrain `len` to valid Rust lengths, since our model implementation + // isn't robust to overflow. + kani::assume(len <= isize::MAX as usize); + kani::assume(align.get() < 1 << 29); + + let expected = model_impl(len, align); + let actual = core_layout::padding_needed_for(len, align); + assert_eq!(expected, actual, "padding_needed_for({len}, {align})"); + + let padded_len = actual + len; + assert_eq!(padded_len % align, 0); + assert!(padded_len / align >= len / align); + } +} diff --git a/src/rust/vendor/zerocopy/src/wrappers.rs b/src/rust/vendor/zerocopy/src/wrappers.rs new file mode 100644 index 000000000..6532bb463 --- /dev/null +++ b/src/rust/vendor/zerocopy/src/wrappers.rs @@ -0,0 +1,503 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +use core::{ + cmp::Ordering, + fmt::{self, Debug, Display, Formatter}, + hash::Hash, + mem::{self, ManuallyDrop}, + ops::{Deref, DerefMut}, + ptr, +}; + +use super::*; + +/// A type with no alignment requirement. +/// +/// An `Unalign` wraps a `T`, removing any alignment requirement. `Unalign` +/// has the same size and bit validity as `T`, but not necessarily the same +/// alignment [or ABI]. This is useful if a type with an alignment requirement +/// needs to be read from a chunk of memory which provides no alignment +/// guarantees. +/// +/// Since `Unalign` has no alignment requirement, the inner `T` may not be +/// properly aligned in memory. There are five ways to access the inner `T`: +/// - by value, using [`get`] or [`into_inner`] +/// - by reference inside of a callback, using [`update`] +/// - fallibly by reference, using [`try_deref`] or [`try_deref_mut`]; these can +/// fail if the `Unalign` does not satisfy `T`'s alignment requirement at +/// runtime +/// - unsafely by reference, using [`deref_unchecked`] or +/// [`deref_mut_unchecked`]; it is the caller's responsibility to ensure that +/// the `Unalign` satisfies `T`'s alignment requirement +/// - (where `T: Unaligned`) infallibly by reference, using [`Deref::deref`] or +/// [`DerefMut::deref_mut`] +/// +/// [or ABI]: https://github.com/google/zerocopy/issues/164 +/// [`get`]: Unalign::get +/// [`into_inner`]: Unalign::into_inner +/// [`update`]: Unalign::update +/// [`try_deref`]: Unalign::try_deref +/// [`try_deref_mut`]: Unalign::try_deref_mut +/// [`deref_unchecked`]: Unalign::deref_unchecked +/// [`deref_mut_unchecked`]: Unalign::deref_mut_unchecked +// NOTE: This type is sound to use with types that need to be dropped. The +// reason is that the compiler-generated drop code automatically moves all +// values to aligned memory slots before dropping them in-place. This is not +// well-documented, but it's hinted at in places like [1] and [2]. 
However, this +// also means that `T` must be `Sized`; unless something changes, we can never +// support unsized `T`. [3] +// +// [1] https://github.com/rust-lang/rust/issues/54148#issuecomment-420529646 +// [2] https://github.com/google/zerocopy/pull/126#discussion_r1018512323 +// [3] https://github.com/google/zerocopy/issues/209 +#[allow(missing_debug_implementations)] +#[derive(Default, Copy)] +#[cfg_attr( + any(feature = "derive", test), + derive(KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned) +)] +#[repr(C, packed)] +pub struct Unalign(T); + +#[cfg(not(any(feature = "derive", test)))] +impl_known_layout!(T => Unalign); + +safety_comment! { + /// SAFETY: + /// - `Unalign` is `repr(packed)`, so it is unaligned regardless of the + /// alignment of `T`, and so we don't require that `T: Unaligned` + /// - `Unalign` has the same bit validity as `T`, and so it is + /// `FromZeroes`, `FromBytes`, or `AsBytes` exactly when `T` is as well. + impl_or_verify!(T => Unaligned for Unalign); + impl_or_verify!(T: FromZeroes => FromZeroes for Unalign); + impl_or_verify!(T: FromBytes => FromBytes for Unalign); + impl_or_verify!(T: AsBytes => AsBytes for Unalign); +} + +// Note that `Unalign: Clone` only if `T: Copy`. Since the inner `T` may not be +// aligned, there's no way to safely call `T::clone`, and so a `T: Clone` bound +// is not sufficient to implement `Clone` for `Unalign`. +impl Clone for Unalign { + #[inline(always)] + fn clone(&self) -> Unalign { + *self + } +} + +impl Unalign { + /// Constructs a new `Unalign`. + #[inline(always)] + pub const fn new(val: T) -> Unalign { + Unalign(val) + } + + /// Consumes `self`, returning the inner `T`. + #[inline(always)] + pub const fn into_inner(self) -> T { + // Use this instead of `mem::transmute` since the latter can't tell + // that `Unalign` and `T` have the same size. 
+ #[repr(C)] + union Transmute { + u: ManuallyDrop>, + t: ManuallyDrop, + } + + // SAFETY: Since `Unalign` is `#[repr(C, packed)]`, it has the same + // layout as `T`. `ManuallyDrop` is guaranteed to have the same + // layout as `U`, and so `ManuallyDrop>` has the same layout + // as `ManuallyDrop`. Since `Transmute` is `#[repr(C)]`, its `t` + // and `u` fields both start at the same offset (namely, 0) within the + // union. + // + // We do this instead of just destructuring in order to prevent + // `Unalign`'s `Drop::drop` from being run, since dropping is not + // supported in `const fn`s. + // + // TODO(https://github.com/rust-lang/rust/issues/73255): Destructure + // instead of using unsafe. + unsafe { ManuallyDrop::into_inner(Transmute { u: ManuallyDrop::new(self) }.t) } + } + + /// Attempts to return a reference to the wrapped `T`, failing if `self` is + /// not properly aligned. + /// + /// If `self` does not satisfy `mem::align_of::()`, then it is unsound to + /// return a reference to the wrapped `T`, and `try_deref` returns `None`. + /// + /// If `T: Unaligned`, then `Unalign` implements [`Deref`], and callers + /// may prefer [`Deref::deref`], which is infallible. + #[inline(always)] + pub fn try_deref(&self) -> Option<&T> { + if !util::aligned_to::<_, T>(self) { + return None; + } + + // SAFETY: `deref_unchecked`'s safety requirement is that `self` is + // aligned to `align_of::()`, which we just checked. + unsafe { Some(self.deref_unchecked()) } + } + + /// Attempts to return a mutable reference to the wrapped `T`, failing if + /// `self` is not properly aligned. + /// + /// If `self` does not satisfy `mem::align_of::()`, then it is unsound to + /// return a reference to the wrapped `T`, and `try_deref_mut` returns + /// `None`. + /// + /// If `T: Unaligned`, then `Unalign` implements [`DerefMut`], and + /// callers may prefer [`DerefMut::deref_mut`], which is infallible. 
+ #[inline(always)] + pub fn try_deref_mut(&mut self) -> Option<&mut T> { + if !util::aligned_to::<_, T>(&*self) { + return None; + } + + // SAFETY: `deref_mut_unchecked`'s safety requirement is that `self` is + // aligned to `align_of::()`, which we just checked. + unsafe { Some(self.deref_mut_unchecked()) } + } + + /// Returns a reference to the wrapped `T` without checking alignment. + /// + /// If `T: Unaligned`, then `Unalign` implements[ `Deref`], and callers + /// may prefer [`Deref::deref`], which is safe. + /// + /// # Safety + /// + /// If `self` does not satisfy `mem::align_of::()`, then + /// `self.deref_unchecked()` may cause undefined behavior. + #[inline(always)] + pub const unsafe fn deref_unchecked(&self) -> &T { + // SAFETY: `Unalign` is `repr(transparent)`, so there is a valid `T` + // at the same memory location as `self`. It has no alignment guarantee, + // but the caller has promised that `self` is properly aligned, so we + // know that it is sound to create a reference to `T` at this memory + // location. + // + // We use `mem::transmute` instead of `&*self.get_ptr()` because + // dereferencing pointers is not stable in `const` on our current MSRV + // (1.56 as of this writing). + unsafe { mem::transmute(self) } + } + + /// Returns a mutable reference to the wrapped `T` without checking + /// alignment. + /// + /// If `T: Unaligned`, then `Unalign` implements[ `DerefMut`], and + /// callers may prefer [`DerefMut::deref_mut`], which is safe. + /// + /// # Safety + /// + /// If `self` does not satisfy `mem::align_of::()`, then + /// `self.deref_mut_unchecked()` may cause undefined behavior. + #[inline(always)] + pub unsafe fn deref_mut_unchecked(&mut self) -> &mut T { + // SAFETY: `self.get_mut_ptr()` returns a raw pointer to a valid `T` at + // the same memory location as `self`. 
It has no alignment guarantee, + // but the caller has promised that `self` is properly aligned, so we + // know that the pointer itself is aligned, and thus that it is sound to + // create a reference to a `T` at this memory location. + unsafe { &mut *self.get_mut_ptr() } + } + + /// Gets an unaligned raw pointer to the inner `T`. + /// + /// # Safety + /// + /// The returned raw pointer is not necessarily aligned to + /// `align_of::()`. Most functions which operate on raw pointers require + /// those pointers to be aligned, so calling those functions with the result + /// of `get_ptr` will be undefined behavior if alignment is not guaranteed + /// using some out-of-band mechanism. In general, the only functions which + /// are safe to call with this pointer are those which are explicitly + /// documented as being sound to use with an unaligned pointer, such as + /// [`read_unaligned`]. + /// + /// [`read_unaligned`]: core::ptr::read_unaligned + #[inline(always)] + pub const fn get_ptr(&self) -> *const T { + ptr::addr_of!(self.0) + } + + /// Gets an unaligned mutable raw pointer to the inner `T`. + /// + /// # Safety + /// + /// The returned raw pointer is not necessarily aligned to + /// `align_of::()`. Most functions which operate on raw pointers require + /// those pointers to be aligned, so calling those functions with the result + /// of `get_ptr` will be undefined behavior if alignment is not guaranteed + /// using some out-of-band mechanism. In general, the only functions which + /// are safe to call with this pointer are those which are explicitly + /// documented as being sound to use with an unaligned pointer, such as + /// [`read_unaligned`]. + /// + /// [`read_unaligned`]: core::ptr::read_unaligned + // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`. + #[inline(always)] + pub fn get_mut_ptr(&mut self) -> *mut T { + ptr::addr_of_mut!(self.0) + } + + /// Sets the inner `T`, dropping the previous value. 
+ // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`. + #[inline(always)] + pub fn set(&mut self, t: T) { + *self = Unalign::new(t); + } + + /// Updates the inner `T` by calling a function on it. + /// + /// If [`T: Unaligned`], then `Unalign` implements [`DerefMut`], and that + /// impl should be preferred over this method when performing updates, as it + /// will usually be faster and more ergonomic. + /// + /// For large types, this method may be expensive, as it requires copying + /// `2 * size_of::()` bytes. \[1\] + /// + /// \[1\] Since the inner `T` may not be aligned, it would not be sound to + /// invoke `f` on it directly. Instead, `update` moves it into a + /// properly-aligned location in the local stack frame, calls `f` on it, and + /// then moves it back to its original location in `self`. + /// + /// [`T: Unaligned`]: Unaligned + #[inline] + pub fn update O>(&mut self, f: F) -> O { + // On drop, this moves `copy` out of itself and uses `ptr::write` to + // overwrite `slf`. + struct WriteBackOnDrop { + copy: ManuallyDrop, + slf: *mut Unalign, + } + + impl Drop for WriteBackOnDrop { + fn drop(&mut self) { + // SAFETY: We never use `copy` again as required by + // `ManuallyDrop::take`. + let copy = unsafe { ManuallyDrop::take(&mut self.copy) }; + // SAFETY: `slf` is the raw pointer value of `self`. We know it + // is valid for writes and properly aligned because `self` is a + // mutable reference, which guarantees both of these properties. + unsafe { ptr::write(self.slf, Unalign::new(copy)) }; + } + } + + // SAFETY: We know that `self` is valid for reads, properly aligned, and + // points to an initialized `Unalign` because it is a mutable + // reference, which guarantees all of these properties. + // + // Since `T: !Copy`, it would be unsound in the general case to allow + // both the original `Unalign` and the copy to be used by safe code. 
+ // We guarantee that the copy is used to overwrite the original in the + // `Drop::drop` impl of `WriteBackOnDrop`. So long as this `drop` is + // called before any other safe code executes, soundness is upheld. + // While this method can terminate in two ways (by returning normally or + // by unwinding due to a panic in `f`), in both cases, `write_back` is + // dropped - and its `drop` called - before any other safe code can + // execute. + let copy = unsafe { ptr::read(self) }.into_inner(); + let mut write_back = WriteBackOnDrop { copy: ManuallyDrop::new(copy), slf: self }; + + let ret = f(&mut write_back.copy); + + drop(write_back); + ret + } +} + +impl Unalign { + /// Gets a copy of the inner `T`. + // TODO(https://github.com/rust-lang/rust/issues/57349): Make this `const`. + #[inline(always)] + pub fn get(&self) -> T { + let Unalign(val) = *self; + val + } +} + +impl Deref for Unalign { + type Target = T; + + #[inline(always)] + fn deref(&self) -> &T { + // SAFETY: `deref_unchecked`'s safety requirement is that `self` is + // aligned to `align_of::()`. `T: Unaligned` guarantees that + // `align_of::() == 1`, and all pointers are one-aligned because all + // addresses are divisible by 1. + unsafe { self.deref_unchecked() } + } +} + +impl DerefMut for Unalign { + #[inline(always)] + fn deref_mut(&mut self) -> &mut T { + // SAFETY: `deref_mut_unchecked`'s safety requirement is that `self` is + // aligned to `align_of::()`. `T: Unaligned` guarantees that + // `align_of::() == 1`, and all pointers are one-aligned because all + // addresses are divisible by 1. 
+ unsafe { self.deref_mut_unchecked() } + } +} + +impl PartialOrd> for Unalign { + #[inline(always)] + fn partial_cmp(&self, other: &Unalign) -> Option { + PartialOrd::partial_cmp(self.deref(), other.deref()) + } +} + +impl Ord for Unalign { + #[inline(always)] + fn cmp(&self, other: &Unalign) -> Ordering { + Ord::cmp(self.deref(), other.deref()) + } +} + +impl PartialEq> for Unalign { + #[inline(always)] + fn eq(&self, other: &Unalign) -> bool { + PartialEq::eq(self.deref(), other.deref()) + } +} + +impl Eq for Unalign {} + +impl Hash for Unalign { + #[inline(always)] + fn hash(&self, state: &mut H) + where + H: Hasher, + { + self.deref().hash(state); + } +} + +impl Debug for Unalign { + #[inline(always)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Debug::fmt(self.deref(), f) + } +} + +impl Display for Unalign { + #[inline(always)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(self.deref(), f) + } +} + +#[cfg(test)] +mod tests { + use core::panic::AssertUnwindSafe; + + use super::*; + use crate::util::testutil::*; + + /// A `T` which is guaranteed not to satisfy `align_of::()`. + /// + /// It must be the case that `align_of::() < align_of::()` in order + /// fot this type to work properly. + #[repr(C)] + struct ForceUnalign { + // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is + // placed at the minimum offset that guarantees its alignment. If + // `align_of::() < align_of::()`, then that offset will be + // guaranteed *not* to satisfy `align_of::()`. + _u: u8, + t: T, + _a: [A; 0], + } + + impl ForceUnalign { + const fn new(t: T) -> ForceUnalign { + ForceUnalign { _u: 0, t, _a: [] } + } + } + + #[test] + fn test_unalign() { + // Test methods that don't depend on alignment. 
+ let mut u = Unalign::new(AU64(123)); + assert_eq!(u.get(), AU64(123)); + assert_eq!(u.into_inner(), AU64(123)); + assert_eq!(u.get_ptr(), <*const _>::cast::(&u)); + assert_eq!(u.get_mut_ptr(), <*mut _>::cast::(&mut u)); + u.set(AU64(321)); + assert_eq!(u.get(), AU64(321)); + + // Test methods that depend on alignment (when alignment is satisfied). + let mut u: Align<_, AU64> = Align::new(Unalign::new(AU64(123))); + assert_eq!(u.t.try_deref(), Some(&AU64(123))); + assert_eq!(u.t.try_deref_mut(), Some(&mut AU64(123))); + // SAFETY: The `Align<_, AU64>` guarantees proper alignment. + assert_eq!(unsafe { u.t.deref_unchecked() }, &AU64(123)); + // SAFETY: The `Align<_, AU64>` guarantees proper alignment. + assert_eq!(unsafe { u.t.deref_mut_unchecked() }, &mut AU64(123)); + *u.t.try_deref_mut().unwrap() = AU64(321); + assert_eq!(u.t.get(), AU64(321)); + + // Test methods that depend on alignment (when alignment is not + // satisfied). + let mut u: ForceUnalign<_, AU64> = ForceUnalign::new(Unalign::new(AU64(123))); + assert_eq!(u.t.try_deref(), None); + assert_eq!(u.t.try_deref_mut(), None); + + // Test methods that depend on `T: Unaligned`. + let mut u = Unalign::new(123u8); + assert_eq!(u.try_deref(), Some(&123)); + assert_eq!(u.try_deref_mut(), Some(&mut 123)); + assert_eq!(u.deref(), &123); + assert_eq!(u.deref_mut(), &mut 123); + *u = 21; + assert_eq!(u.get(), 21); + + // Test that some `Unalign` functions and methods are `const`. + const _UNALIGN: Unalign = Unalign::new(0); + const _UNALIGN_PTR: *const u64 = _UNALIGN.get_ptr(); + const _U64: u64 = _UNALIGN.into_inner(); + // Make sure all code is considered "used". + // + // TODO(https://github.com/rust-lang/rust/issues/104084): Remove this + // attribute. + #[allow(dead_code)] + const _: () = { + let x: Align<_, AU64> = Align::new(Unalign::new(AU64(123))); + // Make sure that `deref_unchecked` is `const`. + // + // SAFETY: The `Align<_, AU64>` guarantees proper alignment. 
+ let au64 = unsafe { x.t.deref_unchecked() }; + match au64 { + AU64(123) => {} + _ => unreachable!(), + } + }; + } + + #[test] + fn test_unalign_update() { + let mut u = Unalign::new(AU64(123)); + u.update(|a| a.0 += 1); + assert_eq!(u.get(), AU64(124)); + + // Test that, even if the callback panics, the original is still + // correctly overwritten. Use a `Box` so that Miri is more likely to + // catch any unsoundness (which would likely result in two `Box`es for + // the same heap object, which is the sort of thing that Miri would + // probably catch). + let mut u = Unalign::new(Box::new(AU64(123))); + let res = std::panic::catch_unwind(AssertUnwindSafe(|| { + u.update(|a| { + a.0 += 1; + panic!(); + }) + })); + assert!(res.is_err()); + assert_eq!(u.into_inner(), Box::new(AU64(124))); + } +} diff --git a/src/rust/vendor/zerocopy/testdata/include_value/data b/src/rust/vendor/zerocopy/testdata/include_value/data new file mode 100644 index 000000000..85df50785 --- /dev/null +++ b/src/rust/vendor/zerocopy/testdata/include_value/data @@ -0,0 +1 @@ +abcd \ No newline at end of file diff --git a/src/rust/vendor/zerocopy/tests/trybuild.rs b/src/rust/vendor/zerocopy/tests/trybuild.rs new file mode 100644 index 000000000..24abc2862 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/trybuild.rs @@ -0,0 +1,41 @@ +// Copyright 2019 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use testutil::ToolchainVersion; + +#[test] +#[cfg_attr(miri, ignore)] +fn ui() { + let version = ToolchainVersion::extract_from_pwd().unwrap(); + // See the doc comment on this method for an explanation of what this does + // and why we store source files in different directories. 
+ let source_files_dirname = version.get_ui_source_files_dirname_and_maybe_print_warning(); + + let t = trybuild::TestCases::new(); + t.compile_fail(format!("tests/{source_files_dirname}/*.rs")); +} + +// The file `invalid-impls.rs` directly includes `src/macros.rs` in order to +// test the `impl_or_verify!` macro which is defined in that file. Specifically, +// it tests the verification portion of that macro, which is enabled when +// `cfg(any(feature = "derive", test))`. While `--cfg test` is of course passed +// to the code in the file you're reading right now, `trybuild` does not pass +// `--cfg test` when it invokes Cargo. As a result, this `trybuild` test only +// tests the correct behavior when the "derive" feature is enabled. +#[cfg(feature = "derive")] +#[test] +#[cfg_attr(miri, ignore)] +fn ui_invalid_impls() { + let version = ToolchainVersion::extract_from_pwd().unwrap(); + // See the doc comment on this method for an explanation of what this does + // and why we store source files in different directories. + let source_files_dirname = version.get_ui_source_files_dirname_and_maybe_print_warning(); + + let t = trybuild::TestCases::new(); + t.compile_fail(format!("tests/{source_files_dirname}/invalid-impls/*.rs")); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_not_from_bytes.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_not_from_bytes.rs new file mode 100644 index 000000000..45b6138f4 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_not_from_bytes.rs @@ -0,0 +1,12 @@ +// Copyright 2022 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +// Should fail because `UnsafeCell: !FromBytes`. 
+const NOT_FROM_BYTES: core::cell::UnsafeCell = + include_value!("../../testdata/include_value/data"); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_not_from_bytes.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_not_from_bytes.stderr new file mode 100644 index 000000000..21f6443bb --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_not_from_bytes.stderr @@ -0,0 +1,12 @@ +error[E0277]: the trait bound `UnsafeCell: FromBytes` is not satisfied + --> tests/ui-msrv/include_value_not_from_bytes.rs:12:5 + | +12 | include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `UnsafeCell` + | +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-msrv/include_value_not_from_bytes.rs:12:5 + | +12 | include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `$crate::transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_wrong_size.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_wrong_size.rs new file mode 100644 index 000000000..d87b30698 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_wrong_size.rs @@ -0,0 +1,11 @@ +// Copyright 2022 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +// Should fail because the file is 4 bytes long, not 8. 
+const WRONG_SIZE: u64 = include_value!("../../testdata/include_value/data"); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_wrong_size.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_wrong_size.stderr new file mode 100644 index 000000000..30045849d --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/include_value_wrong_size.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/include_value_wrong_size.rs:11:25 + | +11 | const WRONG_SIZE: u64 = include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 4]` (32 bits) + = note: target type: `u64` (64 bits) + = note: this error originates in the macro `$crate::transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/invalid-impls/invalid-impls.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/invalid-impls/invalid-impls.rs new file mode 100644 index 000000000..ea963907d --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/invalid-impls/invalid-impls.rs @@ -0,0 +1,29 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +// Since some macros from `macros.rs` are unused. 
+#![allow(unused)] + +extern crate zerocopy; +extern crate zerocopy_derive; + +include!("../../../src/macros.rs"); + +use zerocopy::*; +use zerocopy_derive::*; + +fn main() {} + +#[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] +#[repr(transparent)] +struct Foo(T); + +impl_or_verify!(T => FromZeroes for Foo); +impl_or_verify!(T => FromBytes for Foo); +impl_or_verify!(T => AsBytes for Foo); +impl_or_verify!(T => Unaligned for Foo); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/invalid-impls/invalid-impls.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/invalid-impls/invalid-impls.stderr new file mode 100644 index 000000000..c1de466ec --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/invalid-impls/invalid-impls.stderr @@ -0,0 +1,127 @@ +error[E0277]: the trait bound `T: zerocopy::FromZeroes` is not satisfied + --> tests/ui-msrv/invalid-impls/../../../src/macros.rs + | + | impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {} + | ^^^^^^^^ the trait `zerocopy::FromZeroes` is not implemented for `T` + | + ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:26:1 + | +26 | impl_or_verify!(T => FromZeroes for Foo); + | ------------------------------------------- in this macro invocation + | +note: required because of the requirements on the impl of `zerocopy::FromZeroes` for `Foo` + --> tests/ui-msrv/invalid-impls/invalid-impls.rs:22:10 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^^^^ +note: required by a bound in `_::Subtrait` + --> tests/ui-msrv/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `_::Subtrait` + | + ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:26:1 + | +26 | impl_or_verify!(T => FromZeroes for Foo); + | ------------------------------------------- in this macro invocation + = note: this error originates in the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type 
parameter `T` + | +26 | impl_or_verify!(T: zerocopy::FromZeroes => FromZeroes for Foo); + | ++++++++++++++++++++++ + +error[E0277]: the trait bound `T: zerocopy::FromBytes` is not satisfied + --> tests/ui-msrv/invalid-impls/../../../src/macros.rs + | + | impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {} + | ^^^^^^^^ the trait `zerocopy::FromBytes` is not implemented for `T` + | + ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:27:1 + | +27 | impl_or_verify!(T => FromBytes for Foo); + | ------------------------------------------ in this macro invocation + | +note: required because of the requirements on the impl of `zerocopy::FromBytes` for `Foo` + --> tests/ui-msrv/invalid-impls/invalid-impls.rs:22:22 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^^^ +note: required by a bound in `_::Subtrait` + --> tests/ui-msrv/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `_::Subtrait` + | + ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:27:1 + | +27 | impl_or_verify!(T => FromBytes for Foo); + | ------------------------------------------ in this macro invocation + = note: this error originates in the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +27 | impl_or_verify!(T: zerocopy::FromBytes => FromBytes for Foo); + | +++++++++++++++++++++ + +error[E0277]: the trait bound `T: zerocopy::AsBytes` is not satisfied + --> tests/ui-msrv/invalid-impls/../../../src/macros.rs + | + | impl<$($tyvar $(: $(? 
$optbound +)* $($bound +)*)?),*> Subtrait for $ty {} + | ^^^^^^^^ the trait `zerocopy::AsBytes` is not implemented for `T` + | + ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:28:1 + | +28 | impl_or_verify!(T => AsBytes for Foo); + | ---------------------------------------- in this macro invocation + | +note: required because of the requirements on the impl of `zerocopy::AsBytes` for `Foo` + --> tests/ui-msrv/invalid-impls/invalid-impls.rs:22:33 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^ +note: required by a bound in `_::Subtrait` + --> tests/ui-msrv/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `_::Subtrait` + | + ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:28:1 + | +28 | impl_or_verify!(T => AsBytes for Foo); + | ---------------------------------------- in this macro invocation + = note: this error originates in the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +28 | impl_or_verify!(T: zerocopy::AsBytes => AsBytes for Foo); + | +++++++++++++++++++ + +error[E0277]: the trait bound `T: zerocopy::Unaligned` is not satisfied + --> tests/ui-msrv/invalid-impls/../../../src/macros.rs + | + | impl<$($tyvar $(: $(? 
$optbound +)* $($bound +)*)?),*> Subtrait for $ty {} + | ^^^^^^^^ the trait `zerocopy::Unaligned` is not implemented for `T` + | + ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:29:1 + | +29 | impl_or_verify!(T => Unaligned for Foo); + | ------------------------------------------ in this macro invocation + | +note: required because of the requirements on the impl of `zerocopy::Unaligned` for `Foo` + --> tests/ui-msrv/invalid-impls/invalid-impls.rs:22:42 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^^^ +note: required by a bound in `_::Subtrait` + --> tests/ui-msrv/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `_::Subtrait` + | + ::: tests/ui-msrv/invalid-impls/invalid-impls.rs:29:1 + | +29 | impl_or_verify!(T => Unaligned for Foo); + | ------------------------------------------ in this macro invocation + = note: this error originates in the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +29 | impl_or_verify!(T: zerocopy::Unaligned => Unaligned for Foo); + | +++++++++++++++++++++ diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/max-align.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/max-align.rs new file mode 100644 index 000000000..53e3eb9b0 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/max-align.rs @@ -0,0 +1,99 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +#[repr(C, align(1))] +struct Align1; + +#[repr(C, align(2))] +struct Align2; + +#[repr(C, align(4))] +struct Align4; + +#[repr(C, align(8))] +struct Align8; + +#[repr(C, align(16))] +struct Align16; + +#[repr(C, align(32))] +struct Align32; + +#[repr(C, align(64))] +struct Align64; + +#[repr(C, align(128))] +struct Align128; + +#[repr(C, align(256))] +struct Align256; + +#[repr(C, align(512))] +struct Align512; + +#[repr(C, align(1024))] +struct Align1024; + +#[repr(C, align(2048))] +struct Align2048; + +#[repr(C, align(4096))] +struct Align4096; + +#[repr(C, align(8192))] +struct Align8192; + +#[repr(C, align(16384))] +struct Align16384; + +#[repr(C, align(32768))] +struct Align32768; + +#[repr(C, align(65536))] +struct Align65536; + +#[repr(C, align(131072))] +struct Align131072; + +#[repr(C, align(262144))] +struct Align262144; + +#[repr(C, align(524288))] +struct Align524288; + +#[repr(C, align(1048576))] +struct Align1048576; + +#[repr(C, align(2097152))] +struct Align2097152; + +#[repr(C, align(4194304))] +struct Align4194304; + +#[repr(C, align(8388608))] +struct Align8388608; + +#[repr(C, align(16777216))] +struct Align16777216; + +#[repr(C, align(33554432))] +struct Align33554432; + +#[repr(C, align(67108864))] +struct Align67108864; + +#[repr(C, align(134217728))] +struct Align13421772; + +#[repr(C, align(268435456))] +struct Align26843545; + +#[repr(C, align(1073741824))] +struct Align1073741824; + +fn main() {} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/max-align.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/max-align.stderr new file mode 100644 index 000000000..6ab6e47e2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/max-align.stderr @@ -0,0 +1,5 @@ +error[E0589]: invalid `repr(align)` attribute: larger than 2^29 + --> tests/ui-msrv/max-align.rs:96:11 + | +96 | #[repr(C, align(1073741824))] + | ^^^^^^^^^^^^^^^^^ diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-dst-not-frombytes.rs 
b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-dst-not-frombytes.rs new file mode 100644 index 000000000..c4caaff91 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-dst-not-frombytes.rs @@ -0,0 +1,18 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-dst-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-dst-not-frombytes.stderr new file mode 100644 index 000000000..b4afbbd60 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-dst-not-frombytes.stderr @@ -0,0 +1,12 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-msrv/transmute-dst-not-frombytes.rs:18:41 + | +18 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-msrv/transmute-dst-not-frombytes.rs:18:41 + | +18 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-alignment-increase.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-alignment-increase.rs new file mode 100644 index 000000000..0928564dd --- /dev/null +++ 
b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-alignment-increase.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a type of smaller +// alignment to one of larger alignment. +const INCREASE_ALIGNMENT: &mut AU16 = transmute_mut!(&mut [0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-alignment-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-alignment-increase.stderr new file mode 100644 index 000000000..033031c91 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-alignment-increase.stderr @@ -0,0 +1,36 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-mut-alignment-increase.rs:19:39 + | +19 | const INCREASE_ALIGNMENT: &mut AU16 = transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf<[u8; 2]>` (8 bits) + = note: target type: `MaxAlignsOf<[u8; 2], AU16>` (16 bits) + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0658]: mutable references are not allowed in constants + --> tests/ui-msrv/transmute-mut-alignment-increase.rs:19:54 + | +19 | const INCREASE_ALIGNMENT: &mut AU16 = transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^ + | + = note: see issue #57349 for more information + +error[E0015]: cannot call non-const fn `transmute_mut::<[u8; 2], AU16>` in constants + --> tests/ui-msrv/transmute-mut-alignment-increase.rs:19:39 + | +19 | const INCREASE_ALIGNMENT: &mut AU16 = 
transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0716]: temporary value dropped while borrowed + --> tests/ui-msrv/transmute-mut-alignment-increase.rs:19:59 + | +19 | const INCREASE_ALIGNMENT: &mut AU16 = transmute_mut!(&mut [0u8; 2]); + | --------------------^^^^^^^^- + | | | + | | creates a temporary which is freed while still in use + | temporary value is freed at the end of this statement + | using this value as a constant requires that borrow lasts for `'static` diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-const.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-const.rs new file mode 100644 index 000000000..021b562f1 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-const.rs @@ -0,0 +1,20 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + +// `transmute_mut!` cannot, generally speaking, be used in const contexts. 
+const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-const.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-const.stderr new file mode 100644 index 000000000..30bfe4541 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-const.stderr @@ -0,0 +1,41 @@ +warning: taking a mutable reference to a `const` item + --> tests/ui-msrv/transmute-mut-const.rs:20:52 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(const_item_mutation)]` on by default + = note: each usage of a `const` item creates a new temporary + = note: the mutable reference will refer to this temporary, not the original `const` item +note: `const` item defined here + --> tests/ui-msrv/transmute-mut-const.rs:17:1 + | +17 | const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0658]: mutable references are not allowed in constants + --> tests/ui-msrv/transmute-mut-const.rs:20:52 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: see issue #57349 for more information + +error[E0015]: cannot call non-const fn `transmute_mut::<[u8; 2], [u8; 2]>` in constants + --> tests/ui-msrv/transmute-mut-const.rs:20:37 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0716]: temporary value dropped while borrowed + --> tests/ui-msrv/transmute-mut-const.rs:20:57 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | --------------------^^^^^^^^^^^^- + | | | + | | creates a temporary which is freed while 
still in use + | temporary value is freed at the end of this statement + | using this value as a constant requires that borrow lasts for `'static` diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-generic.rs new file mode 100644 index 000000000..7068f1026 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_mut, AsBytes, FromBytes}; + +fn main() {} + +fn transmute_mut(u: &mut u8) -> &mut T { + // `transmute_mut!` requires the destination type to be concrete. + transmute_mut!(u) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-generic.stderr new file mode 100644 index 000000000..f6b54ce1c --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-mut-dst-generic.rs:17:5 + | +17 | transmute_mut!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `T` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-mut-dst-generic.rs:17:5 + | +17 | transmute_mut!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (8 bits) + = note: target type: `MaxAlignsOf` (size can vary because of T) 
+ = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-a-reference.rs new file mode 100644 index 000000000..33a9ecd95 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting into a non-reference +// destination type. +const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-a-reference.stderr new file mode 100644 index 000000000..8f0ea801e --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-a-reference.stderr @@ -0,0 +1,39 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable 
reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-asbytes.rs new file mode 100644 index 000000000..b72f12928 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-asbytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `AsBytes` +const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-asbytes.stderr new file mode 100644 index 000000000..7e2dd78b8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-asbytes.stderr @@ -0,0 +1,12 @@ +error[E0277]: the trait bound `Dst: AsBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-dst-not-asbytes.rs:24:36 + | +24 | const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `Dst` + | +note: required by a bound in `AssertDstIsAsBytes` + --> tests/ui-msrv/transmute-mut-dst-not-asbytes.rs:24:36 + | +24 | const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-frombytes.rs new file mode 100644 index 000000000..102fcedc9 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-frombytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::AsBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-frombytes.stderr new file mode 100644 index 000000000..663e085a3 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-not-frombytes.stderr @@ -0,0 +1,12 @@ +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-dst-not-frombytes.rs:24:38 + | +24 | const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Dst` + | +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-msrv/transmute-mut-dst-not-frombytes.rs:24:38 + | +24 | const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-unsized.rs new file mode 100644 index 000000000..693ccda56 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting into an unsized destination +// type. +const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-unsized.stderr new file mode 100644 index 000000000..cb60a82a7 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-dst-unsized.stderr @@ -0,0 +1,108 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsAsBytes` + --> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z 
macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(e: T) -> U; + | ^ required by this bound in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + 
--> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf::::new` + --> src/macro_util.rs + | + | impl MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf::::new` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by this bound in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-illegal-lifetime.rs 
b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-illegal-lifetime.rs new file mode 100644 index 000000000..c31765e4b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let mut x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-illegal-lifetime.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-illegal-lifetime.stderr new file mode 100644 index 000000000..5ff714596 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-illegal-lifetime.stderr @@ -0,0 +1,9 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-msrv/transmute-mut-illegal-lifetime.rs:14:56 + | +14 | let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); + | ---------------- ^^^^^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-decrease.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-decrease.rs new file mode 100644 index 000000000..c6eec3a9c --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-decrease.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// We require that the size of the destination type is not smaller than the size +// of the source type. +const DECREASE_SIZE: &mut u8 = transmute_mut!(&mut [0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-decrease.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-decrease.stderr new file mode 100644 index 000000000..2bfc21898 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-decrease.stderr @@ -0,0 +1,36 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-mut-size-decrease.rs:17:32 + | +17 | const DECREASE_SIZE: &mut u8 = transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 2]` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0658]: mutable references are not allowed in constants + --> tests/ui-msrv/transmute-mut-size-decrease.rs:17:47 + | +17 | const DECREASE_SIZE: &mut u8 = transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^ + | + = note: see issue #57349 for more information + +error[E0015]: cannot call non-const fn `transmute_mut::<[u8; 2], u8>` in constants + --> tests/ui-msrv/transmute-mut-size-decrease.rs:17:32 + | +17 | const DECREASE_SIZE: &mut u8 = transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0716]: temporary value dropped while borrowed + --> tests/ui-msrv/transmute-mut-size-decrease.rs:17:52 + | +17 | const DECREASE_SIZE: &mut u8 = transmute_mut!(&mut [0u8; 2]); + | --------------------^^^^^^^^- + 
| | | + | | creates a temporary which is freed while still in use + | temporary value is freed at the end of this statement + | using this value as a constant requires that borrow lasts for `'static` diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-increase.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-increase.rs new file mode 100644 index 000000000..a4657c283 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-increase.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a smaller type to a larger +// one. +const INCREASE_SIZE: &mut [u8; 2] = transmute_mut!(&mut 0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-increase.stderr new file mode 100644 index 000000000..6e866a0d3 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-size-increase.stderr @@ -0,0 +1,36 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-mut-size-increase.rs:17:37 + | +17 | const INCREASE_SIZE: &mut [u8; 2] = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `[u8; 2]` (16 bits) + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0658]: mutable references are not allowed in constants + --> tests/ui-msrv/transmute-mut-size-increase.rs:17:52 + | +17 | const INCREASE_SIZE: &mut [u8; 2] = transmute_mut!(&mut 0u8); + | ^^^^^^^^ + | + = 
note: see issue #57349 for more information + +error[E0015]: cannot call non-const fn `transmute_mut::` in constants + --> tests/ui-msrv/transmute-mut-size-increase.rs:17:37 + | +17 | const INCREASE_SIZE: &mut [u8; 2] = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0716]: temporary value dropped while borrowed + --> tests/ui-msrv/transmute-mut-size-increase.rs:17:57 + | +17 | const INCREASE_SIZE: &mut [u8; 2] = transmute_mut!(&mut 0u8); + | --------------------^^^- + | | | + | | creates a temporary which is freed while still in use + | temporary value is freed at the end of this statement + | using this value as a constant requires that borrow lasts for `'static` diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-generic.rs new file mode 100644 index 000000000..aed7ded96 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-generic.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_mut, AsBytes, FromBytes}; + +fn main() {} + +fn transmute_mut(t: &mut T) -> &mut U { + // `transmute_mut!` requires the source and destination types to be + // concrete. 
+ transmute_mut!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-generic.stderr new file mode 100644 index 000000000..1162f21f9 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-mut-src-dst-generic.rs:18:5 + | +18 | transmute_mut!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `T` (this type does not have a fixed size) + = note: target type: `U` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-mut-src-dst-generic.rs:18:5 + | +18 | transmute_mut!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (size can vary because of T) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-not-references.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-not-references.rs new file mode 100644 index 000000000..98cc52088 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-not-references.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting between non-reference source +// and destination types. +const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-not-references.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-not-references.stderr new file mode 100644 index 000000000..c500a93af --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-not-references.stderr @@ -0,0 +1,12 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-src-dst-not-references.rs:17:59 + | +17 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | | help: consider mutably borrowing here: `&mut 0usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-unsized.rs new file mode 100644 index 000000000..1bebcf2d6 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting between unsized source and +// destination types. 
+const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-unsized.stderr new file mode 100644 index 000000000..00201a6b6 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-dst-unsized.stderr @@ -0,0 +1,237 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation 
time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsAsBytes` + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute` + --> 
$RUST/core/src/intrinsics.rs + | + | pub fn transmute(e: T) -> U; + | ^ required by this bound in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by this bound in `AlignOf` + = note: this error 
originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf::::new` + --> src/macro_util.rs + | + | impl MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf::::new` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` 
(in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by this bound in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by this bound in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + 
+error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all function arguments must have a statically known size + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-generic.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-generic.rs new file mode 100644 index 000000000..a3ef39787 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_mut, AsBytes}; + +fn main() {} + +fn transmute_mut(t: &mut T) -> &mut u8 { + // `transmute_mut!` requires the source type to be concrete. 
+ transmute_mut!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-generic.stderr new file mode 100644 index 000000000..8a9296ca6 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-generic.stderr @@ -0,0 +1,10 @@ +error[E0405]: cannot find trait `FromBytes` in this scope + --> tests/ui-msrv/transmute-mut-src-generic.rs:15:31 + | +15 | fn transmute_mut(t: &mut T) -> &mut u8 { + | ^^^^^^^^^ not found in this scope + | +help: consider importing this trait + | +11 | use zerocopy::FromBytes; + | diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-immutable.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-immutable.rs new file mode 100644 index 000000000..08088d0db --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-immutable.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +fn ref_src_immutable() { + // `transmute_mut!` requires that its source type be a mutable reference. 
+ let _: &mut u8 = transmute_mut!(&0u8); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-immutable.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-immutable.stderr new file mode 100644 index 000000000..8262f169a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-immutable.stderr @@ -0,0 +1,11 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-src-immutable.rs:17:37 + | +17 | let _: &mut u8 = transmute_mut!(&0u8); + | ---------------^^^^- + | | | + | | types differ in mutability + | expected due to this + | + = note: expected mutable reference `&mut _` + found reference `&u8` diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-a-reference.rs new file mode 100644 index 000000000..bf8bc3259 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a non-reference source +// type. 
+const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-a-reference.stderr new file mode 100644 index 000000000..3a6bdf78a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-a-reference.stderr @@ -0,0 +1,12 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-mut-src-not-a-reference.rs:17:53 + | +17 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | | help: consider mutably borrowing here: `&mut 0usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-asbytes.rs new file mode 100644 index 000000000..6a14f12fd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-asbytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `AsBytes` +const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-asbytes.stderr new file mode 100644 index 000000000..405697582 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-asbytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `Src: AsBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `Src` + | +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-msrv/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: AsBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `Src` + | +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-msrv/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with 
-Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-frombytes.rs new file mode 100644 index 000000000..2ebe03601 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-frombytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::AsBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `FromBytes` +const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-frombytes.stderr new file mode 100644 index 000000000..b859c41cd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-not-frombytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Src` + | +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-msrv/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + 
+error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-msrv/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Src` + | +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-msrv/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-unsized.rs new file mode 100644 index 000000000..413dd68d8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-unsized.rs @@ -0,0 +1,16 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from an unsized source type. 
+const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-unsized.stderr new file mode 100644 index 000000000..6b18695e6 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-mut-src-unsized.stderr @@ -0,0 +1,198 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> 
tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more 
info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(e: T) -> U; + | ^ required by this bound in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> 
tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by this bound in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf::::new` + --> src/macro_util.rs + | + | impl MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf::::new` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: 
&mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by this bound in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by this bound in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + 
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all function arguments must have a statically known size + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ptr-to-usize.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ptr-to-usize.rs new file mode 100644 index 000000000..5af885933 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ptr-to-usize.rs @@ -0,0 +1,20 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// It is unclear whether we can or should support this transmutation, especially +// in a const context. This test ensures that even if such a transmutation +// becomes valid due to the requisite implementations of `FromBytes` being +// added, that we re-examine whether it should specifically be valid in a const +// context. 
+const POINTER_VALUE: usize = transmute!(&0usize as *const usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ptr-to-usize.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ptr-to-usize.stderr new file mode 100644 index 000000000..06b1bbaf2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ptr-to-usize.stderr @@ -0,0 +1,37 @@ +error[E0277]: the trait bound `*const usize: AsBytes` is not satisfied + --> tests/ui-msrv/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `*const usize` + | + = help: the following implementations were found: + + + + + and $N others +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `*const usize: AsBytes` is not satisfied + --> tests/ui-msrv/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `*const usize` + | + = help: the following implementations were found: + + + + + and $N others +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-alignment-increase.rs 
b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-alignment-increase.rs new file mode 100644 index 000000000..bf1988c66 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-alignment-increase.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a type of smaller +// alignment to one of larger alignment. +const INCREASE_ALIGNMENT: &AU16 = transmute_ref!(&[0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-alignment-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-alignment-increase.stderr new file mode 100644 index 000000000..72864e144 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-alignment-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-ref-alignment-increase.rs:19:35 + | +19 | const INCREASE_ALIGNMENT: &AU16 = transmute_ref!(&[0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf<[u8; 2]>` (8 bits) + = note: target type: `MaxAlignsOf<[u8; 2], AU16>` (16 bits) + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-generic.rs new file mode 100644 index 000000000..bf4a0f9ad --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// 
Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_ref, FromBytes}; + +fn main() {} + +fn transmute_ref(u: &u8) -> &T { + // `transmute_ref!` requires the destination type to be concrete. + transmute_ref!(u) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-generic.stderr new file mode 100644 index 000000000..ec7ec7489 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-ref-dst-generic.rs:17:5 + | +17 | transmute_ref!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `T` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-ref-dst-generic.rs:17:5 + | +17 | transmute_ref!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (8 bits) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-mutable.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-mutable.rs new file mode 100644 index 000000000..fa0e6e4c9 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-mutable.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style 
license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +fn ref_dst_mutable() { + // `transmute_ref!` requires that its destination type be an immutable + // reference. + let _: &mut u8 = transmute_ref!(&0u8); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-mutable.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-mutable.stderr new file mode 100644 index 000000000..5ccf2cd20 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-mutable.stderr @@ -0,0 +1,29 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-mutable.rs:18:22 + | +18 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-mutable.rs:18:22 + | +18 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-mutable.rs:18:22 + | +18 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-a-reference.rs 
b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-a-reference.rs new file mode 100644 index 000000000..de55f9acd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting into a non-reference +// destination type. +const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-a-reference.stderr new file mode 100644 index 000000000..9a61c4c7c --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-a-reference.stderr @@ -0,0 +1,29 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-dst-not-a-reference.rs:17:36 + | +17 | const 
DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-frombytes.rs new file mode 100644 index 000000000..d81f64d21 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-frombytes.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &NotZerocopy = transmute_ref!(&AU16(0)); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-frombytes.stderr new file mode 100644 index 000000000..d31767544 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-not-frombytes.stderr @@ -0,0 +1,12 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-msrv/transmute-ref-dst-not-frombytes.rs:18:42 + | +18 | const DST_NOT_FROM_BYTES: &NotZerocopy = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-msrv/transmute-ref-dst-not-frombytes.rs:18:42 + | +18 | const DST_NOT_FROM_BYTES: &NotZerocopy = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ 
required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-unsized.rs new file mode 100644 index 000000000..625f1fac0 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting into an unsized destination +// type. +const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-unsized.stderr new file mode 100644 index 000000000..78135de87 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-dst-unsized.stderr @@ -0,0 +1,94 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-msrv/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size 
for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(e: T) -> U; + | ^ required by this bound in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = 
transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf::::new` + --> src/macro_util.rs + | + | impl MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf::::new` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by this bound in `transmute_ref` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-illegal-lifetime.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-illegal-lifetime.rs new file mode 100644 index 000000000..8dd191e6f --- /dev/null +++ 
b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static u64 = zerocopy::transmute_ref!(&x); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-illegal-lifetime.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-illegal-lifetime.stderr new file mode 100644 index 000000000..866ea56a6 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-illegal-lifetime.stderr @@ -0,0 +1,9 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-msrv/transmute-ref-illegal-lifetime.rs:14:52 + | +14 | let _: &'static u64 = zerocopy::transmute_ref!(&x); + | ------------ ^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-decrease.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-decrease.rs new file mode 100644 index 000000000..1d66a54ef --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-decrease.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. 
+const DECREASE_SIZE: &u8 = transmute_ref!(&[0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-decrease.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-decrease.stderr new file mode 100644 index 000000000..95669f906 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-ref-size-decrease.rs:17:28 + | +17 | const DECREASE_SIZE: &u8 = transmute_ref!(&[0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 2]` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-increase.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-increase.rs new file mode 100644 index 000000000..cdca560b3 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-increase.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a smaller type to a larger +// one. 
+const INCREASE_SIZE: &[u8; 2] = transmute_ref!(&0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-increase.stderr new file mode 100644 index 000000000..10f0e1038 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-ref-size-increase.rs:17:33 + | +17 | const INCREASE_SIZE: &[u8; 2] = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `[u8; 2]` (16 bits) + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-generic.rs new file mode 100644 index 000000000..409d785b2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-generic.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_ref, AsBytes, FromBytes}; + +fn main() {} + +fn transmute_ref(t: &T) -> &U { + // `transmute_ref!` requires the source and destination types to be + // concrete. 
+ transmute_ref!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-generic.stderr new file mode 100644 index 000000000..eb3268fa8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-ref-src-dst-generic.rs:18:5 + | +18 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `T` (this type does not have a fixed size) + = note: target type: `U` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-ref-src-dst-generic.rs:18:5 + | +18 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (size can vary because of T) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-not-references.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-not-references.rs new file mode 100644 index 000000000..114e917b5 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-not-references.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting between non-reference source +// and destination types. +const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-not-references.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-not-references.stderr new file mode 100644 index 000000000..2c5e23b6d --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-not-references.stderr @@ -0,0 +1,42 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-dst-not-references.rs:17:54 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected reference, found `usize` + | | help: consider borrowing here: `&0usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | 
^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found reference + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-unsized.rs new file mode 100644 index 000000000..6bfe7ffdf --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting between unsized source and +// destination types. +const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-unsized.stderr new file mode 100644 index 000000000..adfd59792 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-dst-unsized.stderr @@ -0,0 +1,195 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error 
originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals 
are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(e: T) -> U; + | ^ required by this bound in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more 
info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by this bound in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf::::new` + --> src/macro_util.rs + | + | impl MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf::::new` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> 
tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by this bound in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by this bound in `transmute_ref` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all function arguments must have a statically known size + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-generic.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-generic.rs new file mode 100644 index 000000000..010281c32 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_ref, AsBytes}; + +fn main() {} + +fn transmute_ref(t: &T) -> &u8 { + // `transmute_ref!` requires the source type to be concrete. 
+ transmute_ref!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-generic.stderr new file mode 100644 index 000000000..4cb3e51bc --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-ref-src-generic.rs:17:5 + | +17 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `T` (this type does not have a fixed size) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-ref-src-generic.rs:17:5 + | +17 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (size can vary because of T) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-a-reference.rs new file mode 100644 index 000000000..90661b3e2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a non-reference source +// type. 
+const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-a-reference.stderr new file mode 100644 index 000000000..0f4aeec9e --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-a-reference.stderr @@ -0,0 +1,12 @@ +error[E0308]: mismatched types + --> tests/ui-msrv/transmute-ref-src-not-a-reference.rs:17:49 + | +17 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected reference, found `usize` + | | help: consider borrowing here: `&0usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-asbytes.rs new file mode 100644 index 000000000..6ab19f3c8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-asbytes.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref` requires that the source type implements `AsBytes` +const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-asbytes.stderr new file mode 100644 index 000000000..6b80d4f49 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-not-asbytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-msrv/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-msrv/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z 
macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-unsized.rs new file mode 100644 index 000000000..14e72b4dd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-unsized.rs @@ -0,0 +1,16 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from an unsized source type. +const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-unsized.stderr new file mode 100644 index 000000000..43bac5359 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-ref-src-unsized.stderr @@ -0,0 +1,170 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const 
SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(e: T) -> U; + | ^ required by this bound in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + 
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by this bound in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf::::new` + 
--> src/macro_util.rs + | + | impl MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf::::new` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by this bound in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by this bound in `AlignOf` + = note: this error 
originates in the macro `$crate::assert_align_gt_eq` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by this bound in `transmute_ref` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-msrv/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all function arguments must have a statically known size + = note: this error originates in the macro `$crate::assert_size_eq` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-decrease.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-decrease.rs new file mode 100644 index 000000000..1d56831f2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-decrease.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. +const DECREASE_SIZE: u8 = transmute!(AU16(0)); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-decrease.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-decrease.stderr new file mode 100644 index 000000000..ffa568848 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-size-decrease.rs:19:27 + | +19 | const DECREASE_SIZE: u8 = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AU16` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-increase.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-increase.rs new file mode 100644 index 000000000..32f936308 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-increase.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// `transmute!` does not support transmuting from a smaller type to a larger +// one. 
+const INCREASE_SIZE: AU16 = transmute!(0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-increase.stderr new file mode 100644 index 000000000..865d0caf9 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-msrv/transmute-size-increase.rs:19:29 + | +19 | const INCREASE_SIZE: AU16 = transmute!(0u8); + | ^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `AU16` (16 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-src-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-src-not-asbytes.rs new file mode 100644 index 000000000..dd730216b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-src-not-asbytes.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the source type implements `AsBytes` +const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); diff --git a/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-src-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-src-not-asbytes.stderr new file mode 100644 index 000000000..93eeda0c2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-msrv/transmute-src-not-asbytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-msrv/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-msrv/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-msrv/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_not_from_bytes.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_not_from_bytes.rs new file mode 100644 index 000000000..45b6138f4 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_not_from_bytes.rs @@ -0,0 +1,12 @@ +// Copyright 2022 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +// Should fail because `UnsafeCell: !FromBytes`. +const NOT_FROM_BYTES: core::cell::UnsafeCell = + include_value!("../../testdata/include_value/data"); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_not_from_bytes.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_not_from_bytes.stderr new file mode 100644 index 000000000..f7c7fdd7a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_not_from_bytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `UnsafeCell: FromBytes` is not satisfied + --> tests/ui-nightly/include_value_not_from_bytes.rs:12:5 + | +12 | include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `UnsafeCell` + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromBytes`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-nightly/include_value_not_from_bytes.rs:12:5 + | +12 | include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `include_value` (in Nightly builds, run with -Z macro-backtrace for more info) diff 
--git a/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_wrong_size.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_wrong_size.rs new file mode 100644 index 000000000..d87b30698 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_wrong_size.rs @@ -0,0 +1,11 @@ +// Copyright 2022 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +// Should fail because the file is 4 bytes long, not 8. +const WRONG_SIZE: u64 = include_value!("../../testdata/include_value/data"); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_wrong_size.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_wrong_size.stderr new file mode 100644 index 000000000..f592ece1d --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/include_value_wrong_size.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/include_value_wrong_size.rs:11:25 + | +11 | const WRONG_SIZE: u64 = include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 4]` (32 bits) + = note: target type: `u64` (64 bits) + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `include_value` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/invalid-impls/invalid-impls.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/invalid-impls/invalid-impls.rs new file mode 100644 index 000000000..ea963907d --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/invalid-impls/invalid-impls.rs @@ -0,0 +1,29 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the 
MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +// Since some macros from `macros.rs` are unused. +#![allow(unused)] + +extern crate zerocopy; +extern crate zerocopy_derive; + +include!("../../../src/macros.rs"); + +use zerocopy::*; +use zerocopy_derive::*; + +fn main() {} + +#[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] +#[repr(transparent)] +struct Foo(T); + +impl_or_verify!(T => FromZeroes for Foo); +impl_or_verify!(T => FromBytes for Foo); +impl_or_verify!(T => AsBytes for Foo); +impl_or_verify!(T => Unaligned for Foo); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/invalid-impls/invalid-impls.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/invalid-impls/invalid-impls.stderr new file mode 100644 index 000000000..1c913094b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/invalid-impls/invalid-impls.stderr @@ -0,0 +1,107 @@ +error[E0277]: the trait bound `T: zerocopy::FromZeroes` is not satisfied + --> tests/ui-nightly/invalid-impls/invalid-impls.rs:26:37 + | +26 | impl_or_verify!(T => FromZeroes for Foo); + | ^^^^^^ the trait `zerocopy::FromZeroes` is not implemented for `T`, which is required by `Foo: zerocopy::FromZeroes` + | +note: required for `Foo` to implement `zerocopy::FromZeroes` + --> tests/ui-nightly/invalid-impls/invalid-impls.rs:22:10 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::Subtrait` + --> tests/ui-nightly/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `Subtrait` + | + ::: tests/ui-nightly/invalid-impls/invalid-impls.rs:26:1 + | +26 | impl_or_verify!(T => FromZeroes for Foo); + | ------------------------------------------- in this macro invocation + = note: this error originates in the derive macro `FromZeroes` which comes from the expansion of the 
macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +26 | impl_or_verify!(T: zerocopy::FromZeroes => FromZeroes for Foo); + | ++++++++++++++++++++++ + +error[E0277]: the trait bound `T: zerocopy::FromBytes` is not satisfied + --> tests/ui-nightly/invalid-impls/invalid-impls.rs:27:36 + | +27 | impl_or_verify!(T => FromBytes for Foo); + | ^^^^^^ the trait `zerocopy::FromBytes` is not implemented for `T`, which is required by `Foo: zerocopy::FromBytes` + | +note: required for `Foo` to implement `zerocopy::FromBytes` + --> tests/ui-nightly/invalid-impls/invalid-impls.rs:22:22 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::Subtrait` + --> tests/ui-nightly/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `Subtrait` + | + ::: tests/ui-nightly/invalid-impls/invalid-impls.rs:27:1 + | +27 | impl_or_verify!(T => FromBytes for Foo); + | ------------------------------------------ in this macro invocation + = note: this error originates in the derive macro `FromBytes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +27 | impl_or_verify!(T: zerocopy::FromBytes => FromBytes for Foo); + | +++++++++++++++++++++ + +error[E0277]: the trait bound `T: zerocopy::AsBytes` is not satisfied + --> tests/ui-nightly/invalid-impls/invalid-impls.rs:28:34 + | +28 | impl_or_verify!(T => AsBytes for Foo); + | ^^^^^^ the trait `zerocopy::AsBytes` is not implemented for `T`, which is required by `Foo: zerocopy::AsBytes` + | +note: required for `Foo` to implement `zerocopy::AsBytes` + --> tests/ui-nightly/invalid-impls/invalid-impls.rs:22:33 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^ 
unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::Subtrait` + --> tests/ui-nightly/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `Subtrait` + | + ::: tests/ui-nightly/invalid-impls/invalid-impls.rs:28:1 + | +28 | impl_or_verify!(T => AsBytes for Foo); + | ---------------------------------------- in this macro invocation + = note: this error originates in the derive macro `AsBytes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +28 | impl_or_verify!(T: zerocopy::AsBytes => AsBytes for Foo); + | +++++++++++++++++++ + +error[E0277]: the trait bound `T: zerocopy::Unaligned` is not satisfied + --> tests/ui-nightly/invalid-impls/invalid-impls.rs:29:36 + | +29 | impl_or_verify!(T => Unaligned for Foo); + | ^^^^^^ the trait `zerocopy::Unaligned` is not implemented for `T`, which is required by `Foo: zerocopy::Unaligned` + | +note: required for `Foo` to implement `zerocopy::Unaligned` + --> tests/ui-nightly/invalid-impls/invalid-impls.rs:22:42 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::Subtrait` + --> tests/ui-nightly/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `Subtrait` + | + ::: tests/ui-nightly/invalid-impls/invalid-impls.rs:29:1 + | +29 | impl_or_verify!(T => Unaligned for Foo); + | ------------------------------------------ in this macro invocation + = note: this error originates in the derive macro `Unaligned` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +29 | impl_or_verify!(T: zerocopy::Unaligned => Unaligned for Foo); + | 
+++++++++++++++++++++ diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/max-align.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/max-align.rs new file mode 100644 index 000000000..53e3eb9b0 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/max-align.rs @@ -0,0 +1,99 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[repr(C, align(1))] +struct Align1; + +#[repr(C, align(2))] +struct Align2; + +#[repr(C, align(4))] +struct Align4; + +#[repr(C, align(8))] +struct Align8; + +#[repr(C, align(16))] +struct Align16; + +#[repr(C, align(32))] +struct Align32; + +#[repr(C, align(64))] +struct Align64; + +#[repr(C, align(128))] +struct Align128; + +#[repr(C, align(256))] +struct Align256; + +#[repr(C, align(512))] +struct Align512; + +#[repr(C, align(1024))] +struct Align1024; + +#[repr(C, align(2048))] +struct Align2048; + +#[repr(C, align(4096))] +struct Align4096; + +#[repr(C, align(8192))] +struct Align8192; + +#[repr(C, align(16384))] +struct Align16384; + +#[repr(C, align(32768))] +struct Align32768; + +#[repr(C, align(65536))] +struct Align65536; + +#[repr(C, align(131072))] +struct Align131072; + +#[repr(C, align(262144))] +struct Align262144; + +#[repr(C, align(524288))] +struct Align524288; + +#[repr(C, align(1048576))] +struct Align1048576; + +#[repr(C, align(2097152))] +struct Align2097152; + +#[repr(C, align(4194304))] +struct Align4194304; + +#[repr(C, align(8388608))] +struct Align8388608; + +#[repr(C, align(16777216))] +struct Align16777216; + +#[repr(C, align(33554432))] +struct Align33554432; + +#[repr(C, align(67108864))] +struct Align67108864; + +#[repr(C, align(134217728))] +struct Align13421772; + +#[repr(C, align(268435456))] +struct Align26843545; + +#[repr(C, align(1073741824))] +struct Align1073741824; + +fn main() {} diff 
--git a/src/rust/vendor/zerocopy/tests/ui-nightly/max-align.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/max-align.stderr new file mode 100644 index 000000000..c11eed539 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/max-align.stderr @@ -0,0 +1,5 @@ +error[E0589]: invalid `repr(align)` attribute: larger than 2^29 + --> tests/ui-nightly/max-align.rs:96:17 + | +96 | #[repr(C, align(1073741824))] + | ^^^^^^^^^^ diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-dst-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-dst-not-frombytes.rs new file mode 100644 index 000000000..c4caaff91 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-dst-not-frombytes.rs @@ -0,0 +1,18 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-dst-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-dst-not-frombytes.stderr new file mode 100644 index 000000000..70bec2163 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-dst-not-frombytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-nightly/transmute-dst-not-frombytes.rs:18:41 + | +18 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `NotZerocopy` + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-nightly/transmute-dst-not-frombytes.rs:18:41 + | +18 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-alignment-increase.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-alignment-increase.rs new file mode 100644 index 000000000..0928564dd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-alignment-increase.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a type of smaller +// alignment to one of larger alignment. +const INCREASE_ALIGNMENT: &mut AU16 = transmute_mut!(&mut [0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-alignment-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-alignment-increase.stderr new file mode 100644 index 000000000..0666f8b52 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-alignment-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-mut-alignment-increase.rs:19:39 + | +19 | const INCREASE_ALIGNMENT: &mut AU16 = transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf<[u8; 2]>` (8 bits) + = note: target type: `MaxAlignsOf<[u8; 2], AU16>` (16 bits) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-const.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-const.rs new file mode 100644 index 000000000..021b562f1 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-const.rs @@ -0,0 +1,20 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + +// `transmute_mut!` cannot, generally speaking, be used in const contexts. +const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-const.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-const.stderr new file mode 100644 index 000000000..61a34b9a7 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-const.stderr @@ -0,0 +1,33 @@ +warning: taking a mutable reference to a `const` item + --> tests/ui-nightly/transmute-mut-const.rs:20:52 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: each usage of a `const` item creates a new temporary + = note: the mutable reference will refer to this temporary, not the original `const` item +note: `const` item defined here + --> tests/ui-nightly/transmute-mut-const.rs:17:1 + | +17 | const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(const_item_mutation)]` on by default + +error[E0658]: mutable references are not allowed in constants + --> tests/ui-nightly/transmute-mut-const.rs:20:52 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: see issue #57349 for more information + = help: add `#![feature(const_mut_refs)]` to the crate attributes to enable + = note: this compiler was built on 2024-06-18; consider upgrading it if it is out of date + +error[E0015]: cannot call non-const fn `transmute_mut::<'_, '_, [u8; 2], [u8; 2]>` in constants + --> tests/ui-nightly/transmute-mut-const.rs:20:37 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, 
tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-generic.rs new file mode 100644 index 000000000..7068f1026 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_mut, AsBytes, FromBytes}; + +fn main() {} + +fn transmute_mut(u: &mut u8) -> &mut T { + // `transmute_mut!` requires the destination type to be concrete. + transmute_mut!(u) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-generic.stderr new file mode 100644 index 000000000..f278558cf --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-mut-dst-generic.rs:17:5 + | +17 | transmute_mut!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `T` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-mut-dst-generic.rs:17:5 + | +17 | transmute_mut!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: 
source type: `AlignOf` (8 bits) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-a-reference.rs new file mode 100644 index 000000000..33a9ecd95 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting into a non-reference +// destination type. 
+const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-a-reference.stderr new file mode 100644 index 000000000..a84547bd0 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-a-reference.stderr @@ -0,0 +1,39 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + 
= note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-asbytes.rs new file mode 100644 index 000000000..b72f12928 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-asbytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `AsBytes` +const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-asbytes.stderr new file mode 100644 index 000000000..7de5da60a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-asbytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `Dst: AsBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-dst-not-asbytes.rs:24:36 + | +24 | const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `Dst` + | required by a bound introduced by this call + | + = help: the following other types implement trait `AsBytes`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others +note: required by a bound in `AssertDstIsAsBytes` + --> 
tests/ui-nightly/transmute-mut-dst-not-asbytes.rs:24:36 + | +24 | const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-frombytes.rs new file mode 100644 index 000000000..102fcedc9 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-frombytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::AsBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-frombytes.stderr new file mode 100644 index 000000000..9df4ebc2b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-not-frombytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-dst-not-frombytes.rs:24:38 + | +24 | const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `Dst` + | required by a bound introduced by this call + | + = help: the following 
other types implement trait `FromBytes`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-nightly/transmute-mut-dst-not-frombytes.rs:24:38 + | +24 | const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-unsized.rs new file mode 100644 index 000000000..693ccda56 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting into an unsized destination +// type. 
+const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-unsized.stderr new file mode 100644 index 000000000..5e3fbb4bf --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-dst-unsized.stderr @@ -0,0 +1,86 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-nightly/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsAsBytes` + --> tests/ui-nightly/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + 
+error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(src: Src) -> Dst; + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute` + = note: this error originates in the macro 
`$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-illegal-lifetime.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-illegal-lifetime.rs new file mode 100644 index 000000000..c31765e4b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let mut x = 0u64; + // It is illegal to increase the lifetime scope. 
+ let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-illegal-lifetime.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-illegal-lifetime.stderr new file mode 100644 index 000000000..b826fcc7a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-illegal-lifetime.stderr @@ -0,0 +1,12 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-nightly/transmute-mut-illegal-lifetime.rs:14:56 + | +12 | let mut x = 0u64; + | ----- binding `x` declared here +13 | // It is illegal to increase the lifetime scope. +14 | let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); + | ---------------- ^^^^^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-decrease.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-decrease.rs new file mode 100644 index 000000000..c6eec3a9c --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-decrease.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// We require that the size of the destination type is not smaller than the size +// of the source type. 
+const DECREASE_SIZE: &mut u8 = transmute_mut!(&mut [0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-decrease.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-decrease.stderr new file mode 100644 index 000000000..ac1e35cec --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-mut-size-decrease.rs:17:32 + | +17 | const DECREASE_SIZE: &mut u8 = transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 2]` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-increase.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-increase.rs new file mode 100644 index 000000000..a4657c283 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-increase.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a smaller type to a larger +// one. 
+const INCREASE_SIZE: &mut [u8; 2] = transmute_mut!(&mut 0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-increase.stderr new file mode 100644 index 000000000..d343bd65e --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-mut-size-increase.rs:17:37 + | +17 | const INCREASE_SIZE: &mut [u8; 2] = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `[u8; 2]` (16 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-generic.rs new file mode 100644 index 000000000..aed7ded96 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-generic.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_mut, AsBytes, FromBytes}; + +fn main() {} + +fn transmute_mut(t: &mut T) -> &mut U { + // `transmute_mut!` requires the source and destination types to be + // concrete. 
+ transmute_mut!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-generic.stderr new file mode 100644 index 000000000..e3f3a3fd7 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-mut-src-dst-generic.rs:18:5 + | +18 | transmute_mut!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `T` (this type does not have a fixed size) + = note: target type: `U` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-mut-src-dst-generic.rs:18:5 + | +18 | transmute_mut!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (size can vary because of T) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-not-references.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-not-references.rs new file mode 100644 index 000000000..98cc52088 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-not-references.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting between non-reference source +// and destination types. +const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-not-references.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-not-references.stderr new file mode 100644 index 000000000..bdd3f423e --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-not-references.stderr @@ -0,0 +1,44 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-src-dst-not-references.rs:17:59 + | +17 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` +help: consider mutably borrowing here + | +17 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(&mut 0usize); + | ++++ + +warning: this function depends on never type fallback being `()` + --> tests/ui-nightly/transmute-mut-src-dst-not-references.rs:17:1 + | +17 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
+ = note: for more information, see issue #123748 + = help: specify the types explicitly +note: in edition 2024, the requirement `!: FromBytes` will fail + --> tests/ui-nightly/transmute-mut-src-dst-not-references.rs:17:44 + | +17 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(dependency_on_unit_never_type_fallback)]` on by default + = note: this warning originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +warning: never type fallback affects this call to an `unsafe` function + --> tests/ui-nightly/transmute-mut-src-dst-not-references.rs:17:44 + | +17 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this will change its meaning in a future release! + = note: for more information, see issue #123748 + = help: specify the type explicitly + = note: `#[warn(never_type_fallback_flowing_into_unsafe)]` on by default + = note: this warning originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-unsized.rs new file mode 100644 index 000000000..1bebcf2d6 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting between unsized source and +// destination types. 
+const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-unsized.stderr new file mode 100644 index 000000000..2fe66581f --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-dst-unsized.stderr @@ -0,0 +1,231 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + 
+error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for 
`[u8]` +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsAsBytes` + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | 
const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + | #[inline(never)] // Make `missing_inline_in_public_items` happy. + | pub fn into_t(self) -> T { + | ------ required by a bound in this associated function + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which 
comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not 
implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(src: Src) -> Dst; + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> 
tests/ui-nightly/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-generic.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-generic.rs new file mode 100644 index 000000000..a3ef39787 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_mut, AsBytes}; + +fn main() {} + +fn transmute_mut(t: &mut T) -> &mut u8 { + // `transmute_mut!` requires the source type to be concrete. 
+ transmute_mut!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-generic.stderr new file mode 100644 index 000000000..c06d77507 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-generic.stderr @@ -0,0 +1,10 @@ +error[E0405]: cannot find trait `FromBytes` in this scope + --> tests/ui-nightly/transmute-mut-src-generic.rs:15:31 + | +15 | fn transmute_mut(t: &mut T) -> &mut u8 { + | ^^^^^^^^^ not found in this scope + | +help: consider importing this trait + | +11 + use zerocopy::FromBytes; + | diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-immutable.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-immutable.rs new file mode 100644 index 000000000..08088d0db --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-immutable.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +fn ref_src_immutable() { + // `transmute_mut!` requires that its source type be a mutable reference. 
+ let _: &mut u8 = transmute_mut!(&0u8); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-immutable.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-immutable.stderr new file mode 100644 index 000000000..abaac9958 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-immutable.stderr @@ -0,0 +1,40 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-src-immutable.rs:17:37 + | +17 | let _: &mut u8 = transmute_mut!(&0u8); + | ---------------^^^^- + | | | + | | types differ in mutability + | expected due to this + | + = note: expected mutable reference `&mut _` + found reference `&u8` + +warning: this function depends on never type fallback being `()` + --> tests/ui-nightly/transmute-mut-src-immutable.rs:15:1 + | +15 | fn ref_src_immutable() { + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #123748 + = help: specify the types explicitly +note: in edition 2024, the requirement `!: FromBytes` will fail + --> tests/ui-nightly/transmute-mut-src-immutable.rs:17:22 + | +17 | let _: &mut u8 = transmute_mut!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(dependency_on_unit_never_type_fallback)]` on by default + = note: this warning originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +warning: never type fallback affects this call to an `unsafe` function + --> tests/ui-nightly/transmute-mut-src-immutable.rs:17:22 + | +17 | let _: &mut u8 = transmute_mut!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ + | + = warning: this will change its meaning in a future release! 
+ = note: for more information, see issue #123748 + = help: specify the type explicitly + = note: `#[warn(never_type_fallback_flowing_into_unsafe)]` on by default + = note: this warning originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-a-reference.rs new file mode 100644 index 000000000..bf8bc3259 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a non-reference source +// type. 
+const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-a-reference.stderr new file mode 100644 index 000000000..8fc4476d1 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-a-reference.stderr @@ -0,0 +1,44 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-mut-src-not-a-reference.rs:17:53 + | +17 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` +help: consider mutably borrowing here + | +17 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(&mut 0usize); + | ++++ + +warning: this function depends on never type fallback being `()` + --> tests/ui-nightly/transmute-mut-src-not-a-reference.rs:17:1 + | +17 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
+ = note: for more information, see issue #123748 + = help: specify the types explicitly +note: in edition 2024, the requirement `!: FromBytes` will fail + --> tests/ui-nightly/transmute-mut-src-not-a-reference.rs:17:38 + | +17 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(dependency_on_unit_never_type_fallback)]` on by default + = note: this warning originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +warning: never type fallback affects this call to an `unsafe` function + --> tests/ui-nightly/transmute-mut-src-not-a-reference.rs:17:38 + | +17 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this will change its meaning in a future release! + = note: for more information, see issue #123748 + = help: specify the type explicitly + = note: `#[warn(never_type_fallback_flowing_into_unsafe)]` on by default + = note: this warning originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-asbytes.rs new file mode 100644 index 000000000..6a14f12fd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-asbytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `AsBytes` +const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-asbytes.stderr new file mode 100644 index 000000000..0b4154b1b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-asbytes.stderr @@ -0,0 +1,48 @@ +error[E0277]: the trait bound `Src: AsBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `Src` + | required by a bound introduced by this call + | + = help: the following other types implement trait `AsBytes`: + () + Dst + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-nightly/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: AsBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `Src` + | + = help: the following other types implement trait `AsBytes`: + () + Dst + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by 
a bound in `AssertSrcIsAsBytes` + --> tests/ui-nightly/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-frombytes.rs new file mode 100644 index 000000000..2ebe03601 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-frombytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::AsBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `FromBytes` +const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-frombytes.stderr new file mode 100644 index 000000000..858fc5095 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-not-frombytes.stderr @@ -0,0 +1,48 @@ +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `Src` + | required by a bound introduced by this 
call + | + = help: the following other types implement trait `FromBytes`: + () + Dst + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-nightly/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-nightly/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Src` + | + = help: the following other types implement trait `FromBytes`: + () + Dst + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-nightly/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-unsized.rs new file mode 100644 index 000000000..413dd68d8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-unsized.rs @@ -0,0 +1,16 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from an unsized source type. +const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-unsized.stderr new file mode 100644 index 000000000..b8599942b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-mut-src-unsized.stderr @@ -0,0 +1,158 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = 
note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the 
trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + | #[inline(never)] // Make `missing_inline_in_public_items` happy. + | pub fn into_t(self) -> T { + | ------ required by a bound in this associated function + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = 
transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of 
the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ptr-to-usize.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ptr-to-usize.rs new file mode 100644 index 000000000..5af885933 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ptr-to-usize.rs @@ -0,0 +1,20 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// It is unclear whether we can or should support this transmutation, especially +// in a const context. This test ensures that even if such a transmutation +// becomes valid due to the requisite implementations of `FromBytes` being +// added, that we re-examine whether it should specifically be valid in a const +// context. 
+const POINTER_VALUE: usize = transmute!(&0usize as *const usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ptr-to-usize.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ptr-to-usize.stderr new file mode 100644 index 000000000..2fcba2fb6 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ptr-to-usize.stderr @@ -0,0 +1,30 @@ +error[E0277]: the trait bound `*const usize: AsBytes` is not satisfied + --> tests/ui-nightly/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `*const usize` + | required by a bound introduced by this call + | + = help: the trait `AsBytes` is implemented for `usize` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-nightly/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `*const usize: AsBytes` is not satisfied + --> tests/ui-nightly/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `*const usize` + | + = help: the trait `AsBytes` is implemented for `usize` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-nightly/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-alignment-increase.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-alignment-increase.rs new file mode 100644 index 000000000..bf1988c66 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-alignment-increase.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a type of smaller +// alignment to one of larger alignment. +const INCREASE_ALIGNMENT: &AU16 = transmute_ref!(&[0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-alignment-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-alignment-increase.stderr new file mode 100644 index 000000000..1cef246bc --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-alignment-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-ref-alignment-increase.rs:19:35 + | +19 | const INCREASE_ALIGNMENT: &AU16 = transmute_ref!(&[0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf<[u8; 2]>` (8 bits) + = note: target type: `MaxAlignsOf<[u8; 2], AU16>` (16 bits) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-generic.rs new file mode 100644 index 
000000000..bf4a0f9ad --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_ref, FromBytes}; + +fn main() {} + +fn transmute_ref(u: &u8) -> &T { + // `transmute_ref!` requires the destination type to be concrete. + transmute_ref!(u) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-generic.stderr new file mode 100644 index 000000000..4c94d501c --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-ref-dst-generic.rs:17:5 + | +17 | transmute_ref!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `T` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-ref-dst-generic.rs:17:5 + | +17 | transmute_ref!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (8 bits) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-mutable.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-mutable.rs new file mode 100644 index 000000000..fa0e6e4c9 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-mutable.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +fn ref_dst_mutable() { + // `transmute_ref!` requires that its destination type be an immutable + // reference. + let _: &mut u8 = transmute_ref!(&0u8); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-mutable.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-mutable.stderr new file mode 100644 index 000000000..0cbdd176b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-mutable.stderr @@ -0,0 +1,29 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-mutable.rs:18:22 + | +18 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-mutable.rs:18:22 + | +18 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-mutable.rs:18:22 + | +18 | let 
_: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-a-reference.rs new file mode 100644 index 000000000..de55f9acd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting into a non-reference +// destination type. 
+const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-a-reference.stderr new file mode 100644 index 000000000..847d54732 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-a-reference.stderr @@ -0,0 +1,29 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-frombytes.rs new file mode 100644 index 000000000..d81f64d21 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-frombytes.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// 
Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &NotZerocopy = transmute_ref!(&AU16(0)); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-frombytes.stderr new file mode 100644 index 000000000..a09f99660 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-not-frombytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-nightly/transmute-ref-dst-not-frombytes.rs:18:42 + | +18 | const DST_NOT_FROM_BYTES: &NotZerocopy = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `NotZerocopy` + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-nightly/transmute-ref-dst-not-frombytes.rs:18:42 + | +18 | const DST_NOT_FROM_BYTES: &NotZerocopy = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-unsized.rs new file mode 100644 index 000000000..625f1fac0 --- /dev/null +++ 
b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting into an unsized destination +// type. +const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-unsized.stderr new file mode 100644 index 000000000..1e97b5b0e --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-dst-unsized.stderr @@ -0,0 +1,69 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-nightly/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for 
`[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(src: Src) -> Dst; + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | 
^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_ref` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-illegal-lifetime.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-illegal-lifetime.rs new file mode 100644 index 000000000..8dd191e6f --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static u64 = zerocopy::transmute_ref!(&x); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-illegal-lifetime.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-illegal-lifetime.stderr new file mode 100644 index 000000000..e16a55761 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-illegal-lifetime.stderr @@ -0,0 +1,12 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-nightly/transmute-ref-illegal-lifetime.rs:14:52 + | +12 | let x = 0u64; + | - binding `x` declared here +13 | // It is illegal to increase the lifetime scope. 
+14 | let _: &'static u64 = zerocopy::transmute_ref!(&x); + | ------------ ^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-decrease.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-decrease.rs new file mode 100644 index 000000000..1d66a54ef --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-decrease.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. 
+const DECREASE_SIZE: &u8 = transmute_ref!(&[0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-decrease.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-decrease.stderr new file mode 100644 index 000000000..793ecc54c --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-ref-size-decrease.rs:17:28 + | +17 | const DECREASE_SIZE: &u8 = transmute_ref!(&[0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 2]` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-increase.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-increase.rs new file mode 100644 index 000000000..cdca560b3 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-increase.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a smaller type to a larger +// one. 
+const INCREASE_SIZE: &[u8; 2] = transmute_ref!(&0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-increase.stderr new file mode 100644 index 000000000..40c69f63c --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-ref-size-increase.rs:17:33 + | +17 | const INCREASE_SIZE: &[u8; 2] = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `[u8; 2]` (16 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-generic.rs new file mode 100644 index 000000000..409d785b2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-generic.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_ref, AsBytes, FromBytes}; + +fn main() {} + +fn transmute_ref(t: &T) -> &U { + // `transmute_ref!` requires the source and destination types to be + // concrete. 
+ transmute_ref!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-generic.stderr new file mode 100644 index 000000000..6a3a4fd95 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-ref-src-dst-generic.rs:18:5 + | +18 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `T` (this type does not have a fixed size) + = note: target type: `U` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-ref-src-dst-generic.rs:18:5 + | +18 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (size can vary because of T) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-not-references.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-not-references.rs new file mode 100644 index 000000000..114e917b5 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-not-references.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting between non-reference source +// and destination types. +const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-not-references.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-not-references.stderr new file mode 100644 index 000000000..94a738728 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-not-references.stderr @@ -0,0 +1,85 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:17:54 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&_`, found `usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` +help: consider borrowing here + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(&0usize); + | + + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + 
--> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +warning: this function depends on never type fallback being `()` + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:17:1 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #123748 + = help: specify the types explicitly +note: in edition 2024, the requirement `!: AsBytes` will fail + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(dependency_on_unit_never_type_fallback)]` on by default + = note: this warning originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +warning: never type fallback affects this call to an `unsafe` function + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this will change its meaning in a future release! 
+ = note: for more information, see issue #123748 + = help: specify the type explicitly + = note: `#[warn(never_type_fallback_flowing_into_unsafe)]` on by default + = note: this warning originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +warning: never type fallback affects this call to an `unsafe` function + --> tests/ui-nightly/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this will change its meaning in a future release! + = note: for more information, see issue #123748 + = help: specify the type explicitly + = note: this warning originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-unsized.rs new file mode 100644 index 000000000..6bfe7ffdf --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting between unsized source and +// destination types. 
+const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-unsized.stderr new file mode 100644 index 000000000..cb1e443ac --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-dst-unsized.stderr @@ -0,0 +1,183 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at 
compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + | #[inline(never)] // Make `missing_inline_in_public_items` happy. 
+ | pub fn into_t(self) -> T { + | ------ required by a bound in this associated function + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not 
implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + 
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(src: Src) -> Dst; + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_ref` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_ref` + = note: this error 
originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-generic.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-generic.rs new file mode 100644 index 000000000..010281c32 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_ref, AsBytes}; + +fn main() {} + +fn transmute_ref(t: &T) -> &u8 { + // `transmute_ref!` requires the source type to be concrete. + transmute_ref!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-generic.stderr new file mode 100644 index 000000000..a168f44bb --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-ref-src-generic.rs:17:5 + | +17 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `T` (this type does not have a fixed size) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-ref-src-generic.rs:17:5 + | +17 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (size can vary because of T) + = note: target type: 
`MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-a-reference.rs new file mode 100644 index 000000000..90661b3e2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a non-reference source +// type. +const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-a-reference.stderr new file mode 100644 index 000000000..3418f21c4 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-a-reference.stderr @@ -0,0 +1,55 @@ +error[E0308]: mismatched types + --> tests/ui-nightly/transmute-ref-src-not-a-reference.rs:17:49 + | +17 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&_`, found `usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` +help: consider borrowing here + | +17 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(&0usize); + | + + +warning: this function depends on never type fallback being `()` + --> tests/ui-nightly/transmute-ref-src-not-a-reference.rs:17:1 + | +17 | const SRC_NOT_A_REFERENCE: &u8 
= transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #123748 + = help: specify the types explicitly +note: in edition 2024, the requirement `!: AsBytes` will fail + --> tests/ui-nightly/transmute-ref-src-not-a-reference.rs:17:34 + | +17 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(dependency_on_unit_never_type_fallback)]` on by default + = note: this warning originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +warning: never type fallback affects this call to an `unsafe` function + --> tests/ui-nightly/transmute-ref-src-not-a-reference.rs:17:34 + | +17 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this will change its meaning in a future release! + = note: for more information, see issue #123748 + = help: specify the type explicitly + = note: `#[warn(never_type_fallback_flowing_into_unsafe)]` on by default + = note: this warning originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +warning: never type fallback affects this call to an `unsafe` function + --> tests/ui-nightly/transmute-ref-src-not-a-reference.rs:17:34 + | +17 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = warning: this will change its meaning in a future release! 
+ = note: for more information, see issue #123748 + = help: specify the type explicitly + = note: this warning originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-asbytes.rs new file mode 100644 index 000000000..6ab19f3c8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-asbytes.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref` requires that the source type implements `AsBytes` +const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-asbytes.stderr new file mode 100644 index 000000000..5ae6b5a52 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-not-asbytes.stderr @@ -0,0 +1,48 @@ +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-nightly/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `NotZerocopy` + | required by a bound introduced by this call + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsAsBytes` + --> 
tests/ui-nightly/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-nightly/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-nightly/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-unsized.rs new file mode 100644 index 000000000..14e72b4dd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-unsized.rs @@ -0,0 +1,16 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from an unsized source type. 
+const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-unsized.stderr new file mode 100644 index 000000000..3ce31caf1 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-ref-src-unsized.stderr @@ -0,0 +1,127 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> 
tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + | #[inline(never)] // Make `missing_inline_in_public_items` happy. 
+ | pub fn into_t(self) -> T { + | ------ required by a bound in this associated function + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for 
`[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-nightly/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_ref` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-decrease.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-decrease.rs new file mode 100644 index 000000000..1d56831f2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-decrease.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. +const DECREASE_SIZE: u8 = transmute!(AU16(0)); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-decrease.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-decrease.stderr new file mode 100644 index 000000000..83742d782 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-size-decrease.rs:19:27 + | +19 | const DECREASE_SIZE: u8 = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AU16` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-increase.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-increase.rs new file mode 100644 index 000000000..32f936308 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-increase.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed 
under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// `transmute!` does not support transmuting from a smaller type to a larger +// one. +const INCREASE_SIZE: AU16 = transmute!(0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-increase.stderr new file mode 100644 index 000000000..230bb17a7 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-nightly/transmute-size-increase.rs:19:29 + | +19 | const INCREASE_SIZE: AU16 = transmute!(0u8); + | ^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `AU16` (16 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-src-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-src-not-asbytes.rs new file mode 100644 index 000000000..dd730216b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-src-not-asbytes.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the source type implements `AsBytes` +const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); diff --git a/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-src-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-src-not-asbytes.stderr new file mode 100644 index 000000000..20f1b18b9 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-nightly/transmute-src-not-asbytes.stderr @@ -0,0 +1,48 @@ +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-nightly/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `NotZerocopy` + | required by a bound introduced by this call + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-nightly/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-nightly/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-nightly/transmute-src-not-asbytes.rs:18:32 + | +18 | 
const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/include_value_not_from_bytes.rs b/src/rust/vendor/zerocopy/tests/ui-stable/include_value_not_from_bytes.rs new file mode 100644 index 000000000..45b6138f4 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/include_value_not_from_bytes.rs @@ -0,0 +1,12 @@ +// Copyright 2022 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +// Should fail because `UnsafeCell: !FromBytes`. +const NOT_FROM_BYTES: core::cell::UnsafeCell = + include_value!("../../testdata/include_value/data"); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/include_value_not_from_bytes.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/include_value_not_from_bytes.stderr new file mode 100644 index 000000000..4bb4e24f0 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/include_value_not_from_bytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `UnsafeCell: FromBytes` is not satisfied + --> tests/ui-stable/include_value_not_from_bytes.rs:12:5 + | +12 | include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `UnsafeCell` + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromBytes`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-stable/include_value_not_from_bytes.rs:12:5 + | +12 | include_value!("../../testdata/include_value/data"); + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `include_value` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/include_value_wrong_size.rs b/src/rust/vendor/zerocopy/tests/ui-stable/include_value_wrong_size.rs new file mode 100644 index 000000000..d87b30698 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/include_value_wrong_size.rs @@ -0,0 +1,11 @@ +// Copyright 2022 The Fuchsia Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#[macro_use] +extern crate zerocopy; + +fn main() {} + +// Should fail because the file is 4 bytes long, not 8. +const WRONG_SIZE: u64 = include_value!("../../testdata/include_value/data"); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/include_value_wrong_size.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/include_value_wrong_size.stderr new file mode 100644 index 000000000..956d74c50 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/include_value_wrong_size.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/include_value_wrong_size.rs:11:25 + | +11 | const WRONG_SIZE: u64 = include_value!("../../testdata/include_value/data"); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 4]` (32 bits) + = note: target type: `u64` (64 bits) + = note: this error originates in the macro `$crate::transmute` which comes from the expansion of the macro `include_value` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/invalid-impls/invalid-impls.rs b/src/rust/vendor/zerocopy/tests/ui-stable/invalid-impls/invalid-impls.rs new 
file mode 100644 index 000000000..ea963907d --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/invalid-impls/invalid-impls.rs @@ -0,0 +1,29 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +// Since some macros from `macros.rs` are unused. +#![allow(unused)] + +extern crate zerocopy; +extern crate zerocopy_derive; + +include!("../../../src/macros.rs"); + +use zerocopy::*; +use zerocopy_derive::*; + +fn main() {} + +#[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] +#[repr(transparent)] +struct Foo(T); + +impl_or_verify!(T => FromZeroes for Foo); +impl_or_verify!(T => FromBytes for Foo); +impl_or_verify!(T => AsBytes for Foo); +impl_or_verify!(T => Unaligned for Foo); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/invalid-impls/invalid-impls.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/invalid-impls/invalid-impls.stderr new file mode 100644 index 000000000..c7ba84b10 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/invalid-impls/invalid-impls.stderr @@ -0,0 +1,107 @@ +error[E0277]: the trait bound `T: zerocopy::FromZeroes` is not satisfied + --> tests/ui-stable/invalid-impls/invalid-impls.rs:26:37 + | +26 | impl_or_verify!(T => FromZeroes for Foo); + | ^^^^^^ the trait `zerocopy::FromZeroes` is not implemented for `T`, which is required by `Foo: zerocopy::FromZeroes` + | +note: required for `Foo` to implement `zerocopy::FromZeroes` + --> tests/ui-stable/invalid-impls/invalid-impls.rs:22:10 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::Subtrait` + --> tests/ui-stable/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `Subtrait` + | + ::: 
tests/ui-stable/invalid-impls/invalid-impls.rs:26:1 + | +26 | impl_or_verify!(T => FromZeroes for Foo); + | ------------------------------------------- in this macro invocation + = note: this error originates in the derive macro `FromZeroes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +26 | impl_or_verify!(T: zerocopy::FromZeroes => FromZeroes for Foo); + | ++++++++++++++++++++++ + +error[E0277]: the trait bound `T: zerocopy::FromBytes` is not satisfied + --> tests/ui-stable/invalid-impls/invalid-impls.rs:27:36 + | +27 | impl_or_verify!(T => FromBytes for Foo); + | ^^^^^^ the trait `zerocopy::FromBytes` is not implemented for `T`, which is required by `Foo: zerocopy::FromBytes` + | +note: required for `Foo` to implement `zerocopy::FromBytes` + --> tests/ui-stable/invalid-impls/invalid-impls.rs:22:22 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::Subtrait` + --> tests/ui-stable/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `Subtrait` + | + ::: tests/ui-stable/invalid-impls/invalid-impls.rs:27:1 + | +27 | impl_or_verify!(T => FromBytes for Foo); + | ------------------------------------------ in this macro invocation + = note: this error originates in the derive macro `FromBytes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +27 | impl_or_verify!(T: zerocopy::FromBytes => FromBytes for Foo); + | +++++++++++++++++++++ + +error[E0277]: the trait bound `T: zerocopy::AsBytes` is not satisfied + --> tests/ui-stable/invalid-impls/invalid-impls.rs:28:34 + | +28 | impl_or_verify!(T => AsBytes for Foo); + | ^^^^^^ the trait 
`zerocopy::AsBytes` is not implemented for `T`, which is required by `Foo: zerocopy::AsBytes` + | +note: required for `Foo` to implement `zerocopy::AsBytes` + --> tests/ui-stable/invalid-impls/invalid-impls.rs:22:33 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::Subtrait` + --> tests/ui-stable/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `Subtrait` + | + ::: tests/ui-stable/invalid-impls/invalid-impls.rs:28:1 + | +28 | impl_or_verify!(T => AsBytes for Foo); + | ---------------------------------------- in this macro invocation + = note: this error originates in the derive macro `AsBytes` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +28 | impl_or_verify!(T: zerocopy::AsBytes => AsBytes for Foo); + | +++++++++++++++++++ + +error[E0277]: the trait bound `T: zerocopy::Unaligned` is not satisfied + --> tests/ui-stable/invalid-impls/invalid-impls.rs:29:36 + | +29 | impl_or_verify!(T => Unaligned for Foo); + | ^^^^^^ the trait `zerocopy::Unaligned` is not implemented for `T`, which is required by `Foo: zerocopy::Unaligned` + | +note: required for `Foo` to implement `zerocopy::Unaligned` + --> tests/ui-stable/invalid-impls/invalid-impls.rs:22:42 + | +22 | #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)] + | ^^^^^^^^^ unsatisfied trait bound introduced in this `derive` macro +note: required by a bound in `_::Subtrait` + --> tests/ui-stable/invalid-impls/../../../src/macros.rs + | + | trait Subtrait: $trait {} + | ^^^^^^ required by this bound in `Subtrait` + | + ::: tests/ui-stable/invalid-impls/invalid-impls.rs:29:1 + | +29 | impl_or_verify!(T => Unaligned for Foo); + | ------------------------------------------ in this macro invocation + = note: this error 
originates in the derive macro `Unaligned` which comes from the expansion of the macro `impl_or_verify` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider restricting type parameter `T` + | +29 | impl_or_verify!(T: zerocopy::Unaligned => Unaligned for Foo); + | +++++++++++++++++++++ diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/max-align.rs b/src/rust/vendor/zerocopy/tests/ui-stable/max-align.rs new file mode 100644 index 000000000..53e3eb9b0 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/max-align.rs @@ -0,0 +1,99 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +#[repr(C, align(1))] +struct Align1; + +#[repr(C, align(2))] +struct Align2; + +#[repr(C, align(4))] +struct Align4; + +#[repr(C, align(8))] +struct Align8; + +#[repr(C, align(16))] +struct Align16; + +#[repr(C, align(32))] +struct Align32; + +#[repr(C, align(64))] +struct Align64; + +#[repr(C, align(128))] +struct Align128; + +#[repr(C, align(256))] +struct Align256; + +#[repr(C, align(512))] +struct Align512; + +#[repr(C, align(1024))] +struct Align1024; + +#[repr(C, align(2048))] +struct Align2048; + +#[repr(C, align(4096))] +struct Align4096; + +#[repr(C, align(8192))] +struct Align8192; + +#[repr(C, align(16384))] +struct Align16384; + +#[repr(C, align(32768))] +struct Align32768; + +#[repr(C, align(65536))] +struct Align65536; + +#[repr(C, align(131072))] +struct Align131072; + +#[repr(C, align(262144))] +struct Align262144; + +#[repr(C, align(524288))] +struct Align524288; + +#[repr(C, align(1048576))] +struct Align1048576; + +#[repr(C, align(2097152))] +struct Align2097152; + +#[repr(C, align(4194304))] +struct Align4194304; + +#[repr(C, align(8388608))] +struct Align8388608; + +#[repr(C, align(16777216))] +struct Align16777216; + +#[repr(C, 
align(33554432))] +struct Align33554432; + +#[repr(C, align(67108864))] +struct Align67108864; + +#[repr(C, align(134217728))] +struct Align13421772; + +#[repr(C, align(268435456))] +struct Align26843545; + +#[repr(C, align(1073741824))] +struct Align1073741824; + +fn main() {} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/max-align.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/max-align.stderr new file mode 100644 index 000000000..7e83b2f5a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/max-align.stderr @@ -0,0 +1,5 @@ +error[E0589]: invalid `repr(align)` attribute: larger than 2^29 + --> tests/ui-stable/max-align.rs:96:17 + | +96 | #[repr(C, align(1073741824))] + | ^^^^^^^^^^ diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-dst-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-dst-not-frombytes.rs new file mode 100644 index 000000000..c4caaff91 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-dst-not-frombytes.rs @@ -0,0 +1,18 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-dst-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-dst-not-frombytes.stderr new file mode 100644 index 000000000..6b1e046a4 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-dst-not-frombytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-stable/transmute-dst-not-frombytes.rs:18:41 + | +18 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `NotZerocopy` + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-stable/transmute-dst-not-frombytes.rs:18:41 + | +18 | const DST_NOT_FROM_BYTES: NotZerocopy = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-alignment-increase.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-alignment-increase.rs new file mode 100644 index 000000000..0928564dd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-alignment-increase.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a type of smaller +// alignment to one of larger alignment. +const INCREASE_ALIGNMENT: &mut AU16 = transmute_mut!(&mut [0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-alignment-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-alignment-increase.stderr new file mode 100644 index 000000000..252fec9ef --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-alignment-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-mut-alignment-increase.rs:19:39 + | +19 | const INCREASE_ALIGNMENT: &mut AU16 = transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf<[u8; 2]>` (8 bits) + = note: target type: `MaxAlignsOf<[u8; 2], AU16>` (16 bits) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-const.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-const.rs new file mode 100644 index 000000000..021b562f1 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-const.rs @@ -0,0 +1,20 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + +// `transmute_mut!` cannot, generally speaking, be used in const contexts. +const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-const.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-const.stderr new file mode 100644 index 000000000..076dcf54a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-const.stderr @@ -0,0 +1,31 @@ +warning: taking a mutable reference to a `const` item + --> tests/ui-stable/transmute-mut-const.rs:20:52 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: each usage of a `const` item creates a new temporary + = note: the mutable reference will refer to this temporary, not the original `const` item +note: `const` item defined here + --> tests/ui-stable/transmute-mut-const.rs:17:1 + | +17 | const ARRAY_OF_U8S: [u8; 2] = [0u8; 2]; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ + = note: `#[warn(const_item_mutation)]` on by default + +error[E0658]: mutable references are not allowed in constants + --> tests/ui-stable/transmute-mut-const.rs:20:52 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^ + | + = note: see issue #57349 for more information + +error[E0015]: cannot call non-const fn `transmute_mut::<'_, '_, [u8; 2], [u8; 2]>` in constants + --> tests/ui-stable/transmute-mut-const.rs:20:37 + | +20 | const CONST_CONTEXT: &mut [u8; 2] = transmute_mut!(&mut ARRAY_OF_U8S); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: calls in constants are limited to constant functions, tuple structs and tuple variants + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-generic.rs new file mode 100644 index 000000000..7068f1026 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_mut, AsBytes, FromBytes}; + +fn main() {} + +fn transmute_mut(u: &mut u8) -> &mut T { + // `transmute_mut!` requires the destination type to be concrete. + transmute_mut!(u) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-generic.stderr new file mode 100644 index 000000000..0000eb0ba --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-mut-dst-generic.rs:17:5 + | +17 | transmute_mut!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `T` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-mut-dst-generic.rs:17:5 + | +17 | transmute_mut!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (8 bits) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which 
comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-a-reference.rs new file mode 100644 index 000000000..33a9ecd95 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting into a non-reference +// destination type. +const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-a-reference.stderr new file mode 100644 index 000000000..14ee444cc --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-a-reference.stderr @@ -0,0 +1,39 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable 
reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&mut _` + | + = note: expected type `usize` + found mutable reference `&mut _` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-asbytes.rs new file mode 100644 index 000000000..b72f12928 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-asbytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `AsBytes` +const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-asbytes.stderr new file mode 100644 index 000000000..6ea5a2fb3 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-asbytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `Dst: AsBytes` is not satisfied + --> tests/ui-stable/transmute-mut-dst-not-asbytes.rs:24:36 + | +24 | const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `Dst` + | required by a bound introduced by this call + | + = help: the following other types implement trait `AsBytes`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others +note: required by a bound in `AssertDstIsAsBytes` + --> tests/ui-stable/transmute-mut-dst-not-asbytes.rs:24:36 + | +24 | const DST_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-frombytes.rs new file mode 100644 index 000000000..102fcedc9 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-frombytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , 
Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::AsBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-frombytes.stderr new file mode 100644 index 000000000..a8e39ba2a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-not-frombytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `Dst: FromBytes` is not satisfied + --> tests/ui-stable/transmute-mut-dst-not-frombytes.rs:24:38 + | +24 | const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `Dst` + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromBytes`: + () + F32 + F64 + I128 + I16 + I32 + I64 + ManuallyDrop + and $N others +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-stable/transmute-mut-dst-not-frombytes.rs:24:38 + | +24 | const DST_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-unsized.rs new file mode 100644 index 000000000..693ccda56 --- /dev/null +++ 
b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting into an unsized destination +// type. +const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-unsized.stderr new file mode 100644 index 000000000..19b88e758 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-dst-unsized.stderr @@ -0,0 +1,86 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-stable/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | 
required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsAsBytes` + --> tests/ui-stable/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-dst-unsized.rs:17:32 + 
| +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(src: Src) -> Dst; + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-dst-unsized.rs:17:32 + | +17 | const DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-illegal-lifetime.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-illegal-lifetime.rs new file mode 100644 index 000000000..c31765e4b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +fn main() {} + +fn increase_lifetime() { + let mut x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-illegal-lifetime.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-illegal-lifetime.stderr new file mode 100644 index 000000000..7f128138f --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-illegal-lifetime.stderr @@ -0,0 +1,12 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-stable/transmute-mut-illegal-lifetime.rs:14:56 + | +12 | let mut x = 0u64; + | ----- binding `x` declared here +13 | // It is illegal to increase the lifetime scope. +14 | let _: &'static mut u64 = zerocopy::transmute_mut!(&mut x); + | ---------------- ^^^^^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-decrease.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-decrease.rs new file mode 100644 index 000000000..c6eec3a9c --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-decrease.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// We require that the size of the destination type is not smaller than the size +// of the source type. 
+const DECREASE_SIZE: &mut u8 = transmute_mut!(&mut [0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-decrease.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-decrease.stderr new file mode 100644 index 000000000..239991357 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-mut-size-decrease.rs:17:32 + | +17 | const DECREASE_SIZE: &mut u8 = transmute_mut!(&mut [0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 2]` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-increase.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-increase.rs new file mode 100644 index 000000000..a4657c283 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-increase.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a smaller type to a larger +// one. 
+const INCREASE_SIZE: &mut [u8; 2] = transmute_mut!(&mut 0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-increase.stderr new file mode 100644 index 000000000..1427c7b0e --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-mut-size-increase.rs:17:37 + | +17 | const INCREASE_SIZE: &mut [u8; 2] = transmute_mut!(&mut 0u8); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `[u8; 2]` (16 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-generic.rs new file mode 100644 index 000000000..aed7ded96 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-generic.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_mut, AsBytes, FromBytes}; + +fn main() {} + +fn transmute_mut(t: &mut T) -> &mut U { + // `transmute_mut!` requires the source and destination types to be + // concrete. 
+ transmute_mut!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-generic.stderr new file mode 100644 index 000000000..ddb8bb6fe --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-mut-src-dst-generic.rs:18:5 + | +18 | transmute_mut!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `T` (this type does not have a fixed size) + = note: target type: `U` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-mut-src-dst-generic.rs:18:5 + | +18 | transmute_mut!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (size can vary because of T) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-not-references.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-not-references.rs new file mode 100644 index 000000000..98cc52088 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-not-references.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting between non-reference source +// and destination types. +const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-not-references.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-not-references.stderr new file mode 100644 index 000000000..c0d9e0f0d --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-not-references.stderr @@ -0,0 +1,15 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-src-dst-not-references.rs:17:59 + | +17 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` +help: consider mutably borrowing here + | +17 | const SRC_DST_NOT_REFERENCES: &mut usize = transmute_mut!(&mut 0usize); + | ++++ diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-unsized.rs new file mode 100644 index 000000000..1bebcf2d6 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting between unsized source and +// destination types. 
+const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-unsized.stderr new file mode 100644 index 000000000..0cb83620b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-dst-unsized.stderr @@ -0,0 +1,231 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + 
+error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` 
+note: required by a bound in `AssertDstIsFromBytes` + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertDstIsAsBytes` + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertDstIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const 
SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + | #[inline(never)] // Make `missing_inline_in_public_items` happy. + | pub fn into_t(self) -> T { + | ------ required by a bound in this associated function + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes 
from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for 
`[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(src: Src) -> Dst; + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | +17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-dst-unsized.rs:17:36 + | 
+17 | const SRC_DST_UNSIZED: &mut [u8] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-generic.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-generic.rs new file mode 100644 index 000000000..a3ef39787 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_mut, AsBytes}; + +fn main() {} + +fn transmute_mut(t: &mut T) -> &mut u8 { + // `transmute_mut!` requires the source type to be concrete. 
+ transmute_mut!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-generic.stderr new file mode 100644 index 000000000..fc4809e41 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-generic.stderr @@ -0,0 +1,10 @@ +error[E0405]: cannot find trait `FromBytes` in this scope + --> tests/ui-stable/transmute-mut-src-generic.rs:15:31 + | +15 | fn transmute_mut(t: &mut T) -> &mut u8 { + | ^^^^^^^^^ not found in this scope + | +help: consider importing this trait + | +11 + use zerocopy::FromBytes; + | diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-immutable.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-immutable.rs new file mode 100644 index 000000000..08088d0db --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-immutable.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +fn ref_src_immutable() { + // `transmute_mut!` requires that its source type be a mutable reference. 
+ let _: &mut u8 = transmute_mut!(&0u8); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-immutable.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-immutable.stderr new file mode 100644 index 000000000..0115c791d --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-immutable.stderr @@ -0,0 +1,11 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-src-immutable.rs:17:37 + | +17 | let _: &mut u8 = transmute_mut!(&0u8); + | ---------------^^^^- + | | | + | | types differ in mutability + | expected due to this + | + = note: expected mutable reference `&mut _` + found reference `&u8` diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-a-reference.rs new file mode 100644 index 000000000..bf8bc3259 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from a non-reference source +// type. 
+const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-a-reference.stderr new file mode 100644 index 000000000..8c1d9b47b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-a-reference.stderr @@ -0,0 +1,15 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-mut-src-not-a-reference.rs:17:53 + | +17 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&mut _`, found `usize` + | expected due to this + | + = note: expected mutable reference `&mut _` + found type `usize` +help: consider mutably borrowing here + | +17 | const SRC_NOT_A_REFERENCE: &mut u8 = transmute_mut!(&mut 0usize); + | ++++ diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-asbytes.rs new file mode 100644 index 000000000..6a14f12fd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-asbytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `AsBytes` +const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-asbytes.stderr new file mode 100644 index 000000000..a428ae230 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-asbytes.stderr @@ -0,0 +1,48 @@ +error[E0277]: the trait bound `Src: AsBytes` is not satisfied + --> tests/ui-stable/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `Src` + | required by a bound introduced by this call + | + = help: the following other types implement trait `AsBytes`: + () + Dst + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-stable/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: AsBytes` is not satisfied + --> tests/ui-stable/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `Src` + | + = help: the following other types implement trait `AsBytes`: + () + Dst + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a 
bound in `AssertSrcIsAsBytes` + --> tests/ui-stable/transmute-mut-src-not-asbytes.rs:24:36 + | +24 | const SRC_NOT_AS_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-frombytes.rs new file mode 100644 index 000000000..2ebe03601 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-frombytes.rs @@ -0,0 +1,24 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +#[derive(zerocopy::AsBytes)] +#[repr(C)] +struct Src; + +#[derive(zerocopy::FromZeroes, zerocopy::FromBytes, zerocopy::AsBytes)] +#[repr(C)] +struct Dst; + +// `transmute_mut` requires that the source type implements `FromBytes` +const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-frombytes.stderr new file mode 100644 index 000000000..6a21ff150 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-not-frombytes.stderr @@ -0,0 +1,48 @@ +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-stable/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `Src` + | required by a bound introduced by this call + | + 
= help: the following other types implement trait `FromBytes`: + () + Dst + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-stable/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Src: FromBytes` is not satisfied + --> tests/ui-stable/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `FromBytes` is not implemented for `Src` + | + = help: the following other types implement trait `FromBytes`: + () + Dst + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-stable/transmute-mut-src-not-frombytes.rs:24:38 + | +24 | const SRC_NOT_FROM_BYTES: &mut Dst = transmute_mut!(&mut Src); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-unsized.rs new file mode 100644 index 000000000..413dd68d8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-unsized.rs @@ -0,0 +1,16 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +extern crate zerocopy; + +use zerocopy::transmute_mut; + +fn main() {} + +// `transmute_mut!` does not support transmuting from an unsized source type. +const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-unsized.stderr new file mode 100644 index 000000000..07069ec65 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-mut-src-unsized.stderr @@ -0,0 +1,158 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsFromBytes` + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsFromBytes` + = note: this 
error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertSrcIsAsBytes` + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertSrcIsAsBytes` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is 
not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + | #[inline(never)] // Make `missing_inline_in_public_items` happy. + | pub fn into_t(self) -> T { + | ------ required by a bound in this associated function + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly 
builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-mut-src-unsized.rs:16:35 + | +16 | const SRC_UNSIZED: &mut [u8; 1] = transmute_mut!(&mut [0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_mut` + --> src/macro_util.rs + | + | pub unsafe fn transmute_mut<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_mut` + = note: this error originates in the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ptr-to-usize.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ptr-to-usize.rs new file mode 100644 index 000000000..5af885933 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ptr-to-usize.rs @@ -0,0 +1,20 @@ +// Copyright 2022 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// It is unclear whether we can or should support this transmutation, especially +// in a const context. This test ensures that even if such a transmutation +// becomes valid due to the requisite implementations of `FromBytes` being +// added, that we re-examine whether it should specifically be valid in a const +// context. 
+const POINTER_VALUE: usize = transmute!(&0usize as *const usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ptr-to-usize.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ptr-to-usize.stderr new file mode 100644 index 000000000..4f4d583db --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ptr-to-usize.stderr @@ -0,0 +1,30 @@ +error[E0277]: the trait bound `*const usize: AsBytes` is not satisfied + --> tests/ui-stable/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `*const usize` + | required by a bound introduced by this call + | + = help: the trait `AsBytes` is implemented for `usize` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `*const usize: AsBytes` is not satisfied + --> tests/ui-stable/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `*const usize` + | + = help: the trait `AsBytes` is implemented for `usize` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-ptr-to-usize.rs:20:30 + | +20 | const POINTER_VALUE: usize = transmute!(&0usize as *const usize); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-alignment-increase.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-alignment-increase.rs new file mode 100644 index 000000000..bf1988c66 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-alignment-increase.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a type of smaller +// alignment to one of larger alignment. +const INCREASE_ALIGNMENT: &AU16 = transmute_ref!(&[0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-alignment-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-alignment-increase.stderr new file mode 100644 index 000000000..a34c4065d --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-alignment-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-ref-alignment-increase.rs:19:35 + | +19 | const INCREASE_ALIGNMENT: &AU16 = transmute_ref!(&[0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf<[u8; 2]>` (8 bits) + = note: target type: `MaxAlignsOf<[u8; 2], AU16>` (16 bits) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-generic.rs new file mode 100644 index 000000000..bf4a0f9ad --- 
/dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_ref, FromBytes}; + +fn main() {} + +fn transmute_ref(u: &u8) -> &T { + // `transmute_ref!` requires the destination type to be concrete. + transmute_ref!(u) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-generic.stderr new file mode 100644 index 000000000..e30b9f67a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-ref-dst-generic.rs:17:5 + | +17 | transmute_ref!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `T` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-ref-dst-generic.rs:17:5 + | +17 | transmute_ref!(u) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (8 bits) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-mutable.rs 
b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-mutable.rs new file mode 100644 index 000000000..fa0e6e4c9 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-mutable.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +fn ref_dst_mutable() { + // `transmute_ref!` requires that its destination type be an immutable + // reference. + let _: &mut u8 = transmute_ref!(&0u8); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-mutable.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-mutable.stderr new file mode 100644 index 000000000..c70f6ea61 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-mutable.stderr @@ -0,0 +1,29 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-mutable.rs:18:22 + | +18 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-mutable.rs:18:22 + | +18 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-mutable.rs:18:22 + | +18 | let _: &mut u8 = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ types differ in 
mutability + | + = note: expected mutable reference `&mut u8` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-a-reference.rs new file mode 100644 index 000000000..de55f9acd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting into a non-reference +// destination type. +const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-a-reference.stderr new file mode 100644 index 000000000..ab3f90c2f --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-a-reference.stderr @@ -0,0 +1,29 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = 
note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-dst-not-a-reference.rs:17:36 + | +17 | const DST_NOT_A_REFERENCE: usize = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-frombytes.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-frombytes.rs new file mode 100644 index 000000000..d81f64d21 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-frombytes.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref` requires that the destination type implements `FromBytes` +const DST_NOT_FROM_BYTES: &NotZerocopy = transmute_ref!(&AU16(0)); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-frombytes.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-frombytes.stderr new file mode 100644 index 000000000..ac8ebad12 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-not-frombytes.stderr @@ -0,0 +1,25 @@ +error[E0277]: the trait bound `NotZerocopy: FromBytes` is not satisfied + --> tests/ui-stable/transmute-ref-dst-not-frombytes.rs:18:42 + | +18 | const DST_NOT_FROM_BYTES: &NotZerocopy = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `FromBytes` is not implemented for `NotZerocopy` + | required by a bound introduced by this call + | + = help: the following other types implement trait `FromBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-stable/transmute-ref-dst-not-frombytes.rs:18:42 + | +18 | const DST_NOT_FROM_BYTES: &NotZerocopy = transmute_ref!(&AU16(0)); + | ^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-unsized.rs new file mode 100644 index 000000000..625f1fac0 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting into an unsized destination +// type. +const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-unsized.stderr new file mode 100644 index 000000000..71cae8501 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-dst-unsized.stderr @@ -0,0 +1,69 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-stable/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + 
+error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(src: Src) -> Dst; + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-dst-unsized.rs:17:28 + | +17 | const DST_UNSIZED: &[u8] = transmute_ref!(&[0u8; 1]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 
'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_ref` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-illegal-lifetime.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-illegal-lifetime.rs new file mode 100644 index 000000000..8dd191e6f --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-illegal-lifetime.rs @@ -0,0 +1,15 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +fn main() {} + +fn increase_lifetime() { + let x = 0u64; + // It is illegal to increase the lifetime scope. + let _: &'static u64 = zerocopy::transmute_ref!(&x); +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-illegal-lifetime.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-illegal-lifetime.stderr new file mode 100644 index 000000000..1ef34feb7 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-illegal-lifetime.stderr @@ -0,0 +1,12 @@ +error[E0597]: `x` does not live long enough + --> tests/ui-stable/transmute-ref-illegal-lifetime.rs:14:52 + | +12 | let x = 0u64; + | - binding `x` declared here +13 | // It is illegal to increase the lifetime scope. 
+14 | let _: &'static u64 = zerocopy::transmute_ref!(&x); + | ------------ ^^ borrowed value does not live long enough + | | + | type annotation requires that `x` is borrowed for `'static` +15 | } + | - `x` dropped here while still borrowed diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-decrease.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-decrease.rs new file mode 100644 index 000000000..1d66a54ef --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-decrease.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. 
+const DECREASE_SIZE: &u8 = transmute_ref!(&[0u8; 2]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-decrease.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-decrease.stderr new file mode 100644 index 000000000..f353b26ec --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-ref-size-decrease.rs:17:28 + | +17 | const DECREASE_SIZE: &u8 = transmute_ref!(&[0u8; 2]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `[u8; 2]` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-increase.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-increase.rs new file mode 100644 index 000000000..cdca560b3 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-increase.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a smaller type to a larger +// one. 
+const INCREASE_SIZE: &[u8; 2] = transmute_ref!(&0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-increase.stderr new file mode 100644 index 000000000..f51eb63f4 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-ref-size-increase.rs:17:33 + | +17 | const INCREASE_SIZE: &[u8; 2] = transmute_ref!(&0u8); + | ^^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `[u8; 2]` (16 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-generic.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-generic.rs new file mode 100644 index 000000000..409d785b2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-generic.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_ref, AsBytes, FromBytes}; + +fn main() {} + +fn transmute_ref(t: &T) -> &U { + // `transmute_ref!` requires the source and destination types to be + // concrete. 
+ transmute_ref!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-generic.stderr new file mode 100644 index 000000000..0905dc6d5 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-ref-src-dst-generic.rs:18:5 + | +18 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `T` (this type does not have a fixed size) + = note: target type: `U` (this type does not have a fixed size) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-ref-src-dst-generic.rs:18:5 + | +18 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (size can vary because of T) + = note: target type: `MaxAlignsOf` (size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-not-references.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-not-references.rs new file mode 100644 index 000000000..114e917b5 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-not-references.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. 
+// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting between non-reference source +// and destination types. +const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-not-references.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-not-references.stderr new file mode 100644 index 000000000..8a80e991e --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-not-references.stderr @@ -0,0 +1,45 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-dst-not-references.rs:17:54 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&_`, found `usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` +help: consider borrowing here + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(&0usize); + | + + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0308]: mismatched types + --> 
tests/ui-stable/transmute-ref-src-dst-not-references.rs:17:39 + | +17 | const SRC_DST_NOT_REFERENCES: usize = transmute_ref!(0usize); + | ^^^^^^^^^^^^^^^^^^^^^^ expected `usize`, found `&_` + | + = note: expected type `usize` + found reference `&_` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-unsized.rs new file mode 100644 index 000000000..6bfe7ffdf --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-unsized.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting between unsized source and +// destination types. 
+const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-unsized.stderr new file mode 100644 index 000000000..7017c2f8f --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-dst-unsized.stderr @@ -0,0 +1,183 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time 
+ --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsFromBytes` + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsFromBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + | #[inline(never)] // Make `missing_inline_in_public_items` happy. 
+ | pub fn into_t(self) -> T { + | ------ required by a bound in this associated function + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not 
implemented for `[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | 
^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute` + --> $RUST/core/src/intrinsics.rs + | + | pub fn transmute(src: Src) -> Dst; + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute` + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_ref` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-dst-unsized.rs:17:32 + | +17 | const SRC_DST_UNSIZED: &[u8] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_ref` + = note: this error 
originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-generic.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-generic.rs new file mode 100644 index 000000000..010281c32 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-generic.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::{transmute_ref, AsBytes}; + +fn main() {} + +fn transmute_ref(t: &T) -> &u8 { + // `transmute_ref!` requires the source type to be concrete. + transmute_ref!(t) +} diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-generic.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-generic.stderr new file mode 100644 index 000000000..b6bbd1648 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-generic.stderr @@ -0,0 +1,19 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-ref-src-generic.rs:17:5 + | +17 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `T` (this type does not have a fixed size) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `$crate::assert_size_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-ref-src-generic.rs:17:5 + | +17 | transmute_ref!(t) + | ^^^^^^^^^^^^^^^^^ + | + = note: source type: `AlignOf` (size can vary because of T) + = note: target type: `MaxAlignsOf` 
(size can vary because of T) + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-a-reference.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-a-reference.rs new file mode 100644 index 000000000..90661b3e2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-a-reference.rs @@ -0,0 +1,17 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from a non-reference source +// type. +const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-a-reference.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-a-reference.stderr new file mode 100644 index 000000000..622c3db9a --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-a-reference.stderr @@ -0,0 +1,15 @@ +error[E0308]: mismatched types + --> tests/ui-stable/transmute-ref-src-not-a-reference.rs:17:49 + | +17 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(0usize); + | ---------------^^^^^^- + | | | + | | expected `&_`, found `usize` + | expected due to this + | + = note: expected reference `&_` + found type `usize` +help: consider borrowing here + | +17 | const SRC_NOT_A_REFERENCE: &u8 = transmute_ref!(&0usize); + | + diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-asbytes.rs new file mode 100644 index 
000000000..6ab19f3c8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-asbytes.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref` requires that the source type implements `AsBytes` +const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-asbytes.stderr new file mode 100644 index 000000000..3b01fada8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-not-asbytes.stderr @@ -0,0 +1,48 @@ +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-stable/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `NotZerocopy` + | required by a bound introduced by this call + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> 
tests/ui-stable/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-ref-src-not-asbytes.rs:18:33 + | +18 | const SRC_NOT_AS_BYTES: &AU16 = transmute_ref!(&NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-unsized.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-unsized.rs new file mode 100644 index 000000000..14e72b4dd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-unsized.rs @@ -0,0 +1,16 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +extern crate zerocopy; + +use zerocopy::transmute_ref; + +fn main() {} + +// `transmute_ref!` does not support transmuting from an unsized source type. 
+const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-unsized.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-unsized.stderr new file mode 100644 index 000000000..73984d041 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-ref-src-unsized.stderr @@ -0,0 +1,127 @@ +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> 
tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: all local variables must have a statically known size + = help: unsized locals are gated as an unstable feature + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by a bound in `AlignOf::::into_t` + --> src/macro_util.rs + | + | impl AlignOf { + | ^ required by this bound in `AlignOf::::into_t` + | #[inline(never)] // Make `missing_inline_in_public_items` happy. 
+ | pub fn into_t(self) -> T { + | ------ required by a bound in this associated function + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` + = note: the left-hand-side of an assignment must have a statically known size + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for 
`[u8]` +note: required by an implicit `Sized` bound in `MaxAlignsOf` + --> src/macro_util.rs + | + | pub union MaxAlignsOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `MaxAlignsOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ doesn't have a size known at compile-time + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `AlignOf` + --> src/macro_util.rs + | + | pub struct AlignOf { + | ^ required by the implicit `Sized` requirement on this type parameter in `AlignOf` + = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the size for values of type `[u8]` cannot be known at compilation time + --> tests/ui-stable/transmute-ref-src-unsized.rs:16:31 + | +16 | const SRC_UNSIZED: &[u8; 1] = transmute_ref!(&[0u8][..]); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | doesn't have a size known at compile-time + | required by a bound introduced by this call + | + = help: the trait `Sized` is not implemented for `[u8]` +note: required by an implicit `Sized` bound in `transmute_ref` + --> src/macro_util.rs + | + | pub const unsafe fn transmute_ref<'dst, 'src: 'dst, Src: 'src, Dst: 'dst>( + | ^^^ required by the implicit `Sized` requirement on this type parameter in `transmute_ref` + = note: this error originates in the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git 
a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-decrease.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-decrease.rs new file mode 100644 index 000000000..1d56831f2 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-decrease.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// Although this is not a soundness requirement, we currently require that the +// size of the destination type is not smaller than the size of the source type. +const DECREASE_SIZE: u8 = transmute!(AU16(0)); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-decrease.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-decrease.stderr new file mode 100644 index 000000000..0241662fd --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-decrease.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-size-decrease.rs:19:27 + | +19 | const DECREASE_SIZE: u8 = transmute!(AU16(0)); + | ^^^^^^^^^^^^^^^^^^^ + | + = note: source type: `AU16` (16 bits) + = note: target type: `u8` (8 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-increase.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-increase.rs new file mode 100644 index 000000000..32f936308 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-increase.rs @@ -0,0 +1,19 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a 
BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// `transmute!` does not support transmuting from a smaller type to a larger +// one. +const INCREASE_SIZE: AU16 = transmute!(0u8); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-increase.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-increase.stderr new file mode 100644 index 000000000..87d82a208 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-size-increase.stderr @@ -0,0 +1,9 @@ +error[E0512]: cannot transmute between types of different sizes, or dependently-sized types + --> tests/ui-stable/transmute-size-increase.rs:19:29 + | +19 | const INCREASE_SIZE: AU16 = transmute!(0u8); + | ^^^^^^^^^^^^^^^ + | + = note: source type: `u8` (8 bits) + = note: target type: `AU16` (16 bits) + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-src-not-asbytes.rs b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-src-not-asbytes.rs new file mode 100644 index 000000000..dd730216b --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-src-not-asbytes.rs @@ -0,0 +1,18 @@ +// Copyright 2023 The Fuchsia Authors +// +// Licensed under a BSD-style license , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. 
+ +include!("../../zerocopy-derive/tests/util.rs"); + +extern crate zerocopy; + +use zerocopy::transmute; + +fn main() {} + +// `transmute` requires that the source type implements `AsBytes` +const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); diff --git a/src/rust/vendor/zerocopy/tests/ui-stable/transmute-src-not-asbytes.stderr b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-src-not-asbytes.stderr new file mode 100644 index 000000000..836bf23e8 --- /dev/null +++ b/src/rust/vendor/zerocopy/tests/ui-stable/transmute-src-not-asbytes.stderr @@ -0,0 +1,48 @@ +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-stable/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | the trait `AsBytes` is not implemented for `NotZerocopy` + | required by a bound introduced by this call + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `NotZerocopy: AsBytes` is not satisfied + --> tests/ui-stable/transmute-src-not-asbytes.rs:18:32 + | +18 | const SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `AsBytes` is not implemented for `NotZerocopy` + | + = help: the following other types implement trait `AsBytes`: + () + AU16 + F32 + F64 + I128 + I16 + I32 + I64 + and $N others +note: required by a bound in `AssertIsAsBytes` + --> tests/ui-stable/transmute-src-not-asbytes.rs:18:32 + | +18 | const 
SRC_NOT_AS_BYTES: AU16 = transmute!(NotZerocopy(AU16(0))); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `AssertIsAsBytes` + = note: this error originates in the macro `transmute` (in Nightly builds, run with -Z macro-backtrace for more info)