zlib-rs-0.5.2/.cargo_vcs_info.json0000644000000001450000000000100124220ustar { "git": { "sha1": "5422149d3bd134569900f1f8392a7a00fdcb365a" }, "path_in_vcs": "zlib-rs" }zlib-rs-0.5.2/Cargo.lock0000644000000072610000000000100104030ustar # This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "arbitrary" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] [[package]] name = "autocfg" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "crc32fast" version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] [[package]] name = "derive_arbitrary" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", "syn", ] [[package]] name = "getrandom" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "libc" version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "memoffset" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" dependencies = [ "autocfg", ] [[package]] name = "proc-macro2" version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] [[package]] name = "quickcheck" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "rand", ] [[package]] name = "quote" version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "syn" version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "unicode-ident" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "zlib-rs" version = "0.5.2" dependencies = [ "arbitrary", "crc32fast", "memoffset", "quickcheck", ] zlib-rs-0.5.2/Cargo.toml0000644000000030730000000000100104230ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2021" rust-version = "1.75" name = "zlib-rs" version = "0.5.2" build = false publish = true autolib = false autobins = false autoexamples = false autotests = false autobenches = false description = "A memory-safe zlib implementation written in rust" homepage = "https://github.com/trifectatechfoundation/zlib-rs" readme = "README.md" license = "Zlib" repository = "https://github.com/trifectatechfoundation/zlib-rs" [features] ZLIB_DEBUG = [] __internal-fuzz = ["arbitrary"] __internal-fuzz-disable-checksum = [] __internal-test = ["quickcheck"] c-allocator = [] default = [ "std", "c-allocator", ] rust-allocator = [] std = ["rust-allocator"] [lib] name = "zlib_rs" path = "src/lib.rs" [dependencies.arbitrary] version = "1.0" features = ["derive"] optional = true [dependencies.quickcheck] version = "1.0.3" features = [] optional = true default-features = false [dev-dependencies.crc32fast] version = "1.3.2" [dev-dependencies.memoffset] version = "0.9.1" [dev-dependencies.quickcheck] version = "1.0.3" features = [] default-features = false [lints.rust] unsafe_op_in_unsafe_fn = "deny" zlib-rs-0.5.2/Cargo.toml.orig000064400000000000000000000017111046102023000141010ustar 00000000000000[package] name = "zlib-rs" readme = "README.md" description.workspace = true version.workspace = true edition.workspace = true license.workspace = true repository.workspace = true homepage.workspace = true publish.workspace = true rust-version.workspace = true [lints.rust] unsafe_op_in_unsafe_fn = "deny" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] default = ["std", "c-allocator"] std = ["rust-allocator"] c-allocator = [] # expose a malloc-based C allocator rust-allocator = [] # expose a rust global alloctor __internal-fuzz = ["arbitrary"] __internal-fuzz-disable-checksum = [] # disable checksum validation on inflate __internal-test = ["quickcheck"] ZLIB_DEBUG = [] [dependencies] arbitrary = { workspace = true, optional = true, features = ["derive"] } quickcheck = { workspace = true, optional = true } [dev-dependencies] crc32fast = "1.3.2" memoffset = "0.9.1" quickcheck.workspace = true zlib-rs-0.5.2/LICENSE000064400000000000000000000015301046102023000122160ustar 00000000000000(C) 2024 Trifecta Tech Foundation This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. 
Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. zlib-rs-0.5.2/README.md000064400000000000000000000005641046102023000124760ustar 00000000000000# ⚠️ UNSTABLE⚠️ _the public interface of this crate is unstable!_ A pure-rust implementation of [zlib](https://www.zlib.net/manual.html). For a [zlib](https://www.zlib.net/manual.html) -compatible rust api of this crate, see [`libz-rs-sys`](https://crates.io/crates/libz-rs-sys). For a more high-level interface, use [`flate2`](https://crates.io/crates/flate2). zlib-rs-0.5.2/src/adler32/avx2.rs000064400000000000000000000143701046102023000144700ustar 00000000000000//! # Safety //! //! The functions in this module should only be executed on x86 machines with the AVX2 extension. use core::arch::x86_64::{ __m256i, _mm256_add_epi32, _mm256_castsi256_si128, _mm256_extracti128_si256, _mm256_madd_epi16, _mm256_maddubs_epi16, _mm256_permutevar8x32_epi32, _mm256_sad_epu8, _mm256_slli_epi32, _mm256_zextsi128_si256, _mm_add_epi32, _mm_cvtsi128_si32, _mm_cvtsi32_si128, _mm_shuffle_epi32, _mm_unpackhi_epi64, }; use crate::adler32::{ generic::{adler32_len_16, adler32_len_64}, BASE, NMAX, }; const fn __m256i_literal(bytes: [u8; 32]) -> __m256i { // SAFETY: any valid [u8; 32] represents a valid __m256i unsafe { core::mem::transmute(bytes) } } const DOT2V: __m256i = __m256i_literal([ 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, ]); const DOT3V: __m256i = __m256i_literal([ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, ]); const ZERO: __m256i = __m256i_literal([0; 32]); /// 32 bit horizontal sum, adapted from Agner Fog's vector library. #[target_feature(enable = "avx2")] unsafe fn hsum256(x: __m256i) -> u32 { #[allow(unused_unsafe)] // because target features 1.1 unsafe { let sum1 = _mm_add_epi32(_mm256_extracti128_si256(x, 1), _mm256_castsi256_si128(x)); let sum2 = _mm_add_epi32(sum1, _mm_unpackhi_epi64(sum1, sum1)); let sum3 = _mm_add_epi32(sum2, _mm_shuffle_epi32(sum2, 1)); _mm_cvtsi128_si32(sum3) as u32 } } #[target_feature(enable = "avx2")] unsafe fn partial_hsum256(x: __m256i) -> u32 { const PERM_VEC: __m256i = __m256i_literal([ 0, 0, 0, 0, // 2, 0, 0, 0, // 4, 0, 0, 0, // 6, 0, 0, 0, // 1, 0, 0, 0, // 1, 0, 0, 0, // 1, 0, 0, 0, // 1, 0, 0, 0, // ]); #[allow(unused_unsafe)] // because target features 1.1 unsafe { let non_zero = _mm256_permutevar8x32_epi32(x, PERM_VEC); let non_zero_sse = _mm256_castsi256_si128(non_zero); let sum2 = _mm_add_epi32(non_zero_sse, _mm_unpackhi_epi64(non_zero_sse, non_zero_sse)); let sum3 = _mm_add_epi32(sum2, _mm_shuffle_epi32(sum2, 1)); _mm_cvtsi128_si32(sum3) as u32 } } pub fn adler32_avx2(adler: u32, src: &[u8]) -> u32 { assert!(crate::cpu_features::is_enabled_avx2_and_bmi2()); // SAFETY: the assertion above ensures this code is not executed unless the CPU has AVX2. 
unsafe { adler32_avx2_help(adler, src) } } #[target_feature(enable = "avx2")] #[target_feature(enable = "bmi2")] #[target_feature(enable = "bmi1")] unsafe fn adler32_avx2_help(adler: u32, src: &[u8]) -> u32 { if src.is_empty() { return adler; } // SAFETY: [u8; 32] safely transmutes into __m256i. let (before, middle, after) = unsafe { src.align_to::<__m256i>() }; let mut adler1 = (adler >> 16) & 0xffff; let mut adler0 = adler & 0xffff; let adler = if before.len() < 16 { adler32_len_16(adler0, before, adler1) } else if before.len() < 32 { adler32_len_64(adler0, before, adler1) } else { adler }; adler1 = (adler >> 16) & 0xffff; adler0 = adler & 0xffff; // use largest step possible (without causing overflow) for chunk in middle.chunks(NMAX as usize / 32) { (adler0, adler1) = unsafe { helper_32_bytes(adler0, adler1, chunk) }; } if !after.is_empty() { if after.len() < 16 { return adler32_len_16(adler0, after, adler1); } else if after.len() < 32 { return adler32_len_64(adler0, after, adler1); } else { unreachable!() } } adler0 | (adler1 << 16) } #[target_feature(enable = "avx2")] unsafe fn helper_32_bytes(mut adler0: u32, mut adler1: u32, src: &[__m256i]) -> (u32, u32) { unsafe { let mut vs1 = _mm256_zextsi128_si256(_mm_cvtsi32_si128(adler0 as i32)); let mut vs2 = _mm256_zextsi128_si256(_mm_cvtsi32_si128(adler1 as i32)); let mut vs1_0 = vs1; let mut vs3 = ZERO; for vbuf in src.iter().copied() { let vs1_sad = _mm256_sad_epu8(vbuf, ZERO); // Sum of abs diff, resulting in 2 x int32's vs1 = _mm256_add_epi32(vs1, vs1_sad); vs3 = _mm256_add_epi32(vs3, vs1_0); let v_short_sum2 = _mm256_maddubs_epi16(vbuf, DOT2V); // sum 32 uint8s to 16 shorts let vsum2 = _mm256_madd_epi16(v_short_sum2, DOT3V); // sum 16 shorts to 8 uint32s vs2 = _mm256_add_epi32(vsum2, vs2); vs1_0 = vs1; } /* Defer the multiplication with 32 to outside of the loop */ vs3 = _mm256_slli_epi32(vs3, 5); vs2 = _mm256_add_epi32(vs2, vs3); adler0 = partial_hsum256(vs1) % BASE; adler1 = hsum256(vs2) % BASE; (adler0, adler1) } } #[cfg(test)] #[cfg(target_feature = "avx2")] mod test { use super::*; #[test] fn empty_input() { let avx2 = adler32_avx2(0, &[]); let rust = crate::adler32::generic::adler32_rust(0, &[]); assert_eq!(rust, avx2); } quickcheck::quickcheck! { fn adler32_avx2_is_adler32_rust(v: Vec, start: u32) -> bool { let avx2 = adler32_avx2(start, &v); let rust = crate::adler32::generic::adler32_rust(start, &v); rust == avx2 } } const INPUT: [u8; 1024] = { let mut array = [0; 1024]; let mut i = 0; while i < array.len() { array[i] = i as u8; i += 1; } array }; #[test] fn start_alignment() { // SIMD algorithm is sensitive to alignment; for i in 0..16 { for start in [crate::ADLER32_INITIAL_VALUE as u32, 42] { let avx2 = adler32_avx2(start, &INPUT[i..]); let rust = crate::adler32::generic::adler32_rust(start, &INPUT[i..]); assert_eq!(avx2, rust, "offset = {i}, start = {start}"); } } } #[test] #[cfg_attr(miri, ignore)] fn large_input() { const DEFAULT: &[u8] = include_bytes!("../deflate/test-data/paper-100k.pdf"); let avx2 = adler32_avx2(42, DEFAULT); let rust = crate::adler32::generic::adler32_rust(42, DEFAULT); assert_eq!(avx2, rust); } } zlib-rs-0.5.2/src/adler32/generic.rs000064400000000000000000000060751046102023000152270ustar 00000000000000use super::{BASE, NMAX}; const UNROLL_MORE: bool = true; // macros for loop unrolling macro_rules! do1 { ($sum1:expr, $sum2:expr, $chunk:expr, $i:expr) => { // SAFETY: $i is bounded by either [0, 8] or [0, 16], and the caller ensures the chunk is // long enough, so we can omit bound checking. 
$sum1 += unsafe { *$chunk.get_unchecked($i) } as u32; $sum2 += $sum1; }; } macro_rules! do2 { ($sum1:expr, $sum2:expr, $chunk:expr, $i:expr) => { do1!($sum1, $sum2, $chunk, $i); do1!($sum1, $sum2, $chunk, $i + 1); }; } macro_rules! do4 { ($sum1:expr, $sum2:expr, $chunk:expr, $i:expr) => { do2!($sum1, $sum2, $chunk, $i); do2!($sum1, $sum2, $chunk, $i + 2); }; } macro_rules! do8 { ($sum1:expr, $sum2:expr, $chunk:expr, $i:expr) => { do4!($sum1, $sum2, $chunk, $i); do4!($sum1, $sum2, $chunk, $i + 4); }; } macro_rules! do16 { ($sum1:expr, $sum2:expr, $chunk:expr) => { do8!($sum1, $sum2, $chunk, 0); do8!($sum1, $sum2, $chunk, 8); }; } pub fn adler32_rust(mut adler: u32, buf: &[u8]) -> u32 { /* split Adler-32 into component sums */ let mut sum2 = (adler >> 16) & 0xffff; adler &= 0xffff; /* in case user likes doing a byte at a time, keep it fast */ if buf.len() == 1 { return adler32_len_1(adler, buf, sum2); } /* initial Adler-32 value (deferred check for len == 1 speed) */ if buf.is_empty() { return adler | (sum2 << 16); } /* in case short lengths are provided, keep it somewhat fast */ if buf.len() < 16 { return adler32_len_16(adler, buf, sum2); } let mut it = buf.chunks_exact(NMAX as usize); for big_chunk in it.by_ref() { const N: usize = if UNROLL_MORE { 16 } else { 8 } as usize; let it = big_chunk.chunks_exact(N); for chunk in it { if N == 16 { do16!(adler, sum2, chunk); } else { do8!(adler, sum2, chunk, 0); } } adler %= BASE; sum2 %= BASE; } /* do remaining bytes (less than NMAX, still just one modulo) */ adler32_len_64(adler, it.remainder(), sum2) } pub(crate) fn adler32_len_1(mut adler: u32, buf: &[u8], mut sum2: u32) -> u32 { adler += buf[0] as u32; adler %= BASE; sum2 += adler; sum2 %= BASE; adler | (sum2 << 16) } pub(crate) fn adler32_len_16(mut adler: u32, buf: &[u8], mut sum2: u32) -> u32 { for b in buf { adler += (*b) as u32; sum2 += adler; } adler %= BASE; sum2 %= BASE; /* only added so many BASE's */ /* return recombined sums */ adler | (sum2 << 16) } pub(crate) fn adler32_len_64(mut adler: u32, buf: &[u8], mut sum2: u32) -> u32 { const N: usize = if UNROLL_MORE { 16 } else { 8 }; let mut it = buf.chunks_exact(N); for chunk in it.by_ref() { if N == 16 { do16!(adler, sum2, chunk); } else { do8!(adler, sum2, chunk, 0); } } /* Process tail (len < 16). */ adler32_len_16(adler, it.remainder(), sum2) } zlib-rs-0.5.2/src/adler32/neon.rs000064400000000000000000000206221046102023000145440ustar 00000000000000//! # Safety //! //! The functions in this module should only be executed on aarch64 machines with the Neon //! extension. use core::arch::aarch64::{ uint16x8_t, uint16x8x2_t, uint16x8x4_t, uint8x16_t, vaddq_u32, vaddw_high_u8, vaddw_u8, vdupq_n_u16, vdupq_n_u32, vget_high_u32, vget_lane_u32, vget_low_u16, vget_low_u32, vget_low_u8, vld1q_u8_x4, vmlal_high_u16, vmlal_u16, vpadalq_u16, vpadalq_u8, vpadd_u32, vpaddlq_u8, vsetq_lane_u32, vshlq_n_u32, }; use crate::adler32::{ generic::{adler32_len_1, adler32_len_16}, BASE, NMAX, }; const TAPS: [uint16x8x4_t; 2] = unsafe { core::mem::transmute::<[u16; 64], [uint16x8x4_t; 2]>([ 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, ]) }; pub fn adler32_neon(adler: u32, buf: &[u8]) -> u32 { assert!(crate::cpu_features::is_enabled_neon()); // SAFETY: the assertion above ensures this code is not executed unless the CPU has Neon // extensions. 
unsafe { adler32_neon_internal(adler, buf) } } #[target_feature(enable = "neon")] unsafe fn adler32_neon_internal(mut adler: u32, buf: &[u8]) -> u32 { /* split Adler-32 into component sums */ let sum2 = (adler >> 16) & 0xffff; adler &= 0xffff; /* in case user likes doing a byte at a time, keep it fast */ if buf.len() == 1 { return adler32_len_1(adler, buf, sum2); } /* initial Adler-32 value (deferred check for len == 1 speed) */ if buf.is_empty() { return adler | (sum2 << 16); } /* in case short lengths are provided, keep it somewhat fast */ if buf.len() < 16 { return adler32_len_16(adler, buf, sum2); } // Split Adler-32 into component sums, it can be supplied by the caller sites (e.g. in a PNG file). let mut pair = (adler, sum2); // If memory is not SIMD aligned, do scalar sums to an aligned // offset, provided that doing so doesn't completely eliminate // SIMD operation. Aligned loads are still faster on ARM, even // though there's no explicit aligned load instruction const _: () = assert!(core::mem::align_of::() == 16); let (before, middle, after) = unsafe { buf.align_to::() }; pair = handle_tail(pair, before); for chunk in middle.chunks(NMAX as usize / core::mem::size_of::()) { pair = unsafe { accum32(pair, chunk) }; pair.0 %= BASE; pair.1 %= BASE; } if !after.is_empty() { pair = handle_tail(pair, after); pair.0 %= BASE; pair.1 %= BASE; } // D = B * 65536 + A, see: https://en.wikipedia.org/wiki/Adler-32. (pair.1 << 16) | pair.0 } fn handle_tail(mut pair: (u32, u32), buf: &[u8]) -> (u32, u32) { for x in buf { pair.0 += *x as u32; pair.1 += pair.0; } pair } #[allow(unsafe_op_in_unsafe_fn)] #[target_feature(enable = "neon")] unsafe fn accum32(s: (u32, u32), buf: &[uint8x16_t]) -> (u32, u32) { let mut adacc = vdupq_n_u32(0); let mut s2acc = vdupq_n_u32(0); adacc = vsetq_lane_u32(s.0, adacc, 0); s2acc = vsetq_lane_u32(s.1, s2acc, 0); let mut s3acc = vdupq_n_u32(0); let mut adacc_prev = adacc; let mut s2_0 = vdupq_n_u16(0); let mut s2_1 = vdupq_n_u16(0); let mut s2_2 = vdupq_n_u16(0); let mut s2_3 = vdupq_n_u16(0); let mut s2_4 = vdupq_n_u16(0); let mut s2_5 = vdupq_n_u16(0); let mut s2_6 = vdupq_n_u16(0); let mut s2_7 = vdupq_n_u16(0); let mut it = buf.chunks_exact(4); for chunk in &mut it { let d0_d3 = vld1q_u8_x4(chunk.as_ptr() as *const u8); // Unfortunately it doesn't look like there's a direct sum 8 bit to 32 // bit instruction, we'll have to make due summing to 16 bits first let hsum = uint16x8x2_t(vpaddlq_u8(d0_d3.0), vpaddlq_u8(d0_d3.1)); let hsum_fold = uint16x8x2_t(vpadalq_u8(hsum.0, d0_d3.2), vpadalq_u8(hsum.1, d0_d3.3)); adacc = vpadalq_u16(adacc, hsum_fold.0); s3acc = vaddq_u32(s3acc, adacc_prev); adacc = vpadalq_u16(adacc, hsum_fold.1); // If we do straight widening additions to the 16 bit values, we don't incur // the usual penalties of a pairwise add. We can defer the multiplications // until the very end. These will not overflow because we are incurring at // most 408 loop iterations (NMAX / 64), and a given lane is only going to be // summed into once. 
This means for the maximum input size, the largest value // we will see is 255 * 102 = 26010, safely under uint16 max s2_0 = vaddw_u8(s2_0, vget_low_u8(d0_d3.0)); s2_1 = vaddw_high_u8(s2_1, d0_d3.0); s2_2 = vaddw_u8(s2_2, vget_low_u8(d0_d3.1)); s2_3 = vaddw_high_u8(s2_3, d0_d3.1); s2_4 = vaddw_u8(s2_4, vget_low_u8(d0_d3.2)); s2_5 = vaddw_high_u8(s2_5, d0_d3.2); s2_6 = vaddw_u8(s2_6, vget_low_u8(d0_d3.3)); s2_7 = vaddw_high_u8(s2_7, d0_d3.3); adacc_prev = adacc; } s3acc = vshlq_n_u32(s3acc, 6); let remainder = it.remainder(); if !remainder.is_empty() { let mut s3acc_0 = vdupq_n_u32(0); for d0 in remainder.iter().copied() { let adler: uint16x8_t = vpaddlq_u8(d0); s2_6 = vaddw_u8(s2_6, vget_low_u8(d0)); s2_7 = vaddw_high_u8(s2_7, d0); adacc = vpadalq_u16(adacc, adler); s3acc_0 = vaddq_u32(s3acc_0, adacc_prev); adacc_prev = adacc; } s3acc_0 = vshlq_n_u32(s3acc_0, 4); s3acc = vaddq_u32(s3acc_0, s3acc); } let t0_t3 = TAPS[0]; let t4_t7 = TAPS[1]; let mut s2acc_0 = vdupq_n_u32(0); let mut s2acc_1 = vdupq_n_u32(0); let mut s2acc_2 = vdupq_n_u32(0); s2acc = vmlal_high_u16(s2acc, t0_t3.0, s2_0); s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.0), vget_low_u16(s2_0)); s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.1, s2_1); s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.1), vget_low_u16(s2_1)); s2acc = vmlal_high_u16(s2acc, t0_t3.2, s2_2); s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t0_t3.2), vget_low_u16(s2_2)); s2acc_1 = vmlal_high_u16(s2acc_1, t0_t3.3, s2_3); s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t0_t3.3), vget_low_u16(s2_3)); s2acc = vmlal_high_u16(s2acc, t4_t7.0, s2_4); s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.0), vget_low_u16(s2_4)); s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.1, s2_5); s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.1), vget_low_u16(s2_5)); s2acc = vmlal_high_u16(s2acc, t4_t7.2, s2_6); s2acc_0 = vmlal_u16(s2acc_0, vget_low_u16(t4_t7.2), vget_low_u16(s2_6)); s2acc_1 = vmlal_high_u16(s2acc_1, t4_t7.3, s2_7); s2acc_2 = vmlal_u16(s2acc_2, vget_low_u16(t4_t7.3), vget_low_u16(s2_7)); s2acc = vaddq_u32(s2acc_0, s2acc); s2acc_2 = vaddq_u32(s2acc_1, s2acc_2); s2acc = vaddq_u32(s2acc, s2acc_2); let s2acc = vaddq_u32(s2acc, s3acc); let adacc2 = vpadd_u32(vget_low_u32(adacc), vget_high_u32(adacc)); let s2acc2 = vpadd_u32(vget_low_u32(s2acc), vget_high_u32(s2acc)); let as_ = vpadd_u32(adacc2, s2acc2); (vget_lane_u32(as_, 0), vget_lane_u32(as_, 1)) } #[cfg(test)] mod tests { use super::*; quickcheck::quickcheck! 
{ fn adler32_neon_is_adler32_rust(v: Vec, start: u32) -> bool { let neon = adler32_neon(start, &v); let rust = crate::adler32::generic::adler32_rust(start, &v); rust == neon } } const INPUT: [u8; 1024] = { let mut array = [0; 1024]; let mut i = 0; while i < array.len() { array[i] = i as u8; i += 1; } array }; #[test] fn start_alignment() { // SIMD algorithm is sensitive to alignment; for i in 0..16 { for start in [crate::ADLER32_INITIAL_VALUE as u32, 42] { let neon = adler32_neon(start, &INPUT[i..]); let rust = crate::adler32::generic::adler32_rust(start, &INPUT[i..]); assert_eq!(neon, rust, "offset = {i}, start = {start}"); } } } #[test] fn large_input() { const DEFAULT: &[u8] = include_bytes!("../deflate/test-data/paper-100k.pdf"); let neon = adler32_neon(42, DEFAULT); let rust = crate::adler32::generic::adler32_rust(42, DEFAULT); assert_eq!(neon, rust); } } zlib-rs-0.5.2/src/adler32/wasm.rs000064400000000000000000000076721046102023000145660ustar 00000000000000/// Adapted from https://github.com/mcountryman/simd-adler32/blob/main/src/imp/wasm.rs const MOD: u32 = 65521; const NMAX: usize = 5552; const BLOCK_SIZE: usize = 32; const CHUNK_SIZE: usize = NMAX / BLOCK_SIZE * BLOCK_SIZE; #[cfg(target_arch = "wasm32")] use core::arch::wasm32::*; #[cfg(target_arch = "wasm64")] use core::arch::wasm64::*; pub fn adler32_wasm(checksum: u32, data: &[u8]) -> u32 { let a = checksum as u16; let b = (checksum >> 16) as u16; let (a, b) = update(a, b, data); (u32::from(b) << 16) | u32::from(a) } pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) { update_imp(a, b, data) } #[inline] #[target_feature(enable = "simd128")] fn update_imp(a: u16, b: u16, data: &[u8]) -> (u16, u16) { let mut a = a as u32; let mut b = b as u32; let chunks = data.chunks_exact(CHUNK_SIZE); let remainder = chunks.remainder(); for chunk in chunks { update_chunk_block(&mut a, &mut b, chunk); } update_block(&mut a, &mut b, remainder); (a as u16, b as u16) } fn update_chunk_block(a: &mut u32, b: &mut u32, chunk: &[u8]) { debug_assert_eq!( chunk.len(), CHUNK_SIZE, "Unexpected chunk size (expected {}, got {})", CHUNK_SIZE, chunk.len() ); reduce_add_blocks(a, b, chunk); *a %= MOD; *b %= MOD; } fn update_block(a: &mut u32, b: &mut u32, chunk: &[u8]) { debug_assert!( chunk.len() <= CHUNK_SIZE, "Unexpected chunk size (expected <= {}, got {})", CHUNK_SIZE, chunk.len() ); for byte in reduce_add_blocks(a, b, chunk) { *a += *byte as u32; *b += *a; } *a %= MOD; *b %= MOD; } #[inline(always)] fn reduce_add_blocks<'a>(a: &mut u32, b: &mut u32, chunk: &'a [u8]) -> &'a [u8] { if chunk.len() < BLOCK_SIZE { return chunk; } let blocks = chunk.chunks_exact(BLOCK_SIZE); let blocks_remainder = blocks.remainder(); let weight_hi_v = get_weight_hi(); let weight_lo_v = get_weight_lo(); let mut p_v = u32x4(*a * blocks.len() as u32, 0, 0, 0); let mut a_v = u32x4(0, 0, 0, 0); let mut b_v = u32x4(*b, 0, 0, 0); for block in blocks { let block_ptr = block.as_ptr() as *const v128; // SAFETY: the chunks_exact() call earlier guarantees the block is 32-bytes, thus we can // dereference 16-byte pointers to high and low bytes. The underlying data is Copy and is // properly initialized. 
let v_lo = unsafe { block_ptr.read_unaligned() }; let v_hi = unsafe { block_ptr.add(1).read_unaligned() }; p_v = u32x4_add(p_v, a_v); a_v = u32x4_add(a_v, u32x4_extadd_quarters_u8x16(v_lo)); let mad = i32x4_dot_i8x16(v_lo, weight_lo_v); b_v = u32x4_add(b_v, mad); a_v = u32x4_add(a_v, u32x4_extadd_quarters_u8x16(v_hi)); let mad = i32x4_dot_i8x16(v_hi, weight_hi_v); b_v = u32x4_add(b_v, mad); } b_v = u32x4_add(b_v, u32x4_shl(p_v, 5)); *a += reduce_add(a_v); *b = reduce_add(b_v); blocks_remainder } #[inline(always)] fn i32x4_dot_i8x16(a: v128, b: v128) -> v128 { let a_lo = u16x8_extend_low_u8x16(a); let a_hi = u16x8_extend_high_u8x16(a); let b_lo = u16x8_extend_low_u8x16(b); let b_hi = u16x8_extend_high_u8x16(b); let lo = i32x4_dot_i16x8(a_lo, b_lo); let hi = i32x4_dot_i16x8(a_hi, b_hi); i32x4_add(lo, hi) } #[inline(always)] fn u32x4_extadd_quarters_u8x16(a: v128) -> v128 { u32x4_extadd_pairwise_u16x8(u16x8_extadd_pairwise_u8x16(a)) } #[inline(always)] fn reduce_add(v: v128) -> u32 { let arr: [u32; 4] = unsafe { core::mem::transmute(v) }; let mut sum = 0u32; for val in arr { sum = sum.wrapping_add(val); } sum } #[inline(always)] fn get_weight_lo() -> v128 { u8x16( 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, ) } #[inline(always)] fn get_weight_hi() -> v128 { u8x16(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1) } zlib-rs-0.5.2/src/adler32.rs000064400000000000000000000066061046102023000136130ustar 00000000000000#[cfg(target_arch = "x86_64")] mod avx2; mod generic; #[cfg(target_arch = "aarch64")] mod neon; #[cfg(any(target_arch = "wasm32", target_arch = "wasm64"))] mod wasm; pub fn adler32(start_checksum: u32, data: &[u8]) -> u32 { #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_avx2_and_bmi2() { return avx2::adler32_avx2(start_checksum, data); } #[cfg(target_arch = "aarch64")] if crate::cpu_features::is_enabled_neon() { return self::neon::adler32_neon(start_checksum, data); } #[cfg(any(target_arch = "wasm32", target_arch = "wasm64"))] if crate::cpu_features::is_enabled_simd128() { return self::wasm::adler32_wasm(start_checksum, data); } generic::adler32_rust(start_checksum, data) } pub fn adler32_fold_copy(start_checksum: u32, dst: &mut [u8], src: &[u8]) -> u32 { debug_assert!(dst.len() >= src.len(), "{} < {}", dst.len(), src.len()); // integrating the memcpy into the adler32 function did not have any benefits, and in fact was // a bit slower for very small chunk sizes. 
dst[..src.len()].copy_from_slice(src); adler32(start_checksum, src) } pub fn adler32_combine(adler1: u32, adler2: u32, len2: u64) -> u32 { const BASE: u64 = self::BASE as u64; let rem = len2 % BASE; let adler1 = adler1 as u64; let adler2 = adler2 as u64; /* the derivation of this formula is left as an exercise for the reader */ let mut sum1 = adler1 & 0xffff; let mut sum2 = rem * sum1; sum2 %= BASE; sum1 += (adler2 & 0xffff) + BASE - 1; sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; if sum1 >= BASE { sum1 -= BASE; } if sum1 >= BASE { sum1 -= BASE; } if sum2 >= (BASE << 1) { sum2 -= BASE << 1; } if sum2 >= BASE { sum2 -= BASE; } (sum1 | (sum2 << 16)) as u32 } // inefficient but correct, useful for testing #[cfg(test)] fn naive_adler32(start_checksum: u32, data: &[u8]) -> u32 { const MOD_ADLER: u32 = 65521; // Largest prime smaller than 2^16 let mut a = start_checksum & 0xFFFF; let mut b = (start_checksum >> 16) & 0xFFFF; for &byte in data { a = (a + byte as u32) % MOD_ADLER; b = (b + a) % MOD_ADLER; } (b << 16) | a } const BASE: u32 = 65521; /* largest prime smaller than 65536 */ const NMAX: u32 = 5552; #[cfg(test)] mod test { use super::*; #[test] fn naive_is_fancy_small_inputs() { for i in 0..128 { let v = (0u8..i).collect::>(); assert_eq!(naive_adler32(1, &v), generic::adler32_rust(1, &v)); } } #[test] fn test_adler32_combine() { ::quickcheck::quickcheck(test as fn(_) -> _); fn test(data: Vec) -> bool { let Some(buf_len) = data.first().copied() else { return true; }; let buf_size = Ord::max(buf_len, 1) as usize; let mut adler1 = 1; let mut adler2 = 1; for chunk in data.chunks(buf_size) { adler1 = adler32(adler1, chunk); } adler2 = adler32(adler2, &data); assert_eq!(adler1, adler2); let combine1 = adler32_combine(adler1, adler2, data.len() as _); let combine2 = adler32_combine(adler1, adler1, data.len() as _); assert_eq!(combine1, combine2); true } } } zlib-rs-0.5.2/src/allocate.rs000064400000000000000000000372261046102023000141450ustar 00000000000000#![allow(unpredictable_function_pointer_comparisons)] #![allow(unsafe_op_in_unsafe_fn)] #[cfg(unix)] use core::ffi::c_int; use core::{ alloc::Layout, ffi::{c_uint, c_void}, marker::PhantomData, mem, ptr::NonNull, }; #[cfg(feature = "rust-allocator")] use alloc::alloc::GlobalAlloc; #[allow(non_camel_case_types)] type size_t = usize; const ALIGN: u8 = 64; // posix_memalign requires that the alignment be a power of two and a multiple of sizeof(void*). 
const _: () = assert!(ALIGN.count_ones() == 1); const _: () = assert!(ALIGN as usize % mem::size_of::<*mut c_void>() == 0); /// # Safety /// /// This function is safe, but must have this type signature to be used elsewhere in the library #[cfg(unix)] unsafe extern "C" fn zalloc_c(opaque: *mut c_void, items: c_uint, size: c_uint) -> *mut c_void { let _ = opaque; extern "C" { fn posix_memalign(memptr: *mut *mut c_void, align: size_t, size: size_t) -> c_int; } let mut ptr = core::ptr::null_mut(); // SAFETY: ALIGN is a power of 2 and multiple of sizeof(void*), as required by posix_memalign match unsafe { posix_memalign(&mut ptr, ALIGN.into(), items as size_t * size as size_t) } { 0 => ptr, _ => core::ptr::null_mut(), } } /// # Safety /// /// This function is safe, but must have this type signature to be used elsewhere in the library #[cfg(not(unix))] unsafe extern "C" fn zalloc_c(opaque: *mut c_void, items: c_uint, size: c_uint) -> *mut c_void { let _ = opaque; extern "C" { fn malloc(size: size_t) -> *mut c_void; } malloc(items as size_t * size as size_t) } /// # Safety /// /// This function is safe, but must have this type signature to be used elsewhere in the library unsafe extern "C" fn zalloc_c_calloc( opaque: *mut c_void, items: c_uint, size: c_uint, ) -> *mut c_void { let _ = opaque; extern "C" { fn calloc(nitems: size_t, size: size_t) -> *mut c_void; } calloc(items as size_t, size as size_t) } /// # Safety /// /// The `ptr` must be allocated with the allocator that is used internally by `zcfree` unsafe extern "C" fn zfree_c(opaque: *mut c_void, ptr: *mut c_void) { let _ = opaque; extern "C" { fn free(p: *mut c_void); } unsafe { free(ptr) } } /// # Safety /// /// This function is safe to call. #[cfg(feature = "rust-allocator")] unsafe extern "C" fn zalloc_rust(_opaque: *mut c_void, count: c_uint, size: c_uint) -> *mut c_void { let size = count as usize * size as usize; // internally, we want to align allocations to 64 bytes (in part for SIMD reasons) let layout = Layout::from_size_align(size, ALIGN.into()).unwrap(); let ptr = std::alloc::System.alloc(layout); ptr as *mut c_void } /// # Safety /// /// This function is safe to call. #[cfg(feature = "rust-allocator")] unsafe extern "C" fn zalloc_rust_calloc( _opaque: *mut c_void, count: c_uint, size: c_uint, ) -> *mut c_void { let size = count as usize * size as usize; // internally, we want to align allocations to 64 bytes (in part for SIMD reasons) let layout = Layout::from_size_align(size, ALIGN.into()).unwrap(); let ptr = std::alloc::System.alloc_zeroed(layout); ptr as *mut c_void } /// # Safety /// /// - `ptr` must be allocated with the rust `alloc::System` allocator /// - `opaque` is a `&usize` that represents the size of the allocation #[cfg(feature = "rust-allocator")] unsafe extern "C" fn zfree_rust(opaque: *mut c_void, ptr: *mut c_void) { if ptr.is_null() { return; } // we can't really do much else. Deallocating with an invalid layout is UB. 
debug_assert!(!opaque.is_null()); if opaque.is_null() { return; } let size = *(opaque as *mut usize); let layout = Layout::from_size_align(size, ALIGN.into()); let layout = layout.unwrap(); std::alloc::System.dealloc(ptr.cast(), layout); } #[cfg(test)] unsafe extern "C" fn zalloc_fail(_: *mut c_void, _: c_uint, _: c_uint) -> *mut c_void { core::ptr::null_mut() } #[cfg(test)] unsafe extern "C" fn zfree_fail(_: *mut c_void, _: *mut c_void) { // do nothing } #[derive(Clone, Copy)] #[repr(C)] pub struct Allocator<'a> { pub zalloc: crate::c_api::alloc_func, pub zfree: crate::c_api::free_func, pub opaque: crate::c_api::voidpf, pub _marker: PhantomData<&'a ()>, } unsafe impl Sync for Allocator<'static> {} #[cfg(feature = "rust-allocator")] pub static RUST: Allocator<'static> = Allocator { zalloc: zalloc_rust, zfree: zfree_rust, opaque: core::ptr::null_mut(), _marker: PhantomData, }; #[cfg(feature = "c-allocator")] pub static C: Allocator<'static> = Allocator { zalloc: zalloc_c, zfree: zfree_c, opaque: core::ptr::null_mut(), _marker: PhantomData, }; #[cfg(test)] static FAIL: Allocator<'static> = Allocator { zalloc: zalloc_fail, zfree: zfree_fail, opaque: core::ptr::null_mut(), _marker: PhantomData, }; impl Allocator<'_> { fn allocate_layout(&self, layout: Layout) -> *mut c_void { assert!(layout.align() <= ALIGN.into()); // Special case for the Rust `alloc` backed allocator #[cfg(feature = "rust-allocator")] if self.zalloc == RUST.zalloc { let ptr = unsafe { (RUST.zalloc)(self.opaque, layout.size() as _, 1) }; debug_assert_eq!(ptr as usize % layout.align(), 0); return ptr; } // General case for c-style allocation // We cannot rely on the allocator giving properly aligned allocations and have to fix that ourselves. // // The general approach is to allocate a bit more than the layout needs, so that we can // give the application a properly aligned address and also store the real allocation // pointer in the allocation so that `free` can free the real allocation pointer. // // // Example: The layout represents `(u32, u32)`, with an alignment of 4 bytes and a // total size of 8 bytes. // // Assume that the allocator will give us address `0x07`. We need that to be a multiple // of the alignment, so that shifts the starting position to `0x08`. Then we also need // to store the pointer to the start of the allocation so that `free` can free that // pointer, bumping to `0x10`. The `0x10` pointer is then the pointer that the application // deals with. When free'ing, the original allocation pointer can be read from `0x10 - size_of::<*const c_void>()`. // // Of course there does need to be enough space in the allocation such that when we // shift the start forwards, the end is still within the allocation. Hence we allocate // `extra_space` bytes: enough for a full alignment plus a pointer. // we need at least // // - `align` extra space so that no matter what pointer we get from zalloc, we can shift the start of the // allocation by at most `align - 1` so that `ptr as usize % align == 0 // - `size_of::<*mut _>` extra space so that after aligning to `align`, // there is `size_of::<*mut _>` space to store the pointer to the allocation. // This pointer is then retrieved in `free` let extra_space = core::mem::size_of::<*mut c_void>() + layout.align(); // Safety: we assume allocating works correctly in the safety assumptions on // `DeflateStream` and `InflateStream`. 
let ptr = unsafe { (self.zalloc)(self.opaque, (layout.size() + extra_space) as _, 1) }; if ptr.is_null() { return ptr; } // Calculate return pointer address with space enough to store original pointer let align_diff = (ptr as usize).next_multiple_of(layout.align()) - (ptr as usize); // Safety: offset is smaller than 64, and we allocated 64 extra bytes in the allocation let mut return_ptr = unsafe { ptr.cast::().add(align_diff) }; // if there is not enough space to store a pointer we need to make more if align_diff < core::mem::size_of::<*mut c_void>() { // # Safety // // - `return_ptr` is well-aligned, therefore `return_ptr + align` is also well-aligned // - we reserve `size_of::<*mut _> + align` extra space in the allocation, so // `ptr + align_diff + align` is still valid for (at least) `layout.size` bytes let offset = Ord::max(core::mem::size_of::<*mut c_void>(), layout.align()); return_ptr = unsafe { return_ptr.add(offset) }; } // Store the original pointer for free() // // Safety: `align >= size_of::<*mut _>`, so there is now space for a pointer before `return_ptr` // in the allocation unsafe { let original_ptr = return_ptr.sub(core::mem::size_of::<*mut c_void>()); core::ptr::write_unaligned(original_ptr.cast::<*mut c_void>(), ptr); }; // Return properly aligned pointer in allocation let ptr = return_ptr.cast::(); debug_assert_eq!(ptr as usize % layout.align(), 0); ptr } fn allocate_layout_zeroed(&self, layout: Layout) -> *mut c_void { assert!(layout.align() <= ALIGN.into()); #[cfg(feature = "rust-allocator")] if self.zalloc == RUST.zalloc { let ptr = unsafe { zalloc_rust_calloc(self.opaque, layout.size() as _, 1) }; debug_assert_eq!(ptr as usize % layout.align(), 0); return ptr; } #[cfg(feature = "c-allocator")] if self.zalloc == C.zalloc { let alloc = Allocator { zalloc: zalloc_c_calloc, zfree: zfree_c, opaque: core::ptr::null_mut(), _marker: PhantomData, }; return alloc.allocate_layout(layout); } // create the allocation (contents are uninitialized) let ptr = self.allocate_layout(layout); if !ptr.is_null() { // zero all contents (thus initializing the buffer) unsafe { core::ptr::write_bytes(ptr, 0u8, layout.size()) }; } ptr } pub fn allocate_raw(&self) -> Option> { NonNull::new(self.allocate_layout(Layout::new::()).cast()) } pub fn allocate_slice_raw(&self, len: usize) -> Option> { NonNull::new(self.allocate_layout(Layout::array::(len).ok()?).cast()) } pub fn allocate_zeroed_raw(&self) -> Option> { NonNull::new(self.allocate_layout_zeroed(Layout::new::()).cast()) } pub fn allocate_zeroed_buffer(&self, len: usize) -> Option> { let layout = Layout::array::(len).ok()?; NonNull::new(self.allocate_layout_zeroed(layout).cast()) } /// # Panics /// /// - when `len` is 0 /// /// # Safety /// /// - `ptr` must be allocated with this allocator /// - `len` must be the number of `T`s that are in this allocation #[allow(unused)] // Rust needs `len` for deallocation pub unsafe fn deallocate(&self, ptr: *mut T, len: usize) { if !ptr.is_null() { // Special case for the Rust `alloc` backed allocator #[cfg(feature = "rust-allocator")] if self.zfree == RUST.zfree { assert_ne!(len, 0, "invalid size for {ptr:?}"); let mut size = core::mem::size_of::() * len; return (RUST.zfree)(&mut size as *mut usize as *mut c_void, ptr.cast()); } // General case for c-style allocation let original_ptr = (ptr as *mut u8).sub(core::mem::size_of::<*const c_void>()); let free_ptr = core::ptr::read_unaligned(original_ptr as *mut *mut c_void); (self.zfree)(self.opaque, free_ptr) } } } #[cfg(test)] mod tests { use 
core::sync::atomic::{AtomicPtr, Ordering}; use std::sync::Mutex; use super::*; static PTR: AtomicPtr = AtomicPtr::new(core::ptr::null_mut()); static MUTEX: Mutex<()> = Mutex::new(()); unsafe extern "C" fn unaligned_alloc( _opaque: *mut c_void, _items: c_uint, _size: c_uint, ) -> *mut c_void { PTR.load(Ordering::Relaxed) } unsafe extern "C" fn unaligned_free(_opaque: *mut c_void, ptr: *mut c_void) { let expected = PTR.load(Ordering::Relaxed); assert_eq!(expected, ptr) } fn unaligned_allocator_help() { let mut buf = [0u8; 1024]; // we don't want anyone else messing with the PTR static let _guard = MUTEX.lock().unwrap(); for i in 0..64 { let ptr = unsafe { buf.as_mut_ptr().add(i).cast() }; PTR.store(ptr, Ordering::Relaxed); let allocator = Allocator { zalloc: unaligned_alloc, zfree: unaligned_free, opaque: core::ptr::null_mut(), _marker: PhantomData, }; let ptr = allocator.allocate_raw::().unwrap().as_ptr(); assert_eq!(ptr as usize % core::mem::align_of::(), 0); unsafe { allocator.deallocate(ptr, 1) } let ptr = allocator.allocate_slice_raw::(10).unwrap().as_ptr(); assert_eq!(ptr as usize % core::mem::align_of::(), 0); unsafe { allocator.deallocate(ptr, 10) } } } #[test] fn unaligned_allocator_0() { unaligned_allocator_help::<()>() } #[test] fn unaligned_allocator_1() { unaligned_allocator_help::() } #[test] fn unaligned_allocator_2() { unaligned_allocator_help::() } #[test] fn unaligned_allocator_4() { unaligned_allocator_help::() } #[test] fn unaligned_allocator_8() { unaligned_allocator_help::() } #[test] fn unaligned_allocator_16() { unaligned_allocator_help::() } #[test] fn unaligned_allocator_32() { #[repr(C, align(32))] struct Align32(u8); unaligned_allocator_help::() } #[test] fn unaligned_allocator_64() { #[repr(C, align(64))] struct Align64(u8); unaligned_allocator_help::() } fn test_allocate_zeroed_help(allocator: Allocator) { #[repr(C, align(64))] struct Align64(u8); let ptr = allocator.allocate_raw::(); assert!(ptr.is_some()); unsafe { allocator.deallocate(ptr.unwrap().as_ptr(), 1) }; } #[test] fn test_allocate_zeroed() { #[cfg(feature = "rust-allocator")] test_allocate_zeroed_help(RUST); #[cfg(feature = "c-allocator")] test_allocate_zeroed_help(C); assert!(FAIL.allocate_raw::().is_none()); } fn test_allocate_zeroed_buffer_help(allocator: Allocator) { let len = 42; let Some(buf) = allocator.allocate_zeroed_buffer(len) else { return; }; let slice = unsafe { core::slice::from_raw_parts_mut(buf.as_ptr(), len) }; assert_eq!(slice.iter().sum::(), 0); unsafe { allocator.deallocate(buf.as_ptr(), len) }; } #[test] fn test_allocate_buffer_zeroed() { #[cfg(feature = "rust-allocator")] test_allocate_zeroed_buffer_help(RUST); #[cfg(feature = "c-allocator")] test_allocate_zeroed_buffer_help(C); test_allocate_zeroed_buffer_help(FAIL); } #[test] fn test_deallocate_null() { unsafe { #[cfg(feature = "rust-allocator")] (RUST.zfree)(core::ptr::null_mut(), core::ptr::null_mut()); #[cfg(feature = "c-allocator")] (C.zfree)(core::ptr::null_mut(), core::ptr::null_mut()); (FAIL.zfree)(core::ptr::null_mut(), core::ptr::null_mut()); } } } zlib-rs-0.5.2/src/c_api.rs000064400000000000000000000173011046102023000134240ustar 00000000000000#![allow(non_camel_case_types)] #![allow(non_snake_case)] use core::ffi::{c_char, c_int, c_uchar, c_uint, c_ulong, c_void}; use crate::allocate::Allocator; pub type alloc_func = unsafe extern "C" fn(voidpf, uInt, uInt) -> voidpf; pub type free_func = unsafe extern "C" fn(voidpf, voidpf); pub type Bytef = u8; pub type in_func = unsafe extern "C" fn(*mut c_void, *mut *const 
c_uchar) -> c_uint; pub type out_func = unsafe extern "C" fn(*mut c_void, *mut c_uchar, c_uint) -> c_int; pub type uInt = c_uint; pub type uLong = c_ulong; pub type uLongf = c_ulong; pub type voidp = *mut c_void; pub type voidpc = *const c_void; pub type voidpf = *mut c_void; /// The current stream state /// /// # Custom allocators /// /// The low-level API supports passing in a custom allocator as part of the [`z_stream`]: /// /// ```no_check /// struct z_stream { /// // ... /// zalloc: Option *mut c_void>, /// zfree: Option, /// opaque: *mut c_void, /// } /// ``` /// /// When these fields are `None` (or `NULL` in C), the initialization functions use a default allocator, /// based on feature flags: /// /// - `"rust-allocator"` uses the rust global allocator /// - `"c-allocator"` uses an allocator based on `malloc` and `free` /// /// When both are configured, the `"rust-allocator"` is preferred. When no default allocator is configured, /// and custom `zalloc` and `zfree` are provided, the initialization functions will return a [`Z_STREAM_ERROR`]. /// /// When custom `zalloc` and `zfree` functions are given, they must adhere to the following contract /// to be safe: /// /// - a call `zalloc(opaque, n, m)` must return a pointer `p` to `n * m` bytes of memory, or /// `NULL` if out of memory /// - a call `zfree(opaque, p)` must free that memory /// /// The `strm.opaque` value is passed to as the first argument to all calls to `zalloc` /// and `zfree`, but is otherwise ignored by the library. #[repr(C)] #[derive(Copy, Clone)] pub struct z_stream { pub next_in: *const Bytef, pub avail_in: uInt, pub total_in: z_size, pub next_out: *mut Bytef, pub avail_out: uInt, pub total_out: z_size, pub msg: *mut c_char, pub state: *mut internal_state, pub zalloc: Option, pub zfree: Option, pub opaque: voidpf, pub data_type: c_int, pub adler: z_checksum, pub reserved: uLong, } pub type z_streamp = *mut z_stream; impl Default for z_stream { fn default() -> Self { let mut stream = Self { next_in: core::ptr::null_mut(), avail_in: 0, total_in: 0, next_out: core::ptr::null_mut(), avail_out: 0, total_out: 0, msg: core::ptr::null_mut(), state: core::ptr::null_mut(), zalloc: None, zfree: None, opaque: core::ptr::null_mut(), data_type: 0, adler: 0, reserved: 0, }; #[cfg(feature = "rust-allocator")] if stream.zalloc.is_none() || stream.zfree.is_none() { stream.configure_default_rust_allocator() } #[cfg(feature = "c-allocator")] if stream.zalloc.is_none() || stream.zfree.is_none() { stream.configure_default_c_allocator() } stream } } impl z_stream { fn configure_allocator(&mut self, alloc: Allocator) { self.zalloc = Some(alloc.zalloc); self.zfree = Some(alloc.zfree); self.opaque = alloc.opaque; } #[cfg(feature = "rust-allocator")] pub fn configure_default_rust_allocator(&mut self) { self.configure_allocator(crate::allocate::RUST) } #[cfg(feature = "c-allocator")] pub fn configure_default_c_allocator(&mut self) { self.configure_allocator(crate::allocate::C) } } // // zlib stores Adler-32 and CRC-32 checksums in unsigned long; zlib-ng uses uint32_t. 
pub(crate) type z_size = c_ulong; pub(crate) type z_checksum = c_ulong; // opaque to the user pub enum internal_state {} pub const Z_NO_FLUSH: c_int = 0; pub const Z_PARTIAL_FLUSH: c_int = 1; pub const Z_SYNC_FLUSH: c_int = 2; pub const Z_FULL_FLUSH: c_int = 3; pub const Z_FINISH: c_int = 4; pub const Z_BLOCK: c_int = 5; pub const Z_TREES: c_int = 6; pub const Z_OK: c_int = 0; pub const Z_STREAM_END: c_int = 1; pub const Z_NEED_DICT: c_int = 2; pub const Z_ERRNO: c_int = -1; pub const Z_STREAM_ERROR: c_int = -2; pub const Z_DATA_ERROR: c_int = -3; pub const Z_MEM_ERROR: c_int = -4; pub const Z_BUF_ERROR: c_int = -5; pub const Z_VERSION_ERROR: c_int = -6; pub const Z_NO_COMPRESSION: c_int = 0; pub const Z_BEST_SPEED: c_int = 1; pub const Z_BEST_COMPRESSION: c_int = 9; pub const Z_DEFAULT_COMPRESSION: c_int = -1; pub const Z_DEFLATED: c_int = 8; pub const Z_BINARY: c_int = 0; pub const Z_TEXT: c_int = 1; pub const Z_ASCII: c_int = Z_TEXT; /* for compatibility with 1.2.2 and earlier */ pub const Z_UNKNOWN: c_int = 2; pub const Z_FILTERED: c_int = 1; pub const Z_HUFFMAN_ONLY: c_int = 2; pub const Z_RLE: c_int = 3; pub const Z_FIXED: c_int = 4; pub const Z_DEFAULT_STRATEGY: c_int = 0; pub type gz_headerp = *mut gz_header; /// gzip header information passed to and from zlib routines. /// See RFC 1952 for more details on the meanings of these fields. #[derive(Debug)] #[repr(C)] pub struct gz_header { /// true if compressed data believed to be text pub text: i32, /// modification time pub time: c_ulong, /// extra flags (not used when writing a gzip file) pub xflags: i32, /// operating system pub os: i32, /// pointer to extra field or NULL if none pub extra: *mut u8, /// extra field length (valid if extra != NULL) pub extra_len: u32, /// space at extra (only when reading header) pub extra_max: u32, /// pointer to zero-terminated file name or NULL pub name: *mut u8, /// space at name (only when reading header) pub name_max: u32, /// pointer to zero-terminated comment or NULL pub comment: *mut u8, /// space at comment (only when reading header) pub comm_max: u32, /// true if there was or will be a header crc pub hcrc: i32, /// true when done reading gzip header (not used when writing a gzip file) pub done: i32, } impl Default for gz_header { fn default() -> Self { Self { text: 0, time: 0, xflags: 0, os: 0, extra: core::ptr::null_mut(), extra_len: 0, extra_max: 0, name: core::ptr::null_mut(), name_max: 0, comment: core::ptr::null_mut(), comm_max: 0, hcrc: 0, done: 0, } } } impl gz_header { // based on the spec https://www.ietf.org/rfc/rfc1952.txt // // 0 - FAT filesystem (MS-DOS, OS/2, NT/Win32) // 1 - Amiga // 2 - VMS (or OpenVMS) // 3 - Unix // 4 - VM/CMS // 5 - Atari TOS // 6 - HPFS filesystem (OS/2, NT) // 7 - Macintosh // 8 - Z-System // 9 - CP/M // 10 - TOPS-20 // 11 - NTFS filesystem (NT) // 12 - QDOS // 13 - Acorn RISCOS // 255 - unknown #[allow(clippy::if_same_then_else)] pub const OS_CODE: u8 = { if cfg!(windows) { 10 } else if cfg!(target_os = "macos") { 19 } else if cfg!(unix) { 3 } else { 3 // assume unix } }; pub(crate) fn flags(&self) -> u8 { (if self.text != 0 { 1 } else { 0 }) + (if self.hcrc != 0 { 2 } else { 0 }) + (if self.extra.is_null() { 0 } else { 4 }) + (if self.name.is_null() { 0 } else { 8 }) + (if self.comment.is_null() { 0 } else { 16 }) } } zlib-rs-0.5.2/src/cpu_features.rs000064400000000000000000000047041046102023000150410ustar 00000000000000#![allow(dead_code)] #![allow(unreachable_code)] pub struct CpuFeatures; impl CpuFeatures { pub const NONE: usize = 0; pub const 
AVX2: usize = 1; } #[inline(always)] pub fn is_enabled_sse() -> bool { #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] #[cfg(feature = "std")] return std::is_x86_feature_detected!("sse"); false } #[inline(always)] pub fn is_enabled_sse42() -> bool { #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] #[cfg(feature = "std")] return std::is_x86_feature_detected!("sse4.2"); false } #[inline(always)] pub fn is_enabled_avx2_and_bmi2() -> bool { #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] { #[cfg(all( target_feature = "avx2", target_feature = "bmi1", target_feature = "bmi2" ))] return true; #[cfg(feature = "std")] { use std::sync::atomic::{AtomicU32, Ordering}; static CACHE: AtomicU32 = AtomicU32::new(2); return match CACHE.load(Ordering::Relaxed) { 0 => false, 1 => true, _ => { let detected = std::is_x86_feature_detected!("avx2") && std::is_x86_feature_detected!("bmi1") && std::is_x86_feature_detected!("bmi2"); CACHE.store(u32::from(detected), Ordering::Relaxed); detected } }; } } false } #[inline(always)] pub fn is_enabled_avx512() -> bool { #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] #[cfg(feature = "std")] return std::is_x86_feature_detected!("avx512f"); false } #[inline(always)] pub fn is_enabled_pclmulqdq() -> bool { #[cfg(target_arch = "x86_64")] #[cfg(feature = "std")] return std::is_x86_feature_detected!("pclmulqdq") && std::is_x86_feature_detected!("sse4.1"); false } #[inline(always)] pub fn is_enabled_neon() -> bool { #[cfg(target_arch = "aarch64")] #[cfg(feature = "std")] return std::arch::is_aarch64_feature_detected!("neon"); false } #[inline(always)] pub fn is_enabled_crc() -> bool { #[cfg(target_arch = "aarch64")] #[cfg(feature = "std")] return std::arch::is_aarch64_feature_detected!("crc"); false } #[inline(always)] pub fn is_enabled_simd128() -> bool { #[cfg(target_arch = "wasm32")] return cfg!(target_feature = "simd128"); false } zlib-rs-0.5.2/src/crc32/acle.rs000064400000000000000000000112261046102023000141710ustar 00000000000000//! # Safety //! //! The functions in this module must only be executed on an ARM system with the CRC feature. #[cfg_attr(not(target_arch = "aarch64"), allow(unused))] #[target_feature(enable = "crc")] pub unsafe fn crc32_acle_aarch64(crc: u32, buf: &[u8]) -> u32 { let mut c = !crc; // SAFETY: [u8; 8] safely transmutes into u64. let (before, middle, after) = unsafe { buf.align_to::() }; // SAFETY: `remainder` requires the feature "crc" but so does this function c = unsafe { remainder(c, before) }; if middle.is_empty() && after.is_empty() { return !c; } for d in middle { c = unsafe { __crc32d(c, *d) }; } // SAFETY: `remainder` requires the feature "crc" but so does this function c = unsafe { remainder(c, after) }; !c } #[inline] #[target_feature(enable = "crc")] unsafe fn remainder(mut c: u32, mut buf: &[u8]) -> u32 { if let [b0, b1, b2, b3, rest @ ..] = buf { c = unsafe { __crc32w(c, u32::from_le_bytes([*b0, *b1, *b2, *b3])) }; buf = rest; } if let [b0, b1, rest @ ..] = buf { c = unsafe { __crc32h(c, u16::from_le_bytes([*b0, *b1])) }; buf = rest; } if let [b0, rest @ ..] = buf { c = unsafe { __crc32b(c, *b0) }; buf = rest; } debug_assert!(buf.is_empty()); c } // FIXME the intrinsics below are stable since rust 1.80.0: remove these and use the standard // library versions once our MSRV reaches that version. /// CRC32 single round checksum for bytes (8 bits). 
/// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32b) #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] unsafe fn __crc32b(mut crc: u32, data: u8) -> u32 { unsafe { core::arch::asm!("crc32b {crc:w}, {crc:w}, {data:w}", crc = inout(reg) crc, data = in(reg) data); crc } } /// CRC32 single round checksum for half words (16 bits). /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32h) #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] unsafe fn __crc32h(mut crc: u32, data: u16) -> u32 { unsafe { core::arch::asm!("crc32h {crc:w}, {crc:w}, {data:w}", crc = inout(reg) crc, data = in(reg) data); crc } } /// CRC32 single round checksum for words (32 bits). /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32w) #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] pub unsafe fn __crc32w(mut crc: u32, data: u32) -> u32 { unsafe { core::arch::asm!("crc32w {crc:w}, {crc:w}, {data:w}", crc = inout(reg) crc, data = in(reg) data); crc } } /// CRC32 single round checksum for double words (64 bits). /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d) #[cfg(target_arch = "aarch64")] #[target_feature(enable = "crc")] unsafe fn __crc32d(mut crc: u32, data: u64) -> u32 { unsafe { core::arch::asm!("crc32x {crc:w}, {crc:w}, {data:x}", crc = inout(reg) crc, data = in(reg) data); crc } } #[cfg(test)] mod tests { use super::*; quickcheck::quickcheck! { #[cfg(target_arch = "aarch64")] fn crc32_acle_aarch64_is_crc32fast(v: Vec, start: u32) -> bool { let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&v); let a = unsafe { crc32_acle_aarch64(start, &v) }; let b = h.finalize(); a == b } } #[test] fn test_crc32b() { if !crate::cpu_features::is_enabled_crc() { return; } unsafe { assert_eq!(__crc32b(0, 0), 0); assert_eq!(__crc32b(0, 255), 755167117); } } #[test] fn test_crc32h() { if !crate::cpu_features::is_enabled_crc() { return; } unsafe { assert_eq!(__crc32h(0, 0), 0); assert_eq!(__crc32h(0, 16384), 1994146192); } } #[test] fn test_crc32w() { if !crate::cpu_features::is_enabled_crc() { return; } unsafe { assert_eq!(__crc32w(0, 0), 0); assert_eq!(__crc32w(0, 4294967295), 3736805603); } } #[test] #[cfg(target_arch = "aarch64")] fn test_crc32d() { if !crate::cpu_features::is_enabled_crc() { return; } unsafe { assert_eq!(__crc32d(0, 0), 0); assert_eq!(__crc32d(0, 18446744073709551615), 1147535477); } } } zlib-rs-0.5.2/src/crc32/braid.rs000064400000000000000000000175241046102023000143550ustar 00000000000000// Several implementations of CRC-32: // * A naive byte-granularity approach // * A word-sized approach that processes a usize word at a time // * A "braid" implementation that processes a block of N words // at a time, based on the algorithm in section 4.11 from // https://github.com/zlib-ng/zlib-ng/blob/develop/doc/crc-doc.1.0.pdf. // The binary encoding of the CRC-32 polynomial. // We are assuming little-endianness so we process the input // LSB-first. We need to use the "reversed" value from e.g // https://en.wikipedia.org/wiki/Cyclic_redundancy_check#Polynomial_representations. pub(crate) const CRC32_LSB_POLY: usize = 0xedb8_8320usize; const W: usize = core::mem::size_of::(); // The logic assumes that W >= sizeof(u32). // In Rust, this is generally true. 
const _: () = assert!(W >= core::mem::size_of::<u32>()); // Pre-computed tables for the CRC32 algorithm. // CRC32_BYTE_TABLE corresponds to MulByXPowD from the paper. static CRC32_BYTE_TABLE: [[u32; 256]; 1] = build_crc32_table::<256, 1, 1>(); // CRC32_WORD_TABLE is MulWordByXpowD. static CRC32_WORD_TABLE: [[u32; 256]; W] = build_crc32_table::<256, W, 1>(); // FIXME: make const fn when msrv allows. pub(crate) fn get_crc_table() -> &'static [u32; 256] { &CRC32_BYTE_TABLE[0] } // Work-around for not being able to define generic consts or statics // Crc32BraidTable::<N>::TABLE is the generic table for any braid size N. struct Crc32BraidTable<const N: usize>; impl<const N: usize> Crc32BraidTable<N> { const TABLE: [[u32; 256]; W] = build_crc32_table::<256, W, N>(); } // Build the CRC32 tables using a more efficient and simpler approach // than the combination of Multiply and XpowN (which implement polynomial // multiplication and exponentiation, respectively) from the paper, // but with identical results. This function is const, so it should be // fully evaluated at compile time. const fn build_crc32_table<const A: usize, const W: usize, const N: usize>() -> [[u32; A]; W] { let mut arr = [[0u32; A]; W]; let mut i = 0; while i < W { let mut j = 0; while j < A { let mut c = j; let mut k = 0; while k < 8 * (W * N - i) { if c & 1 != 0 { c = CRC32_LSB_POLY ^ (c >> 1); } else { c >>= 1; } k += 1; } arr[i][j] = c as u32; j += 1; } i += 1; } arr } fn crc32_naive_inner(data: &[u8], start: u32) -> u32 { data.iter().fold(start, |crc, val| { let crc32_lsb = crc.to_le_bytes()[0]; CRC32_BYTE_TABLE[0][usize::from(crc32_lsb ^ *val)] ^ (crc >> 8) }) } fn crc32_words_inner(words: &[usize], start: u32, per_word_crcs: &[u32]) -> u32 { words.iter().enumerate().fold(start, |crc, (i, word)| { let value = word.to_le() ^ (crc ^ per_word_crcs.get(i).unwrap_or(&0)) as usize; value .to_le_bytes() .into_iter() .zip(CRC32_WORD_TABLE) .fold(0u32, |crc, (b, tab)| crc ^ tab[usize::from(b)]) }) } pub fn crc32_braid<const N: usize>(start: u32, data: &[u8]) -> u32 { // Get a word-aligned sub-slice of the input data // SAFETY: it is safe to transmute a slice of u8 into a slice of usize. let (prefix, words, suffix) = unsafe { data.align_to::<usize>() }; let crc = !start; let crc = crc32_naive_inner(prefix, crc); let mut crcs = [0u32; N]; crcs[0] = crc; // TODO: this would normally use words.chunks_exact(N), but // we need to pass the last full block to crc32_words_inner // because we accumulate partial crcs in the array and we // need to roll those into the final value. The last call to // crc32_words_inner does that for us with its per_word_crcs // argument. let blocks = words.len() / N; let blocks = blocks.saturating_sub(1); for i in 0..blocks { // Load the next N words. let mut buffer: [usize; N] = core::array::from_fn(|j| usize::to_le(words[i * N + j]) ^ (crcs[j] as usize)); crcs.fill(0); for j in 0..W { braid_core(&mut crcs, &mut buffer, j); } } let crc = core::mem::take(&mut crcs[0]); let crc = crc32_words_inner(&words[blocks * N..], crc, &crcs); let crc = crc32_naive_inner(suffix, crc); !crc } // A workaround for https://github.com/trifectatechfoundation/zlib-rs/issues/407. // // We're seeing misoptimization with rust versions that use LLVM 20, earlier LLVMs are fine, and // LLVM 21 similarly appears to do fine. The offending feature is `+avx512vl`, the // "Vector Length Extension", which extends some instructions operating on 512-bit operands with // variants that support 256-bit or 128-bit operands. // // The avx512vl target feature only became stable in 1.89.0: before that, we can't detect it // statically.
Therefore we use avx2 as a proxy, it is implied by avx512vl. #[cfg_attr(all(target_arch = "x86_64", target_feature = "avx2"), inline(never))] #[cfg_attr(not(target_arch = "x86_64"), inline(always))] fn braid_core<const N: usize>(crcs: &mut [u32; N], buffer: &mut [usize; N], j: usize) { for k in 0..N { crcs[k] ^= Crc32BraidTable::<N>::TABLE[j][buffer[k] & 0xff]; buffer[k] >>= 8; } } #[cfg(test)] mod test { use super::*; fn crc32_naive(data: &[u8], start: u32) -> u32 { let crc = !start; let crc = crc32_naive_inner(data, crc); !crc } fn crc32_words(data: &[u8], start: u32) -> u32 { // Get a word-aligned sub-slice of the input data let (prefix, words, suffix) = unsafe { data.align_to::<usize>() }; let crc = !start; let crc = crc32_naive_inner(prefix, crc); let crc = crc32_words_inner(words, crc, &[]); let crc = crc32_naive_inner(suffix, crc); !crc } #[test] fn empty_is_identity() { assert_eq!(crc32_naive(&[], 32), 32); } #[test] fn words_endianness() { let v = [0, 0, 0, 0, 0, 16, 0, 1]; let start = 1534327806; let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&v[..]); assert_eq!(crc32_words(&v[..], start), h.finalize()); } #[test] fn crc32_naive_inner_endianness_and_alignment() { assert_eq!(crc32_naive_inner(&[0, 1], 0), 1996959894); let v: Vec<_> = (0..1024).map(|i| i as u8).collect(); let start = 0; // test alignment for i in 0..8 { let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&v[i..]); assert_eq!(crc32_braid::<5>(start, &v[i..]), h.finalize()); } } quickcheck::quickcheck! { fn naive_is_crc32fast(v: Vec<u8>, start: u32) -> bool { let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&v[..]); crc32_naive(&v[..], start) == h.finalize() } fn words_is_crc32fast(v: Vec<u8>, start: u32) -> bool { let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&v[..]); crc32_words(&v[..], start) == h.finalize() } #[cfg_attr(miri, ignore)] fn braid_4_is_crc32fast(v: Vec<u8>, start: u32) -> bool { let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&v[..]); crc32_braid::<4>(start, &v[..]) == h.finalize() } #[cfg_attr(miri, ignore)] fn braid_5_is_crc32fast(v: Vec<u8>, start: u32) -> bool { let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&v[..]); crc32_braid::<5>(start, &v[..]) == h.finalize() } #[cfg_attr(miri, ignore)] fn braid_6_is_crc32fast(v: Vec<u8>, start: u32) -> bool { let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&v[..]); crc32_braid::<6>(start, &v[..]) == h.finalize() } } } zlib-rs-0.5.2/src/crc32/combine.rs000064400000000000000000000061331046102023000147020ustar 00000000000000use super::braid::CRC32_LSB_POLY; pub const fn crc32_combine(crc1: u32, crc2: u32, len2: u64) -> u32 { crc32_combine_op(crc1, crc2, crc32_combine_gen(len2)) } #[inline(always)] const fn crc32_combine_gen(len2: u64) -> u32 { x2nmodp(len2, 3) } #[inline(always)] const fn crc32_combine_op(crc1: u32, crc2: u32, op: u32) -> u32 { multmodp(op, crc1) ^ crc2 } const X2N_TABLE: [u32; 32] = [ 0x40000000, 0x20000000, 0x08000000, 0x00800000, 0x00008000, 0xedb88320, 0xb1e6b092, 0xa06a2517, 0xed627dae, 0x88d14467, 0xd7bbfe6a, 0xec447f11, 0x8e7ea170, 0x6427800e, 0x4d47bae0, 0x09fe548f, 0x83852d0f, 0x30362f1a, 0x7b5a9cc3, 0x31fec169, 0x9fec022a, 0x6c8dedc4, 0x15d6874d, 0x5fde7a4e, 0xbad90e37, 0x2e4e5eef, 0x4eaba214, 0xa8a472c0, 0x429a969e, 0x148d302a, 0xc40ba6d0, 0xc4e22c3c, ]; // Return a(x) multiplied by b(x) modulo p(x), where p(x) is the CRC polynomial, // reflected. For speed, this requires that a not be zero.
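// Added example (not part of the upstream source; the test name is made up for illustration):
// `crc32_combine` computes the CRC of a concatenation from the independent CRCs of its parts plus
// the length of the second part, without re-reading any data.
#[cfg(test)]
#[test]
fn crc_of_concatenation_example() {
    use crate::crc32;
    let (a, b) = (b"hello ".as_slice(), b"world".as_slice());
    let crc_a = crc32(0, a);
    let crc_b = crc32(0, b);
    // Continuing the CRC of `a` over `b`...
    let direct = crc32(crc_a, b);
    // ...matches combining the two independently computed CRCs after the fact.
    assert_eq!(direct, crc32_combine(crc_a, crc_b, b.len() as u64));
}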
const fn multmodp(a: u32, mut b: u32) -> u32 { let mut m = 1 << 31; let mut p = 0; loop { if (a & m) != 0 { p ^= b; if (a & (m - 1)) == 0 { break; } } m >>= 1; b = if (b & 1) != 0 { (b >> 1) ^ CRC32_LSB_POLY as u32 } else { b >> 1 }; } p } // Return x^(n * 2^k) modulo p(x). const fn x2nmodp(mut n: u64, mut k: u32) -> u32 { let mut p: u32 = 1 << 31; /* x^0 == 1 */ while n > 0 { if (n & 1) != 0 { p = multmodp(X2N_TABLE[k as usize & 31], p); } n >>= 1; k += 1; } p } #[cfg(test)] mod test { use super::*; use crate::crc32; #[test] fn test_crc32_combine() { ::quickcheck::quickcheck(test as fn(_) -> _); fn test(data: Vec<u8>) -> bool { let Some(buf_len) = data.first().copied() else { return true; }; let buf_size = Ord::max(buf_len, 1) as usize; let crc0 = 0; let mut crc1 = crc0; let mut crc2 = crc0; /* CRC32 */ for chunk in data.chunks(buf_size) { let crc3 = crc32(crc0, chunk); let op = crc32_combine_gen(chunk.len() as _); let crc4 = crc32_combine_op(crc1, crc3, op); crc1 = crc32(crc1, chunk); assert_eq!(crc1, crc4); } crc2 = crc32(crc2, &data); assert_eq!(crc1, crc2); let combine1 = crc32_combine(crc1, crc2, data.len() as _); let combine2 = crc32_combine(crc1, crc1, data.len() as _); assert_eq!(combine1, combine2); // Fast CRC32 combine. let op = crc32_combine_gen(data.len() as _); let combine1 = crc32_combine_op(crc1, crc2, op); let combine2 = crc32_combine_op(crc2, crc1, op); assert_eq!(combine1, combine2); let combine1 = crc32_combine(crc1, crc2, data.len() as _); let combine2 = crc32_combine_op(crc2, crc1, op); assert_eq!(combine1, combine2); true } } } zlib-rs-0.5.2/src/crc32/pclmulqdq.rs000064400000000000000000000336471046102023000153000ustar 00000000000000use core::arch::x86_64::__m128i; use core::arch::x86_64::{ _mm_and_si128, _mm_clmulepi64_si128, _mm_extract_epi32, _mm_load_si128, _mm_loadu_si128, _mm_or_si128, _mm_shuffle_epi8, _mm_slli_si128, _mm_srli_si128, _mm_storeu_si128, _mm_xor_si128, }; use crate::CRC32_INITIAL_VALUE; #[derive(Debug)] #[repr(C, align(16))] struct Align16<T>(T); #[cfg(target_arch = "x86_64")] const fn reg(input: [u32; 4]) -> __m128i { // SAFETY: any valid [u32; 4] represents a valid __m128i unsafe { core::mem::transmute(input) } } /// # Safety /// /// The methods of this struct should only be used on x86_64 systems that support the `pclmulqdq` /// instruction, and SSE2 and SSE4.1 instruction sets.
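// Added note (not part of the upstream source): the accumulator below keeps four 128-bit lanes of
// folded CRC state, i.e. one 64-byte stripe of input. Each fold step carry-less-multiplies the
// existing lanes forward and XORs in freshly loaded data, so the expensive reduction down to a
// 32-bit CRC value only has to happen once, in `finish`.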
#[derive(Debug, Clone, Copy)] #[cfg(target_arch = "x86_64")] pub(crate) struct Accumulator { fold: [__m128i; 4], } #[cfg(target_arch = "x86_64")] impl Accumulator { const XMM_FOLD4: __m128i = reg([0xc6e41596u32, 0x00000001u32, 0x54442bd4u32, 0x00000001u32]); pub const fn new() -> Self { let xmm_crc0 = reg([0x9db42487, 0, 0, 0]); let xmm_zero = reg([0, 0, 0, 0]); Self { fold: [xmm_crc0, xmm_zero, xmm_zero, xmm_zero], } } pub unsafe fn fold(&mut self, src: &[u8], start: u32) { unsafe { self.fold_help::<false>(&mut [], src, start) } } pub unsafe fn fold_copy(&mut self, dst: &mut [u8], src: &[u8]) { unsafe { self.fold_help::<true>(dst, src, 0) } } #[target_feature(enable = "pclmulqdq", enable = "sse2", enable = "sse4.1")] pub unsafe fn finish(self) -> u32 { const CRC_MASK1: __m128i = reg([0xFFFFFFFFu32, 0xFFFFFFFFu32, 0x00000000u32, 0x00000000u32]); const CRC_MASK2: __m128i = reg([0x00000000u32, 0xFFFFFFFFu32, 0xFFFFFFFFu32, 0xFFFFFFFFu32]); const RK1_RK2: __m128i = reg([ 0xccaa009e, 0x00000000, /* rk1 */ 0x751997d0, 0x00000001, /* rk2 */ ]); const RK5_RK6: __m128i = reg([ 0xccaa009e, 0x00000000, /* rk5 */ 0x63cd6124, 0x00000001, /* rk6 */ ]); const RK7_RK8: __m128i = reg([ 0xf7011640, 0x00000001, /* rk7 */ 0xdb710640, 0x00000001, /* rk8 */ ]); let [mut xmm_crc0, mut xmm_crc1, mut xmm_crc2, mut xmm_crc3] = self.fold; /* * k1 */ let mut crc_fold = RK1_RK2; #[allow(unused_unsafe)] // because target features 1.1 unsafe { let x_tmp0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x10); xmm_crc0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x01); xmm_crc1 = _mm_xor_si128(xmm_crc1, x_tmp0); xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_crc0); let x_tmp1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x10); xmm_crc1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x01); xmm_crc2 = _mm_xor_si128(xmm_crc2, x_tmp1); xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_crc1); let x_tmp2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x10); xmm_crc2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x01); xmm_crc3 = _mm_xor_si128(xmm_crc3, x_tmp2); xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2); /* * k5 */ crc_fold = RK5_RK6; xmm_crc0 = xmm_crc3; xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0); xmm_crc0 = _mm_srli_si128(xmm_crc0, 8); xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0); xmm_crc0 = xmm_crc3; xmm_crc3 = _mm_slli_si128(xmm_crc3, 4); xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10); xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0); xmm_crc3 = _mm_and_si128(xmm_crc3, CRC_MASK2); /* * k7 */ xmm_crc1 = xmm_crc3; xmm_crc2 = xmm_crc3; crc_fold = RK7_RK8; xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0); xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2); xmm_crc3 = _mm_and_si128(xmm_crc3, CRC_MASK1); xmm_crc2 = xmm_crc3; xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10); xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2); xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1); !(_mm_extract_epi32(xmm_crc3, 2) as u32) } } unsafe fn fold_step<const N: usize>(&mut self) { self.fold = core::array::from_fn(|i| match self.fold.get(i + N) { Some(v) => *v, None => unsafe { Self::step(self.fold[(i + N) - 4]) }, }); } #[inline(always)] unsafe fn step(input: __m128i) -> __m128i { unsafe { _mm_xor_si128( _mm_clmulepi64_si128(input, Self::XMM_FOLD4, 0x01), _mm_clmulepi64_si128(input, Self::XMM_FOLD4, 0x10), ) } } unsafe fn partial_fold(&mut self, xmm_crc_part: __m128i, len: usize) { const PSHUFB_SHF_TABLE: [__m128i; 15] = [ reg([0x84838281, 0x88878685, 0x8c8b8a89, 0x008f8e8d]), /* shl 15 (16 - 1)/shr1 */ reg([0x85848382, 0x89888786, 0x8d8c8b8a, 0x01008f8e]), /* shl 14 (16 - 3)/shr2 */
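// Added explanation (not part of the upstream source): these masks drive `_mm_shuffle_epi8`. A
// mask byte in 0x00..=0x0f selects that source byte, while any mask byte with its high bit set
// (0x80 and up) yields zero. XOR-ing a mask with 0x80808080 flips every byte between "select" and
// "zero", which is how `partial_fold` derives the complementary right-shift mask (`xmm_shr`) from
// the left-shift mask (`xmm_shl`) it looks up in this table.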
reg([0x86858483, 0x8a898887, 0x8e8d8c8b, 0x0201008f]), /* shl 13 (16 - 4)/shr3 */ reg([0x87868584, 0x8b8a8988, 0x8f8e8d8c, 0x03020100]), /* shl 12 (16 - 4)/shr4 */ reg([0x88878685, 0x8c8b8a89, 0x008f8e8d, 0x04030201]), /* shl 11 (16 - 5)/shr5 */ reg([0x89888786, 0x8d8c8b8a, 0x01008f8e, 0x05040302]), /* shl 10 (16 - 6)/shr6 */ reg([0x8a898887, 0x8e8d8c8b, 0x0201008f, 0x06050403]), /* shl 9 (16 - 7)/shr7 */ reg([0x8b8a8988, 0x8f8e8d8c, 0x03020100, 0x07060504]), /* shl 8 (16 - 8)/shr8 */ reg([0x8c8b8a89, 0x008f8e8d, 0x04030201, 0x08070605]), /* shl 7 (16 - 9)/shr9 */ reg([0x8d8c8b8a, 0x01008f8e, 0x05040302, 0x09080706]), /* shl 6 (16 -10)/shr10*/ reg([0x8e8d8c8b, 0x0201008f, 0x06050403, 0x0a090807]), /* shl 5 (16 -11)/shr11*/ reg([0x8f8e8d8c, 0x03020100, 0x07060504, 0x0b0a0908]), /* shl 4 (16 -12)/shr12*/ reg([0x008f8e8d, 0x04030201, 0x08070605, 0x0c0b0a09]), /* shl 3 (16 -13)/shr13*/ reg([0x01008f8e, 0x05040302, 0x09080706, 0x0d0c0b0a]), /* shl 2 (16 -14)/shr14*/ reg([0x0201008f, 0x06050403, 0x0a090807, 0x0e0d0c0b]), /* shl 1 (16 -15)/shr15*/ ]; unsafe { let xmm_shl = PSHUFB_SHF_TABLE[len - 1]; let xmm_shr = _mm_xor_si128(xmm_shl, reg([0x80808080u32; 4])); let xmm_a0 = Self::step(_mm_shuffle_epi8(self.fold[0], xmm_shl)); self.fold[0] = _mm_shuffle_epi8(self.fold[0], xmm_shr); let xmm_tmp1 = _mm_shuffle_epi8(self.fold[1], xmm_shl); self.fold[0] = _mm_or_si128(self.fold[0], xmm_tmp1); self.fold[1] = _mm_shuffle_epi8(self.fold[1], xmm_shr); let xmm_tmp2 = _mm_shuffle_epi8(self.fold[2], xmm_shl); self.fold[1] = _mm_or_si128(self.fold[1], xmm_tmp2); self.fold[2] = _mm_shuffle_epi8(self.fold[2], xmm_shr); let xmm_tmp3 = _mm_shuffle_epi8(self.fold[3], xmm_shl); self.fold[2] = _mm_or_si128(self.fold[2], xmm_tmp3); self.fold[3] = _mm_shuffle_epi8(self.fold[3], xmm_shr); let xmm_crc_part = _mm_shuffle_epi8(xmm_crc_part, xmm_shl); self.fold[3] = _mm_or_si128(self.fold[3], xmm_crc_part); // zlib-ng uses casts and a floating-point xor instruction here. There is a theory that // this breaks dependency chains on some CPUs and gives better throughput. Other sources // claim that casting between integer and float has a cost and should be avoided. We can't // measure the difference, and choose the shorter code. self.fold[3] = _mm_xor_si128(self.fold[3], xmm_a0) } } #[allow(clippy::needless_range_loop)] unsafe fn progress<const N: usize, const COPY: bool>( &mut self, dst: &mut [u8], src: &mut &[u8], init_crc: &mut u32, ) -> usize { let mut it = src.chunks_exact(16); let mut input: [_; N] = core::array::from_fn(|_| unsafe { _mm_load_si128(it.next().unwrap().as_ptr() as *const __m128i) }); *src = &src[N * 16..]; if COPY { for (s, d) in input[..N].iter().zip(dst.chunks_exact_mut(16)) { unsafe { _mm_storeu_si128(d.as_mut_ptr() as *mut __m128i, *s) }; } } else if *init_crc != CRC32_INITIAL_VALUE { let xmm_initial = reg([*init_crc, 0, 0, 0]); input[0] = unsafe { _mm_xor_si128(input[0], xmm_initial) }; *init_crc = CRC32_INITIAL_VALUE; } unsafe { self.fold_step::<N>() }; for i in 0..N { self.fold[i + (4 - N)] = unsafe { _mm_xor_si128(self.fold[i + (4 - N)], input[i]) }; } if COPY { N * 16 } else { 0 } } #[target_feature(enable = "pclmulqdq", enable = "sse2", enable = "sse4.1")] unsafe fn fold_help<const COPY: bool>( &mut self, mut dst: &mut [u8], mut src: &[u8], mut init_crc: u32, ) { let mut xmm_crc_part = reg([0; 4]); let mut partial_buf = Align16([0u8; 16]); // Technically the CRC functions don't even call this for input < 64, but a bare minimum of 31 // bytes of input is needed for the aligning load that occurs.
If there's an initial CRC, to // carry it forward through the folded CRC there must be 16 - src % 16 + 16 bytes available, which // by definition can be up to 15 bytes + one full vector load. */ assert!(src.len() >= 31 || init_crc == CRC32_INITIAL_VALUE); if COPY { assert_eq!(dst.len(), src.len(), "dst and src must be the same length") } if src.len() < 16 { if COPY { if src.is_empty() { return; } partial_buf.0[..src.len()].copy_from_slice(src); xmm_crc_part = unsafe { _mm_load_si128(partial_buf.0.as_mut_ptr() as *mut __m128i) }; dst[..src.len()].copy_from_slice(&partial_buf.0[..src.len()]); } } else { // SAFETY: [u8; 16] can safely transmute into _m128i. let (before, _, _) = unsafe { src.align_to::<__m128i>() }; #[allow(unused_unsafe)] // because target features 1.1 if !before.is_empty() { xmm_crc_part = unsafe { _mm_loadu_si128(src.as_ptr() as *const __m128i) }; if COPY { unsafe { _mm_storeu_si128(dst.as_mut_ptr() as *mut __m128i, xmm_crc_part) }; dst = &mut dst[before.len()..]; } else { let is_initial = init_crc == CRC32_INITIAL_VALUE; if !is_initial { let xmm_initial = reg([init_crc, 0, 0, 0]); xmm_crc_part = unsafe { _mm_xor_si128(xmm_crc_part, xmm_initial) }; init_crc = CRC32_INITIAL_VALUE; } if before.len() < 4 && !is_initial { let xmm_t0 = xmm_crc_part; xmm_crc_part = unsafe { _mm_loadu_si128((src.as_ptr() as *const __m128i).add(1)) }; unsafe { self.fold_step::<1>() }; self.fold[3] = unsafe { _mm_xor_si128(self.fold[3], xmm_t0) }; src = &src[16..]; } } unsafe { self.partial_fold(xmm_crc_part, before.len()) }; src = &src[before.len()..]; } // if is_x86_feature_detected!("vpclmulqdq") { // if src.len() >= 256 { // if COPY { // // size_t n = fold_16_vpclmulqdq_copy(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, dst, src, len); // // dst += n; // } else { // // size_t n = fold_16_vpclmulqdq(&xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, src, len, xmm_initial, first); // // first = false; // } // // len -= n; // // src += n; // } // } while src.len() >= 64 { let n = unsafe { self.progress::<4, COPY>(dst, &mut src, &mut init_crc) }; dst = &mut dst[n..]; } if src.len() >= 48 { let n = unsafe { self.progress::<3, COPY>(dst, &mut src, &mut init_crc) }; dst = &mut dst[n..]; } else if src.len() >= 32 { let n = unsafe { self.progress::<2, COPY>(dst, &mut src, &mut init_crc) }; dst = &mut dst[n..]; } else if src.len() >= 16 { let n = unsafe { self.progress::<1, COPY>(dst, &mut src, &mut init_crc) }; dst = &mut dst[n..]; } } if !src.is_empty() { debug_assert!(src.len() <= 16); unsafe { // SAFETY: src and xmm_crc_part don't overlap. xmm_crc_part is statically // guaranteed to be 16 bytes, and in earlier code we advanced the src slice such // that it's less than 16 bytes. core::ptr::copy_nonoverlapping( src.as_ptr(), &mut xmm_crc_part as *mut _ as *mut u8, src.len(), ); if COPY { _mm_storeu_si128(partial_buf.0.as_mut_ptr() as *mut __m128i, xmm_crc_part); core::ptr::copy_nonoverlapping( partial_buf.0.as_ptr(), dst.as_mut_ptr(), src.len(), ); } self.partial_fold(xmm_crc_part, src.len()); } } } } zlib-rs-0.5.2/src/crc32.rs000064400000000000000000000130731046102023000132670ustar 00000000000000use crate::CRC32_INITIAL_VALUE; #[cfg(target_arch = "aarch64")] pub(crate) mod acle; mod braid; mod combine; #[cfg(target_arch = "x86_64")] mod pclmulqdq; pub use combine::crc32_combine; pub fn crc32(start: u32, buf: &[u8]) -> u32 { /* For lens < 64, crc32_braid method is faster. 
The CRC32 instruction for * these short lengths might also prove to be effective */ if buf.len() < 64 { return crc32_braid(start, buf); } let mut crc_state = Crc32Fold::new_with_initial(start); crc_state.fold(buf, start); crc_state.finish() } pub fn crc32_braid(start: u32, buf: &[u8]) -> u32 { braid::crc32_braid::<5>(start, buf) } pub fn get_crc_table() -> &'static [u32; 256] { braid::get_crc_table() } #[derive(Debug, Clone, Copy)] pub struct Crc32Fold { #[cfg(target_arch = "x86_64")] fold: pclmulqdq::Accumulator, value: u32, } impl Default for Crc32Fold { fn default() -> Self { Self::new() } } impl Crc32Fold { pub const fn new() -> Self { Self::new_with_initial(CRC32_INITIAL_VALUE) } pub const fn new_with_initial(initial: u32) -> Self { Self { #[cfg(target_arch = "x86_64")] fold: pclmulqdq::Accumulator::new(), value: initial, } } pub fn fold(&mut self, src: &[u8], _start: u32) { #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_pclmulqdq() { return unsafe { self.fold.fold(src, _start) }; } #[cfg(target_arch = "aarch64")] if crate::cpu_features::is_enabled_crc() { self.value = unsafe { self::acle::crc32_acle_aarch64(self.value, src) }; return; } // in this case the start value is ignored self.value = braid::crc32_braid::<5>(self.value, src); } pub fn fold_copy(&mut self, dst: &mut [u8], src: &[u8]) { #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_pclmulqdq() { return unsafe { self.fold.fold_copy(dst, src) }; } self.fold(src, 0); dst[..src.len()].copy_from_slice(src); } pub fn finish(self) -> u32 { #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_pclmulqdq() { return unsafe { self.fold.finish() }; } self.value } } #[cfg(test)] mod test { use braid::crc32_braid; use super::*; const INPUT: [u8; 1024] = { let mut array = [0; 1024]; let mut i = 0; while i < array.len() { array[i] = i as u8; i += 1; } array }; #[test] fn test_crc32_fold() { // input large enough to trigger the SIMD let mut h = crc32fast::Hasher::new_with_initial(CRC32_INITIAL_VALUE); h.update(&INPUT); assert_eq!(crc32(CRC32_INITIAL_VALUE, &INPUT), h.finalize()); } #[test] fn test_crc32_fold_align() { // SIMD algorithm is sensitive to alignment; for i in 0..16 { for start in [CRC32_INITIAL_VALUE, 42] { let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&INPUT[i..]); assert_eq!( crc32(start, &INPUT[i..]), h.finalize(), "offset = {i}, start = {start}" ); } } } quickcheck::quickcheck! 
{ fn crc_fold_is_crc32fast(v: Vec<u8>, start: u32) -> bool { let mut h = crc32fast::Hasher::new_with_initial(start); h.update(&v); let a = crc32(start, &v); let b = h.finalize(); a == b } } #[test] fn chunked() { const INPUT: &[&[u8]] = &[ &[116], &[111, 107, 105, 111, 44, 32, 97, 115], &[121, 110, 99, 45, 115, 116, 100, 44], &[32, 97, 110, 100, 32, 115, 109, 111], &[108, 46, 32, 89, 111, 117, 226, 128], &[153, 118, 101, 32, 112, 114, 111, 98], &[97, 98, 108, 121, 32, 117, 115, 101], &[100, 32, 116, 104, 101, 109, 32, 97], &[116, 32, 115, 111, 109, 101, 32, 112], &[111, 105, 110, 116, 44, 32, 101, 105], &[116, 104, 101, 114, 32, 100, 105, 114], &[101, 99, 116, 108, 121, 32, 111, 114], &[0], ]; const START: u32 = 2380683574; let mut in_chunks = START; for chunk in INPUT { in_chunks = crc32(in_chunks, chunk); } let flattened: Vec<_> = INPUT.iter().copied().flatten().copied().collect(); let flat = crc32(START, &flattened); assert_eq!(in_chunks, flat); } #[test] fn nasty_alignment() { const START: u32 = 2380683574; const FLAT: &[u8] = &[ 116, 111, 107, 105, 111, 44, 32, 97, 115, 121, 110, 99, 45, 115, 116, 100, 44, 32, 97, 110, 100, 32, 115, 109, 111, 108, 46, 32, 89, 111, 117, 226, 128, 153, 118, 101, 32, 112, 114, 111, 98, 97, 98, 108, 121, 32, 117, 115, 101, 100, 32, 116, 104, 101, 109, 32, 97, 116, 32, 115, 111, 109, 101, 32, 112, 111, 105, 110, 116, 44, 32, 101, 105, 116, 104, 101, 114, 32, 100, 105, 114, 101, 99, 116, 108, 121, 32, 111, 114, 0, ]; let mut i = 0; let mut flat = FLAT.to_vec(); while flat[i..].as_ptr() as usize % 16 != 15 { flat.insert(0, 0); i += 1; } let flat = &flat[i..]; assert_eq!(crc32_braid::<5>(START, flat), crc32(START, flat)); assert_eq!(crc32(2380683574, flat), 1175758345); } } zlib-rs-0.5.2/src/deflate/algorithm/fast.rs000064400000000000000000000104551046102023000167030ustar 00000000000000#![forbid(unsafe_code)] use crate::deflate::hash_calc::StandardHashCalc; use crate::{ deflate::{ fill_window, BlockState, DeflateStream, MIN_LOOKAHEAD, STD_MIN_MATCH, WANT_MIN_MATCH, }, flush_block, DeflateFlush, }; pub fn deflate_fast(stream: &mut DeflateStream, flush: DeflateFlush) -> BlockState { loop { // Make sure that we always have enough lookahead, except // at the end of the input file. We need STD_MAX_MATCH bytes // for the next match, plus WANT_MIN_MATCH bytes to insert the // string following the next match. if stream.state.lookahead < MIN_LOOKAHEAD { fill_window(stream); if stream.state.lookahead < MIN_LOOKAHEAD && flush == DeflateFlush::NoFlush { return BlockState::NeedMore; } if stream.state.lookahead == 0 { break; /* flush the current block */ } } let state = &mut stream.state; // Insert the string window[strstart .. strstart+2] in the // dictionary, and set hash_head to the head of the hash chain: let lc: u8; // Literal character to output if there is no match. if state.lookahead >= WANT_MIN_MATCH { let val = u32::from_le_bytes( state.window.filled()[state.strstart..state.strstart + 4] .try_into() .unwrap(), ); let hash_head = StandardHashCalc::quick_insert_value(state, state.strstart, val); let dist = state.strstart as isize - hash_head as isize; // Find the longest match for the string starting at offset state.strstart. if dist <= state.max_dist() as isize && dist > 0 && hash_head != 0 { // To simplify the code, we prevent matches with the string // of window index 0 (in particular we have to avoid a match // of the string with itself at the start of the input file).
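// Added note (not part of the upstream source): `hash_head` is the most recent window position
// whose 4-byte prefix hashed to the same bucket, so `dist` is how far back that candidate lies.
// A match of length `len` at distance `d` is later recorded as `tally_dist(d, len - STD_MIN_MATCH)`;
// with STD_MIN_MATCH being 3, a 5-byte match found 100 bytes back becomes `tally_dist(100, 2)`.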
let mut match_len; (match_len, state.match_start) = crate::deflate::longest_match::longest_match(state, hash_head); if match_len >= WANT_MIN_MATCH { let bflush = state.tally_dist( state.strstart - state.match_start as usize, match_len - STD_MIN_MATCH, ); state.lookahead -= match_len; /* Insert new strings in the hash table only if the match length * is not too large. This saves time but degrades compression. */ if match_len <= state.max_insert_length() && state.lookahead >= WANT_MIN_MATCH { match_len -= 1; /* string at strstart already in table */ state.strstart += 1; state.insert_string(state.strstart, match_len); state.strstart += match_len; } else { state.strstart += match_len; StandardHashCalc::quick_insert_string( state, state.strstart + 2 - STD_MIN_MATCH, ); /* If lookahead < STD_MIN_MATCH, ins_h is garbage, but it does not * matter since it will be recomputed at next deflate call. */ } if bflush { flush_block!(stream, false); } continue; } } lc = val as u8; } else { lc = state.window.filled()[state.strstart]; } /* No match, output a literal byte */ let bflush = state.tally_lit(lc); state.lookahead -= 1; state.strstart += 1; if bflush { flush_block!(stream, false); } } stream.state.insert = if stream.state.strstart < (STD_MIN_MATCH - 1) { stream.state.strstart } else { STD_MIN_MATCH - 1 }; if flush == DeflateFlush::Finish { flush_block!(stream, true); return BlockState::FinishDone; } if !stream.state.sym_buf.is_empty() { flush_block!(stream, false); } BlockState::BlockDone } zlib-rs-0.5.2/src/deflate/algorithm/huff.rs000064400000000000000000000022411046102023000166700ustar 00000000000000#![forbid(unsafe_code)] use crate::{ deflate::{fill_window, BlockState, DeflateStream}, flush_block, DeflateFlush, }; pub fn deflate_huff(stream: &mut DeflateStream, flush: DeflateFlush) -> BlockState { loop { /* Make sure that we have a literal to write. */ if stream.state.lookahead == 0 { fill_window(stream); if stream.state.lookahead == 0 { match flush { DeflateFlush::NoFlush => return BlockState::NeedMore, _ => break, /* flush the current block */ } } } /* Output a literal byte */ let state = &mut stream.state; let lc = state.window.filled()[state.strstart]; let bflush = state.tally_lit(lc); state.lookahead -= 1; state.strstart += 1; if bflush { flush_block!(stream, false); } } stream.state.insert = 0; if flush == DeflateFlush::Finish { flush_block!(stream, true); return BlockState::FinishDone; } if !stream.state.sym_buf.is_empty() { flush_block!(stream, false); } BlockState::BlockDone } zlib-rs-0.5.2/src/deflate/algorithm/medium.rs000064400000000000000000000254521046102023000172310ustar 00000000000000#![forbid(unsafe_code)] use crate::deflate::hash_calc::StandardHashCalc; use crate::{ deflate::{ fill_window, BlockState, DeflateStream, State, MIN_LOOKAHEAD, STD_MIN_MATCH, WANT_MIN_MATCH, }, flush_block, DeflateFlush, }; pub fn deflate_medium(stream: &mut DeflateStream, flush: DeflateFlush) -> BlockState { let mut state = &mut stream.state; // For levels below 5, don't check the next position for a better match let early_exit = state.level < 5; let mut current_match = Match { match_start: 0, match_length: 0, strstart: 0, orgstart: 0, }; let mut next_match = Match { match_start: 0, match_length: 0, strstart: 0, orgstart: 0, }; loop { let mut hash_head; /* Make sure that we always have enough lookahead, except * at the end of the input file. We need STD_MAX_MATCH bytes * for the next match, plus WANT_MIN_MATCH bytes to insert the * string following the next match. 
*/ if stream.state.lookahead < MIN_LOOKAHEAD { fill_window(stream); if stream.state.lookahead < MIN_LOOKAHEAD && flush == DeflateFlush::NoFlush { return BlockState::NeedMore; } if stream.state.lookahead == 0 { break; /* flush the current block */ } next_match.match_length = 0; } state = &mut stream.state; // Insert the string window[strstart .. strstart+2] in the // dictionary, and set hash_head to the head of the hash chain: /* If we already have a future match from a previous round, just use that */ if !early_exit && next_match.match_length > 0 { current_match = next_match; next_match.match_length = 0; } else { hash_head = 0; if state.lookahead >= WANT_MIN_MATCH { hash_head = StandardHashCalc::quick_insert_string(state, state.strstart); } current_match.strstart = state.strstart as u16; current_match.orgstart = current_match.strstart; /* Find the longest match, discarding those <= prev_length. * At this point we have always match_length < WANT_MIN_MATCH */ let dist = state.strstart as i64 - hash_head as i64; if dist <= state.max_dist() as i64 && dist > 0 && hash_head != 0 { /* To simplify the code, we prevent matches with the string * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ let (match_length, match_start) = crate::deflate::longest_match::longest_match(state, hash_head); state.match_start = match_start; current_match.match_length = match_length as u16; current_match.match_start = match_start; if (current_match.match_length as usize) < WANT_MIN_MATCH { current_match.match_length = 1; } if current_match.match_start >= current_match.strstart { /* this can happen due to some restarts */ current_match.match_length = 1; } } else { /* Set up the match to be a 1 byte literal */ current_match.match_start = 0; current_match.match_length = 1; } } insert_match(state, current_match); /* now, look ahead one */ if !early_exit && state.lookahead > MIN_LOOKAHEAD && ((current_match.strstart + current_match.match_length) as usize) < (state.window_size - MIN_LOOKAHEAD) { state.strstart = (current_match.strstart + current_match.match_length) as usize; hash_head = StandardHashCalc::quick_insert_string(state, state.strstart); next_match.strstart = state.strstart as u16; next_match.orgstart = next_match.strstart; /* Find the longest match, discarding those <= prev_length. * At this point we have always match_length < WANT_MIN_MATCH */ let dist = state.strstart as i64 - hash_head as i64; if dist <= state.max_dist() as i64 && dist > 0 && hash_head != 0 { /* To simplify the code, we prevent matches with the string * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). 
*/ let (match_length, match_start) = crate::deflate::longest_match::longest_match(state, hash_head); state.match_start = match_start; next_match.match_length = match_length as u16; next_match.match_start = match_start; if next_match.match_start >= next_match.strstart { /* this can happen due to some restarts */ next_match.match_length = 1; } if (next_match.match_length as usize) < WANT_MIN_MATCH { next_match.match_length = 1; } else { fizzle_matches( state.window.filled(), state.max_dist(), &mut current_match, &mut next_match, ); } } else { /* Set up the match to be a 1 byte literal */ next_match.match_start = 0; next_match.match_length = 1; } state.strstart = current_match.strstart as usize; } else { next_match.match_length = 0; } /* now emit the current match */ let bflush = emit_match(state, current_match); /* move the "cursor" forward */ state.strstart += current_match.match_length as usize; if bflush { flush_block!(stream, false); } } stream.state.insert = Ord::min(stream.state.strstart, STD_MIN_MATCH - 1); if flush == DeflateFlush::Finish { flush_block!(stream, true); return BlockState::FinishDone; } if !stream.state.sym_buf.is_empty() { flush_block!(stream, false); } BlockState::BlockDone } #[repr(C)] #[derive(Debug, Clone, Copy)] struct Match { match_start: u16, match_length: u16, strstart: u16, orgstart: u16, } fn emit_match(state: &mut State, m: Match) -> bool { let mut bflush = false; /* matches that are not long enough we need to emit as literals */ if (m.match_length as usize) < WANT_MIN_MATCH { for lc in &state.window.filled()[state.strstart..][..m.match_length as usize] { bflush |= State::tally_lit_help(&mut state.sym_buf, &mut state.l_desc, *lc); state.lookahead -= 1; } } else { // check_match(s, m.strstart, m.match_start, m.match_length); bflush |= state.tally_dist( (m.strstart - m.match_start) as usize, m.match_length as usize - STD_MIN_MATCH, ); state.lookahead -= m.match_length as usize; } bflush } #[inline(always)] fn insert_match(state: &mut State, mut m: Match) { if state.lookahead <= (m.match_length as usize + WANT_MIN_MATCH) { return; } /* matches that are not long enough we need to emit as literals */ if (m.match_length as usize) < WANT_MIN_MATCH { m.strstart += 1; m.match_length -= 1; if m.match_length > 0 && m.strstart >= m.orgstart { if m.strstart + m.match_length > m.orgstart { state.insert_string(m.strstart as usize, m.match_length as usize); } else { state.insert_string(m.strstart as usize, (m.orgstart - m.strstart + 1) as usize); } m.strstart += m.match_length; m.match_length = 0; } return; } // Insert new strings in the hash table if state.lookahead >= WANT_MIN_MATCH { m.match_length -= 1; /* string at strstart already in table */ m.strstart += 1; if m.strstart >= m.orgstart { if m.strstart + m.match_length > m.orgstart { state.insert_string(m.strstart as usize, m.match_length as usize); } else { state.insert_string(m.strstart as usize, (m.orgstart - m.strstart + 1) as usize); } } else if m.orgstart < m.strstart + m.match_length { state.insert_string( m.orgstart as usize, (m.strstart + m.match_length - m.orgstart) as usize, ); } m.strstart += m.match_length; m.match_length = 0; } else { m.strstart += m.match_length; m.match_length = 0; if (m.strstart as usize) >= (STD_MIN_MATCH - 2) { StandardHashCalc::quick_insert_string(state, m.strstart as usize + 2 - STD_MIN_MATCH); } /* If lookahead < WANT_MIN_MATCH, ins_h is garbage, but it does not * matter since it will be recomputed at next deflate call. 
*/ } } fn fizzle_matches(window: &[u8], max_dist: usize, current: &mut Match, next: &mut Match) { /* step zero: sanity checks */ if current.match_length <= 1 { return; } if current.match_length > 1 + next.match_start { return; } if current.match_length > 1 + next.strstart { return; } let m = &window[(-(current.match_length as isize) + 1 + next.match_start as isize) as usize..]; let orig = &window[(-(current.match_length as isize) + 1 + next.strstart as isize) as usize..]; /* quick exit check.. if this fails then don't bother with anything else */ if m[0] != orig[0] { return; } /* step one: try to move the "next" match to the left as much as possible */ let limit = next.strstart.saturating_sub(max_dist as u16); let mut c = *current; let mut n = *next; let m = &window[..n.match_start as usize]; let orig = &window[..n.strstart as usize]; let mut m = m.iter().rev(); let mut orig = orig.iter().rev(); let mut changed = 0; while m.next() == orig.next() { if c.match_length < 1 { break; } if n.strstart <= limit { break; } if n.match_length >= 256 { break; } if n.match_start <= 1 { break; } n.strstart -= 1; n.match_start -= 1; n.match_length += 1; c.match_length -= 1; changed += 1; } if changed == 0 { return; } if c.match_length <= 1 && n.match_length != 2 { n.orgstart += 1; *current = c; *next = n; } } zlib-rs-0.5.2/src/deflate/algorithm/mod.rs000064400000000000000000000050141046102023000165200ustar 00000000000000use crate::{ deflate::{BlockState, DeflateStream, Strategy}, DeflateFlush, }; use self::{huff::deflate_huff, rle::deflate_rle, stored::deflate_stored}; mod fast; mod huff; mod medium; mod quick; mod rle; mod slow; mod stored; #[macro_export] macro_rules! flush_block { ($stream:expr, $is_last_block:expr) => { $crate::deflate::flush_block_only($stream, $is_last_block); if $stream.avail_out == 0 { return match $is_last_block { true => BlockState::FinishStarted, false => BlockState::NeedMore, }; } }; } pub fn run(stream: &mut DeflateStream, flush: DeflateFlush) -> BlockState { match stream.state.strategy { _ if stream.state.level == 0 => deflate_stored(stream, flush), Strategy::HuffmanOnly => deflate_huff(stream, flush), Strategy::Rle => deflate_rle(stream, flush), Strategy::Default | Strategy::Filtered | Strategy::Fixed => { (CONFIGURATION_TABLE[stream.state.level as usize].func)(stream, flush) } } } type CompressFunc = fn(&mut DeflateStream, flush: DeflateFlush) -> BlockState; pub struct Config { pub good_length: u16, /* reduce lazy search above this match length */ pub max_lazy: u16, /* do not perform lazy search above this match length */ pub nice_length: u16, /* quit search above this match length */ pub max_chain: u16, pub func: CompressFunc, } impl Config { const fn new( good_length: u16, max_lazy: u16, nice_length: u16, max_chain: u16, func: CompressFunc, ) -> Self { Self { good_length, max_lazy, nice_length, max_chain, func, } } } pub static CONFIGURATION_TABLE: [Config; 10] = { [ Config::new(0, 0, 0, 0, stored::deflate_stored), // 0 /* store only */ Config::new(0, 0, 0, 0, quick::deflate_quick), // 1 Config::new(4, 4, 8, 4, fast::deflate_fast), // 2 /* max speed, no lazy matches */ Config::new(4, 6, 16, 6, medium::deflate_medium), // 3 Config::new(4, 12, 32, 24, medium::deflate_medium), // 4 /* lazy matches */ Config::new(8, 16, 32, 32, medium::deflate_medium), // 5 Config::new(8, 16, 128, 128, medium::deflate_medium), // 6 Config::new(8, 32, 128, 256, slow::deflate_slow), // 7 Config::new(32, 128, 258, 1024, slow::deflate_slow), // 8 Config::new(32, 258, 258, 4096, 
slow::deflate_slow), // 9 /* max compression */ ] }; zlib-rs-0.5.2/src/deflate/algorithm/quick.rs000064400000000000000000000127201046102023000170570ustar 00000000000000#![forbid(unsafe_code)] use crate::deflate::hash_calc::StandardHashCalc; use crate::{ deflate::{ fill_window, flush_pending, BlockState, BlockType, DeflateStream, State, StaticTreeDesc, MIN_LOOKAHEAD, STD_MAX_MATCH, STD_MIN_MATCH, WANT_MIN_MATCH, }, DeflateFlush, }; pub fn deflate_quick(stream: &mut DeflateStream, flush: DeflateFlush) -> BlockState { let mut state = &mut stream.state; macro_rules! quick_end_block { ($last:expr) => { if state.block_open > 0 { state .bit_writer .emit_end_block_and_align(&StaticTreeDesc::L.static_tree, $last); state.block_open = 0; state.block_start = state.strstart as isize; flush_pending(stream); #[allow(unused_assignments)] { state = &mut stream.state; } if stream.avail_out == 0 { return match $last { true => BlockState::FinishStarted, false => BlockState::NeedMore, }; } } }; } macro_rules! quick_start_block { ($last:expr) => { state.bit_writer.emit_tree(BlockType::StaticTrees, $last); state.block_open = 1 + $last as u8; state.block_start = state.strstart as isize; }; } let last = matches!(flush, DeflateFlush::Finish); if last && state.block_open != 2 { /* Emit end of previous block */ quick_end_block!(false); /* Emit start of last block */ quick_start_block!(last); } else if state.block_open == 0 && state.lookahead > 0 { /* Start new block only when we have lookahead data, so that if no input data is given an empty block will not be written */ quick_start_block!(last); } loop { if state.bit_writer.pending.pending + State::BIT_BUF_SIZE.div_ceil(8) as usize >= state.pending_buf_size() { flush_pending(stream); state = &mut stream.state; if stream.avail_out == 0 { return if last && stream.avail_in == 0 && state.bit_writer.bits_used == 0 && state.block_open == 0 { BlockState::FinishStarted } else { BlockState::NeedMore }; } } if state.lookahead < MIN_LOOKAHEAD { fill_window(stream); state = &mut stream.state; if state.lookahead < MIN_LOOKAHEAD && matches!(flush, DeflateFlush::NoFlush) { return BlockState::NeedMore; } if state.lookahead == 0 { break; } if state.block_open == 0 { // Start new block when we have lookahead data, // so that if no input data is given an empty block will not be written quick_start_block!(last); } } let lc: u8; if state.lookahead >= WANT_MIN_MATCH { macro_rules! first_four_bytes { ($slice:expr, $offset:expr) => { u32::from_le_bytes($slice[$offset..$offset + 4].try_into().unwrap()) }; } let str_val = { let str_start = &state.window.filled()[state.strstart..]; first_four_bytes!(str_start, 0) }; let hash_head = StandardHashCalc::quick_insert_value(state, state.strstart, str_val); let dist = state.strstart as isize - hash_head as isize; if dist <= state.max_dist() as isize && dist > 0 { let match_start = &state.window.filled()[hash_head as usize..]; if str_val == first_four_bytes!(match_start, 0) { let mut match_len = crate::deflate::compare256::compare256_slice( &state.window.filled()[state.strstart + 2..], &match_start[2..], ) + 2; if match_len >= WANT_MIN_MATCH { match_len = Ord::min(match_len, state.lookahead); match_len = Ord::min(match_len, STD_MAX_MATCH); // TODO do this with a debug_assert? // check_match(s, state.strstart, hash_head, match_len); // The `dist` value is a distance within the window, // and MAX_WBITS == 15 (32k), hence a u16 can always represent this value. 
let dist = u16::try_from(dist).unwrap(); state .bit_writer .emit_dist_static((match_len - STD_MIN_MATCH) as u8, dist); state.lookahead -= match_len; state.strstart += match_len; continue; } } } lc = str_val as u8; } else { lc = state.window.filled()[state.strstart]; } state.bit_writer.emit_lit(StaticTreeDesc::L.static_tree, lc); state.strstart += 1; state.lookahead -= 1; } state.insert = Ord::min(state.strstart, STD_MIN_MATCH - 1); quick_end_block!(last); if last { BlockState::FinishDone } else { BlockState::BlockDone } } zlib-rs-0.5.2/src/deflate/algorithm/rle.rs000064400000000000000000000051531046102023000165270ustar 00000000000000#![forbid(unsafe_code)] use crate::{ deflate::{ compare256::compare256_rle_slice, fill_window, BlockState, DeflateStream, MIN_LOOKAHEAD, STD_MAX_MATCH, STD_MIN_MATCH, }, flush_block, DeflateFlush, }; pub fn deflate_rle(stream: &mut DeflateStream, flush: DeflateFlush) -> BlockState { let mut match_len = 0; let mut bflush; loop { // Make sure that we always have enough lookahead, except // at the end of the input file. We need STD_MAX_MATCH bytes // for the next match, plus WANT_MIN_MATCH bytes to insert the // string following the next match. if stream.state.lookahead < MIN_LOOKAHEAD { fill_window(stream); if stream.state.lookahead < MIN_LOOKAHEAD && flush == DeflateFlush::NoFlush { return BlockState::NeedMore; } if stream.state.lookahead == 0 { break; /* flush the current block */ } } /* See how many times the previous byte repeats */ let state = &mut stream.state; if state.lookahead >= STD_MIN_MATCH && state.strstart > 0 { let scan = &state.window.filled()[state.strstart - 1..][..3 + 256]; { if scan[0] == scan[1] && scan[1] == scan[2] { match_len = compare256_rle_slice(scan[0], &scan[3..]) + 2; match_len = Ord::min(match_len, state.lookahead); match_len = Ord::min(match_len, STD_MAX_MATCH); } } assert!( state.strstart - 1 + match_len <= state.window_size - 1, "wild scan" ); } /* Emit match if have run of STD_MIN_MATCH or longer, else emit literal */ if match_len >= STD_MIN_MATCH { // check_match(s, s->strstart, s->strstart - 1, match_len); bflush = state.tally_dist(1, match_len - STD_MIN_MATCH); state.lookahead -= match_len; state.strstart += match_len; match_len = 0; } else { /* No match, output a literal byte */ let lc = state.window.filled()[state.strstart]; bflush = state.tally_lit(lc); state.lookahead -= 1; state.strstart += 1; } if bflush { flush_block!(stream, false); } } stream.state.insert = 0; if flush == DeflateFlush::Finish { flush_block!(stream, true); return BlockState::FinishDone; } if !stream.state.sym_buf.is_empty() { flush_block!(stream, false); } BlockState::BlockDone } zlib-rs-0.5.2/src/deflate/algorithm/slow.rs000064400000000000000000000135011046102023000167250ustar 00000000000000#![forbid(unsafe_code)] use crate::{ deflate::{ fill_window, flush_block_only, BlockState, DeflateStream, Strategy, MIN_LOOKAHEAD, STD_MIN_MATCH, WANT_MIN_MATCH, }, flush_block, DeflateFlush, }; pub fn deflate_slow(stream: &mut DeflateStream, flush: DeflateFlush) -> BlockState { let mut hash_head; /* head of hash chain */ let mut bflush; /* set if current block must be flushed */ let mut dist; let mut match_len; let use_longest_match_slow = stream.state.max_chain_length > 1024; let valid_distance_range = 1..=stream.state.max_dist() as isize; let mut match_available = stream.state.match_available; /* Process the input block. */ loop { /* Make sure that we always have enough lookahead, except * at the end of the input file. 
We need STD_MAX_MATCH bytes * for the next match, plus WANT_MIN_MATCH bytes to insert the * string following the next match. */ if stream.state.lookahead < MIN_LOOKAHEAD { fill_window(stream); if stream.state.lookahead < MIN_LOOKAHEAD && flush == DeflateFlush::NoFlush { return BlockState::NeedMore; } if stream.state.lookahead == 0 { break; /* flush the current block */ } } let state = &mut stream.state; /* Insert the string window[strstart .. strstart+2] in the * dictionary, and set hash_head to the head of the hash chain: */ hash_head = if state.lookahead >= WANT_MIN_MATCH { state.quick_insert_string(state.strstart) } else { 0 }; // Find the longest match, discarding those <= prev_length. state.prev_match = state.match_start; match_len = STD_MIN_MATCH - 1; dist = state.strstart as isize - hash_head as isize; if valid_distance_range.contains(&dist) && state.prev_length < state.max_lazy_match && hash_head != 0 { // To simplify the code, we prevent matches with the string // of window index 0 (in particular we have to avoid a match // of the string with itself at the start of the input file). (match_len, state.match_start) = if use_longest_match_slow { crate::deflate::longest_match::longest_match_slow(state, hash_head) } else { crate::deflate::longest_match::longest_match(state, hash_head) }; if match_len <= 5 && (state.strategy == Strategy::Filtered) { /* If prev_match is also WANT_MIN_MATCH, match_start is garbage * but we will ignore the current match anyway. */ match_len = STD_MIN_MATCH - 1; } } // If there was a match at the previous step and the current // match is not better, output the previous match: if state.prev_length as usize >= STD_MIN_MATCH && match_len <= state.prev_length as usize { let max_insert = state.strstart + state.lookahead - STD_MIN_MATCH; /* Do not insert strings in hash table beyond this. */ // check_match(s, state.strstart-1, state.prev_match, state.prev_length); bflush = state.tally_dist( state.strstart - 1 - state.prev_match as usize, state.prev_length as usize - STD_MIN_MATCH, ); /* Insert in hash table all strings up to the end of the match. * strstart-1 and strstart are already inserted. If there is not * enough lookahead, the last two strings are not inserted in * the hash table. */ state.prev_length -= 1; state.lookahead -= state.prev_length as usize; let mov_fwd = state.prev_length as usize - 1; if max_insert > state.strstart { let insert_cnt = Ord::min(mov_fwd, max_insert - state.strstart); state.insert_string(state.strstart + 1, insert_cnt); } state.prev_length = 0; state.match_available = false; match_available = false; state.strstart += mov_fwd + 1; if bflush { flush_block!(stream, false); } } else if match_available { // If there was no match at the previous position, output a // single literal. If there was a match but the current match // is longer, truncate the previous match to a single literal. let lc = state.window.filled()[state.strstart - 1]; bflush = state.tally_lit(lc); if bflush { flush_block_only(stream, false); } stream.state.prev_length = match_len as u16; stream.state.strstart += 1; stream.state.lookahead -= 1; if stream.avail_out == 0 { return BlockState::NeedMore; } } else { // There is no previous match to compare with, wait for // the next step to decide. 
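// Added illustration (not part of the upstream source): this branch is the "lazy match" deferral.
// A match found at position i is not emitted immediately; position i + 1 is searched first, and
// the match at i is only emitted if the next one is not longer. Otherwise the byte at i is
// emitted as a single literal and the longer match wins, which is the case handled by the
// `match_available` branch above.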
state.prev_length = match_len as u16; state.match_available = true; match_available = true; state.strstart += 1; state.lookahead -= 1; } } assert_ne!(flush, DeflateFlush::NoFlush, "no flush?"); let state = &mut stream.state; if state.match_available { let lc = state.window.filled()[state.strstart - 1]; let _ = state.tally_lit(lc); state.match_available = false; } state.insert = Ord::min(state.strstart, STD_MIN_MATCH - 1); if flush == DeflateFlush::Finish { flush_block!(stream, true); return BlockState::FinishDone; } if !stream.state.sym_buf.is_empty() { flush_block!(stream, false); } BlockState::BlockDone } zlib-rs-0.5.2/src/deflate/algorithm/stored.rs000064400000000000000000000264311046102023000172470ustar 00000000000000use crate::{ deflate::{ flush_pending, read_buf_window, zng_tr_stored_block, BlockState, DeflateStream, MAX_STORED, }, DeflateFlush, }; pub fn deflate_stored(stream: &mut DeflateStream, flush: DeflateFlush) -> BlockState { // Smallest worthy block size when not flushing or finishing. By default // this is 32K. This can be as small as 507 bytes for memLevel == 1. For // large input and output buffers, the stored block size will be larger. let min_block = Ord::min( stream.state.bit_writer.pending.capacity() - 5, stream.state.w_size, ); // Copy as many min_block or larger stored blocks directly to next_out as // possible. If flushing, copy the remaining available input to next_out as // stored blocks, if there is enough space. // unsigned len, left, have, last = 0; let mut have; let mut last = false; let mut used = stream.avail_in; loop { // maximum deflate stored block length let mut len = MAX_STORED; // number of header bytes have = ((stream.state.bit_writer.bits_used + 42) / 8) as usize; // we need room for at least the header if stream.avail_out < have as u32 { break; } let left = stream.state.strstart as isize - stream.state.block_start; let left = Ord::max(0, left) as usize; have = stream.avail_out as usize - have; if len > left + stream.avail_in as usize { // limit len to the input len = left + stream.avail_in as usize; } len = Ord::min(len, have); // If the stored block would be less than min_block in length, or if // unable to copy all of the available input when flushing, then try // copying to the window and the pending buffer instead. Also don't // write an empty block when flushing -- deflate() does that. if len < min_block && ((len == 0 && flush != DeflateFlush::Finish) || flush == DeflateFlush::NoFlush || len != left + stream.avail_in as usize) { break; } // Make a dummy stored block in pending to get the header bytes, // including any pending bits. This also updates the debugging counts. last = flush == DeflateFlush::Finish && len == left + stream.avail_in as usize; zng_tr_stored_block(stream.state, 0..0, last); /* Replace the lengths in the dummy stored block with len. */ stream.state.bit_writer.pending.rewind(4); stream .state .bit_writer .pending .extend(&(len as u16).to_le_bytes()); stream .state .bit_writer .pending .extend(&(!len as u16).to_le_bytes()); // Write the stored block header bytes. flush_pending(stream); // Update debugging counts for the data about to be copied. stream.state.bit_writer.cmpr_bits_add(len << 3); stream.state.bit_writer.sent_bits_add(len << 3); if left > 0 { // SAFETY: `len` is effectively `min(stream.avail_in, stream.avail_out)`, so any reads // of `len` won't go out of bounds on `next_out`. `left` is calculated from indices of // the window, so `left` reads of the window won't go out of bounds. 
let left = Ord::min(left, len); let src = &stream.state.window.filled()[stream.state.block_start as usize..]; unsafe { core::ptr::copy_nonoverlapping(src.as_ptr(), stream.next_out, left) }; stream.next_out = stream.next_out.wrapping_add(left); stream.avail_out = stream.avail_out.wrapping_sub(left as _); stream.total_out = stream.total_out.wrapping_add(left as _); stream.state.block_start += left as isize; len -= left; } // Copy uncompressed bytes directly from next_in to next_out, updating the check value. if len > 0 { read_buf_direct_copy(stream, len); } if last { break; } } // Update the sliding window with the last s->w_size bytes of the copied // data, or append all of the copied data to the existing window if less // than s->w_size bytes were copied. Also update the number of bytes to // insert in the hash tables, in the event that deflateParams() switches to // a non-zero compression level. used -= stream.avail_in; /* number of input bytes directly copied */ if used > 0 { let state = &mut stream.state; // If any input was used, then no unused input remains in the window, therefore s->block_start == s->strstart. if used as usize >= state.w_size { /* supplant the previous history */ state.matches = 2; /* clear hash */ // SAFETY: we've advanced the next_in pointer at minimum w_size bytes // read_buf_direct_copy(), so we are able to backtrack that number of bytes. let src = stream.next_in.wrapping_sub(state.w_size); unsafe { state.window.copy_and_initialize(0..state.w_size, src) }; state.strstart = state.w_size; state.insert = state.strstart; } else { if state.window_size - state.strstart <= used as usize { /* Slide the window down. */ state.strstart -= state.w_size; // make sure we don't copy uninitialized bytes. While we discard the first lower w_size // bytes, it is not guaranteed that the upper w_size bytes are all initialized let copy = Ord::min(state.strstart, state.window.filled().len() - state.w_size); state .window .filled_mut() .copy_within(state.w_size..state.w_size + copy, 0); if state.matches < 2 { state.matches += 1; /* add a pending slide_hash() */ } state.insert = Ord::min(state.insert, state.strstart); } // SAFETY: we've advanced the next_in pointer at least `used` bytes // read_buf_direct_copy(), so we are able to backtrack that number of bytes. let src = stream.next_in.wrapping_sub(used as usize); let dst = state.strstart..state.strstart + used as usize; unsafe { state.window.copy_and_initialize(dst, src) }; state.strstart += used as usize; state.insert += Ord::min(used as usize, state.w_size - state.insert); } state.block_start = state.strstart as isize; } if last { return BlockState::FinishDone; } // If flushing and all input has been consumed, then done. if flush != DeflateFlush::NoFlush && flush != DeflateFlush::Finish && stream.avail_in == 0 && stream.state.strstart as isize == stream.state.block_start { return BlockState::BlockDone; } // Fill the window with any remaining input let mut have = stream.state.window_size - stream.state.strstart; if stream.avail_in as usize > have && stream.state.block_start >= stream.state.w_size as isize { // slide the window down let state = &mut stream.state; state.block_start -= state.w_size as isize; state.strstart -= state.w_size; // make sure we don't copy uninitialized bytes. 
While we discard the first lower w_size // bytes, it is not guaranteed that the upper w_size bytes are all initialized let copy = Ord::min(state.strstart, state.window.filled().len() - state.w_size); state .window .filled_mut() .copy_within(state.w_size..state.w_size + copy, 0); if state.matches < 2 { // add a pending slide_hash state.matches += 1; } have += state.w_size; // more space now state.insert = Ord::min(state.insert, state.strstart); } let have = Ord::min(have, stream.avail_in as usize); if have > 0 { read_buf_window(stream, stream.state.strstart, have); let state = &mut stream.state; state.strstart += have; state.insert += Ord::min(have, state.w_size - state.insert); } // There was not enough avail_out to write a complete worthy or flushed // stored block to next_out. Write a stored block to pending instead, if we // have enough input for a worthy block, or if flushing and there is enough // room for the remaining input as a stored block in the pending buffer. // number of header bytes let state = &mut stream.state; let have = ((state.bit_writer.bits_used + 42) >> 3) as usize; // maximum stored block length that will fit in pending: let have = Ord::min(state.bit_writer.pending.capacity() - have, MAX_STORED); let min_block = Ord::min(have, state.w_size); let left = state.strstart as isize - state.block_start; if left >= min_block as isize || ((left > 0 || flush == DeflateFlush::Finish) && flush != DeflateFlush::NoFlush && stream.avail_in == 0 && left <= have as isize) { let len = Ord::min(left as usize, have); // TODO wrapping? last = flush == DeflateFlush::Finish && stream.avail_in == 0 && len == (left as usize); let range = state.block_start as usize..state.block_start as usize + len; zng_tr_stored_block(state, range, last); state.block_start += len as isize; flush_pending(stream); } // We've done all we can with the available input and output. if last { BlockState::FinishStarted } else { BlockState::NeedMore } } fn read_buf_direct_copy(stream: &mut DeflateStream, size: usize) -> usize { let len = Ord::min(stream.avail_in as usize, size); let output = stream.next_out; if len == 0 { return 0; } stream.avail_in -= len as u32; // SAFETY: len is effectively bounded by next_in and next_out (via size derived in the calling // function), so copies are in-bounds. if stream.state.wrap == 2 { // we likely cannot fuse the crc32 and the copy here because the input can be changed by // a concurrent thread. Therefore it cannot be converted into a slice! unsafe { core::ptr::copy_nonoverlapping(stream.next_in, output, len) } let data = unsafe { core::slice::from_raw_parts(output, len) }; stream.state.crc_fold.fold(data, 0); } else if stream.state.wrap == 1 { // we cannot fuse the adler and the copy in our case, because adler32 takes a slice. 
// Another process is allowed to concurrently modify stream.next_in, so we cannot turn it // into a rust slice (violates its safety requirements) unsafe { core::ptr::copy_nonoverlapping(stream.next_in, output, len) } let data = unsafe { core::slice::from_raw_parts(output, len) }; stream.adler = crate::adler32::adler32(stream.adler as u32, data) as _; } else { unsafe { core::ptr::copy_nonoverlapping(stream.next_in, output, len) } } stream.next_in = stream.next_in.wrapping_add(len); stream.total_in += len as crate::c_api::z_size; stream.next_out = stream.next_out.wrapping_add(len as _); stream.avail_out = stream.avail_out.wrapping_sub(len as _); stream.total_out = stream.total_out.wrapping_add(len as _); len } zlib-rs-0.5.2/src/deflate/compare256.rs000064400000000000000000000247061046102023000156470ustar 00000000000000#[cfg(test)] const MAX_COMPARE_SIZE: usize = 256; pub fn compare256_slice(src0: &[u8], src1: &[u8]) -> usize { let src0 = first_chunk::<_, 256>(src0).unwrap(); let src1 = first_chunk::<_, 256>(src1).unwrap(); compare256(src0, src1) } fn compare256(src0: &[u8; 256], src1: &[u8; 256]) -> usize { #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_avx2_and_bmi2() { return unsafe { avx2::compare256(src0, src1) }; } #[cfg(target_arch = "aarch64")] if crate::cpu_features::is_enabled_neon() { return unsafe { neon::compare256(src0, src1) }; } #[cfg(target_arch = "wasm32")] if crate::cpu_features::is_enabled_simd128() { return wasm32::compare256(src0, src1); } rust::compare256(src0, src1) } pub fn compare256_rle_slice(byte: u8, src: &[u8]) -> usize { rust::compare256_rle(byte, src) } #[inline] pub const fn first_chunk(slice: &[T]) -> Option<&[T; N]> { if slice.len() < N { None } else { // SAFETY: We explicitly check for the correct number of elements, // and do not let the reference outlive the slice. Some(unsafe { &*(slice.as_ptr() as *const [T; N]) }) } } mod rust { pub fn compare256(src0: &[u8; 256], src1: &[u8; 256]) -> usize { // only unrolls 4 iterations; zlib-ng unrolls 8 src0.iter().zip(src1).take_while(|(x, y)| x == y).count() } // run-length encoding pub fn compare256_rle(byte: u8, src: &[u8]) -> usize { assert!(src.len() >= 256, "too short {}", src.len()); let sv = u64::from_ne_bytes([byte; 8]); let mut len = 0; // this optimizes well because we statically limit the slice to 256 bytes. // the loop gets unrolled 4 times automatically. 
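// For example, with byte == b'a' (0x61) and a chunk equal to b"aaaXaaaa", sv is
// 0x6161616161616161 and the XOR below is non-zero only in its fourth byte, so
// diff.trailing_zeros() / 8 == 3 and the run length reported is len + 3.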
for chunk in src[..256].chunks_exact(8) { let mv = u64::from_le_bytes(chunk.try_into().unwrap()); let diff = sv ^ mv; if diff > 0 { let match_byte = diff.trailing_zeros() / 8; return len + match_byte as usize; } len += 8 } 256 } #[test] fn test_compare256() { let str1 = [b'a'; super::MAX_COMPARE_SIZE]; let mut str2 = [b'a'; super::MAX_COMPARE_SIZE]; for i in 0..str1.len() { str2[i] = 0; let match_len = compare256(&str1, &str2); assert_eq!(match_len, i); str2[i] = b'a'; } } #[test] fn test_compare256_rle() { let mut string = [b'a'; super::MAX_COMPARE_SIZE]; for i in 0..string.len() { string[i] = 0; let match_len = compare256_rle(b'a', &string); assert_eq!(match_len, i); string[i] = b'a'; } } } #[cfg(target_arch = "aarch64")] mod neon { use core::arch::aarch64::{ uint8x16x4_t, vceqq_u8, vget_lane_u64, vld4q_u8, vreinterpret_u64_u8, vreinterpretq_u16_u8, vshrn_n_u16, vsriq_n_u8, }; /// # Safety /// /// Behavior is undefined if the `neon` target feature is not enabled #[target_feature(enable = "neon")] pub unsafe fn compare256(src0: &[u8; 256], src1: &[u8; 256]) -> usize { type Chunk = uint8x16x4_t; let src0 = src0.chunks_exact(core::mem::size_of::()); let src1 = src1.chunks_exact(core::mem::size_of::()); let mut len = 0; for (a, b) in src0.zip(src1) { unsafe { // Load 4 vectors *deinterleaved* from the two slices // e.g. the first vector contains the 0, 4, 8, ... bytes of the input, the // second vector contains the 1, 5, 9, ... bytes of the input, etc. let a: Chunk = vld4q_u8(a.as_ptr()); let b: Chunk = vld4q_u8(b.as_ptr()); // Compare each vector element-wise, each resulting vector will contain // 0xFF for equal bytes, and 0x00 for unequal bytes. let cmp0 = vceqq_u8(a.0, b.0); let cmp1 = vceqq_u8(a.1, b.1); let cmp2 = vceqq_u8(a.2, b.2); let cmp3 = vceqq_u8(a.3, b.3); // Pack bits from the 4 vectors into a single vector to convert to a 64-bit integer. // shift the second vector right by one, insert the top bit from the first vector // The top two bits each element of the result are from the first and second vector let first_two_bits = vsriq_n_u8::<1>(cmp1, cmp0); // shift the fourth vector right by one, insert the top bit from the third vector // The top two bits each element of the result are from the third and fourth vector let last_two_bits = vsriq_n_u8::<1>(cmp3, cmp2); // shift last_two_bits (the top two bits of which are from the third and fourth // vector) right by 2, insert the top two bits from first_two_bits (the top two // bits of which are from the first and second vector). // The top four bits of each element of the result are from the // first, second, third, and fourth vector let first_four_bits = vsriq_n_u8::<2>(last_two_bits, first_two_bits); // duplicate the top 4 bits into the bottom 4 bits of each element. let bitmask_vector = vsriq_n_u8::<4>(first_four_bits, first_four_bits); // Reinterpret as 16-bit integers, and shift right by 4 bits narrowing: // shifting right by 4 bits means the top 4 bits of each 16 bit element contains the // low 4 bits of the 0th 8-bit element and the high 4 bits of the 1nth 8-bit // element. Narrowing takes the top 8 bits of each (16-bit) element. let result_vector = vshrn_n_u16::<4>(vreinterpretq_u16_u8(bitmask_vector)); // Convert the vector to a 64-bit integer, where each bit represents whether // the corresponding byte in the original vectors was equal. let bitmask = vget_lane_u64::<0>(vreinterpret_u64_u8(result_vector)); // We reinterpreted the vector as a 64-bit integer, so endianness matters. 
// We want things to be in little-endian (where the least significant bit is in the // first byte), but in big-endian, the first vector element will be the most // significant byte, so we need to convert to little-endian. let bitmask = bitmask.to_le(); if bitmask != u64::MAX { // Find the first byte that is not equal, which is the first bit that is not set let match_byte = bitmask.trailing_ones(); return len + match_byte as usize; } len += core::mem::size_of::(); } } 256 } #[test] fn test_compare256() { if crate::cpu_features::is_enabled_neon() { let str1 = [b'a'; super::MAX_COMPARE_SIZE]; let mut str2 = [b'a'; super::MAX_COMPARE_SIZE]; for i in 0..str1.len() { str2[i] = 0; let match_len = unsafe { compare256(&str1, &str2) }; assert_eq!(match_len, i); str2[i] = b'a'; } } } } #[cfg(target_arch = "x86_64")] mod avx2 { use core::arch::x86_64::{ __m256i, _mm256_cmpeq_epi8, _mm256_loadu_si256, _mm256_movemask_epi8, }; /// # Safety /// /// Behavior is undefined if the `avx` target feature is not enabled #[target_feature(enable = "avx2")] #[target_feature(enable = "bmi2")] #[target_feature(enable = "bmi1")] pub unsafe fn compare256(src0: &[u8; 256], src1: &[u8; 256]) -> usize { let src0 = src0.chunks_exact(32); let src1 = src1.chunks_exact(32); let mut len = 0; unsafe { for (chunk0, chunk1) in src0.zip(src1) { let ymm_src0 = _mm256_loadu_si256(chunk0.as_ptr() as *const __m256i); let ymm_src1 = _mm256_loadu_si256(chunk1.as_ptr() as *const __m256i); // element-wise compare of the 8-bit elements let ymm_cmp = _mm256_cmpeq_epi8(ymm_src0, ymm_src1); // turn an 32 * 8-bit vector into a 32-bit integer. // a bit in the output is one if the corresponding element is non-zero. let mask = _mm256_movemask_epi8(ymm_cmp) as u32; if mask != 0xFFFFFFFF { let match_byte = mask.trailing_ones(); return len + match_byte as usize; } len += 32; } } 256 } #[test] fn test_compare256() { if crate::cpu_features::is_enabled_avx2_and_bmi2() { let str1 = [b'a'; super::MAX_COMPARE_SIZE]; let mut str2 = [b'a'; super::MAX_COMPARE_SIZE]; for i in 0..str1.len() { str2[i] = 0; let match_len = unsafe { compare256(&str1, &str2) }; assert_eq!(match_len, i); str2[i] = b'a'; } } } } #[cfg(target_arch = "wasm32")] mod wasm32 { use core::arch::wasm32::{u8x16_bitmask, u8x16_eq, v128, v128_load}; #[target_feature(enable = "simd128")] pub fn compare256(src0: &[u8; 256], src1: &[u8; 256]) -> usize { let src0 = src0.chunks_exact(16); let src1 = src1.chunks_exact(16); let mut len = 0; for (chunk0, chunk1) in src0.zip(src1) { // SAFETY: these are valid pointers to slice data. 
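// (chunks_exact(16) hands out slices of exactly 16 bytes, the width of one v128,
// so each 16-byte load below stays inside the 256-byte input arrays.)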
let v128_src0 = unsafe { v128_load(chunk0.as_ptr() as *const v128) }; let v128_src1 = unsafe { v128_load(chunk1.as_ptr() as *const v128) }; let v128_cmp = u8x16_eq(v128_src0, v128_src1); let mask = u8x16_bitmask(v128_cmp); if mask != 0xFFFF { let match_byte = mask.trailing_ones(); return len + match_byte as usize; } len += 16; } 256 } #[test] fn test_compare256() { if crate::cpu_features::is_enabled_simd128() { let str1 = [b'a'; super::MAX_COMPARE_SIZE]; let mut str2 = [b'a'; super::MAX_COMPARE_SIZE]; for i in 0..str1.len() { str2[i] = 0; let match_len = unsafe { compare256(&str1, &str2) }; assert_eq!(match_len, i); str2[i] = b'a'; } } } } zlib-rs-0.5.2/src/deflate/hash_calc.rs000064400000000000000000000123711046102023000156640ustar 00000000000000#![forbid(unsafe_code)] use crate::deflate::{State, HASH_SIZE, STD_MIN_MATCH}; #[derive(Debug, Clone, Copy)] pub enum HashCalcVariant { Standard, Roll, } impl HashCalcVariant { /// Use rolling hash for deflate_slow algorithm with level 9. It allows us to /// properly lookup different hash chains to speed up longest_match search. pub fn for_max_chain_length(max_chain_length: u16) -> Self { if max_chain_length > 1024 { HashCalcVariant::Roll } else { HashCalcVariant::Standard } } } pub struct StandardHashCalc; impl StandardHashCalc { const HASH_CALC_OFFSET: usize = 0; const HASH_CALC_MASK: u32 = (HASH_SIZE - 1) as u32; fn hash_calc(_: u32, val: u32) -> u32 { const HASH_SLIDE: u32 = 16; val.wrapping_mul(2654435761) >> HASH_SLIDE } pub fn update_hash(h: u32, val: u32) -> u32 { Self::hash_calc(h, val) & Self::HASH_CALC_MASK } #[inline] pub fn quick_insert_string(state: &mut State, string: usize) -> u16 { let slice = &state.window.filled()[string + Self::HASH_CALC_OFFSET..]; let val = u32::from_le_bytes(slice[..4].try_into().unwrap()); Self::quick_insert_value(state, string, val) } #[inline] pub fn quick_insert_value(state: &mut State, string: usize, val: u32) -> u16 { let hm = Self::update_hash(0, val) as usize; let head = state.head.as_slice()[hm]; if head != string as u16 { state.prev.as_mut_slice()[string & state.w_mask()] = head; state.head.as_mut_slice()[hm] = string as u16; } head } pub fn insert_string(state: &mut State, string: usize, count: usize) { let slice = &state.window.filled()[string + Self::HASH_CALC_OFFSET..]; // it can happen that insufficient bytes are initialized // .take(count) generates worse assembly let slice = &slice[..Ord::min(slice.len(), count + 3)]; let w_mask = state.w_mask(); for (i, w) in slice.windows(4).enumerate() { let idx = string as u16 + i as u16; let val = u32::from_le_bytes(w.try_into().unwrap()); let hm = Self::update_hash(0, val) as usize; let head = state.head.as_slice()[hm]; if head != idx { state.prev.as_mut_slice()[idx as usize & w_mask] = head; state.head.as_mut_slice()[hm] = idx; } } } } pub struct RollHashCalc; impl RollHashCalc { const HASH_CALC_OFFSET: usize = STD_MIN_MATCH - 1; const HASH_CALC_MASK: u32 = (1 << 15) - 1; fn hash_calc(h: u32, val: u32) -> u32 { const HASH_SLIDE: u32 = 5; (h << HASH_SLIDE) ^ val } pub fn update_hash(h: u32, val: u32) -> u32 { Self::hash_calc(h, val) & Self::HASH_CALC_MASK } pub fn quick_insert_string(state: &mut State, string: usize) -> u16 { let val = state.window.filled()[string + Self::HASH_CALC_OFFSET] as u32; state.ins_h = Self::hash_calc(state.ins_h, val); state.ins_h &= Self::HASH_CALC_MASK; let hm = state.ins_h as usize; let head = state.head.as_slice()[hm]; if head != string as u16 { state.prev.as_mut_slice()[string & state.w_mask()] = head; 
state.head.as_mut_slice()[hm] = string as u16; } head } pub fn insert_string(state: &mut State, string: usize, count: usize) { let slice = &state.window.filled()[string + Self::HASH_CALC_OFFSET..][..count]; let w_mask = state.w_mask(); for (i, val) in slice.iter().copied().enumerate() { let idx = string as u16 + i as u16; state.ins_h = Self::hash_calc(state.ins_h, val as u32); state.ins_h &= Self::HASH_CALC_MASK; let hm = state.ins_h as usize; let head = state.head.as_slice()[hm]; if head != idx { state.prev.as_mut_slice()[idx as usize & w_mask] = head; state.head.as_mut_slice()[hm] = idx; } } } } #[cfg(test)] mod tests { use super::*; #[test] fn roll_hash_calc() { assert_eq!(RollHashCalc::hash_calc(2565, 93), 82173); assert_eq!(RollHashCalc::hash_calc(16637, 10), 532394); assert_eq!(RollHashCalc::hash_calc(8106, 100), 259364); assert_eq!(RollHashCalc::hash_calc(29988, 101), 959717); assert_eq!(RollHashCalc::hash_calc(9445, 98), 302274); assert_eq!(RollHashCalc::hash_calc(7362, 117), 235573); assert_eq!(RollHashCalc::hash_calc(6197, 103), 198343); assert_eq!(RollHashCalc::hash_calc(1735, 32), 55488); assert_eq!(RollHashCalc::hash_calc(22720, 61), 727101); assert_eq!(RollHashCalc::hash_calc(6205, 32), 198528); assert_eq!(RollHashCalc::hash_calc(3826, 117), 122421); assert_eq!(RollHashCalc::hash_calc(24117, 101), 771781); } #[test] fn standard_hash_calc() { assert_eq!(StandardHashCalc::hash_calc(0, 807411760), 65468); assert_eq!(StandardHashCalc::hash_calc(0, 540024864), 42837); assert_eq!(StandardHashCalc::hash_calc(0, 538980384), 33760); assert_eq!(StandardHashCalc::hash_calc(0, 775430176), 8925); assert_eq!(StandardHashCalc::hash_calc(0, 941629472), 42053); } } zlib-rs-0.5.2/src/deflate/longest_match.rs000064400000000000000000000320351046102023000166050ustar 00000000000000use crate::deflate::{Pos, State, MIN_LOOKAHEAD, STD_MAX_MATCH, STD_MIN_MATCH}; const EARLY_EXIT_TRIGGER_LEVEL: i8 = 5; /// Find the (length, offset) in the window of the longest match for the string /// at offset cur_match pub fn longest_match(state: &crate::deflate::State, cur_match: u16) -> (usize, u16) { longest_match_help::(state, cur_match) } pub fn longest_match_slow(state: &crate::deflate::State, cur_match: u16) -> (usize, u16) { longest_match_help::(state, cur_match) } fn longest_match_help( state: &crate::deflate::State, mut cur_match: u16, ) -> (usize, u16) { let mut match_start = state.match_start; let strstart = state.strstart; let wmask = state.w_mask(); let window = state.window.filled(); let scan = &window[strstart..]; let mut limit: Pos; let limit_base: Pos; let early_exit: bool; let mut chain_length: u16; let mut best_len: usize; let lookahead = state.lookahead; let mut match_offset = 0; macro_rules! goto_next_in_chain { () => { chain_length -= 1; if chain_length > 0 { cur_match = state.prev.as_slice()[cur_match as usize & wmask]; if cur_match > limit { continue; } } return (best_len, match_start); }; } // The code is optimized for STD_MAX_MATCH-2 multiple of 16. assert_eq!(STD_MAX_MATCH, 258, "Code too clever"); // length of the previous match (if any), hence <= STD_MAX_MATCH best_len = if state.prev_length > 0 { state.prev_length as usize } else { STD_MIN_MATCH - 1 }; // Calculate read offset which should only extend an extra byte to find the next best match length. 
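// For example, assuming the two size_of thresholds below are 4 (u32) and 8 (u64):
// best_len == 10 gives offset == 10 - 1 - 2 - 4 == 3, so an 8-byte compare at
// scan_end covers bytes 3..=10 of scan, which includes byte index best_len, the
// byte that any longer match must also have in common.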
let mut offset = best_len - 1; if best_len >= core::mem::size_of::() { offset -= 2; if best_len >= core::mem::size_of::() { offset -= 4; } } let mut mbase_start = window.as_ptr(); let mut mbase_end = window[offset..].as_ptr(); // Don't waste too much time by following a chain if we already have a good match chain_length = state.max_chain_length; if best_len >= state.good_match as usize { chain_length >>= 2; } let nice_match = state.nice_match; // Stop when cur_match becomes <= limit. To simplify the code, // we prevent matches with the string of window index 0 limit = strstart.saturating_sub(state.max_dist()) as Pos; // look for a better string offset if SLOW { limit_base = limit; if best_len >= STD_MIN_MATCH { /* We're continuing search (lazy evaluation). */ let mut pos: Pos; // Find a most distant chain starting from scan with index=1 (index=0 corresponds // to cur_match). We cannot use s->prev[strstart+1,...] immediately, because // these strings are not yet inserted into the hash table. let Some([_cur_match, scan1, scan2, scanrest @ ..]) = scan.get(..best_len + 1) else { panic!("invalid scan"); }; let mut hash = 0; hash = state.update_hash(hash, *scan1 as u32); hash = state.update_hash(hash, *scan2 as u32); for (i, b) in scanrest.iter().enumerate() { hash = state.update_hash(hash, *b as u32); /* If we're starting with best_len >= 3, we can use offset search. */ pos = state.head.as_slice()[hash as usize]; if pos < cur_match { match_offset = (i + 1) as Pos; cur_match = pos; } } /* Update offset-dependent variables */ limit = limit_base + match_offset; if cur_match <= limit { return break_matching(state, best_len, match_start); } mbase_start = mbase_start.wrapping_sub(match_offset as usize); mbase_end = mbase_end.wrapping_sub(match_offset as usize); } early_exit = false; } else { // must initialize this variable limit_base = 0; early_exit = state.level < EARLY_EXIT_TRIGGER_LEVEL; } let scan_start = window[strstart..].as_ptr(); let mut scan_end = window[strstart + offset..].as_ptr(); assert!( strstart <= state.window_size.saturating_sub(MIN_LOOKAHEAD), "need lookahead" ); loop { if cur_match as usize >= strstart { break; } // Skip to next match if the match length cannot increase or if the match length is // less than 2. Note that the checks below for insufficient lookahead only occur // occasionally for performance reasons. // Therefore uninitialized memory will be accessed and conditional jumps will be made // that depend on those values. However the length of the match is limited to the // lookahead, so the output of deflate is not affected by the uninitialized values. /// # Safety /// /// The two pointers must be valid for reads of N bytes. #[inline(always)] unsafe fn memcmp_n_ptr(src0: *const u8, src1: *const u8) -> bool { unsafe { let src0_cmp = core::ptr::read(src0 as *const [u8; N]); let src1_cmp = core::ptr::read(src1 as *const [u8; N]); src0_cmp == src1_cmp } } /// # Safety /// /// scan_start and scan_end must be valid for reads of N bytes. mbase_end and mbase_start /// must be valid for reads of N + cur_match bytes. #[inline(always)] unsafe fn is_match( cur_match: u16, mbase_start: *const u8, mbase_end: *const u8, scan_start: *const u8, scan_end: *const u8, ) -> bool { let be = mbase_end.wrapping_add(cur_match as usize); let bs = mbase_start.wrapping_add(cur_match as usize); unsafe { memcmp_n_ptr::(be, scan_end) && memcmp_n_ptr::(bs, scan_start) } } // first, do a quick check on the start and end bytes. Go to the next item in the chain if // these bytes don't match. 
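// A rough sketch of the fast path below: if scan_start begins with b"abcdefgh" and the
// candidate match begins with b"abcXefgh", then scan_val ^ match_val is non-zero only
// in byte 3, so cmp.to_le().trailing_zeros() / 8 == 3, the length of the common prefix.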
// SAFETY: we read up to 8 bytes in this block. // Note that scan_start >= mbase_start and scan_end >= mbase_end. // the surrounding loop breaks before cur_match gets past strstart, which is bounded by // `window_size - 258 + 3 + 1` (`window_size - MIN_LOOKAHEAD`). // // With 262 bytes of space at the end, and 8 byte reads of scan_start is always in-bounds. // // scan_end is a bit trickier: it reads at a bounded offset from scan_start: // // - >= 8: scan_end is bounded by `258 - (4 + 2 + 1)`, so an 8-byte read is in-bounds // - >= 4: scan_end is bounded by `258 - (2 + 1)`, so a 4-byte read is in-bounds // - >= 2: scan_end is bounded by `258 - 1`, so a 2-byte read is in-bounds let mut len = 0; unsafe { if best_len < core::mem::size_of::() { let scan_val = u64::from_ne_bytes( core::slice::from_raw_parts(scan_start, 8) .try_into() .unwrap(), ); loop { let bs = mbase_start.wrapping_add(cur_match as usize); let match_val = u64::from_ne_bytes(core::slice::from_raw_parts(bs, 8).try_into().unwrap()); let cmp = scan_val ^ match_val; if cmp == 0 { // The first 8 bytes all matched. Additional scanning will be needed // (the compare256 call below) to determine the full match length. break; } // Compute the number of leading bytes that match. let cmp_len = cmp.to_le().trailing_zeros() as usize / 8; if cmp_len > best_len { // The match is fully contained within the 8 bytes just compared, // so we know the match length without needing to do the more // expensive compare256 operation. len = cmp_len; break; } goto_next_in_chain!(); } } else { loop { if is_match::<8>(cur_match, mbase_start, mbase_end, scan_start, scan_end) { break; } goto_next_in_chain!(); } } } // we know that there is at least some match. Now count how many bytes really match if len == 0 { len = { // SAFETY: cur_match is bounded by window_size - MIN_LOOKAHEAD, where MIN_LOOKAHEAD // is 258 + 3 + 1, so 258-byte reads of mbase_start are in-bounds. let src1 = unsafe { core::slice::from_raw_parts( mbase_start.wrapping_add(cur_match as usize + 2), 256, ) }; crate::deflate::compare256::compare256_slice(&scan[2..], src1) + 2 }; } assert!( scan.as_ptr() as usize + len <= window.as_ptr() as usize + (state.window_size - 1), "wild scan" ); if len > best_len { match_start = cur_match - match_offset; /* Do not look for matches beyond the end of the input. */ if len > lookahead { return (lookahead, match_start); } best_len = len; if best_len >= nice_match as usize { return (best_len, match_start); } offset = best_len - 1; if best_len >= core::mem::size_of::() { offset -= 2; if best_len >= core::mem::size_of::() { offset -= 4; } } scan_end = window[strstart + offset..].as_ptr(); // Look for a better string offset if SLOW && len > STD_MIN_MATCH && match_start as usize + len < strstart { let mut pos: Pos; // uint32_t i, hash; // unsigned char *scan_endstr; /* Go back to offset 0 */ cur_match -= match_offset; match_offset = 0; let mut next_pos = cur_match; for i in 0..=len - STD_MIN_MATCH { pos = state.prev.as_slice()[(cur_match as usize + i) & wmask]; if pos < next_pos { /* Hash chain is more distant, use it */ if pos <= limit_base + i as Pos { return break_matching(state, best_len, match_start); } next_pos = pos; match_offset = i as Pos; } } /* Switch cur_match to next_pos chain */ cur_match = next_pos; /* Try hash head at len-(STD_MIN_MATCH-1) position to see if we could get * a better cur_match at the end of string. 
Using (STD_MIN_MATCH-1) lets * us include one more byte into hash - the byte which will be checked * in main loop now, and which allows to grow match by 1. */ let [scan0, scan1, scan2, ..] = scan[len - (STD_MIN_MATCH + 1)..] else { panic!("index out of bounds"); }; let mut hash = 0; hash = state.update_hash(hash, scan0 as u32); hash = state.update_hash(hash, scan1 as u32); hash = state.update_hash(hash, scan2 as u32); pos = state.head.as_slice()[hash as usize]; if pos < cur_match { match_offset = (len - (STD_MIN_MATCH + 1)) as Pos; if pos <= limit_base + match_offset { return break_matching(state, best_len, match_start); } cur_match = pos; } /* Update offset-dependent variables */ limit = limit_base + match_offset; mbase_start = window.as_ptr().wrapping_sub(match_offset as usize); mbase_end = mbase_start.wrapping_add(offset); continue; } mbase_end = mbase_start.wrapping_add(offset); } else if !SLOW && early_exit { // The probability of finding a match later if we here is pretty low, so for // performance it's best to outright stop here for the lower compression levels break; } goto_next_in_chain!(); } (best_len, match_start) } fn break_matching(state: &State, best_len: usize, match_start: u16) -> (usize, u16) { (Ord::min(best_len, state.lookahead), match_start) } zlib-rs-0.5.2/src/deflate/pending.rs000064400000000000000000000063011046102023000153770ustar 00000000000000use core::{marker::PhantomData, mem::MaybeUninit}; use crate::{allocate::Allocator, weak_slice::WeakSliceMut}; pub struct Pending<'a> { /// start of the allocation buf: WeakSliceMut<'a, MaybeUninit>, /// next pending byte to output to the stream out: usize, /// number of bytes in the pending buffer pub(crate) pending: usize, /// semantically we're storing a mutable slice of bytes _marker: PhantomData<&'a mut [u8]>, } impl<'a> Pending<'a> { pub fn reset_keep(&mut self) { // keep the buffer as it is self.pending = 0; } pub fn pending(&self) -> &[u8] { let slice = &self.buf.as_slice()[self.out..][..self.pending]; // SAFETY: the slice contains initialized bytes. 
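// (The range self.out..self.out + self.pending only ever covers bytes previously
// written through `extend`; `advance` and `rewind` merely shrink that range.)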
unsafe { &*(slice as *const [MaybeUninit] as *const [u8]) } } /// Number of bytes that can be added to the pending buffer until it is full pub(crate) fn remaining(&self) -> usize { self.buf.len() - (self.out + self.pending) } /// Total number of bytes that can be stored in the pending buffer pub(crate) fn capacity(&self) -> usize { self.buf.len() } #[inline(always)] #[track_caller] /// Mark a number of pending bytes as no longer pending pub fn advance(&mut self, number_of_bytes: usize) { debug_assert!(self.pending >= number_of_bytes); self.out = self.out.wrapping_add(number_of_bytes); self.pending -= number_of_bytes; if self.pending == 0 { self.out = 0; } } #[inline(always)] #[track_caller] pub fn rewind(&mut self, n: usize) { assert!(n <= self.pending, "rewinding past then start"); self.pending -= n; if self.pending == 0 { self.out = 0; } } #[inline(always)] #[track_caller] pub fn extend(&mut self, buf: &[u8]) { assert!( self.remaining() >= buf.len(), "buf.len() must fit in remaining()" ); // SAFETY: [u8] is valid [MaybeUninit] let buf = unsafe { &*(buf as *const [u8] as *const [MaybeUninit]) }; self.buf.as_mut_slice()[self.out + self.pending..][..buf.len()].copy_from_slice(buf); self.pending += buf.len(); } pub(crate) fn new_in(alloc: &Allocator<'a>, len: usize) -> Option { let ptr = alloc.allocate_slice_raw::>(len)?; // SAFETY: freshly allocated buffer let buf = unsafe { WeakSliceMut::from_raw_parts_mut(ptr.as_ptr(), len) }; Some(Self { buf, out: 0, pending: 0, _marker: PhantomData, }) } pub(crate) fn clone_in(&self, alloc: &Allocator<'a>) -> Option { let mut clone = Self::new_in(alloc, self.buf.len())?; clone .buf .as_mut_slice() .copy_from_slice(self.buf.as_slice()); clone.out = self.out; clone.pending = self.pending; Some(clone) } /// # Safety /// /// [`Self`] must not be used after calling this function. pub(crate) unsafe fn drop_in(&mut self, alloc: &Allocator) { unsafe { alloc.deallocate(self.buf.as_mut_ptr(), self.buf.len()) }; } } zlib-rs-0.5.2/src/deflate/slide_hash.rs000064400000000000000000000126221046102023000160610ustar 00000000000000pub fn slide_hash(state: &mut crate::deflate::State) { let wsize = state.w_size as u16; // The state.head and state.prev slices have a length that is a power of 2 between 8 and 16. // That knowledge means `chunks_exact` with a (small) power of 2 can be used without risk of // missing elements. slide_hash_chain(state.head.as_mut_slice(), wsize); slide_hash_chain(state.prev.as_mut_slice(), wsize); } fn slide_hash_chain(table: &mut [u16], wsize: u16) { #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_avx2_and_bmi2() { // SAFETY: the avx2 and bmi2 target feature are enabled. return unsafe { avx2::slide_hash_chain(table, wsize) }; } #[cfg(target_arch = "aarch64")] if crate::cpu_features::is_enabled_neon() { return unsafe { neon::slide_hash_chain(table, wsize) }; } #[cfg(target_arch = "wasm32")] if crate::cpu_features::is_enabled_simd128() { // SAFETY: the simd128 target feature is enabled. return unsafe { wasm::slide_hash_chain(table, wsize) }; } rust::slide_hash_chain(table, wsize); } #[inline(always)] fn generic_slide_hash_chain(table: &mut [u16], wsize: u16) { debug_assert_eq!(table.len() % N, 0); for chunk in table.chunks_exact_mut(N) { for m in chunk.iter_mut() { *m = m.saturating_sub(wsize); } } } mod rust { pub fn slide_hash_chain(table: &mut [u16], wsize: u16) { // 32 means that 4 128-bit values can be processed per iteration. That appear to be the // optimal amount on x86_64 (SSE) and aarch64 (NEON). 
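// For example, with wsize == 32768 an entry that points into the discarded lower half
// of the window (m < wsize) saturates to 0, while larger entries are rebased:
// 43884 becomes 11116 and 28790 becomes 0 (see the test vectors further down).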
super::generic_slide_hash_chain::<32>(table, wsize); } } #[cfg(target_arch = "x86_64")] mod avx2 { /// # Safety /// /// Behavior is undefined if the `avx2` target feature is not enabled #[target_feature(enable = "avx2")] #[target_feature(enable = "bmi2")] #[target_feature(enable = "bmi1")] pub unsafe fn slide_hash_chain(table: &mut [u16], wsize: u16) { // 64 means that 4 256-bit values can be processed per iteration. // That appear to be the optimal amount for avx2. // // This vectorizes well https://godbolt.org/z/sGbdYba7K super::generic_slide_hash_chain::<64>(table, wsize); } } #[cfg(target_arch = "aarch64")] mod neon { /// # Safety /// /// Behavior is undefined if the `neon` target feature is not enabled #[target_feature(enable = "neon")] pub unsafe fn slide_hash_chain(table: &mut [u16], wsize: u16) { // 32 means that 4 128-bit values can be processed per iteration. That appear to be the // optimal amount for neon. super::generic_slide_hash_chain::<32>(table, wsize); } } #[cfg(target_arch = "wasm32")] mod wasm { /// # Safety /// /// Behavior is undefined if the `simd128` target feature is not enabled #[target_feature(enable = "simd128")] pub unsafe fn slide_hash_chain(table: &mut [u16], wsize: u16) { // 32 means that 4 128-bit values can be processed per iteration. That appear to be the // optimal amount on x86_64 (SSE) and aarch64 (NEON), which is what this will ultimately // compile down to. super::generic_slide_hash_chain::<32>(table, wsize); } } #[cfg(test)] mod tests { use super::*; const WSIZE: u16 = 32768; const INPUT: [u16; 64] = [ 0, 0, 28790, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43884, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64412, 0, 0, 0, 0, 0, 21043, 0, 0, 0, 0, 0, 23707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64026, 0, 0, 20182, ]; const OUTPUT: [u16; 64] = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31644, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31258, 0, 0, 0, ]; #[test] fn test_slide_hash_rust() { let mut input = INPUT; rust::slide_hash_chain(&mut input, WSIZE); assert_eq!(input, OUTPUT); } #[test] #[cfg(target_arch = "x86_64")] fn test_slide_hash_avx2() { if crate::cpu_features::is_enabled_avx2_and_bmi2() { let mut input = INPUT; unsafe { avx2::slide_hash_chain(&mut input, WSIZE) }; assert_eq!(input, OUTPUT); } } #[test] #[cfg(target_arch = "aarch64")] fn test_slide_hash_neon() { if crate::cpu_features::is_enabled_neon() { let mut input = INPUT; unsafe { neon::slide_hash_chain(&mut input, WSIZE) }; assert_eq!(input, OUTPUT); } } #[test] #[cfg(target_arch = "wasm32")] fn test_slide_hash_wasm() { if crate::cpu_features::is_enabled_simd128() { let mut input = INPUT; unsafe { wasm::slide_hash_chain(&mut input, WSIZE) }; assert_eq!(input, OUTPUT); } } quickcheck::quickcheck! 
{ fn slide_is_rust_slide(v: Vec, wsize: u16) -> bool { // pad to a multiple of 64 (the biggest chunk size currently in use) let difference = v.len().next_multiple_of(64) - v.len(); let mut v = v; v.extend(core::iter::repeat(u16::MAX).take(difference)); let mut a = v.clone(); let mut b = v; rust::slide_hash_chain(&mut a, wsize); slide_hash_chain(&mut b, wsize); a == b } } } zlib-rs-0.5.2/src/deflate/sym_buf.rs000064400000000000000000000051561046102023000154260ustar 00000000000000// taken from https://docs.rs/tokio/latest/src/tokio/io/read_buf.rs.html#23-27 // based on https://rust-lang.github.io/rfcs/2930-read-buf.html use crate::allocate::Allocator; use crate::weak_slice::WeakSliceMut; pub(crate) struct SymBuf<'a> { buf: WeakSliceMut<'a, u8>, filled: usize, } impl<'a> SymBuf<'a> { #[inline] pub fn iter(&self) -> impl Iterator + '_ { self.buf.as_slice()[..self.filled] .chunks_exact(3) .map(|chunk| match *chunk { [dist_low, dist_high, lc] => (u16::from_le_bytes([dist_low, dist_high]), lc), _ => unreachable!("chunks are exactly 3 elements wide"), }) } #[inline] pub fn should_flush_block(&self) -> bool { self.filled == self.buf.len() - 3 } /// Returns true if there are no bytes in this ReadBuf #[inline] pub fn is_empty(&self) -> bool { self.filled == 0 } /// Clears the buffer, resetting the filled region to empty. /// /// The number of initialized bytes is not changed, and the contents of the buffer are not modified. #[inline] pub fn clear(&mut self) { self.buf.as_mut_slice().fill(0); self.filled = 0; } #[inline(always)] pub fn push_lit(&mut self, byte: u8) { // NOTE: we rely on the buffer being zeroed here! self.buf.as_mut_slice()[self.filled + 2] = byte; self.filled += 3; } #[inline(always)] pub fn push_dist(&mut self, dist: u16, len: u8) { let buf = &mut self.buf.as_mut_slice()[self.filled..][..3]; let [dist1, dist2] = dist.to_le_bytes(); buf[0] = dist1; buf[1] = dist2; buf[2] = len; self.filled += 3; } pub(crate) fn new_in(alloc: &Allocator<'a>, len: usize) -> Option { let ptr = alloc.allocate_zeroed_buffer(len * 3)?; // safety: all elements are now initialized let buf = unsafe { WeakSliceMut::from_raw_parts_mut(ptr.as_ptr(), len * 3) }; Some(Self { buf, filled: 0 }) } pub(crate) fn clone_in(&self, alloc: &Allocator<'a>) -> Option { let mut clone = Self::new_in(alloc, self.buf.len() / 3)?; clone .buf .as_mut_slice() .copy_from_slice(self.buf.as_slice()); clone.filled = self.filled; Some(clone) } pub(crate) unsafe fn drop_in(&mut self, alloc: &Allocator<'a>) { if !self.buf.is_empty() { let mut buf = core::mem::replace(&mut self.buf, WeakSliceMut::empty()); unsafe { alloc.deallocate(buf.as_mut_ptr(), buf.len()) } } } } zlib-rs-0.5.2/src/deflate/test-data/inflate_buf_error.dat000064400000000000000000002027001046102023000214550ustar 00000000000000  -  *@ J ($$- v ($$]NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN^NNNNNNNNNNNNNNNNNNQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNJ= ( - @ $$ONNNNNN %NNN]NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN777777777NQQUQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ3QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQNNN$$]NNNNNNNNNNNNNNNNNNNNNNNNNNN>NNNN^NNNNNNNNNNNNNNNNNNQQQQQQQQQQQQQQQQQQQQQQQQ Q - J ($$- v ($$]NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN^NNNNNNNNNNNNNNNNNNQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNJ= ( - $$ONNNNNN %NNN]NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNQQQAQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ2QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQNNN$$]NNNNNNNNNNQQQQQQQQQQQQQ Q - 
zlib-rs-0.5.2/src/deflate/test-data/paper-100k.pdf000064400000000000000000003100001046102023000175400ustar 00000000000000
[binary PDF test data omitted]
2ߍJ|P)>ǀ2jpD1QG 9>%>+Tm<'SlɶC.n;?oرchh4,ˢ(z80˲'UQE7N (M_ZOgğVu&b.Ϟӟ/\wInZVλX,F>"C_@r\68kO Ai4?%UNG(B&? E^O!MW*cGΙ| MK75;0<_nUAχ2>> 3 ɔx '7@cfQ+.IRlO(bG9'oͼ(U*#] 飺c٠)r0'pv=ZJ={^T5vʖ`;[72=l'_yS㤎=Z:HH@h(O4w*ԏAh8+zi,g[f]_eS+&󉘑.)g ##p]RedWI>2ȪwEP8_@xQU  @s8*=tT;%0@m1 CUUMJ >xSœ)2Pm,ˢGNJu_iHQ軾{x1-\+ʇEId>Aס) ݞ{Fv: #PG n1M3ZYHD4;_ieYܞ wKeH䮸iuu~ |JAd>'-!5W ҳvKPG wh{=/a n {Nփq&Pmr58%%EQ(Ru]3h#Y$/-MM?-w _U?ris{uqS9PG _XE\>|>sUCS;hTDa$ɲiVJltZS/2Xɩpx H X0ZvIkbMMiQ XTA PU݀k+iD"Fnm\φ0E,NRU eY!+iIΗ$IMR w0oKe|VB;Ιhto'^nmTV|T J*gebP ј>+tVcLvD"Q8] ~oLʢmKM>mmm%.3cVO_Trwb O\r#/:O$d̻Y[ m% Qi1KJ;9$I^ڧ hRoɒ%YֹiVUFS^)@6e|u_Kb|M9* zqQ{iys1>u%A:MՄO# {N̐'^18\]}>w?1@@; 2:r^eY/bً,즼\_Z'n/\.J6z0Ζ]ʋ {Te/㦼eS Ee>ituA]v4t>hP.HZbƷS^̷Ñ|bmX֒ݖNO7^(#||~PAB;*jYu.q7UvI>?t -yW:1x& ~u}n>t ʨGjc|sp>_?ݤSd~pɇo:|Y|>e}*Ha%գB>ߣ5]_1OFdY.d5~HZ°u?#ԩeYQ||ROҒYWu>ܤ:(#||~PABS,ˊD"E+TC`P>P97(ӴNs>n;ԏjE >AIU$MӠK:';U:e];̗$f+'ӎ|ʇ>~%iuV#dB|T˲4M_pXQ|'8wKX,ZiTPV@)0 \Oω]eQ hq(|HKzlib-rs-0.5.2/src/deflate/trees_tbl.rs000064400000000000000000000177041046102023000157470ustar 00000000000000#![forbid(unsafe_code)] use crate::deflate::{ Value, DIST_CODE_LEN, D_CODES, LENGTH_CODES, L_CODES, STD_MAX_MATCH, STD_MIN_MATCH, }; const fn h(freq: u16, code: u16) -> Value { Value::new(freq, code) } #[rustfmt::skip] pub const STATIC_LTREE: [Value; L_CODES + 2] = [ h( 12,8), h(140,8), h( 76,8), h(204,8), h( 44,8), h(172,8), h(108,8), h(236,8), h( 28,8), h(156,8), h( 92,8), h(220,8), h( 60,8), h(188,8), h(124,8), h(252,8), h( 2,8), h(130,8), h( 66,8), h(194,8), h( 34,8), h(162,8), h( 98,8), h(226,8), h( 18,8), h(146,8), h( 82,8), h(210,8), h( 50,8), h(178,8), h(114,8), h(242,8), h( 10,8), h(138,8), h( 74,8), h(202,8), h( 42,8), h(170,8), h(106,8), h(234,8), h( 26,8), h(154,8), h( 90,8), h(218,8), h( 58,8), h(186,8), h(122,8), h(250,8), h( 6,8), h(134,8), h( 70,8), h(198,8), h( 38,8), h(166,8), h(102,8), h(230,8), h( 22,8), h(150,8), h( 86,8), h(214,8), h( 54,8), h(182,8), h(118,8), h(246,8), h( 14,8), h(142,8), h( 78,8), h(206,8), h( 46,8), h(174,8), h(110,8), h(238,8), h( 30,8), h(158,8), h( 94,8), h(222,8), h( 62,8), h(190,8), h(126,8), h(254,8), h( 1,8), h(129,8), h( 65,8), h(193,8), h( 33,8), h(161,8), h( 97,8), h(225,8), h( 17,8), h(145,8), h( 81,8), h(209,8), h( 49,8), h(177,8), h(113,8), h(241,8), h( 9,8), h(137,8), h( 73,8), h(201,8), h( 41,8), h(169,8), h(105,8), h(233,8), h( 25,8), h(153,8), h( 89,8), h(217,8), h( 57,8), h(185,8), h(121,8), h(249,8), h( 5,8), h(133,8), h( 69,8), h(197,8), h( 37,8), h(165,8), h(101,8), h(229,8), h( 21,8), h(149,8), h( 85,8), h(213,8), h( 53,8), h(181,8), h(117,8), h(245,8), h( 13,8), h(141,8), h( 77,8), h(205,8), h( 45,8), h(173,8), h(109,8), h(237,8), h( 29,8), h(157,8), h( 93,8), h(221,8), h( 61,8), h(189,8), h(125,8), h(253,8), h( 19,9), h(275,9), h(147,9), h(403,9), h( 83,9), h(339,9), h(211,9), h(467,9), h( 51,9), h(307,9), h(179,9), h(435,9), h(115,9), h(371,9), h(243,9), h(499,9), h( 11,9), h(267,9), h(139,9), h(395,9), h( 75,9), h(331,9), h(203,9), h(459,9), h( 43,9), h(299,9), h(171,9), h(427,9), h(107,9), h(363,9), h(235,9), h(491,9), h( 27,9), h(283,9), h(155,9), h(411,9), h( 91,9), h(347,9), h(219,9), h(475,9), h( 59,9), h(315,9), h(187,9), h(443,9), h(123,9), h(379,9), h(251,9), h(507,9), h( 7,9), h(263,9), h(135,9), h(391,9), h( 71,9), h(327,9), h(199,9), h(455,9), h( 39,9), h(295,9), h(167,9), h(423,9), h(103,9), h(359,9), h(231,9), h(487,9), h( 23,9), h(279,9), h(151,9), h(407,9), h( 87,9), h(343,9), h(215,9), h(471,9), h( 55,9), h(311,9), h(183,9), h(439,9), h(119,9), h(375,9), h(247,9), h(503,9), h( 15,9), 
h(271,9), h(143,9), h(399,9), h( 79,9), h(335,9), h(207,9), h(463,9), h( 47,9), h(303,9), h(175,9), h(431,9), h(111,9), h(367,9), h(239,9), h(495,9), h( 31,9), h(287,9), h(159,9), h(415,9), h( 95,9), h(351,9), h(223,9), h(479,9), h( 63,9), h(319,9), h(191,9), h(447,9), h(127,9), h(383,9), h(255,9), h(511,9), h( 0,7), h( 64,7), h( 32,7), h( 96,7), h( 16,7), h( 80,7), h( 48,7), h(112,7), h( 8,7), h( 72,7), h( 40,7), h(104,7), h( 24,7), h( 88,7), h( 56,7), h(120,7), h( 4,7), h( 68,7), h( 36,7), h(100,7), h( 20,7), h( 84,7), h( 52,7), h(116,7), h( 3,8), h(131,8), h( 67,8), h(195,8), h( 35,8), h(163,8), h( 99,8), h(227,8) ]; /// Precomputes the `Values` generated by the `encode_len` function for all `u8` input values #[rustfmt::skip] pub const STATIC_LTREE_ENCODINGS: [Value; 256] = { let mut table = [Value::new(0, 0); 256]; let mut lc = 0; while lc < table.len() { let (code, len) = super::encode_len(&STATIC_LTREE, lc as u8); // assert that there is no precision loss assert!(code as u16 as u64 == code); assert!(len as u16 as usize == len); table[lc] = Value::new(code as u16, len as u16); lc += 1; } table }; #[rustfmt::skip] pub const STATIC_DTREE: [Value; D_CODES] = [ h( 0,5), h(16,5), h( 8,5), h(24,5), h( 4,5), h(20,5), h(12,5), h(28,5), h( 2,5), h(18,5), h(10,5), h(26,5), h( 6,5), h(22,5), h(14,5), h(30,5), h( 1,5), h(17,5), h( 9,5), h(25,5), h( 5,5), h(21,5), h(13,5), h(29,5), h( 3,5), h(19,5), h(11,5), h(27,5), h( 7,5), h(23,5) ]; #[rustfmt::skip] pub const DIST_CODE: [u8; DIST_CODE_LEN] = [ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 ]; #[rustfmt::skip] pub 
const LENGTH_CODE: [u8; STD_MAX_MATCH-STD_MIN_MATCH+1] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 ]; pub const BASE_LENGTH: [u8; LENGTH_CODES] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0, ]; #[rustfmt::skip] pub const BASE_DIST: [u16; D_CODES] = [ 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576 ]; zlib-rs-0.5.2/src/deflate/window.rs000064400000000000000000000052331046102023000152650ustar 00000000000000use crate::{allocate::Allocator, weak_slice::WeakSliceMut}; #[derive(Debug)] pub struct Window<'a> { // the full window allocation. This is longer than w_size so that operations don't need to // perform bounds checks. buf: WeakSliceMut<'a, u8>, window_bits: usize, } impl<'a> Window<'a> { pub fn new_in(alloc: &Allocator<'a>, window_bits: usize) -> Option { let len = 2 * ((1 << window_bits) + Self::padding()); let ptr = alloc.allocate_zeroed_buffer(len)?; // SAFETY: freshly allocated buffer let buf = unsafe { WeakSliceMut::from_raw_parts_mut(ptr.as_ptr(), len) }; Some(Self { buf, window_bits }) } pub fn clone_in(&self, alloc: &Allocator<'a>) -> Option { let mut clone = Self::new_in(alloc, self.window_bits)?; clone .buf .as_mut_slice() .copy_from_slice(self.buf.as_slice()); Some(clone) } pub fn as_ptr(&self) -> *const u8 { self.buf.as_ptr() } /// # Safety /// /// [`Self`] must not be used after calling this function. pub unsafe fn drop_in(&mut self, alloc: &Allocator) { if !self.buf.is_empty() { let mut buf = core::mem::replace(&mut self.buf, WeakSliceMut::empty()); unsafe { alloc.deallocate(buf.as_mut_ptr(), buf.len()) }; } } pub fn capacity(&self) -> usize { 2 * (1 << self.window_bits) } /// Returns a shared reference to the filled portion of the buffer. #[inline] pub fn filled(&self) -> &[u8] { // SAFETY: `self.buf` has been initialized for at least `filled` elements unsafe { core::slice::from_raw_parts(self.buf.as_ptr().cast(), self.buf.len()) } } /// Returns a mutable reference to the filled portion of the buffer. #[inline] pub fn filled_mut(&mut self) -> &mut [u8] { // SAFETY: `self.buf` has been initialized for at least `filled` elements unsafe { core::slice::from_raw_parts_mut(self.buf.as_mut_ptr().cast(), self.buf.len()) } } /// # Safety /// /// `src` must point to `range.end - range.start` valid (initialized!) 
bytes pub unsafe fn copy_and_initialize(&mut self, range: core::ops::Range, src: *const u8) { let (start, end) = (range.start, range.end); let dst = self.buf.as_mut_slice()[range].as_mut_ptr(); unsafe { core::ptr::copy_nonoverlapping(src, dst, end - start) }; } // padding required so that SIMD operations going out-of-bounds are not a problem pub fn padding() -> usize { if crate::cpu_features::is_enabled_pclmulqdq() { 8 } else { 0 } } } zlib-rs-0.5.2/src/deflate.rs000064400000000000000000004326541046102023000137710ustar 00000000000000use core::{ffi::CStr, marker::PhantomData, mem::MaybeUninit, ops::ControlFlow}; use crate::{ adler32::adler32, allocate::Allocator, c_api::{gz_header, internal_state, z_checksum, z_stream}, crc32::{crc32, Crc32Fold}, trace, weak_slice::{WeakArrayMut, WeakSliceMut}, DeflateFlush, ReturnCode, ADLER32_INITIAL_VALUE, CRC32_INITIAL_VALUE, MAX_WBITS, MIN_WBITS, }; use self::{ algorithm::CONFIGURATION_TABLE, hash_calc::{HashCalcVariant, RollHashCalc, StandardHashCalc}, pending::Pending, sym_buf::SymBuf, trees_tbl::STATIC_LTREE, window::Window, }; mod algorithm; mod compare256; mod hash_calc; mod longest_match; mod pending; mod slide_hash; mod sym_buf; mod trees_tbl; mod window; // Position relative to the current window pub(crate) type Pos = u16; // SAFETY: This struct must have the same layout as [`z_stream`], so that casts and transmutations // between the two can work without UB. #[repr(C)] pub struct DeflateStream<'a> { pub(crate) next_in: *mut crate::c_api::Bytef, pub(crate) avail_in: crate::c_api::uInt, pub(crate) total_in: crate::c_api::z_size, pub(crate) next_out: *mut crate::c_api::Bytef, pub(crate) avail_out: crate::c_api::uInt, pub(crate) total_out: crate::c_api::z_size, pub(crate) msg: *const core::ffi::c_char, pub(crate) state: &'a mut State<'a>, pub(crate) alloc: Allocator<'a>, pub(crate) data_type: core::ffi::c_int, pub(crate) adler: crate::c_api::z_checksum, pub(crate) reserved: crate::c_api::uLong, } impl<'a> DeflateStream<'a> { // z_stream and DeflateStream must have the same layout. Do our best to check if this is true. // (imperfect check, but should catch most mistakes.) 
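// These const assertions are evaluated at compile time, so a size or alignment
// mismatch between DeflateStream and z_stream fails the build instead of
// surfacing as undefined behavior at runtime.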
const _S: () = assert!(core::mem::size_of::() == core::mem::size_of::()); const _A: () = assert!(core::mem::align_of::() == core::mem::align_of::()); /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// - `strm` satisfies the conditions of [`pointer::as_mut`] /// - if not `NULL`, `strm` as initialized using [`init`] or similar /// /// [`pointer::as_mut`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.as_mut #[inline(always)] pub unsafe fn from_stream_mut(strm: *mut z_stream) -> Option<&'a mut Self> { { // Safety: ptr points to a valid value of type z_stream (if non-null) let stream = unsafe { strm.as_ref() }?; if stream.zalloc.is_none() || stream.zfree.is_none() { return None; } if stream.state.is_null() { return None; } } // SAFETY: DeflateStream has an equivalent layout as z_stream unsafe { strm.cast::().as_mut() } } /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// - `strm` satisfies the conditions of [`pointer::as_ref`] /// - if not `NULL`, `strm` as initialized using [`init`] or similar /// /// [`pointer::as_ref`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.as_ref #[inline(always)] pub unsafe fn from_stream_ref(strm: *const z_stream) -> Option<&'a Self> { { // Safety: ptr points to a valid value of type z_stream (if non-null) let stream = unsafe { strm.as_ref() }?; if stream.zalloc.is_none() || stream.zfree.is_none() { return None; } if stream.state.is_null() { return None; } } // SAFETY: DeflateStream has an equivalent layout as z_stream unsafe { strm.cast::().as_ref() } } fn as_z_stream_mut(&mut self) -> &mut z_stream { // SAFETY: a valid &mut DeflateStream is also a valid &mut z_stream unsafe { &mut *(self as *mut DeflateStream as *mut z_stream) } } pub fn pending(&self) -> (usize, u8) { ( self.state.bit_writer.pending.pending, self.state.bit_writer.bits_used, ) } } /// number of elements in hash table pub(crate) const HASH_SIZE: usize = 65536; /// log2(HASH_SIZE) const HASH_BITS: usize = 16; /// Maximum value for memLevel in deflateInit2 const MAX_MEM_LEVEL: i32 = 9; const DEF_MEM_LEVEL: i32 = if MAX_MEM_LEVEL > 8 { 8 } else { MAX_MEM_LEVEL }; #[repr(i32)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] #[cfg_attr(feature = "__internal-fuzz", derive(arbitrary::Arbitrary))] pub enum Method { #[default] Deflated = 8, } impl TryFrom for Method { type Error = (); fn try_from(value: i32) -> Result { match value { 8 => Ok(Self::Deflated), _ => Err(()), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[cfg_attr(feature = "__internal-fuzz", derive(arbitrary::Arbitrary))] pub struct DeflateConfig { pub level: i32, pub method: Method, pub window_bits: i32, pub mem_level: i32, pub strategy: Strategy, } #[cfg(any(test, feature = "__internal-test"))] impl quickcheck::Arbitrary for DeflateConfig { fn arbitrary(g: &mut quickcheck::Gen) -> Self { let mem_levels: Vec<_> = (1..=9).collect(); let levels: Vec<_> = (0..=9).collect(); let mut window_bits = Vec::new(); window_bits.extend(9..=15); // zlib window_bits.extend(9 + 16..=15 + 16); // gzip window_bits.extend(-15..=-9); // raw Self { level: *g.choose(&levels).unwrap(), method: Method::Deflated, window_bits: *g.choose(&window_bits).unwrap(), mem_level: *g.choose(&mem_levels).unwrap(), strategy: *g .choose(&[ Strategy::Default, Strategy::Filtered, Strategy::HuffmanOnly, Strategy::Rle, Strategy::Fixed, ]) .unwrap(), } } } impl DeflateConfig { pub fn new(level: i32) -> Self { Self { level, 
..Self::default() } } } impl Default for DeflateConfig { fn default() -> Self { Self { level: crate::c_api::Z_DEFAULT_COMPRESSION, method: Method::Deflated, window_bits: MAX_WBITS, mem_level: DEF_MEM_LEVEL, strategy: Strategy::Default, } } } pub fn init(stream: &mut z_stream, config: DeflateConfig) -> ReturnCode { let DeflateConfig { mut level, method: _, mut window_bits, mem_level, strategy, } = config; /* Todo: ignore strm->next_in if we use it as window */ stream.msg = core::ptr::null_mut(); // for safety we must really make sure that alloc and free are consistent // this is a (slight) deviation from stock zlib. In this crate we pick the rust // allocator as the default, but `libz-rs-sys` always explicitly sets an allocator, // and can configure the C allocator #[cfg(feature = "rust-allocator")] if stream.zalloc.is_none() || stream.zfree.is_none() { stream.configure_default_rust_allocator() } #[cfg(feature = "c-allocator")] if stream.zalloc.is_none() || stream.zfree.is_none() { stream.configure_default_c_allocator() } if stream.zalloc.is_none() || stream.zfree.is_none() { return ReturnCode::StreamError; } if level == crate::c_api::Z_DEFAULT_COMPRESSION { level = 6; } let wrap = if window_bits < 0 { if window_bits < -MAX_WBITS { return ReturnCode::StreamError; } window_bits = -window_bits; 0 } else if window_bits > MAX_WBITS { window_bits -= 16; 2 } else { 1 }; if (!(1..=MAX_MEM_LEVEL).contains(&mem_level)) || !(MIN_WBITS..=MAX_WBITS).contains(&window_bits) || !(0..=9).contains(&level) || (window_bits == 8 && wrap != 1) { return ReturnCode::StreamError; } let window_bits = if window_bits == 8 { 9 /* until 256-byte window bug fixed */ } else { window_bits as usize }; let alloc = Allocator { zalloc: stream.zalloc.unwrap(), zfree: stream.zfree.unwrap(), opaque: stream.opaque, _marker: PhantomData, }; // allocated here to have the same order as zlib let Some(state_allocation) = alloc.allocate_raw::() else { return ReturnCode::MemError; }; let w_size = 1 << window_bits; let window = Window::new_in(&alloc, window_bits); let prev = alloc.allocate_slice_raw::(w_size); let head = alloc.allocate_raw::<[u16; HASH_SIZE]>(); let lit_bufsize = 1 << (mem_level + 6); // 16K elements by default let pending = Pending::new_in(&alloc, 4 * lit_bufsize); // zlib-ng overlays the pending_buf and sym_buf. We cannot really do that safely let sym_buf = SymBuf::new_in(&alloc, lit_bufsize); // if any allocation failed, clean up allocations that did succeed let (window, prev, head, pending, sym_buf) = match (window, prev, head, pending, sym_buf) { (Some(window), Some(prev), Some(head), Some(pending), Some(sym_buf)) => { (window, prev, head, pending, sym_buf) } (window, prev, head, pending, sym_buf) => { // SAFETY: these pointers/structures are discarded after deallocation. 
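// On failure, free whatever was allocated (in reverse order of allocation)
// before returning ReturnCode::MemError below.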
unsafe { if let Some(mut sym_buf) = sym_buf { sym_buf.drop_in(&alloc); } if let Some(mut pending) = pending { pending.drop_in(&alloc); } if let Some(head) = head { alloc.deallocate(head.as_ptr(), 1) } if let Some(prev) = prev { alloc.deallocate(prev.as_ptr(), w_size) } if let Some(mut window) = window { window.drop_in(&alloc); } alloc.deallocate(state_allocation.as_ptr(), 1); } return ReturnCode::MemError; } }; // zero initialize the memory let prev = prev.as_ptr(); // FIXME: write_bytes is stable for NonNull since 1.80.0 unsafe { prev.write_bytes(0, w_size) }; let prev = unsafe { WeakSliceMut::from_raw_parts_mut(prev, w_size) }; // zero out head's first element let head = head.as_ptr(); // FIXME: write_bytes is stable for NonNull since 1.80.0 unsafe { head.write_bytes(0, 1) }; let head = unsafe { WeakArrayMut::::from_ptr(head) }; let state = State { status: Status::Init, // window w_size, // allocated values window, prev, head, bit_writer: BitWriter::from_pending(pending), // lit_bufsize, // sym_buf, // level: level as i8, // set to zero again for testing? strategy, // these fields are not set explicitly at this point last_flush: 0, wrap, strstart: 0, block_start: 0, block_open: 0, window_size: 0, insert: 0, matches: 0, opt_len: 0, static_len: 0, lookahead: 0, ins_h: 0, max_chain_length: 0, max_lazy_match: 0, good_match: 0, nice_match: 0, // l_desc: TreeDesc::EMPTY, d_desc: TreeDesc::EMPTY, bl_desc: TreeDesc::EMPTY, // crc_fold: Crc32Fold::new(), gzhead: None, gzindex: 0, // match_start: 0, prev_match: 0, match_available: false, prev_length: 0, // just provide a valid default; gets set properly later hash_calc_variant: HashCalcVariant::Standard, _cache_line_0: (), _cache_line_1: (), _cache_line_2: (), _cache_line_3: (), _padding_0: [0; 16], }; unsafe { state_allocation.as_ptr().write(state) }; // FIXME: write is stable for NonNull since 1.80.0 stream.state = state_allocation.as_ptr() as *mut internal_state; let Some(stream) = (unsafe { DeflateStream::from_stream_mut(stream) }) else { if cfg!(debug_assertions) { unreachable!("we should have initialized the stream properly"); } return ReturnCode::StreamError; }; reset(stream) } pub fn params(stream: &mut DeflateStream, level: i32, strategy: Strategy) -> ReturnCode { let level = if level == crate::c_api::Z_DEFAULT_COMPRESSION { 6 } else { level }; if !(0..=9).contains(&level) { return ReturnCode::StreamError; } let level = level as i8; let func = CONFIGURATION_TABLE[stream.state.level as usize].func; let state = &mut stream.state; // FIXME: use fn_addr_eq when it's available in our MSRV. The comparison returning false here // is not functionally incorrect, but would be inconsistent with zlib-ng. #[allow(unpredictable_function_pointer_comparisons)] if (strategy != state.strategy || func != CONFIGURATION_TABLE[level as usize].func) && state.last_flush != -2 { // Flush the last buffer. 
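// A Block flush pushes out data compressed under the old parameters without
// ending the deflate stream, so the new level/strategy only affects input
// consumed afterwards.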
let err = deflate(stream, DeflateFlush::Block); if err == ReturnCode::StreamError { return err; } let state = &mut stream.state; if stream.avail_in != 0 || ((state.strstart as isize - state.block_start) + state.lookahead as isize) != 0 { return ReturnCode::BufError; } } let state = &mut stream.state; if state.level != level { if state.level == 0 && state.matches != 0 { if state.matches == 1 { self::slide_hash::slide_hash(state); } else { state.head.as_mut_slice().fill(0); } state.matches = 0; } lm_set_level(state, level); } state.strategy = strategy; ReturnCode::Ok } pub fn set_dictionary(stream: &mut DeflateStream, mut dictionary: &[u8]) -> ReturnCode { let state = &mut stream.state; let wrap = state.wrap; if wrap == 2 || (wrap == 1 && state.status != Status::Init) || state.lookahead != 0 { return ReturnCode::StreamError; } // when using zlib wrappers, compute Adler-32 for provided dictionary if wrap == 1 { stream.adler = adler32(stream.adler as u32, dictionary) as z_checksum; } // avoid computing Adler-32 in read_buf state.wrap = 0; // if dictionary would fill window, just replace the history if dictionary.len() >= state.window.capacity() { if wrap == 0 { // clear the hash table state.head.as_mut_slice().fill(0); state.strstart = 0; state.block_start = 0; state.insert = 0; } else { /* already empty otherwise */ } // use the tail dictionary = &dictionary[dictionary.len() - state.w_size..]; } // insert dictionary into window and hash let avail = stream.avail_in; let next = stream.next_in; stream.avail_in = dictionary.len() as _; stream.next_in = dictionary.as_ptr() as *mut u8; fill_window(stream); while stream.state.lookahead >= STD_MIN_MATCH { let str = stream.state.strstart; let n = stream.state.lookahead - (STD_MIN_MATCH - 1); stream.state.insert_string(str, n); stream.state.strstart = str + n; stream.state.lookahead = STD_MIN_MATCH - 1; fill_window(stream); } let state = &mut stream.state; state.strstart += state.lookahead; state.block_start = state.strstart as _; state.insert = state.lookahead; state.lookahead = 0; state.prev_length = 0; state.match_available = false; // restore the state stream.next_in = next; stream.avail_in = avail; state.wrap = wrap; ReturnCode::Ok } pub fn prime(stream: &mut DeflateStream, mut bits: i32, value: i32) -> ReturnCode { // our logic actually supports up to 32 bits. debug_assert!(bits <= 16, "zlib only supports up to 16 bits here"); let mut value64 = value as u64; let state = &mut stream.state; if bits < 0 || bits > BitWriter::BIT_BUF_SIZE as i32 || bits > (core::mem::size_of_val(&value) << 3) as i32 { return ReturnCode::BufError; } let mut put; loop { put = BitWriter::BIT_BUF_SIZE - state.bit_writer.bits_used; let put = Ord::min(put as i32, bits); if state.bit_writer.bits_used == 0 { state.bit_writer.bit_buffer = value64; } else { state.bit_writer.bit_buffer |= (value64 & ((1 << put) - 1)) << state.bit_writer.bits_used; } state.bit_writer.bits_used += put as u8; state.bit_writer.flush_bits(); value64 >>= put; bits -= put; if bits == 0 { break; } } ReturnCode::Ok } pub fn copy<'a>( dest: &mut MaybeUninit>, source: &mut DeflateStream<'a>, ) -> ReturnCode { // SAFETY: source and dest are both mutable references, so guaranteed not to overlap. // dest being a reference to maybe uninitialized memory makes a copy of 1 DeflateStream valid. 
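// Start with a shallow bitwise copy of the stream header; the heap-backed pieces
// (window, prev, head, pending buffer, sym_buf) and the State itself are
// re-allocated and cloned for `dest` below.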
unsafe { core::ptr::copy_nonoverlapping(source, dest.as_mut_ptr(), 1); } let alloc = &source.alloc; // allocated here to have the same order as zlib let Some(state_allocation) = alloc.allocate_raw::() else { return ReturnCode::MemError; }; let source_state = &source.state; let window = source_state.window.clone_in(alloc); let prev = alloc.allocate_slice_raw::(source_state.w_size); let head = alloc.allocate_raw::<[u16; HASH_SIZE]>(); let pending = source_state.bit_writer.pending.clone_in(alloc); let sym_buf = source_state.sym_buf.clone_in(alloc); // if any allocation failed, clean up allocations that did succeed let (window, prev, head, pending, sym_buf) = match (window, prev, head, pending, sym_buf) { (Some(window), Some(prev), Some(head), Some(pending), Some(sym_buf)) => { (window, prev, head, pending, sym_buf) } (window, prev, head, pending, sym_buf) => { // SAFETY: this access is in-bounds let field_ptr = unsafe { core::ptr::addr_of_mut!((*dest.as_mut_ptr()).state) }; unsafe { core::ptr::write(field_ptr as *mut *mut State, core::ptr::null_mut()) }; // SAFETY: it is an assumpion on DeflateStream that (de)allocation does not cause UB. unsafe { if let Some(mut sym_buf) = sym_buf { sym_buf.drop_in(alloc); } if let Some(mut pending) = pending { pending.drop_in(alloc); } if let Some(head) = head { alloc.deallocate(head.as_ptr(), HASH_SIZE) } if let Some(prev) = prev { alloc.deallocate(prev.as_ptr(), source_state.w_size) } if let Some(mut window) = window { window.drop_in(alloc); } alloc.deallocate(state_allocation.as_ptr(), 1); } return ReturnCode::MemError; } }; let prev = unsafe { let prev = prev.as_ptr(); prev.copy_from_nonoverlapping(source_state.prev.as_ptr(), source_state.prev.len()); WeakSliceMut::from_raw_parts_mut(prev, source_state.prev.len()) }; // FIXME: write_bytes is stable for NonNull since 1.80.0 let head = unsafe { let head = head.as_ptr(); head.write_bytes(0, 1); head.cast::().write(source_state.head.as_slice()[0]); WeakArrayMut::from_ptr(head) }; let mut bit_writer = BitWriter::from_pending(pending); bit_writer.bits_used = source_state.bit_writer.bits_used; bit_writer.bit_buffer = source_state.bit_writer.bit_buffer; let dest_state = State { status: source_state.status, bit_writer, last_flush: source_state.last_flush, wrap: source_state.wrap, strategy: source_state.strategy, level: source_state.level, good_match: source_state.good_match, nice_match: source_state.nice_match, l_desc: source_state.l_desc.clone(), d_desc: source_state.d_desc.clone(), bl_desc: source_state.bl_desc.clone(), prev_match: source_state.prev_match, match_available: source_state.match_available, strstart: source_state.strstart, match_start: source_state.match_start, prev_length: source_state.prev_length, max_chain_length: source_state.max_chain_length, max_lazy_match: source_state.max_lazy_match, block_start: source_state.block_start, block_open: source_state.block_open, window, sym_buf, lit_bufsize: source_state.lit_bufsize, window_size: source_state.window_size, matches: source_state.matches, opt_len: source_state.opt_len, static_len: source_state.static_len, insert: source_state.insert, w_size: source_state.w_size, lookahead: source_state.lookahead, prev, head, ins_h: source_state.ins_h, hash_calc_variant: source_state.hash_calc_variant, crc_fold: source_state.crc_fold, gzhead: None, gzindex: source_state.gzindex, _cache_line_0: (), _cache_line_1: (), _cache_line_2: (), _cache_line_3: (), _padding_0: source_state._padding_0, }; // write the cloned state into state_ptr unsafe { 
state_allocation.as_ptr().write(dest_state) }; // FIXME: write is stable for NonNull since 1.80.0 // insert the state_ptr into `dest` let field_ptr = unsafe { core::ptr::addr_of_mut!((*dest.as_mut_ptr()).state) }; unsafe { core::ptr::write(field_ptr as *mut *mut State, state_allocation.as_ptr()) }; // update the gzhead field (it contains a mutable reference so we need to be careful let field_ptr = unsafe { core::ptr::addr_of_mut!((*dest.as_mut_ptr()).state.gzhead) }; unsafe { core::ptr::copy(&source_state.gzhead, field_ptr, 1) }; ReturnCode::Ok } /// # Returns /// /// - Err when deflate is not done. A common cause is insufficient output space /// - Ok otherwise pub fn end<'a>(stream: &'a mut DeflateStream) -> Result<&'a mut z_stream, &'a mut z_stream> { let status = stream.state.status; let alloc = stream.alloc; // deallocate in reverse order of allocations unsafe { // SAFETY: we make sure that these fields are not used (by invalidating the state pointer) stream.state.sym_buf.drop_in(&alloc); stream.state.bit_writer.pending.drop_in(&alloc); alloc.deallocate(stream.state.head.as_mut_ptr(), 1); if !stream.state.prev.is_empty() { alloc.deallocate(stream.state.prev.as_mut_ptr(), stream.state.prev.len()); } stream.state.window.drop_in(&alloc); } let stream = stream.as_z_stream_mut(); let state = core::mem::replace(&mut stream.state, core::ptr::null_mut()); // SAFETY: `state` is not used later unsafe { alloc.deallocate(state as *mut State, 1); } match status { Status::Busy => Err(stream), _ => Ok(stream), } } pub fn reset(stream: &mut DeflateStream) -> ReturnCode { let ret = reset_keep(stream); if ret == ReturnCode::Ok { lm_init(stream.state); } ret } fn reset_keep(stream: &mut DeflateStream) -> ReturnCode { stream.total_in = 0; stream.total_out = 0; stream.msg = core::ptr::null_mut(); stream.data_type = crate::c_api::Z_UNKNOWN; let state = &mut stream.state; state.bit_writer.pending.reset_keep(); // can be made negative by deflate(..., Z_FINISH); state.wrap = state.wrap.abs(); state.status = match state.wrap { 2 => Status::GZip, _ => Status::Init, }; stream.adler = match state.wrap { 2 => { state.crc_fold = Crc32Fold::new(); CRC32_INITIAL_VALUE as _ } _ => ADLER32_INITIAL_VALUE as _, }; state.last_flush = -2; state.zng_tr_init(); ReturnCode::Ok } fn lm_init(state: &mut State) { state.window_size = 2 * state.w_size; // zlib uses CLEAR_HASH here state.head.as_mut_slice().fill(0); // Set the default configuration parameters: lm_set_level(state, state.level); state.strstart = 0; state.block_start = 0; state.lookahead = 0; state.insert = 0; state.prev_length = 0; state.match_available = false; state.match_start = 0; state.ins_h = 0; } fn lm_set_level(state: &mut State, level: i8) { state.max_lazy_match = CONFIGURATION_TABLE[level as usize].max_lazy; state.good_match = CONFIGURATION_TABLE[level as usize].good_length; state.nice_match = CONFIGURATION_TABLE[level as usize].nice_length; state.max_chain_length = CONFIGURATION_TABLE[level as usize].max_chain; state.hash_calc_variant = HashCalcVariant::for_max_chain_length(state.max_chain_length); state.level = level; } pub fn tune( stream: &mut DeflateStream, good_length: usize, max_lazy: usize, nice_length: usize, max_chain: usize, ) -> ReturnCode { stream.state.good_match = good_length as u16; stream.state.max_lazy_match = max_lazy as u16; stream.state.nice_match = nice_length as u16; stream.state.max_chain_length = max_chain as u16; ReturnCode::Ok } #[repr(C)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) struct Value { a: u16, b: u16, } impl 
Value { pub(crate) const fn new(a: u16, b: u16) -> Self { Self { a, b } } pub(crate) fn freq_mut(&mut self) -> &mut u16 { &mut self.a } pub(crate) fn code_mut(&mut self) -> &mut u16 { &mut self.a } pub(crate) fn dad_mut(&mut self) -> &mut u16 { &mut self.b } pub(crate) fn len_mut(&mut self) -> &mut u16 { &mut self.b } #[inline(always)] pub(crate) const fn freq(self) -> u16 { self.a } #[inline(always)] pub(crate) const fn code(self) -> u16 { self.a } #[inline(always)] pub(crate) const fn dad(self) -> u16 { self.b } #[inline(always)] pub(crate) const fn len(self) -> u16 { self.b } } /// number of length codes, not counting the special END_BLOCK code pub(crate) const LENGTH_CODES: usize = 29; /// number of literal bytes 0..255 const LITERALS: usize = 256; /// number of Literal or Length codes, including the END_BLOCK code pub(crate) const L_CODES: usize = LITERALS + 1 + LENGTH_CODES; /// number of distance codes pub(crate) const D_CODES: usize = 30; /// number of codes used to transfer the bit lengths const BL_CODES: usize = 19; /// maximum heap size const HEAP_SIZE: usize = 2 * L_CODES + 1; /// all codes must not exceed MAX_BITS bits const MAX_BITS: usize = 15; /// Bit length codes must not exceed MAX_BL_BITS bits const MAX_BL_BITS: usize = 7; pub(crate) const DIST_CODE_LEN: usize = 512; struct BitWriter<'a> { pub(crate) pending: Pending<'a>, // output still pending pub(crate) bit_buffer: u64, pub(crate) bits_used: u8, /// total bit length of compressed file (NOTE: zlib-ng uses a 32-bit integer here) #[cfg(feature = "ZLIB_DEBUG")] compressed_len: usize, /// bit length of compressed data sent (NOTE: zlib-ng uses a 32-bit integer here) #[cfg(feature = "ZLIB_DEBUG")] bits_sent: usize, } #[inline] const fn encode_len(ltree: &[Value], lc: u8) -> (u64, usize) { let mut lc = lc as usize; /* Send the length code, len is the match length - STD_MIN_MATCH */ let code = self::trees_tbl::LENGTH_CODE[lc] as usize; let c = code + LITERALS + 1; assert!(c < L_CODES, "bad l_code"); // send_code_trace(s, c); let lnode = ltree[c]; let mut match_bits: u64 = lnode.code() as u64; let mut match_bits_len = lnode.len() as usize; let extra = StaticTreeDesc::EXTRA_LBITS[code] as usize; if extra != 0 { lc -= self::trees_tbl::BASE_LENGTH[code] as usize; match_bits |= (lc as u64) << match_bits_len; match_bits_len += extra; } (match_bits, match_bits_len) } #[inline] const fn encode_dist(dtree: &[Value], mut dist: u16) -> (u64, usize) { dist -= 1; /* dist is now the match distance - 1 */ let code = State::d_code(dist as usize) as usize; assert!(code < D_CODES, "bad d_code"); // send_code_trace(s, code); /* Send the distance code */ let dnode = dtree[code]; let mut match_bits = dnode.code() as u64; let mut match_bits_len = dnode.len() as usize; let extra = StaticTreeDesc::EXTRA_DBITS[code] as usize; if extra != 0 { dist -= self::trees_tbl::BASE_DIST[code]; match_bits |= (dist as u64) << match_bits_len; match_bits_len += extra; } (match_bits, match_bits_len) } impl<'a> BitWriter<'a> { pub(crate) const BIT_BUF_SIZE: u8 = 64; fn from_pending(pending: Pending<'a>) -> Self { Self { pending, bit_buffer: 0, bits_used: 0, #[cfg(feature = "ZLIB_DEBUG")] compressed_len: 0, #[cfg(feature = "ZLIB_DEBUG")] bits_sent: 0, } } fn flush_bits(&mut self) { debug_assert!(self.bits_used <= 64); let removed = self.bits_used.saturating_sub(7).next_multiple_of(8); let keep_bytes = self.bits_used / 8; // can never divide by zero let src = &self.bit_buffer.to_le_bytes(); self.pending.extend(&src[..keep_bytes as usize]); self.bits_used -= removed; 
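// `removed` always equals `8 * keep_bytes`, so the bytes just copied into `pending`
// are exactly the bits dropped here. For example, with `bits_used == 61` we get
// `keep_bytes == 7` and `removed == 56`, leaving 5 bits buffered. When all 64 bits
// are flushed the shift amount equals the bit width of `u64`, which is why
// `checked_shr` (falling back to 0) is used instead of a plain shift.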
self.bit_buffer = self.bit_buffer.checked_shr(removed as u32).unwrap_or(0); } fn emit_align(&mut self) { debug_assert!(self.bits_used <= 64); let keep_bytes = self.bits_used.div_ceil(8); let src = &self.bit_buffer.to_le_bytes(); self.pending.extend(&src[..keep_bytes as usize]); self.bits_used = 0; self.bit_buffer = 0; self.sent_bits_align(); } fn send_bits_trace(&self, _value: u64, _len: u8) { trace!(" l {:>2} v {:>4x} ", _len, _value); } fn cmpr_bits_add(&mut self, _len: usize) { #[cfg(feature = "ZLIB_DEBUG")] { self.compressed_len += _len; } } fn cmpr_bits_align(&mut self) { #[cfg(feature = "ZLIB_DEBUG")] { self.compressed_len = self.compressed_len.next_multiple_of(8); } } fn sent_bits_add(&mut self, _len: usize) { #[cfg(feature = "ZLIB_DEBUG")] { self.bits_sent += _len; } } fn sent_bits_align(&mut self) { #[cfg(feature = "ZLIB_DEBUG")] { self.bits_sent = self.bits_sent.next_multiple_of(8); } } #[inline(always)] fn send_bits(&mut self, val: u64, len: u8) { debug_assert!(len <= 64); debug_assert!(self.bits_used <= 64); let total_bits = len + self.bits_used; self.send_bits_trace(val, len); self.sent_bits_add(len as usize); if total_bits < Self::BIT_BUF_SIZE { self.bit_buffer |= val << self.bits_used; self.bits_used = total_bits; } else { self.send_bits_overflow(val, total_bits); } } fn send_bits_overflow(&mut self, val: u64, total_bits: u8) { if self.bits_used == Self::BIT_BUF_SIZE { self.pending.extend(&self.bit_buffer.to_le_bytes()); self.bit_buffer = val; self.bits_used = total_bits - Self::BIT_BUF_SIZE; } else { self.bit_buffer |= val << self.bits_used; self.pending.extend(&self.bit_buffer.to_le_bytes()); self.bit_buffer = val >> (Self::BIT_BUF_SIZE - self.bits_used); self.bits_used = total_bits - Self::BIT_BUF_SIZE; } } fn send_code(&mut self, code: usize, tree: &[Value]) { let node = tree[code]; self.send_bits(node.code() as u64, node.len() as u8) } /// Send one empty static block to give enough lookahead for inflate. /// This takes 10 bits, of which 7 may remain in the bit buffer. 
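/// The 10 bits are the 3-bit block header (final-block flag plus the static-trees
/// block type) followed by the 7-bit static code for the END_BLOCK symbol; since
/// `flush_bits` only writes whole bytes, up to 7 of those bits can stay in `bit_buffer`.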
pub fn align(&mut self) { self.emit_tree(BlockType::StaticTrees, false); self.emit_end_block(&STATIC_LTREE, false); self.flush_bits(); } pub(crate) fn emit_tree(&mut self, block_type: BlockType, is_last_block: bool) { let header_bits = ((block_type as u64) << 1) | (is_last_block as u64); self.send_bits(header_bits, 3); trace!("\n--- Emit Tree: Last: {}\n", is_last_block as u8); } pub(crate) fn emit_end_block_and_align(&mut self, ltree: &[Value], is_last_block: bool) { self.emit_end_block(ltree, is_last_block); if is_last_block { self.emit_align(); } } fn emit_end_block(&mut self, ltree: &[Value], _is_last_block: bool) { const END_BLOCK: usize = 256; self.send_code(END_BLOCK, ltree); trace!( "\n+++ Emit End Block: Last: {} Pending: {} Total Out: {}\n", _is_last_block as u8, self.pending.pending().len(), "" ); } pub(crate) fn emit_lit(&mut self, ltree: &[Value], c: u8) -> u16 { self.send_code(c as usize, ltree); #[cfg(feature = "ZLIB_DEBUG")] if let Some(c) = char::from_u32(c as u32) { if isgraph(c as u8) { trace!(" '{}' ", c); } } ltree[c as usize].len() } pub(crate) fn emit_dist( &mut self, ltree: &[Value], dtree: &[Value], lc: u8, dist: u16, ) -> usize { let (mut match_bits, mut match_bits_len) = encode_len(ltree, lc); let (dist_match_bits, dist_match_bits_len) = encode_dist(dtree, dist); match_bits |= dist_match_bits << match_bits_len; match_bits_len += dist_match_bits_len; self.send_bits(match_bits, match_bits_len as u8); match_bits_len } pub(crate) fn emit_dist_static(&mut self, lc: u8, dist: u16) -> usize { let precomputed_len = trees_tbl::STATIC_LTREE_ENCODINGS[lc as usize]; let mut match_bits = precomputed_len.code() as u64; let mut match_bits_len = precomputed_len.len() as usize; let dtree = self::trees_tbl::STATIC_DTREE.as_slice(); let (dist_match_bits, dist_match_bits_len) = encode_dist(dtree, dist); match_bits |= dist_match_bits << match_bits_len; match_bits_len += dist_match_bits_len; self.send_bits(match_bits, match_bits_len as u8); match_bits_len } fn compress_block_help(&mut self, sym_buf: &SymBuf, ltree: &[Value], dtree: &[Value]) { for (dist, lc) in sym_buf.iter() { match dist { 0 => self.emit_lit(ltree, lc) as usize, _ => self.emit_dist(ltree, dtree, lc, dist), }; } self.emit_end_block(ltree, false) } fn send_tree(&mut self, tree: &[Value], bl_tree: &[Value], max_code: usize) { /* tree: the tree to be scanned */ /* max_code and its largest code of non zero frequency */ let mut prevlen: isize = -1; /* last emitted length */ let mut curlen; /* length of current code */ let mut nextlen = tree[0].len(); /* length of next code */ let mut count = 0; /* repeat count of the current code */ let mut max_count = 7; /* max repeat count */ let mut min_count = 4; /* min repeat count */ /* tree[max_code+1].Len = -1; */ /* guard already set */ if nextlen == 0 { max_count = 138; min_count = 3; } for n in 0..=max_code { curlen = nextlen; nextlen = tree[n + 1].len(); count += 1; if count < max_count && curlen == nextlen { continue; } else if count < min_count { loop { self.send_code(curlen as usize, bl_tree); count -= 1; if count == 0 { break; } } } else if curlen != 0 { if curlen as isize != prevlen { self.send_code(curlen as usize, bl_tree); count -= 1; } assert!((3..=6).contains(&count), " 3_6?"); self.send_code(REP_3_6, bl_tree); self.send_bits(count - 3, 2); } else if count <= 10 { self.send_code(REPZ_3_10, bl_tree); self.send_bits(count - 3, 3); } else { self.send_code(REPZ_11_138, bl_tree); self.send_bits(count - 11, 7); } count = 0; prevlen = curlen as isize; if nextlen == 0 { 
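// These limits mirror the repeat codes emitted above: runs of zero lengths use
// REPZ_3_10 (3..=10) or REPZ_11_138 (11..=138), while REP_3_6 encodes 3..=6 repeats
// of a nonzero length; the max of 7 for a changed nonzero length leaves room for the
// one code that is sent literally before a REP_3_6 run.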
max_count = 138; min_count = 3; } else if curlen == nextlen { max_count = 6; min_count = 3; } else { max_count = 7; min_count = 4; } } } } #[repr(C, align(64))] pub(crate) struct State<'a> { status: Status, last_flush: i8, /* value of flush param for previous deflate call */ pub(crate) wrap: i8, /* bit 0 true for zlib, bit 1 true for gzip */ pub(crate) strategy: Strategy, pub(crate) level: i8, /// Whether or not a block is currently open for the QUICK deflation scheme. /// 0 if the block is closed, 1 if there is an active block, or 2 if there /// is an active block and it is the last block. pub(crate) block_open: u8, pub(crate) hash_calc_variant: HashCalcVariant, pub(crate) match_available: bool, /* set if previous match exists */ /// Use a faster search when the previous match is longer than this pub(crate) good_match: u16, /// Stop searching when current match exceeds this pub(crate) nice_match: u16, pub(crate) match_start: Pos, /* start of matching string */ pub(crate) prev_match: Pos, /* previous match */ pub(crate) strstart: usize, /* start of string to insert */ pub(crate) window: Window<'a>, pub(crate) w_size: usize, /* LZ77 window size (32K by default) */ pub(crate) lookahead: usize, /* number of valid bytes ahead in window */ _cache_line_0: (), /// prev[N], where N is an offset in the current window, contains the offset in the window /// of the previous 4-byte sequence that hashes to the same value as the 4-byte sequence /// starting at N. Together with head, prev forms a chained hash table that can be used /// to find earlier strings in the window that are potential matches for new input being /// deflated. pub(crate) prev: WeakSliceMut<'a, u16>, /// head[H] contains the offset of the last 4-character sequence seen so far in /// the current window that hashes to H (as calculated using the hash_calc_variant). pub(crate) head: WeakArrayMut<'a, u16, HASH_SIZE>, /// Length of the best match at previous step. Matches not greater than this /// are discarded. This is used in the lazy match evaluation. pub(crate) prev_length: u16, /// To speed up deflation, hash chains are never searched beyond this length. /// A higher limit improves compression ratio but degrades the speed. pub(crate) max_chain_length: u16, // TODO untangle this mess! zlib uses the same field differently based on compression level // we should just have 2 fields for clarity! // // Insert new strings in the hash table only if the match length is not // greater than this length. This saves time but degrades compression. // max_insert_length is used only for compression levels <= 3. // define max_insert_length max_lazy_match /// Attempt to find a better match only when the current match is strictly smaller /// than this value. This mechanism is used only for compression levels >= 4. pub(crate) max_lazy_match: u16, /// number of string matches in current block /// NOTE: this is a saturating 8-bit counter, to help keep the struct compact. The code that /// makes decisions based on this field only cares whether the count is greater than 2, so /// an 8-bit counter is sufficient. pub(crate) matches: u8, /// Window position at the beginning of the current output block. Gets /// negative when the window is moved backwards. pub(crate) block_start: isize, pub(crate) sym_buf: SymBuf<'a>, _cache_line_1: (), /// Size of match buffer for literals/lengths. 
There are 4 reasons for /// limiting lit_bufsize to 64K: /// - frequencies can be kept in 16 bit counters /// - if compression is not successful for the first block, all input /// data is still in the window so we can still emit a stored block even /// when input comes from standard input. (This can also be done for /// all blocks if lit_bufsize is not greater than 32K.) /// - if compression is not successful for a file smaller than 64K, we can /// even emit a stored file instead of a stored block (saving 5 bytes). /// This is applicable only for zip (not gzip or zlib). /// - creating new Huffman trees less frequently may not provide fast /// adaptation to changes in the input data statistics. (Take for /// example a binary file with poorly compressible code followed by /// a highly compressible string table.) Smaller buffer sizes give /// fast adaptation but have of course the overhead of transmitting /// trees more frequently. /// - I can't count above 4 lit_bufsize: usize, /// Actual size of window: 2*w_size, except when the user input buffer is directly used as sliding window. pub(crate) window_size: usize, bit_writer: BitWriter<'a>, _cache_line_2: (), /// bit length of current block with optimal trees opt_len: usize, /// bit length of current block with static trees static_len: usize, /// bytes at end of window left to insert pub(crate) insert: usize, /// hash index of string to be inserted pub(crate) ins_h: u32, gzhead: Option<&'a mut gz_header>, gzindex: usize, _padding_0: [u8; 16], _cache_line_3: (), crc_fold: crate::crc32::Crc32Fold, l_desc: TreeDesc, /* literal and length tree */ d_desc: TreeDesc<{ 2 * D_CODES + 1 }>, /* distance tree */ bl_desc: TreeDesc<{ 2 * BL_CODES + 1 }>, /* Huffman tree for bit lengths */ } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default)] #[cfg_attr(feature = "__internal-fuzz", derive(arbitrary::Arbitrary))] pub enum Strategy { #[default] Default = 0, Filtered = 1, HuffmanOnly = 2, Rle = 3, Fixed = 4, } impl TryFrom for Strategy { type Error = (); fn try_from(value: i32) -> Result { match value { 0 => Ok(Strategy::Default), 1 => Ok(Strategy::Filtered), 2 => Ok(Strategy::HuffmanOnly), 3 => Ok(Strategy::Rle), 4 => Ok(Strategy::Fixed), _ => Err(()), } } } #[derive(Debug, PartialEq, Eq)] enum DataType { Binary = 0, Text = 1, Unknown = 2, } impl<'a> State<'a> { pub const BIT_BUF_SIZE: u8 = BitWriter::BIT_BUF_SIZE; // log2(w_size) (in the range MIN_WBITS..=MAX_WBITS) pub(crate) fn w_bits(&self) -> u32 { self.w_size.trailing_zeros() } pub(crate) fn w_mask(&self) -> usize { self.w_size - 1 } pub(crate) fn max_dist(&self) -> usize { self.w_size - MIN_LOOKAHEAD } // TODO untangle this mess! zlib uses the same field differently based on compression level // we should just have 2 fields for clarity! pub(crate) fn max_insert_length(&self) -> usize { self.max_lazy_match as usize } /// Total size of the pending buf. But because `pending` shares memory with `sym_buf`, this is /// not the number of bytes that are actually in `pending`! 
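/// As an illustration: with zlib's default `mem_level` of 8, `lit_bufsize` is
/// `1 << (8 + 6)` = 16384, so the pending buffer spans 64 KiB.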
pub(crate) fn pending_buf_size(&self) -> usize { self.lit_bufsize * 4 } #[inline(always)] pub(crate) fn update_hash(&self, h: u32, val: u32) -> u32 { match self.hash_calc_variant { HashCalcVariant::Standard => StandardHashCalc::update_hash(h, val), HashCalcVariant::Roll => RollHashCalc::update_hash(h, val), } } #[inline(always)] pub(crate) fn quick_insert_string(&mut self, string: usize) -> u16 { match self.hash_calc_variant { HashCalcVariant::Standard => StandardHashCalc::quick_insert_string(self, string), HashCalcVariant::Roll => RollHashCalc::quick_insert_string(self, string), } } #[inline(always)] pub(crate) fn insert_string(&mut self, string: usize, count: usize) { match self.hash_calc_variant { HashCalcVariant::Standard => StandardHashCalc::insert_string(self, string, count), HashCalcVariant::Roll => RollHashCalc::insert_string(self, string, count), } } #[inline(always)] pub(crate) fn tally_lit(&mut self, unmatched: u8) -> bool { Self::tally_lit_help(&mut self.sym_buf, &mut self.l_desc, unmatched) } // This helper is to work around an ownership issue in algorithm/medium. pub(crate) fn tally_lit_help( sym_buf: &mut SymBuf, l_desc: &mut TreeDesc, unmatched: u8, ) -> bool { sym_buf.push_lit(unmatched); *l_desc.dyn_tree[unmatched as usize].freq_mut() += 1; assert!( unmatched as usize <= STD_MAX_MATCH - STD_MIN_MATCH, "zng_tr_tally: bad literal" ); // signal that the current block should be flushed sym_buf.should_flush_block() } const fn d_code(dist: usize) -> u8 { let index = if dist < 256 { dist } else { 256 + (dist >> 7) }; self::trees_tbl::DIST_CODE[index] } #[inline(always)] pub(crate) fn tally_dist(&mut self, mut dist: usize, len: usize) -> bool { self.sym_buf.push_dist(dist as u16, len as u8); self.matches = self.matches.saturating_add(1); dist -= 1; assert!( dist < self.max_dist() && Self::d_code(dist) < D_CODES as u8, "tally_dist: bad match" ); let index = self::trees_tbl::LENGTH_CODE[len] as usize + LITERALS + 1; *self.l_desc.dyn_tree[index].freq_mut() += 1; *self.d_desc.dyn_tree[Self::d_code(dist) as usize].freq_mut() += 1; // signal that the current block should be flushed self.sym_buf.should_flush_block() } fn detect_data_type(dyn_tree: &[Value]) -> DataType { // set bits 0..6, 14..25, and 28..31 // 0xf3ffc07f = binary 11110011111111111100000001111111 const NON_TEXT: u64 = 0xf3ffc07f; let mut mask = NON_TEXT; /* Check for non-textual bytes. */ for value in &dyn_tree[0..32] { if (mask & 1) != 0 && value.freq() != 0 { return DataType::Binary; } mask >>= 1; } /* Check for textual bytes. */ if dyn_tree[9].freq() != 0 || dyn_tree[10].freq() != 0 || dyn_tree[13].freq() != 0 { return DataType::Text; } if dyn_tree[32..LITERALS].iter().any(|v| v.freq() != 0) { return DataType::Text; } // there are no explicit text or non-text bytes. 
The stream is either empty or has only // tolerated bytes DataType::Binary } fn compress_block_static_trees(&mut self) { let ltree = self::trees_tbl::STATIC_LTREE.as_slice(); for (dist, lc) in self.sym_buf.iter() { match dist { 0 => self.bit_writer.emit_lit(ltree, lc) as usize, _ => self.bit_writer.emit_dist_static(lc, dist), }; } self.bit_writer.emit_end_block(ltree, false) } fn compress_block_dynamic_trees(&mut self) { self.bit_writer.compress_block_help( &self.sym_buf, &self.l_desc.dyn_tree, &self.d_desc.dyn_tree, ); } fn header(&self) -> u16 { // preset dictionary flag in zlib header const PRESET_DICT: u16 = 0x20; // The deflate compression method (the only one supported in this version) const Z_DEFLATED: u16 = 8; let dict = match self.strstart { 0 => 0, _ => PRESET_DICT, }; let h = ((Z_DEFLATED + ((self.w_bits() as u16 - 8) << 4)) << 8) | (self.level_flags() << 6) | dict; h + 31 - (h % 31) } fn level_flags(&self) -> u16 { if self.strategy >= Strategy::HuffmanOnly || self.level < 2 { 0 } else if self.level < 6 { 1 } else if self.level == 6 { 2 } else { 3 } } fn zng_tr_init(&mut self) { self.l_desc.stat_desc = &StaticTreeDesc::L; self.d_desc.stat_desc = &StaticTreeDesc::D; self.bl_desc.stat_desc = &StaticTreeDesc::BL; self.bit_writer.bit_buffer = 0; self.bit_writer.bits_used = 0; #[cfg(feature = "ZLIB_DEBUG")] { self.bit_writer.compressed_len = 0; self.bit_writer.bits_sent = 0; } // Initialize the first block of the first file: self.init_block(); } /// initializes a new block fn init_block(&mut self) { // Initialize the trees. // TODO would a memset also work here? for value in &mut self.l_desc.dyn_tree[..L_CODES] { *value.freq_mut() = 0; } for value in &mut self.d_desc.dyn_tree[..D_CODES] { *value.freq_mut() = 0; } for value in &mut self.bl_desc.dyn_tree[..BL_CODES] { *value.freq_mut() = 0; } // end of block literal code const END_BLOCK: usize = 256; *self.l_desc.dyn_tree[END_BLOCK].freq_mut() = 1; self.opt_len = 0; self.static_len = 0; self.sym_buf.clear(); self.matches = 0; } } #[repr(u8)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum Status { Init = 1, GZip = 4, Extra = 5, Name = 6, Comment = 7, Hcrc = 8, Busy = 2, Finish = 3, } const fn rank_flush(f: i8) -> i8 { // rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH ((f) * 2) - (if (f) > 4 { 9 } else { 0 }) } #[derive(Debug)] pub(crate) enum BlockState { /// block not completed, need more input or more output NeedMore = 0, /// block flush performed BlockDone = 1, /// finish started, need only more output at next deflate FinishStarted = 2, /// finish done, accept no more input or output FinishDone = 3, } // Maximum stored block length in deflate format (not including header). pub(crate) const MAX_STORED: usize = 65535; // so u16::max pub(crate) fn read_buf_window(stream: &mut DeflateStream, offset: usize, size: usize) -> usize { let len = Ord::min(stream.avail_in as usize, size); if len == 0 { return 0; } stream.avail_in -= len as u32; if stream.state.wrap == 2 { // we likely cannot fuse the crc32 and the copy here because the input can be changed by // a concurrent thread. Therefore it cannot be converted into a slice! let window = &mut stream.state.window; // SAFETY: len is bounded by avail_in, so this copy is in bounds. 
unsafe { window.copy_and_initialize(offset..offset + len, stream.next_in) }; let data = &stream.state.window.filled()[offset..][..len]; stream.state.crc_fold.fold(data, CRC32_INITIAL_VALUE); } else if stream.state.wrap == 1 { // we likely cannot fuse the adler32 and the copy here because the input can be changed by // a concurrent thread. Therefore it cannot be converted into a slice! let window = &mut stream.state.window; // SAFETY: len is bounded by avail_in, so this copy is in bounds. unsafe { window.copy_and_initialize(offset..offset + len, stream.next_in) }; let data = &stream.state.window.filled()[offset..][..len]; stream.adler = adler32(stream.adler as u32, data) as _; } else { let window = &mut stream.state.window; // SAFETY: len is bounded by avail_in, so this copy is in bounds. unsafe { window.copy_and_initialize(offset..offset + len, stream.next_in) }; } stream.next_in = stream.next_in.wrapping_add(len); stream.total_in += len as crate::c_api::z_size; len } pub(crate) enum BlockType { StoredBlock = 0, StaticTrees = 1, DynamicTrees = 2, } pub(crate) fn zng_tr_stored_block( state: &mut State, window_range: core::ops::Range, is_last: bool, ) { // send block type state.bit_writer.emit_tree(BlockType::StoredBlock, is_last); // align on byte boundary state.bit_writer.emit_align(); state.bit_writer.cmpr_bits_align(); let input_block: &[u8] = &state.window.filled()[window_range]; let stored_len = input_block.len() as u16; state.bit_writer.pending.extend(&stored_len.to_le_bytes()); state .bit_writer .pending .extend(&(!stored_len).to_le_bytes()); state.bit_writer.cmpr_bits_add(32); state.bit_writer.sent_bits_add(32); if stored_len > 0 { state.bit_writer.pending.extend(input_block); state.bit_writer.cmpr_bits_add((stored_len << 3) as usize); state.bit_writer.sent_bits_add((stored_len << 3) as usize); } } /// The minimum match length mandated by the deflate standard pub(crate) const STD_MIN_MATCH: usize = 3; /// The maximum match length mandated by the deflate standard pub(crate) const STD_MAX_MATCH: usize = 258; /// The minimum wanted match length, affects deflate_quick, deflate_fast, deflate_medium and deflate_slow pub(crate) const WANT_MIN_MATCH: usize = 4; pub(crate) const MIN_LOOKAHEAD: usize = STD_MAX_MATCH + STD_MIN_MATCH + 1; #[inline] pub(crate) fn fill_window(stream: &mut DeflateStream) { debug_assert!(stream.state.lookahead < MIN_LOOKAHEAD); let wsize = stream.state.w_size; loop { let state = &mut *stream.state; let mut more = state.window_size - state.lookahead - state.strstart; // If the window is almost full and there is insufficient lookahead, // move the upper half to the lower one to make room in the upper half. 
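// As a concrete illustration: with the default 32 KiB window, wsize = 32768 and
// max_dist() = wsize - MIN_LOOKAHEAD = 32506, so the slide below triggers once
// strstart reaches 65274, right at the upper bound asserted at the end of this
// function (window_size - MIN_LOOKAHEAD).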
if state.strstart >= wsize + state.max_dist() { // shift the window to the left let (old, new) = state.window.filled_mut()[..2 * wsize].split_at_mut(wsize); old.copy_from_slice(new); state.match_start = state.match_start.saturating_sub(wsize as u16); if state.match_start == 0 { state.prev_length = 0; } state.strstart -= wsize; /* we now have strstart >= MAX_DIST */ state.block_start -= wsize as isize; state.insert = Ord::min(state.insert, state.strstart); self::slide_hash::slide_hash(state); more += wsize; } if stream.avail_in == 0 { break; } // If there was no sliding: // strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && // more == window_size - lookahead - strstart // => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) // => more >= window_size - 2*WSIZE + 2 // In the BIG_MEM or MMAP case (not yet supported), // window_size == input_size + MIN_LOOKAHEAD && // strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. // Otherwise, window_size == 2*WSIZE so more >= 2. // If there was sliding, more >= WSIZE. So in all cases, more >= 2. assert!(more >= 2, "more < 2"); let n = read_buf_window(stream, stream.state.strstart + stream.state.lookahead, more); let state = &mut *stream.state; state.lookahead += n; // Initialize the hash value now that we have some input: if state.lookahead + state.insert >= STD_MIN_MATCH { let string = state.strstart - state.insert; if state.max_chain_length > 1024 { let v0 = state.window.filled()[string] as u32; let v1 = state.window.filled()[string + 1] as u32; state.ins_h = state.update_hash(v0, v1); } else if string >= 1 { state.quick_insert_string(string + 2 - STD_MIN_MATCH); } let mut count = state.insert; if state.lookahead == 1 { count -= 1; } if count > 0 { state.insert_string(string, count); state.insert -= count; } } // If the whole input has less than STD_MIN_MATCH bytes, ins_h is garbage, // but this is not important since only literal bytes will be emitted. if !(stream.state.lookahead < MIN_LOOKAHEAD && stream.avail_in != 0) { break; } } assert!( stream.state.strstart <= stream.state.window_size - MIN_LOOKAHEAD, "not enough room for search" ); } pub(crate) struct StaticTreeDesc { /// static tree or NULL pub(crate) static_tree: &'static [Value], /// extra bits for each code or NULL extra_bits: &'static [u8], /// base index for extra_bits extra_base: usize, /// max number of elements in the tree elems: usize, /// max bit length for the codes max_length: u16, } impl StaticTreeDesc { const EMPTY: Self = Self { static_tree: &[], extra_bits: &[], extra_base: 0, elems: 0, max_length: 0, }; /// extra bits for each length code const EXTRA_LBITS: [u8; LENGTH_CODES] = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, ]; /// extra bits for each distance code const EXTRA_DBITS: [u8; D_CODES] = [ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, ]; /// extra bits for each bit length code const EXTRA_BLBITS: [u8; BL_CODES] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 7]; /// The lengths of the bit length codes are sent in order of decreasing /// probability, to avoid transmitting the lengths for unused bit length codes. 
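/// `build_bl_tree` walks this order from the end to find the last bit length code that
/// is actually used, so trailing unused entries are never transmitted.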
const BL_ORDER: [u8; BL_CODES] = [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, ]; pub(crate) const L: Self = Self { static_tree: &self::trees_tbl::STATIC_LTREE, extra_bits: &Self::EXTRA_LBITS, extra_base: LITERALS + 1, elems: L_CODES, max_length: MAX_BITS as u16, }; pub(crate) const D: Self = Self { static_tree: &self::trees_tbl::STATIC_DTREE, extra_bits: &Self::EXTRA_DBITS, extra_base: 0, elems: D_CODES, max_length: MAX_BITS as u16, }; pub(crate) const BL: Self = Self { static_tree: &[], extra_bits: &Self::EXTRA_BLBITS, extra_base: 0, elems: BL_CODES, max_length: MAX_BL_BITS as u16, }; } #[derive(Clone)] pub(crate) struct TreeDesc { dyn_tree: [Value; N], max_code: usize, stat_desc: &'static StaticTreeDesc, } impl TreeDesc { const EMPTY: Self = Self { dyn_tree: [Value::new(0, 0); N], max_code: 0, stat_desc: &StaticTreeDesc::EMPTY, }; } fn build_tree(state: &mut State, desc: &mut TreeDesc) { let tree = &mut desc.dyn_tree; let stree = desc.stat_desc.static_tree; let elements = desc.stat_desc.elems; let mut heap = Heap::new(); let mut max_code = heap.initialize(&mut tree[..elements]); // The pkzip format requires that at least one distance code exists, // and that at least one bit should be sent even if there is only one // possible code. So to avoid special checks later on we force at least // two codes of non zero frequency. while heap.heap_len < 2 { heap.heap_len += 1; let node = if max_code < 2 { max_code += 1; max_code } else { 0 }; debug_assert!(node >= 0); let node = node as usize; heap.heap[heap.heap_len] = node as u32; *tree[node].freq_mut() = 1; heap.depth[node] = 0; state.opt_len -= 1; if !stree.is_empty() { state.static_len -= stree[node].len() as usize; } /* node is 0 or 1 so it does not have extra bits */ } debug_assert!(max_code >= 0); let max_code = max_code as usize; desc.max_code = max_code; // The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, // establish sub-heaps of increasing lengths: let mut n = heap.heap_len / 2; while n >= 1 { heap.pqdownheap(tree, n); n -= 1; } heap.construct_huffman_tree(tree, elements); // At this point, the fields freq and dad are set. We can now // generate the bit lengths. let bl_count = gen_bitlen(state, &mut heap, desc); // The field len is now set, we can generate the bit codes gen_codes(&mut desc.dyn_tree, max_code, &bl_count); } fn gen_bitlen( state: &mut State, heap: &mut Heap, desc: &mut TreeDesc, ) -> [u16; MAX_BITS + 1] { let tree = &mut desc.dyn_tree; let max_code = desc.max_code; let stree = desc.stat_desc.static_tree; let extra = desc.stat_desc.extra_bits; let base = desc.stat_desc.extra_base; let max_length = desc.stat_desc.max_length; let mut bl_count = [0u16; MAX_BITS + 1]; // In a first pass, compute the optimal bit lengths (which may // overflow in the case of the bit length tree). 
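// "Overflow" here means a computed code length larger than max_length (MAX_BITS = 15
// for the literal/length and distance trees, MAX_BL_BITS = 7 for the bit length tree).
// Such lengths are clamped in this pass, and the loop further down moves leaves deeper
// in the tree until the lengths form a valid prefix code again.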
*tree[heap.heap[heap.heap_max] as usize].len_mut() = 0; /* root of the heap */ // number of elements with bit length too large let mut overflow: i32 = 0; for h in heap.heap_max + 1..HEAP_SIZE { let n = heap.heap[h] as usize; let mut bits = tree[tree[n].dad() as usize].len() + 1; if bits > max_length { bits = max_length; overflow += 1; } // We overwrite tree[n].Dad which is no longer needed *tree[n].len_mut() = bits; // not a leaf node if n > max_code { continue; } bl_count[bits as usize] += 1; let mut xbits = 0; if n >= base { xbits = extra[n - base] as usize; } let f = tree[n].freq() as usize; state.opt_len += f * (bits as usize + xbits); if !stree.is_empty() { state.static_len += f * (stree[n].len() as usize + xbits); } } if overflow == 0 { return bl_count; } /* Find the first bit length which could increase: */ loop { let mut bits = max_length as usize - 1; while bl_count[bits] == 0 { bits -= 1; } bl_count[bits] -= 1; /* move one leaf down the tree */ bl_count[bits + 1] += 2; /* move one overflow item as its brother */ bl_count[max_length as usize] -= 1; /* The brother of the overflow item also moves one step up, * but this does not affect bl_count[max_length] */ overflow -= 2; if overflow <= 0 { break; } } // Now recompute all bit lengths, scanning in increasing frequency. // h is still equal to HEAP_SIZE. (It is simpler to reconstruct all // lengths instead of fixing only the wrong ones. This idea is taken // from 'ar' written by Haruhiko Okumura.) let mut h = HEAP_SIZE; for bits in (1..=max_length).rev() { let mut n = bl_count[bits as usize]; while n != 0 { h -= 1; let m = heap.heap[h] as usize; if m > max_code { continue; } if tree[m].len() != bits { // Tracev((stderr, "code %d bits %d->%u\n", m, tree[m].Len, bits)); state.opt_len += (bits * tree[m].freq()) as usize; state.opt_len -= (tree[m].len() * tree[m].freq()) as usize; *tree[m].len_mut() = bits; } n -= 1; } } bl_count } /// Checks that symbol is a printing character (excluding space) #[allow(unused)] fn isgraph(c: u8) -> bool { (c > 0x20) && (c <= 0x7E) } fn gen_codes(tree: &mut [Value], max_code: usize, bl_count: &[u16]) { /* tree: the tree to decorate */ /* max_code: largest code with non zero frequency */ /* bl_count: number of codes at each bit length */ let mut next_code = [0; MAX_BITS + 1]; /* next code value for each bit length */ let mut code = 0; /* running code value */ /* The distribution counts are first used to generate the code values * without bit reversal. */ for bits in 1..=MAX_BITS { code = (code + bl_count[bits - 1]) << 1; next_code[bits] = code; } /* Check that the bit counts in bl_count are consistent. The last code * must be all ones. 
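* Equivalently: for a complete canonical prefix code the Kraft sum over all code
* lengths is exactly 1, so the last (and largest) 15-bit code assigned must be
* 0x7fff, i.e. all ones.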
*/ assert!( code + bl_count[MAX_BITS] - 1 == (1 << MAX_BITS) - 1, "inconsistent bit counts" ); trace!("\ngen_codes: max_code {max_code} "); for n in 0..=max_code { let len = tree[n].len(); if len == 0 { continue; } /* Now reverse the bits */ assert!((1..=15).contains(&len), "code length must be 1-15"); *tree[n].code_mut() = next_code[len as usize].reverse_bits() >> (16 - len); next_code[len as usize] += 1; if tree != self::trees_tbl::STATIC_LTREE.as_slice() { trace!( "\nn {:>3} {} l {:>2} c {:>4x} ({:x}) ", n, if isgraph(n as u8) { char::from_u32(n as u32).unwrap() } else { ' ' }, len, tree[n].code(), next_code[len as usize] - 1 ); } } } /// repeat previous bit length 3-6 times (2 bits of repeat count) const REP_3_6: usize = 16; /// repeat a zero length 3-10 times (3 bits of repeat count) const REPZ_3_10: usize = 17; /// repeat a zero length 11-138 times (7 bits of repeat count) const REPZ_11_138: usize = 18; fn scan_tree(bl_desc: &mut TreeDesc<{ 2 * BL_CODES + 1 }>, tree: &mut [Value], max_code: usize) { /* tree: the tree to be scanned */ /* max_code: and its largest code of non zero frequency */ let mut prevlen = -1isize; /* last emitted length */ let mut curlen: isize; /* length of current code */ let mut nextlen = tree[0].len(); /* length of next code */ let mut count = 0; /* repeat count of the current code */ let mut max_count = 7; /* max repeat count */ let mut min_count = 4; /* min repeat count */ if nextlen == 0 { max_count = 138; min_count = 3; } *tree[max_code + 1].len_mut() = 0xffff; /* guard */ let bl_tree = &mut bl_desc.dyn_tree; for n in 0..=max_code { curlen = nextlen as isize; nextlen = tree[n + 1].len(); count += 1; if count < max_count && curlen == nextlen as isize { continue; } else if count < min_count { *bl_tree[curlen as usize].freq_mut() += count; } else if curlen != 0 { if curlen != prevlen { *bl_tree[curlen as usize].freq_mut() += 1; } *bl_tree[REP_3_6].freq_mut() += 1; } else if count <= 10 { *bl_tree[REPZ_3_10].freq_mut() += 1; } else { *bl_tree[REPZ_11_138].freq_mut() += 1; } count = 0; prevlen = curlen; if nextlen == 0 { max_count = 138; min_count = 3; } else if curlen == nextlen as isize { max_count = 6; min_count = 3; } else { max_count = 7; min_count = 4; } } } fn send_all_trees(state: &mut State, lcodes: usize, dcodes: usize, blcodes: usize) { assert!( lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes" ); assert!( lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, "too many codes" ); trace!("\nbl counts: "); state.bit_writer.send_bits(lcodes as u64 - 257, 5); /* not +255 as stated in appnote.txt */ state.bit_writer.send_bits(dcodes as u64 - 1, 5); state.bit_writer.send_bits(blcodes as u64 - 4, 4); /* not -3 as stated in appnote.txt */ for rank in 0..blcodes { trace!("\nbl code {:>2} ", StaticTreeDesc::BL_ORDER[rank]); state.bit_writer.send_bits( state.bl_desc.dyn_tree[StaticTreeDesc::BL_ORDER[rank] as usize].len() as u64, 3, ); } trace!("\nbl tree: sent {}", state.bit_writer.bits_sent); // literal tree state .bit_writer .send_tree(&state.l_desc.dyn_tree, &state.bl_desc.dyn_tree, lcodes - 1); trace!("\nlit tree: sent {}", state.bit_writer.bits_sent); // distance tree state .bit_writer .send_tree(&state.d_desc.dyn_tree, &state.bl_desc.dyn_tree, dcodes - 1); trace!("\ndist tree: sent {}", state.bit_writer.bits_sent); } /// Construct the Huffman tree for the bit lengths and return the index in /// bl_order of the last bit length code to send. 
fn build_bl_tree(state: &mut State) -> usize { /* Determine the bit length frequencies for literal and distance trees */ scan_tree( &mut state.bl_desc, &mut state.l_desc.dyn_tree, state.l_desc.max_code, ); scan_tree( &mut state.bl_desc, &mut state.d_desc.dyn_tree, state.d_desc.max_code, ); /* Build the bit length tree: */ { let mut tmp = TreeDesc::EMPTY; core::mem::swap(&mut tmp, &mut state.bl_desc); build_tree(state, &mut tmp); core::mem::swap(&mut tmp, &mut state.bl_desc); } /* opt_len now includes the length of the tree representations, except * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. */ /* Determine the number of bit length codes to send. The pkzip format * requires that at least 4 bit length codes be sent. (appnote.txt says * 3 but the actual value used is 4.) */ let mut max_blindex = BL_CODES - 1; while max_blindex >= 3 { let index = StaticTreeDesc::BL_ORDER[max_blindex] as usize; if state.bl_desc.dyn_tree[index].len() != 0 { break; } max_blindex -= 1; } /* Update opt_len to include the bit length tree and counts */ state.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; trace!( "\ndyn trees: dyn {}, stat {}", state.opt_len, state.static_len ); max_blindex } fn zng_tr_flush_block( stream: &mut DeflateStream, window_offset: Option, stored_len: u32, last: bool, ) { /* window_offset: offset of the input block into the window */ /* stored_len: length of input block */ /* last: one if this is the last block for a file */ let mut opt_lenb; let static_lenb; let mut max_blindex = 0; let state = &mut stream.state; if state.sym_buf.is_empty() { opt_lenb = 0; static_lenb = 0; state.static_len = 7; } else if state.level > 0 { if stream.data_type == DataType::Unknown as i32 { stream.data_type = State::detect_data_type(&state.l_desc.dyn_tree) as i32; } { let mut tmp = TreeDesc::EMPTY; core::mem::swap(&mut tmp, &mut state.l_desc); build_tree(state, &mut tmp); core::mem::swap(&mut tmp, &mut state.l_desc); trace!( "\nlit data: dyn {}, stat {}", state.opt_len, state.static_len ); } { let mut tmp = TreeDesc::EMPTY; core::mem::swap(&mut tmp, &mut state.d_desc); build_tree(state, &mut tmp); core::mem::swap(&mut tmp, &mut state.d_desc); trace!( "\ndist data: dyn {}, stat {}", state.opt_len, state.static_len ); } // Build the bit length tree for the above two trees, and get the index // in bl_order of the last bit length code to send. max_blindex = build_bl_tree(state); // Determine the best encoding. Compute the block lengths in bytes. opt_lenb = (state.opt_len + 3 + 7) >> 3; static_lenb = (state.static_len + 3 + 7) >> 3; trace!( "\nopt {}({}) stat {}({}) stored {} lit {} ", opt_lenb, state.opt_len, static_lenb, state.static_len, stored_len, state.sym_buf.len() / 3 ); if static_lenb <= opt_lenb || state.strategy == Strategy::Fixed { opt_lenb = static_lenb; } } else { assert!(window_offset.is_some(), "lost buf"); /* force a stored block */ opt_lenb = stored_len as usize + 5; static_lenb = stored_len as usize + 5; } #[allow(clippy::unnecessary_unwrap)] if stored_len as usize + 4 <= opt_lenb && window_offset.is_some() { /* 4: two words for the lengths * The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. * Otherwise we can't have processed more than WSIZE input bytes since * the last block flush, because compression would have been * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to * transform a block into a stored block. 
*/ let window_offset = window_offset.unwrap(); let range = window_offset..window_offset + stored_len as usize; zng_tr_stored_block(state, range, last); } else if static_lenb == opt_lenb { state.bit_writer.emit_tree(BlockType::StaticTrees, last); state.compress_block_static_trees(); // cmpr_bits_add(s, s.static_len); } else { state.bit_writer.emit_tree(BlockType::DynamicTrees, last); send_all_trees( state, state.l_desc.max_code + 1, state.d_desc.max_code + 1, max_blindex + 1, ); state.compress_block_dynamic_trees(); } // TODO // This check is made mod 2^32, for files larger than 512 MB and unsigned long implemented on 32 bits. // assert_eq!(state.compressed_len, state.bits_sent, "bad compressed size"); state.init_block(); if last { state.bit_writer.emit_align(); } // Tracev((stderr, "\ncomprlen {}(%lu) ", s->compressed_len>>3, s->compressed_len-7*last)); } pub(crate) fn flush_block_only(stream: &mut DeflateStream, is_last: bool) { zng_tr_flush_block( stream, (stream.state.block_start >= 0).then_some(stream.state.block_start as usize), (stream.state.strstart as isize - stream.state.block_start) as u32, is_last, ); stream.state.block_start = stream.state.strstart as isize; flush_pending(stream) } fn flush_bytes(stream: &mut DeflateStream, mut bytes: &[u8]) -> ControlFlow { let mut state = &mut stream.state; // we'll be using the pending buffer as temporary storage let mut beg = state.bit_writer.pending.pending().len(); /* start of bytes to update crc */ while state.bit_writer.pending.remaining() < bytes.len() { let copy = state.bit_writer.pending.remaining(); state.bit_writer.pending.extend(&bytes[..copy]); stream.adler = crc32( stream.adler as u32, &state.bit_writer.pending.pending()[beg..], ) as z_checksum; state.gzindex += copy; flush_pending(stream); state = &mut stream.state; // could not flush all the pending output if !state.bit_writer.pending.pending().is_empty() { state.last_flush = -1; return ControlFlow::Break(ReturnCode::Ok); } beg = 0; bytes = &bytes[copy..]; } state.bit_writer.pending.extend(bytes); stream.adler = crc32( stream.adler as u32, &state.bit_writer.pending.pending()[beg..], ) as z_checksum; state.gzindex = 0; ControlFlow::Continue(()) } pub fn deflate(stream: &mut DeflateStream, flush: DeflateFlush) -> ReturnCode { if stream.next_out.is_null() || (stream.avail_in != 0 && stream.next_in.is_null()) || (stream.state.status == Status::Finish && flush != DeflateFlush::Finish) { let err = ReturnCode::StreamError; stream.msg = err.error_message(); return err; } if stream.avail_out == 0 { let err = ReturnCode::BufError; stream.msg = err.error_message(); return err; } let old_flush = stream.state.last_flush; stream.state.last_flush = flush as i8; /* Flush as much pending output as possible */ if !stream.state.bit_writer.pending.pending().is_empty() { flush_pending(stream); if stream.avail_out == 0 { /* Since avail_out is 0, deflate will be called again with * more output space, but possibly with both pending and * avail_in equal to zero. There won't be anything to do, * but this is not an error situation so make sure we * return OK instead of BUF_ERROR at next call of deflate: */ stream.state.last_flush = -1; return ReturnCode::Ok; } /* Make sure there is something to do and avoid duplicate consecutive * flushes. For repeated and useless calls with Z_FINISH, we keep * returning Z_STREAM_END instead of Z_BUF_ERROR. 
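* rank_flush makes this comparison meaningful for Z_BLOCK: it maps NoFlush, Block,
* PartialFlush, SyncFlush, FullFlush and Finish (0, 5, 1, 2, 3, 4) to 0, 1, 2, 4, 6
* and 8 respectively, so Z_BLOCK ranks between Z_NO_FLUSH and Z_PARTIAL_FLUSH.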
*/ } else if stream.avail_in == 0 && rank_flush(flush as i8) <= rank_flush(old_flush) && flush != DeflateFlush::Finish { let err = ReturnCode::BufError; stream.msg = err.error_message(); return err; } /* User must not provide more input after the first FINISH: */ if stream.state.status == Status::Finish && stream.avail_in != 0 { let err = ReturnCode::BufError; stream.msg = err.error_message(); return err; } /* Write the header */ if stream.state.status == Status::Init && stream.state.wrap == 0 { stream.state.status = Status::Busy; } if stream.state.status == Status::Init { let header = stream.state.header(); stream .state .bit_writer .pending .extend(&header.to_be_bytes()); /* Save the adler32 of the preset dictionary: */ if stream.state.strstart != 0 { let adler = stream.adler as u32; stream.state.bit_writer.pending.extend(&adler.to_be_bytes()); } stream.adler = ADLER32_INITIAL_VALUE as _; stream.state.status = Status::Busy; // compression must start with an empty pending buffer flush_pending(stream); if !stream.state.bit_writer.pending.pending().is_empty() { stream.state.last_flush = -1; return ReturnCode::Ok; } } if stream.state.status == Status::GZip { /* gzip header */ stream.state.crc_fold = Crc32Fold::new(); stream.state.bit_writer.pending.extend(&[31, 139, 8]); let extra_flags = if stream.state.level == 9 { 2 } else if stream.state.strategy >= Strategy::HuffmanOnly || stream.state.level < 2 { 4 } else { 0 }; match &stream.state.gzhead { None => { let bytes = [0, 0, 0, 0, 0, extra_flags, gz_header::OS_CODE]; stream.state.bit_writer.pending.extend(&bytes); stream.state.status = Status::Busy; /* Compression must start with an empty pending buffer */ flush_pending(stream); if !stream.state.bit_writer.pending.pending().is_empty() { stream.state.last_flush = -1; return ReturnCode::Ok; } } Some(gzhead) => { stream.state.bit_writer.pending.extend(&[gzhead.flags()]); let bytes = (gzhead.time as u32).to_le_bytes(); stream.state.bit_writer.pending.extend(&bytes); stream .state .bit_writer .pending .extend(&[extra_flags, gzhead.os as u8]); if !gzhead.extra.is_null() { let bytes = (gzhead.extra_len as u16).to_le_bytes(); stream.state.bit_writer.pending.extend(&bytes); } if gzhead.hcrc != 0 { stream.adler = crc32( stream.adler as u32, stream.state.bit_writer.pending.pending(), ) as z_checksum } stream.state.gzindex = 0; stream.state.status = Status::Extra; } } } if stream.state.status == Status::Extra { if let Some(gzhead) = stream.state.gzhead.as_ref() { if !gzhead.extra.is_null() { let gzhead_extra = gzhead.extra; let extra = unsafe { core::slice::from_raw_parts( // SAFETY: gzindex is always less than extra_len, and the user // guarantees the pointer is valid for extra_len. gzhead_extra.add(stream.state.gzindex), (gzhead.extra_len & 0xffff) as usize - stream.state.gzindex, ) }; if let ControlFlow::Break(err) = flush_bytes(stream, extra) { return err; } } } stream.state.status = Status::Name; } if stream.state.status == Status::Name { if let Some(gzhead) = stream.state.gzhead.as_ref() { if !gzhead.name.is_null() { // SAFETY: user satisfies precondition that gzhead.name is a C string. 
let gzhead_name = unsafe { CStr::from_ptr(gzhead.name.cast()) }; let bytes = gzhead_name.to_bytes_with_nul(); if let ControlFlow::Break(err) = flush_bytes(stream, bytes) { return err; } } stream.state.status = Status::Comment; } } if stream.state.status == Status::Comment { if let Some(gzhead) = stream.state.gzhead.as_ref() { if !gzhead.comment.is_null() { // SAFETY: user satisfies precondition that gzhead.name is a C string. let gzhead_comment = unsafe { CStr::from_ptr(gzhead.comment.cast()) }; let bytes = gzhead_comment.to_bytes_with_nul(); if let ControlFlow::Break(err) = flush_bytes(stream, bytes) { return err; } } stream.state.status = Status::Hcrc; } } if stream.state.status == Status::Hcrc { if let Some(gzhead) = stream.state.gzhead.as_ref() { if gzhead.hcrc != 0 { let bytes = (stream.adler as u16).to_le_bytes(); if let ControlFlow::Break(err) = flush_bytes(stream, &bytes) { return err; } } } stream.state.status = Status::Busy; // compression must start with an empty pending buffer flush_pending(stream); if !stream.state.bit_writer.pending.pending().is_empty() { stream.state.last_flush = -1; return ReturnCode::Ok; } } // Start a new block or continue the current one. let state = &mut stream.state; if stream.avail_in != 0 || state.lookahead != 0 || (flush != DeflateFlush::NoFlush && state.status != Status::Finish) { let bstate = self::algorithm::run(stream, flush); let state = &mut stream.state; if matches!(bstate, BlockState::FinishStarted | BlockState::FinishDone) { state.status = Status::Finish; } match bstate { BlockState::NeedMore | BlockState::FinishStarted => { if stream.avail_out == 0 { state.last_flush = -1; /* avoid BUF_ERROR next call, see above */ } return ReturnCode::Ok; /* If flush != Z_NO_FLUSH && avail_out == 0, the next call * of deflate should use the same flush parameter to make sure * that the flush is complete. So we don't have to output an * empty block here, this will be done at next call. This also * ensures that for a very small output buffer, we emit at most * one empty block. */ } BlockState::BlockDone => { match flush { DeflateFlush::NoFlush => unreachable!("condition of inner surrounding if"), DeflateFlush::PartialFlush => { state.bit_writer.align(); } DeflateFlush::SyncFlush => { // add an empty stored block that is marked as not final. This is useful for // parallel deflate where we want to make sure the intermediate blocks are not // marked as "last block". zng_tr_stored_block(state, 0..0, false); } DeflateFlush::FullFlush => { // add an empty stored block that is marked as not final. This is useful for // parallel deflate where we want to make sure the intermediate blocks are not // marked as "last block". 
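// FullFlush does everything SyncFlush does and, in addition, clears the hash head
// table below so that output produced after this point never back-references data
// seen before the flush (decompression can restart from here).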
zng_tr_stored_block(state, 0..0, false); state.head.as_mut_slice().fill(0); // forget history if state.lookahead == 0 { state.strstart = 0; state.block_start = 0; state.insert = 0; } } DeflateFlush::Block => { /* fall through */ } DeflateFlush::Finish => unreachable!("condition of outer surrounding if"), } flush_pending(stream); if stream.avail_out == 0 { stream.state.last_flush = -1; /* avoid BUF_ERROR at next call, see above */ return ReturnCode::Ok; } } BlockState::FinishDone => { /* do nothing */ } } } if flush != DeflateFlush::Finish { return ReturnCode::Ok; } // write the trailer if stream.state.wrap == 2 { let crc_fold = core::mem::take(&mut stream.state.crc_fold); stream.adler = crc_fold.finish() as z_checksum; let adler = stream.adler as u32; stream.state.bit_writer.pending.extend(&adler.to_le_bytes()); let total_in = stream.total_in as u32; stream .state .bit_writer .pending .extend(&total_in.to_le_bytes()); } else if stream.state.wrap == 1 { let adler = stream.adler as u32; stream.state.bit_writer.pending.extend(&adler.to_be_bytes()); } flush_pending(stream); // If avail_out is zero, the application will call deflate again to flush the rest. if stream.state.wrap > 0 { stream.state.wrap = -stream.state.wrap; /* write the trailer only once! */ } if stream.state.bit_writer.pending.pending().is_empty() { assert_eq!(stream.state.bit_writer.bits_used, 0, "bi_buf not flushed"); return ReturnCode::StreamEnd; } ReturnCode::Ok } pub(crate) fn flush_pending(stream: &mut DeflateStream) { let state = &mut stream.state; state.bit_writer.flush_bits(); let pending = state.bit_writer.pending.pending(); let len = Ord::min(pending.len(), stream.avail_out as usize); if len == 0 { return; } trace!("\n[FLUSH {len} bytes]"); // SAFETY: len is min(pending, stream.avail_out), so we won't overrun next_out. unsafe { core::ptr::copy_nonoverlapping(pending.as_ptr(), stream.next_out, len) }; stream.next_out = stream.next_out.wrapping_add(len); stream.total_out += len as crate::c_api::z_size; stream.avail_out -= len as crate::c_api::uInt; state.bit_writer.pending.advance(len); } pub fn compress_slice<'a>( output: &'a mut [u8], input: &[u8], config: DeflateConfig, ) -> (&'a mut [u8], ReturnCode) { // SAFETY: a [u8] is a valid [MaybeUninit]. let output_uninit = unsafe { core::slice::from_raw_parts_mut(output.as_mut_ptr() as *mut MaybeUninit, output.len()) }; compress(output_uninit, input, config) } pub fn compress<'a>( output: &'a mut [MaybeUninit], input: &[u8], config: DeflateConfig, ) -> (&'a mut [u8], ReturnCode) { compress_with_flush(output, input, config, DeflateFlush::Finish) } pub fn compress_slice_with_flush<'a>( output: &'a mut [u8], input: &[u8], config: DeflateConfig, flush: DeflateFlush, ) -> (&'a mut [u8], ReturnCode) { // SAFETY: a [u8] is a valid [MaybeUninit], and `compress_with_flush` never uninitializes previously initialized memory. 
let output_uninit = unsafe { core::slice::from_raw_parts_mut(output.as_mut_ptr() as *mut MaybeUninit, output.len()) }; compress_with_flush(output_uninit, input, config, flush) } pub fn compress_with_flush<'a>( output: &'a mut [MaybeUninit], input: &[u8], config: DeflateConfig, final_flush: DeflateFlush, ) -> (&'a mut [u8], ReturnCode) { let mut stream = z_stream { next_in: input.as_ptr() as *mut u8, avail_in: 0, // for special logic in the first iteration total_in: 0, next_out: output.as_mut_ptr() as *mut u8, avail_out: 0, // for special logic on the first iteration total_out: 0, msg: core::ptr::null_mut(), state: core::ptr::null_mut(), zalloc: None, zfree: None, opaque: core::ptr::null_mut(), data_type: 0, adler: 0, reserved: 0, }; let err = init(&mut stream, config); if err != ReturnCode::Ok { return (&mut [], err); } let max = core::ffi::c_uint::MAX as usize; let mut left = output.len(); let mut source_len = input.len(); loop { if stream.avail_out == 0 { stream.avail_out = Ord::min(left, max) as _; left -= stream.avail_out as usize; } if stream.avail_in == 0 { stream.avail_in = Ord::min(source_len, max) as _; source_len -= stream.avail_in as usize; } let flush = if source_len > 0 { DeflateFlush::NoFlush } else { final_flush }; let err = if let Some(stream) = unsafe { DeflateStream::from_stream_mut(&mut stream) } { deflate(stream, flush) } else { ReturnCode::StreamError }; if err != ReturnCode::Ok { break; } } // SAFETY: we have now initialized these bytes let output_slice = unsafe { core::slice::from_raw_parts_mut(output.as_mut_ptr() as *mut u8, stream.total_out as usize) }; // may DataError if insufficient output space let return_code = if let Some(stream) = unsafe { DeflateStream::from_stream_mut(&mut stream) } { match end(stream) { Ok(_) => ReturnCode::Ok, Err(_) => ReturnCode::DataError, } } else { ReturnCode::Ok }; (output_slice, return_code) } pub const fn compress_bound(source_len: usize) -> usize { compress_bound_help(source_len, ZLIB_WRAPLEN) } const fn compress_bound_help(source_len: usize, wrap_len: usize) -> usize { source_len // The source size itself */ // Always at least one byte for any input .wrapping_add(if source_len == 0 { 1 } else { 0 }) // One extra byte for lengths less than 9 .wrapping_add(if source_len < 9 { 1 } else { 0 }) // Source encoding overhead, padded to next full byte .wrapping_add(deflate_quick_overhead(source_len)) // Deflate block overhead bytes .wrapping_add(DEFLATE_BLOCK_OVERHEAD) // none, zlib or gzip wrapper .wrapping_add(wrap_len) } /// heap used to build the Huffman trees /// /// The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. /// The same heap array is used to build all trees. #[derive(Clone)] struct Heap { heap: [u32; 2 * L_CODES + 1], /// number of elements in the heap heap_len: usize, /// element of the largest frequency heap_max: usize, depth: [u8; 2 * L_CODES + 1], } impl Heap { // an empty heap fn new() -> Self { Self { heap: [0; 2 * L_CODES + 1], heap_len: 0, heap_max: 0, depth: [0; 2 * L_CODES + 1], } } /// Construct the initial heap, with least frequent element in /// heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. 
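/// Returns the largest symbol index with a nonzero frequency (`max_code`), or -1 when
/// every frequency is zero; symbols with a zero frequency get a code length of 0.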
fn initialize(&mut self, tree: &mut [Value]) -> isize { let mut max_code = -1; self.heap_len = 0; self.heap_max = HEAP_SIZE; for (n, node) in tree.iter_mut().enumerate() { if node.freq() > 0 { self.heap_len += 1; self.heap[self.heap_len] = n as u32; max_code = n as isize; self.depth[n] = 0; } else { *node.len_mut() = 0; } } max_code } /// Index within the heap array of least frequent node in the Huffman tree const SMALLEST: usize = 1; fn pqdownheap(&mut self, tree: &[Value], mut k: usize) { /* tree: the tree to restore */ /* k: node to move down */ // Given the index $i of a node in the tree, pack the node's frequency and depth // into a single integer. The heap ordering logic uses a primary sort on frequency // and a secondary sort on depth, so packing both into one integer makes it // possible to sort with fewer comparison operations. macro_rules! freq_and_depth { ($i:expr) => { (tree[$i as usize].freq() as u32) << 8 | self.depth[$i as usize] as u32 }; } let v = self.heap[k]; let v_val = freq_and_depth!(v); let mut j = k << 1; /* left son of k */ while j <= self.heap_len { /* Set j to the smallest of the two sons: */ let mut j_val = freq_and_depth!(self.heap[j]); if j < self.heap_len { let j1_val = freq_and_depth!(self.heap[j + 1]); if j1_val <= j_val { j += 1; j_val = j1_val; } } /* Exit if v is smaller than both sons */ if v_val <= j_val { break; } /* Exchange v with the smallest son */ self.heap[k] = self.heap[j]; k = j; /* And continue down the tree, setting j to the left son of k */ j <<= 1; } self.heap[k] = v; } /// Remove the smallest element from the heap and recreate the heap with /// one less element. Updates heap and heap_len. fn pqremove(&mut self, tree: &[Value]) -> u32 { let top = self.heap[Self::SMALLEST]; self.heap[Self::SMALLEST] = self.heap[self.heap_len]; self.heap_len -= 1; self.pqdownheap(tree, Self::SMALLEST); top } /// Construct the Huffman tree by repeatedly combining the least two frequent nodes. 
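/// Each removed pair of nodes is parked in the top part of `heap` (indices `heap_max`
/// and up), kept sorted by frequency; `gen_bitlen` later walks that region to assign
/// bit lengths. `node` is the index at which newly created internal nodes are stored.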
fn construct_huffman_tree(&mut self, tree: &mut [Value], mut node: usize) { loop { let n = self.pqremove(tree) as usize; /* n = node of least frequency */ let m = self.heap[Heap::SMALLEST] as usize; /* m = node of next least frequency */ self.heap_max -= 1; self.heap[self.heap_max] = n as u32; /* keep the nodes sorted by frequency */ self.heap_max -= 1; self.heap[self.heap_max] = m as u32; /* Create a new node father of n and m */ *tree[node].freq_mut() = tree[n].freq() + tree[m].freq(); self.depth[node] = Ord::max(self.depth[n], self.depth[m]) + 1; *tree[n].dad_mut() = node as u16; *tree[m].dad_mut() = node as u16; /* and insert the new node in the heap */ self.heap[Heap::SMALLEST] = node as u32; node += 1; self.pqdownheap(tree, Heap::SMALLEST); if self.heap_len < 2 { break; } } self.heap_max -= 1; self.heap[self.heap_max] = self.heap[Heap::SMALLEST]; } } /// # Safety /// /// The caller must guarantee: /// /// * If `head` is `Some` /// - `head.extra` is `NULL` or is readable for at least `head.extra_len` bytes /// - `head.name` is `NULL` or satisfies the requirements of [`core::ffi::CStr::from_ptr`] /// - `head.comment` is `NULL` or satisfies the requirements of [`core::ffi::CStr::from_ptr`] pub unsafe fn set_header<'a>( stream: &mut DeflateStream<'a>, head: Option<&'a mut gz_header>, ) -> ReturnCode { if stream.state.wrap != 2 { ReturnCode::StreamError as _ } else { stream.state.gzhead = head; ReturnCode::Ok as _ } } // zlib format overhead const ZLIB_WRAPLEN: usize = 6; // gzip format overhead const GZIP_WRAPLEN: usize = 18; const DEFLATE_HEADER_BITS: usize = 3; const DEFLATE_EOBS_BITS: usize = 15; const DEFLATE_PAD_BITS: usize = 6; const DEFLATE_BLOCK_OVERHEAD: usize = (DEFLATE_HEADER_BITS + DEFLATE_EOBS_BITS + DEFLATE_PAD_BITS) >> 3; const DEFLATE_QUICK_LIT_MAX_BITS: usize = 9; const fn deflate_quick_overhead(x: usize) -> usize { let sum = x .wrapping_mul(DEFLATE_QUICK_LIT_MAX_BITS - 8) .wrapping_add(7); // imitate zlib-ng rounding behavior (on windows, c_ulong is 32 bits) (sum as core::ffi::c_ulong >> 3) as usize } /// For the default windowBits of 15 and memLevel of 8, this function returns /// a close to exact, as well as small, upper bound on the compressed size. /// They are coded as constants here for a reason--if the #define's are /// changed, then this function needs to be changed as well. The return /// value for 15 and 8 only works for those exact settings. /// /// For any setting other than those defaults for windowBits and memLevel, /// the value returned is a conservative worst case for the maximum expansion /// resulting from using fixed blocks instead of stored blocks, which deflate /// can emit on compressed data for some combinations of the parameters. /// /// This function could be more sophisticated to provide closer upper bounds for /// every combination of windowBits and memLevel. But even the conservative /// upper bound of about 14% expansion does not seem onerous for output buffer /// allocation. 
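///
/// As a rough worked example of the conservative path (illustrative numbers only): for
/// `source_len = 1024` and no stream, the bound is
/// `1024 + (1031 >> 3) + (1087 >> 6) + 5 + 6 = 1024 + 128 + 16 + 5 + 6 = 1179`,
/// which is the roughly 14% worst-case expansion mentioned above plus a small constant.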
pub fn bound(stream: Option<&mut DeflateStream>, source_len: usize) -> usize { // on windows, c_ulong is only a 32-bit integer let mask = core::ffi::c_ulong::MAX as usize; // conservative upper bound for compressed data let comp_len = source_len .wrapping_add((source_len.wrapping_add(7) & mask) >> 3) .wrapping_add((source_len.wrapping_add(63) & mask) >> 6) .wrapping_add(5); let Some(stream) = stream else { // return conservative bound plus zlib wrapper return comp_len.wrapping_add(6); }; /* compute wrapper length */ let wrap_len = match stream.state.wrap { 0 => { // raw deflate 0 } 1 => { // zlib wrapper if stream.state.strstart != 0 { ZLIB_WRAPLEN + 4 } else { ZLIB_WRAPLEN } } 2 => { // gzip wrapper let mut gz_wrap_len = GZIP_WRAPLEN; if let Some(header) = &stream.state.gzhead { if !header.extra.is_null() { gz_wrap_len += 2 + header.extra_len as usize; } let mut c_string = header.name; if !c_string.is_null() { loop { gz_wrap_len += 1; // SAFETY: user guarantees header.name is a valid C string. unsafe { if *c_string == 0 { break; } c_string = c_string.add(1); } } } let mut c_string = header.comment; if !c_string.is_null() { loop { gz_wrap_len += 1; // SAFETY: user guarantees header.comment is a valid C string. unsafe { if *c_string == 0 { break; } c_string = c_string.add(1); } } } if header.hcrc != 0 { gz_wrap_len += 2; } } gz_wrap_len } _ => { // default ZLIB_WRAPLEN } }; if stream.state.w_bits() != MAX_WBITS as u32 || HASH_BITS < 15 { if stream.state.level == 0 { /* upper bound for stored blocks with length 127 (memLevel == 1) ~4% overhead plus a small constant */ source_len .wrapping_add(source_len >> 5) .wrapping_add(source_len >> 7) .wrapping_add(source_len >> 11) .wrapping_add(7) .wrapping_add(wrap_len) } else { comp_len.wrapping_add(wrap_len) } } else { compress_bound_help(source_len, wrap_len) } } /// # Safety /// /// The `dictionary` must have enough space for the dictionary. 
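// Illustrative usage sketch (hypothetical caller, not an API example from this crate): the
// copy below writes at most the window size (`1 << window_bits` bytes), so a buffer of that
// size is always large enough.
//
//     let mut dict = vec![0u8; 32 * 1024]; // enough for the default 15-bit window
//     let n = unsafe { get_dictionary(&stream, dict.as_mut_ptr()) };
//     dict.truncate(n);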
pub unsafe fn get_dictionary(stream: &DeflateStream<'_>, dictionary: *mut u8) -> usize { let s = &stream.state; let len = Ord::min(s.strstart + s.lookahead, s.w_size); if !dictionary.is_null() && len > 0 { unsafe { core::ptr::copy_nonoverlapping( s.window.as_ptr().add(s.strstart + s.lookahead - len), dictionary, len, ); } } len } #[cfg(test)] mod test { use crate::{ inflate::{uncompress_slice, InflateConfig, InflateStream}, InflateFlush, }; use super::*; use core::{ffi::CStr, sync::atomic::AtomicUsize}; #[test] fn detect_data_type_basic() { let empty = || [Value::new(0, 0); LITERALS]; assert_eq!(State::detect_data_type(&empty()), DataType::Binary); let mut binary = empty(); binary[0] = Value::new(1, 0); assert_eq!(State::detect_data_type(&binary), DataType::Binary); let mut text = empty(); text[b'\r' as usize] = Value::new(1, 0); assert_eq!(State::detect_data_type(&text), DataType::Text); let mut text = empty(); text[b'a' as usize] = Value::new(1, 0); assert_eq!(State::detect_data_type(&text), DataType::Text); let mut non_text = empty(); non_text[7] = Value::new(1, 0); assert_eq!(State::detect_data_type(&non_text), DataType::Binary); } #[test] fn from_stream_mut() { unsafe { assert!(DeflateStream::from_stream_mut(core::ptr::null_mut()).is_none()); let mut stream = z_stream::default(); assert!(DeflateStream::from_stream_mut(&mut stream).is_none()); // state is still NULL assert!(DeflateStream::from_stream_mut(&mut stream).is_none()); init(&mut stream, DeflateConfig::default()); let stream = DeflateStream::from_stream_mut(&mut stream); assert!(stream.is_some()); assert!(end(stream.unwrap()).is_ok()); } } unsafe extern "C" fn fail_nth_allocation( opaque: crate::c_api::voidpf, items: crate::c_api::uInt, size: crate::c_api::uInt, ) -> crate::c_api::voidpf { let count = unsafe { &*(opaque as *const AtomicUsize) }; if count.fetch_add(1, core::sync::atomic::Ordering::Relaxed) != N { // must use the C allocator internally because (de)allocation is based on function // pointer values and because we don't use the rust allocator directly, the allocation // logic will store the pointer to the start at the start of the allocation. 
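            // Note (added for clarity): `N` is 0-based, so every allocation except the N-th one
            // is forwarded to the C allocator below, while the N-th call returns NULL. The tests
            // that follow use this to exercise each individual allocation-failure path in `init`
            // and `copy`.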
unsafe { (crate::allocate::C.zalloc)(opaque, items, size) } } else { core::ptr::null_mut() } } #[test] fn init_invalid_allocator() { { let atomic = AtomicUsize::new(0); let mut stream = z_stream { zalloc: Some(fail_nth_allocation::<0>), zfree: Some(crate::allocate::C.zfree), opaque: &atomic as *const _ as *const core::ffi::c_void as *mut _, ..z_stream::default() }; assert_eq!( init(&mut stream, DeflateConfig::default()), ReturnCode::MemError ); } { let atomic = AtomicUsize::new(0); let mut stream = z_stream { zalloc: Some(fail_nth_allocation::<3>), zfree: Some(crate::allocate::C.zfree), opaque: &atomic as *const _ as *const core::ffi::c_void as *mut _, ..z_stream::default() }; assert_eq!( init(&mut stream, DeflateConfig::default()), ReturnCode::MemError ); } { let atomic = AtomicUsize::new(0); let mut stream = z_stream { zalloc: Some(fail_nth_allocation::<5>), zfree: Some(crate::allocate::C.zfree), opaque: &atomic as *const _ as *const core::ffi::c_void as *mut _, ..z_stream::default() }; assert_eq!( init(&mut stream, DeflateConfig::default()), ReturnCode::MemError ); } } mod copy_invalid_allocator { use super::*; #[test] fn fail_0() { let mut stream = z_stream::default(); let atomic = AtomicUsize::new(0); stream.opaque = &atomic as *const _ as *const core::ffi::c_void as *mut _; stream.zalloc = Some(fail_nth_allocation::<6>); stream.zfree = Some(crate::allocate::C.zfree); // init performs 6 allocations; we don't want those to fail assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok); let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else { unreachable!() }; let mut stream_copy = MaybeUninit::::zeroed(); assert_eq!(copy(&mut stream_copy, stream), ReturnCode::MemError); assert!(end(stream).is_ok()); } #[test] fn fail_3() { let mut stream = z_stream::default(); let atomic = AtomicUsize::new(0); stream.zalloc = Some(fail_nth_allocation::<{ 6 + 3 }>); stream.zfree = Some(crate::allocate::C.zfree); stream.opaque = &atomic as *const _ as *const core::ffi::c_void as *mut _; // init performs 6 allocations; we don't want those to fail assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok); let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else { unreachable!() }; let mut stream_copy = MaybeUninit::::zeroed(); assert_eq!(copy(&mut stream_copy, stream), ReturnCode::MemError); assert!(end(stream).is_ok()); } #[test] fn fail_5() { let mut stream = z_stream::default(); let atomic = AtomicUsize::new(0); stream.zalloc = Some(fail_nth_allocation::<{ 6 + 5 }>); stream.zfree = Some(crate::allocate::C.zfree); stream.opaque = &atomic as *const _ as *const core::ffi::c_void as *mut _; // init performs 6 allocations; we don't want those to fail assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok); let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else { unreachable!() }; let mut stream_copy = MaybeUninit::::zeroed(); assert_eq!(copy(&mut stream_copy, stream), ReturnCode::MemError); assert!(end(stream).is_ok()); } } mod invalid_deflate_config { use super::*; #[test] fn sanity_check() { let mut stream = z_stream::default(); assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok); assert!(stream.zalloc.is_some()); assert!(stream.zfree.is_some()); // this should be the default level let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap(); assert_eq!(stream.state.level, 6); assert!(end(stream).is_ok()); } #[test] fn window_bits_correction() { // 
window_bits of 8 gets turned into 9 internally let mut stream = z_stream::default(); let config = DeflateConfig { window_bits: 8, ..Default::default() }; assert_eq!(init(&mut stream, config), ReturnCode::Ok); let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap(); assert_eq!(stream.state.w_bits(), 9); assert!(end(stream).is_ok()); } #[test] fn window_bits_too_low() { let mut stream = z_stream::default(); let config = DeflateConfig { window_bits: -16, ..Default::default() }; assert_eq!(init(&mut stream, config), ReturnCode::StreamError); } #[test] fn window_bits_too_high() { // window bits too high let mut stream = z_stream::default(); let config = DeflateConfig { window_bits: 42, ..Default::default() }; assert_eq!(init(&mut stream, config), ReturnCode::StreamError); } } #[test] fn end_data_error() { let mut stream = z_stream::default(); assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok); let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap(); // next deflate into too little space let input = b"Hello World\n"; stream.next_in = input.as_ptr() as *mut u8; stream.avail_in = input.len() as _; let output = &mut [0, 0, 0]; stream.next_out = output.as_mut_ptr(); stream.avail_out = output.len() as _; // the deflate is fine assert_eq!(deflate(stream, DeflateFlush::NoFlush), ReturnCode::Ok); // but end is not assert!(end(stream).is_err()); } #[test] fn test_reset_keep() { let mut stream = z_stream::default(); assert_eq!(init(&mut stream, DeflateConfig::default()), ReturnCode::Ok); let stream = unsafe { DeflateStream::from_stream_mut(&mut stream) }.unwrap(); // next deflate into too little space let input = b"Hello World\n"; stream.next_in = input.as_ptr() as *mut u8; stream.avail_in = input.len() as _; let output = &mut [0; 1024]; stream.next_out = output.as_mut_ptr(); stream.avail_out = output.len() as _; assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::StreamEnd); assert_eq!(reset_keep(stream), ReturnCode::Ok); let output = &mut [0; 1024]; stream.next_out = output.as_mut_ptr(); stream.avail_out = output.len() as _; assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::StreamEnd); assert!(end(stream).is_ok()); } #[test] fn hello_world_huffman_only() { const EXPECTED: &[u8] = &[ 0x78, 0x01, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0x57, 0x08, 0xcf, 0x2f, 0xca, 0x49, 0x51, 0xe4, 0x02, 0x00, 0x20, 0x91, 0x04, 0x48, ]; let input = "Hello World!\n"; let mut output = vec![0; 128]; let config = DeflateConfig { level: 6, method: Method::Deflated, window_bits: crate::MAX_WBITS, mem_level: DEF_MEM_LEVEL, strategy: Strategy::HuffmanOnly, }; let (output, err) = compress_slice(&mut output, input.as_bytes(), config); assert_eq!(err, ReturnCode::Ok); assert_eq!(output.len(), EXPECTED.len()); assert_eq!(EXPECTED, output); } #[test] fn hello_world_quick() { const EXPECTED: &[u8] = &[ 0x78, 0x01, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0x57, 0x08, 0xcf, 0x2f, 0xca, 0x49, 0x51, 0xe4, 0x02, 0x00, 0x20, 0x91, 0x04, 0x48, ]; let input = "Hello World!\n"; let mut output = vec![0; 128]; let config = DeflateConfig { level: 1, method: Method::Deflated, window_bits: crate::MAX_WBITS, mem_level: DEF_MEM_LEVEL, strategy: Strategy::Default, }; let (output, err) = compress_slice(&mut output, input.as_bytes(), config); assert_eq!(err, ReturnCode::Ok); assert_eq!(output.len(), EXPECTED.len()); assert_eq!(EXPECTED, output); } #[test] fn hello_world_quick_random() { const EXPECTED: &[u8] = &[ 0x78, 0x01, 0x53, 0xe1, 0x50, 0x51, 0xe1, 0x52, 0x51, 0x51, 0x01, 0x00, 0x03, 
0xec, 0x00, 0xeb, ]; let input = "$\u{8}$$\n$$$"; let mut output = vec![0; 128]; let config = DeflateConfig { level: 1, method: Method::Deflated, window_bits: crate::MAX_WBITS, mem_level: DEF_MEM_LEVEL, strategy: Strategy::Default, }; let (output, err) = compress_slice(&mut output, input.as_bytes(), config); assert_eq!(err, ReturnCode::Ok); assert_eq!(output.len(), EXPECTED.len()); assert_eq!(EXPECTED, output); } fn fuzz_based_test(input: &[u8], config: DeflateConfig, expected: &[u8]) { let mut output_rs = [0; 1 << 17]; let (output_rs, err) = compress_slice(&mut output_rs, input, config); assert_eq!(err, ReturnCode::Ok); assert_eq!(output_rs, expected); } #[test] fn simple_rle() { fuzz_based_test( "\0\0\0\0\u{6}".as_bytes(), DeflateConfig { level: -1, method: Method::Deflated, window_bits: 11, mem_level: 4, strategy: Strategy::Rle, }, &[56, 17, 99, 0, 2, 54, 0, 0, 11, 0, 7], ) } #[test] fn fill_window_out_of_bounds() { const INPUT: &[u8] = &[ 0x71, 0x71, 0x71, 0x71, 0x71, 0x6a, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1d, 0x1d, 0x1d, 0x1d, 0x63, 0x63, 0x63, 0x63, 0x63, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x1d, 0x27, 0x0, 0x0, 0x0, 0x1d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x31, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1d, 0x1d, 0x0, 0x0, 0x0, 0x0, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x48, 0x50, 0x50, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2c, 0x0, 0x0, 0x0, 0x0, 0x4a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x70, 0x71, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x6a, 0x0, 0x0, 0x0, 0x0, 0x71, 0x0, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x31, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x0, 0x4a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x70, 0x71, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x6a, 0x0, 0x0, 0x0, 0x0, 0x71, 0x0, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x31, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1d, 0x1d, 0x0, 0x0, 0x0, 0x0, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x48, 0x50, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x3b, 0x3f, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x50, 0x50, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2c, 0x0, 0x0, 0x0, 0x0, 0x4a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x70, 0x71, 0x71, 0x0, 
0x0, 0x0, 0x6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x70, 0x71, 0x71, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x71, 0x71, 0x71, 0x3b, 0x3f, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x3b, 0x3f, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x75, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x10, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x3b, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x76, 0x71, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x10, 0x0, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x3b, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x76, 0x71, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x0, 0x0, 0x0, 0x0, 0x0, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x30, 0x34, 0x34, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71, 0x0, 0x0, 0x0, 0x0, 0x6, ]; fuzz_based_test( INPUT, DeflateConfig { level: -1, method: Method::Deflated, window_bits: 9, mem_level: 1, strategy: Strategy::HuffmanOnly, }, &[ 0x18, 0x19, 0x4, 0xc1, 0x21, 0x1, 0xc4, 0x0, 0x10, 0x3, 0xb0, 0x18, 0x29, 0x1e, 0x7e, 0x17, 0x83, 0xf5, 0x70, 0x6c, 0xac, 0xfe, 0xc9, 0x27, 0xdb, 0xb6, 0x6f, 0xdb, 0xb6, 0x6d, 0xdb, 0x80, 0x24, 0xb9, 0xbb, 0xbb, 0x24, 0x49, 0x92, 0x24, 0xf, 0x2, 0xd8, 0x36, 0x0, 0xf0, 0x3, 0x0, 0x0, 0x24, 0xd0, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xdb, 0xbe, 0x6d, 0xf9, 0x13, 0x4, 0xc7, 0x4, 0x0, 0x80, 0x30, 0x0, 0xc3, 0x22, 0x68, 0xf, 0x36, 0x90, 0xc2, 0xb5, 0xfa, 0x7f, 0x48, 0x80, 0x81, 0xb, 0x40, 0x55, 0x55, 0x55, 0xd5, 0x16, 0x80, 0xaa, 0x7, 0x9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe, 0x7c, 0x82, 0xe0, 0x98, 0x0, 0x0, 0x0, 0x4, 0x60, 0x10, 0xf9, 0x8c, 0xe2, 0xe5, 0xfa, 0x3f, 0x2, 0x54, 0x55, 0x55, 0x65, 0x0, 0xa8, 0xaa, 0xaa, 0xaa, 0xba, 0x2, 0x50, 0xb5, 0x90, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x78, 0x82, 0xe0, 0xd0, 0x8a, 0x41, 0x0, 0x0, 0xa2, 0x58, 0x54, 0xb7, 0x60, 0x83, 0x9a, 0x6a, 0x4, 0x96, 0x87, 0xba, 0x51, 0xf8, 0xfb, 0x9b, 0x26, 0xfc, 0x0, 0x1c, 0x7, 0x6c, 0xdb, 0xb6, 0x6d, 0xdb, 0xb6, 0x6d, 0xf7, 0xa8, 0x3a, 0xaf, 0xaa, 0x6a, 0x3, 0xf8, 0xc2, 0x3, 0x40, 0x55, 0x55, 0x55, 0xd5, 0x5b, 0xf8, 0x80, 0xaa, 0x7a, 0xb, 0x0, 0x7f, 0x82, 0xe0, 0x98, 0x0, 0x40, 0x18, 0x0, 0x82, 0xd8, 0x49, 0x40, 0x2, 0x22, 0x7e, 0xeb, 0x80, 0xa6, 0xc, 0xa0, 0x9f, 0xa4, 0x2a, 0x38, 0xf, 0x0, 0x0, 0xe7, 0x1, 0xdc, 0x55, 0x95, 0x17, 0x0, 0x0, 0xae, 0x0, 0x38, 0xc0, 0x67, 0xdb, 0x36, 0x80, 0x2b, 0x0, 0xe, 0xf0, 0xd9, 0xf6, 0x13, 0x4, 0xc7, 0x4, 0x0, 0x0, 0x30, 0xc, 0x83, 0x22, 0x69, 0x7, 0xc6, 0xea, 0xff, 0x19, 0x0, 0x0, 0x80, 0xaa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8e, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0x6a, 0xf5, 0x63, 0x60, 0x60, 0x3, 0x0, 0xee, 0x8a, 0x88, 0x67, ], ) } #[test] fn gzip_no_header() { let config = DeflateConfig { level: 9, method: 
Method::Deflated, window_bits: 31, // gzip ..Default::default() }; let input = b"Hello World!"; let os = gz_header::OS_CODE; fuzz_based_test( input, config, &[ 31, 139, 8, 0, 0, 0, 0, 0, 2, os, 243, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73, 81, 4, 0, 163, 28, 41, 28, 12, 0, 0, 0, ], ) } #[test] #[rustfmt::skip] fn gzip_stored_block_checksum() { fuzz_based_test( &[ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 9, 0, ], DeflateConfig { level: 0, method: Method::Deflated, window_bits: 26, mem_level: 6, strategy: Strategy::Default, }, &[ 31, 139, 8, 0, 0, 0, 0, 0, 4, gz_header::OS_CODE, 1, 18, 0, 237, 255, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 9, 0, 60, 101, 156, 55, 18, 0, 0, 0, ], ) } #[test] fn gzip_header_pending_flush() { let extra = "aaaaaaaaaaaaaaaaaaaa\0"; let name = "bbbbbbbbbbbbbbbbbbbb\0"; let comment = "cccccccccccccccccccc\0"; let mut header = gz_header { text: 0, time: 0, xflags: 0, os: 0, extra: extra.as_ptr() as *mut _, extra_len: extra.len() as _, extra_max: 0, name: name.as_ptr() as *mut _, name_max: 0, comment: comment.as_ptr() as *mut _, comm_max: 0, hcrc: 1, done: 0, }; let config = DeflateConfig { window_bits: 31, mem_level: 1, ..Default::default() }; let mut stream = z_stream::default(); assert_eq!(init(&mut stream, config), ReturnCode::Ok); let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else { unreachable!() }; unsafe { set_header(stream, Some(&mut header)) }; let input = b"Hello World\n"; stream.next_in = input.as_ptr() as *mut _; stream.avail_in = input.len() as _; let mut output = [0u8; 1024]; stream.next_out = output.as_mut_ptr(); stream.avail_out = 100; assert_eq!(stream.state.bit_writer.pending.capacity(), 512); // only 12 bytes remain, so to write the name the pending buffer must be flushed. // but there is insufficient output space to flush (only 100 bytes) stream.state.bit_writer.pending.extend(&[0; 500]); assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::Ok); // now try that again but with sufficient output space stream.avail_out = output.len() as _; assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::StreamEnd); let n = stream.total_out as usize; assert!(end(stream).is_ok()); let output_rs = &mut output[..n]; assert_eq!(output_rs.len(), 500 + 99); } #[test] fn gzip_with_header() { // this test is here mostly so we get some MIRI action on the gzip header. 
A test that // compares behavior with zlib-ng is in the libz-rs-sys test suite let extra = "some extra stuff\0"; let name = "nomen est omen\0"; let comment = "such comment\0"; let mut header = gz_header { text: 0, time: 0, xflags: 0, os: 0, extra: extra.as_ptr() as *mut _, extra_len: extra.len() as _, extra_max: 0, name: name.as_ptr() as *mut _, name_max: 0, comment: comment.as_ptr() as *mut _, comm_max: 0, hcrc: 1, done: 0, }; let config = DeflateConfig { window_bits: 31, ..Default::default() }; let mut stream = z_stream::default(); assert_eq!(init(&mut stream, config), ReturnCode::Ok); let Some(stream) = (unsafe { DeflateStream::from_stream_mut(&mut stream) }) else { unreachable!() }; unsafe { set_header(stream, Some(&mut header)) }; let input = b"Hello World\n"; stream.next_in = input.as_ptr() as *mut _; stream.avail_in = input.len() as _; let mut output = [0u8; 256]; stream.next_out = output.as_mut_ptr(); stream.avail_out = output.len() as _; assert_eq!(deflate(stream, DeflateFlush::Finish), ReturnCode::StreamEnd); let n = stream.total_out as usize; assert!(end(stream).is_ok()); let output_rs = &mut output[..n]; assert_eq!(output_rs.len(), 81); { let mut stream = z_stream::default(); let config = InflateConfig { window_bits: config.window_bits, }; assert_eq!(crate::inflate::init(&mut stream, config), ReturnCode::Ok); let Some(stream) = (unsafe { InflateStream::from_stream_mut(&mut stream) }) else { unreachable!(); }; stream.next_in = output_rs.as_mut_ptr() as _; stream.avail_in = output_rs.len() as _; let mut output = [0u8; 12]; stream.next_out = output.as_mut_ptr(); stream.avail_out = output.len() as _; let mut extra_buf = [0u8; 64]; let mut name_buf = [0u8; 64]; let mut comment_buf = [0u8; 64]; let mut header = gz_header { text: 0, time: 0, xflags: 0, os: 0, extra: extra_buf.as_mut_ptr(), extra_len: 0, extra_max: extra_buf.len() as _, name: name_buf.as_mut_ptr(), name_max: name_buf.len() as _, comment: comment_buf.as_mut_ptr(), comm_max: comment_buf.len() as _, hcrc: 0, done: 0, }; assert_eq!( unsafe { crate::inflate::get_header(stream, Some(&mut header)) }, ReturnCode::Ok ); assert_eq!( unsafe { crate::inflate::inflate(stream, InflateFlush::Finish) }, ReturnCode::StreamEnd ); crate::inflate::end(stream); assert!(!header.comment.is_null()); assert_eq!( unsafe { CStr::from_ptr(header.comment.cast()) } .to_str() .unwrap(), comment.trim_end_matches('\0') ); assert!(!header.name.is_null()); assert_eq!( unsafe { CStr::from_ptr(header.name.cast()) } .to_str() .unwrap(), name.trim_end_matches('\0') ); assert!(!header.extra.is_null()); assert_eq!( unsafe { CStr::from_ptr(header.extra.cast()) } .to_str() .unwrap(), extra.trim_end_matches('\0') ); } } #[test] fn insufficient_compress_space() { const DATA: &[u8] = include_bytes!("deflate/test-data/inflate_buf_error.dat"); fn helper(deflate_buf: &mut [u8]) -> ReturnCode { let config = DeflateConfig { level: 0, method: Method::Deflated, window_bits: 10, mem_level: 6, strategy: Strategy::Default, }; let (output, err) = compress_slice(deflate_buf, DATA, config); assert_eq!(err, ReturnCode::Ok); let config = InflateConfig { window_bits: config.window_bits, }; let mut uncompr = [0; 1 << 17]; let (uncompr, err) = uncompress_slice(&mut uncompr, output, config); if err == ReturnCode::Ok { assert_eq!(DATA, uncompr); } err } let mut output = [0; 1 << 17]; // this is too little space assert_eq!(helper(&mut output[..1 << 16]), ReturnCode::DataError); // this is sufficient space assert_eq!(helper(&mut output), ReturnCode::Ok); } fn test_flush(flush: 
DeflateFlush, expected: &[u8]) { let input = b"Hello World!\n"; let config = DeflateConfig { level: 6, // use gzip method: Method::Deflated, window_bits: 16 + crate::MAX_WBITS, mem_level: DEF_MEM_LEVEL, strategy: Strategy::Default, }; let mut output_rs = vec![0; 128]; // with the flush modes that we test here, the deflate process still has `Status::Busy`, // and the `deflateEnd` function will return `DataError`. let expected_err = ReturnCode::DataError; let (rs, err) = compress_slice_with_flush(&mut output_rs, input, config, flush); assert_eq!(expected_err, err); assert_eq!(rs, expected); } #[test] #[rustfmt::skip] fn sync_flush() { test_flush( DeflateFlush::SyncFlush, &[ 31, 139, 8, 0, 0, 0, 0, 0, 0, gz_header::OS_CODE, 242, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73, 81, 228, 2, 0, 0, 0, 255, 255, ], ) } #[test] #[rustfmt::skip] fn partial_flush() { test_flush( DeflateFlush::PartialFlush, &[ 31, 139, 8, 0, 0, 0, 0, 0, 0, gz_header::OS_CODE, 242, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73, 81, 228, 2, 8, ], ); } #[test] #[rustfmt::skip] fn full_flush() { test_flush( DeflateFlush::FullFlush, &[ 31, 139, 8, 0, 0, 0, 0, 0, 0, gz_header::OS_CODE, 242, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73, 81, 228, 2, 0, 0, 0, 255, 255, ], ); } #[test] #[rustfmt::skip] fn block_flush() { test_flush( DeflateFlush::Block, &[ 31, 139, 8, 0, 0, 0, 0, 0, 0, gz_header::OS_CODE, 242, 72, 205, 201, 201, 87, 8, 207, 47, 202, 73, 81, 228, 2, ], ); } #[test] // splits the input into two, deflates them seperately and then joins the deflated byte streams // into something that can be correctly inflated again. This is the basic idea behind pigz, and // allows for parallel compression. fn split_deflate() { let input = "Hello World!\n"; let (input1, input2) = input.split_at(6); let mut output1 = vec![0; 128]; let mut output2 = vec![0; 128]; let config = DeflateConfig { level: 6, // use gzip method: Method::Deflated, window_bits: 16 + crate::MAX_WBITS, mem_level: DEF_MEM_LEVEL, strategy: Strategy::Default, }; // see also the docs on `SyncFlush`. it makes sure everything is flushed, ends on a byte // boundary, and that the final block does not have the "last block" bit set. 
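        // In outline, the join below works as follows (all steps are visible in the code):
        // 1. compress part 1 with `SyncFlush` (byte-aligned output, no final-block bit);
        // 2. compress part 2 with `Finish`, then strip its 10-byte gzip header and 8-byte trailer;
        // 3. concatenate the two deflate streams;
        // 4. recompute the trailer: combine the CRCs with `crc32_combine` and append the combined
        //    CRC and the total input length, both little-endian.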
let (prefix, err) = compress_slice_with_flush( &mut output1, input1.as_bytes(), config, DeflateFlush::SyncFlush, ); assert_eq!(err, ReturnCode::DataError); let (output2, err) = compress_slice_with_flush( &mut output2, input2.as_bytes(), config, DeflateFlush::Finish, ); assert_eq!(err, ReturnCode::Ok); let inflate_config = crate::inflate::InflateConfig { window_bits: 16 + 15, }; // cuts off the length and crc let (suffix, end) = output2.split_at(output2.len() - 8); let (crc2, len2) = end.split_at(4); let crc2 = u32::from_le_bytes(crc2.try_into().unwrap()); // cuts off the gzip header (10 bytes) from the front let suffix = &suffix[10..]; let mut result: Vec = Vec::new(); result.extend(prefix.iter()); result.extend(suffix); // it would be more proper to use `stream.total_in` here, but the slice helpers hide the // stream so we're cheating a bit here let len1 = input1.len() as u32; let len2 = u32::from_le_bytes(len2.try_into().unwrap()); assert_eq!(len2 as usize, input2.len()); let crc1 = crate::crc32(0, input1.as_bytes()); let crc = crate::crc32_combine(crc1, crc2, len2 as u64); // combined crc of the parts should be the crc of the whole let crc_cheating = crate::crc32(0, input.as_bytes()); assert_eq!(crc, crc_cheating); // write the trailer result.extend(crc.to_le_bytes()); result.extend((len1 + len2).to_le_bytes()); let mut output = vec![0; 128]; let (output, err) = crate::inflate::uncompress_slice(&mut output, &result, inflate_config); assert_eq!(err, ReturnCode::Ok); assert_eq!(output, input.as_bytes()); } #[test] fn inflate_window_copy_slice() { let uncompressed = [ 9, 126, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 76, 33, 8, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 76, 33, 8, 2, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 10, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 14, 0, 0, 0, 0, 0, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 9, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 12, 28, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 10, 0, 0, 0, 0, 14, 0, 0, 0, 0, 0, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 14, 0, 0, 0, 0, 0, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 9, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 12, 28, 0, 2, 0, 0, 0, 63, 1, 0, 12, 2, 36, 0, 28, 0, 0, 0, 1, 0, 0, 63, 63, 13, 0, 0, 0, 0, 0, 0, 0, 63, 63, 63, 63, 0, 0, 0, 0, 0, 0, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 91, 0, 
0, 0, 9, 0, 0, 0, 9, 0, 0, 12, 33, 2, 0, 0, 8, 0, 4, 0, 0, 0, 12, 10, 41, 12, 10, 47, ]; let compressed = &[ 31, 139, 8, 0, 0, 0, 0, 0, 4, 3, 181, 193, 49, 14, 194, 32, 24, 128, 209, 175, 192, 0, 228, 151, 232, 206, 66, 226, 226, 96, 60, 2, 113, 96, 235, 13, 188, 139, 103, 23, 106, 104, 108, 100, 49, 169, 239, 185, 39, 11, 199, 7, 51, 39, 171, 248, 118, 226, 63, 52, 157, 120, 86, 102, 78, 86, 209, 104, 58, 241, 84, 129, 166, 12, 4, 154, 178, 229, 202, 30, 36, 130, 166, 19, 79, 21, 104, 202, 64, 160, 41, 91, 174, 236, 65, 34, 10, 200, 19, 162, 206, 68, 96, 130, 156, 15, 188, 229, 138, 197, 157, 161, 35, 3, 87, 126, 245, 0, 28, 224, 64, 146, 2, 139, 1, 196, 95, 196, 223, 94, 10, 96, 92, 33, 86, 2, 0, 0, ]; let config = InflateConfig { window_bits: 25 }; let mut dest_vec_rs = vec![0u8; uncompressed.len()]; let (output_rs, error) = crate::inflate::uncompress_slice(&mut dest_vec_rs, compressed, config); assert_eq!(ReturnCode::Ok, error); assert_eq!(output_rs, uncompressed); } #[test] fn hash_calc_difference() { let input = [ 0, 0, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 55, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 112, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 0, 0, 0, 0, 0, 0, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 55, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 50, 0, ]; let config = DeflateConfig { level: 6, method: Method::Deflated, window_bits: 9, mem_level: 8, strategy: Strategy::Default, }; let expected = [ 24, 149, 99, 96, 96, 96, 96, 208, 6, 17, 112, 138, 129, 193, 128, 1, 29, 24, 50, 208, 1, 200, 146, 169, 79, 24, 74, 59, 96, 147, 52, 71, 22, 70, 246, 88, 26, 94, 80, 128, 83, 6, 162, 219, 144, 76, 183, 210, 5, 8, 67, 105, 36, 159, 35, 128, 57, 118, 97, 100, 160, 197, 192, 192, 96, 196, 0, 0, 3, 228, 25, 128, ]; fuzz_based_test(&input, config, &expected); } #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))] mod _cache_lines { use super::State; // FIXME: once zlib-rs Minimum Supported Rust Version >= 1.77, switch to core::mem::offset_of // and move this _cache_lines module from up a level from tests to super:: 
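        // Sketch of what that would look like (assumes the field names stay the same; not yet
        // used because of the MSRV constraint above):
        //
        //     const _: () = assert!(core::mem::offset_of!(State, status) == 0);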
use memoffset::offset_of; const _: () = assert!(offset_of!(State, status) == 0); const _: () = assert!(offset_of!(State, _cache_line_0) == 64); const _: () = assert!(offset_of!(State, _cache_line_1) == 128); const _: () = assert!(offset_of!(State, _cache_line_2) == 192); const _: () = assert!(offset_of!(State, _cache_line_3) == 256); } } zlib-rs-0.5.2/src/inflate/bitreader.rs000064400000000000000000000135151046102023000157370ustar 00000000000000use core::marker::PhantomData; use crate::ReturnCode; #[derive(Debug, Clone, Copy)] pub(crate) struct BitReader<'a> { ptr: *const u8, end: *const u8, bit_buffer: u64, bits_used: u8, _marker: PhantomData<&'a [u8]>, } impl<'a> BitReader<'a> { pub fn new(slice: &'a [u8]) -> Self { let range = slice.as_ptr_range(); Self { ptr: range.start, end: range.end, bit_buffer: 0, bits_used: 0, _marker: PhantomData, } } /// # Safety /// /// ptr and len must satisfy the requirements of [`core::slice::from_raw_parts`]. #[inline(always)] pub unsafe fn update_slice(&mut self, ptr: *const u8, len: usize) { let end = ptr.wrapping_add(len); *self = Self { ptr, end, bit_buffer: self.bit_buffer, bits_used: self.bits_used, _marker: PhantomData, }; } #[inline(always)] pub fn advance(&mut self, bytes: usize) { self.ptr = Ord::min(self.ptr.wrapping_add(bytes), self.end); } #[inline(always)] pub fn as_ptr(&self) -> *const u8 { self.ptr } #[inline(always)] pub fn as_mut_ptr(&mut self) -> *mut u8 { self.ptr as *mut u8 } #[inline(always)] pub fn as_slice(&self) -> &[u8] { let len = self.bytes_remaining(); // SAFETY: condition of constructing this struct. unsafe { core::slice::from_raw_parts(self.ptr, len) } } #[inline(always)] pub fn bits_in_buffer(&self) -> u8 { self.bits_used } #[inline(always)] pub fn hold(&self) -> u64 { self.bit_buffer } #[inline(always)] pub fn bytes_remaining(&self) -> usize { self.end as usize - self.ptr as usize } #[inline(always)] pub fn bytes_remaining_including_buffer(&self) -> usize { (self.end as usize - self.ptr as usize) + (self.bits_used as usize >> 3) } #[inline(always)] pub fn need_bits(&mut self, n: usize) -> Result<(), ReturnCode> { while (self.bits_used as usize) < n { self.pull_byte()?; } Ok(()) } /// Remove zero to seven bits as needed to go to a byte boundary #[inline(always)] pub fn next_byte_boundary(&mut self) { self.bit_buffer >>= self.bits_used & 0b0111; self.bits_used -= self.bits_used & 0b0111; } #[inline(always)] pub fn pull_byte(&mut self) -> Result { // SAFETY: bounds checking. if self.ptr == self.end { return Err(ReturnCode::Ok); } let byte = unsafe { *self.ptr }; self.ptr = unsafe { self.ptr.add(1) }; self.bit_buffer |= (byte as u64) << self.bits_used; self.bits_used += 8; Ok(byte) } #[inline(always)] /// Copy enough bytes from the BitReader's underlying slice to fill the internal /// bit buffer with 7 bytes of data. /// /// Safety: /// /// `self.ptr` must point to at least 8 readable bytes, as indicated by `bytes_remaining()` pub unsafe fn refill(&mut self) { debug_assert!(self.bytes_remaining() >= 8); // SAFETY: caller ensures we have 8 bytes to read for a u64. let read = unsafe { core::ptr::read_unaligned(self.ptr.cast::()) }.to_le(); self.bit_buffer |= read << self.bits_used; // this xor was previously a subtraction but was changed for performance reasons. // for bits_used between 0 and 63 (inclusive), it will always have the same behavior. 
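        // Added explanation: for 0 <= bits_used <= 63, `63 ^ bits_used` equals `63 - bits_used`,
        // because 63 is all ones in the low six bits, so the subtraction can never borrow.
        // Shifting right by 3 turns that into the number of whole bytes that newly fit in the
        // bit buffer, which is how far the read pointer must advance. After `bits_used |= 56`
        // below, the buffer holds `56 + (old bits_used % 8)` valid bits, i.e. between 56 and 63.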
let increment = (63 ^ self.bits_used) >> 3; self.ptr = self.ptr.wrapping_add(increment as usize); self.bits_used |= 56; } #[inline(always)] pub fn bits(&mut self, n: usize) -> u64 { // debug_assert!( n <= self.bits_used, "{n} bits requested, but only {} avaliable", self.bits_used); let lowest_n_bits = (1 << n) - 1; self.bit_buffer & lowest_n_bits } #[inline(always)] pub fn drop_bits(&mut self, n: u8) { self.bit_buffer >>= n; self.bits_used -= n; } #[inline(always)] pub fn start_sync_search(&mut self) -> ([u8; 4], usize) { let mut buf = [0u8; 4]; self.bit_buffer <<= self.bits_used & 7; self.bits_used -= self.bits_used & 7; let mut len = 0; while self.bits_used >= 8 { buf[len] = self.bit_buffer as u8; len += 1; self.bit_buffer >>= 8; self.bits_used -= 8; } (buf, len) } #[inline(always)] pub fn init_bits(&mut self) { self.bit_buffer = 0; self.bits_used = 0; } #[inline(always)] pub fn prime(&mut self, bits: u8, value: u64) { let value = value & ((1 << bits) - 1); self.bit_buffer += value << self.bits_used; self.bits_used += bits; } #[inline(always)] pub fn return_unused_bytes(&mut self) { let len = self.bits_used >> 3; // SAFETY: ptr is advanced whenever bits_used is incremented by 8, so this sub is always // in bounds. self.ptr = unsafe { self.ptr.sub(len as usize) }; self.bits_used -= len << 3; self.bit_buffer &= (1u64 << self.bits_used) - 1u64; assert!(self.bits_used <= 32); } } #[cfg(feature = "std")] impl std::io::Read for BitReader<'_> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { assert_eq!(self.bits_used, 0, "bit buffer not cleared before read"); let number_of_bytes = Ord::min(buf.len(), self.bytes_remaining()); // SAFETY: `buf` is a mutable (exclusive) reference, so it cannot overlap the memory that // the reader contains unsafe { core::ptr::copy_nonoverlapping(self.ptr, buf.as_mut_ptr(), number_of_bytes) } self.ptr = unsafe { self.ptr.add(number_of_bytes) }; Ok(number_of_bytes) } } zlib-rs-0.5.2/src/inflate/inffixed_tbl.rs000064400000000000000000000261751046102023000164410ustar 00000000000000use crate::Code; const fn code(op: u8, bits: u8, val: u16) -> Code { Code { op, bits, val } } pub(crate) const LENFIX: [Code; 512] = [ code(96, 7, 0), code(0, 8, 80), code(0, 8, 16), code(20, 8, 115), code(18, 7, 31), code(0, 8, 112), code(0, 8, 48), code(0, 9, 192), code(16, 7, 10), code(0, 8, 96), code(0, 8, 32), code(0, 9, 160), code(0, 8, 0), code(0, 8, 128), code(0, 8, 64), code(0, 9, 224), code(16, 7, 6), code(0, 8, 88), code(0, 8, 24), code(0, 9, 144), code(19, 7, 59), code(0, 8, 120), code(0, 8, 56), code(0, 9, 208), code(17, 7, 17), code(0, 8, 104), code(0, 8, 40), code(0, 9, 176), code(0, 8, 8), code(0, 8, 136), code(0, 8, 72), code(0, 9, 240), code(16, 7, 4), code(0, 8, 84), code(0, 8, 20), code(21, 8, 227), code(19, 7, 43), code(0, 8, 116), code(0, 8, 52), code(0, 9, 200), code(17, 7, 13), code(0, 8, 100), code(0, 8, 36), code(0, 9, 168), code(0, 8, 4), code(0, 8, 132), code(0, 8, 68), code(0, 9, 232), code(16, 7, 8), code(0, 8, 92), code(0, 8, 28), code(0, 9, 152), code(20, 7, 83), code(0, 8, 124), code(0, 8, 60), code(0, 9, 216), code(18, 7, 23), code(0, 8, 108), code(0, 8, 44), code(0, 9, 184), code(0, 8, 12), code(0, 8, 140), code(0, 8, 76), code(0, 9, 248), code(16, 7, 3), code(0, 8, 82), code(0, 8, 18), code(21, 8, 163), code(19, 7, 35), code(0, 8, 114), code(0, 8, 50), code(0, 9, 196), code(17, 7, 11), code(0, 8, 98), code(0, 8, 34), code(0, 9, 164), code(0, 8, 2), code(0, 8, 130), code(0, 8, 66), code(0, 9, 228), code(16, 7, 7), code(0, 8, 90), code(0, 8, 26), 
code(0, 9, 148), code(20, 7, 67), code(0, 8, 122), code(0, 8, 58), code(0, 9, 212), code(18, 7, 19), code(0, 8, 106), code(0, 8, 42), code(0, 9, 180), code(0, 8, 10), code(0, 8, 138), code(0, 8, 74), code(0, 9, 244), code(16, 7, 5), code(0, 8, 86), code(0, 8, 22), code(64, 8, 0), code(19, 7, 51), code(0, 8, 118), code(0, 8, 54), code(0, 9, 204), code(17, 7, 15), code(0, 8, 102), code(0, 8, 38), code(0, 9, 172), code(0, 8, 6), code(0, 8, 134), code(0, 8, 70), code(0, 9, 236), code(16, 7, 9), code(0, 8, 94), code(0, 8, 30), code(0, 9, 156), code(20, 7, 99), code(0, 8, 126), code(0, 8, 62), code(0, 9, 220), code(18, 7, 27), code(0, 8, 110), code(0, 8, 46), code(0, 9, 188), code(0, 8, 14), code(0, 8, 142), code(0, 8, 78), code(0, 9, 252), code(96, 7, 0), code(0, 8, 81), code(0, 8, 17), code(21, 8, 131), code(18, 7, 31), code(0, 8, 113), code(0, 8, 49), code(0, 9, 194), code(16, 7, 10), code(0, 8, 97), code(0, 8, 33), code(0, 9, 162), code(0, 8, 1), code(0, 8, 129), code(0, 8, 65), code(0, 9, 226), code(16, 7, 6), code(0, 8, 89), code(0, 8, 25), code(0, 9, 146), code(19, 7, 59), code(0, 8, 121), code(0, 8, 57), code(0, 9, 210), code(17, 7, 17), code(0, 8, 105), code(0, 8, 41), code(0, 9, 178), code(0, 8, 9), code(0, 8, 137), code(0, 8, 73), code(0, 9, 242), code(16, 7, 4), code(0, 8, 85), code(0, 8, 21), code(16, 8, 258), code(19, 7, 43), code(0, 8, 117), code(0, 8, 53), code(0, 9, 202), code(17, 7, 13), code(0, 8, 101), code(0, 8, 37), code(0, 9, 170), code(0, 8, 5), code(0, 8, 133), code(0, 8, 69), code(0, 9, 234), code(16, 7, 8), code(0, 8, 93), code(0, 8, 29), code(0, 9, 154), code(20, 7, 83), code(0, 8, 125), code(0, 8, 61), code(0, 9, 218), code(18, 7, 23), code(0, 8, 109), code(0, 8, 45), code(0, 9, 186), code(0, 8, 13), code(0, 8, 141), code(0, 8, 77), code(0, 9, 250), code(16, 7, 3), code(0, 8, 83), code(0, 8, 19), code(21, 8, 195), code(19, 7, 35), code(0, 8, 115), code(0, 8, 51), code(0, 9, 198), code(17, 7, 11), code(0, 8, 99), code(0, 8, 35), code(0, 9, 166), code(0, 8, 3), code(0, 8, 131), code(0, 8, 67), code(0, 9, 230), code(16, 7, 7), code(0, 8, 91), code(0, 8, 27), code(0, 9, 150), code(20, 7, 67), code(0, 8, 123), code(0, 8, 59), code(0, 9, 214), code(18, 7, 19), code(0, 8, 107), code(0, 8, 43), code(0, 9, 182), code(0, 8, 11), code(0, 8, 139), code(0, 8, 75), code(0, 9, 246), code(16, 7, 5), code(0, 8, 87), code(0, 8, 23), code(64, 8, 0), code(19, 7, 51), code(0, 8, 119), code(0, 8, 55), code(0, 9, 206), code(17, 7, 15), code(0, 8, 103), code(0, 8, 39), code(0, 9, 174), code(0, 8, 7), code(0, 8, 135), code(0, 8, 71), code(0, 9, 238), code(16, 7, 9), code(0, 8, 95), code(0, 8, 31), code(0, 9, 158), code(20, 7, 99), code(0, 8, 127), code(0, 8, 63), code(0, 9, 222), code(18, 7, 27), code(0, 8, 111), code(0, 8, 47), code(0, 9, 190), code(0, 8, 15), code(0, 8, 143), code(0, 8, 79), code(0, 9, 254), code(96, 7, 0), code(0, 8, 80), code(0, 8, 16), code(20, 8, 115), code(18, 7, 31), code(0, 8, 112), code(0, 8, 48), code(0, 9, 193), code(16, 7, 10), code(0, 8, 96), code(0, 8, 32), code(0, 9, 161), code(0, 8, 0), code(0, 8, 128), code(0, 8, 64), code(0, 9, 225), code(16, 7, 6), code(0, 8, 88), code(0, 8, 24), code(0, 9, 145), code(19, 7, 59), code(0, 8, 120), code(0, 8, 56), code(0, 9, 209), code(17, 7, 17), code(0, 8, 104), code(0, 8, 40), code(0, 9, 177), code(0, 8, 8), code(0, 8, 136), code(0, 8, 72), code(0, 9, 241), code(16, 7, 4), code(0, 8, 84), code(0, 8, 20), code(21, 8, 227), code(19, 7, 43), code(0, 8, 116), code(0, 8, 52), code(0, 9, 201), code(17, 7, 13), code(0, 8, 100), 
code(0, 8, 36), code(0, 9, 169), code(0, 8, 4), code(0, 8, 132), code(0, 8, 68), code(0, 9, 233), code(16, 7, 8), code(0, 8, 92), code(0, 8, 28), code(0, 9, 153), code(20, 7, 83), code(0, 8, 124), code(0, 8, 60), code(0, 9, 217), code(18, 7, 23), code(0, 8, 108), code(0, 8, 44), code(0, 9, 185), code(0, 8, 12), code(0, 8, 140), code(0, 8, 76), code(0, 9, 249), code(16, 7, 3), code(0, 8, 82), code(0, 8, 18), code(21, 8, 163), code(19, 7, 35), code(0, 8, 114), code(0, 8, 50), code(0, 9, 197), code(17, 7, 11), code(0, 8, 98), code(0, 8, 34), code(0, 9, 165), code(0, 8, 2), code(0, 8, 130), code(0, 8, 66), code(0, 9, 229), code(16, 7, 7), code(0, 8, 90), code(0, 8, 26), code(0, 9, 149), code(20, 7, 67), code(0, 8, 122), code(0, 8, 58), code(0, 9, 213), code(18, 7, 19), code(0, 8, 106), code(0, 8, 42), code(0, 9, 181), code(0, 8, 10), code(0, 8, 138), code(0, 8, 74), code(0, 9, 245), code(16, 7, 5), code(0, 8, 86), code(0, 8, 22), code(64, 8, 0), code(19, 7, 51), code(0, 8, 118), code(0, 8, 54), code(0, 9, 205), code(17, 7, 15), code(0, 8, 102), code(0, 8, 38), code(0, 9, 173), code(0, 8, 6), code(0, 8, 134), code(0, 8, 70), code(0, 9, 237), code(16, 7, 9), code(0, 8, 94), code(0, 8, 30), code(0, 9, 157), code(20, 7, 99), code(0, 8, 126), code(0, 8, 62), code(0, 9, 221), code(18, 7, 27), code(0, 8, 110), code(0, 8, 46), code(0, 9, 189), code(0, 8, 14), code(0, 8, 142), code(0, 8, 78), code(0, 9, 253), code(96, 7, 0), code(0, 8, 81), code(0, 8, 17), code(21, 8, 131), code(18, 7, 31), code(0, 8, 113), code(0, 8, 49), code(0, 9, 195), code(16, 7, 10), code(0, 8, 97), code(0, 8, 33), code(0, 9, 163), code(0, 8, 1), code(0, 8, 129), code(0, 8, 65), code(0, 9, 227), code(16, 7, 6), code(0, 8, 89), code(0, 8, 25), code(0, 9, 147), code(19, 7, 59), code(0, 8, 121), code(0, 8, 57), code(0, 9, 211), code(17, 7, 17), code(0, 8, 105), code(0, 8, 41), code(0, 9, 179), code(0, 8, 9), code(0, 8, 137), code(0, 8, 73), code(0, 9, 243), code(16, 7, 4), code(0, 8, 85), code(0, 8, 21), code(16, 8, 258), code(19, 7, 43), code(0, 8, 117), code(0, 8, 53), code(0, 9, 203), code(17, 7, 13), code(0, 8, 101), code(0, 8, 37), code(0, 9, 171), code(0, 8, 5), code(0, 8, 133), code(0, 8, 69), code(0, 9, 235), code(16, 7, 8), code(0, 8, 93), code(0, 8, 29), code(0, 9, 155), code(20, 7, 83), code(0, 8, 125), code(0, 8, 61), code(0, 9, 219), code(18, 7, 23), code(0, 8, 109), code(0, 8, 45), code(0, 9, 187), code(0, 8, 13), code(0, 8, 141), code(0, 8, 77), code(0, 9, 251), code(16, 7, 3), code(0, 8, 83), code(0, 8, 19), code(21, 8, 195), code(19, 7, 35), code(0, 8, 115), code(0, 8, 51), code(0, 9, 199), code(17, 7, 11), code(0, 8, 99), code(0, 8, 35), code(0, 9, 167), code(0, 8, 3), code(0, 8, 131), code(0, 8, 67), code(0, 9, 231), code(16, 7, 7), code(0, 8, 91), code(0, 8, 27), code(0, 9, 151), code(20, 7, 67), code(0, 8, 123), code(0, 8, 59), code(0, 9, 215), code(18, 7, 19), code(0, 8, 107), code(0, 8, 43), code(0, 9, 183), code(0, 8, 11), code(0, 8, 139), code(0, 8, 75), code(0, 9, 247), code(16, 7, 5), code(0, 8, 87), code(0, 8, 23), code(64, 8, 0), code(19, 7, 51), code(0, 8, 119), code(0, 8, 55), code(0, 9, 207), code(17, 7, 15), code(0, 8, 103), code(0, 8, 39), code(0, 9, 175), code(0, 8, 7), code(0, 8, 135), code(0, 8, 71), code(0, 9, 239), code(16, 7, 9), code(0, 8, 95), code(0, 8, 31), code(0, 9, 159), code(20, 7, 99), code(0, 8, 127), code(0, 8, 63), code(0, 9, 223), code(18, 7, 27), code(0, 8, 111), code(0, 8, 47), code(0, 9, 191), code(0, 8, 15), code(0, 8, 143), code(0, 8, 79), code(0, 9, 255), ]; pub(crate) const 
DISTFIX: [Code; 32] = [ code(16, 5, 1), code(23, 5, 257), code(19, 5, 17), code(27, 5, 4097), code(17, 5, 5), code(25, 5, 1025), code(21, 5, 65), code(29, 5, 16385), code(16, 5, 3), code(24, 5, 513), code(20, 5, 33), code(28, 5, 8193), code(18, 5, 9), code(26, 5, 2049), code(22, 5, 129), code(64, 5, 0), code(16, 5, 2), code(23, 5, 385), code(19, 5, 25), code(27, 5, 6145), code(17, 5, 7), code(25, 5, 1537), code(21, 5, 97), code(29, 5, 24577), code(16, 5, 4), code(24, 5, 769), code(20, 5, 49), code(28, 5, 12289), code(18, 5, 13), code(26, 5, 3073), code(22, 5, 193), code(64, 5, 0), ]; zlib-rs-0.5.2/src/inflate/inftrees.rs000064400000000000000000000231661046102023000156200ustar 00000000000000#![forbid(unsafe_code)] use crate::{Code, ENOUGH_DISTS, ENOUGH_LENS}; pub(crate) enum CodeType { Codes, Lens, Dists, } const MAX_BITS: usize = 15; fn min_max(count: [u16; N]) -> (usize, usize) { let mut max = MAX_BITS; while max >= 1 { if count[max] != 0 { break; } max -= 1; } let mut min = 1; while min < max { if count[min] != 0 { break; } min += 1; } (min, max) } /// Length codes 257..285 base const LBASE: [u16; 31] = [ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0, ]; /// Length codes 257..285 extra const LEXT: [u16; 31] = [ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 77, 202, ]; /// Distance codes 0..29 base const DBASE: [u16; 32] = [ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0, ]; /// Distance codes 0..29 extra const DEXT: [u16; 32] = [ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 64, 64, ]; #[repr(i32)] #[derive(Debug, PartialEq, Eq)] pub(crate) enum InflateTable { EnoughIsNotEnough = 1, Success { root: usize, used: usize } = 0, InvalidCode = -1, } pub(crate) fn inflate_table( codetype: CodeType, lens: &[u16], codes: usize, table: &mut [Code], bits: usize, work: &mut [u16], ) -> InflateTable { // number of codes of each length let mut count = [0u16; MAX_BITS + 1]; for len in lens[0..codes].iter().copied() { count[len as usize] += 1; } let mut root = bits; let (min, max) = min_max(count); root = Ord::min(root, max); root = Ord::max(root, min); if max == 0 { // no symbols to code at all let code = Code { op: 64, bits: 1, val: 0, }; table[0] = code; table[1] = code; return InflateTable::Success { root: 1, used: 2 }; } /* check for an over-subscribed or incomplete set of lengths */ let mut left = 1i32; let mut len = 1; while len <= MAX_BITS { left <<= 1; left -= count[len] as i32; if left < 0 { // over-subscribed return InflateTable::InvalidCode; } len += 1; } if left > 0 && (matches!(codetype, CodeType::Codes) || max != 1) { // incomplete set return InflateTable::InvalidCode; } /* generate offsets into symbol table for each length for sorting */ // offsets in table for each length let mut offs = [0u16; MAX_BITS + 1]; for len in 1..MAX_BITS { offs[len + 1] = offs[len] + count[len]; } /* sort symbols by length, by symbol order within each length */ for (sym, len) in lens[0..codes].iter().copied().enumerate() { if len != 0 { let offset = offs[len as usize]; offs[len as usize] += 1; work[offset as usize] = sym as u16; } } let (base, extra, match_) = match codetype { CodeType::Codes => (&[] as &[_], &[] as &[_], 20), CodeType::Lens => (&LBASE[..], &LEXT[..], 257), CodeType::Dists => 
(&DBASE[..], &DEXT[..], 0), }; let mut used = 1 << root; /* check available table space */ if matches!(codetype, CodeType::Lens) && used > ENOUGH_LENS { return InflateTable::EnoughIsNotEnough; } if matches!(codetype, CodeType::Dists) && used > ENOUGH_DISTS { return InflateTable::EnoughIsNotEnough; } let mut huff = 0; // starting code let mut reversed_huff = 0u32; // starting code, reversed let mut sym = 0; let mut len = min; let mut next = 0usize; // index into `table` let mut curr = root; let mut drop_ = 0; let mut low = usize::MAX; // trigger new subtable when len > root let mask = used - 1; /* mask for comparing low */ // process all codes and make table entries 'outer: loop { // create table entry let here = if work[sym] >= match_ { Code { bits: (len - drop_) as u8, op: extra[(work[sym] - match_) as usize] as u8, val: base[(work[sym] - match_) as usize], } } else if work[sym] + 1 < match_ { Code { bits: (len - drop_) as u8, op: 0, val: work[sym], } } else { Code { bits: (len - drop_) as u8, op: 0b01100000, val: 0, } }; // replicate for those indices with low len bits equal to huff let incr = 1 << (len - drop_); let min = 1 << curr; // also has the name 'fill' in the C code let base = &mut table[next + (huff >> drop_)..]; for fill in (0..min).step_by(incr) { base[fill] = here; } // backwards increment the len-bit code huff reversed_huff = reversed_huff.wrapping_add(0x80000000u32 >> (len - 1)); huff = reversed_huff.reverse_bits() as usize; // go to next symbol, update count, len sym += 1; count[len] -= 1; if count[len] == 0 { if len == max { break 'outer; } len = lens[work[sym] as usize] as usize; } // create new sub-table if needed if len > root && (huff & mask) != low { /* if first time, transition to sub-tables */ if drop_ == 0 { drop_ = root; } /* increment past last table */ next += min; /* here min is 1 << curr */ /* determine length of next table */ curr = len - drop_; let mut left = 1 << curr; while curr + drop_ < max { left -= count[curr + drop_] as i32; if left <= 0 { break; } curr += 1; left <<= 1; } /* check for enough space */ used += 1usize << curr; if matches!(codetype, CodeType::Lens) && used > ENOUGH_LENS { return InflateTable::EnoughIsNotEnough; } if matches!(codetype, CodeType::Dists) && used > ENOUGH_DISTS { return InflateTable::EnoughIsNotEnough; } /* point entry in root table to sub-table */ low = huff & mask; table[low] = Code { op: curr as u8, bits: root as u8, val: next as u16, }; } } /* fill in remaining table entry if code is incomplete (guaranteed to have at most one remaining entry, since if the code is incomplete, the maximum code length that was allowed to get this far is one bit) */ if huff != 0 { let here = Code { op: 64, bits: (len - drop_) as u8, val: 0, }; table[next..][huff] = here; } /* set return parameters */ InflateTable::Success { root, used } } #[cfg(test)] mod test { use super::*; #[test] fn not_enough_errors() { // we need to call inflate_table() directly in order to manifest // not-enough errors, since zlib insures that enough is always enough let table = [Code::default(); crate::ENOUGH_DISTS]; let mut work = [0; 16]; let mut lens: [_; 16] = core::array::from_fn(|i| (i + 1) as u16); lens[15] = 15; let mut next = table; let bits = 15; let ret = inflate_table(CodeType::Dists, &lens, 16, &mut next, bits, &mut work); assert_eq!(ret, InflateTable::EnoughIsNotEnough); let mut next = table; let bits = 1; let ret = inflate_table(CodeType::Dists, &lens, 16, &mut next, bits, &mut work); assert_eq!(ret, InflateTable::EnoughIsNotEnough); } fn 
build_fixed_length_table(work: &mut [u16]) -> [Code; 512] { let mut lens = [0; 288]; // literal/length table let mut sym = 0usize; while sym < 144 { lens[sym] = 8; sym += 1; } while sym < 256 { lens[sym] = 9; sym += 1; } while sym < 280 { lens[sym] = 7; sym += 1; } while sym < 288 { lens[sym] = 8; sym += 1; } let mut next = [Code::default(); 512]; let bits = 9; inflate_table(CodeType::Lens, &lens, 288, &mut next, bits, work); core::array::from_fn(|i| { let mut code = next[i]; code.op = if i & 0b0111_1111 == 99 { 64 } else { code.op }; code }) } #[test] fn generate_fixed_length_table() { let mut work = [0; 512]; let generated = build_fixed_length_table(&mut work); assert_eq!(generated, crate::inflate::inffixed_tbl::LENFIX); } fn build_fixed_distance_table(work: &mut [u16]) -> [Code; 32] { let mut lens = [0; 288]; let mut sym = 0; while sym < 32 { lens[sym] = 5; sym += 1; } let mut next = [Code::default(); 32]; let bits = 5; inflate_table(CodeType::Dists, &lens, 32, &mut next, bits, work); next } #[test] fn generate_fixed_distance_table() { let mut work = [0; 512]; let generated = build_fixed_distance_table(&mut work); assert_eq!(generated, crate::inflate::inffixed_tbl::DISTFIX); } } zlib-rs-0.5.2/src/inflate/window.rs000064400000000000000000000173261046102023000153110ustar 00000000000000use crate::{ adler32::{adler32, adler32_fold_copy}, allocate::Allocator, crc32::Crc32Fold, weak_slice::WeakSliceMut, }; // translation guide: // // wsize -> buf.capacity() // wnext -> buf.ptr // whave -> buf.filled.len() #[derive(Debug)] pub struct Window<'a> { buf: WeakSliceMut<'a, u8>, have: usize, // number of bytes logically written to the window. this can be higher than // buf.len() if we run out of space in the window next: usize, // write head } impl<'a> Window<'a> { pub fn into_raw_parts(self) -> (*mut u8, usize) { self.buf.into_raw_parts() } pub fn is_empty(&self) -> bool { self.size() == 0 } pub fn size(&self) -> usize { // `self.len == 0` is used for uninitialized buffers assert!(self.buf.is_empty() || self.buf.len() >= Self::padding()); self.buf.len().saturating_sub(Self::padding()) } /// number of bytes in the window. Saturates at `Self::capacity`. 
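///
/// For example (see the tests below), a 16-byte window that has been extended with
/// 5 and then 7 bytes reports `have() == 12`; once more data has been written than
/// the window can hold, `have()` stays pinned at the window size.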
pub fn have(&self) -> usize { self.have } /// Position where the next byte will be written pub fn next(&self) -> usize { self.next } pub fn empty() -> Self { Self { buf: WeakSliceMut::empty(), have: 0, next: 0, } } pub fn clear(&mut self) { self.have = 0; self.next = 0; } pub fn as_slice(&self) -> &[u8] { &self.buf.as_slice()[..self.have] } pub fn as_ptr(&self) -> *const u8 { self.buf.as_ptr() } #[cfg(test)] fn extend_adler32(&mut self, slice: &[u8], checksum: &mut u32) { self.extend(slice, 0, true, checksum, &mut Crc32Fold::new()); } pub fn extend( &mut self, slice: &[u8], flags: i32, update_checksum: bool, checksum: &mut u32, crc_fold: &mut Crc32Fold, ) { let len = slice.len(); let wsize = self.size(); if len >= wsize { // We have to split the checksum over non-copied and copied bytes let pos = len.saturating_sub(self.size()); let (non_window_slice, window_slice) = slice.split_at(pos); if update_checksum { if flags != 0 { crc_fold.fold(non_window_slice, 0); crc_fold.fold_copy(&mut self.buf.as_mut_slice()[..wsize], window_slice); } else { *checksum = adler32(*checksum, non_window_slice); *checksum = adler32_fold_copy(*checksum, self.buf.as_mut_slice(), window_slice); } } else { self.buf.as_mut_slice()[..wsize].copy_from_slice(window_slice); } self.next = 0; self.have = self.size(); } else { let dist = Ord::min(wsize - self.next, slice.len()); // the end part goes onto the end of the window. The start part wraps around and is // written to the start of the window. let (end_part, start_part) = slice.split_at(dist); if update_checksum { let dst = &mut self.buf.as_mut_slice()[self.next..][..end_part.len()]; if flags != 0 { crc_fold.fold_copy(dst, end_part); } else { *checksum = adler32_fold_copy(*checksum, dst, end_part); } } else { self.buf.as_mut_slice()[self.next..][..end_part.len()].copy_from_slice(end_part); } if !start_part.is_empty() { let dst = &mut self.buf.as_mut_slice()[..start_part.len()]; if update_checksum { if flags != 0 { crc_fold.fold_copy(dst, start_part); } else { *checksum = adler32_fold_copy(*checksum, dst, start_part); } } else { dst.copy_from_slice(start_part); } self.next = start_part.len(); self.have = self.size(); } else { self.next += dist; if self.next == self.size() { self.next = 0; } if self.have < self.size() { self.have += dist; } } } } pub fn new_in(alloc: &Allocator<'a>, window_bits: usize) -> Option { let len = (1 << window_bits) + Self::padding(); let ptr = alloc.allocate_zeroed_buffer(len)?; Some(Self { buf: unsafe { WeakSliceMut::from_raw_parts_mut(ptr.as_ptr(), len) }, have: 0, next: 0, }) } pub fn clone_in(&self, alloc: &Allocator<'a>) -> Option { let len = self.buf.len(); let ptr = alloc.allocate_zeroed_buffer(len)?; Some(Self { buf: unsafe { WeakSliceMut::from_raw_parts_mut(ptr.as_ptr(), len) }, have: self.have, next: self.next, }) } // padding required so that SIMD operations going out-of-bounds are not a problem pub fn padding() -> usize { 64 // very conservative } } #[cfg(test)] mod test { use super::*; fn init_window(window_bits_log2: usize) -> Window<'static> { let mut window = Window::new_in(&crate::allocate::RUST, window_bits_log2).unwrap(); window.have = 0; window.next = 0; window } #[test] fn extend_in_bounds() { let mut checksum = 0; let mut window = init_window(4); window.extend_adler32(&[1; 5], &mut checksum); assert_eq!(window.have, 5); assert_eq!(window.next, 5); let slice = &window.buf.as_slice()[..window.size()]; assert_eq!(&[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], slice); window.extend_adler32(&[2; 7], &mut checksum); 
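        // after the second extend, 5 + 7 = 12 bytes are in the window and the
        // write head has advanced to position 12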
assert_eq!(window.have, 12); assert_eq!(window.next, 12); let slice = &window.buf.as_slice()[..window.size()]; assert_eq!(&[1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0], slice); assert_eq!(checksum, 6946835); unsafe { crate::allocate::RUST.deallocate( window.buf.as_mut_slice().as_mut_ptr(), window.buf.as_slice().len(), ) } } #[test] fn extend_crosses_bounds() { let mut checksum = 0; let mut window = init_window(2); window.extend_adler32(&[1; 3], &mut checksum); assert_eq!(window.have, 3); assert_eq!(window.next, 3); let slice = &window.buf.as_slice()[..window.size()]; assert_eq!(&[1, 1, 1, 0], slice); window.extend_adler32(&[2; 3], &mut checksum); assert_eq!(window.have, 4); assert_eq!(window.next, 2); let slice = &window.buf.as_slice()[..window.size()]; assert_eq!(&[2, 2, 1, 2], slice); assert_eq!(checksum, 1769481); unsafe { crate::allocate::RUST.deallocate( window.buf.as_mut_slice().as_mut_ptr(), window.buf.as_slice().len(), ) } } #[test] fn extend_out_of_bounds() { let mut checksum = 0; let mut window = init_window(3); // adds 9 numbers, that won't fit into a window of size 8 window.extend_adler32(&[1, 2, 3, 4, 5, 6, 7, 8, 9], &mut checksum); assert_eq!(window.have, 8); assert_eq!(window.next, 0); let slice = &window.as_slice()[..window.size()]; assert_eq!(&[2, 3, 4, 5, 6, 7, 8, 9], slice); assert_eq!(checksum, 10813485); unsafe { crate::allocate::RUST.deallocate( window.buf.as_mut_slice().as_mut_ptr(), window.as_slice().len(), ) } } } zlib-rs-0.5.2/src/inflate/writer.rs000064400000000000000000000333521046102023000153130ustar 00000000000000#![allow(unsafe_op_in_unsafe_fn)] // FIXME use core::fmt; use core::mem::MaybeUninit; use core::ops::Range; use crate::cpu_features::CpuFeatures; use crate::weak_slice::WeakSliceMut; pub struct Writer<'a> { buf: WeakSliceMut<'a, MaybeUninit>, filled: usize, } impl<'a> Writer<'a> { /// Creates a new `Writer` from a fully initialized buffer. #[inline] pub fn new(buf: &'a mut [u8]) -> Writer<'a> { unsafe { Self::new_uninit(buf.as_mut_ptr(), buf.len()) } } /// Creates a new `Writer` from an uninitialized buffer. /// /// # Safety /// /// The arguments must satisfy the requirements of [`core::slice::from_raw_parts_mut`]. #[inline] pub unsafe fn new_uninit(ptr: *mut u8, len: usize) -> Writer<'a> { let buf = unsafe { WeakSliceMut::from_raw_parts_mut(ptr as *mut MaybeUninit, len) }; Writer { buf, filled: 0 } } /// Pointer to where the next byte will be written #[inline] pub fn next_out(&mut self) -> *mut MaybeUninit { self.buf.as_mut_ptr().wrapping_add(self.filled).cast() } /// Returns the total capacity of the buffer. #[inline] pub fn capacity(&self) -> usize { self.buf.len() } /// Returns the length of the filled part of the buffer #[inline] pub fn len(&self) -> usize { self.filled } /// Returns a shared reference to the filled portion of the buffer. #[inline] pub fn filled(&self) -> &[u8] { // SAFETY: the filled area of the buffer is always initialized, and self.filled is always // in-bounds. unsafe { core::slice::from_raw_parts(self.buf.as_ptr().cast(), self.filled) } } /// Returns the number of bytes at the end of the slice that have not yet been filled. 
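    /// Equivalent to `capacity() - len()`.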
#[inline] pub fn remaining(&self) -> usize { self.capacity() - self.filled } #[inline] pub fn is_full(&self) -> bool { self.filled == self.buf.len() } pub fn push(&mut self, byte: u8) { self.buf.as_mut_slice()[self.filled] = MaybeUninit::new(byte); self.filled += 1; } /// Appends data to the buffer #[inline(always)] pub fn extend(&mut self, buf: &[u8]) { // using simd here (on x86_64) was not fruitful self.buf.as_mut_slice()[self.filled..][..buf.len()].copy_from_slice(slice_to_uninit(buf)); self.filled += buf.len(); } #[inline(always)] pub fn extend_from_window(&mut self, window: &super::window::Window, range: Range) { self.extend_from_window_with_features::<{ CpuFeatures::NONE }>(window, range) } pub fn extend_from_window_with_features( &mut self, window: &super::window::Window, range: Range, ) { match FEATURES { #[cfg(target_arch = "x86_64")] CpuFeatures::AVX2 => self.extend_from_window_help::<32>(window, range), _ => self.extend_from_window_runtime_dispatch(window, range), } } fn extend_from_window_runtime_dispatch( &mut self, window: &super::window::Window, range: Range, ) { // NOTE: the dynamic check for avx512 makes avx2 slower. Measure this carefully before re-enabling // // #[cfg(target_arch = "x86_64")] // if crate::cpu_features::is_enabled_avx512() { // return self.extend_from_window_help::<64>(window, range); // } #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_avx2_and_bmi2() { return self.extend_from_window_help::<32>(window, range); } #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_sse() { return self.extend_from_window_help::<16>(window, range); } #[cfg(target_arch = "aarch64")] if crate::cpu_features::is_enabled_neon() { return self.extend_from_window_help::<16>(window, range); } #[cfg(target_arch = "wasm32")] if crate::cpu_features::is_enabled_simd128() { return self.extend_from_window_help::<16>(window, range); } self.extend_from_window_help::<8>(window, range) } #[inline(always)] fn extend_from_window_help( &mut self, window: &super::window::Window, range: Range, ) { let len = range.end - range.start; if self.remaining() >= len + N { // SAFETY: we know that our window has at least a core::mem::size_of::() extra bytes // at the end, making it always safe to perform an (unaligned) Chunk read anywhere in // the window slice. // // The calling function checks for CPU features requirements for C. unsafe { let src = window.as_ptr(); Self::copy_chunk_unchecked::( src.wrapping_add(range.start).cast(), self.next_out(), len, ) } } else { let buf = &window.as_slice()[range]; self.buf.as_mut_slice()[self.filled..][..buf.len()] .copy_from_slice(slice_to_uninit(buf)); } self.filled += len; } #[inline(always)] pub fn copy_match(&mut self, offset_from_end: usize, length: usize) { self.copy_match_with_features::<{ CpuFeatures::NONE }>(offset_from_end, length) } #[inline(always)] pub fn copy_match_with_features( &mut self, offset_from_end: usize, length: usize, ) { match FEATURES { #[cfg(target_arch = "x86_64")] CpuFeatures::AVX2 => self.copy_match_help::<32>(offset_from_end, length), _ => self.copy_match_runtime_dispatch(offset_from_end, length), } } fn copy_match_runtime_dispatch(&mut self, offset_from_end: usize, length: usize) { // NOTE: the dynamic check for avx512 makes avx2 slower. 
Measure this carefully before re-enabling // // #[cfg(target_arch = "x86_64")] // if crate::cpu_features::is_enabled_avx512() { // return self.copy_match_help::(offset_from_end, length); // } #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_avx2_and_bmi2() { return self.copy_match_help::<32>(offset_from_end, length); } #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_sse() { return self.copy_match_help::<16>(offset_from_end, length); } #[cfg(target_arch = "aarch64")] if crate::cpu_features::is_enabled_neon() { return self.copy_match_help::<16>(offset_from_end, length); } #[cfg(target_arch = "wasm32")] if crate::cpu_features::is_enabled_simd128() { return self.copy_match_help::<16>(offset_from_end, length); } self.copy_match_help::<8>(offset_from_end, length) } #[inline(always)] fn copy_match_help(&mut self, offset_from_end: usize, length: usize) { let capacity = self.buf.len(); let len = Ord::min(self.filled + length + N, capacity); let buf = &mut self.buf.as_mut_slice()[..len]; let current = self.filled; self.filled += length; // Note also that the referenced string may overlap the current // position; for example, if the last 2 bytes decoded have values // X and Y, a string reference with // adds X,Y,X,Y,X to the output stream. if length > offset_from_end { match offset_from_end { 1 => { // this will just repeat this value many times let element = buf[current - 1]; buf[current..][..length].fill(element); } _ => { // there is a SIMD implementation of this logic, which _should_ be faster, but // isn't in measurements on x86_64. It still might be for other architectures, // adds a lot of complexity and unsafe code. for i in 0..length { buf[current + i] = buf[current - offset_from_end + i]; } } } } else { Self::copy_chunked_within::(buf, capacity, current, offset_from_end, length) } } #[inline(always)] fn copy_chunked_within( buf: &mut [MaybeUninit], capacity: usize, current: usize, offset_from_end: usize, length: usize, ) { let start = current.checked_sub(offset_from_end).expect("in bounds"); if current + length + N < capacity { let ptr = buf.as_mut_ptr(); // SAFETY: if statement and checked_sub ensures we stay in bounds. unsafe { Self::copy_chunk_unchecked::(ptr.add(start), ptr.add(current), length) } } else { // a full simd copy does not fit in the output buffer buf.copy_within(start..start + length, current); } } /// # Safety /// /// `src` must be safe to perform unaligned reads in `core::mem::size_of::()` chunks until /// `end` is reached. `dst` must be safe to (unalingned) write that number of chunks. #[inline(always)] unsafe fn copy_chunk_unchecked( mut src: *const MaybeUninit, mut dst: *mut MaybeUninit, length: usize, ) { let end = src.add(length); let chunk = load_chunk::(src); store_chunk::(dst, chunk); src = src.add(N); dst = dst.add(N); while src < end { let chunk = load_chunk::(src); store_chunk::(dst, chunk); src = src.add(N); dst = dst.add(N); } } } /// # Safety /// /// Must be valid to read a `[u8; N]` value from `from` with an unaligned read. #[inline(always)] unsafe fn load_chunk(from: *const MaybeUninit) -> [MaybeUninit; N] { core::ptr::read_unaligned(from.cast::<[MaybeUninit; N]>()) } /// # Safety /// /// Must be valid to write a `[u8; N]` value to `out` with an unaligned write. 
#[inline(always)] unsafe fn store_chunk(out: *mut MaybeUninit, chunk: [MaybeUninit; N]) { core::ptr::write_unaligned(out.cast(), chunk) } impl fmt::Debug for Writer<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Writer") .field("filled", &self.filled) .field("capacity", &self.capacity()) .finish() } } fn slice_to_uninit(slice: &[u8]) -> &[MaybeUninit] { unsafe { &*(slice as *const [u8] as *const [MaybeUninit]) } } #[cfg(test)] mod test { use super::*; const N: usize = 128; const M: usize = 64; fn test_array() -> [MaybeUninit; N] { core::array::from_fn(|i| MaybeUninit::new(if i < M { i as u8 } else { 0xAAu8 })) } fn test_copy_match(offset_from_end: usize, length: usize) { let mut buf = test_array(); let mut writer = Writer { buf: unsafe { WeakSliceMut::from_raw_parts_mut(buf.as_mut_ptr(), buf.len()) }, filled: M, }; writer.copy_match(offset_from_end, length); assert_eq!(writer.filled, M + length); let mut naive = test_array(); for i in 0..length { naive[M + i] = naive[M - offset_from_end + i]; } let buf = unsafe { core::mem::transmute::<[MaybeUninit; 128], [u8; N]>(buf) }; let naive = unsafe { core::mem::transmute::<[MaybeUninit; 128], [u8; N]>(naive) }; assert_eq!( buf[M..][..length], naive[M..][..length], "{offset_from_end} {length}" ); } #[test] fn copy_chunk_unchecked() { let offset_from_end = 17; let length = 17; macro_rules! helper { ($func:expr) => { let mut buf = test_array(); let mut writer = Writer { buf: unsafe { WeakSliceMut::from_raw_parts_mut(buf.as_mut_ptr(), buf.len()) }, filled: M, }; $func(&mut writer, offset_from_end, length); }; } #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_avx512() { helper!(Writer::copy_match_help::<64>); } #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_avx2_and_bmi2() { helper!(Writer::copy_match_help::<32>); } #[cfg(target_arch = "x86_64")] if crate::cpu_features::is_enabled_sse() { helper!(Writer::copy_match_help::<16>); } #[cfg(target_arch = "aarch64")] if crate::cpu_features::is_enabled_neon() { helper!(Writer::copy_match_help::<16>); } #[cfg(target_arch = "wasm32")] if crate::cpu_features::is_enabled_simd128() { helper!(Writer::copy_match_help::<16>); } helper!(Writer::copy_match_help::<8>); } #[test] fn copy_match() { for offset_from_end in 1..=64 { for length in 0..=64 { test_copy_match(offset_from_end, length) } } } #[test] fn copy_match_insufficient_space_for_simd() { let mut buf = [1, 2, 3, 0xAA, 0xAA].map(MaybeUninit::new); let mut writer = Writer { buf: unsafe { WeakSliceMut::from_raw_parts_mut(buf.as_mut_ptr(), buf.len()) }, filled: 3, }; writer.copy_match(3, 2); assert_eq!(buf.map(|e| unsafe { e.assume_init() }), [1, 2, 3, 1, 2]); } } zlib-rs-0.5.2/src/inflate.rs000064400000000000000000003070051046102023000137760ustar 00000000000000#![allow(non_snake_case)] // TODO ultimately remove this #![allow(clippy::missing_safety_doc)] // obviously needs to be fixed long-term use core::ffi::{c_char, c_int, c_long, c_ulong}; use core::marker::PhantomData; use core::mem::MaybeUninit; use core::ops::ControlFlow; mod bitreader; mod inffixed_tbl; mod inftrees; mod window; mod writer; use crate::allocate::Allocator; use crate::c_api::internal_state; use crate::cpu_features::CpuFeatures; use crate::{ adler32::adler32, c_api::{gz_header, z_checksum, z_size, z_stream, Z_DEFLATED}, inflate::writer::Writer, Code, InflateFlush, ReturnCode, DEF_WBITS, MAX_WBITS, MIN_WBITS, }; use crate::crc32::{crc32, Crc32Fold}; use self::{ bitreader::BitReader, inftrees::{inflate_table, CodeType, 
InflateTable}, window::Window, }; const INFLATE_STRICT: bool = false; // SAFETY: This struct must have the same layout as [`z_stream`], so that casts and transmutations // between the two can work without UB. #[repr(C)] pub struct InflateStream<'a> { pub(crate) next_in: *mut crate::c_api::Bytef, pub(crate) avail_in: crate::c_api::uInt, pub(crate) total_in: crate::c_api::z_size, pub(crate) next_out: *mut crate::c_api::Bytef, pub(crate) avail_out: crate::c_api::uInt, pub(crate) total_out: crate::c_api::z_size, pub(crate) msg: *mut c_char, pub(crate) state: &'a mut State<'a>, pub(crate) alloc: Allocator<'a>, pub(crate) data_type: c_int, pub(crate) adler: crate::c_api::z_checksum, pub(crate) reserved: crate::c_api::uLong, } #[cfg(feature = "__internal-test")] #[doc(hidden)] pub const INFLATE_STATE_SIZE: usize = core::mem::size_of::(); #[cfg(feature = "__internal-test")] #[doc(hidden)] pub unsafe fn set_mode_dict(strm: &mut z_stream) { unsafe { (*(strm.state as *mut State)).mode = Mode::Dict; } } impl<'a> InflateStream<'a> { // z_stream and DeflateStream must have the same layout. Do our best to check if this is true. // (imperfect check, but should catch most mistakes.) const _S: () = assert!(core::mem::size_of::() == core::mem::size_of::()); const _A: () = assert!(core::mem::align_of::() == core::mem::align_of::()); /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// - `strm` satisfies the conditions of [`pointer::as_ref`] /// - if not `NULL`, `strm` as initialized using [`init`] or similar /// /// [`pointer::as_ref`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.as_ref #[inline(always)] pub unsafe fn from_stream_ref(strm: *const z_stream) -> Option<&'a Self> { { // Safety: ptr points to a valid value of type z_stream (if non-null) let stream = unsafe { strm.as_ref() }?; if stream.zalloc.is_none() || stream.zfree.is_none() { return None; } if stream.state.is_null() { return None; } } // Safety: InflateStream has an equivalent layout as z_stream unsafe { strm.cast::().as_ref() } } /// # Safety /// /// Behavior is undefined if any of the following conditions are violated: /// /// - `strm` satisfies the conditions of [`pointer::as_mut`] /// - if not `NULL`, `strm` as initialized using [`init`] or similar /// /// [`pointer::as_mut`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.as_mut #[inline(always)] pub unsafe fn from_stream_mut(strm: *mut z_stream) -> Option<&'a mut Self> { { // Safety: ptr points to a valid value of type z_stream (if non-null) let stream = unsafe { strm.as_ref() }?; if stream.zalloc.is_none() || stream.zfree.is_none() { return None; } if stream.state.is_null() { return None; } } // Safety: InflateStream has an equivalent layout as z_stream unsafe { strm.cast::().as_mut() } } fn as_z_stream_mut(&mut self) -> &mut z_stream { // safety: a valid &mut InflateStream is also a valid &mut z_stream unsafe { &mut *(self as *mut _ as *mut z_stream) } } } const MAX_BITS: u8 = 15; // maximum number of bits in a code const MAX_DIST_EXTRA_BITS: u8 = 13; // maximum number of extra distance bits // pub fn uncompress_slice<'a>( output: &'a mut [u8], input: &[u8], config: InflateConfig, ) -> (&'a mut [u8], ReturnCode) { // SAFETY: [u8] is also a valid [MaybeUninit] let output_uninit = unsafe { core::slice::from_raw_parts_mut(output.as_mut_ptr() as *mut MaybeUninit, output.len()) }; uncompress(output_uninit, input, config) } /// Inflates `source` into `dest`, and writes the final inflated size into `dest_len`. 
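///
/// The returned slice is the initialized prefix of `output`; on success the
/// accompanying [`ReturnCode`] is [`ReturnCode::Ok`].
///
/// A minimal usage sketch (illustrative only; assumes `InflateConfig::default()`
/// and a complete zlib-wrapped deflate stream as input):
///
/// ```ignore
/// let input: &[u8] = todo!("a complete zlib (deflate) stream");
/// let mut output = [core::mem::MaybeUninit::<u8>::uninit(); 1024];
/// let (decompressed, ret) = uncompress(&mut output, input, InflateConfig::default());
/// assert_eq!(ret, ReturnCode::Ok);
/// ```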
pub fn uncompress<'a>( output: &'a mut [MaybeUninit], input: &[u8], config: InflateConfig, ) -> (&'a mut [u8], ReturnCode) { let mut dest_len_ptr = output.len() as z_checksum; // for detection of incomplete stream when *destLen == 0 let mut buf = [0u8]; let mut left; let mut len = input.len() as u64; let dest = if output.is_empty() { left = 1; buf.as_mut_ptr() } else { left = output.len() as u64; dest_len_ptr = 0; output.as_mut_ptr() as *mut u8 }; let mut stream = z_stream { next_in: input.as_ptr() as *mut u8, avail_in: 0, zalloc: None, zfree: None, opaque: core::ptr::null_mut(), ..z_stream::default() }; let err = init(&mut stream, config); if err != ReturnCode::Ok { return (&mut [], err); } stream.next_out = dest; stream.avail_out = 0; let Some(stream) = (unsafe { InflateStream::from_stream_mut(&mut stream) }) else { return (&mut [], ReturnCode::StreamError); }; let err = loop { if stream.avail_out == 0 { stream.avail_out = Ord::min(left, u32::MAX as u64) as u32; left -= stream.avail_out as u64; } if stream.avail_in == 0 { stream.avail_in = Ord::min(len, u32::MAX as u64) as u32; len -= stream.avail_in as u64; } let err = unsafe { inflate(stream, InflateFlush::NoFlush) }; if err != ReturnCode::Ok { break err; } }; if !output.is_empty() { dest_len_ptr = stream.total_out; } else if stream.total_out != 0 && err == ReturnCode::BufError { left = 1; } let avail_out = stream.avail_out; end(stream); let ret = match err { ReturnCode::StreamEnd => ReturnCode::Ok, ReturnCode::NeedDict => ReturnCode::DataError, ReturnCode::BufError if (left + avail_out as u64) != 0 => ReturnCode::DataError, _ => err, }; // SAFETY: we have now initialized these bytes let output_slice = unsafe { core::slice::from_raw_parts_mut(output.as_mut_ptr() as *mut u8, dest_len_ptr as usize) }; (output_slice, ret) } #[derive(Debug, Clone, Copy)] #[repr(u8)] pub enum Mode { Head, Flags, Time, Os, ExLen, Extra, Name, Comment, HCrc, Sync, Mem, Length, Type, TypeDo, Stored, CopyBlock, Check, Len_, Len, Lit, LenExt, Dist, DistExt, Match, Table, LenLens, CodeLens, DictId, Dict, Done, Bad, } #[derive(Default, Clone, Copy)] #[allow(clippy::enum_variant_names)] enum Codes { #[default] Fixed, Codes, Len, Dist, } #[derive(Default, Clone, Copy)] struct Table { codes: Codes, bits: usize, } #[derive(Clone, Copy)] struct Flags(u8); impl Default for Flags { fn default() -> Self { Self::SANE } } impl Flags { /// set if currently processing the last block const IS_LAST_BLOCK: Self = Self(0b0000_0001); /// set if a custom dictionary was provided const HAVE_DICT: Self = Self(0b0000_0010); /// if false, allow invalid distance too far const SANE: Self = Self(0b0000_0100); pub(crate) const fn contains(self, other: Self) -> bool { debug_assert!(other.0.count_ones() == 1); self.0 & other.0 != 0 } #[inline(always)] pub(crate) fn update(&mut self, other: Self, value: bool) { if value { *self = Self(self.0 | other.0); } else { *self = Self(self.0 & !other.0); } } } #[repr(C, align(64))] pub(crate) struct State<'a> { /// Current inflate mode mode: Mode, flags: Flags, /// log base 2 of requested window size wbits: u8, /// bitflag /// /// - bit 0 true if zlib /// - bit 1 true if gzip /// - bit 2 true to validate check value wrap: u8, flush: InflateFlush, // allocated window if needed (capacity == 0 if unused) window: Window<'a>, // /// number of code length code lengths ncode: usize, /// number of length code lengths nlen: usize, /// number of distance code lengths ndist: usize, /// number of code lengths in lens[] have: usize, /// next available space in 
codes[] next: usize, // represented as an index, don't want a self-referential structure here // IO bit_reader: BitReader<'a>, writer: Writer<'a>, total: usize, /// length of a block to copy length: usize, /// distance back to copy the string from offset: usize, /// extra bits needed extra: usize, /// bits back of last unprocessed length/lit back: usize, /// initial length of match was: usize, /// size of memory copying chunk chunksize: usize, in_available: usize, out_available: usize, gzip_flags: i32, checksum: u32, crc_fold: Crc32Fold, error_message: Option<&'static str>, /// place to store gzip header if needed head: Option<&'a mut gz_header>, dmax: usize, /// table for length/literal codes len_table: Table, /// table for dist codes dist_table: Table, codes_codes: [Code; crate::ENOUGH_LENS], len_codes: [Code; crate::ENOUGH_LENS], dist_codes: [Code; crate::ENOUGH_DISTS], /// temporary storage space for code lengths lens: [u16; 320], /// work area for code table building work: [u16; 288], } impl<'a> State<'a> { fn new(reader: &'a [u8], writer: Writer<'a>) -> Self { let in_available = reader.len(); let out_available = writer.capacity(); Self { flush: InflateFlush::NoFlush, flags: Flags::default(), wrap: 0, mode: Mode::Head, length: 0, len_table: Table::default(), dist_table: Table::default(), wbits: 0, offset: 0, extra: 0, back: 0, was: 0, chunksize: 0, in_available, out_available, bit_reader: BitReader::new(reader), writer, total: 0, window: Window::empty(), head: None, lens: [0u16; 320], work: [0u16; 288], ncode: 0, nlen: 0, ndist: 0, have: 0, next: 0, error_message: None, checksum: 0, crc_fold: Crc32Fold::new(), dmax: 0, gzip_flags: 0, codes_codes: [Code::default(); crate::ENOUGH_LENS], len_codes: [Code::default(); crate::ENOUGH_LENS], dist_codes: [Code::default(); crate::ENOUGH_DISTS], } } fn len_table_ref(&self) -> &[Code] { match self.len_table.codes { Codes::Fixed => &self::inffixed_tbl::LENFIX, Codes::Codes => &self.codes_codes, Codes::Len => &self.len_codes, Codes::Dist => &self.dist_codes, } } fn dist_table_ref(&self) -> &[Code] { match self.dist_table.codes { Codes::Fixed => &self::inffixed_tbl::DISTFIX, Codes::Codes => &self.codes_codes, Codes::Len => &self.len_codes, Codes::Dist => &self.dist_codes, } } fn len_table_get(&self, index: usize) -> Code { self.len_table_ref()[index] } fn dist_table_get(&self, index: usize) -> Code { self.dist_table_ref()[index] } } // swaps endianness const fn zswap32(q: u32) -> u32 { u32::from_be(q.to_le()) } const INFLATE_FAST_MIN_HAVE: usize = 15; const INFLATE_FAST_MIN_LEFT: usize = 260; impl State<'_> { // This logic is split into its own function for two reasons // // - We get to load state to the stack; doing this in all cases is expensive, but doing it just // for Len and related states is very helpful. // - The `-Cllvm-args=-enable-dfa-jump-thread` llvm arg is able to optimize this function, but // not the entirity of `dispatch`. We get a massive boost from that pass. // // It unfortunately does duplicate the code for some of the states; deduplicating it by having // more of the states call this function is slower. fn len_and_friends(&mut self) -> ControlFlow { let avail_in = self.bit_reader.bytes_remaining(); let avail_out = self.writer.remaining(); if avail_in >= INFLATE_FAST_MIN_HAVE && avail_out >= INFLATE_FAST_MIN_LEFT { // SAFETY: INFLATE_FAST_MIN_HAVE is enough bytes remaining to satisfy the precondition. 
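            // (INFLATE_FAST_MIN_HAVE == 15, which matches the `bytes_remaining() >= 15`
            // requirement documented on `inflate_fast_help`.)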
unsafe { inflate_fast_help(self, 0) }; match self.mode { Mode::Len => {} _ => return ControlFlow::Continue(()), } } let mut mode; let mut writer; let mut bit_reader; macro_rules! load { () => { mode = self.mode; writer = core::mem::replace(&mut self.writer, Writer::new(&mut [])); bit_reader = self.bit_reader; }; } macro_rules! restore { () => { self.mode = mode; self.writer = writer; self.bit_reader = bit_reader; }; } load!(); let len_table = match self.len_table.codes { Codes::Fixed => &self::inffixed_tbl::LENFIX[..], Codes::Codes => &self.codes_codes, Codes::Len => &self.len_codes, Codes::Dist => &self.dist_codes, }; let dist_table = match self.dist_table.codes { Codes::Fixed => &self::inffixed_tbl::DISTFIX[..], Codes::Codes => &self.codes_codes, Codes::Len => &self.len_codes, Codes::Dist => &self.dist_codes, }; loop { mode = 'top: { match mode { Mode::Len => { let avail_in = bit_reader.bytes_remaining(); let avail_out = writer.remaining(); // INFLATE_FAST_MIN_LEFT is important. It makes sure there is at least 32 bytes of free // space available. This means for many SIMD operations we don't need to process a // remainder; we just copy blindly, and a later operation will overwrite the extra copied // bytes if avail_in >= INFLATE_FAST_MIN_HAVE && avail_out >= INFLATE_FAST_MIN_LEFT { restore!(); // SAFETY: INFLATE_FAST_MIN_HAVE >= 15. // Note that the restore macro does not do anything that would // reduce the number of bytes available. unsafe { inflate_fast_help(self, 0) }; return ControlFlow::Continue(()); } self.back = 0; // get a literal, length, or end-of-block code let mut here; loop { let bits = bit_reader.bits(self.len_table.bits); here = len_table[bits as usize]; if here.bits <= bit_reader.bits_in_buffer() { break; } if let Err(return_code) = bit_reader.pull_byte() { restore!(); return ControlFlow::Break(return_code); }; } if here.op != 0 && here.op & 0xf0 == 0 { let last = here; loop { let bits = bit_reader.bits((last.bits + last.op) as usize) as u16; here = len_table[(last.val + (bits >> last.bits)) as usize]; if last.bits + here.bits <= bit_reader.bits_in_buffer() { break; } if let Err(return_code) = bit_reader.pull_byte() { restore!(); return ControlFlow::Break(return_code); }; } bit_reader.drop_bits(last.bits); self.back += last.bits as usize; } bit_reader.drop_bits(here.bits); self.back += here.bits as usize; self.length = here.val as usize; if here.op == 0 { break 'top Mode::Lit; } else if here.op & 32 != 0 { // end of block // eprintln!("inflate: end of block"); self.back = usize::MAX; mode = Mode::Type; restore!(); return ControlFlow::Continue(()); } else if here.op & 64 != 0 { mode = Mode::Bad; { restore!(); let this = &mut *self; let msg: &'static str = "invalid literal/length code\0"; #[cfg(all(feature = "std", test))] dbg!(msg); this.error_message = Some(msg); return ControlFlow::Break(ReturnCode::DataError); } } else { // length code self.extra = (here.op & MAX_BITS) as usize; break 'top Mode::LenExt; } } Mode::Lit => { // NOTE: this branch must be kept in sync with its counterpart in `dispatch` if writer.is_full() { restore!(); #[cfg(all(test, feature = "std"))] eprintln!("Ok: writer is full ({} bytes)", self.writer.capacity()); return ControlFlow::Break(ReturnCode::Ok); } writer.push(self.length as u8); break 'top Mode::Len; } Mode::LenExt => { // NOTE: this branch must be kept in sync with its counterpart in `dispatch` let extra = self.extra; // get extra bits, if any if extra != 0 { match bit_reader.need_bits(extra) { Err(return_code) => { restore!(); return 
ControlFlow::Break(return_code); } Ok(v) => v, }; self.length += bit_reader.bits(extra) as usize; bit_reader.drop_bits(extra as u8); self.back += extra; } // eprintln!("inflate: length {}", state.length); self.was = self.length; break 'top Mode::Dist; } Mode::Dist => { // NOTE: this branch must be kept in sync with its counterpart in `dispatch` // get distance code let mut here; loop { let bits = bit_reader.bits(self.dist_table.bits) as usize; here = dist_table[bits]; if here.bits <= bit_reader.bits_in_buffer() { break; } if let Err(return_code) = bit_reader.pull_byte() { restore!(); return ControlFlow::Break(return_code); }; } if here.op & 0xf0 == 0 { let last = here; loop { let bits = bit_reader.bits((last.bits + last.op) as usize); here = dist_table[last.val as usize + ((bits as usize) >> last.bits)]; if last.bits + here.bits <= bit_reader.bits_in_buffer() { break; } if let Err(return_code) = bit_reader.pull_byte() { restore!(); return ControlFlow::Break(return_code); }; } bit_reader.drop_bits(last.bits); self.back += last.bits as usize; } bit_reader.drop_bits(here.bits); if here.op & 64 != 0 { restore!(); self.mode = Mode::Bad; return ControlFlow::Break(self.bad("invalid distance code\0")); } self.offset = here.val as usize; self.extra = (here.op & MAX_BITS) as usize; break 'top Mode::DistExt; } Mode::DistExt => { // NOTE: this branch must be kept in sync with its counterpart in `dispatch` let extra = self.extra; if extra > 0 { match bit_reader.need_bits(extra) { Err(return_code) => { restore!(); return ControlFlow::Break(return_code); } Ok(v) => v, }; self.offset += bit_reader.bits(extra) as usize; bit_reader.drop_bits(extra as u8); self.back += extra; } if INFLATE_STRICT && self.offset > self.dmax { restore!(); self.mode = Mode::Bad; return ControlFlow::Break( self.bad("invalid distance code too far back\0"), ); } // eprintln!("inflate: distance {}", state.offset); break 'top Mode::Match; } Mode::Match => { // NOTE: this branch must be kept in sync with its counterpart in `dispatch` if writer.is_full() { restore!(); #[cfg(all(feature = "std", test))] eprintln!( "BufError: writer is full ({} bytes)", self.writer.capacity() ); return ControlFlow::Break(ReturnCode::Ok); } let left = writer.remaining(); let copy = writer.len(); let copy = if self.offset > copy { // copy from window to output let mut copy = self.offset - copy; if copy > self.window.have() { if self.flags.contains(Flags::SANE) { restore!(); self.mode = Mode::Bad; return ControlFlow::Break( self.bad("invalid distance too far back\0"), ); } // TODO INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR panic!("INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR") } let wnext = self.window.next(); let wsize = self.window.size(); let from = if copy > wnext { copy -= wnext; wsize - copy } else { wnext - copy }; copy = Ord::min(copy, self.length); copy = Ord::min(copy, left); writer.extend_from_window(&self.window, from..from + copy); copy } else { let copy = Ord::min(self.length, left); writer.copy_match(self.offset, copy); copy }; self.length -= copy; if self.length == 0 { break 'top Mode::Len; } else { // otherwise it seems to recurse? // self.match_() break 'top Mode::Match; } } _ => unsafe { core::hint::unreachable_unchecked() }, } } } } fn dispatch(&mut self) -> ReturnCode { // Note: All early returns must save mode into self.mode again. let mut mode = self.mode; macro_rules! 
pull_byte { ($self:expr) => { match $self.bit_reader.pull_byte() { Err(return_code) => { self.mode = mode; return $self.inflate_leave(return_code); } Ok(_) => (), } }; } macro_rules! need_bits { ($self:expr, $n:expr) => { match $self.bit_reader.need_bits($n) { Err(return_code) => { self.mode = mode; return $self.inflate_leave(return_code); } Ok(v) => v, } }; } let ret = 'label: loop { mode = 'blk: { match mode { Mode::Head => { if self.wrap == 0 { break 'blk Mode::TypeDo; } need_bits!(self, 16); // Gzip if (self.wrap & 2) != 0 && self.bit_reader.hold() == 0x8b1f { if self.wbits == 0 { self.wbits = 15; } let b0 = self.bit_reader.bits(8) as u8; let b1 = (self.bit_reader.hold() >> 8) as u8; self.checksum = crc32(crate::CRC32_INITIAL_VALUE, &[b0, b1]); self.bit_reader.init_bits(); break 'blk Mode::Flags; } if let Some(header) = &mut self.head { header.done = -1; } // check if zlib header is allowed if (self.wrap & 1) == 0 || ((self.bit_reader.bits(8) << 8) + (self.bit_reader.hold() >> 8)) % 31 != 0 { mode = Mode::Bad; break 'label self.bad("incorrect header check\0"); } if self.bit_reader.bits(4) != Z_DEFLATED as u64 { mode = Mode::Bad; break 'label self.bad("unknown compression method\0"); } self.bit_reader.drop_bits(4); let len = self.bit_reader.bits(4) as u8 + 8; if self.wbits == 0 { self.wbits = len; } if len as i32 > MAX_WBITS || len > self.wbits { mode = Mode::Bad; break 'label self.bad("invalid window size\0"); } self.dmax = 1 << len; self.gzip_flags = 0; // indicate zlib header self.checksum = crate::ADLER32_INITIAL_VALUE as _; if self.bit_reader.hold() & 0x200 != 0 { self.bit_reader.init_bits(); break 'blk Mode::DictId; } else { self.bit_reader.init_bits(); break 'blk Mode::Type; } } Mode::Flags => { need_bits!(self, 16); self.gzip_flags = self.bit_reader.hold() as i32; // Z_DEFLATED = 8 is the only supported method if self.gzip_flags & 0xff != Z_DEFLATED { mode = Mode::Bad; break 'label self.bad("unknown compression method\0"); } if self.gzip_flags & 0xe000 != 0 { mode = Mode::Bad; break 'label self.bad("unknown header flags set\0"); } if let Some(head) = self.head.as_mut() { head.text = ((self.bit_reader.hold() >> 8) & 1) as i32; } if (self.gzip_flags & 0x0200) != 0 && (self.wrap & 4) != 0 { let b0 = self.bit_reader.bits(8) as u8; let b1 = (self.bit_reader.hold() >> 8) as u8; self.checksum = crc32(self.checksum, &[b0, b1]); } self.bit_reader.init_bits(); break 'blk Mode::Time; } Mode::Time => { need_bits!(self, 32); if let Some(head) = self.head.as_mut() { head.time = self.bit_reader.hold() as z_size; } if (self.gzip_flags & 0x0200) != 0 && (self.wrap & 4) != 0 { let bytes = (self.bit_reader.hold() as u32).to_le_bytes(); self.checksum = crc32(self.checksum, &bytes); } self.bit_reader.init_bits(); break 'blk Mode::Os; } Mode::Os => { need_bits!(self, 16); if let Some(head) = self.head.as_mut() { head.xflags = (self.bit_reader.hold() & 0xff) as i32; head.os = (self.bit_reader.hold() >> 8) as i32; } if (self.gzip_flags & 0x0200) != 0 && (self.wrap & 4) != 0 { let bytes = (self.bit_reader.hold() as u16).to_le_bytes(); self.checksum = crc32(self.checksum, &bytes); } self.bit_reader.init_bits(); break 'blk Mode::ExLen; } Mode::ExLen => { if (self.gzip_flags & 0x0400) != 0 { need_bits!(self, 16); // self.length (and head.extra_len) represent the length of the extra field self.length = self.bit_reader.hold() as usize; if let Some(head) = self.head.as_mut() { head.extra_len = self.length as u32; } if (self.gzip_flags & 0x0200) != 0 && (self.wrap & 4) != 0 { let bytes = 
(self.bit_reader.hold() as u16).to_le_bytes(); self.checksum = crc32(self.checksum, &bytes); } self.bit_reader.init_bits(); } else if let Some(head) = self.head.as_mut() { head.extra = core::ptr::null_mut(); } break 'blk Mode::Extra; } Mode::Extra => { if (self.gzip_flags & 0x0400) != 0 { // self.length is the number of remaining `extra` bytes. But they may not all be available let extra_available = Ord::min(self.length, self.bit_reader.bytes_remaining()); if extra_available > 0 { if let Some(head) = self.head.as_mut() { if !head.extra.is_null() { // at `head.extra`, the caller has reserved `head.extra_max` bytes. // in the deflated byte stream, we've found a gzip header with // `head.extra_len` bytes of data. We must be careful because // `head.extra_len` may be larger than `head.extra_max`. // how many bytes we've already written into `head.extra` let written_so_far = head.extra_len as usize - self.length; // min of number of bytes available at dst and at src let count = Ord::min( (head.extra_max as usize) .saturating_sub(written_so_far), extra_available, ); // SAFETY: location where we'll write: this saturates at the // `head.extra.add(head.extra.max)` to prevent UB let next_write_offset = Ord::min(written_so_far, head.extra_max as usize); unsafe { // SAFETY: count is effectively bounded by head.extra_max // and bit_reader.bytes_remaining(), so the count won't // go out of bounds. core::ptr::copy_nonoverlapping( self.bit_reader.as_mut_ptr(), head.extra.add(next_write_offset), count, ); } } } // Checksum if (self.gzip_flags & 0x0200) != 0 && (self.wrap & 4) != 0 { let extra_slice = &self.bit_reader.as_slice()[..extra_available]; self.checksum = crc32(self.checksum, extra_slice) } self.in_available -= extra_available; self.bit_reader.advance(extra_available); self.length -= extra_available; } // Checks for errors occur after returning if self.length != 0 { break 'label self.inflate_leave(ReturnCode::Ok); } } self.length = 0; break 'blk Mode::Name; } Mode::Name => { if (self.gzip_flags & 0x0800) != 0 { if self.in_available == 0 { break 'label self.inflate_leave(ReturnCode::Ok); } // the name string will always be null-terminated, but might be longer than we have // space for in the header struct. Nonetheless, we read the whole thing. let slice = self.bit_reader.as_slice(); let null_terminator_index = slice.iter().position(|c| *c == 0); // we include the null terminator if it exists let name_slice = match null_terminator_index { Some(i) => &slice[..=i], None => slice, }; // if the header has space, store as much as possible in there if let Some(head) = self.head.as_mut() { if !head.name.is_null() { let remaining_name_bytes = (head.name_max as usize) .checked_sub(self.length) .expect("name out of bounds"); let copy = Ord::min(name_slice.len(), remaining_name_bytes); unsafe { // SAFETY: copy is effectively bound by the name length and // head.name_max, so this won't go out of bounds. 
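                                        // (`copy <= head.name_max - self.length`, so the
                                        // write into `head.name` stays within the
                                        // caller-provided buffer)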
core::ptr::copy_nonoverlapping( name_slice.as_ptr(), head.name.add(self.length), copy, ) }; self.length += copy; } } if (self.gzip_flags & 0x0200) != 0 && (self.wrap & 4) != 0 { self.checksum = crc32(self.checksum, name_slice); } let reached_end = name_slice.last() == Some(&0); self.bit_reader.advance(name_slice.len()); if !reached_end && self.bit_reader.bytes_remaining() == 0 { break 'label self.inflate_leave(ReturnCode::Ok); } } else if let Some(head) = self.head.as_mut() { head.name = core::ptr::null_mut(); } self.length = 0; break 'blk Mode::Comment; } Mode::Comment => { if (self.gzip_flags & 0x01000) != 0 { if self.in_available == 0 { break 'label self.inflate_leave(ReturnCode::Ok); } // the comment string will always be null-terminated, but might be longer than we have // space for in the header struct. Nonetheless, we read the whole thing. let slice = self.bit_reader.as_slice(); let null_terminator_index = slice.iter().position(|c| *c == 0); // we include the null terminator if it exists let comment_slice = match null_terminator_index { Some(i) => &slice[..=i], None => slice, }; // if the header has space, store as much as possible in there if let Some(head) = self.head.as_mut() { if !head.comment.is_null() { let remaining_comm_bytes = (head.comm_max as usize) .checked_sub(self.length) .expect("comm out of bounds"); let copy = Ord::min(comment_slice.len(), remaining_comm_bytes); unsafe { // SAFETY: copy is effectively bound by the comment length and // head.comm_max, so this won't go out of bounds. core::ptr::copy_nonoverlapping( comment_slice.as_ptr(), head.comment.add(self.length), copy, ) }; self.length += copy; } } if (self.gzip_flags & 0x0200) != 0 && (self.wrap & 4) != 0 { self.checksum = crc32(self.checksum, comment_slice); } let reached_end = comment_slice.last() == Some(&0); self.bit_reader.advance(comment_slice.len()); if !reached_end && self.bit_reader.bytes_remaining() == 0 { break 'label self.inflate_leave(ReturnCode::Ok); } } else if let Some(head) = self.head.as_mut() { head.comment = core::ptr::null_mut(); } break 'blk Mode::HCrc; } Mode::HCrc => { if (self.gzip_flags & 0x0200) != 0 { need_bits!(self, 16); if (self.wrap & 4) != 0 && self.bit_reader.hold() as u32 != (self.checksum & 0xffff) { mode = Mode::Bad; break 'label self.bad("header crc mismatch\0"); } self.bit_reader.init_bits(); } if let Some(head) = self.head.as_mut() { head.hcrc = (self.gzip_flags >> 9) & 1; head.done = 1; } // compute crc32 checksum if not in raw mode if (self.wrap & 4 != 0) && self.gzip_flags != 0 { self.crc_fold = Crc32Fold::new(); self.checksum = crate::CRC32_INITIAL_VALUE; } break 'blk Mode::Type; } Mode::Type => { use InflateFlush::*; match self.flush { Block | Trees => break 'label ReturnCode::Ok, NoFlush | SyncFlush | Finish => { // NOTE: this is slightly different to what zlib-rs does! 
break 'blk Mode::TypeDo; } } } Mode::TypeDo => { if self.flags.contains(Flags::IS_LAST_BLOCK) { self.bit_reader.next_byte_boundary(); break 'blk Mode::Check; } need_bits!(self, 3); // self.last = self.bit_reader.bits(1) != 0; self.flags .update(Flags::IS_LAST_BLOCK, self.bit_reader.bits(1) != 0); self.bit_reader.drop_bits(1); match self.bit_reader.bits(2) { 0b00 => { // eprintln!("inflate: stored block (last = {last})"); self.bit_reader.drop_bits(2); break 'blk Mode::Stored; } 0b01 => { // eprintln!("inflate: fixed codes block (last = {last})"); self.len_table = Table { codes: Codes::Fixed, bits: 9, }; self.dist_table = Table { codes: Codes::Fixed, bits: 5, }; mode = Mode::Len_; self.bit_reader.drop_bits(2); if let InflateFlush::Trees = self.flush { break 'label self.inflate_leave(ReturnCode::Ok); } else { break 'blk Mode::Len_; } } 0b10 => { // eprintln!("inflate: dynamic codes block (last = {last})"); self.bit_reader.drop_bits(2); break 'blk Mode::Table; } 0b11 => { // eprintln!("inflate: invalid block type"); self.bit_reader.drop_bits(2); mode = Mode::Bad; break 'label self.bad("invalid block type\0"); } _ => { // LLVM will optimize this branch away unreachable!("BitReader::bits(2) only yields a value of two bits, so this match is already exhaustive") } } } Mode::Stored => { self.bit_reader.next_byte_boundary(); need_bits!(self, 32); let hold = self.bit_reader.bits(32) as u32; // eprintln!("hold {hold:#x}"); if hold as u16 != !((hold >> 16) as u16) { mode = Mode::Bad; break 'label self.bad("invalid stored block lengths\0"); } self.length = hold as usize & 0xFFFF; // eprintln!("inflate: stored length {}", state.length); self.bit_reader.init_bits(); if let InflateFlush::Trees = self.flush { break 'label self.inflate_leave(ReturnCode::Ok); } else { break 'blk Mode::CopyBlock; } } Mode::CopyBlock => { loop { let mut copy = self.length; if copy == 0 { break; } copy = Ord::min(copy, self.writer.remaining()); copy = Ord::min(copy, self.bit_reader.bytes_remaining()); if copy == 0 { break 'label self.inflate_leave(ReturnCode::Ok); } self.writer.extend(&self.bit_reader.as_slice()[..copy]); self.bit_reader.advance(copy); self.length -= copy; } break 'blk Mode::Type; } Mode::Check => { if !cfg!(feature = "__internal-fuzz-disable-checksum") && self.wrap != 0 { need_bits!(self, 32); self.total += self.writer.len(); if self.wrap & 4 != 0 { if self.gzip_flags != 0 { self.crc_fold.fold(self.writer.filled(), self.checksum); self.checksum = self.crc_fold.finish(); } else { self.checksum = adler32(self.checksum, self.writer.filled()); } } let given_checksum = if self.gzip_flags != 0 { self.bit_reader.hold() as u32 } else { zswap32(self.bit_reader.hold() as u32) }; self.out_available = self.writer.capacity() - self.writer.len(); if self.wrap & 4 != 0 && given_checksum != self.checksum { mode = Mode::Bad; break 'label self.bad("incorrect data check\0"); } self.bit_reader.init_bits(); } break 'blk Mode::Length; } Mode::Len_ => { break 'blk Mode::Len; } Mode::Len => { self.mode = mode; let val = self.len_and_friends(); mode = self.mode; match val { ControlFlow::Break(return_code) => break 'label return_code, ControlFlow::Continue(()) => continue 'label, } } Mode::LenExt => { // NOTE: this branch must be kept in sync with its counterpart in `len_and_friends` let extra = self.extra; // get extra bits, if any if extra != 0 { need_bits!(self, extra); self.length += self.bit_reader.bits(extra) as usize; self.bit_reader.drop_bits(extra as u8); self.back += extra; } // eprintln!("inflate: length {}", state.length); 
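                        // remember the initial match length (`State::was`) before reading
                        // the distance code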
self.was = self.length; break 'blk Mode::Dist; } Mode::Lit => { // NOTE: this branch must be kept in sync with its counterpart in `len_and_friends` if self.writer.is_full() { #[cfg(all(test, feature = "std"))] eprintln!("Ok: writer is full ({} bytes)", self.writer.capacity()); break 'label self.inflate_leave(ReturnCode::Ok); } self.writer.push(self.length as u8); break 'blk Mode::Len; } Mode::Dist => { // NOTE: this branch must be kept in sync with its counterpart in `len_and_friends` // get distance code let mut here; loop { let bits = self.bit_reader.bits(self.dist_table.bits) as usize; here = self.dist_table_get(bits); if here.bits <= self.bit_reader.bits_in_buffer() { break; } pull_byte!(self); } if here.op & 0xf0 == 0 { let last = here; loop { let bits = self.bit_reader.bits((last.bits + last.op) as usize); here = self.dist_table_get( last.val as usize + ((bits as usize) >> last.bits), ); if last.bits + here.bits <= self.bit_reader.bits_in_buffer() { break; } pull_byte!(self); } self.bit_reader.drop_bits(last.bits); self.back += last.bits as usize; } self.bit_reader.drop_bits(here.bits); if here.op & 64 != 0 { mode = Mode::Bad; break 'label self.bad("invalid distance code\0"); } self.offset = here.val as usize; self.extra = (here.op & MAX_BITS) as usize; break 'blk Mode::DistExt; } Mode::DistExt => { // NOTE: this branch must be kept in sync with its counterpart in `len_and_friends` let extra = self.extra; if extra > 0 { need_bits!(self, extra); self.offset += self.bit_reader.bits(extra) as usize; self.bit_reader.drop_bits(extra as u8); self.back += extra; } if INFLATE_STRICT && self.offset > self.dmax { mode = Mode::Bad; break 'label self.bad("invalid distance code too far back\0"); } // eprintln!("inflate: distance {}", state.offset); break 'blk Mode::Match; } Mode::Match => { // NOTE: this branch must be kept in sync with its counterpart in `len_and_friends` 'match_: loop { if self.writer.is_full() { #[cfg(all(feature = "std", test))] eprintln!( "BufError: writer is full ({} bytes)", self.writer.capacity() ); break 'label self.inflate_leave(ReturnCode::Ok); } let left = self.writer.remaining(); let copy = self.writer.len(); let copy = if self.offset > copy { // copy from window to output let mut copy = self.offset - copy; if copy > self.window.have() { if self.flags.contains(Flags::SANE) { mode = Mode::Bad; break 'label self.bad("invalid distance too far back\0"); } // TODO INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR panic!("INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR") } let wnext = self.window.next(); let wsize = self.window.size(); let from = if copy > wnext { copy -= wnext; wsize - copy } else { wnext - copy }; copy = Ord::min(copy, self.length); copy = Ord::min(copy, left); self.writer .extend_from_window(&self.window, from..from + copy); copy } else { let copy = Ord::min(self.length, left); self.writer.copy_match(self.offset, copy); copy }; self.length -= copy; if self.length == 0 { break 'blk Mode::Len; } else { // otherwise it seems to recurse? 
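                                // (keep looping: the remaining `length` bytes are copied
                                // on the next iteration)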
continue 'match_; } } } Mode::Done => todo!(), Mode::Table => { need_bits!(self, 14); self.nlen = self.bit_reader.bits(5) as usize + 257; self.bit_reader.drop_bits(5); self.ndist = self.bit_reader.bits(5) as usize + 1; self.bit_reader.drop_bits(5); self.ncode = self.bit_reader.bits(4) as usize + 4; self.bit_reader.drop_bits(4); // TODO pkzit_bug_workaround if self.nlen > 286 || self.ndist > 30 { mode = Mode::Bad; break 'label self.bad("too many length or distance symbols\0"); } self.have = 0; break 'blk Mode::LenLens; } Mode::LenLens => { // permutation of code lengths ; const ORDER: [u8; 19] = [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, ]; while self.have < self.ncode { need_bits!(self, 3); self.lens[usize::from(ORDER[self.have])] = self.bit_reader.bits(3) as u16; self.have += 1; self.bit_reader.drop_bits(3); } while self.have < 19 { self.lens[usize::from(ORDER[self.have])] = 0; self.have += 1; } let InflateTable::Success { root, used } = inflate_table( CodeType::Codes, &self.lens, 19, &mut self.codes_codes, 7, &mut self.work, ) else { mode = Mode::Bad; break 'label self.bad("invalid code lengths set\0"); }; self.next = used; self.len_table.codes = Codes::Codes; self.len_table.bits = root; self.have = 0; break 'blk Mode::CodeLens; } Mode::CodeLens => { while self.have < self.nlen + self.ndist { let here = loop { let bits = self.bit_reader.bits(self.len_table.bits); let here = self.len_table_get(bits as usize); if here.bits <= self.bit_reader.bits_in_buffer() { break here; } pull_byte!(self); }; let here_bits = here.bits; match here.val { 0..=15 => { self.bit_reader.drop_bits(here_bits); self.lens[self.have] = here.val; self.have += 1; } 16 => { need_bits!(self, usize::from(here_bits) + 2); self.bit_reader.drop_bits(here_bits); if self.have == 0 { mode = Mode::Bad; break 'label self.bad("invalid bit length repeat\0"); } let len = self.lens[self.have - 1]; let copy = 3 + self.bit_reader.bits(2) as usize; self.bit_reader.drop_bits(2); if self.have + copy > self.nlen + self.ndist { mode = Mode::Bad; break 'label self.bad("invalid bit length repeat\0"); } self.lens[self.have..][..copy].fill(len); self.have += copy; } 17 => { need_bits!(self, usize::from(here_bits) + 3); self.bit_reader.drop_bits(here_bits); let copy = 3 + self.bit_reader.bits(3) as usize; self.bit_reader.drop_bits(3); if self.have + copy > self.nlen + self.ndist { mode = Mode::Bad; break 'label self.bad("invalid bit length repeat\0"); } self.lens[self.have..][..copy].fill(0); self.have += copy; } 18.. 
=> { need_bits!(self, usize::from(here_bits) + 7); self.bit_reader.drop_bits(here_bits); let copy = 11 + self.bit_reader.bits(7) as usize; self.bit_reader.drop_bits(7); if self.have + copy > self.nlen + self.ndist { mode = Mode::Bad; break 'label self.bad("invalid bit length repeat\0"); } self.lens[self.have..][..copy].fill(0); self.have += copy; } } } // check for end-of-block code (better have one) if self.lens[256] == 0 { mode = Mode::Bad; break 'label self.bad("invalid code -- missing end-of-block\0"); } // build code tables let InflateTable::Success { root, used } = inflate_table( CodeType::Lens, &self.lens, self.nlen, &mut self.len_codes, 10, &mut self.work, ) else { mode = Mode::Bad; break 'label self.bad("invalid literal/lengths set\0"); }; self.len_table.codes = Codes::Len; self.len_table.bits = root; self.next = used; let InflateTable::Success { root, used } = inflate_table( CodeType::Dists, &self.lens[self.nlen..], self.ndist, &mut self.dist_codes, 9, &mut self.work, ) else { mode = Mode::Bad; break 'label self.bad("invalid distances set\0"); }; self.dist_table.bits = root; self.dist_table.codes = Codes::Dist; self.next += used; mode = Mode::Len_; if matches!(self.flush, InflateFlush::Trees) { break 'label self.inflate_leave(ReturnCode::Ok); } break 'blk Mode::Len_; } Mode::Dict => { if !self.flags.contains(Flags::HAVE_DICT) { break 'label self.inflate_leave(ReturnCode::NeedDict); } self.checksum = crate::ADLER32_INITIAL_VALUE as _; break 'blk Mode::Type; } Mode::DictId => { need_bits!(self, 32); self.checksum = zswap32(self.bit_reader.hold() as u32); self.bit_reader.init_bits(); break 'blk Mode::Dict; } Mode::Bad => { let msg = "repeated call with bad state\0"; #[cfg(all(feature = "std", test))] dbg!(msg); self.error_message = Some(msg); break 'label ReturnCode::DataError; } Mode::Mem => { break 'label ReturnCode::MemError; } Mode::Sync => { break 'label ReturnCode::StreamError; } Mode::Length => { // for gzip, last bytes contain LENGTH if self.wrap != 0 && self.gzip_flags != 0 { need_bits!(self, 32); if (self.wrap & 4) != 0 && self.bit_reader.hold() != self.total as u64 { mode = Mode::Bad; break 'label self.bad("incorrect length check\0"); } self.bit_reader.init_bits(); } // inflate stream terminated properly break 'label ReturnCode::StreamEnd; } }; } }; self.mode = mode; ret } fn bad(&mut self, msg: &'static str) -> ReturnCode { #[cfg(all(feature = "std", test))] dbg!(msg); self.error_message = Some(msg); self.inflate_leave(ReturnCode::DataError) } // NOTE: it is crucial for the internal bookkeeping that this is the only route for actually // leaving the inflate function call chain fn inflate_leave(&mut self, return_code: ReturnCode) -> ReturnCode { // actual logic is in `inflate` itself return_code } /// Stored in the `z_stream.data_type` field fn decoding_state(&self) -> i32 { let bit_reader_bits = self.bit_reader.bits_in_buffer() as i32; debug_assert!(bit_reader_bits < 64); let last = if self.flags.contains(Flags::IS_LAST_BLOCK) { 64 } else { 0 }; let mode = match self.mode { Mode::Type => 128, Mode::Len_ | Mode::CopyBlock => 256, _ => 0, }; bit_reader_bits | last | mode } } /// # Safety /// /// `state.bit_reader` must have at least 15 bytes available to read, as /// indicated by `state.bit_reader.bytes_remaining() >= 15` unsafe fn inflate_fast_help(state: &mut State, start: usize) { #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] if crate::cpu_features::is_enabled_avx2_and_bmi2() { // SAFETY: we've verified the target features and the caller ensured enough 
bytes_remaining return unsafe { inflate_fast_help_avx2(state, start) }; } // SAFETY: The caller ensured enough bytes_remaining unsafe { inflate_fast_help_vanilla(state, start) }; } /// # Safety /// /// `state.bit_reader` must have at least 15 bytes available to read, as /// indicated by `state.bit_reader.bytes_remaining() >= 15` #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] #[target_feature(enable = "avx2")] #[target_feature(enable = "bmi2")] #[target_feature(enable = "bmi1")] unsafe fn inflate_fast_help_avx2(state: &mut State, start: usize) { // SAFETY: `bytes_remaining` checked by our caller unsafe { inflate_fast_help_impl::<{ CpuFeatures::AVX2 }>(state, start) }; } /// # Safety /// /// `state.bit_reader` must have at least 15 bytes available to read, as /// indicated by `state.bit_reader.bytes_remaining() >= 15` unsafe fn inflate_fast_help_vanilla(state: &mut State, start: usize) { // SAFETY: `bytes_remaining` checked by our caller unsafe { inflate_fast_help_impl::<{ CpuFeatures::NONE }>(state, start) }; } /// # Safety /// /// `state.bit_reader` must have at least 15 bytes available to read, as /// indicated by `state.bit_reader.bytes_remaining() >= 15` #[inline(always)] unsafe fn inflate_fast_help_impl(state: &mut State, _start: usize) { let mut bit_reader = BitReader::new(&[]); core::mem::swap(&mut bit_reader, &mut state.bit_reader); debug_assert!(bit_reader.bytes_remaining() >= 15); let mut writer = Writer::new(&mut []); core::mem::swap(&mut writer, &mut state.writer); let lcode = state.len_table_ref(); let dcode = state.dist_table_ref(); // IDEA: use const generics for the bits here? let lmask = (1u64 << state.len_table.bits) - 1; let dmask = (1u64 << state.dist_table.bits) - 1; // TODO verify if this is relevant for us let extra_safe = false; let window_size = state.window.size(); let mut bad = None; if bit_reader.bits_in_buffer() < 10 { debug_assert!(bit_reader.bytes_remaining() >= 15); // Safety: Caller ensured that bit_reader has >= 15 bytes available; refill only needs 8. unsafe { bit_reader.refill() }; } // We had at least 15 bytes in the slice, plus whatever was in the buffer. After filling the // buffer from the slice, we now have at least 8 bytes remaining in the slice, plus a full buffer. debug_assert!( bit_reader.bytes_remaining() >= 8 && bit_reader.bytes_remaining_including_buffer() >= 15 ); 'outer: loop { // This condition is ensured above for the first iteration of the `outer` loop. For // subsequent iterations, the loop continuation condition is // `bit_reader.bytes_remaining_including_buffer() > 15`. And because the buffer // contributes at most 7 bytes to the result of bit_reader.bytes_remaining_including_buffer(), // that means that the slice contains at least 8 bytes. debug_assert!( bit_reader.bytes_remaining() >= 8 && bit_reader.bytes_remaining_including_buffer() >= 15 ); let mut here = { let bits = bit_reader.bits_in_buffer(); let hold = bit_reader.hold(); // Safety: As described in the comments for the debug_assert at the start of // the `outer` loop, it is guaranteed that `bit_reader.bytes_remaining() >= 8` here, // which satisfies the safety precondition for `refill`. And, because the total // number of bytes in `bit_reader`'s buffer plus its slice is at least 15, and // `refill` moves at most 7 bytes from the slice to the buffer, the slice will still // contain at least 8 bytes after this `refill` call. unsafe { bit_reader.refill() }; // After the refill, there will be at least 8 bytes left in the bit_reader's slice. 
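// Note added for clarity: `bits` and `hold` were snapshotted *before* the refill above.
// When the buffer already held enough bits to index the root length table, the lookup
// below uses that snapshot and does not have to wait for the refill's memory access;
// only otherwise does it fall back to the freshly refilled `bit_reader.hold()`.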
debug_assert!(bit_reader.bytes_remaining() >= 8); // in most cases, the read can be interleaved with the logic // based on benchmarks this matters in practice. wild. if bits as usize >= state.len_table.bits { lcode[(hold & lmask) as usize] } else { lcode[(bit_reader.hold() & lmask) as usize] } }; if here.op == 0 { writer.push(here.val as u8); bit_reader.drop_bits(here.bits); here = lcode[(bit_reader.hold() & lmask) as usize]; if here.op == 0 { writer.push(here.val as u8); bit_reader.drop_bits(here.bits); here = lcode[(bit_reader.hold() & lmask) as usize]; } } 'dolen: loop { bit_reader.drop_bits(here.bits); let op = here.op; if op == 0 { writer.push(here.val as u8); } else if op & 16 != 0 { let op = op & MAX_BITS; let mut len = here.val + bit_reader.bits(op as usize) as u16; bit_reader.drop_bits(op); here = dcode[(bit_reader.hold() & dmask) as usize]; // we have two fast-path loads: 10+10 + 15+5 = 40, // but we may need to refill here in the worst case if bit_reader.bits_in_buffer() < MAX_BITS + MAX_DIST_EXTRA_BITS { debug_assert!(bit_reader.bytes_remaining() >= 8); // Safety: On the first iteration of the `dolen` loop, we can rely on the // invariant documented for the previous `refill` call above: after that // operation, `bit_reader.bytes_remining >= 8`, which satisfies the safety // precondition for this call. For subsequent iterations, this invariant // remains true because nothing else within the `dolen` loop consumes data // from the slice. unsafe { bit_reader.refill() }; } 'dodist: loop { bit_reader.drop_bits(here.bits); let op = here.op; if op & 16 != 0 { let op = op & MAX_BITS; let dist = here.val + bit_reader.bits(op as usize) as u16; if INFLATE_STRICT && dist as usize > state.dmax { bad = Some("invalid distance too far back\0"); state.mode = Mode::Bad; break 'outer; } bit_reader.drop_bits(op); // max distance in output let written = writer.len(); if dist as usize > written { // copy fropm the window if (dist as usize - written) > state.window.have() { if state.flags.contains(Flags::SANE) { bad = Some("invalid distance too far back\0"); state.mode = Mode::Bad; break 'outer; } panic!("INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR") } let mut op = dist as usize - written; let mut from; let window_next = state.window.next(); if window_next == 0 { // This case is hit when the window has just wrapped around // by logic in `Window::extend`. It is special-cased because // apparently this is quite common. // // the match is at the end of the window, even though the next // position has now wrapped around. from = window_size - op; } else if window_next >= op { // the standard case: a contiguous copy from the window, no wrapping from = window_next - op; } else { // This case is hit when the window has recently wrapped around // by logic in `Window::extend`. // // The match is (partially) at the end of the window op -= window_next; from = window_size - op; if op < len as usize { // This case is hit when part of the match is at the end of the // window, and part of it has wrapped around to the start. Copy // the end section here, the start section will be copied below. 
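// Worked example with made-up numbers: say window_size = 32768, window_next = 3, and the
// match needs op = dist - written = 10 bytes of history. Then op was reduced to 10 - 3 = 7
// and from = 32768 - 7, so the 7 bytes still sitting at the end of the window are copied
// here; afterwards from is reset to 0 and op to window_next = 3, and the remaining history
// bytes are copied from the start of the window by the `Ord::min(op, len)` copy below.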
len -= op as u16; writer.extend_from_window_with_features::( &state.window, from..from + op, ); from = 0; op = window_next; } } let copy = Ord::min(op, len as usize); writer.extend_from_window_with_features::( &state.window, from..from + copy, ); if op < len as usize { // here we need some bytes from the output itself writer.copy_match_with_features::( dist as usize, len as usize - op, ); } } else if extra_safe { todo!() } else { writer.copy_match_with_features::(dist as usize, len as usize) } } else if (op & 64) == 0 { // 2nd level distance code here = dcode[(here.val + bit_reader.bits(op as usize) as u16) as usize]; continue 'dodist; } else { bad = Some("invalid distance code\0"); state.mode = Mode::Bad; break 'outer; } break 'dodist; } } else if (op & 64) == 0 { // 2nd level length code here = lcode[(here.val + bit_reader.bits(op as usize) as u16) as usize]; continue 'dolen; } else if op & 32 != 0 { // end of block state.mode = Mode::Type; break 'outer; } else { bad = Some("invalid literal/length code\0"); state.mode = Mode::Bad; break 'outer; } break 'dolen; } // include the bits in the bit_reader buffer in the count of available bytes let remaining = bit_reader.bytes_remaining_including_buffer(); if remaining >= INFLATE_FAST_MIN_HAVE && writer.remaining() >= INFLATE_FAST_MIN_LEFT { continue; } break 'outer; } // return unused bytes (on entry, bits < 8, so in won't go too far back) bit_reader.return_unused_bytes(); state.bit_reader = bit_reader; state.writer = writer; if let Some(error_message) = bad { debug_assert!(matches!(state.mode, Mode::Bad)); state.bad(error_message); } } pub fn prime(stream: &mut InflateStream, bits: i32, value: i32) -> ReturnCode { if bits == 0 { /* fall through */ } else if bits < 0 { stream.state.bit_reader.init_bits(); } else if bits > 16 || stream.state.bit_reader.bits_in_buffer() + bits as u8 > 32 { return ReturnCode::StreamError; } else { stream.state.bit_reader.prime(bits as u8, value as u64); } ReturnCode::Ok } #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub struct InflateConfig { pub window_bits: i32, } impl Default for InflateConfig { fn default() -> Self { Self { window_bits: DEF_WBITS, } } } /// Initialize the stream in an inflate state pub fn init(stream: &mut z_stream, config: InflateConfig) -> ReturnCode { stream.msg = core::ptr::null_mut(); // for safety we must really make sure that alloc and free are consistent // this is a (slight) deviation from stock zlib. 
In this crate we pick the rust // allocator as the default, but `libz-rs-sys` configures the C allocator #[cfg(feature = "rust-allocator")] if stream.zalloc.is_none() || stream.zfree.is_none() { stream.configure_default_rust_allocator() } #[cfg(feature = "c-allocator")] if stream.zalloc.is_none() || stream.zfree.is_none() { stream.configure_default_c_allocator() } if stream.zalloc.is_none() || stream.zfree.is_none() { return ReturnCode::StreamError; } let mut state = State::new(&[], Writer::new(&mut [])); // TODO this can change depending on the used/supported SIMD instructions state.chunksize = 32; let alloc = Allocator { zalloc: stream.zalloc.unwrap(), zfree: stream.zfree.unwrap(), opaque: stream.opaque, _marker: PhantomData, }; // allocated here to have the same order as zlib let Some(state_allocation) = alloc.allocate_raw::() else { return ReturnCode::MemError; }; // FIXME: write is stable for NonNull since 1.80.0 unsafe { state_allocation.as_ptr().write(state) }; stream.state = state_allocation.as_ptr() as *mut internal_state; // SAFETY: we've correctly initialized the stream to be an InflateStream let ret = if let Some(stream) = unsafe { InflateStream::from_stream_mut(stream) } { reset_with_config(stream, config) } else { ReturnCode::StreamError }; if ret != ReturnCode::Ok { let ptr = stream.state; stream.state = core::ptr::null_mut(); // SAFETY: we assume deallocation does not cause UB unsafe { alloc.deallocate(ptr, 1) }; } ret } pub fn reset_with_config(stream: &mut InflateStream, config: InflateConfig) -> ReturnCode { let mut window_bits = config.window_bits; let wrap; if window_bits < 0 { wrap = 0; if window_bits < -MAX_WBITS { return ReturnCode::StreamError; } window_bits = -window_bits; } else { wrap = (window_bits >> 4) + 5; // TODO wth? if window_bits < 48 { window_bits &= MAX_WBITS; } } if window_bits != 0 && !(MIN_WBITS..=MAX_WBITS).contains(&window_bits) { #[cfg(feature = "std")] eprintln!("invalid windowBits"); return ReturnCode::StreamError; } if stream.state.window.size() != 0 && stream.state.wbits as i32 != window_bits { let mut window = Window::empty(); core::mem::swap(&mut window, &mut stream.state.window); let (ptr, len) = window.into_raw_parts(); assert_ne!(len, 0); // SAFETY: window is discarded after this deallocation. 
unsafe { stream.alloc.deallocate(ptr, len) }; } stream.state.wrap = wrap as u8; stream.state.wbits = window_bits as _; reset(stream) } pub fn reset(stream: &mut InflateStream) -> ReturnCode { // reset the state of the window stream.state.window.clear(); stream.state.error_message = None; reset_keep(stream) } pub fn reset_keep(stream: &mut InflateStream) -> ReturnCode { stream.total_in = 0; stream.total_out = 0; stream.state.total = 0; stream.msg = core::ptr::null_mut(); let state = &mut stream.state; if state.wrap != 0 { // to support ill-conceived Java test suite stream.adler = (state.wrap & 1) as _; } state.mode = Mode::Head; state.checksum = crate::ADLER32_INITIAL_VALUE as u32; state.flags.update(Flags::IS_LAST_BLOCK, false); state.flags.update(Flags::HAVE_DICT, false); state.flags.update(Flags::SANE, true); state.gzip_flags = -1; state.dmax = 32768; state.head = None; state.bit_reader = BitReader::new(&[]); state.next = 0; state.len_table = Table::default(); state.dist_table = Table::default(); state.back = usize::MAX; ReturnCode::Ok } pub fn codes_used(stream: &InflateStream) -> usize { stream.state.next } pub unsafe fn inflate(stream: &mut InflateStream, flush: InflateFlush) -> ReturnCode { if stream.next_out.is_null() || (stream.next_in.is_null() && stream.avail_in != 0) { return ReturnCode::StreamError as _; } let state = &mut stream.state; // skip check if let Mode::Type = state.mode { state.mode = Mode::TypeDo; } state.flush = flush; unsafe { state .bit_reader .update_slice(stream.next_in, stream.avail_in as usize) }; // Safety: `stream.next_out` is non-null and points to at least `stream.avail_out` bytes. state.writer = unsafe { Writer::new_uninit(stream.next_out.cast(), stream.avail_out as usize) }; state.in_available = stream.avail_in as _; state.out_available = stream.avail_out as _; let mut err = state.dispatch(); let in_read = state.bit_reader.as_ptr() as usize - stream.next_in as usize; let out_written = state.out_available - (state.writer.capacity() - state.writer.len()); stream.total_in += in_read as z_size; state.total += out_written; stream.total_out = state.total as _; stream.avail_in = state.bit_reader.bytes_remaining() as u32; stream.next_in = state.bit_reader.as_ptr() as *mut u8; stream.avail_out = (state.writer.capacity() - state.writer.len()) as u32; stream.next_out = state.writer.next_out() as *mut u8; stream.adler = state.checksum as z_checksum; let valid_mode = |mode| !matches!(mode, Mode::Bad | Mode::Mem | Mode::Sync); let not_done = |mode| { !matches!( mode, Mode::Check | Mode::Length | Mode::Bad | Mode::Mem | Mode::Sync ) }; let must_update_window = state.window.size() != 0 || (out_written != 0 && valid_mode(state.mode) && (not_done(state.mode) || !matches!(state.flush, InflateFlush::Finish))); let update_checksum = state.wrap & 4 != 0; if must_update_window { 'blk: { // initialize the window if needed if state.window.size() == 0 { match Window::new_in(&stream.alloc, state.wbits as usize) { Some(window) => state.window = window, None => { state.mode = Mode::Mem; err = ReturnCode::MemError; break 'blk; } } } state.window.extend( &state.writer.filled()[..out_written], state.gzip_flags, update_checksum, &mut state.checksum, &mut state.crc_fold, ); } } if let Some(msg) = state.error_message { assert!(msg.ends_with('\0')); stream.msg = msg.as_ptr() as *mut u8 as *mut core::ffi::c_char; } stream.data_type = state.decoding_state(); if ((in_read == 0 && out_written == 0) || flush == InflateFlush::Finish as _) && err == (ReturnCode::Ok as _) { ReturnCode::BufError 
as _ } else { err as _ } } fn syncsearch(mut got: usize, buf: &[u8]) -> (usize, usize) { let len = buf.len(); let mut next = 0; while next < len && got < 4 { if buf[next] == if got < 2 { 0 } else { 0xff } { got += 1; } else if buf[next] != 0 { got = 0; } else { got = 4 - got; } next += 1; } (got, next) } pub fn sync(stream: &mut InflateStream) -> ReturnCode { let state = &mut stream.state; if stream.avail_in == 0 && state.bit_reader.bits_in_buffer() < 8 { return ReturnCode::BufError; } /* if first time, start search in bit buffer */ if !matches!(state.mode, Mode::Sync) { state.mode = Mode::Sync; let (buf, len) = state.bit_reader.start_sync_search(); (state.have, _) = syncsearch(0, &buf[..len]); } // search available input // SAFETY: user guarantees that pointer and length are valid. let slice = unsafe { core::slice::from_raw_parts(stream.next_in, stream.avail_in as usize) }; let len; (state.have, len) = syncsearch(state.have, slice); // SAFETY: syncsearch() returns an index that is in-bounds of the slice. stream.next_in = unsafe { stream.next_in.add(len) }; stream.avail_in -= len as u32; stream.total_in += len as z_size; /* return no joy or set up to restart inflate() on a new block */ if state.have != 4 { return ReturnCode::DataError; } if state.gzip_flags == -1 { state.wrap = 0; /* if no header yet, treat as raw */ } else { state.wrap &= !4; /* no point in computing a check value now */ } let flags = state.gzip_flags; let total_in = stream.total_in; let total_out = stream.total_out; reset(stream); stream.total_in = total_in; stream.total_out = total_out; stream.state.gzip_flags = flags; stream.state.mode = Mode::Type; ReturnCode::Ok } /* Returns true if inflate is currently at the end of a block generated by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored block. When decompressing, PPP checks that at the end of input packet, inflate is waiting for these length bytes. */ pub fn sync_point(stream: &mut InflateStream) -> bool { matches!(stream.state.mode, Mode::Stored) && stream.state.bit_reader.bits_in_buffer() == 0 } pub unsafe fn copy<'a>( dest: &mut MaybeUninit>, source: &InflateStream<'a>, ) -> ReturnCode { if source.next_out.is_null() || (source.next_in.is_null() && source.avail_in != 0) { return ReturnCode::StreamError; } // Safety: source and dest are both mutable references, so guaranteed not to overlap. // dest being a reference to maybe uninitialized memory makes a copy of 1 DeflateStream valid. unsafe { core::ptr::copy_nonoverlapping(source, dest.as_mut_ptr(), 1); } // allocated here to have the same order as zlib let Some(state_allocation) = source.alloc.allocate_raw::() else { return ReturnCode::MemError; }; let state = &source.state; // SAFETY: an initialized Writer is a valid MaybeUninit. 
let writer: MaybeUninit = unsafe { core::ptr::read(&state.writer as *const _ as *const MaybeUninit) }; let mut copy = State { mode: state.mode, flags: state.flags, wrap: state.wrap, len_table: state.len_table, dist_table: state.dist_table, wbits: state.wbits, window: Window::empty(), head: None, ncode: state.ncode, nlen: state.nlen, ndist: state.ndist, have: state.have, next: state.next, bit_reader: state.bit_reader, writer: Writer::new(&mut []), total: state.total, length: state.length, offset: state.offset, extra: state.extra, back: state.back, was: state.was, chunksize: state.chunksize, in_available: state.in_available, out_available: state.out_available, lens: state.lens, work: state.work, error_message: state.error_message, flush: state.flush, checksum: state.checksum, crc_fold: state.crc_fold, dmax: state.dmax, gzip_flags: state.gzip_flags, codes_codes: state.codes_codes, len_codes: state.len_codes, dist_codes: state.dist_codes, }; if !state.window.is_empty() { let Some(window) = state.window.clone_in(&source.alloc) else { // SAFETY: state_allocation is not used again. unsafe { source.alloc.deallocate(state_allocation.as_ptr(), 1) }; return ReturnCode::MemError; }; copy.window = window; } // write the cloned state into state_ptr unsafe { state_allocation.as_ptr().write(copy) }; // FIXME: write is stable for NonNull since 1.80.0 // insert the state_ptr into `dest` let field_ptr = unsafe { core::ptr::addr_of_mut!((*dest.as_mut_ptr()).state) }; unsafe { core::ptr::write(field_ptr as *mut *mut State, state_allocation.as_ptr()) }; // update the writer; it cannot be cloned so we need to use some shennanigans let field_ptr = unsafe { core::ptr::addr_of_mut!((*dest.as_mut_ptr()).state.writer) }; unsafe { core::ptr::copy(writer.as_ptr(), field_ptr, 1) }; // update the gzhead field (it contains a mutable reference so we need to be careful let field_ptr = unsafe { core::ptr::addr_of_mut!((*dest.as_mut_ptr()).state.head) }; unsafe { core::ptr::copy(&source.state.head, field_ptr, 1) }; ReturnCode::Ok } pub fn undermine(stream: &mut InflateStream, subvert: i32) -> ReturnCode { stream.state.flags.update(Flags::SANE, (!subvert) != 0); ReturnCode::Ok } pub fn mark(stream: &InflateStream) -> c_long { if stream.next_out.is_null() || (stream.next_in.is_null() && stream.avail_in != 0) { return c_long::MIN; } let state = &stream.state; let length = match state.mode { Mode::CopyBlock => state.length, Mode::Match => state.was - state.length, _ => 0, }; (((state.back as c_long) as c_ulong) << 16) as c_long + length as c_long } pub fn set_dictionary(stream: &mut InflateStream, dictionary: &[u8]) -> ReturnCode { if stream.state.wrap != 0 && !matches!(stream.state.mode, Mode::Dict) { return ReturnCode::StreamError; } // check for correct dictionary identifier if matches!(stream.state.mode, Mode::Dict) { let dictid = adler32(1, dictionary); if dictid != stream.state.checksum { return ReturnCode::DataError; } } let err = 'blk: { // initialize the window if needed if stream.state.window.size() == 0 { match Window::new_in(&stream.alloc, stream.state.wbits as usize) { None => break 'blk ReturnCode::MemError, Some(window) => stream.state.window = window, } } stream.state.window.extend( dictionary, stream.state.gzip_flags, false, &mut stream.state.checksum, &mut stream.state.crc_fold, ); ReturnCode::Ok }; if err != ReturnCode::Ok { stream.state.mode = Mode::Mem; return ReturnCode::MemError; } stream.state.flags.update(Flags::HAVE_DICT, true); ReturnCode::Ok } pub fn end<'a>(stream: &'a mut InflateStream<'a>) -> &'a 
mut z_stream { let alloc = stream.alloc; let mut window = Window::empty(); core::mem::swap(&mut window, &mut stream.state.window); // safety: window is not used again if !window.is_empty() { let (ptr, len) = window.into_raw_parts(); unsafe { alloc.deallocate(ptr, len) }; } let stream = stream.as_z_stream_mut(); let state_ptr = core::mem::replace(&mut stream.state, core::ptr::null_mut()); // safety: state_ptr is not used again unsafe { alloc.deallocate(state_ptr as *mut State, 1) }; stream } /// # Safety /// /// The caller must guarantee: /// /// * If `head` is `Some`: /// - If `head.extra` is not NULL, it must be writable for at least `head.extra_max` bytes /// - if `head.name` is not NULL, it must be writable for at least `head.name_max` bytes /// - if `head.comment` is not NULL, it must be writable for at least `head.comm_max` bytes pub unsafe fn get_header<'a>( stream: &mut InflateStream<'a>, head: Option<&'a mut gz_header>, ) -> ReturnCode { if (stream.state.wrap & 2) == 0 { return ReturnCode::StreamError; } stream.state.head = head.map(|head| { head.done = 0; head }); ReturnCode::Ok } /// # Safety /// /// The `dictionary` must have enough space for the dictionary. pub unsafe fn get_dictionary(stream: &InflateStream<'_>, dictionary: *mut u8) -> usize { let whave = stream.state.window.have(); let wnext = stream.state.window.next(); if !dictionary.is_null() { unsafe { core::ptr::copy_nonoverlapping( stream.state.window.as_ptr().add(wnext), dictionary, whave - wnext, ); core::ptr::copy_nonoverlapping( stream.state.window.as_ptr(), dictionary.add(whave).sub(wnext).cast(), wnext, ); } } stream.state.window.have() } #[cfg(test)] mod tests { use super::*; #[test] fn uncompress_buffer_overflow() { let mut output = [0; 1 << 13]; let input = [ 72, 137, 58, 0, 3, 39, 255, 255, 255, 255, 255, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 184, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 184, 14, 14, 14, 14, 14, 14, 14, 63, 14, 14, 14, 14, 14, 14, 14, 14, 184, 14, 14, 255, 14, 103, 14, 14, 14, 14, 14, 14, 61, 14, 255, 255, 63, 14, 14, 14, 14, 14, 14, 14, 14, 184, 14, 14, 255, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 6, 14, 14, 14, 14, 14, 14, 14, 14, 71, 4, 137, 106, ]; let config = InflateConfig { window_bits: 15 }; let (_decompressed, err) = uncompress_slice(&mut output, &input, config); assert_eq!(err, ReturnCode::DataError); } } zlib-rs-0.5.2/src/lib.rs000064400000000000000000000203241046102023000131160ustar 00000000000000#![doc = core::include_str!("../README.md")] #![cfg_attr(not(any(test, feature = "rust-allocator")), no_std)] #[cfg(any(feature = "rust-allocator", feature = "c-allocator"))] extern crate alloc; mod adler32; pub mod allocate; pub mod c_api; mod cpu_features; pub mod crc32; pub mod deflate; pub mod inflate; mod weak_slice; pub use adler32::{adler32, adler32_combine}; pub use crc32::{crc32, crc32_combine, get_crc_table}; #[macro_export] macro_rules! trace { ($($arg:tt)*) => { #[cfg(feature = "ZLIB_DEBUG")] { eprint!($($arg)*) } }; } /// Maximum size of the dynamic table. The maximum number of code structures is /// 1924, which is the sum of 1332 for literal/length codes and 592 for distance /// codes. These values were found by exhaustive searches using the program /// examples/enough.c found in the zlib distributions. The arguments to that /// program are the number of symbols, the initial root table size, and the /// maximum bit length of a code. 
"enough 286 10 15" for literal/length codes /// returns 1332, and "enough 30 9 15" for distance codes returns 592. /// The initial root table size (10 or 9) is found in the fifth argument of the /// inflate_table() calls in inflate.c and infback.c. If the root table size is /// changed, then these maximum sizes would be need to be recalculated and /// updated. #[allow(unused)] pub(crate) const ENOUGH: usize = ENOUGH_LENS + ENOUGH_DISTS; pub(crate) const ENOUGH_LENS: usize = 1332; pub(crate) const ENOUGH_DISTS: usize = 592; /// initial adler-32 hash value pub(crate) const ADLER32_INITIAL_VALUE: usize = 1; /// initial crc-32 hash value pub(crate) const CRC32_INITIAL_VALUE: u32 = 0; pub const MIN_WBITS: i32 = 8; // 256b LZ77 window pub const MAX_WBITS: i32 = 15; // 32kb LZ77 window pub(crate) const DEF_WBITS: i32 = MAX_WBITS; #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] #[cfg_attr(feature = "__internal-fuzz", derive(arbitrary::Arbitrary))] pub enum DeflateFlush { #[default] /// if flush is set to `NoFlush`, that allows deflate to decide how much data /// to accumulate before producing output, in order to maximize compression. NoFlush = 0, /// If flush is set to `PartialFlush`, all pending output is flushed to the /// output buffer, but the output is not aligned to a byte boundary. All of the /// input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. /// This completes the current deflate block and follows it with an empty fixed /// codes block that is 10 bits long. This assures that enough bytes are output /// in order for the decompressor to finish the block before the empty fixed /// codes block. PartialFlush = 1, /// If the parameter flush is set to `SyncFlush`, all pending output is /// flushed to the output buffer and the output is aligned on a byte boundary, so /// that the decompressor can get all input data available so far. (In /// particular avail_in is zero after the call if enough output space has been /// provided before the call.) Flushing may degrade compression for some /// compression algorithms and so it should be used only when necessary. This /// completes the current deflate block and follows it with an empty stored block /// that is three bits plus filler bits to the next byte, followed by four bytes /// (00 00 ff ff). SyncFlush = 2, /// If flush is set to `FullFlush`, all output is flushed as with /// Z_SYNC_FLUSH, and the compression state is reset so that decompression can /// restart from this point if previous compressed data has been damaged or if /// random access is desired. Using `FullFlush` too often can seriously degrade /// compression. FullFlush = 3, /// If the parameter flush is set to `Finish`, pending input is processed, /// pending output is flushed and deflate returns with `StreamEnd` if there was /// enough output space. If deflate returns with `Ok` or `BufError`, this /// function must be called again with `Finish` and more output space (updated /// avail_out) but no more input data, until it returns with `StreamEnd` or an /// error. After deflate has returned `StreamEnd`, the only possible operations /// on the stream are deflateReset or deflateEnd. /// /// `Finish` can be used in the first deflate call after deflateInit if all the /// compression is to be done in a single step. In order to complete in one /// call, avail_out must be at least the value returned by deflateBound (see /// below). Then deflate is guaranteed to return `StreamEnd`. 
If not enough /// output space is provided, deflate will not return `StreamEnd`, and it must /// be called again as described above. Finish = 4, /// If flush is set to `Block`, a deflate block is completed and emitted, as /// for `SyncFlush`, but the output is not aligned on a byte boundary, and up to /// seven bits of the current block are held to be written as the next byte after /// the next deflate block is completed. In this case, the decompressor may not /// be provided enough bits at this point in order to complete decompression of /// the data provided so far to the compressor. It may need to wait for the next /// block to be emitted. This is for advanced applications that need to control /// the emission of deflate blocks. Block = 5, } impl TryFrom for DeflateFlush { type Error = (); fn try_from(value: i32) -> Result { match value { 0 => Ok(Self::NoFlush), 1 => Ok(Self::PartialFlush), 2 => Ok(Self::SyncFlush), 3 => Ok(Self::FullFlush), 4 => Ok(Self::Finish), 5 => Ok(Self::Block), _ => Err(()), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum InflateFlush { #[default] NoFlush = 0, SyncFlush = 2, Finish = 4, Block = 5, Trees = 6, } impl TryFrom for InflateFlush { type Error = (); fn try_from(value: i32) -> Result { match value { 0 => Ok(Self::NoFlush), 2 => Ok(Self::SyncFlush), 4 => Ok(Self::Finish), 5 => Ok(Self::Block), 6 => Ok(Self::Trees), _ => Err(()), } } } #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub(crate) struct Code { /// operation, extra bits, table bits pub op: u8, /// bits in this part of the code pub bits: u8, /// offset in table or code value pub val: u16, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] #[repr(i32)] pub enum ReturnCode { Ok = 0, StreamEnd = 1, NeedDict = 2, ErrNo = -1, StreamError = -2, DataError = -3, MemError = -4, BufError = -5, VersionError = -6, } impl From for ReturnCode { fn from(value: i32) -> Self { match Self::try_from_c_int(value) { Some(value) => value, None => panic!("invalid return code {value}"), } } } impl ReturnCode { const fn error_message_str(self) -> &'static str { match self { ReturnCode::Ok => "\0", ReturnCode::StreamEnd => "stream end\0", ReturnCode::NeedDict => "need dictionary\0", ReturnCode::ErrNo => "file error\0", ReturnCode::StreamError => "stream error\0", ReturnCode::DataError => "data error\0", ReturnCode::MemError => "insufficient memory\0", ReturnCode::BufError => "buffer error\0", ReturnCode::VersionError => "incompatible version\0", } } pub const fn error_message(self) -> *const core::ffi::c_char { let msg = self.error_message_str(); msg.as_ptr().cast::() } pub const fn try_from_c_int(err: core::ffi::c_int) -> Option { match err { 0 => Some(Self::Ok), 1 => Some(Self::StreamEnd), 2 => Some(Self::NeedDict), -1 => Some(Self::ErrNo), -2 => Some(Self::StreamError), -3 => Some(Self::DataError), -4 => Some(Self::MemError), -5 => Some(Self::BufError), -6 => Some(Self::VersionError), _ => None, } } } zlib-rs-0.5.2/src/weak_slice.rs000064400000000000000000000054241046102023000144620ustar 00000000000000use core::marker::PhantomData; /// a mutable "slice" (bundle of pointer and length). The main goal of this type is passing MIRI /// with stacked borrows. In particular, storing a standard slice in data structures violates the /// stacked borrows rule when that slice is deallocated. By only materializing the slice when /// needed for data access, hence bounding the lifetime more tightly, this restriction is circumvented. 
#[derive(Debug)] pub(crate) struct WeakSliceMut<'a, T> { ptr: *mut T, len: usize, _marker: PhantomData<&'a mut [T]>, } impl<'a, T> WeakSliceMut<'a, T> { /// # Safety /// /// The arguments must satisfy the requirements of [`core::slice::from_raw_parts_mut`]. The /// difference versus a slice is that the slice requirements are only enforced when a slice is /// needed, so in practice we mostly get the bounds checking and other convenient slice APIs, /// without the exact correctness constraints of a rust core/std slice. pub(crate) unsafe fn from_raw_parts_mut(ptr: *mut T, len: usize) -> Self { Self { ptr, len, _marker: PhantomData, } } pub(crate) fn into_raw_parts(self) -> (*mut T, usize) { (self.ptr, self.len) } pub(crate) fn as_slice(&self) -> &'a [T] { unsafe { core::slice::from_raw_parts(self.ptr, self.len) } } pub(crate) fn as_mut_slice(&mut self) -> &'a mut [T] { unsafe { core::slice::from_raw_parts_mut(self.ptr, self.len) } } pub(crate) fn as_ptr(&self) -> *const T { self.ptr } pub(crate) fn as_mut_ptr(&mut self) -> *mut T { self.ptr } pub(crate) fn len(&self) -> usize { self.len } pub(crate) fn is_empty(&self) -> bool { self.len() == 0 } pub(crate) fn empty() -> Self { let buf = &mut []; Self { ptr: buf.as_mut_ptr(), len: buf.len(), _marker: PhantomData, } } } #[derive(Debug)] pub(crate) struct WeakArrayMut<'a, T, const N: usize> { ptr: *mut [T; N], _marker: PhantomData<&'a mut [T; N]>, } impl<'a, T, const N: usize> WeakArrayMut<'a, T, N> { /// # Safety /// /// The pointer must be [convertable to a reference](https://doc.rust-lang.org/std/ptr/index.html#pointer-to-reference-conversion). pub(crate) unsafe fn from_ptr(ptr: *mut [T; N]) -> Self { Self { ptr, _marker: PhantomData, } } pub(crate) fn as_slice(&self) -> &'a [T] { unsafe { core::slice::from_raw_parts(self.ptr.cast(), N) } } pub(crate) fn as_mut_slice(&mut self) -> &'a mut [T] { unsafe { core::slice::from_raw_parts_mut(self.ptr.cast(), N) } } pub(crate) fn as_mut_ptr(&mut self) -> *mut [T; N] { self.ptr } }
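// Illustrative sketch (an addition, not part of the upstream file): a minimal test showing
// the usage pattern that the `WeakSliceMut` doc comment above describes — store only the raw
// pointer/length pair, and materialize a real slice just for the duration of each access.
// The module and test names are hypothetical.
#[cfg(test)]
mod weak_slice_sketch {
    use super::*;

    #[test]
    fn roundtrip_through_weak_slice() {
        let mut buf = [0u8; 4];

        // SAFETY: `buf` is a live allocation of 4 `u8`s that outlives `weak`, so the
        // pointer/length pair satisfies the `from_raw_parts_mut` requirements.
        let mut weak = unsafe { WeakSliceMut::from_raw_parts_mut(buf.as_mut_ptr(), buf.len()) };

        // A real slice only exists while we actually read or write through it.
        weak.as_mut_slice().copy_from_slice(&[1, 2, 3, 4]);
        assert_eq!(weak.as_slice(), &[1, 2, 3, 4]);
        assert_eq!(weak.len(), 4);
        assert!(!weak.is_empty());

        // The raw parts can be handed back, e.g. for deallocation by the original allocator.
        let (_ptr, len) = weak.into_raw_parts();
        assert_eq!(len, 4);
    }
}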