miniz_oxide/deflate/core.rs

//! Streaming compression functionality.

use std::convert::TryInto;
use std::io::{self, Cursor, Seek, SeekFrom, Write};
use std::{cmp, mem};

use super::super::*;
use super::deflate_flags::*;
use super::CompressionLevel;
use crate::deflate::buffer::{
    HashBuffers, LocalBuf, LZ_CODE_BUF_SIZE, LZ_DICT_FULL_SIZE, OUT_BUF_SIZE,
};
use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER, MZ_ADLER32_INIT};
use crate::DataFormat;

const MAX_PROBES_MASK: i32 = 0xFFF;

const MAX_SUPPORTED_HUFF_CODESIZE: usize = 32;

/// Length code for length values.
#[rustfmt::skip]
const LEN_SYM: [u16; 256] = [
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268,
    269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
    273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274,
    275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
    277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285
];

/// Number of extra bits for length values.
#[rustfmt::skip]
const LEN_EXTRA: [u8; 256] = [
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0
];

/// Distance codes for distances smaller than 512.
#[rustfmt::skip]
const SMALL_DIST_SYM: [u8; 512] = [
     0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,
     8,  8,  8,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17
];

/// Number of extra bits for distances smaller than 512.
#[rustfmt::skip]
const SMALL_DIST_EXTRA: [u8; 512] = [
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
];

/// Distance codes for distances of 512 and above.
#[rustfmt::skip]
const LARGE_DIST_SYM: [u8; 128] = [
     0,  0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
    24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
];

/// Number of extra bits for distances of 512 and above.
#[rustfmt::skip]
const LARGE_DIST_EXTRA: [u8; 128] = [
     0,  0,  8,  8,  9,  9,  9,  9, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
];

#[rustfmt::skip]
const BITMASKS: [u32; 17] = [
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF
];

/// The maximum number of checks for matches in the hash table the compressor will make for each
/// compression level.
const NUM_PROBES: [u32; 11] = [0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500];

#[derive(Copy, Clone)]
struct SymFreq {
    key: u16,
    sym_index: u16,
}

pub mod deflate_flags {
    /// Whether to use a zlib wrapper.
    pub const TDEFL_WRITE_ZLIB_HEADER: u32 = 0x0000_1000;
    /// Should we compute the adler32 checksum.
    pub const TDEFL_COMPUTE_ADLER32: u32 = 0x0000_2000;
    /// Should we use greedy parsing (as opposed to lazy parsing, where we look ahead one or
    /// more bytes to check for better matches)?
    pub const TDEFL_GREEDY_PARSING_FLAG: u32 = 0x0000_4000;
    /// Used in miniz to skip zero-initializing hash and dict. We don't do this here, so
    /// this flag is ignored.
    pub const TDEFL_NONDETERMINISTIC_PARSING_FLAG: u32 = 0x0000_8000;
    /// Only look for matches with a distance of 1.
    pub const TDEFL_RLE_MATCHES: u32 = 0x0001_0000;
    /// Only use matches that are at least 6 bytes long.
    pub const TDEFL_FILTER_MATCHES: u32 = 0x0002_0000;
    /// Force the compressor to only output static blocks. (Blocks using the default huffman codes
    /// specified in the deflate specification.)
    pub const TDEFL_FORCE_ALL_STATIC_BLOCKS: u32 = 0x0004_0000;
    /// Force the compressor to only output raw/uncompressed blocks.
    pub const TDEFL_FORCE_ALL_RAW_BLOCKS: u32 = 0x0008_0000;
}

/// Strategy setting for compression.
///
/// The non-default settings offer some special-case compression variants.
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum CompressionStrategy {
    /// Don't use any of the special strategies.
    Default = 0,
    /// Only use matches that are at least 5 bytes long.
    Filtered = 1,
    /// Don't look for matches, only huffman encode the literals.
    HuffmanOnly = 2,
    /// Only look for matches with a distance of 1, i.e. do run-length encoding only.
    RLE = 3,
    /// Only use static/fixed blocks. (Blocks using the default huffman codes
    /// specified in the deflate specification.)
    Fixed = 4,
}

/// A list of deflate flush types.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TDEFLFlush {
    /// Compress as much as there is space for, and then return
    /// waiting for more input.
    None = 0,
    /// Try to flush the current data and output an empty raw block.
    Sync = 2,
    /// Same as sync, but reset the dictionary so that the following data does not depend
    /// on previous data.
    Full = 3,
    /// Try to flush everything and end the stream.
    Finish = 4,
}

impl From<MZFlush> for TDEFLFlush {
    fn from(flush: MZFlush) -> Self {
        match flush {
            MZFlush::None => TDEFLFlush::None,
            MZFlush::Sync => TDEFLFlush::Sync,
            MZFlush::Full => TDEFLFlush::Full,
            MZFlush::Finish => TDEFLFlush::Finish,
            _ => TDEFLFlush::None, // TODO: ??? What to do ???
        }
    }
}

impl TDEFLFlush {
    pub fn new(flush: i32) -> Result<Self, MZError> {
        match flush {
            0 => Ok(TDEFLFlush::None),
            2 => Ok(TDEFLFlush::Sync),
            3 => Ok(TDEFLFlush::Full),
            4 => Ok(TDEFLFlush::Finish),
            _ => Err(MZError::Param),
        }
    }
}
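
// Sketch test (not from upstream): `TDEFLFlush::new` accepts only the raw
// values listed above; e.g. 1 (a partial flush in zlib terms) is rejected
// with a param error.
#[cfg(test)]
mod tdefl_flush_test {
    use super::*;

    #[test]
    fn flush_from_raw_value() {
        assert_eq!(TDEFLFlush::new(0), Ok(TDEFLFlush::None));
        assert_eq!(TDEFLFlush::new(4), Ok(TDEFLFlush::Finish));
        assert_eq!(TDEFLFlush::new(1), Err(MZError::Param));
    }
}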

/// Return status codes.
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TDEFLStatus {
    BadParam = -2,
    PutBufFailed = -1,
    Okay = 0,
    Done = 1,
}

const MAX_HUFF_SYMBOLS: usize = 288;
/// Number of bits used in hash values.
const LZ_HASH_BITS: i32 = 15;
/// Mask for the hash table used in fast compression mode.
const LEVEL1_HASH_SIZE_MASK: u32 = 4095;
/// How many bits to shift when updating the current hash value.
const LZ_HASH_SHIFT: i32 = (LZ_HASH_BITS + 2) / 3;
/// Size of the chained hash tables.
const LZ_HASH_SIZE: usize = 1 << LZ_HASH_BITS;

/// The number of huffman tables used by the compressor:
/// literal/length, distances, and the code lengths used to encode the other two tables.
const MAX_HUFF_TABLES: usize = 3;
/// Literal/length codes.
const MAX_HUFF_SYMBOLS_0: usize = 288;
/// Distance codes.
const MAX_HUFF_SYMBOLS_1: usize = 32;
/// Huffman length values.
const MAX_HUFF_SYMBOLS_2: usize = 19;
/// Size of the LZ dictionary (window).
pub(crate) const LZ_DICT_SIZE: usize = 32_768;
/// Mask used when stepping through the hash chains.
const LZ_DICT_SIZE_MASK: u32 = LZ_DICT_SIZE as u32 - 1;
/// The minimum length of a match.
const MIN_MATCH_LEN: u32 = 3;
/// The maximum length of a match.
pub(crate) const MAX_MATCH_LEN: usize = 258;

const DEFAULT_FLAGS: u32 = NUM_PROBES[4] | TDEFL_WRITE_ZLIB_HEADER;

mod zlib {
    const DEFAULT_CM: u8 = 8;
    const DEFAULT_CINFO: u8 = 7 << 4;
    const _DEFAULT_FDICT: u8 = 0;
    const DEFAULT_CMF: u8 = DEFAULT_CM | DEFAULT_CINFO;
    /// The 16-bit value consisting of CMF and FLG must be divisible by this to be valid.
    const FCHECK_DIVISOR: u8 = 31;

    /// Generate FCHECK from CMF and FLG (without FCHECK) so that they are correct according to
    /// the specification, i.e. (CMF*256 + FLG) % 31 = 0.
    /// Returns flg with the FCHECK bits added (any existing FCHECK bits are ignored).
    fn add_fcheck(cmf: u8, flg: u8) -> u8 {
        let rem = ((usize::from(cmf) * 256) + usize::from(flg)) % usize::from(FCHECK_DIVISOR);

        // Clear existing FCHECK if any
        let flg = flg & 0b11100000;

        // Casting is safe as rem can't overflow since it is a value mod 31
        // We can simply add the value to flg as (31 - rem) will never be above 2^5
        flg + (FCHECK_DIVISOR - rem as u8)
    }
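
    // Worked example: with the default CMF of 0x78 and FLEVEL bits 0b10
    // (flg = 0x80), rem = (0x78 * 256 + 0x80) % 31 = 3, so FCHECK = 31 - 3 = 28
    // and the full header becomes the familiar 0x78 0x9C.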

    fn zlib_level_from_flags(flags: u32) -> u8 {
        use super::NUM_PROBES;

        let num_probes = flags & (super::MAX_PROBES_MASK as u32);
        if flags & super::TDEFL_GREEDY_PARSING_FLAG != 0 {
            if num_probes <= 1 {
                0
            } else {
                1
            }
        } else if num_probes >= NUM_PROBES[9] {
            3
        } else {
            2
        }
    }

    /// Get the zlib header for the level using the default window size and no
    /// dictionary.
    fn header_from_level(level: u8) -> [u8; 2] {
        let cmf = DEFAULT_CMF;
        [cmf, add_fcheck(cmf, level << 6)]
    }

    /// Create a zlib header from the given compression flags.
    /// Only level is considered.
    pub fn header_from_flags(flags: u32) -> [u8; 2] {
        let level = zlib_level_from_flags(flags);
        header_from_level(level)
    }

    #[cfg(test)]
    mod test {
        #[test]
        fn zlib() {
            use super::super::*;
            use super::*;

            let test_level = |level, expected| {
                let flags = create_comp_flags_from_zip_params(
                    level,
                    MZ_DEFAULT_WINDOW_BITS,
                    CompressionStrategy::Default as i32,
                );
                assert_eq!(zlib_level_from_flags(flags), expected);
            };

            assert_eq!(zlib_level_from_flags(DEFAULT_FLAGS), 2);
            test_level(0, 0);
            test_level(1, 0);
            test_level(2, 1);
            test_level(3, 1);
            for i in 4..=8 {
                test_level(i, 2)
            }
            test_level(9, 3);
            test_level(10, 3);
        }

        #[test]
        fn test_header() {
            let header = super::header_from_level(3);
            assert_eq!(
                ((usize::from(header[0]) * 256) + usize::from(header[1])) % 31,
                0
            );
        }
    }
}

fn memset<T: Copy>(slice: &mut [T], val: T) {
    for x in slice {
        *x = val
    }
}

#[cfg(test)]
#[inline]
fn write_u16_le(val: u16, slice: &mut [u8], pos: usize) {
    slice[pos] = val as u8;
    slice[pos + 1] = (val >> 8) as u8;
}

// Read the two bytes starting at pos and interpret them as an u16.
#[inline]
fn read_u16_le(slice: &[u8], pos: usize) -> u16 {
    // The compiler is smart enough to optimize this into an unaligned load.
    slice[pos] as u16 | ((slice[pos + 1] as u16) << 8)
}

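// Sketch test (not from upstream): round-trips a value through the two
// little-endian helpers above.
#[cfg(test)]
mod le_helper_test {
    use super::{read_u16_le, write_u16_le};

    #[test]
    fn round_trip_u16() {
        let mut buf = [0u8; 4];
        write_u16_le(0xBEEF, &mut buf, 1);
        assert_eq!(read_u16_le(&buf, 1), 0xBEEF);
    }
}
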
/// Main compression struct.
pub struct CompressorOxide {
    lz: LZOxide,
    params: ParamsOxide,
    huff: Box<HuffmanOxide>,
    dict: DictOxide,
}

impl CompressorOxide {
    /// Create a new `CompressorOxide` with the given flags.
    ///
    /// # Notes
    /// This function may be changed to take different parameters in the future.
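    ///
    /// # Example
    ///
    /// An illustrative sketch (assuming the crate's usual public exports):
    ///
    /// ```
    /// use miniz_oxide::deflate::core::{create_comp_flags_from_zip_params, CompressorOxide};
    ///
    /// // Flags for compression level 6, a zlib wrapper (positive window bits)
    /// // and the default strategy.
    /// let flags = create_comp_flags_from_zip_params(6, 15, 0);
    /// let compressor = CompressorOxide::new(flags);
    /// // No data has been compressed yet, so the checksum is the adler32 seed.
    /// assert_eq!(compressor.adler32(), 1);
    /// ```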
    pub fn new(flags: u32) -> Self {
        CompressorOxide {
            lz: LZOxide::new(),
            params: ParamsOxide::new(flags),
            // Put HuffmanOxide on the heap with the default trick to avoid
            // excessive stack copies.
            huff: Box::default(),
            dict: DictOxide::new(flags),
        }
    }

    /// Get the adler32 checksum of the currently encoded data.
    pub fn adler32(&self) -> u32 {
        self.params.adler32
    }

    /// Get the return status of the previous [`compress`](fn.compress.html)
    /// call with this compressor.
    pub fn prev_return_status(&self) -> TDEFLStatus {
        self.params.prev_return_status
    }

    /// Get the raw compressor flags.
    ///
    /// # Notes
    /// This function may be deprecated or changed in the future to use more rust-style flags.
    pub fn flags(&self) -> i32 {
        self.params.flags as i32
    }

    /// Returns whether the compressor is wrapping the data in a zlib format or not.
    pub fn data_format(&self) -> DataFormat {
        if (self.params.flags & TDEFL_WRITE_ZLIB_HEADER) != 0 {
            DataFormat::Zlib
        } else {
            DataFormat::Raw
        }
    }

    /// Reset the state of the compressor, keeping the same parameters.
    ///
    /// This avoids re-allocating data.
    pub fn reset(&mut self) {
        // The LZ buffer and huffman data have no settings or dynamic memory
        // that need to be saved, so we simply replace them.
        self.lz = LZOxide::new();
        self.params.reset();
        *self.huff = HuffmanOxide::default();
        self.dict.reset();
    }

    /// Set the compression level of the compressor.
    ///
    /// Using this to change the level after compression has started is supported.
    /// # Notes
    /// The compression strategy will be reset to the default one when this is called.
    pub fn set_compression_level(&mut self, level: CompressionLevel) {
        let format = self.data_format();
        self.set_format_and_level(format, level as u8);
    }

    /// Set the compression level of the compressor using an integer value.
    ///
    /// Using this to change the level after compression has started is supported.
    /// # Notes
    /// The compression strategy will be reset to the default one when this is called.
    pub fn set_compression_level_raw(&mut self, level: u8) {
        let format = self.data_format();
        self.set_format_and_level(format, level);
    }

    /// Update the compression settings of the compressor.
    ///
    /// Changing the `DataFormat` after compression has started will result in
    /// a corrupted stream.
    ///
    /// # Notes
    /// This function is mainly intended for setting the initial settings after e.g. creating with
    /// `default` or after calling `CompressorOxide::reset()`, and behaviour may be changed
    /// to disallow calling it after starting compression in the future.
    pub fn set_format_and_level(&mut self, data_format: DataFormat, level: u8) {
        let flags = create_comp_flags_from_zip_params(
            level.into(),
            data_format.to_window_bits(),
            CompressionStrategy::Default as i32,
        );
        self.params.update_flags(flags);
        self.dict.update_flags(flags);
    }
}

impl Default for CompressorOxide {
    /// Initialize the compressor with a level of 4, zlib wrapper and
    /// the default strategy.
    #[inline(always)]
    fn default() -> Self {
        CompressorOxide {
            lz: LZOxide::new(),
            params: ParamsOxide::new(DEFAULT_FLAGS),
            // Put HuffmanOxide on the heap with the default trick to avoid
            // excessive stack copies.
            huff: Box::default(),
            dict: DictOxide::new(DEFAULT_FLAGS),
        }
    }
}

/// Callback function used in `compress_to_output`.
pub struct CallbackFunc<'a> {
    pub put_buf_func: Box<dyn FnMut(&[u8]) -> bool + 'a>,
}
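
// Illustrative sketch (not part of the upstream API beyond `CallbackFunc`
// itself): wrapping a closure that copies each flushed chunk into a `Vec<u8>`:
//
//     let mut output = Vec::new();
//     let callback = CallbackFunc {
//         put_buf_func: Box::new(|data: &[u8]| {
//             output.extend_from_slice(data);
//             // Returning false signals a failed write and aborts compression.
//             true
//         }),
//     };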

impl<'a> CallbackFunc<'a> {
    fn flush_output(
        &mut self,
        saved_output: SavedOutputBufferOxide,
        params: &mut ParamsOxide,
    ) -> i32 {
        // TODO: As this could be unsafe since we can't verify the function
        // pointer, this whole function should maybe be unsafe as well.
        let call_success = (self.put_buf_func)(&params.local_buf.b[0..saved_output.pos as usize]);

        if !call_success {
            params.prev_return_status = TDEFLStatus::PutBufFailed;
            return params.prev_return_status as i32;
        }

        params.flush_remaining as i32
    }
}

struct CallbackBuf<'a> {
    pub out_buf: &'a mut [u8],
}

impl<'a> CallbackBuf<'a> {
    fn flush_output(
        &mut self,
        saved_output: SavedOutputBufferOxide,
        params: &mut ParamsOxide,
    ) -> i32 {
        if saved_output.local {
            let n = cmp::min(
                saved_output.pos as usize,
                self.out_buf.len() - params.out_buf_ofs,
            );
            (&mut self.out_buf[params.out_buf_ofs..params.out_buf_ofs + n])
                .copy_from_slice(&params.local_buf.b[..n]);

            params.out_buf_ofs += n;
            if saved_output.pos != n as u64 {
                params.flush_ofs = n as u32;
                params.flush_remaining = (saved_output.pos - n as u64) as u32;
            }
        } else {
            params.out_buf_ofs += saved_output.pos as usize;
        }

        params.flush_remaining as i32
    }
}

enum CallbackOut<'a> {
    Func(CallbackFunc<'a>),
    Buf(CallbackBuf<'a>),
}

impl<'a> CallbackOut<'a> {
    fn new_output_buffer<'b>(
        &'b mut self,
        local_buf: &'b mut [u8],
        out_buf_ofs: usize,
    ) -> OutputBufferOxide<'b> {
        let is_local;
        let buf_len = OUT_BUF_SIZE - 16;
        let chosen_buffer = match *self {
            CallbackOut::Buf(ref mut cb) if cb.out_buf.len() - out_buf_ofs >= OUT_BUF_SIZE => {
                is_local = false;
                &mut cb.out_buf[out_buf_ofs..out_buf_ofs + buf_len]
            }
            _ => {
                is_local = true;
                &mut local_buf[..buf_len]
            }
        };

        let cursor = Cursor::new(chosen_buffer);
        OutputBufferOxide {
            inner: cursor,
            local: is_local,
            bit_buffer: 0,
            bits_in: 0,
        }
    }
}

struct CallbackOxide<'a> {
    in_buf: Option<&'a [u8]>,
    in_buf_size: Option<&'a mut usize>,
    out_buf_size: Option<&'a mut usize>,
    out: CallbackOut<'a>,
}

impl<'a> CallbackOxide<'a> {
    fn new_callback_buf(in_buf: &'a [u8], out_buf: &'a mut [u8]) -> Self {
        CallbackOxide {
            in_buf: Some(in_buf),
            in_buf_size: None,
            out_buf_size: None,
            out: CallbackOut::Buf(CallbackBuf { out_buf }),
        }
    }

    fn new_callback_func(in_buf: &'a [u8], callback_func: CallbackFunc<'a>) -> Self {
        CallbackOxide {
            in_buf: Some(in_buf),
            in_buf_size: None,
            out_buf_size: None,
            out: CallbackOut::Func(callback_func),
        }
    }

    fn update_size(&mut self, in_size: Option<usize>, out_size: Option<usize>) {
        if let (Some(in_size), Some(size)) = (in_size, self.in_buf_size.as_mut()) {
            **size = in_size;
        }

        if let (Some(out_size), Some(size)) = (out_size, self.out_buf_size.as_mut()) {
            **size = out_size
        }
    }

    fn flush_output(
        &mut self,
        saved_output: SavedOutputBufferOxide,
        params: &mut ParamsOxide,
    ) -> i32 {
        if saved_output.pos == 0 {
            return params.flush_remaining as i32;
        }

        self.update_size(Some(params.src_pos), None);
        match self.out {
            CallbackOut::Func(ref mut cf) => cf.flush_output(saved_output, params),
            CallbackOut::Buf(ref mut cb) => cb.flush_output(saved_output, params),
        }
    }
}

struct OutputBufferOxide<'a> {
    pub inner: Cursor<&'a mut [u8]>,
    pub local: bool,

    pub bit_buffer: u32,
    pub bits_in: u32,
}

impl<'a> OutputBufferOxide<'a> {
    fn put_bits(&mut self, bits: u32, len: u32) {
        assert!(bits <= ((1u32 << len) - 1u32));
        self.bit_buffer |= bits << self.bits_in;
        self.bits_in += len;
        while self.bits_in >= 8 {
            let pos = self.inner.position();
            self.inner.get_mut()[pos as usize] = self.bit_buffer as u8;
            self.inner.set_position(pos + 1);
            self.bit_buffer >>= 8;
            self.bits_in -= 8;
        }
    }
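
    // Worked example of the LSB-first bit packing above: `put_bits(0b101, 3)`
    // followed by `put_bits(0b11111, 5)` leaves bit_buffer == 0b1111_1101, so
    // the byte 0xFD is written out and bits_in drops back to 0.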

    fn save(&self) -> SavedOutputBufferOxide {
        SavedOutputBufferOxide {
            pos: self.inner.position(),
            bit_buffer: self.bit_buffer,
            bits_in: self.bits_in,
            local: self.local,
        }
    }

    fn load(&mut self, saved: SavedOutputBufferOxide) {
        self.inner.set_position(saved.pos);
        self.bit_buffer = saved.bit_buffer;
        self.bits_in = saved.bits_in;
        self.local = saved.local;
    }

    fn pad_to_bytes(&mut self) {
        if self.bits_in != 0 {
            let len = 8 - self.bits_in;
            self.put_bits(0, len);
        }
    }
}

struct SavedOutputBufferOxide {
    pub pos: u64,
    pub bit_buffer: u32,
    pub bits_in: u32,
    pub local: bool,
}

struct BitBuffer {
    pub bit_buffer: u64,
    pub bits_in: u32,
}

impl BitBuffer {
    fn put_fast(&mut self, bits: u64, len: u32) {
        self.bit_buffer |= bits << self.bits_in;
        self.bits_in += len;
    }

    fn flush(&mut self, output: &mut OutputBufferOxide) -> io::Result<()> {
        let pos = output.inner.position() as usize;
        {
            // isolation to please borrow checker
            let inner = &mut (*output.inner.get_mut())[pos..pos + 8];
            let bytes = u64::to_le_bytes(self.bit_buffer);
            inner.copy_from_slice(&bytes);
        }
        output
            .inner
            .seek(SeekFrom::Current(i64::from(self.bits_in >> 3)))?;
        self.bit_buffer >>= self.bits_in & !7;
        self.bits_in &= 7;
        Ok(())
    }
}

/// A struct containing data about huffman codes and symbol frequencies.
///
/// NOTE: Only the literal/lengths have enough symbols to actually use
/// the full array. It's unclear why it's defined like this in miniz,
/// it could be for cache/alignment reasons.
struct HuffmanOxide {
    /// Number of occurrences of each symbol.
    pub count: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
    /// The bits of the huffman code assigned to the symbol
    pub codes: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
    /// The length of the huffman code assigned to the symbol.
    pub code_sizes: [[u8; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
}

/// Tables used for literal/lengths in `HuffmanOxide`.
const LITLEN_TABLE: usize = 0;
/// Tables for distances.
const DIST_TABLE: usize = 1;
/// Tables for the run-length encoded huffman lengths for literals/lengths/distances.
const HUFF_CODES_TABLE: usize = 2;

/// Status of RLE encoding of huffman code lengths.
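///
/// As emitted below (per RFC 1951): code 16 repeats the previous length 3-6
/// times (2 extra bits), code 17 repeats a zero length 3-10 times (3 extra
/// bits), and code 18 repeats a zero length 11-138 times (7 extra bits).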
struct RLE {
    pub z_count: u32,
    pub repeat_count: u32,
    pub prev_code_size: u8,
}

impl RLE {
    fn prev_code_size(
        &mut self,
        packed_code_sizes: &mut Cursor<&mut [u8]>,
        h: &mut HuffmanOxide,
    ) -> io::Result<()> {
        let counts = &mut h.count[HUFF_CODES_TABLE];
        if self.repeat_count != 0 {
            if self.repeat_count < 3 {
                counts[self.prev_code_size as usize] =
                    counts[self.prev_code_size as usize].wrapping_add(self.repeat_count as u16);
                let code = self.prev_code_size;
                packed_code_sizes.write_all(&[code, code, code][..self.repeat_count as usize])?;
            } else {
                counts[16] = counts[16].wrapping_add(1);
                packed_code_sizes.write_all(&[16, (self.repeat_count - 3) as u8][..])?;
            }
            self.repeat_count = 0;
        }

        Ok(())
    }

    fn zero_code_size(
        &mut self,
        packed_code_sizes: &mut Cursor<&mut [u8]>,
        h: &mut HuffmanOxide,
    ) -> io::Result<()> {
        let counts = &mut h.count[HUFF_CODES_TABLE];
        if self.z_count != 0 {
            if self.z_count < 3 {
                counts[0] = counts[0].wrapping_add(self.z_count as u16);
                packed_code_sizes.write_all(&[0, 0, 0][..self.z_count as usize])?;
            } else if self.z_count <= 10 {
                counts[17] = counts[17].wrapping_add(1);
                packed_code_sizes.write_all(&[17, (self.z_count - 3) as u8][..])?;
            } else {
                counts[18] = counts[18].wrapping_add(1);
                packed_code_sizes.write_all(&[18, (self.z_count - 11) as u8][..])?;
            }
            self.z_count = 0;
        }

        Ok(())
    }
}

impl Default for HuffmanOxide {
    fn default() -> Self {
        HuffmanOxide {
            count: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
            codes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
            code_sizes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES],
        }
    }
}

impl HuffmanOxide {
    fn radix_sort_symbols<'a>(
        symbols0: &'a mut [SymFreq],
        symbols1: &'a mut [SymFreq],
    ) -> &'a mut [SymFreq] {
        let mut hist = [[0; 256]; 2];

        for freq in symbols0.iter() {
            hist[0][(freq.key & 0xFF) as usize] += 1;
            hist[1][((freq.key >> 8) & 0xFF) as usize] += 1;
        }

        let mut n_passes = 2;
        if symbols0.len() == hist[1][0] {
            n_passes -= 1;
        }

        let mut current_symbols = symbols0;
        let mut new_symbols = symbols1;

        for pass in 0..n_passes {
            let mut offsets = [0; 256];
            let mut offset = 0;
            for i in 0..256 {
                offsets[i] = offset;
                offset += hist[pass][i];
            }

            for sym in current_symbols.iter() {
                let j = ((sym.key >> (pass * 8)) & 0xFF) as usize;
                new_symbols[offsets[j]] = *sym;
                offsets[j] += 1;
            }

            mem::swap(&mut current_symbols, &mut new_symbols);
        }

        current_symbols
    }

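    // In-place generation of minimum-redundancy (Huffman) code lengths,
    // following the algorithm of Moffat and Katajainen that miniz credits
    // ("In-Place Calculation of Minimum-Redundancy Codes").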
    fn calculate_minimum_redundancy(symbols: &mut [SymFreq]) {
        match symbols.len() {
            0 => (),
            1 => symbols[0].key = 1,
            n => {
                symbols[0].key += symbols[1].key;
                let mut root = 0;
                let mut leaf = 2;
                for next in 1..n - 1 {
                    if (leaf >= n) || (symbols[root].key < symbols[leaf].key) {
                        symbols[next].key = symbols[root].key;
                        symbols[root].key = next as u16;
                        root += 1;
                    } else {
                        symbols[next].key = symbols[leaf].key;
                        leaf += 1;
                    }

                    if (leaf >= n) || (root < next && symbols[root].key < symbols[leaf].key) {
                        symbols[next].key = symbols[next].key.wrapping_add(symbols[root].key);
                        symbols[root].key = next as u16;
                        root += 1;
                    } else {
                        symbols[next].key = symbols[next].key.wrapping_add(symbols[leaf].key);
                        leaf += 1;
                    }
                }

                symbols[n - 2].key = 0;
                for next in (0..n - 2).rev() {
                    symbols[next].key = symbols[symbols[next].key as usize].key + 1;
                }

                let mut avbl = 1;
                let mut used = 0;
                let mut dpth = 0;
                let mut root = (n - 2) as i32;
                let mut next = (n - 1) as i32;
                while avbl > 0 {
                    while (root >= 0) && (symbols[root as usize].key == dpth) {
                        used += 1;
                        root -= 1;
                    }
                    while avbl > used {
                        symbols[next as usize].key = dpth;
                        next -= 1;
                        avbl -= 1;
                    }
                    avbl = 2 * used;
                    dpth += 1;
                    used = 0;
                }
            }
        }
    }

    fn enforce_max_code_size(num_codes: &mut [i32], code_list_len: usize, max_code_size: usize) {
        if code_list_len <= 1 {
            return;
        }

        num_codes[max_code_size] += num_codes[max_code_size + 1..].iter().sum::<i32>();
        let total = num_codes[1..=max_code_size]
            .iter()
            .rev()
            .enumerate()
            .fold(0u32, |total, (i, &x)| total + ((x as u32) << i));

        for _ in (1 << max_code_size)..total {
            num_codes[max_code_size] -= 1;
            for i in (1..max_code_size).rev() {
                if num_codes[i] != 0 {
                    num_codes[i] -= 1;
                    num_codes[i + 1] += 2;
                    break;
                }
            }
        }
    }

    fn optimize_table(
        &mut self,
        table_num: usize,
        table_len: usize,
        code_size_limit: usize,
        static_table: bool,
    ) {
        let mut num_codes = [0i32; MAX_SUPPORTED_HUFF_CODESIZE + 1];
        let mut next_code = [0u32; MAX_SUPPORTED_HUFF_CODESIZE + 1];

        if static_table {
            for &code_size in &self.code_sizes[table_num][..table_len] {
                num_codes[code_size as usize] += 1;
            }
        } else {
            let mut symbols0 = [SymFreq {
                key: 0,
                sym_index: 0,
            }; MAX_HUFF_SYMBOLS];
            let mut symbols1 = [SymFreq {
                key: 0,
                sym_index: 0,
            }; MAX_HUFF_SYMBOLS];

            let mut num_used_symbols = 0;
            for i in 0..table_len {
                if self.count[table_num][i] != 0 {
                    symbols0[num_used_symbols] = SymFreq {
                        key: self.count[table_num][i],
                        sym_index: i as u16,
                    };
                    num_used_symbols += 1;
                }
            }

            let symbols = Self::radix_sort_symbols(
                &mut symbols0[..num_used_symbols],
                &mut symbols1[..num_used_symbols],
            );
            Self::calculate_minimum_redundancy(symbols);

            for symbol in symbols.iter() {
                num_codes[symbol.key as usize] += 1;
            }

            Self::enforce_max_code_size(&mut num_codes, num_used_symbols, code_size_limit);

            memset(&mut self.code_sizes[table_num][..], 0);
            memset(&mut self.codes[table_num][..], 0);

            let mut last = num_used_symbols;
            for i in 1..=code_size_limit {
                let first = last - num_codes[i] as usize;
                for symbol in &symbols[first..last] {
                    self.code_sizes[table_num][symbol.sym_index as usize] = i as u8;
                }
                last = first;
            }
        }

        let mut j = 0;
        next_code[1] = 0;
        for i in 2..=code_size_limit {
            j = (j + num_codes[i - 1]) << 1;
            next_code[i] = j as u32;
        }

        for (&code_size, huff_code) in self.code_sizes[table_num]
            .iter()
            .take(table_len)
            .zip(self.codes[table_num].iter_mut().take(table_len))
        {
            if code_size == 0 {
                continue;
            }

            let mut code = next_code[code_size as usize];
            next_code[code_size as usize] += 1;

            let mut rev_code = 0;
            for _ in 0..code_size {
                rev_code = (rev_code << 1) | (code & 1);
                code >>= 1;
            }
            *huff_code = rev_code as u16;
        }
    }

    fn start_static_block(&mut self, output: &mut OutputBufferOxide) {
        memset(&mut self.code_sizes[LITLEN_TABLE][0..144], 8);
        memset(&mut self.code_sizes[LITLEN_TABLE][144..256], 9);
        memset(&mut self.code_sizes[LITLEN_TABLE][256..280], 7);
        memset(&mut self.code_sizes[LITLEN_TABLE][280..288], 8);

        memset(&mut self.code_sizes[DIST_TABLE][..32], 5);

        self.optimize_table(LITLEN_TABLE, 288, 15, true);
        self.optimize_table(DIST_TABLE, 32, 15, true);

        output.put_bits(0b01, 2)
    }

    fn start_dynamic_block(&mut self, output: &mut OutputBufferOxide) -> io::Result<()> {
        // There will always be one, and only one, end-of-block code.
        self.count[0][256] = 1;

        self.optimize_table(0, MAX_HUFF_SYMBOLS_0, 15, false);
        self.optimize_table(1, MAX_HUFF_SYMBOLS_1, 15, false);

        let num_lit_codes = 286
            - &self.code_sizes[0][257..286]
                .iter()
                .rev()
                .take_while(|&x| *x == 0)
                .count();

        let num_dist_codes = 30
            - &self.code_sizes[1][1..30]
                .iter()
                .rev()
                .take_while(|&x| *x == 0)
                .count();

        let mut code_sizes_to_pack = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1];
        let mut packed_code_sizes = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1];

        let total_code_sizes_to_pack = num_lit_codes + num_dist_codes;

        code_sizes_to_pack[..num_lit_codes].copy_from_slice(&self.code_sizes[0][..num_lit_codes]);

        code_sizes_to_pack[num_lit_codes..total_code_sizes_to_pack]
            .copy_from_slice(&self.code_sizes[1][..num_dist_codes]);

        let mut rle = RLE {
            z_count: 0,
            repeat_count: 0,
            prev_code_size: 0xFF,
        };

        memset(&mut self.count[HUFF_CODES_TABLE][..MAX_HUFF_SYMBOLS_2], 0);

        let mut packed_code_sizes_cursor = Cursor::new(&mut packed_code_sizes[..]);
        for &code_size in &code_sizes_to_pack[..total_code_sizes_to_pack] {
            if code_size == 0 {
                rle.prev_code_size(&mut packed_code_sizes_cursor, self)?;
                rle.z_count += 1;
                if rle.z_count == 138 {
                    rle.zero_code_size(&mut packed_code_sizes_cursor, self)?;
                }
            } else {
                rle.zero_code_size(&mut packed_code_sizes_cursor, self)?;
                if code_size != rle.prev_code_size {
                    rle.prev_code_size(&mut packed_code_sizes_cursor, self)?;
                    self.count[HUFF_CODES_TABLE][code_size as usize] =
                        self.count[HUFF_CODES_TABLE][code_size as usize].wrapping_add(1);
                    packed_code_sizes_cursor.write_all(&[code_size][..])?;
                } else {
                    rle.repeat_count += 1;
                    if rle.repeat_count == 6 {
                        rle.prev_code_size(&mut packed_code_sizes_cursor, self)?;
                    }
                }
            }
            rle.prev_code_size = code_size;
        }

        if rle.repeat_count != 0 {
            rle.prev_code_size(&mut packed_code_sizes_cursor, self)?;
        } else {
            rle.zero_code_size(&mut packed_code_sizes_cursor, self)?;
        }

        self.optimize_table(2, MAX_HUFF_SYMBOLS_2, 7, false);

        output.put_bits(2, 2);

        output.put_bits((num_lit_codes - 257) as u32, 5);
        output.put_bits((num_dist_codes - 1) as u32, 5);

        let mut num_bit_lengths = 18
            - HUFFMAN_LENGTH_ORDER
                .iter()
                .rev()
                .take_while(|&swizzle| self.code_sizes[HUFF_CODES_TABLE][*swizzle as usize] == 0)
                .count();

        num_bit_lengths = cmp::max(4, num_bit_lengths + 1);
        output.put_bits(num_bit_lengths as u32 - 4, 4);
        for &swizzle in &HUFFMAN_LENGTH_ORDER[..num_bit_lengths] {
            output.put_bits(
                u32::from(self.code_sizes[HUFF_CODES_TABLE][swizzle as usize]),
                3,
            );
        }

        let mut packed_code_size_index = 0 as usize;
        let packed_code_sizes = packed_code_sizes_cursor.get_ref();
        while packed_code_size_index < packed_code_sizes_cursor.position() as usize {
            let code = packed_code_sizes[packed_code_size_index] as usize;
            packed_code_size_index += 1;
            assert!(code < MAX_HUFF_SYMBOLS_2);
            output.put_bits(
                u32::from(self.codes[HUFF_CODES_TABLE][code]),
                u32::from(self.code_sizes[HUFF_CODES_TABLE][code]),
            );
            if code >= 16 {
                output.put_bits(
                    u32::from(packed_code_sizes[packed_code_size_index]),
                    [2, 3, 7][code - 16],
                );
                packed_code_size_index += 1;
            }
        }

        Ok(())
    }
}

struct DictOxide {
    /// The maximum number of checks in the hash chain, for the initial,
    /// and the lazy match respectively.
    pub max_probes: [u32; 2],
    /// Buffer of input data.
    /// Padded with 1 byte to simplify matching code in `compress_fast`.
    pub b: Box<HashBuffers>,

    pub code_buf_dict_pos: u32,
    pub lookahead_size: u32,
    pub lookahead_pos: u32,
    pub size: u32,
}

fn probes_from_flags(flags: u32) -> [u32; 2] {
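    // E.g. DEFAULT_FLAGS contains NUM_PROBES[4] == 16, giving
    // [1 + (16 + 2) / 3, 1 + ((16 >> 2) + 2) / 3] == [7, 3] probes.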
    [
        1 + ((flags & 0xFFF) + 2) / 3,
        1 + (((flags & 0xFFF) >> 2) + 2) / 3,
    ]
}

impl DictOxide {
    fn new(flags: u32) -> Self {
        DictOxide {
            max_probes: probes_from_flags(flags),
            b: Box::default(),
            code_buf_dict_pos: 0,
            lookahead_size: 0,
            lookahead_pos: 0,
            size: 0,
        }
    }

    fn update_flags(&mut self, flags: u32) {
        self.max_probes = probes_from_flags(flags);
    }

    fn reset(&mut self) {
        self.b.reset();
        self.code_buf_dict_pos = 0;
        self.lookahead_size = 0;
        self.lookahead_pos = 0;
        self.size = 0;
    }

    /// Do an unaligned read of the data at `pos` in the dictionary and interpret it as a u32
    /// value.
    #[inline]
    fn read_unaligned_u32(&self, pos: u32) -> u32 {
        // Masking the value here helps avoid bounds checks.
        let pos = (pos & LZ_DICT_SIZE_MASK) as usize;
        let end = pos + 4;
        // Somehow this assertion makes things faster.
        assert!(end < LZ_DICT_FULL_SIZE);

        let bytes: [u8; 4] = self.b.dict[pos..end].try_into().unwrap();
        u32::from_le_bytes(bytes)
    }

    /// Do an unaligned read of the data at `pos` in the dictionary and interpret it as a u64
    /// value.
    #[inline]
    fn read_unaligned_u64(&self, pos: u32) -> u64 {
        let pos = pos as usize;
        let bytes: [u8; 8] = self.b.dict[pos..pos + 8].try_into().unwrap();
        u64::from_le_bytes(bytes)
    }

    /// Read the two bytes at `pos` in the dictionary and interpret them as a u16 value.
    #[inline]
    fn read_as_u16(&self, pos: usize) -> u16 {
        read_u16_le(&self.b.dict[..], pos)
    }

    /// Try to find a match for the data at lookahead_pos in the dictionary that is
    /// longer than `match_len`.
    /// Returns a tuple containing (match_distance, match_length). Will be equal to the input
    /// values if no better matches were found.
    fn find_match(
        &self,
        lookahead_pos: u32,
        max_dist: u32,
        max_match_len: u32,
        mut match_dist: u32,
        mut match_len: u32,
    ) -> (u32, u32) {
        // Clamp the match len and max_match_len to be valid. (They should be valid when this is
        // called, but clamp them for now just in case for safety reasons.)
        // This should normally end up as at worst conditional moves,
        // so it shouldn't slow us down much.
        // TODO: Statically verify these so we don't need to do this.
        let max_match_len = cmp::min(MAX_MATCH_LEN as u32, max_match_len);
        match_len = cmp::max(match_len, 1);

        let pos = lookahead_pos & LZ_DICT_SIZE_MASK;
        let mut probe_pos = pos;
        // Number of probes into the hash chains.
        let mut num_probes_left = self.max_probes[(match_len >= 32) as usize];

        // If we already have a match of the full length, don't bother searching for another one.
        if max_match_len <= match_len {
            return (match_dist, match_len);
        }

        // Read the last byte of the current match, and the next one, used to compare matches.
        let mut c01: u16 = self.read_as_u16(pos as usize + match_len as usize - 1);
        // Read the two bytes at the start position of the current match.
        let s01: u16 = self.read_as_u16(pos as usize);

        'outer: loop {
            let mut dist;
            'found: loop {
                num_probes_left -= 1;
                if num_probes_left == 0 {
                    // We have done as many probes in the hash chain as the current compression
                    // settings allow, so return the best match we found, if any.
                    return (match_dist, match_len);
                }

                for _ in 0..3 {
                    let next_probe_pos = u32::from(self.b.next[probe_pos as usize]);

                    dist = (lookahead_pos - next_probe_pos) & 0xFFFF;
                    if next_probe_pos == 0 || dist > max_dist {
                        // We reached the end of the hash chain, or the next value is further away
                        // than the maximum allowed distance, so return the best match we found, if
                        // any.
                        return (match_dist, match_len);
                    }

                    // Mask the position value to get the position in the hash chain of the next
                    // position to match against.
                    probe_pos = next_probe_pos & LZ_DICT_SIZE_MASK;

                    if self.read_as_u16((probe_pos + match_len - 1) as usize) == c01 {
                        break 'found;
                    }
                }
            }

            if dist == 0 {
                // We've looked through the whole match range, so return the best match we
                // found.
                return (match_dist, match_len);
            }

            // Check if the first two bytes match.
            if self.read_as_u16(probe_pos as usize) != s01 {
                continue;
            }

            let mut p = pos + 2;
            let mut q = probe_pos + 2;
            // The first two bytes matched, so check the full length of the match.
            for _ in 0..32 {
                let p_data: u64 = self.read_unaligned_u64(p);
                let q_data: u64 = self.read_unaligned_u64(q);
                // Compare 8 bytes at a time using unaligned loads of 64-bit integers.
                let xor_data = p_data ^ q_data;
                if xor_data == 0 {
                    p += 8;
                    q += 8;
                } else {
                    // If not all of the last 8 bytes matched, check how many of them did.
                    let trailing = xor_data.trailing_zeros();

                    let probe_len = p - pos + (trailing >> 3);
                    if probe_len > match_len {
                        match_dist = dist;
                        match_len = cmp::min(max_match_len, probe_len);
                        if match_len == max_match_len {
                            // We found a match that had the maximum allowed length,
                            // so there is no point searching further.
                            return (match_dist, match_len);
                        }
                        // We found a better match, so save the last two bytes for further match
                        // comparisons.
                        c01 = self.read_as_u16((pos + match_len - 1) as usize)
                    }
                    continue 'outer;
                }
            }

            return (dist, cmp::min(max_match_len, MAX_MATCH_LEN as u32));
        }
    }
}

struct ParamsOxide {
    pub flags: u32,
    pub greedy_parsing: bool,
    pub block_index: u32,

    pub saved_match_dist: u32,
    pub saved_match_len: u32,
    pub saved_lit: u8,

    pub flush: TDEFLFlush,
    pub flush_ofs: u32,
    pub flush_remaining: u32,
    pub finished: bool,

    pub adler32: u32,

    pub src_pos: usize,

    pub out_buf_ofs: usize,
    pub prev_return_status: TDEFLStatus,

    pub saved_bit_buffer: u32,
    pub saved_bits_in: u32,

    pub local_buf: Box<LocalBuf>,
}

impl ParamsOxide {
    fn new(flags: u32) -> Self {
        ParamsOxide {
            flags,
            greedy_parsing: flags & TDEFL_GREEDY_PARSING_FLAG != 0,
            block_index: 0,
            saved_match_dist: 0,
            saved_match_len: 0,
            saved_lit: 0,
            flush: TDEFLFlush::None,
            flush_ofs: 0,
            flush_remaining: 0,
            finished: false,
            adler32: MZ_ADLER32_INIT,
            src_pos: 0,
            out_buf_ofs: 0,
            prev_return_status: TDEFLStatus::Okay,
            saved_bit_buffer: 0,
            saved_bits_in: 0,
            local_buf: Box::default(),
        }
    }

    fn update_flags(&mut self, flags: u32) {
        self.flags = flags;
        self.greedy_parsing = self.flags & TDEFL_GREEDY_PARSING_FLAG != 0;
    }

    /// Reset state, saving settings.
    fn reset(&mut self) {
        self.block_index = 0;
        self.saved_match_len = 0;
        self.saved_match_dist = 0;
        self.saved_lit = 0;
        self.flush = TDEFLFlush::None;
        self.flush_ofs = 0;
        self.flush_remaining = 0;
        self.finished = false;
        self.adler32 = MZ_ADLER32_INIT;
        self.src_pos = 0;
        self.out_buf_ofs = 0;
        self.prev_return_status = TDEFLStatus::Okay;
        self.saved_bit_buffer = 0;
        self.saved_bits_in = 0;
        self.local_buf.b = [0; OUT_BUF_SIZE];
    }
}

struct LZOxide {
    pub codes: [u8; LZ_CODE_BUF_SIZE],
    pub code_position: usize,
    pub flag_position: usize,

    pub total_bytes: u32,
    pub num_flags_left: u32,
}

impl LZOxide {
    fn new() -> Self {
        LZOxide {
            codes: [0; LZ_CODE_BUF_SIZE],
            code_position: 1,
            flag_position: 0,
            total_bytes: 0,
            num_flags_left: 8,
        }
    }

    fn write_code(&mut self, val: u8) {
        self.codes[self.code_position] = val;
        self.code_position += 1;
    }

    fn init_flag(&mut self) {
        if self.num_flags_left == 8 {
            *self.get_flag() = 0;
            self.code_position -= 1;
        } else {
            *self.get_flag() >>= self.num_flags_left;
        }
    }

    fn get_flag(&mut self) -> &mut u8 {
        &mut self.codes[self.flag_position]
    }

    fn plant_flag(&mut self) {
        self.flag_position = self.code_position;
        self.code_position += 1;
    }

    fn consume_flag(&mut self) {
        self.num_flags_left -= 1;
        if self.num_flags_left == 0 {
            self.num_flags_left = 8;
            self.plant_flag();
        }
    }
}

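// Layout of `lz_code_buf` as consumed below: each flag byte covers up to eight
// following codes, LSB first. A flag bit of 1 means a match, stored as three
// bytes (the match length biased by MIN_MATCH_LEN, then a little-endian u16
// distance value); a flag bit of 0 means a single literal byte.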
fn compress_lz_codes(
    huff: &HuffmanOxide,
    output: &mut OutputBufferOxide,
    lz_code_buf: &[u8],
) -> io::Result<bool> {
    let mut flags = 1;
    let mut bb = BitBuffer {
        bit_buffer: u64::from(output.bit_buffer),
        bits_in: output.bits_in,
    };

    let mut i: usize = 0;
    while i < lz_code_buf.len() {
        if flags == 1 {
            flags = u32::from(lz_code_buf[i]) | 0x100;
            i += 1;
        }

        // The lz code was a length code
        if flags & 1 == 1 {
            flags >>= 1;

            let sym;
            let num_extra_bits;

            let match_len = lz_code_buf[i] as usize;

            let match_dist = read_u16_le(lz_code_buf, i + 1);

            i += 3;

            debug_assert!(huff.code_sizes[0][LEN_SYM[match_len] as usize] != 0);
            bb.put_fast(
                u64::from(huff.codes[0][LEN_SYM[match_len] as usize]),
                u32::from(huff.code_sizes[0][LEN_SYM[match_len] as usize]),
            );
            bb.put_fast(
                match_len as u64 & u64::from(BITMASKS[LEN_EXTRA[match_len] as usize]),
                u32::from(LEN_EXTRA[match_len]),
            );

            if match_dist < 512 {
                sym = SMALL_DIST_SYM[match_dist as usize] as usize;
                num_extra_bits = SMALL_DIST_EXTRA[match_dist as usize] as usize;
            } else {
                sym = LARGE_DIST_SYM[(match_dist >> 8) as usize] as usize;
                num_extra_bits = LARGE_DIST_EXTRA[(match_dist >> 8) as usize] as usize;
            }

            debug_assert!(huff.code_sizes[1][sym] != 0);
            bb.put_fast(
                u64::from(huff.codes[1][sym]),
                u32::from(huff.code_sizes[1][sym]),
            );
            bb.put_fast(
                u64::from(match_dist) & u64::from(BITMASKS[num_extra_bits as usize]),
                num_extra_bits as u32,
            );
        } else {
            // The lz code was a literal
            for _ in 0..3 {
                flags >>= 1;
                let lit = lz_code_buf[i];
                i += 1;

                debug_assert!(huff.code_sizes[0][lit as usize] != 0);
                bb.put_fast(
                    u64::from(huff.codes[0][lit as usize]),
                    u32::from(huff.code_sizes[0][lit as usize]),
                );

                if flags & 1 == 1 || i >= lz_code_buf.len() {
                    break;
                }
            }
        }

        bb.flush(output)?;
    }

    output.bits_in = 0;
    output.bit_buffer = 0;
    while bb.bits_in != 0 {
        let n = cmp::min(bb.bits_in, 16);
        output.put_bits(bb.bit_buffer as u32 & BITMASKS[n as usize], n);
        bb.bit_buffer >>= n;
        bb.bits_in -= n;
    }

    // Output the end of block symbol.
    output.put_bits(
        u32::from(huff.codes[0][256]),
        u32::from(huff.code_sizes[0][256]),
    );

    Ok(true)
}
1574
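// Compress the LZ codes of the current block and write them to the output buffer, using
// either fixed (static) Huffman codes or codes generated dynamically from the recorded
// symbol counts.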
fn compress_block(
    huff: &mut HuffmanOxide,
    output: &mut OutputBufferOxide,
    lz: &LZOxide,
    static_block: bool,
) -> io::Result<bool> {
    if static_block {
        huff.start_static_block(output);
    } else {
        huff.start_dynamic_block(output)?;
    }

    compress_lz_codes(huff, output, &lz.codes[..lz.code_position])
}

fn flush_block(
    d: &mut CompressorOxide,
    callback: &mut CallbackOxide,
    flush: TDEFLFlush,
) -> io::Result<i32> {
    let mut saved_buffer;
    {
        let mut output = callback
            .out
            .new_output_buffer(&mut d.params.local_buf.b, d.params.out_buf_ofs);
        output.bit_buffer = d.params.saved_bit_buffer;
        output.bits_in = d.params.saved_bits_in;

        let use_raw_block = (d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0)
            && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos) <= d.dict.size;

        assert!(d.params.flush_remaining == 0);
        d.params.flush_ofs = 0;
        d.params.flush_remaining = 0;

        d.lz.init_flag();

        // If we are at the start of the stream, write the zlib header if requested.
        if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 && d.params.block_index == 0 {
            let header = zlib::header_from_flags(d.params.flags as u32);
            output.put_bits(header[0].into(), 8);
            output.put_bits(header[1].into(), 8);
        }

        // Output the block header.
        output.put_bits((flush == TDEFLFlush::Finish) as u32, 1);

        saved_buffer = output.save();

        let comp_success = if !use_raw_block {
            let use_static =
                (d.params.flags & TDEFL_FORCE_ALL_STATIC_BLOCKS != 0) || (d.lz.total_bytes < 48);
            compress_block(&mut d.huff, &mut output, &d.lz, use_static)?
        } else {
            false
        };

        // If we failed to compress anything, or the compressed block would take up more
        // space than the raw input data, output a stored block instead, which has at most
        // 5 bytes of overhead: 4 bytes for the block length and its ones complement, plus
        // up to a byte of padding since the length fields have to start at a byte boundary.
        // We only use some simple heuristics for now.
        // As a static block has an overhead of at most 1 bit per byte (literals are coded
        // with either 8 or 9 bits), a stored block will never take up less space if the
        // number of input bytes is less than 32.
        let expanded = (d.lz.total_bytes > 32)
            && (output.inner.position() - saved_buffer.pos + 1 >= u64::from(d.lz.total_bytes))
            && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos <= d.dict.size);

        if use_raw_block || expanded {
            output.load(saved_buffer);

            // Block header.
            output.put_bits(0, 2);

            // The block length has to start on a byte boundary, so pad.
            output.pad_to_bytes();

            // Block length and ones complement of block length.
            output.put_bits(d.lz.total_bytes & 0xFFFF, 16);
            output.put_bits(!d.lz.total_bytes & 0xFFFF, 16);

            // Write the actual bytes.
            for i in 0..d.lz.total_bytes {
                let pos = (d.dict.code_buf_dict_pos + i) & LZ_DICT_SIZE_MASK;
                output.put_bits(u32::from(d.dict.b.dict[pos as usize]), 8);
            }
        } else if !comp_success {
            output.load(saved_buffer);
            compress_block(&mut d.huff, &mut output, &d.lz, true)?;
        }

        if flush != TDEFLFlush::None {
            if flush == TDEFLFlush::Finish {
                output.pad_to_bytes();
                if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 {
                    // Write the adler32 checksum of the input in big-endian order.
                    let mut adler = d.params.adler32;
                    for _ in 0..4 {
                        output.put_bits((adler >> 24) & 0xFF, 8);
                        adler <<= 8;
                    }
                }
            } else {
                // Sync or Full flush.
                // Output an empty raw block.
                output.put_bits(0, 3);
                output.pad_to_bytes();
                output.put_bits(0, 16);
                output.put_bits(0xFFFF, 16);
            }
        }

        memset(&mut d.huff.count[0][..MAX_HUFF_SYMBOLS_0], 0);
        memset(&mut d.huff.count[1][..MAX_HUFF_SYMBOLS_1], 0);

        d.lz.code_position = 1;
        d.lz.flag_position = 0;
        d.lz.num_flags_left = 8;
        d.dict.code_buf_dict_pos += d.lz.total_bytes;
        d.lz.total_bytes = 0;
        d.params.block_index += 1;

        saved_buffer = output.save();

        d.params.saved_bit_buffer = saved_buffer.bit_buffer;
        d.params.saved_bits_in = saved_buffer.bits_in;
    }

    Ok(callback.flush_output(saved_buffer, &mut d.params))
}

fn record_literal(h: &mut HuffmanOxide, lz: &mut LZOxide, lit: u8) {
    lz.total_bytes += 1;
    lz.write_code(lit);

    *lz.get_flag() >>= 1;
    lz.consume_flag();

    h.count[0][lit as usize] += 1;
}

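// A match is recorded as three code bytes: the match length minus MIN_MATCH_LEN, followed
// by the match distance minus 1 as a little-endian u16. The corresponding flag bit is set
// to distinguish it from literals.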
fn record_match(h: &mut HuffmanOxide, lz: &mut LZOxide, mut match_len: u32, mut match_dist: u32) {
    assert!(match_len >= MIN_MATCH_LEN);
    assert!(match_dist >= 1);
    assert!(match_dist as usize <= LZ_DICT_SIZE);

    lz.total_bytes += match_len;
    match_dist -= 1;
    match_len -= MIN_MATCH_LEN;
    lz.write_code(match_len as u8);
    lz.write_code(match_dist as u8);
    lz.write_code((match_dist >> 8) as u8);

    *lz.get_flag() >>= 1;
    *lz.get_flag() |= 0x80;
    lz.consume_flag();

    let symbol = if match_dist < 512 {
        SMALL_DIST_SYM[match_dist as usize]
    } else {
        LARGE_DIST_SYM[((match_dist >> 8) & 127) as usize]
    } as usize;
    h.count[1][symbol] += 1;
    h.count[0][LEN_SYM[match_len as usize] as usize] += 1;
}

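// Compression for the normal (non-fast) modes: bytes are added to a hash-chained
// dictionary as they are read, and matches are searched for with `find_match`. Unless
// greedy parsing is requested, matching is lazy: a found match may be deferred for one
// byte to see if a longer match starts at the next position.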
fn compress_normal(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool {
    let mut src_pos = d.params.src_pos;
    let in_buf = match callback.in_buf {
        None => return true,
        Some(in_buf) => in_buf,
    };

    let mut lookahead_size = d.dict.lookahead_size;
    let mut lookahead_pos = d.dict.lookahead_pos;
    let mut saved_lit = d.params.saved_lit;
    let mut saved_match_dist = d.params.saved_match_dist;
    let mut saved_match_len = d.params.saved_match_len;

    while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size != 0) {
        let src_buf_left = in_buf.len() - src_pos;
        let num_bytes_to_process = cmp::min(src_buf_left, MAX_MATCH_LEN - lookahead_size as usize);

        if lookahead_size + d.dict.size >= MIN_MATCH_LEN - 1 && num_bytes_to_process > 0 {
            let dictb = &mut d.dict.b;

            let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
            let mut ins_pos = lookahead_pos + lookahead_size - 2;
            let mut hash = (u32::from(dictb.dict[(ins_pos & LZ_DICT_SIZE_MASK) as usize])
                << LZ_HASH_SHIFT)
                ^ u32::from(dictb.dict[((ins_pos + 1) & LZ_DICT_SIZE_MASK) as usize]);

            lookahead_size += num_bytes_to_process as u32;
            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                dictb.dict[dst_pos as usize] = c;
                if (dst_pos as usize) < MAX_MATCH_LEN - 1 {
                    dictb.dict[LZ_DICT_SIZE + dst_pos as usize] = c;
                }

                hash = ((hash << LZ_HASH_SHIFT) ^ u32::from(c)) & (LZ_HASH_SIZE as u32 - 1);
                dictb.next[(ins_pos & LZ_DICT_SIZE_MASK) as usize] = dictb.hash[hash as usize];

                dictb.hash[hash as usize] = ins_pos as u16;
                dst_pos = (dst_pos + 1) & LZ_DICT_SIZE_MASK;
                ins_pos += 1;
            }
            src_pos += num_bytes_to_process;
        } else {
            let dictb = &mut d.dict.b;
            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                let dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
                dictb.dict[dst_pos as usize] = c;
                if (dst_pos as usize) < MAX_MATCH_LEN - 1 {
                    dictb.dict[LZ_DICT_SIZE + dst_pos as usize] = c;
                }

                lookahead_size += 1;
                if lookahead_size + d.dict.size >= MIN_MATCH_LEN {
                    let ins_pos = lookahead_pos + lookahead_size - 3;
                    let hash = ((u32::from(dictb.dict[(ins_pos & LZ_DICT_SIZE_MASK) as usize])
                        << (LZ_HASH_SHIFT * 2))
                        ^ ((u32::from(dictb.dict[((ins_pos + 1) & LZ_DICT_SIZE_MASK) as usize])
                            << LZ_HASH_SHIFT)
                            ^ u32::from(c)))
                        & (LZ_HASH_SIZE as u32 - 1);

                    dictb.next[(ins_pos & LZ_DICT_SIZE_MASK) as usize] = dictb.hash[hash as usize];
                    dictb.hash[hash as usize] = ins_pos as u16;
                }
            }

            src_pos += num_bytes_to_process;
        }

        d.dict.size = cmp::min(LZ_DICT_SIZE as u32 - lookahead_size, d.dict.size);
        if d.params.flush == TDEFLFlush::None && (lookahead_size as usize) < MAX_MATCH_LEN {
            break;
        }

        let mut len_to_move = 1;
        let mut cur_match_dist = 0;
        let mut cur_match_len = if saved_match_len != 0 {
            saved_match_len
        } else {
            MIN_MATCH_LEN - 1
        };
        let cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK;
        if d.params.flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS) != 0 {
            // In RLE mode, only look for matches against the previous byte (distance 1).
            if d.dict.size != 0 && d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS == 0 {
                let c = d.dict.b.dict[((cur_pos.wrapping_sub(1)) & LZ_DICT_SIZE_MASK) as usize];
                cur_match_len = d.dict.b.dict[cur_pos as usize..(cur_pos + lookahead_size) as usize]
                    .iter()
                    .take_while(|&x| *x == c)
                    .count() as u32;
                if cur_match_len < MIN_MATCH_LEN {
                    cur_match_len = 0
                } else {
                    cur_match_dist = 1
                }
            }
        } else {
            let dist_len = d.dict.find_match(
                lookahead_pos,
                d.dict.size,
                lookahead_size,
                cur_match_dist,
                cur_match_len,
            );
            cur_match_dist = dist_len.0;
            cur_match_len = dist_len.1;
        }

        // Drop matches that are unlikely to be worth coding: minimum-length matches with a
        // very large distance, and short matches when match filtering is requested.
        let far_and_small = cur_match_len == MIN_MATCH_LEN && cur_match_dist >= 8 * 1024;
        let filter_small = d.params.flags & TDEFL_FILTER_MATCHES != 0 && cur_match_len <= 5;
        if far_and_small || filter_small || cur_pos == cur_match_dist {
            cur_match_dist = 0;
            cur_match_len = 0;
        }

        if saved_match_len != 0 {
            // Lazy matching: a match was deferred from the previous position, so record
            // whichever of the previous and current match is longer.
            if cur_match_len > saved_match_len {
                record_literal(&mut d.huff, &mut d.lz, saved_lit);
                if cur_match_len >= 128 {
                    record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist);
                    saved_match_len = 0;
                    len_to_move = cur_match_len;
                } else {
                    saved_lit = d.dict.b.dict[cur_pos as usize];
                    saved_match_dist = cur_match_dist;
                    saved_match_len = cur_match_len;
                }
            } else {
                record_match(&mut d.huff, &mut d.lz, saved_match_len, saved_match_dist);
                len_to_move = saved_match_len - 1;
                saved_match_len = 0;
            }
        } else if cur_match_dist == 0 {
            record_literal(
                &mut d.huff,
                &mut d.lz,
                d.dict.b.dict[cmp::min(cur_pos as usize, d.dict.b.dict.len() - 1)],
            );
        } else if d.params.greedy_parsing
            || (d.params.flags & TDEFL_RLE_MATCHES != 0)
            || cur_match_len >= 128
        {
            // Record the match right away when using greedy parsing, or when the match is
            // long enough (128 bytes or more) that lazy matching is unlikely to improve it.
            record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist);
            len_to_move = cur_match_len;
        } else {
            // Lazy matching: defer this match and check for a longer one at the next byte.
            saved_lit = d.dict.b.dict[cmp::min(cur_pos as usize, d.dict.b.dict.len() - 1)];
            saved_match_dist = cur_match_dist;
            saved_match_len = cur_match_len;
        }

        lookahead_pos += len_to_move;
        assert!(lookahead_size >= len_to_move);
        lookahead_size -= len_to_move;
        d.dict.size = cmp::min(d.dict.size + len_to_move, LZ_DICT_SIZE as u32);

        let lz_buf_tight = d.lz.code_position > LZ_CODE_BUF_SIZE - 8;
        let raw = d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0;
        let fat = ((d.lz.code_position * 115) >> 7) >= d.lz.total_bytes as usize;
        let fat_or_raw = (d.lz.total_bytes > 31 * 1024) && (fat || raw);

        if lz_buf_tight || fat_or_raw {
            d.params.src_pos = src_pos;
            // These values are used in flush_block, so we need to write them back here.
            d.dict.lookahead_size = lookahead_size;
            d.dict.lookahead_pos = lookahead_pos;

            let n = flush_block(d, callback, TDEFLFlush::None)
                .unwrap_or(TDEFLStatus::PutBufFailed as i32);
            if n != 0 {
                d.params.saved_lit = saved_lit;
                d.params.saved_match_dist = saved_match_dist;
                d.params.saved_match_len = saved_match_len;
                return n > 0;
            }
        }
    }

    d.params.src_pos = src_pos;
    d.dict.lookahead_size = lookahead_size;
    d.dict.lookahead_pos = lookahead_pos;
    d.params.saved_lit = saved_lit;
    d.params.saved_match_dist = saved_match_dist;
    d.params.saved_match_len = saved_match_len;
    true
}

const COMP_FAST_LOOKAHEAD_SIZE: u32 = 4096;

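// Compression for the fastest level: greedy parsing with a single hash probe per position.
// Positions are hashed by their three-byte prefix (trigram), and candidate matches are
// extended eight bytes at a time by XOR-ing and counting trailing zero bits.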
fn compress_fast(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool {
    let mut src_pos = d.params.src_pos;
    let mut lookahead_size = d.dict.lookahead_size;
    let mut lookahead_pos = d.dict.lookahead_pos;

    let mut cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK;
    let in_buf = match callback.in_buf {
        None => return true,
        Some(in_buf) => in_buf,
    };

    debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2);

    while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size > 0) {
        let mut dst_pos = ((lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK) as usize;
        let mut num_bytes_to_process = cmp::min(
            in_buf.len() - src_pos,
            (COMP_FAST_LOOKAHEAD_SIZE - lookahead_size) as usize,
        );
        lookahead_size += num_bytes_to_process as u32;

        while num_bytes_to_process != 0 {
            let n = cmp::min(LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
            d.dict.b.dict[dst_pos..dst_pos + n].copy_from_slice(&in_buf[src_pos..src_pos + n]);

            if dst_pos < MAX_MATCH_LEN - 1 {
                let m = cmp::min(n, MAX_MATCH_LEN - 1 - dst_pos);
                d.dict.b.dict[dst_pos + LZ_DICT_SIZE..dst_pos + LZ_DICT_SIZE + m]
                    .copy_from_slice(&in_buf[src_pos..src_pos + m]);
            }

            src_pos += n;
            dst_pos = (dst_pos + n) & LZ_DICT_SIZE_MASK as usize;
            num_bytes_to_process -= n;
        }

        d.dict.size = cmp::min(LZ_DICT_SIZE as u32 - lookahead_size, d.dict.size);
        if d.params.flush == TDEFLFlush::None && lookahead_size < COMP_FAST_LOOKAHEAD_SIZE {
            break;
        }

        while lookahead_size >= 4 {
            let mut cur_match_len = 1;

            let first_trigram = d.dict.read_unaligned_u32(cur_pos) & 0xFF_FFFF;

            let hash = (first_trigram ^ (first_trigram >> (24 - (LZ_HASH_BITS - 8))))
                & LEVEL1_HASH_SIZE_MASK;

            let mut probe_pos = u32::from(d.dict.b.hash[hash as usize]);
            d.dict.b.hash[hash as usize] = lookahead_pos as u16;

            let mut cur_match_dist = (lookahead_pos - probe_pos) as u16;
            if u32::from(cur_match_dist) <= d.dict.size {
                probe_pos &= LZ_DICT_SIZE_MASK;

                let trigram = d.dict.read_unaligned_u32(probe_pos) & 0xFF_FFFF;

                if first_trigram == trigram {
                    // The first three bytes are known to match, so start comparing at a
                    // displacement of 3.
                    let mut p = cur_pos + 3;
                    let mut q = probe_pos + 3;
                    cur_match_len = 'find_match: loop {
                        for _ in 0..32 {
                            let p_data: u64 = d.dict.read_unaligned_u64(p);
                            let q_data: u64 = d.dict.read_unaligned_u64(q);
                            let xor_data = p_data ^ q_data;
                            if xor_data == 0 {
                                p += 8;
                                q += 8;
                            } else {
                                let trailing = xor_data.trailing_zeros();
                                break 'find_match p as u32 - cur_pos + (trailing >> 3);
                            }
                        }

                        break 'find_match if cur_match_dist == 0 {
                            0
                        } else {
                            MAX_MATCH_LEN as u32
                        };
                    };

                    if cur_match_len < MIN_MATCH_LEN
                        || (cur_match_len == MIN_MATCH_LEN && cur_match_dist >= 8 * 1024)
                    {
                        let lit = first_trigram as u8;
                        cur_match_len = 1;
                        d.lz.write_code(lit);
                        *d.lz.get_flag() >>= 1;
                        d.huff.count[0][lit as usize] += 1;
                    } else {
                        // Limit the match to the length of the lookahead so we don't create a match
                        // that ends after the end of the input data.
                        cur_match_len = cmp::min(cur_match_len, lookahead_size);
                        debug_assert!(cur_match_len >= MIN_MATCH_LEN);
                        debug_assert!(cur_match_dist >= 1);
                        debug_assert!(cur_match_dist as usize <= LZ_DICT_SIZE);
                        cur_match_dist -= 1;

                        d.lz.write_code((cur_match_len - MIN_MATCH_LEN) as u8);
                        d.lz.write_code(cur_match_dist as u8);
                        d.lz.write_code((cur_match_dist >> 8) as u8);

                        *d.lz.get_flag() >>= 1;
                        *d.lz.get_flag() |= 0x80;
                        if cur_match_dist < 512 {
                            d.huff.count[1][SMALL_DIST_SYM[cur_match_dist as usize] as usize] += 1;
                        } else {
                            d.huff.count[1]
                                [LARGE_DIST_SYM[(cur_match_dist >> 8) as usize] as usize] += 1;
                        }

                        d.huff.count[0]
                            [LEN_SYM[(cur_match_len - MIN_MATCH_LEN) as usize] as usize] += 1;
                    }
                } else {
                    d.lz.write_code(first_trigram as u8);
                    *d.lz.get_flag() >>= 1;
                    d.huff.count[0][first_trigram as u8 as usize] += 1;
                }

                d.lz.consume_flag();
                d.lz.total_bytes += cur_match_len;
                lookahead_pos += cur_match_len;
                d.dict.size = cmp::min(d.dict.size + cur_match_len, LZ_DICT_SIZE as u32);
                cur_pos = (cur_pos + cur_match_len) & LZ_DICT_SIZE_MASK;
                lookahead_size -= cur_match_len;

                if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 {
                    // These values are used in flush_block, so we need to write them back here.
                    d.dict.lookahead_size = lookahead_size;
                    d.dict.lookahead_pos = lookahead_pos;

                    let n = match flush_block(d, callback, TDEFLFlush::None) {
                        Err(_) => {
                            d.params.src_pos = src_pos;
                            d.params.prev_return_status = TDEFLStatus::PutBufFailed;
                            return false;
                        }
                        Ok(status) => status,
                    };
                    if n != 0 {
                        d.params.src_pos = src_pos;
                        return n > 0;
                    }
                    debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2);

                    lookahead_size = d.dict.lookahead_size;
                    lookahead_pos = d.dict.lookahead_pos;
                }
            }
        }

        // Output any remaining lookahead bytes as literals.
        while lookahead_size != 0 {
            let lit = d.dict.b.dict[cur_pos as usize];
            d.lz.total_bytes += 1;
            d.lz.write_code(lit);
            *d.lz.get_flag() >>= 1;
            d.lz.consume_flag();

            d.huff.count[0][lit as usize] += 1;
            lookahead_pos += 1;
            d.dict.size = cmp::min(d.dict.size + 1, LZ_DICT_SIZE as u32);
            cur_pos = (cur_pos + 1) & LZ_DICT_SIZE_MASK;
            lookahead_size -= 1;

            if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 {
                // These values are used in flush_block, so we need to write them back here.
                d.dict.lookahead_size = lookahead_size;
                d.dict.lookahead_pos = lookahead_pos;

                let n = match flush_block(d, callback, TDEFLFlush::None) {
                    Err(_) => {
                        d.params.prev_return_status = TDEFLStatus::PutBufFailed;
                        d.params.src_pos = src_pos;
                        return false;
                    }
                    Ok(status) => status,
                };
                if n != 0 {
                    d.params.src_pos = src_pos;
                    return n > 0;
                }

                lookahead_size = d.dict.lookahead_size;
                lookahead_pos = d.dict.lookahead_pos;
            }
        }
    }

    d.params.src_pos = src_pos;
    d.dict.lookahead_size = lookahead_size;
    d.dict.lookahead_pos = lookahead_pos;
    true
}

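// Copy as much pending output as will fit from the local buffer into the caller's output
// buffer, tracking flush progress so the remainder can be delivered on a later call.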
fn flush_output_buffer(c: &mut CallbackOxide, p: &mut ParamsOxide) -> (TDEFLStatus, usize, usize) {
    let mut res = (TDEFLStatus::Okay, p.src_pos, 0);
    if let CallbackOut::Buf(ref mut cb) = c.out {
        let n = cmp::min(cb.out_buf.len() - p.out_buf_ofs, p.flush_remaining as usize);
        if n != 0 {
            (&mut cb.out_buf[p.out_buf_ofs..p.out_buf_ofs + n])
                .copy_from_slice(&p.local_buf.b[p.flush_ofs as usize..p.flush_ofs as usize + n]);
        }
        p.flush_ofs += n as u32;
        p.flush_remaining -= n as u32;
        p.out_buf_ofs += n;
        res.2 = p.out_buf_ofs;
    }

    if p.finished && p.flush_remaining == 0 {
        res.0 = TDEFLStatus::Done
    }
    res
}

/// Main compression function. Tries to compress as much as possible from `in_buf` and
/// puts compressed output into `out_buf`.
///
/// The value of `flush` determines if the compressor should attempt to flush all output
/// and whether it should try to finish the stream.
/// Should be `TDEFLFlush::Finish` on the final call.
///
/// # Returns
/// Returns a tuple containing the current status of the compressor, the current position
/// in the input buffer, and the current position in the output buffer.
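///
/// # Example
///
/// A minimal sketch of one-shot compression with the default compressor settings. It
/// assumes the output buffer is large enough; a real caller should loop, feeding the
/// remaining input and draining output until `TDEFLStatus::Done` is returned.
///
/// ```
/// use miniz_oxide::deflate::core::{compress, CompressorOxide, TDEFLFlush, TDEFLStatus};
///
/// let mut d = CompressorOxide::default();
/// let mut out = vec![0u8; 128];
/// let (status, _in_consumed, out_produced) =
///     compress(&mut d, b"Hello, hello, hello!", &mut out, TDEFLFlush::Finish);
/// assert_eq!(status, TDEFLStatus::Done);
/// out.truncate(out_produced);
/// ```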
pub fn compress(
    d: &mut CompressorOxide,
    in_buf: &[u8],
    out_buf: &mut [u8],
    flush: TDEFLFlush,
) -> (TDEFLStatus, usize, usize) {
    compress_inner(
        d,
        &mut CallbackOxide::new_callback_buf(in_buf, out_buf),
        flush,
    )
}

/// Main compression function. Feeds the compressed output to the provided callback
/// function as it is produced, rather than into an output buffer.
///
/// The callback should return `true` if it consumed the data successfully; returning
/// `false` signals failure and stops compression.
///
/// # Returns
/// Returns a tuple containing the current status of the compressor and the current
/// position in the input buffer.
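///
/// # Example
///
/// A minimal sketch collecting the compressed stream into a `Vec`, mirroring the test at
/// the bottom of this module:
///
/// ```
/// use miniz_oxide::deflate::core::{compress_to_output, CompressorOxide, TDEFLFlush, TDEFLStatus};
///
/// let mut d = CompressorOxide::default();
/// let mut encoded = Vec::new();
/// let (status, _in_consumed) =
///     compress_to_output(&mut d, b"Hello, hello, hello!", TDEFLFlush::Finish, |out| {
///         encoded.extend_from_slice(out);
///         true
///     });
/// assert_eq!(status, TDEFLStatus::Done);
/// ```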
pub fn compress_to_output(
    d: &mut CompressorOxide,
    in_buf: &[u8],
    flush: TDEFLFlush,
    callback_func: impl FnMut(&[u8]) -> bool,
) -> (TDEFLStatus, usize) {
    let res = compress_inner(
        d,
        &mut CallbackOxide::new_callback_func(
            in_buf,
            CallbackFunc {
                put_buf_func: Box::new(callback_func),
            },
        ),
        flush,
    );

    (res.0, res.1)
}

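// Shared driver for `compress` and `compress_to_output`: validates the flush state,
// dispatches to the fast or normal compressor, updates the adler32 checksum when needed,
// and flushes the final block once all input has been consumed.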
fn compress_inner(
    d: &mut CompressorOxide,
    callback: &mut CallbackOxide,
    flush: TDEFLFlush,
) -> (TDEFLStatus, usize, usize) {
    d.params.out_buf_ofs = 0;
    d.params.src_pos = 0;

    let prev_ok = d.params.prev_return_status == TDEFLStatus::Okay;
    let flush_finish_once = d.params.flush != TDEFLFlush::Finish || flush == TDEFLFlush::Finish;

    d.params.flush = flush;
    if !prev_ok || !flush_finish_once {
        d.params.prev_return_status = TDEFLStatus::BadParam;
        return (d.params.prev_return_status, 0, 0);
    }

    if d.params.flush_remaining != 0 || d.params.finished {
        let res = flush_output_buffer(callback, &mut d.params);
        d.params.prev_return_status = res.0;
        return res;
    }

    let one_probe = d.params.flags & MAX_PROBES_MASK as u32 == 1;
    let greedy = d.params.flags & TDEFL_GREEDY_PARSING_FLAG != 0;
    let filter_or_rle_or_raw = d.params.flags
        & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)
        != 0;

    let compress_success = if one_probe && greedy && !filter_or_rle_or_raw {
        compress_fast(d, callback)
    } else {
        compress_normal(d, callback)
    };

    if !compress_success {
        return (
            d.params.prev_return_status,
            d.params.src_pos,
            d.params.out_buf_ofs,
        );
    }

    if let Some(in_buf) = callback.in_buf {
        if d.params.flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32) != 0 {
            d.params.adler32 = update_adler32(d.params.adler32, &in_buf[..d.params.src_pos]);
        }
    }

    let flush_none = d.params.flush == TDEFLFlush::None;
    let in_left = callback.in_buf.map_or(0, |buf| buf.len()) - d.params.src_pos;
    let remaining = in_left != 0 || d.params.flush_remaining != 0;
    if !flush_none && d.dict.lookahead_size == 0 && !remaining {
        let flush = d.params.flush;
        match flush_block(d, callback, flush) {
            Err(_) => {
                d.params.prev_return_status = TDEFLStatus::PutBufFailed;
                return (
                    d.params.prev_return_status,
                    d.params.src_pos,
                    d.params.out_buf_ofs,
                );
            }
            Ok(x) if x < 0 => {
                return (
                    d.params.prev_return_status,
                    d.params.src_pos,
                    d.params.out_buf_ofs,
                )
            }
            _ => {
                d.params.finished = d.params.flush == TDEFLFlush::Finish;
                if d.params.flush == TDEFLFlush::Full {
                    memset(&mut d.dict.b.hash[..], 0);
                    memset(&mut d.dict.b.next[..], 0);
                    d.dict.size = 0;
                }
            }
        }
    }

    let res = flush_output_buffer(callback, &mut d.params);
    d.params.prev_return_status = res.0;

    res
}

/// Create a set of compression flags using parameters used by zlib and other compressors.
/// Mainly intended for use during transition from C libraries as it deals with raw integers.
///
/// # Parameters
/// `level` determines compression level. Clamped to a maximum of 10. Negative values result
/// in `CompressionLevel::DefaultLevel`.
/// `window_bits`: Above 0, wraps the stream in a zlib wrapper; 0 or negative for a raw
/// deflate stream.
/// `strategy`: Sets the strategy if this conforms to any of the values in `CompressionStrategy`.
///
/// # Notes
/// This function may be removed or moved to the `miniz_oxide_c_api` in the future.
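///
/// # Example
///
/// A quick sketch of the common case: level 6 with a zlib wrapper and the default strategy.
///
/// ```
/// use miniz_oxide::deflate::core::create_comp_flags_from_zip_params;
/// use miniz_oxide::deflate::CompressionStrategy;
///
/// let flags = create_comp_flags_from_zip_params(6, 15, CompressionStrategy::Default as i32);
/// ```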
pub fn create_comp_flags_from_zip_params(level: i32, window_bits: i32, strategy: i32) -> u32 {
    let num_probes = (if level >= 0 {
        cmp::min(10, level)
    } else {
        CompressionLevel::DefaultLevel as i32
    }) as usize;
    let greedy = if level <= 3 {
        TDEFL_GREEDY_PARSING_FLAG
    } else {
        0
    };
    let mut comp_flags = NUM_PROBES[num_probes] | greedy;

    if window_bits > 0 {
        comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
    }

    if level == 0 {
        comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
    } else if strategy == CompressionStrategy::Filtered as i32 {
        comp_flags |= TDEFL_FILTER_MATCHES;
    } else if strategy == CompressionStrategy::HuffmanOnly as i32 {
        comp_flags &= !MAX_PROBES_MASK as u32;
    } else if strategy == CompressionStrategy::Fixed as i32 {
        comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
    } else if strategy == CompressionStrategy::RLE as i32 {
        comp_flags |= TDEFL_RLE_MATCHES;
    }

    comp_flags
}

#[cfg(test)]
mod test {
    use super::{
        compress_to_output, create_comp_flags_from_zip_params, read_u16_le, write_u16_le,
        CompressionStrategy, CompressorOxide, TDEFLFlush, TDEFLStatus, DEFAULT_FLAGS,
        MZ_DEFAULT_WINDOW_BITS,
    };
    use crate::inflate::decompress_to_vec;

    #[test]
    fn u16_to_slice() {
        let mut slice = [0, 0];
        write_u16_le(2000, &mut slice, 0);
        assert_eq!(slice, [208, 7]);
    }

    #[test]
    fn u16_from_slice() {
        let slice = [208, 7];
        assert_eq!(read_u16_le(&slice, 0), 2000);
    }

    #[test]
    fn compress_output() {
        assert_eq!(
            DEFAULT_FLAGS,
            create_comp_flags_from_zip_params(
                4,
                MZ_DEFAULT_WINDOW_BITS,
                CompressionStrategy::Default as i32
            )
        );

        let slice = [
            1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3,
        ];
        let mut encoded = vec![];
        let flags = create_comp_flags_from_zip_params(6, 0, 0);
        let mut d = CompressorOxide::new(flags);
        let (status, in_consumed) =
            compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| {
                encoded.extend_from_slice(out);
                true
            });

        assert_eq!(status, TDEFLStatus::Done);
        assert_eq!(in_consumed, slice.len());

        let decoded = decompress_to_vec(&encoded[..]).unwrap();
        assert_eq!(&decoded[..], &slice[..]);
    }
}
2380}