valence_anvil/lib.rs

#![doc = include_str!("../README.md")]

use std::fs::{DirEntry, File};
use std::hash::Hash;
use std::io::{Cursor, ErrorKind, Read, Seek, SeekFrom, Write};
use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};

#[cfg(feature = "bevy_plugin")]
pub use bevy::*;
use bitfield_struct::bitfield;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use flate2::bufread::{GzDecoder, ZlibDecoder};
use flate2::write::{GzEncoder, ZlibEncoder};
use lru::LruCache;
use thiserror::Error;
use valence_nbt::binary::{FromModifiedUtf8, ToModifiedUtf8};
use valence_nbt::Compound;

#[cfg(feature = "bevy_plugin")]
mod bevy;
#[cfg(feature = "parsing")]
pub mod parsing;

/// Maximum number of region files kept open in the LRU cache at once.
const LRU_CACHE_SIZE: NonZeroUsize = match NonZeroUsize::new(256) {
    Some(n) => n,
    None => unreachable!(),
};

#[derive(Debug, Error)]
#[non_exhaustive]
pub enum RegionError {
    #[error("an I/O error occurred: {0}")]
    Io(#[from] std::io::Error),
    #[error("failed to convert OsString")]
    OsStringConv,
    #[error("chunk is allocated, but stream is missing")]
    MissingChunkStream,
    #[error("invalid chunk sector offset")]
    InvalidChunkSectorOffset,
    #[error("invalid chunk size")]
    InvalidChunkSize,
    #[error("invalid compression scheme number {0}")]
    InvalidCompressionScheme(u8),
    #[error("failed to parse NBT: {0}")]
    Nbt(#[from] valence_nbt::Error),
    #[error("not all chunk NBT data was read")]
    TrailingNbtData,
    #[error("oversized chunk")]
    OversizedChunk,
}

/// The compression scheme applied to chunk data, as identified by the
/// compression byte in a chunk's header.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Hash)]
#[repr(u8)]
#[non_exhaustive]
pub enum Compression {
    Gzip = 1,
    #[default]
    Zlib = 2,
    None = 3,
}

impl Compression {
    fn from_u8(compression: u8) -> Option<Compression> {
        match compression {
            1 => Some(Compression::Gzip),
            2 => Some(Compression::Zlib),
            3 => Some(Compression::None),
            _ => None,
        }
    }
}

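/// Options for writing chunks to a region file.
///
/// A minimal usage sketch (the `my_world/region` path is an assumption for
/// illustration):
///
/// ```no_run
/// use valence_anvil::{Compression, RegionFolder};
///
/// let mut folder = RegionFolder::new("my_world/region");
/// // Write Gzip-compressed chunks, and error on oversized chunks instead
/// // of writing external `.mcc` files.
/// folder.write_options.compression = Compression::Gzip;
/// folder.write_options.skip_oversized_chunks = true;
/// ```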
#[derive(Copy, Clone, Debug, Default)]
#[non_exhaustive]
pub struct WriteOptions {
    /// The compression method used to write chunks. Changing this can be
    /// useful for writing anvil files compatible with older Minecraft
    /// versions.
    pub compression: Compression,

    /// Whether to skip writing oversized chunks (>1 MiB after compression).
    /// Versions older than 1.15 (19w36a) cannot read oversized chunks, so
    /// this may be useful for writing region files compatible with those
    /// versions.
    pub skip_oversized_chunks: bool,
}

#[derive(Debug)]
pub struct RegionFolder {
    /// Region files. An LRU cache is used to limit the number of open file
    /// handles.
    regions: LruCache<RegionPos, RegionEntry>,
    /// Path to the directory containing the region files and chunk files.
    region_root: PathBuf,
    /// Scratch buffer for (de)compression.
    compression_buf: Vec<u8>,
    /// Options to use for writing chunks.
    pub write_options: WriteOptions,
}

impl RegionFolder {
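    /// Creates a new `RegionFolder` rooted at `region_root`, the directory
    /// containing the `r.x.z.mca` region files.
    ///
    /// A minimal sketch (the path is an assumption for illustration):
    ///
    /// ```no_run
    /// use valence_anvil::RegionFolder;
    ///
    /// // Typically the `region` subdirectory of a world save.
    /// let folder = RegionFolder::new("my_world/region");
    /// ```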
    pub fn new<R: Into<PathBuf>>(region_root: R) -> Self {
        Self {
            regions: LruCache::new(LRU_CACHE_SIZE),
            region_root: region_root.into(),
            compression_buf: Vec::new(),
            write_options: WriteOptions::default(),
        }
    }

    fn region<'a>(
        regions: &'a mut LruCache<RegionPos, RegionEntry>,
        region_root: &Path,
        region_x: i32,
        region_z: i32,
    ) -> Result<Option<&'a mut Region>, RegionError> {
        // Need to double get the entry from the cache to make the borrow checker happy.
        // Polonius will fix this eventually.
        if regions.get_mut(&(region_x, region_z)).is_some() {
            match regions.get_mut(&(region_x, region_z)) {
                Some(RegionEntry::Occupied(region)) => return Ok(Some(region)),
                Some(RegionEntry::Vacant) => return Ok(None),
                None => unreachable!(),
            }
        }

        let path = region_root.join(format!("r.{region_x}.{region_z}.mca"));

        let file = match File::options().read(true).write(true).open(path) {
            Ok(file) => file,
            Err(e) if e.kind() == ErrorKind::NotFound => {
                regions.put((region_x, region_z), RegionEntry::Vacant);
                return Ok(None);
            }
            Err(e) => return Err(e.into()),
        };

        // TODO: this is ugly.
        // TODO: try_get_or_insert_mut
        regions.try_get_or_insert((region_x, region_z), || {
            Region::open(file).map(|region| RegionEntry::Occupied(Box::new(region)))
        })?;
        let Some(RegionEntry::Occupied(res)) = regions.get_mut(&(region_x, region_z)) else {
            unreachable!()
        };
        Ok(Some(res))
    }

    /// Gets the raw chunk at the given chunk position.
    ///
    /// Returns `Ok(Some(chunk))` if the chunk exists and no errors occurred
    /// loading it. Returns `Ok(None)` if the chunk does not exist and no
    /// errors occurred attempting to load it. Returns `Err(_)` if an error
    /// occurred attempting to load the chunk.
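    ///
    /// A minimal usage sketch (path and coordinates are assumptions for
    /// illustration):
    ///
    /// ```no_run
    /// use valence_anvil::RegionFolder;
    ///
    /// let mut folder = RegionFolder::new("my_world/region");
    /// // Positions are world-space chunk coordinates, not region-relative.
    /// match folder.get_chunk::<String>(5, -3) {
    ///     Ok(Some(chunk)) => println!("chunk last saved at {}", chunk.timestamp),
    ///     Ok(None) => println!("chunk does not exist"),
    ///     Err(err) => eprintln!("failed to load chunk: {err}"),
    /// }
    /// ```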
    pub fn get_chunk<S>(
        &mut self,
        pos_x: i32,
        pos_z: i32,
    ) -> Result<Option<RawChunk<S>>, RegionError>
    where
        S: for<'a> FromModifiedUtf8<'a> + Hash + Ord,
    {
        let region_x = pos_x.div_euclid(32);
        let region_z = pos_z.div_euclid(32);

        let Some(region) = Self::region(&mut self.regions, &self.region_root, region_x, region_z)?
        else {
            return Ok(None);
        };

        region.get_chunk(pos_x, pos_z, &mut self.compression_buf, &self.region_root)
    }

    /// Deletes the chunk at the given chunk position, returning whether the
    /// chunk existed before it was deleted.
    ///
    /// Note that this only marks the chunk as deleted so that it cannot be
    /// retrieved, and can be overwritten by other chunks later. It does not
    /// decrease the size of the region file.
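    ///
    /// A minimal usage sketch (path and coordinates are assumptions for
    /// illustration):
    ///
    /// ```no_run
    /// use valence_anvil::RegionFolder;
    ///
    /// let mut folder = RegionFolder::new("my_world/region");
    /// if folder.delete_chunk(5, -3)? {
    ///     println!("chunk existed and is now deleted");
    /// }
    /// # Ok::<(), valence_anvil::RegionError>(())
    /// ```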
    pub fn delete_chunk(&mut self, pos_x: i32, pos_z: i32) -> Result<bool, RegionError> {
        let region_x = pos_x.div_euclid(32);
        let region_z = pos_z.div_euclid(32);

        let Some(region) = Self::region(&mut self.regions, &self.region_root, region_x, region_z)?
        else {
            return Ok(false);
        };

        region.delete_chunk(pos_x, pos_z, true, &self.region_root)
    }

    /// Sets the raw chunk at the given position, overwriting the old chunk if
    /// it exists.
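    ///
    /// A minimal usage sketch (path, coordinates, and NBT contents are
    /// assumptions for illustration):
    ///
    /// ```no_run
    /// use valence_anvil::RegionFolder;
    /// use valence_nbt::compound;
    ///
    /// let mut folder = RegionFolder::new("my_world/region");
    /// let chunk = compound! {
    ///     "Status" => "full",
    /// };
    /// folder.set_chunk(5, -3, &chunk)?;
    /// # Ok::<(), valence_anvil::RegionError>(())
    /// ```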
    pub fn set_chunk<S>(
        &mut self,
        pos_x: i32,
        pos_z: i32,
        chunk: &Compound<S>,
    ) -> Result<(), RegionError>
    where
        S: ToModifiedUtf8 + Hash + Ord,
    {
        let region_x = pos_x.div_euclid(32);
        let region_z = pos_z.div_euclid(32);

        let region = match Self::region(&mut self.regions, &self.region_root, region_x, region_z)? {
            Some(region) => region,
            None => {
                let path = self
                    .region_root
                    .join(format!("r.{region_x}.{region_z}.mca"));

                let file = File::options()
                    .read(true)
                    .write(true)
                    .create(true)
                    .truncate(false)
                    .open(path)?;

                // TODO: try_get_or_insert_mut
                self.regions.put(
                    (region_x, region_z),
                    RegionEntry::Occupied(Box::new(Region::create(file)?)),
                );
                let Some(RegionEntry::Occupied(region)) =
                    self.regions.get_mut(&(region_x, region_z))
                else {
                    unreachable!()
                };
                region
            }
        };

        region.set_chunk(
            pos_x,
            pos_z,
            chunk,
            self.write_options,
            &mut self.compression_buf,
            &self.region_root,
        )
    }

    /// Returns an iterator over the positions of all existing chunks in all
    /// regions.
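    ///
    /// A minimal usage sketch (the path is an assumption for illustration):
    ///
    /// ```no_run
    /// use valence_anvil::RegionFolder;
    ///
    /// let mut folder = RegionFolder::new("my_world/region");
    /// for pos in folder.all_chunk_positions()? {
    ///     let (x, z) = pos?;
    ///     println!("found chunk at ({x}, {z})");
    /// }
    /// # Ok::<(), valence_anvil::RegionError>(())
    /// ```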
    pub fn all_chunk_positions(
        &mut self,
    ) -> Result<impl Iterator<Item = Result<(i32, i32), RegionError>> + '_, RegionError> {
        fn extract_region_coordinates(
            file: std::io::Result<DirEntry>,
        ) -> Result<Option<(i32, i32)>, RegionError> {
            let file = file?;

            if !file.file_type()?.is_file() {
                return Ok(None);
            }

            let file_name = file
                .file_name()
                .into_string()
                .map_err(|_| RegionError::OsStringConv)?;

            // Read the file name as r.x.z.mca.
            let mut split = file_name.splitn(4, '.');
            if split.next() != Some("r") {
                return Ok(None);
            }
            let Some(Ok(x)) = split.next().map(str::parse) else {
                return Ok(None);
            };
            let Some(Ok(z)) = split.next().map(str::parse) else {
                return Ok(None);
            };
            if split.next() != Some("mca") {
                return Ok(None);
            }

            Ok(Some((x, z)))
        }

        fn region_chunks(
            this: &mut RegionFolder,
            pos: Result<(i32, i32), RegionError>,
        ) -> impl Iterator<Item = Result<(i32, i32), RegionError>> {
            let positions = match pos {
                Ok((region_x, region_z)) => {
                    match RegionFolder::region(
                        &mut this.regions,
                        &this.region_root,
                        region_x,
                        region_z,
                    ) {
                        Ok(Some(region)) => region.chunk_positions(region_x, region_z),
                        Ok(None) => Vec::new(),
                        Err(err) => vec![Err(err)],
                    }
                }
                Err(err) => vec![Err(err)],
            };
            positions.into_iter()
        }

        Ok(std::fs::read_dir(&self.region_root)?
            .filter_map(|file| extract_region_coordinates(file).transpose())
            .flat_map(|pos| region_chunks(self, pos)))
    }
}

/// A chunk represented by the raw compound data.
pub struct RawChunk<S = String> {
    /// The raw NBT data of the chunk.
    pub data: Compound<S>,
    /// The Unix timestamp, in seconds, of when the chunk was last written.
    pub timestamp: u32,
}

/// X and Z positions of a region.
type RegionPos = (i32, i32);

#[derive(Debug)]
enum RegionEntry {
    /// There is a region file loaded here.
    Occupied(Box<Region>),
    /// There is no region file at this position. Don't try to read it from the
    /// filesystem again.
    Vacant,
}

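/// A location entry in the region header, packing a chunk's starting sector
/// and sector count.
///
/// On disk this is a big-endian `u32` whose upper 24 bits are the sector
/// offset and whose low byte is the sector count. For example, a chunk
/// occupying one sector starting at sector 2 (right after the header) is
/// stored as `0x0000_0201`. An all-zero entry means the chunk is absent.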
#[bitfield(u32)]
struct Location {
    count: u8,
    #[bits(24)]
    offset: u32,
}

impl Location {
    fn is_none(self) -> bool {
        self.0 == 0
    }

    fn offset_and_count(self) -> (u64, usize) {
        (u64::from(self.offset()), usize::from(self.count()))
    }
}

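/// An open anvil region file: an 8 KiB header holding 1024 location entries
/// and 1024 big-endian timestamps, followed by chunk data stored in 4 KiB
/// sectors. `used_sectors` tracks which sectors are currently allocated so
/// that new chunk data can be placed without overlapping existing chunks.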
#[derive(Debug)]
struct Region {
    file: File,
    locations: [Location; 1024],
    timestamps: [u32; 1024],
    used_sectors: bitvec::vec::BitVec,
}

impl Region {
    fn create(mut file: File) -> Result<Self, RegionError> {
        // Write an empty header; all locations and timestamps start zeroed.
        let header = [0; SECTOR_SIZE * 2];
        file.write_all(&header)?;

        Ok(Self {
            file,
            locations: [Location::default(); 1024],
            timestamps: [0; 1024],
            used_sectors: bitvec::vec::BitVec::repeat(true, 2),
        })
    }

    fn open(mut file: File) -> Result<Self, RegionError> {
        let mut header = [0; SECTOR_SIZE * 2];
        file.read_exact(&mut header)?;

        let locations = std::array::from_fn(|i| {
            Location(u32::from_be_bytes(
                header[i * 4..i * 4 + 4].try_into().unwrap(),
            ))
        });
        let timestamps = std::array::from_fn(|i| {
            u32::from_be_bytes(
                header[i * 4 + SECTOR_SIZE..i * 4 + SECTOR_SIZE + 4]
                    .try_into()
                    .unwrap(),
            )
        });

        // The first two sectors hold the header and are always in use.
        let mut used_sectors = bitvec::vec::BitVec::repeat(true, 2);
        for location in locations {
            if location.is_none() {
                // No chunk exists at this position.
                continue;
            }

            let (sector_offset, sector_count) = location.offset_and_count();
            if sector_offset < 2 {
                // Skip locations pointing inside the header.
                continue;
            }
            if sector_count == 0 {
                continue;
            }
            if sector_offset * SECTOR_SIZE as u64 > file.metadata()?.len() {
                // This would go past the end of the file, which is impossible.
                continue;
            }

            Self::reserve_sectors(&mut used_sectors, sector_offset, sector_count);
        }

        Ok(Self {
            file,
            locations,
            timestamps,
            used_sectors,
        })
    }

    fn get_chunk<S>(
        &mut self,
        pos_x: i32,
        pos_z: i32,
        decompress_buf: &mut Vec<u8>,
        region_root: &Path,
    ) -> Result<Option<RawChunk<S>>, RegionError>
    where
        S: for<'a> FromModifiedUtf8<'a> + Hash + Ord,
    {
        let chunk_idx = Self::chunk_idx(pos_x, pos_z);

        let location = self.locations[chunk_idx];
        let timestamp = self.timestamps[chunk_idx];

        if location.is_none() {
            // No chunk exists at this position.
            return Ok(None);
        }

        let (sector_offset, sector_count) = location.offset_and_count();

        // If the sector offset were <2, the chunk data would overlap the region
        // header. That doesn't make any sense.
        if sector_offset < 2 {
            return Err(RegionError::InvalidChunkSectorOffset);
        }

        // Seek to the beginning of the chunk's data.
        self.file
            .seek(SeekFrom::Start(sector_offset * SECTOR_SIZE as u64))?;

        let exact_chunk_size = self.file.read_u32::<BigEndian>()? as usize;
        if exact_chunk_size == 0 {
            return Err(RegionError::MissingChunkStream);
        }

        // The size of this chunk in sectors must always be >= the exact size.
        if sector_count * SECTOR_SIZE < exact_chunk_size {
            return Err(RegionError::InvalidChunkSize);
        }

        let mut compression = self.file.read_u8()?;

        let data_buf = if Self::is_external_stream_chunk(compression) {
            // Oversized chunk: the payload lives in an external `c.x.z.mcc` file.
            compression = Self::external_chunk_version(compression);
            let mut external_file =
                File::open(Self::external_chunk_file(pos_x, pos_z, region_root))?;
            let mut buf = Vec::new();
            external_file.read_to_end(&mut buf)?;
            buf.into_boxed_slice()
        } else {
            // The size includes the version of the stream, but we have already read that.
            let mut data_buf = vec![0; exact_chunk_size - 1].into_boxed_slice();
            self.file.read_exact(&mut data_buf)?;
            data_buf
        };

        let r = data_buf.as_ref();

        decompress_buf.clear();

        // What compression does the chunk use?
        let mut nbt_slice = match Compression::from_u8(compression) {
            Some(Compression::Gzip) => {
                let mut z = GzDecoder::new(r);
                z.read_to_end(decompress_buf)?;
                decompress_buf.as_slice()
            }
            Some(Compression::Zlib) => {
                let mut z = ZlibDecoder::new(r);
                z.read_to_end(decompress_buf)?;
                decompress_buf.as_slice()
            }
            // Uncompressed
            Some(Compression::None) => r,
            // Unknown
            None => return Err(RegionError::InvalidCompressionScheme(compression)),
        };

        let (data, _) = valence_nbt::from_binary(&mut nbt_slice)?;

        if !nbt_slice.is_empty() {
            return Err(RegionError::TrailingNbtData);
        }

        Ok(Some(RawChunk { data, timestamp }))
    }

    fn delete_chunk(
        &mut self,
        pos_x: i32,
        pos_z: i32,
        delete_on_disk: bool,
        region_root: &Path,
    ) -> Result<bool, RegionError> {
        let chunk_idx = Self::chunk_idx(pos_x, pos_z);

        let location = self.locations[chunk_idx];
        if location.is_none() {
            // The chunk is already missing; there is nothing to delete.
            return Ok(false);
        }

        if delete_on_disk {
            // Zero out the chunk's location entry in the header.
            self.file.seek(SeekFrom::Start(chunk_idx as u64 * 4))?;
            self.file.write_u32::<BigEndian>(0)?;

            Self::delete_external_chunk_file(pos_x, pos_z, region_root)?;
        }

        // Free the sectors the chunk occupied so they can be reused.
        let (sector_offset, sector_count) = location.offset_and_count();
        if sector_offset >= 2 {
            let start_index = sector_offset as usize;
            let end_index = start_index + sector_count;
            let len = self.used_sectors.len();
            self.used_sectors[start_index.min(len)..end_index.min(len)].fill(false);
        }

        self.locations[chunk_idx] = Location::new();

        Ok(true)
    }

    fn set_chunk<S>(
        &mut self,
        pos_x: i32,
        pos_z: i32,
        chunk: &Compound<S>,
        options: WriteOptions,
        compress_buf: &mut Vec<u8>,
        region_root: &Path,
    ) -> Result<(), RegionError>
    where
        S: ToModifiedUtf8 + Hash + Ord,
    {
        // Erase the chunk from the allocated chunks (not from disk).
        self.delete_chunk(pos_x, pos_z, false, region_root)?;

        // Write the chunk as NBT and compress it according to the compression method.
        compress_buf.clear();
        let mut compress_cursor = Cursor::new(compress_buf);
        match options.compression {
            Compression::Gzip => valence_nbt::to_binary(
                chunk,
                GzEncoder::new(&mut compress_cursor, flate2::Compression::default()),
                "",
            )?,
            Compression::Zlib => valence_nbt::to_binary(
                chunk,
                ZlibEncoder::new(&mut compress_cursor, flate2::Compression::default()),
                "",
            )?,
            Compression::None => valence_nbt::to_binary(chunk, &mut compress_cursor, "")?,
        }
        let compress_buf = compress_cursor.into_inner();

        // Additional 5 bytes for the exact chunk size (4) and compression type (1).
        let num_sectors_needed = (compress_buf.len() + 5).div_ceil(SECTOR_SIZE);
        // The sector count in a location entry is a single byte, so chunks spanning
        // 256 or more sectors must be written externally.
        let (start_sector, num_sectors) = if num_sectors_needed >= 256 {
            if options.skip_oversized_chunks {
                return Err(RegionError::OversizedChunk);
            }

            // Write the oversized chunk to an external file.
            File::create(Self::external_chunk_file(pos_x, pos_z, region_root))?
                .write_all(&*compress_buf)?;

            let start_sector = self.allocate_sectors(1);
            self.file
                .seek(SeekFrom::Start(start_sector * SECTOR_SIZE as u64))?;

            // Write the exact chunk size, which includes *only* the compression version
            // (the rest of the chunk is external).
            self.file.write_u32::<BigEndian>(1)?;
            // Write the compression, with the marker which says our chunk is oversized.
            self.file.write_u8((options.compression as u8) | 0x80)?;

            (start_sector, 1)
        } else {
            // Delete the oversized chunk if it existed before.
            Self::delete_external_chunk_file(pos_x, pos_z, region_root)?;

            let start_sector = self.allocate_sectors(num_sectors_needed);
            self.file
                .seek(SeekFrom::Start(start_sector * SECTOR_SIZE as u64))?;

            // Write the exact chunk size, which accounts for the compression version
            // that is not in our compress_buf.
            self.file
                .write_u32::<BigEndian>((compress_buf.len() + 1) as u32)?;
            // Write the compression.
            self.file.write_u8(options.compression as u8)?;
            // Write the data.
            self.file.write_all(&*compress_buf)?;

            (start_sector, num_sectors_needed)
        };

        let location = Location::new()
            .with_offset(start_sector as u32)
            .with_count(num_sectors as u8);
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|duration| duration.as_secs() as u32)
            .unwrap_or(0);

        // Write the changed header information to the file...
        let chunk_idx = Self::chunk_idx(pos_x, pos_z);
        self.file.seek(SeekFrom::Start(chunk_idx as u64 * 4))?;
        self.file.write_u32::<BigEndian>(location.0)?;
        self.file
            .seek(SeekFrom::Start(chunk_idx as u64 * 4 + SECTOR_SIZE as u64))?;
        self.file.write_u32::<BigEndian>(timestamp)?;

        // ...and mirror it in our in-memory header.
        self.locations[chunk_idx] = location;
        self.timestamps[chunk_idx] = timestamp;

        // Pad the file to a multiple of SECTOR_SIZE.
        let file_length = self.file.seek(SeekFrom::End(0))?;
        let rem = file_length as usize % SECTOR_SIZE;
        if rem != 0 {
            self.file
                .write_all(&[0; SECTOR_SIZE][..SECTOR_SIZE - rem])?;
        }

        Ok(())
    }

    fn chunk_positions(
        &self,
        region_x: i32,
        region_z: i32,
    ) -> Vec<Result<(i32, i32), RegionError>> {
        self.locations
            .iter()
            .enumerate()
            .filter_map(move |(index, location)| {
                if location.is_none() {
                    None
                } else {
                    Some((
                        region_x * 32 + (index % 32) as i32,
                        region_z * 32 + (index / 32) as i32,
                    ))
                }
            })
            .map(Ok)
            .collect()
    }

    fn external_chunk_file(pos_x: i32, pos_z: i32, region_root: &Path) -> PathBuf {
        region_root.join(format!("c.{pos_x}.{pos_z}.mcc"))
    }

    fn delete_external_chunk_file(
        pos_x: i32,
        pos_z: i32,
        region_root: &Path,
    ) -> Result<(), RegionError> {
        match std::fs::remove_file(Self::external_chunk_file(pos_x, pos_z, region_root)) {
            Ok(()) => Ok(()),
            Err(err) if err.kind() == ErrorKind::NotFound => Ok(()),
            Err(err) => Err(err.into()),
        }
    }

    fn reserve_sectors(
        used_sectors: &mut bitvec::vec::BitVec,
        sector_offset: u64,
        sector_count: usize,
    ) {
        let start_index = sector_offset as usize;
        let end_index = sector_offset as usize + sector_count;
        if used_sectors.len() < end_index {
            used_sectors.resize(start_index, false);
            used_sectors.resize(end_index, true);
        } else {
            used_sectors[start_index..end_index].fill(true);
        }
    }

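    /// Allocates `num_sectors` contiguous sectors using a first-fit scan of
    /// `used_sectors` and returns the starting sector index. If no gap is
    /// large enough, the allocation is placed at the end of the file.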
    fn allocate_sectors(&mut self, num_sectors: usize) -> u64 {
        // Find the first run of `num_sectors` consecutive free sectors.
        let mut index = 0;
        let free_space_start = loop {
            let Some(mut free_space_start) = self.used_sectors[index..].first_zero() else {
                // We have reached a sequence of 1's at the end of the list, so the next
                // free space is at the end of the file.
                break self.used_sectors.len();
            };
            free_space_start += index;

            let Some(mut free_space_end) = self.used_sectors[free_space_start..].first_one() else {
                // There is no 1 after this 0, so we have enough space here (even if we
                // have to increase the file size).
                break free_space_start;
            };
            free_space_end += free_space_start;

            if free_space_end - free_space_start >= num_sectors {
                // The gap is large enough, so we have enough space.
                break free_space_start;
            }

            index = free_space_end;
        };

        Self::reserve_sectors(&mut self.used_sectors, free_space_start as u64, num_sectors);
        free_space_start as u64
    }

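    /// Maps world-space chunk coordinates to an index into the 32x32
    /// location/timestamp tables. For example, chunk (33, -1) has
    /// region-local coordinates (1, 31) and maps to index 1 + 31 * 32 = 993.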
    fn chunk_idx(pos_x: i32, pos_z: i32) -> usize {
        (pos_x.rem_euclid(32) + pos_z.rem_euclid(32) * 32) as usize
    }

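    /// The high bit of the compression byte marks an oversized chunk whose
    /// payload is stored in an external `c.x.z.mcc` file rather than inline
    /// in the region file.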
    fn is_external_stream_chunk(stream_version: u8) -> bool {
        (stream_version & 0x80) != 0
    }

    fn external_chunk_version(stream_version: u8) -> u8 {
        stream_version & !0x80
    }
}

/// The size of a region file sector in bytes.
const SECTOR_SIZE: usize = 4096;