1
use std::{
2
    io::{self, BufRead, BufReader, ErrorKind, Read, Seek, SeekFrom, Write},
3
    sync::Arc,
4
    time::Instant,
5
};
6

            
7
use file_manager::{fs::StdFile, FileManager, OpenOptions, PathId};
8
use parking_lot::{Condvar, Mutex, MutexGuard};
9

            
10
use crate::{
11
    buffered::Buffered,
12
    entry::{EntryId, CHUNK, END_OF_ENTRY, NEW_ENTRY},
13
    to_io_result::ToIoResult,
14
    Configuration, LogPosition,
15
};
16

            
17
/// A cloneable handle to an open log segment file.
///
/// All clones share the same underlying [`LogFileData`] through an [`Arc`];
/// cloning never duplicates file state.
#[derive(Debug)]
pub struct LogFile<F>
where
    F: file_manager::File,
{
    // Shared state: the mutex-guarded writer, the sync condvar, and the
    // creation timestamp.
    data: Arc<LogFileData<F>>,
}
24

            
25
impl<F> LogFile<F>
where
    F: file_manager::File,
{
    /// Opens the segment file at `path` for writing.
    ///
    /// `created_at` is recorded only when `last_entry_id` is `None` — i.e.
    /// when the segment was not recovered with pre-existing entries.
    pub fn write(
        id: u64,
        path: PathId,
        validated_length: u64,
        last_entry_id: Option<EntryId>,
        config: &Configuration<F::Manager>,
    ) -> io::Result<Self> {
        let writer = LogFileWriter::new(id, path, validated_length, last_entry_id, config)?;
        let created_at = if last_entry_id.is_some() {
            None
        } else {
            Some(Instant::now())
        };
        Ok(Self {
            data: Arc::new(LogFileData {
                writer: Mutex::new(writer),
                sync: Condvar::new(),
                created_at,
            }),
        })
    }

    /// Returns when this file was first opened empty, or `None` if the file
    /// was recovered with existing entries.
    pub fn created_at(&self) -> Option<Instant> {
        self.data.created_at
    }

    /// Acquires and returns the writer lock for this file.
    pub fn lock(&self) -> MutexGuard<'_, LogFileWriter<F>> {
        self.data.writer.lock()
    }

    /// Updates this file's id and renames the underlying file to `new_name`,
    /// holding the writer lock for the duration.
    pub fn rename(&self, new_id: u64, new_name: &str) -> io::Result<()> {
        let mut writer = self.data.writer.lock();
        writer.id = new_id;
        writer.rename(new_name)
    }

    /// Ensures at least `target_synced_bytes` bytes of this file have been
    /// synchronized to disk, blocking until the synchronization completes.
    pub fn synchronize(&self, target_synced_bytes: u64) -> io::Result<()> {
        // Flush the buffer to disk.
        let data = self.lock();
        self.synchronize_locked(data, target_synced_bytes)
            .map(|_| ())
    }

    /// Ensures at least `target_synced_bytes` bytes are synchronized to disk,
    /// starting from an already-acquired writer guard and returning a
    /// re-acquired guard.
    ///
    /// Only one thread performs the `sync_data` call at a time (tracked by
    /// `is_syncing`); other callers wait on the condvar until the syncing
    /// thread finishes and notifies. The mutex is intentionally released
    /// while `sync_data` runs so writers are not blocked by the fsync.
    pub fn synchronize_locked<'a>(
        &'a self,
        mut data: MutexGuard<'a, LogFileWriter<F>>,
        target_synced_bytes: u64,
    ) -> io::Result<MutexGuard<'a, LogFileWriter<F>>> {
        loop {
            if data.synchronized_through >= target_synced_bytes {
                // While we were waiting for the lock, another thread synced our
                // data for us.
                break;
            } else if data.is_syncing {
                // Another thread is currently synchronizing this file.
                self.data.sync.wait(&mut data);
            } else {
                // Become the sync thread for this file.
                data.is_syncing = true;

                // Check if we need to flush the buffer before calling fsync.
                // It's possible that the currently buffered data doesn't need
                // to be flushed.
                if data.buffer_position() < target_synced_bytes {
                    data.file.flush()?;
                }
                let synchronized_length = data.buffer_position();

                // Get a duplicate handle we can use to call sync_data with while the
                // mutex isn't locked.
                let file_to_sync = data.file.inner().try_clone()?;
                drop(data);

                file_to_sync.sync_data()?;

                // Re-acquire the lock, record the synced length, and wake any
                // threads waiting on this synchronization pass.
                data = self.lock();
                data.is_syncing = false;
                data.synchronized_through = synchronized_length;
                self.data.sync.notify_all();
                break;
            }
        }

        Ok(data)
    }
}
115
impl<F> Clone for LogFile<F>
116
where
117
    F: file_manager::File,
118
{
119
30471
    fn clone(&self) -> Self {
120
30471
        Self {
121
30471
            data: self.data.clone(),
122
30471
        }
123
30471
    }
124
}
125

            
126
/// State shared by every clone of a [`LogFile`].
#[derive(Debug)]
struct LogFileData<F>
where
    F: file_manager::File,
{
    // The buffered writer; the `sync` condvar waits on this mutex.
    writer: Mutex<LogFileWriter<F>>,
    // Set when the file was created empty; `None` when recovered with
    // existing entries (see `LogFile::write`).
    created_at: Option<Instant>,
    // Signaled after a synchronization pass completes (see
    // `LogFile::synchronize_locked`).
    sync: Condvar,
}
135

            
136
/// The write-side state of a single log segment file.
#[derive(Debug)]
pub struct LogFileWriter<F>
where
    F: file_manager::File,
{
    // This segment's numeric id; updated by `LogFile::rename`.
    id: u64,
    // Current path of the segment file.
    path: PathId,
    // Buffered handle to the underlying file.
    file: Buffered<F>,
    // Id of the most recently written entry, if any.
    last_entry_id: Option<EntryId>,
    // Version payload written into each segment header.
    version_info: Arc<Vec<u8>>,
    // Number of bytes known to be durably synchronized to disk.
    synchronized_through: u64,
    // True while some thread is performing an fsync for this file.
    is_syncing: bool,
    // File manager used to open and rename segment files.
    manager: F::Manager,
}
150

            
151
/// An 8 KiB block of zero bytes used to pre-allocate segment files in chunks
/// (see `LogFileWriter::new`). The previous size, 8196, appears to have been a
/// typo for 8192; the fill loop works with any chunk size, so this change is
/// behavior-compatible.
static ZEROES: [u8; 8192] = [0; 8192];
152

            
153
impl<F> LogFileWriter<F>
154
where
155
    F: file_manager::File,
156
{
157
720
    fn new(
158
720
        id: u64,
159
720
        path: PathId,
160
720
        validated_length: u64,
161
720
        last_entry_id: Option<EntryId>,
162
720
        config: &Configuration<F::Manager>,
163
720
    ) -> io::Result<Self> {
164
720
        println!("New {}", path.display());
165
720
        let mut file = config.file_manager.open(
166
720
            &path,
167
720
            OpenOptions::new().create(true).write(true).read(true),
168
720
        )?;
169

            
170
        // Truncate or extend the file to the next multiple of the preallocation
171
        // length.
172
720
        let preallocate_bytes = u64::from(config.preallocate_bytes);
173
720
        let padded_length = ((validated_length + preallocate_bytes - 1) / preallocate_bytes).max(1)
174
720
            * preallocate_bytes;
175
720
        let length = file.seek(SeekFrom::End(0))?;
176
720
        let bytes_to_fill = padded_length.checked_sub(length);
177
720
        if let Some(bytes_to_fill) = bytes_to_fill {
178
719
            if bytes_to_fill > 0 {
179
312
                let mut bytes_to_fill = usize::try_from(bytes_to_fill).to_io()?;
180
                // Pre-allocate this disk space by writing zeroes.
181
312
                file.set_len(padded_length)?;
182
312
                file.seek(SeekFrom::Start(validated_length))?;
183
40248
                while bytes_to_fill > 0 {
184
39936
                    let bytes_to_write = bytes_to_fill.min(ZEROES.len());
185
39936
                    file.write_all(&ZEROES[..bytes_to_write])?;
186
39936
                    bytes_to_fill -= bytes_to_write;
187
                }
188
407
            }
189
1
        }
190

            
191
        // Position the writer to write after the last validated byte.
192
720
        file.seek(SeekFrom::Start(validated_length))?;
193
720
        let mut file = Buffered::with_capacity(file, config.buffer_bytes)?;
194

            
195
720
        if validated_length == 0 {
196
512
            Self::write_header(&mut file, &config.version_info)?;
197
512
            file.flush()?;
198
208
        }
199

            
200
720
        Ok(Self {
201
720
            id,
202
720
            path,
203
720
            file,
204
720
            last_entry_id,
205
720
            version_info: config.version_info.clone(),
206
720
            synchronized_through: validated_length,
207
720
            is_syncing: false,
208
720
            manager: config.file_manager.clone(),
209
720
        })
210
720
    }
211

            
212
    fn write_header(file: &mut Buffered<F>, version_info: &[u8]) -> io::Result<()> {
213
2760
        file.write_all(b"okw\0")?;
214
2760
        let version_size = u8::try_from(version_info.len()).to_io()?;
215
2760
        file.write_all(&[version_size])?;
216
2760
        file.write_all(version_info)?;
217
2760
        Ok(())
218
2760
    }
219

            
220
4496
    pub fn path(&self) -> &PathId {
221
4496
        &self.path
222
4496
    }
223

            
224
34512
    pub fn id(&self) -> u64 {
225
34512
        self.id
226
34512
    }
227

            
228
89949
    pub fn position(&self) -> u64 {
229
89949
        self.file.position()
230
89949
    }
231

            
232
22348
    pub fn buffer_position(&self) -> u64 {
233
22348
        self.file.buffer_position()
234
22348
    }
235

            
236
2625
    pub fn is_synchronized(&self) -> bool {
237
2625
        self.file.position() == self.synchronized_through
238
2625
    }
239

            
240
    pub fn revert_to(&mut self, length: u64) -> io::Result<()> {
241
        // Reverting doesn't need to change the bits on disk, as long as we
242
        // gracefully fail when an invalid byte is encountered at the start of
243
        // an entry.
244
2249
        self.file.seek(SeekFrom::Start(length))?;
245
2249
        if self.synchronized_through > length {
246
2248
            self.synchronized_through = length;
247
2248
        }
248
2249
        if length == 0 {
249
2248
            Self::write_header(&mut self.file, &self.version_info)?;
250
2248
            self.last_entry_id = None;
251
1
        }
252

            
253
2249
        Ok(())
254
2249
    }
255

            
256
4437
    pub fn rename(&mut self, new_name: &str) -> io::Result<()> {
257
4437
        let new_path = PathId::from(
258
4437
            self.path
259
4437
                .parent()
260
4437
                .expect("parent path not found")
261
4437
                .join(new_name),
262
4437
        );
263
4437
        self.manager.rename(&self.path, new_path.clone())?;
264
4437
        self.path = new_path;
265
4437

            
266
4437
        Ok(())
267
4437
    }
268

            
269
2248
    pub fn last_entry_id(&self) -> Option<EntryId> {
270
2248
        self.last_entry_id
271
2248
    }
272

            
273
29748
    pub fn set_last_entry_id(&mut self, last_entry_id: Option<EntryId>) {
274
29748
        self.last_entry_id = last_entry_id;
275
29748
    }
276
}
277

            
278
impl<F> Write for LogFileWriter<F>
279
where
280
    F: file_manager::File,
281
{
282
210114
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
283
210114
        let bytes_written = self.file.write(buf)?;
284

            
285
210114
        Ok(bytes_written)
286
210114
    }
287

            
288
    fn flush(&mut self) -> std::io::Result<()> {
289
        self.file.flush()
290
    }
291
}
292

            
293
/// Reads a log segment, which contains one or more log entries.
#[derive(Debug)]
pub struct SegmentReader<F = StdFile>
where
    F: file_manager::File,
{
    // Identifier of this segment file, supplied by the caller of `new`.
    pub(crate) file_id: u64,
    // Buffered reader over the segment file.
    pub(crate) file: BufReader<F>,
    // Header recovered from the start of the file.
    pub(crate) header: RecoveredSegment,
    // Id of the entry whose header was most recently read and accepted.
    pub(crate) current_entry_id: Option<EntryId>,
    // First and last accepted entry ids encountered while scanning.
    pub(crate) first_entry_id: Option<EntryId>,
    pub(crate) last_entry_id: Option<EntryId>,
    // Reader offset captured just before each entry-header read; starts at
    // the end of the file header.
    pub(crate) valid_until: u64,
}
307

            
308
impl<F> SegmentReader<F>
where
    F: file_manager::File,
{
    /// Opens the segment at `path` and validates its header.
    ///
    /// The header layout matches `LogFileWriter::write_header`: the magic
    /// bytes `okw\0`, a one-byte version payload length, then the payload.
    ///
    /// # Errors
    ///
    /// Returns `InvalidData` if the magic code is missing or the version byte
    /// is newer than this reader understands, plus any underlying I/O error.
    pub(crate) fn new<M>(path: &PathId, file_id: u64, manager: &M) -> io::Result<Self>
    where
        M: FileManager<File = F>,
    {
        let mut file = manager.open(path, OpenOptions::new().read(true))?;
        file.rewind()?;
        let mut file = BufReader::new(file);
        let mut buffer = Vec::with_capacity(256 + 5);
        buffer.resize(5, 0);
        file.read_exact(&mut buffer)?;

        if &buffer[0..3] != b"okw" {
            return Err(io::Error::new(
                ErrorKind::InvalidData,
                "segment file did not contain magic code",
            ));
        }

        // buffer[3] is the format version; only version 0 is supported.
        if buffer[3] != 0 {
            return Err(io::Error::new(
                ErrorKind::InvalidData,
                "segment file was written with a newer version",
            ));
        }

        let version_info_length = buffer[4];
        buffer.resize(usize::from(version_info_length), 0);
        file.read_exact(&mut buffer)?;

        let header = RecoveredSegment {
            version_info: buffer,
        };

        Ok(Self {
            file_id,
            file,
            header,
            current_entry_id: None,
            first_entry_id: None,
            last_entry_id: None,
            // The header occupies 5 fixed bytes plus the version payload.
            valid_until: u64::from(version_info_length) + 5,
        })
    }

    /// Attempts to scan the next entry header, returning whether a valid
    /// entry was found. Updates `current_entry_id`, `first_entry_id`, and
    /// `last_entry_id` accordingly.
    fn read_next_entry(&mut self) -> io::Result<bool> {
        self.valid_until = self.file.stream_position()?;
        // An entry header is a one-byte tag followed by a little-endian u64 id.
        let mut header_bytes = [0; 9];
        match self.file.read_exact(&mut header_bytes) {
            Ok(()) => {}
            Err(err) if err.kind() == ErrorKind::UnexpectedEof => return Ok(false),
            Err(err) => return Err(err),
        }

        if let NEW_ENTRY = header_bytes[0] {
            let read_id = EntryId(u64::from_le_bytes(header_bytes[1..].try_into().unwrap()));
            // Ids below the file id are rejected — presumably stale data left
            // from a previous use of this (recycled/preallocated) file; TODO
            // confirm against the writer's id-assignment scheme.
            if read_id.0 >= self.file_id {
                self.current_entry_id = Some(read_id);
                if self.first_entry_id.is_none() {
                    self.first_entry_id = self.current_entry_id;
                }
                self.last_entry_id = self.current_entry_id;

                return Ok(true);
            }
        }

        self.current_entry_id = None;
        Ok(false)
    }

    /// Reads an entry from the log. If no more entries are found, None is
    /// returned.
    pub fn read_entry(&mut self) -> io::Result<Option<Entry<'_, F>>> {
        if let Some(id) = self.current_entry_id.take() {
            // Skip the remainder of the current entry so the reader is
            // positioned at the next entry header.
            let mut entry = Entry { id, reader: self };
            while let Some(chunk) = match entry.read_chunk()? {
                ReadChunkResult::Chunk(chunk) => Some(chunk),
                _ => None,
            } {
                // skip chunk
                chunk.skip_remaining_bytes()?;
            }
        }

        if !self.read_next_entry()? {
            // No more entries.
            return Ok(None);
        }

        Ok(Some(Entry {
            id: self
                .current_entry_id
                .expect("read_next_entry populated this"),
            reader: self,
        }))
    }
}
410

            
411
/// A stored entry inside of a [`WriteAheadLog`](crate::WriteAheadLog).
///
/// Each entry is composed of a series of chunks of data that were previously
/// written using [`EntryWriter::write_chunk`](crate::EntryWriter::write_chunk).
#[derive(Debug)]
pub struct Entry<'a, F = StdFile>
where
    F: file_manager::File,
{
    // The id read from this entry's header.
    pub(crate) id: EntryId,
    // The segment reader this entry is being read from.
    pub(crate) reader: &'a mut SegmentReader<F>,
}
423

            
424
impl<'entry, F> Entry<'entry, F>
where
    F: file_manager::File,
{
    /// The unique id of this entry.
    #[must_use]
    pub const fn id(&self) -> EntryId {
        self.id
    }

    /// The segment that this entry was recovered from.
    #[must_use]
    pub fn segment(&self) -> &RecoveredSegment {
        &self.reader.header
    }

    /// Reads the next chunk of data written in this entry. If another chunk of
    /// data is found in this entry, [`ReadChunkResult::Chunk`] will be
    /// returned. If no other chunks are found, [`ReadChunkResult::EndOfEntry`]
    /// will be returned.
    ///
    /// # Aborted Log Entries
    ///
    /// In the event of recovering from a crash or power outage, it is possible
    /// to receive [`ReadChunkResult::AbortedEntry`]. It is also possible when
    /// reading from a returned [`EntryChunk`] to encounter an unexpected
    /// end-of-file error. When these situations arise, the entire entry that
    /// was being read should be ignored.
    ///
    /// The format chosen for this log format makes some tradeoffs, and one of
    /// the tradeoffs is not knowing the full length of an entry when it is
    /// being written. This allows for very large log entries to be written
    /// without requiring memory for the entire entry.
    pub fn read_chunk<'chunk>(&'chunk mut self) -> io::Result<ReadChunkResult<'chunk, 'entry, F>> {
        // Peek at the next tag byte without consuming it; refill the buffer
        // first if it is empty. If the buffer is still empty after `fill_buf`
        // (end of file), the match below falls through to `AbortedEntry`.
        if self.reader.file.buffer().is_empty() {
            self.reader.file.fill_buf()?;
        }

        match self.reader.file.buffer().first().copied() {
            Some(CHUNK) => {
                // A chunk header is the one-byte tag plus a little-endian u32
                // payload length.
                let mut header_bytes = [0; 5];
                let offset = self.reader.file.stream_position()?;
                self.reader.file.read_exact(&mut header_bytes)?;
                Ok(ReadChunkResult::Chunk(EntryChunk {
                    position: LogPosition {
                        file_id: self.reader.file_id,
                        offset,
                    },
                    entry: self,
                    calculated_crc: 0,
                    stored_crc32: None,
                    bytes_remaining: u32::from_le_bytes(
                        header_bytes[1..5].try_into().expect("u32 is 4 bytes"),
                    ),
                }))
            }
            Some(END_OF_ENTRY) => {
                // Consume only the tag byte; nothing follows it.
                self.reader.file.consume(1);
                Ok(ReadChunkResult::EndOfEntry)
            }
            _ => Ok(ReadChunkResult::AbortedEntry),
        }
    }

    /// Reads all chunks for this entry. If the entry was completely written,
    /// the list of chunks of data is returned. If the entry wasn't completely
    /// written, `None` will be returned.
    ///
    /// # Errors
    ///
    /// Returns `InvalidData` if any chunk fails its CRC check, plus any
    /// underlying I/O error.
    pub fn read_all_chunks(&mut self) -> io::Result<Option<Vec<Vec<u8>>>> {
        let mut chunks = Vec::new();
        loop {
            let mut chunk = match self.read_chunk()? {
                ReadChunkResult::Chunk(chunk) => chunk,
                ReadChunkResult::EndOfEntry => break,
                ReadChunkResult::AbortedEntry => return Ok(None),
            };
            chunks.push(chunk.read_all()?);
            if !chunk.check_crc()? {
                return Err(io::Error::new(ErrorKind::InvalidData, "crc check failed"));
            }
        }
        Ok(Some(chunks))
    }
}
507

            
508
/// The result of reading a chunk from a log segment via
/// [`Entry::read_chunk()`].
#[derive(Debug)]
pub enum ReadChunkResult<'chunk, 'entry, F>
where
    F: file_manager::File,
{
    /// A chunk was found.
    Chunk(EntryChunk<'chunk, 'entry, F>),
    /// The end of the entry has been reached.
    EndOfEntry,
    /// An aborted entry was detected. This should only be encountered if log
    /// entries were being written when the computer or application crashed.
    ///
    /// When this is returned, the entire entry should be ignored.
    AbortedEntry,
}
524

            
525
/// A chunk of data previously written using
/// [`EntryWriter::write_chunk`](crate::EntryWriter::write_chunk).
///
/// # Panics
///
/// Once dropped, this type will ensure that the entry reader is advanced to the
/// end of this chunk if needed by calling
/// [`EntryChunk::skip_remaining_bytes()`]. If an error occurs during this call,
/// a panic will occur.
///
/// To prevent all possibilities of panics, all bytes should be exhausted before
/// dropping this type by:
///
/// - Using [`Read`] until a 0 is returned.
/// - Using [`EntryChunk::read_all()`] to read all remaining bytes at once.
/// - Skipping all remaining bytes using [`EntryChunk::skip_remaining_bytes()`]
#[derive(Debug)]
pub struct EntryChunk<'chunk, 'entry, F>
where
    F: file_manager::File,
{
    // The entry this chunk belongs to; grants access to the segment reader.
    entry: &'chunk mut Entry<'entry, F>,
    // Position of this chunk's header within the log.
    position: LogPosition,
    // Payload bytes not yet read.
    bytes_remaining: u32,
    // Running crc32c of the payload bytes read so far.
    calculated_crc: u32,
    // The crc stored after the payload, once read (see `check_crc`).
    stored_crc32: Option<u32>,
}
552

            
553
impl<'chunk, 'entry, F> EntryChunk<'chunk, 'entry, F>
554
where
555
    F: file_manager::File,
556
{
557
    /// Returns the position that this chunk is located at.
558
    #[must_use]
559
    pub fn log_position(&self) -> LogPosition {
560
        self.position
561
    }
562

            
563
    /// Returns the number of bytes remaining to read from this chunk.
564
    #[must_use]
565
    pub const fn bytes_remaining(&self) -> u32 {
566
        self.bytes_remaining
567
    }
568

            
569
    /// Returns true if the CRC has been validated, or false if the computed CRC
570
    /// is different than the stored CRC. Returns an error if the chunk has not
571
    /// been fully read yet.
572
727
    pub fn check_crc(&mut self) -> io::Result<bool> {
573
727
        if self.bytes_remaining == 0 {
574
727
            if self.stored_crc32.is_none() {
575
727
                let mut stored_crc32 = [0; 4];
576
727
                // Bypass our internal read, otherwise our CRC would include the
577
727
                // CRC read itself.
578
727
                self.entry.reader.file.read_exact(&mut stored_crc32)?;
579
727
                self.stored_crc32 = Some(u32::from_le_bytes(stored_crc32));
580
            }
581

            
582
727
            Ok(self.stored_crc32.expect("already initialized") == self.calculated_crc)
583
        } else {
584
            Err(io::Error::new(
585
                io::ErrorKind::Other,
586
                "crc cannot be checked before reading all chunk bytes",
587
            ))
588
        }
589
727
    }
590

            
591
    /// Reads all of the remaining data from this chunk.
592
727
    pub fn read_all(&mut self) -> io::Result<Vec<u8>> {
593
727
        let mut data = Vec::with_capacity(usize::try_from(self.bytes_remaining).to_io()?);
594
727
        self.read_to_end(&mut data)?;
595
727
        Ok(data)
596
727
    }
597

            
598
    /// Advances past the end of this chunk without reading the remaining bytes.
599
    pub fn skip_remaining_bytes(mut self) -> io::Result<()> {
600
        self.skip_remaining_bytes_internal()
601
    }
602

            
603
    /// Advances past the end of this chunk without reading the remaining bytes.
604
727
    fn skip_remaining_bytes_internal(&mut self) -> io::Result<()> {
605
727
        if self.bytes_remaining > 0 || self.stored_crc32.is_none() {
606
            // Skip past the remaining bytes plus the crc.
607
            self.entry
608
                .reader
609
                .file
610
                .seek(SeekFrom::Current(i64::from(self.bytes_remaining + 4)))?;
611
            self.bytes_remaining = 0;
612
727
        }
613
727
        Ok(())
614
727
    }
615
}
616

            
617
impl<'chunk, 'entry, F> Read for EntryChunk<'chunk, 'entry, F>
618
where
619
    F: file_manager::File,
620
{
621
1828
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
622
1828
        let bytes_remaining = usize::try_from(self.bytes_remaining).to_io()?;
623
1828
        let bytes_to_read = bytes_remaining.min(buf.len());
624
1828

            
625
1828
        if bytes_to_read > 0 {
626
1101
            let bytes_read = self.entry.reader.file.read(&mut buf[..bytes_to_read])?;
627
1101
            self.bytes_remaining -= u32::try_from(bytes_read).to_io()?;
628
1101
            self.calculated_crc = crc32c::crc32c_append(self.calculated_crc, &buf[..bytes_read]);
629
1101
            Ok(bytes_read)
630
        } else {
631
727
            Ok(0)
632
        }
633
1828
    }
634
}
635

            
636
impl<'chunk, 'entry, F> Drop for EntryChunk<'chunk, 'entry, F>
where
    F: file_manager::File,
{
    /// Advances the reader past any unread chunk bytes so the next chunk or
    /// entry begins at the correct offset.
    ///
    /// Panics if the underlying seek fails; exhaust the chunk before dropping
    /// to avoid this (see the type-level docs).
    fn drop(&mut self) {
        self.skip_remaining_bytes_internal()
            .expect("error while skipping remaining bytes");
    }
}
645

            
646
/// Information about an individual segment of a
/// [`WriteAheadLog`](crate::WriteAheadLog) that is being recovered.
///
/// Accessible while reading a segment via [`Entry::segment()`].
#[derive(Debug)]
pub struct RecoveredSegment {
    /// The value of [`Configuration::version_info`] at the time this segment
    /// was created.
    pub version_info: Vec<u8>,
}