linear-mcp-fast 0.1.0 (linear_mcp_fast-0.1.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. ccl_chromium_reader/__init__.py +2 -0
  2. ccl_chromium_reader/ccl_chromium_cache.py +1335 -0
  3. ccl_chromium_reader/ccl_chromium_filesystem.py +302 -0
  4. ccl_chromium_reader/ccl_chromium_history.py +357 -0
  5. ccl_chromium_reader/ccl_chromium_indexeddb.py +1060 -0
  6. ccl_chromium_reader/ccl_chromium_localstorage.py +454 -0
  7. ccl_chromium_reader/ccl_chromium_notifications.py +268 -0
  8. ccl_chromium_reader/ccl_chromium_profile_folder.py +568 -0
  9. ccl_chromium_reader/ccl_chromium_sessionstorage.py +368 -0
  10. ccl_chromium_reader/ccl_chromium_snss2.py +332 -0
  11. ccl_chromium_reader/ccl_shared_proto_db_downloads.py +189 -0
  12. ccl_chromium_reader/common.py +19 -0
  13. ccl_chromium_reader/download_common.py +78 -0
  14. ccl_chromium_reader/profile_folder_protocols.py +276 -0
  15. ccl_chromium_reader/serialization_formats/__init__.py +0 -0
  16. ccl_chromium_reader/serialization_formats/ccl_blink_value_deserializer.py +401 -0
  17. ccl_chromium_reader/serialization_formats/ccl_easy_chromium_pickle.py +133 -0
  18. ccl_chromium_reader/serialization_formats/ccl_protobuff.py +276 -0
  19. ccl_chromium_reader/serialization_formats/ccl_v8_value_deserializer.py +627 -0
  20. ccl_chromium_reader/storage_formats/__init__.py +0 -0
  21. ccl_chromium_reader/storage_formats/ccl_leveldb.py +582 -0
  22. ccl_simplesnappy/__init__.py +1 -0
  23. ccl_simplesnappy/ccl_simplesnappy.py +306 -0
  24. linear_mcp_fast/__init__.py +8 -0
  25. linear_mcp_fast/__main__.py +6 -0
  26. linear_mcp_fast/reader.py +433 -0
  27. linear_mcp_fast/server.py +367 -0
  28. linear_mcp_fast/store_detector.py +117 -0
  29. linear_mcp_fast-0.1.0.dist-info/METADATA +160 -0
  30. linear_mcp_fast-0.1.0.dist-info/RECORD +39 -0
  31. linear_mcp_fast-0.1.0.dist-info/WHEEL +5 -0
  32. linear_mcp_fast-0.1.0.dist-info/entry_points.txt +2 -0
  33. linear_mcp_fast-0.1.0.dist-info/top_level.txt +4 -0
  34. tools_and_utilities/Chromium_dump_local_storage.py +111 -0
  35. tools_and_utilities/Chromium_dump_session_storage.py +92 -0
  36. tools_and_utilities/benchmark.py +35 -0
  37. tools_and_utilities/ccl_chrome_audit.py +651 -0
  38. tools_and_utilities/dump_indexeddb_details.py +59 -0
  39. tools_and_utilities/dump_leveldb.py +53 -0
ccl_chromium_reader/storage_formats/ccl_leveldb.py
@@ -0,0 +1,582 @@
+ """
+ Copyright 2020-2021, CCL Forensics
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ """
+
+ import typing
+ import struct
+ import re
+ import os
+ import io
+ import pathlib
+ import dataclasses
+ import enum
+ from collections import namedtuple
+ from types import MappingProxyType
+
+ import ccl_simplesnappy
+
+ __version__ = "0.4"
+ __description__ = "A module for reading LevelDB databases"
+ __contact__ = "Alex Caithness"
+
+
+ def _read_le_varint(stream: typing.BinaryIO, *, is_google_32bit=False) -> typing.Optional[typing.Tuple[int, bytes]]:
+     """Reads a varint from a stream.
+     If the read is successful: returns a tuple of the (unsigned) value and the raw bytes making up that varint,
+     otherwise returns None.
+     Can be switched to limit the varint to 32 bits."""
+     # this only outputs unsigned
+     i = 0
+     result = 0
+     underlying_bytes = []
+     limit = 5 if is_google_32bit else 10
+     while i < limit:
+         raw = stream.read(1)
+         if len(raw) < 1:
+             return None
+         tmp, = raw
+         underlying_bytes.append(tmp)
+         result |= ((tmp & 0x7f) << (i * 7))
+         if (tmp & 0x80) == 0:
+             break
+         i += 1
+     return result, bytes(underlying_bytes)
+
+
+ def read_le_varint(stream: typing.BinaryIO, *, is_google_32bit=False) -> typing.Optional[int]:
+     """Convenience version of _read_le_varint that only returns the value, or None"""
+     x = _read_le_varint(stream, is_google_32bit=is_google_32bit)
+     if x is None:
+         return None
+     else:
+         return x[0]
+
+
+ def read_length_prefixed_blob(stream: typing.BinaryIO) -> bytes:
+     """Reads a blob of data which is prefixed with a varint length"""
+     length = read_le_varint(stream)
+     data = stream.read(length)
+     if len(data) != length:
+         raise ValueError(f"Could not read all data (expected {length}, got {len(data)})")
+     return data
+
+
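The varint helpers above implement little-endian base-128 decoding: each byte contributes 7 payload bits and a set high bit means another byte follows. A minimal sketch of their behaviour, assuming the module is imported from this package's storage_formats subpackage:

    import io
    from ccl_chromium_reader.storage_formats import ccl_leveldb

    # 0xAC 0x02 decodes as 0x2C | (0x02 << 7) = 44 + 256 = 300
    assert ccl_leveldb.read_le_varint(io.BytesIO(b"\xac\x02")) == 300

    # the private variant also returns the raw bytes consumed
    value, raw = ccl_leveldb._read_le_varint(io.BytesIO(b"\xac\x02"))
    assert (value, raw) == (300, b"\xac\x02")
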
+ @dataclasses.dataclass(frozen=True)
+ class BlockHandle:
+     """See: https://github.com/google/leveldb/blob/master/doc/table_format.md
+     A BlockHandle contains an offset and length of a block in an ldb table file"""
+     offset: int
+     length: int
+
+     @classmethod
+     def from_stream(cls, stream: typing.BinaryIO) -> "BlockHandle":
+         return cls(read_le_varint(stream), read_le_varint(stream))
+
+     @classmethod
+     def from_bytes(cls, data: bytes) -> "BlockHandle":
+         with io.BytesIO(data) as stream:
+             return BlockHandle.from_stream(stream)
+
+
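A BlockHandle is just two consecutive varints, offset then length, so decoding one from raw bytes is direct; an illustrative sketch:

    import io
    from ccl_chromium_reader.storage_formats.ccl_leveldb import BlockHandle

    # offset varint 0x80 0x02 -> 256; length varint 0x40 -> 64
    handle = BlockHandle.from_bytes(b"\x80\x02\x40")
    assert handle.offset == 256 and handle.length == 64
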
+ @dataclasses.dataclass(frozen=True)
+ class RawBlockEntry:
+     """Raw key and value for a record in an LDB file Block, along with the offset within the block from which it came.
+     See: https://github.com/google/leveldb/blob/master/doc/table_format.md"""
+     key: bytes
+     value: bytes
+     block_offset: int
+
+
+ class FileType(enum.Enum):
+     Ldb = 1
+     Log = 2
+
+
+ class KeyState(enum.Enum):
+     Deleted = 0
+     Live = 1
+     Unknown = 2
+
+
+ @dataclasses.dataclass(frozen=True)
+ class Record:
+     """A record from leveldb; includes details of the origin file, state, etc."""
+
+     key: bytes
+     value: bytes
+     seq: int
+     state: KeyState
+     file_type: FileType
+     origin_file: os.PathLike
+     offset: int
+     was_compressed: bool
+
+     @property
+     def user_key(self) -> bytes:
+         """Returns the "user key", which omits the metadata bytes that may reside at the end of the raw key"""
+         if self.file_type == FileType.Ldb:
+             if len(self.key) < 8:
+                 return self.key
+             else:
+                 return self.key[0:-8]
+         else:
+             return self.key
+
+     @classmethod
+     def ldb_record(cls, key: bytes, value: bytes, origin_file: os.PathLike,
+                    offset: int, was_compressed: bool) -> "Record":
+         seq = (struct.unpack("<Q", key[-8:])[0]) >> 8
+         if len(key) > 8:
+             state = KeyState.Deleted if key[-8] == 0 else KeyState.Live
+         else:
+             state = KeyState.Unknown
+         return cls(key, value, seq, state, FileType.Ldb, origin_file, offset, was_compressed)
+
+     @classmethod
+     def log_record(cls, key: bytes, value: bytes, seq: int, state: KeyState,
+                    origin_file: os.PathLike, offset: int) -> "Record":
+         return cls(key, value, seq, state, FileType.Log, origin_file, offset, False)
+
+
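An LDB internal key is the user key followed by an 8-byte little-endian trailer packing (sequence_number << 8) | value_type, which is exactly what ldb_record unpicks. A sketch with a hand-built key (the file name is hypothetical):

    import struct
    from ccl_chromium_reader.storage_formats.ccl_leveldb import KeyState, Record

    key = b"mykey" + struct.pack("<Q", (42 << 8) | 1)  # seq 42, type 1 = live
    rec = Record.ldb_record(key, b"value", "000005.ldb", offset=0, was_compressed=False)
    assert rec.user_key == b"mykey"
    assert rec.seq == 42 and rec.state == KeyState.Live
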
+ class Block:
+     """Block from an .ldb (table) file. See: https://github.com/google/leveldb/blob/master/doc/table_format.md"""
+     def __init__(self, raw: bytes, was_compressed: bool, origin: "LdbFile", offset: int):
+         self._raw = raw
+         self.was_compressed = was_compressed
+         self.origin = origin
+         self.offset = offset
+
+         self._restart_array_count, = struct.unpack("<I", self._raw[-4:])
+         self._restart_array_offset = len(self._raw) - (self._restart_array_count + 1) * 4
+
+     def get_restart_offset(self, index) -> int:
+         offset = self._restart_array_offset + (index * 4)
+         return struct.unpack("<i", self._raw[offset: offset + 4])[0]
+
+     def get_first_entry_offset(self) -> int:
+         return self.get_restart_offset(0)
+
+     def __iter__(self) -> typing.Iterable[RawBlockEntry]:
+         offset = self.get_first_entry_offset()
+         with io.BytesIO(self._raw) as buff:
+             buff.seek(offset)
+
+             key = b""
+
+             while buff.tell() < self._restart_array_offset:
+                 start_offset = buff.tell()
+                 shared_length = read_le_varint(buff, is_google_32bit=True)
+                 non_shared_length = read_le_varint(buff, is_google_32bit=True)
+                 value_length = read_le_varint(buff, is_google_32bit=True)
+
+                 # sense check
+                 if start_offset >= self._restart_array_offset:
+                     raise ValueError("Reading start of entry past the start of restart array")
+                 if shared_length > len(key):
+                     raise ValueError("Shared key length is larger than the previous key")
+
+                 key = key[:shared_length] + buff.read(non_shared_length)
+                 value = buff.read(value_length)
+
+                 yield RawBlockEntry(key, value, start_offset)
+
+
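Entries within a block are prefix-compressed: each stores the number of leading bytes it shares with the previous key, the non-shared suffix, and the value. A minimal hand-assembled block (a single restart point, with the restart count in the final four bytes) shows the iterator rebuilding full keys; origin is unused during iteration, so None stands in here:

    import struct
    from ccl_chromium_reader.storage_formats.ccl_leveldb import Block

    entries = (b"\x00\x05\x01" + b"apple" + b"1"    # shared=0, non-shared=5, value_len=1
               + b"\x04\x01\x01" + b"y" + b"2")     # shares "appl", suffix "y" -> "apply"
    restarts = struct.pack("<I", 0) + struct.pack("<I", 1)  # one restart at offset 0
    block = Block(entries + restarts, was_compressed=False, origin=None, offset=0)
    assert [(e.key, e.value) for e in block] == [(b"apple", b"1"), (b"apply", b"2")]
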
+ class LdbFile:
+     """A leveldb table (.ldb or .sst) file."""
+     BLOCK_TRAILER_SIZE = 5
+     FOOTER_SIZE = 48
+     MAGIC = 0xdb4775248b80fb57
+
+     def __init__(self, file: pathlib.Path):
+         if not file.exists():
+             raise FileNotFoundError(file)
+
+         self.path = file
+         self.file_no = int(file.stem, 16)
+
+         self._f = file.open("rb")
+         self._f.seek(-LdbFile.FOOTER_SIZE, os.SEEK_END)
+
+         self._meta_index_handle = BlockHandle.from_stream(self._f)
+         self._index_handle = BlockHandle.from_stream(self._f)
+         self._f.seek(-8, os.SEEK_END)
+         magic, = struct.unpack("<Q", self._f.read(8))
+         if magic != LdbFile.MAGIC:
+             raise ValueError(f"Invalid magic number in {file}")
+
+         self._index = self._read_index()
+
+     def _read_block(self, handle: BlockHandle) -> Block:
+         # block is the size in the block handle plus the trailer
+         # the trailer is 5 bytes long:
+         # idx  size  meaning
+         # 0    1     CompressionType (0 = none, 1 = snappy)
+         # 1    4     CRC32
+
+         self._f.seek(handle.offset)
+         raw_block = self._f.read(handle.length)
+         trailer = self._f.read(LdbFile.BLOCK_TRAILER_SIZE)
+
+         if len(raw_block) != handle.length or len(trailer) != LdbFile.BLOCK_TRAILER_SIZE:
+             raise ValueError(f"Could not read all of the block at offset {handle.offset} in file {self.path}")
+
+         is_compressed = trailer[0] != 0
+         if is_compressed:
+             with io.BytesIO(raw_block) as buff:
+                 raw_block = ccl_simplesnappy.decompress(buff)
+
+         return Block(raw_block, is_compressed, self, handle.offset)
+
+     def _read_index(self) -> typing.Tuple[typing.Tuple[bytes, BlockHandle], ...]:
+         index_block = self._read_block(self._index_handle)
+         # key is the earliest key, value is the BlockHandle to that data block
+         return tuple((entry.key, BlockHandle.from_bytes(entry.value))
+                      for entry in index_block)
+
+     def __iter__(self) -> typing.Iterable[Record]:
+         """Iterate the Records in this table file"""
+         for block_key, handle in self._index:
+             block = self._read_block(handle)
+             for entry in block:
+                 yield Record.ldb_record(
+                     entry.key, entry.value, self.path,
+                     block.offset if block.was_compressed else block.offset + entry.block_offset,
+                     block.was_compressed)
+
+     def close(self):
+         self._f.close()
+
+
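With the index in hand, iterating an LdbFile walks each data block in index order and yields Record objects. A hedged usage sketch (the path is hypothetical):

    import pathlib
    from ccl_chromium_reader.storage_formats.ccl_leveldb import LdbFile

    table = LdbFile(pathlib.Path("some_leveldb_dir/000005.ldb"))  # hypothetical path
    for record in table:
        print(record.seq, record.state.name, record.user_key, record.value)
    table.close()
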
+ class LogEntryType(enum.IntEnum):
+     Zero = 0
+     Full = 1
+     First = 2
+     Middle = 3
+     Last = 4
+
+
+ class LogFile:
+     """A leveldb log (.log) file"""
+     LOG_ENTRY_HEADER_SIZE = 7
+     LOG_BLOCK_SIZE = 32768
+
+     def __init__(self, file: pathlib.Path):
+         if not file.exists():
+             raise FileNotFoundError(file)
+
+         self.path = file
+         self.file_no = int(file.stem, 16)
+
+         self._f = file.open("rb")
+
+     def _get_raw_blocks(self) -> typing.Iterable[bytes]:
+         self._f.seek(0)
+
+         while chunk := self._f.read(LogFile.LOG_BLOCK_SIZE):
+             yield chunk
+
+     def _get_batches(self) -> typing.Iterable[typing.Tuple[int, bytes]]:
+         in_record = False
+         start_block_offset = 0
+         block = b""
+         for idx, chunk_ in enumerate(self._get_raw_blocks()):
+             with io.BytesIO(chunk_) as buff:
+                 while buff.tell() < LogFile.LOG_BLOCK_SIZE - 6:
+                     header = buff.read(7)
+                     if len(header) < 7:
+                         break
+                     crc, length, block_type = struct.unpack("<IHB", header)
+
+                     if block_type == LogEntryType.Full:
+                         if in_record:
+                             raise ValueError(f"Full block whilst still building a block at offset "
+                                              f"{idx * LogFile.LOG_BLOCK_SIZE + buff.tell()} in {self.path}")
+                         in_record = False
+                         yield idx * LogFile.LOG_BLOCK_SIZE + buff.tell(), buff.read(length)
+                     elif block_type == LogEntryType.First:
+                         if in_record:
+                             raise ValueError(f"First block whilst still building a block at offset "
+                                              f"{idx * LogFile.LOG_BLOCK_SIZE + buff.tell()} in {self.path}")
+                         start_block_offset = idx * LogFile.LOG_BLOCK_SIZE + buff.tell()
+                         block = buff.read(length)
+                         in_record = True
+                     elif block_type == LogEntryType.Middle:
+                         if not in_record:
+                             raise ValueError(f"Middle block whilst not building a block at offset "
+                                              f"{idx * LogFile.LOG_BLOCK_SIZE + buff.tell()} in {self.path}")
+                         block += buff.read(length)
+                     elif block_type == LogEntryType.Last:
+                         if not in_record:
+                             raise ValueError(f"Last block whilst not building a block at offset "
+                                              f"{idx * LogFile.LOG_BLOCK_SIZE + buff.tell()} in {self.path}")
+                         block += buff.read(length)
+                         in_record = False
+                         yield start_block_offset, block
+                     else:
+                         raise ValueError()  # Cannot happen
+
+     def __iter__(self) -> typing.Iterable[Record]:
+         """Iterate the Records in this log file"""
+         for batch_offset, batch in self._get_batches():
+             # as per write_batch and write_batch_internal
+             # offset  length  description
+             # 0       8       (u?)int64 Sequence number
+             # 8       4       (u?)int32 Count - the log batch can contain multiple entries
+             #
+             # Then Count * the following:
+             #
+             # 12      1       ValueType (KeyState as far as this library is concerned)
+             # 13      1-4     VarInt32 length of key
+             # ...     ...     Key data
+             # ...     1-4     VarInt32 length of value
+             # ...     ...     Value data
+
+             with io.BytesIO(batch) as buff:  # it's just easier this way
+                 header = buff.read(12)
+                 seq, count = struct.unpack("<QI", header)
+
+                 for i in range(count):
+                     start_offset = batch_offset + buff.tell()
+                     state = KeyState(buff.read(1)[0])
+                     key_length = read_le_varint(buff, is_google_32bit=True)
+                     key = buff.read(key_length)
+                     if state != KeyState.Deleted:
+                         value_length = read_le_varint(buff, is_google_32bit=True)
+                         value = buff.read(value_length)
+                     else:
+                         value = b""
+
+                     yield Record.log_record(key, value, seq + i, state, self.path, start_offset)
+
+     def close(self):
+         self._f.close()
+
+
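The batch layout documented in the comment above can be exercised by hand: a 12-byte header (uint64 sequence, uint32 count) followed by count records, each being a state byte, a varint-prefixed key and, for live records, a varint-prefixed value. A sketch with made-up values:

    import io
    import struct
    from ccl_chromium_reader.storage_formats.ccl_leveldb import KeyState, read_le_varint

    # seq=100, count=1, then: state=1 (Live), key length 1, b"k", value length 1, b"v"
    batch = struct.pack("<QI", 100, 1) + b"\x01" + b"\x01k" + b"\x01v"
    with io.BytesIO(batch) as buff:
        seq, count = struct.unpack("<QI", buff.read(12))
        state = KeyState(buff.read(1)[0])
        key = buff.read(read_le_varint(buff, is_google_32bit=True))
        value = buff.read(read_le_varint(buff, is_google_32bit=True))
    assert (seq, count, state, key, value) == (100, 1, KeyState.Live, b"k", b"v")
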
+ class VersionEditTag(enum.IntEnum):
+     """
+     See: https://github.com/google/leveldb/blob/master/db/version_edit.cc
+     """
+     Comparator = 1
+     LogNumber = 2
+     NextFileNumber = 3
+     LastSequence = 4
+     CompactPointer = 5
+     DeletedFile = 6
+     NewFile = 7
+     # 8 was used for large value refs
+     PrevLogNumber = 9
+
+
+ @dataclasses.dataclass(frozen=True)
+ class VersionEdit:
+     """
+     See:
+     https://github.com/google/leveldb/blob/master/db/version_edit.h
+     https://github.com/google/leveldb/blob/master/db/version_edit.cc
+     """
+     comparator: typing.Optional[str] = None
+     log_number: typing.Optional[int] = None
+     prev_log_number: typing.Optional[int] = None
+     last_sequence: typing.Optional[int] = None
+     next_file_number: typing.Optional[int] = None
+     compaction_pointers: typing.Tuple[typing.Any, ...] = tuple()
+     deleted_files: typing.Tuple[typing.Any, ...] = tuple()
+     new_files: typing.Tuple[typing.Any, ...] = tuple()
+
+     @classmethod
+     def from_buffer(cls, buffer: bytes) -> "VersionEdit":
+         comparator = None
+         log_number = None
+         prev_log_number = None
+         last_sequence = None
+         next_file_number = None
+         compaction_pointers = []
+         deleted_files = []
+         new_files = []
+
+         compaction_pointer_nt = namedtuple("CompactionPointer", ["level", "pointer"])
+         deleted_file_nt = namedtuple("DeletedFile", ["level", "file_no"])
+         new_file_nt = namedtuple("NewFile", ["level", "file_no", "file_size", "smallest_key", "largest_key"])
+
+         with io.BytesIO(buffer) as b:
+             while b.tell() < len(buffer) - 1:
+                 tag = read_le_varint(b, is_google_32bit=True)
+
+                 if tag == VersionEditTag.Comparator:
+                     comparator = read_length_prefixed_blob(b).decode("utf-8")
+                 elif tag == VersionEditTag.LogNumber:
+                     log_number = read_le_varint(b)
+                 elif tag == VersionEditTag.PrevLogNumber:
+                     prev_log_number = read_le_varint(b)
+                 elif tag == VersionEditTag.NextFileNumber:
+                     next_file_number = read_le_varint(b)
+                 elif tag == VersionEditTag.LastSequence:
+                     last_sequence = read_le_varint(b)
+                 elif tag == VersionEditTag.CompactPointer:
+                     level = read_le_varint(b, is_google_32bit=True)
+                     compaction_pointer = read_length_prefixed_blob(b)
+                     compaction_pointers.append(compaction_pointer_nt(level, compaction_pointer))
+                 elif tag == VersionEditTag.DeletedFile:
+                     level = read_le_varint(b, is_google_32bit=True)
+                     file_no = read_le_varint(b)
+                     deleted_files.append(deleted_file_nt(level, file_no))
+                 elif tag == VersionEditTag.NewFile:
+                     level = read_le_varint(b, is_google_32bit=True)
+                     file_no = read_le_varint(b)
+                     file_size = read_le_varint(b)
+                     smallest = read_length_prefixed_blob(b)
+                     largest = read_length_prefixed_blob(b)
+                     new_files.append(new_file_nt(level, file_no, file_size, smallest, largest))
+
+         return cls(comparator, log_number, prev_log_number, last_sequence, next_file_number,
+                    tuple(compaction_pointers), tuple(deleted_files), tuple(new_files))
+
+
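A version-edit buffer is a run of tag varints, each followed by a tag-specific payload. A minimal sketch decoding a hand-built buffer holding a comparator name (leveldb's default) and a log number:

    from ccl_chromium_reader.storage_formats.ccl_leveldb import VersionEdit

    name = b"leveldb.BytewiseComparator"
    buffer = b"\x01" + bytes([len(name)]) + name + b"\x02\x07"  # Comparator, then LogNumber=7
    edit = VersionEdit.from_buffer(buffer)
    assert edit.comparator == "leveldb.BytewiseComparator" and edit.log_number == 7
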
+ class ManifestFile:
+     """
+     Represents a manifest file which contains database metadata.
+     Manifest files are, at a high level, formatted like a log file in terms of the block and batch format,
+     but the data within the batches follows its own format.
+
+     Its main use is to identify the level of files; use the `file_to_level` property to look up levels by file number.
+
+     See:
+     https://github.com/google/leveldb/blob/master/db/version_edit.h
+     https://github.com/google/leveldb/blob/master/db/version_edit.cc
+     """
+
+     MANIFEST_FILENAME_PATTERN = "MANIFEST-([0-9A-F]{6})"
+
+     def __init__(self, path: pathlib.Path):
+         if match := re.match(ManifestFile.MANIFEST_FILENAME_PATTERN, path.name):
+             self.file_no = int(match.group(1), 16)
+         else:
+             raise ValueError("Invalid name for Manifest")
+
+         self._f = path.open("rb")
+         self.path = path
+
+         self.file_to_level = {}
+         for edit in self:
+             if edit.new_files:
+                 for nf in edit.new_files:
+                     self.file_to_level[nf.file_no] = nf.level
+
+         self.file_to_level = MappingProxyType(self.file_to_level)
+
+     def _get_raw_blocks(self) -> typing.Iterable[bytes]:
+         self._f.seek(0)
+
+         while chunk := self._f.read(LogFile.LOG_BLOCK_SIZE):
+             yield chunk
+
+     def _get_batches(self) -> typing.Iterable[typing.Tuple[int, bytes]]:
+         in_record = False
+         start_block_offset = 0
+         block = b""
+         for idx, chunk_ in enumerate(self._get_raw_blocks()):
+             with io.BytesIO(chunk_) as buff:
+                 while buff.tell() < LogFile.LOG_BLOCK_SIZE - 6:
+                     header = buff.read(7)
+                     if len(header) < 7:
+                         break
+                     crc, length, block_type = struct.unpack("<IHB", header)
+
+                     if block_type == LogEntryType.Full:
+                         if in_record:
+                             raise ValueError(f"Full block whilst still building a block at offset "
+                                              f"{idx * LogFile.LOG_BLOCK_SIZE + buff.tell()} in {self.path}")
+                         in_record = False
+                         yield idx * LogFile.LOG_BLOCK_SIZE + buff.tell(), buff.read(length)
+                     elif block_type == LogEntryType.First:
+                         if in_record:
+                             raise ValueError(f"First block whilst still building a block at offset "
+                                              f"{idx * LogFile.LOG_BLOCK_SIZE + buff.tell()} in {self.path}")
+                         start_block_offset = idx * LogFile.LOG_BLOCK_SIZE + buff.tell()
+                         block = buff.read(length)
+                         in_record = True
+                     elif block_type == LogEntryType.Middle:
+                         if not in_record:
+                             raise ValueError(f"Middle block whilst not building a block at offset "
+                                              f"{idx * LogFile.LOG_BLOCK_SIZE + buff.tell()} in {self.path}")
+                         block += buff.read(length)
+                     elif block_type == LogEntryType.Last:
+                         if not in_record:
+                             raise ValueError(f"Last block whilst not building a block at offset "
+                                              f"{idx * LogFile.LOG_BLOCK_SIZE + buff.tell()} in {self.path}")
+                         block += buff.read(length)
+                         in_record = False
+                         yield start_block_offset, block
+                     else:
+                         raise ValueError()  # Cannot happen
+
+     def __iter__(self):
+         for batch_offset, batch in self._get_batches():
+             yield VersionEdit.from_buffer(batch)
+
+     def close(self):
+         self._f.close()
+
+
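As the docstring says, the practical payoff of parsing the manifest is the file_to_level mapping; a hedged sketch (path hypothetical):

    import pathlib
    from ccl_chromium_reader.storage_formats.ccl_leveldb import ManifestFile

    manifest = ManifestFile(pathlib.Path("some_leveldb_dir/MANIFEST-000002"))  # hypothetical path
    for file_no, level in manifest.file_to_level.items():
        print(f"table file {file_no:06x} is at level {level}")
    manifest.close()
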
+ class RawLevelDb:
+     DATA_FILE_PATTERN = r"[0-9]{6}\.(ldb|log|sst)"
+
+     def __init__(self, in_dir: os.PathLike):
+         self._in_dir = pathlib.Path(in_dir)
+         if not self._in_dir.is_dir():
+             raise ValueError("in_dir is not a directory")
+
+         self._files = []
+         latest_manifest = (0, None)
+         for file in self._in_dir.iterdir():
+             if file.is_file() and re.match(RawLevelDb.DATA_FILE_PATTERN, file.name):
+                 if file.suffix.lower() == ".log":
+                     self._files.append(LogFile(file))
+                 elif file.suffix.lower() == ".ldb" or file.suffix.lower() == ".sst":
+                     self._files.append(LdbFile(file))
+             if file.is_file() and re.match(ManifestFile.MANIFEST_FILENAME_PATTERN, file.name):
+                 manifest_no = int(re.match(ManifestFile.MANIFEST_FILENAME_PATTERN, file.name).group(1), 16)
+                 if latest_manifest[0] < manifest_no:
+                     latest_manifest = (manifest_no, file)
+
+         self.manifest = ManifestFile(latest_manifest[1]) if latest_manifest[1] is not None else None
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.close()
+
+     @property
+     def in_dir_path(self) -> pathlib.Path:
+         return self._in_dir
+
+     def iterate_records_raw(self, *, reverse=False) -> typing.Iterable[Record]:
+         for file_containing_records in sorted(self._files, reverse=reverse, key=lambda x: x.file_no):
+             yield from file_containing_records
+
+     def close(self):
+         for file in self._files:
+             file.close()
+         if self.manifest:
+             self.manifest.close()
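Taken together, RawLevelDb is the module's entry point: a context manager that opens every .log/.ldb/.sst file in a directory and replays their records in file-number order, deleted entries included. A hedged end-to-end sketch (directory path hypothetical):

    from ccl_chromium_reader.storage_formats.ccl_leveldb import KeyState, RawLevelDb

    with RawLevelDb("profile/Local Storage/leveldb") as db:  # hypothetical path
        for record in db.iterate_records_raw():
            if record.state == KeyState.Live:
                print(record.seq, record.user_key, record.value)
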
ccl_simplesnappy/__init__.py
@@ -0,0 +1 @@
+ from .ccl_simplesnappy import decompress, decompress_framed
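For reference, the LevelDB reader above calls decompress with a binary stream positioned at raw (non-framed) snappy data and gets the decompressed bytes back; a hedged sketch, where compressed_block stands in for bytes read from an .ldb block whose trailer flags it as compressed:

    import io
    import ccl_simplesnappy

    with io.BytesIO(compressed_block) as buff:  # compressed_block: hypothetical snappy payload
        data = ccl_simplesnappy.decompress(buff)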