ai-edge-litert-nightly 2.0.4.dev20251030__cp313-cp313-manylinux_2_27_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ai-edge-litert-nightly might be problematic. Click here for more details.

Files changed (74) hide show
  1. ai_edge_litert/__init__.py +1 -0
  2. ai_edge_litert/_pywrap_analyzer_wrapper.so +0 -0
  3. ai_edge_litert/_pywrap_litert_compiled_model_wrapper.so +0 -0
  4. ai_edge_litert/_pywrap_litert_tensor_buffer_wrapper.so +0 -0
  5. ai_edge_litert/_pywrap_modify_model_interface.so +0 -0
  6. ai_edge_litert/_pywrap_string_util.so +0 -0
  7. ai_edge_litert/_pywrap_tensorflow_interpreter_wrapper.so +0 -0
  8. ai_edge_litert/_pywrap_tensorflow_lite_calibration_wrapper.so +0 -0
  9. ai_edge_litert/_pywrap_tensorflow_lite_metrics_wrapper.so +0 -0
  10. ai_edge_litert/any_pb2.py +37 -0
  11. ai_edge_litert/aot/__init__.py +0 -0
  12. ai_edge_litert/aot/ai_pack/__init__.py +0 -0
  13. ai_edge_litert/aot/ai_pack/export_lib.py +300 -0
  14. ai_edge_litert/aot/aot_compile.py +153 -0
  15. ai_edge_litert/aot/core/__init__.py +0 -0
  16. ai_edge_litert/aot/core/apply_plugin.py +148 -0
  17. ai_edge_litert/aot/core/common.py +97 -0
  18. ai_edge_litert/aot/core/components.py +93 -0
  19. ai_edge_litert/aot/core/mlir_transforms.py +36 -0
  20. ai_edge_litert/aot/core/tflxx_util.py +30 -0
  21. ai_edge_litert/aot/core/types.py +374 -0
  22. ai_edge_litert/aot/prepare_for_npu.py +152 -0
  23. ai_edge_litert/aot/vendors/__init__.py +22 -0
  24. ai_edge_litert/aot/vendors/example/__init__.py +0 -0
  25. ai_edge_litert/aot/vendors/example/example_backend.py +157 -0
  26. ai_edge_litert/aot/vendors/fallback_backend.py +128 -0
  27. ai_edge_litert/aot/vendors/google_tensor/__init__.py +0 -0
  28. ai_edge_litert/aot/vendors/google_tensor/google_tensor_backend.py +168 -0
  29. ai_edge_litert/aot/vendors/google_tensor/target.py +83 -0
  30. ai_edge_litert/aot/vendors/import_vendor.py +132 -0
  31. ai_edge_litert/aot/vendors/mediatek/__init__.py +0 -0
  32. ai_edge_litert/aot/vendors/mediatek/mediatek_backend.py +196 -0
  33. ai_edge_litert/aot/vendors/mediatek/target.py +94 -0
  34. ai_edge_litert/aot/vendors/qualcomm/__init__.py +0 -0
  35. ai_edge_litert/aot/vendors/qualcomm/qualcomm_backend.py +161 -0
  36. ai_edge_litert/aot/vendors/qualcomm/target.py +74 -0
  37. ai_edge_litert/api_pb2.py +43 -0
  38. ai_edge_litert/compiled_model.py +250 -0
  39. ai_edge_litert/descriptor_pb2.py +3361 -0
  40. ai_edge_litert/duration_pb2.py +37 -0
  41. ai_edge_litert/empty_pb2.py +37 -0
  42. ai_edge_litert/field_mask_pb2.py +37 -0
  43. ai_edge_litert/format_converter_wrapper_pybind11.so +0 -0
  44. ai_edge_litert/hardware_accelerator.py +22 -0
  45. ai_edge_litert/internal/__init__.py +0 -0
  46. ai_edge_litert/internal/litertlm_builder.py +573 -0
  47. ai_edge_litert/internal/litertlm_core.py +58 -0
  48. ai_edge_litert/internal/litertlm_header_schema_py_generated.py +1595 -0
  49. ai_edge_litert/internal/llm_metadata_pb2.py +45 -0
  50. ai_edge_litert/internal/sampler_params_pb2.py +39 -0
  51. ai_edge_litert/interpreter.py +1039 -0
  52. ai_edge_litert/libLiteRtRuntimeCApi.so +0 -0
  53. ai_edge_litert/libpywrap_litert_common.so +0 -0
  54. ai_edge_litert/metrics_interface.py +48 -0
  55. ai_edge_litert/metrics_portable.py +70 -0
  56. ai_edge_litert/model_runtime_info_pb2.py +66 -0
  57. ai_edge_litert/plugin_pb2.py +46 -0
  58. ai_edge_litert/profiling_info_pb2.py +47 -0
  59. ai_edge_litert/pywrap_genai_ops.so +0 -0
  60. ai_edge_litert/schema_py_generated.py +19304 -0
  61. ai_edge_litert/source_context_pb2.py +37 -0
  62. ai_edge_litert/struct_pb2.py +47 -0
  63. ai_edge_litert/tensor_buffer.py +167 -0
  64. ai_edge_litert/timestamp_pb2.py +37 -0
  65. ai_edge_litert/tools/apply_plugin_main +0 -0
  66. ai_edge_litert/type_pb2.py +53 -0
  67. ai_edge_litert/vendors/google_tensor/compiler/libLiteRtCompilerPlugin_google_tensor.so +0 -0
  68. ai_edge_litert/vendors/mediatek/compiler/libLiteRtCompilerPlugin_MediaTek.so +0 -0
  69. ai_edge_litert/vendors/qualcomm/compiler/libLiteRtCompilerPlugin_Qualcomm.so +0 -0
  70. ai_edge_litert/wrappers_pb2.py +53 -0
  71. ai_edge_litert_nightly-2.0.4.dev20251030.dist-info/METADATA +48 -0
  72. ai_edge_litert_nightly-2.0.4.dev20251030.dist-info/RECORD +74 -0
  73. ai_edge_litert_nightly-2.0.4.dev20251030.dist-info/WHEEL +5 -0
  74. ai_edge_litert_nightly-2.0.4.dev20251030.dist-info/top_level.txt +1 -0
@@ -0,0 +1,573 @@
1
+ # Copyright 2025 The ODML Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ """Builder class for LiteRT-LM files.
17
+
18
+ Example usage:
19
+ ```
20
+ builder = litertlm_builder.LitertLmFileBuilder()
21
+ builder.add_system_metadata(
22
+ litertlm_builder.Metadata(
23
+ key="Authors",
24
+ value="The ODML Authors",
25
+ dtype=litertlm_builder.DType.STRING,
26
+ )
27
+ )
28
+ builder.add_tflite_model(
29
+ model_path,
30
+ litertlm_builder.TfLiteModelType.PREFILL_DECODE,
31
+ )
32
+ builder.add_sentencepiece_tokenizer(tokenizer_path)
33
+ builder.add_llm_metadata(llm_metadata_path)
34
+ with litertlm_core.open_file(output_path, "wb") as f:
35
+ builder.build(f)
36
+ ```
37
+ """
38
+
39
+ # TODO(b/445163709): Remove this module once litert_lm publishes a pypi package.
40
+
41
+ import dataclasses
42
+ import enum
43
+ import os # pylint: disable=unused-import
44
+ from typing import Any, BinaryIO, Callable, IO, Optional, TypeVar, Union
45
+ import zlib
46
+ import flatbuffers
47
+ from google.protobuf import message
48
+ from google.protobuf import text_format
49
+ from ai_edge_litert.internal import litertlm_core
50
+ from ai_edge_litert.internal import litertlm_header_schema_py_generated as schema
51
+ from ai_edge_litert.internal import llm_metadata_pb2
52
+
53
+
54
@enum.unique
class DType(enum.Enum):
  """DType enum.

  This enum maps to the data types defined in the LiteRT-LM flatbuffers schema.
  Each member's value is the lowercase wire name used by `_write_metadata` to
  select the matching value-union table in the header.
  """

  INT8 = "int8"
  INT16 = "int16"
  INT32 = "int32"
  INT64 = "int64"
  UINT8 = "uint8"
  UINT16 = "uint16"
  UINT32 = "uint32"
  UINT64 = "uint64"
  FLOAT32 = "float32"
  DOUBLE = "double"
  BOOL = "bool"
  STRING = "string"
73
+
74
+
75
@dataclasses.dataclass
class Metadata:
  """A single key/value metadata entry for a LiteRT-LM file.

  Entries are serialized into FlatBuffers KeyValuePair tables by
  `_write_metadata`, either as system metadata or per-section metadata.
  """

  # The metadata key, stored as a FlatBuffers string.
  key: str
  # The metadata value; must be serializable according to `dtype`.
  value: Any
  # Declares how `value` is written into the header (see DType).
  dtype: DType
82
+
83
+
84
@enum.unique
class TfLiteModelType(enum.Enum):
  """TfLiteModelType enum.

  This enum maps to the model types defined in the LiteRT-LM flatbuffers
  schema. Each value is the `model_type` metadata string written alongside the
  corresponding TFLite model section.
  """

  PREFILL_DECODE = "tf_lite_prefill_decode"

  EMBEDDER = "tf_lite_embedder"
  PER_LAYER_EMBEDDER = "tf_lite_per_layer_embedder"

  AUX = "tf_lite_aux"

  AUDIO_FRONTEND = "tf_lite_audio_frontend"
  AUDIO_ENCODER_HW = "tf_lite_audio_encoder_hw"
  AUDIO_ADAPTER = "tf_lite_audio_adapter"
  END_OF_AUDIO = "tf_lite_end_of_audio"

  VISION_ENCODER = "tf_lite_vision_encoder"
  VISION_ADAPTER = "tf_lite_vision_adapter"

  @classmethod
  def get_enum_from_tf_free_value(cls, tf_free_value: str) -> "TfLiteModelType":
    """Looks up the member whose value matches a TF-free type string.

    Args:
      tf_free_value: The model type name without the "tf_lite_" prefix; case
        is ignored.

    Returns:
      The matching TfLiteModelType member.
    """
    return cls(f"tf_lite_{tf_free_value.lower()}")
111
+
112
+
113
@dataclasses.dataclass
class _SectionObject:
  """Internal record for one section pending serialization at build() time."""

  # Metadata for the section.
  metadata: list[Metadata]
  # The data type of the section.
  data_type: schema.AnySectionDataType | int
  # The data reader for the section. This should return the data as a byte
  # string. It is invoked lazily in build(), so the underlying file must
  # still exist at that point.
  data_reader: Callable[[], bytes]
122
+
123
+
124
# TypeVar used as the fluent-return annotation of LitertLmFileBuilder's
# builder methods so chained calls keep the concrete (sub)class type.
LitertLmFileBuilderT = TypeVar(
    "LitertLmFileBuilderT", bound="LitertLmFileBuilder"
)
127
+
128
+
129
class LitertLmFileBuilder:
  """LitertLmFileBuilder class.

  This is the primary entry point for building a LiteRT-LM file. It provides
  methods to add system metadata, sections, and llm metadata to the file.
  Sections are written to the output stream in insertion order; the header
  (which records each section's byte range) is written last into the reserved
  first block.

  Example usage:
  ```
  builder = litertlm_builder.LitertLmFileBuilder()
  builder.add_system_metadata(
      litertlm_builder.Metadata(
          key="Authors",
          value="The ODML Authors",
          dtype=litertlm_builder.DType.STRING,
      )
  )
  builder.add_tflite_model(
      model_path,
      litertlm_builder.TfLiteModelType.PREFILL_DECODE,
  )
  builder.add_sentencepiece_tokenizer(tokenizer_path)
  builder.add_llm_metadata(llm_metadata_path)
  with litertlm_core.open_file(output_path, "wb") as f:
    builder.build(f)
  ```
  """

  def __init__(self):
    # System-wide key/value metadata, serialized into the header.
    self._system_metadata: list[Metadata] = []
    # Sections in insertion order; payloads are read lazily at build() time.
    self._sections: list[_SectionObject] = []
    # Guard flags: at most one llm metadata section and one tokenizer section.
    self._has_llm_metadata = False
    self._has_tokenizer = False

  def add_system_metadata(
      self,
      metadata: Metadata,
  ) -> LitertLmFileBuilderT:
    """Adds system level metadata to the litertlm file.

    Args:
      metadata: The metadata entry to add.

    Returns:
      The current LitertLmFileBuilder object.

    Raises:
      ValueError: If metadata with the same key was already added.
    """
    for existing_metadata in self._system_metadata:
      if existing_metadata.key == metadata.key:
        raise ValueError(
            f"System metadata already exists for key: {metadata.key}"
        )
    self._system_metadata.append(metadata)
    return self

  def add_llm_metadata(
      self,
      llm_metadata_path: str,
      additional_metadata: Optional[list[Metadata]] = None,
  ) -> LitertLmFileBuilderT:
    """Adds llm metadata to the litertlm file.

    Args:
      llm_metadata_path: The path to the llm metadata file. Can be binary or
        textproto format.
      additional_metadata: Additional metadata to add to the llm metadata.

    Returns:
      The current LitertLmFileBuilder object.

    Raises:
      FileNotFoundError: If the llm metadata file is not found.
    """
    # NOTE(review): `assert` is stripped under `python -O`; a ValueError would
    # be sturdier for API misuse — kept as-is to preserve the exception type.
    assert not self._has_llm_metadata, "Llm metadata already added."
    self._has_llm_metadata = True
    if not litertlm_core.path_exists(llm_metadata_path):
      raise FileNotFoundError(
          f"Llm metadata file not found: {llm_metadata_path}"
      )

    if _is_binary_proto(llm_metadata_path):

      # Binary protos are copied through verbatim.
      def data_reader():
        with litertlm_core.open_file(llm_metadata_path, "rb") as f:
          return f.read()

    else:

      # Textprotos are converted to binary wire format at build() time.
      def data_reader():
        with litertlm_core.open_file(llm_metadata_path, "r") as f:
          return text_format.Parse(
              f.read(), llm_metadata_pb2.LlmMetadata()
          ).SerializeToString()

    section_object = _SectionObject(
        metadata=additional_metadata if additional_metadata else [],
        data_type=schema.AnySectionDataType.LlmMetadataProto,
        data_reader=data_reader,
    )
    self._sections.append(section_object)
    return self

  def add_tflite_model(
      self,
      tflite_model_path: str,
      model_type: TfLiteModelType,
      additional_metadata: Optional[list[Metadata]] = None,
  ) -> LitertLmFileBuilderT:
    """Adds a tflite model to the litertlm file.

    Args:
      tflite_model_path: The path to the tflite model file.
      model_type: The type of the tflite model.
      additional_metadata: Additional metadata to add to the tflite model.

    Returns:
      The current LitertLmFileBuilder object.

    Raises:
      FileNotFoundError: If the tflite model file is not found.
      ValueError: If the model type metadata is overridden.
    """
    if not litertlm_core.path_exists(tflite_model_path):
      raise FileNotFoundError(
          f"Tflite model file not found: {tflite_model_path}"
      )
    # The model type is always recorded under the reserved "model_type" key.
    metadata = [
        Metadata(key="model_type", value=model_type.value, dtype=DType.STRING)
    ]
    if additional_metadata:
      for metadata_item in additional_metadata:
        if metadata_item.key == "model_type":
          raise ValueError("Model type metadata cannot be overridden.")
      metadata.extend(additional_metadata)

    def data_reader():
      with litertlm_core.open_file(tflite_model_path, "rb") as f:
        return f.read()

    section_object = _SectionObject(
        metadata=metadata,
        data_type=schema.AnySectionDataType.TFLiteModel,
        data_reader=data_reader,
    )
    self._sections.append(section_object)
    return self

  def add_sentencepiece_tokenizer(
      self,
      sp_tokenizer_path: str,
      additional_metadata: Optional[list[Metadata]] = None,
  ) -> LitertLmFileBuilderT:
    """Adds a sentencepiece tokenizer to the litertlm file.

    Args:
      sp_tokenizer_path: The path to the sentencepiece tokenizer file.
      additional_metadata: Additional metadata to add to the sentencepiece
        tokenizer.

    Returns:
      The current LitertLmFileBuilder object.

    Raises:
      FileNotFoundError: If the sentencepiece tokenizer file is not found.
    """
    # NOTE(review): same `assert`-for-validation caveat as add_llm_metadata.
    assert not self._has_tokenizer, "Tokenizer already added."
    self._has_tokenizer = True
    if not litertlm_core.path_exists(sp_tokenizer_path):
      raise FileNotFoundError(
          f"Sentencepiece tokenizer file not found: {sp_tokenizer_path}"
      )

    def data_reader():
      with litertlm_core.open_file(sp_tokenizer_path, "rb") as f:
        return f.read()

    section_object = _SectionObject(
        metadata=additional_metadata if additional_metadata else [],
        data_type=schema.AnySectionDataType.SP_Tokenizer,
        data_reader=data_reader,
    )
    self._sections.append(section_object)
    return self

  def add_hf_tokenizer(
      self,
      hf_tokenizer_path: str,
      additional_metadata: Optional[list[Metadata]] = None,
  ) -> LitertLmFileBuilderT:
    """Adds a hf tokenizer to the litertlm file.

    Args:
      hf_tokenizer_path: The path to the hf tokenizer `tokenizer.json` file.
      additional_metadata: Additional metadata to add to the hf tokenizer.

    Returns:
      The current LitertLmFileBuilder object.

    Raises:
      FileNotFoundError: If the hf tokenizer file is not found.
    """
    assert not self._has_tokenizer, "Tokenizer already added."
    self._has_tokenizer = True
    if not litertlm_core.path_exists(hf_tokenizer_path):
      raise FileNotFoundError(
          f"HF tokenizer file not found: {hf_tokenizer_path}"
      )

    # The section payload is zlib-compressed and prefixed with the original
    # size as an 8-byte little-endian integer, matching the
    # HF_Tokenizer_Zlib section format.
    def read_and_compress(path: str) -> bytes:
      with litertlm_core.open_file(path, "rb") as f:
        content = f.read()
      uncompressed_size = len(content)
      compressed_content = zlib.compress(content)
      return uncompressed_size.to_bytes(8, "little") + compressed_content

    section_object = _SectionObject(
        metadata=additional_metadata if additional_metadata else [],
        data_type=schema.AnySectionDataType.HF_Tokenizer_Zlib,
        data_reader=lambda: read_and_compress(hf_tokenizer_path),
    )
    self._sections.append(section_object)
    return self

  def build(
      self,
      stream: BinaryIO | IO[Union[bytes, str]],
  ) -> None:
    """Builds the litertlm into the given stream.

    Writes all added sections (each padded to a block boundary), then writes
    the header into the reserved first block.
    """
    stream.seek(0)
    # To simplify the build logic, we reserved the first block for the header.
    # This translates to the first block will be padded to `BLOCK_SIZE`.
    # TODO(b/413978412): support headers > 16KB.
    stream.write(b"\0" * litertlm_core.BLOCK_SIZE)

    # Write sections, recording each section's (begin, end) byte range for
    # the header.
    offsets = []
    for section in self._sections:
      start_offset = stream.tell()
      stream.write(section.data_reader())
      end_offset = stream.tell()
      offsets.append((start_offset, end_offset))
      _write_padding(stream, litertlm_core.BLOCK_SIZE)

    # write header
    self._write_header(stream, offsets)

  def _write_header(
      self, stream: BinaryIO, offsets: list[tuple[int, int]]
  ) -> None:
    """Writes the header to the stream.

    Layout: 8-byte magic, three 4-byte little-endian version fields, padding
    up to HEADER_BEGIN_BYTE_OFFSET, then the FlatBuffers header data. The
    header's end offset is recorded as an 8-byte little-endian integer at
    HEADER_END_LOCATION_BYTE_OFFSET.
    """
    assert self._system_metadata, "System metadata is empty."

    stream.seek(0)
    stream.write(b"LITERTLM")
    stream.write(litertlm_core.LITERTLM_MAJOR_VERSION.to_bytes(4, "little"))
    stream.write(litertlm_core.LITERTLM_MINOR_VERSION.to_bytes(4, "little"))
    stream.write(litertlm_core.LITERTLM_PATCH_VERSION.to_bytes(4, "little"))
    _write_padding(stream, litertlm_core.HEADER_BEGIN_BYTE_OFFSET)
    stream.write(self._get_header_data(offsets))
    header_end_offset = stream.tell()
    if header_end_offset > litertlm_core.BLOCK_SIZE:
      raise ValueError("Header size exceeds 16KB limit.")
    stream.seek(litertlm_core.HEADER_END_LOCATION_BYTE_OFFSET)
    stream.write(header_end_offset.to_bytes(8, "little"))

  def _get_header_data(self, offsets: list[tuple[int, int]]) -> bytearray:
    """Serializes the header FlatBuffer and returns its bytes."""
    builder = flatbuffers.Builder(1024)
    # Child tables must be finished before the root table is started.
    system_metadata_offset = self._write_system_metadata(builder)
    section_metadata_offset = self._write_section_metadata(builder, offsets)
    schema.LiteRTLMMetaDataStart(builder)
    schema.LiteRTLMMetaDataAddSystemMetadata(builder, system_metadata_offset)
    schema.LiteRTLMMetaDataAddSectionMetadata(builder, section_metadata_offset)
    root = schema.LiteRTLMMetaDataEnd(builder)
    builder.Finish(root)
    return builder.Output()

  def _write_system_metadata(self, builder: flatbuffers.Builder) -> int:
    """Writes the system metadata to the builder; returns its table offset."""
    system_metadata_offsets = [
        _write_metadata(builder, m) for m in self._system_metadata
    ]
    schema.SystemMetadataStartEntriesVector(
        builder, len(system_metadata_offsets)
    )
    # FlatBuffers vectors are built back-to-front.
    for offsets in reversed(system_metadata_offsets):
      builder.PrependUOffsetTRelative(offsets)
    entries_vec = builder.EndVector()
    schema.SystemMetadataStart(builder)
    schema.SystemMetadataAddEntries(builder, entries_vec)
    return schema.SystemMetadataEnd(builder)

  def _write_section_metadata(
      self, builder: flatbuffers.Builder, offsets: list[tuple[int, int]]
  ) -> int:
    """Writes the section metadata to the builder; returns its table offset."""
    assert len(self._sections) == len(offsets)

    section_objects_offsets = []
    for section, offset in zip(self._sections, offsets):
      section_objects_offsets.append(
          _write_section_object(
              builder, section.metadata, offset, section.data_type
          )
      )

    schema.SectionMetadataStartObjectsVector(
        builder, len(section_objects_offsets)
    )
    # FlatBuffers vectors are built back-to-front.
    for obj in reversed(section_objects_offsets):
      builder.PrependUOffsetTRelative(obj)
    objects_vec = builder.EndVector()

    schema.SectionMetadataStart(builder)
    schema.SectionMetadataAddObjects(builder, objects_vec)
    return schema.SectionMetadataEnd(builder)
435
+
436
+
437
def _is_binary_proto(filepath: str) -> bool:
  """Checks if a file is a binary protobuf or a textproto version of LlmMetadata.

  Args:
    filepath: The path to the file.

  Returns:
    True if the file parses as a binary LlmMetadata protobuf, False if it
    parses as a textproto.

  Raises:
    ValueError: If the file cannot be parsed as LlmMetadata in either format.
  """
  assert litertlm_core.path_exists(filepath), f"File {filepath} does not exist."

  try:
    with litertlm_core.open_file(filepath, "rb") as f:
      content = f.read()
    msg = llm_metadata_pb2.LlmMetadata()
    msg.ParseFromString(content)
    # NOTE(review): for proto3 messages IsInitialized() is presumably always
    # True after a successful parse — TODO confirm against llm_metadata.proto.
    if msg.IsInitialized():
      return True
  except message.DecodeError:
    # This is expected if the file is in text format. We'll just pass and try
    # the next format.
    pass

  try:
    with litertlm_core.open_file(filepath, "r") as f:
      content = f.read()
    msg = text_format.Parse(content, llm_metadata_pb2.LlmMetadata())
    if msg.IsInitialized():
      return False
  except (text_format.ParseError, UnicodeDecodeError) as e:
    raise ValueError(f"Failed to parse LlmMetadata from {filepath}.") from e

  # Bug fix: previously the function fell off the end here and implicitly
  # returned None when a parse succeeded but the message was uninitialized,
  # which callers would silently treat as "textproto". Fail loudly instead.
  raise ValueError(f"Failed to parse LlmMetadata from {filepath}.")
469
+
470
+
471
+ def _write_padding(stream: BinaryIO, block_size: int) -> None:
472
+ """Writes zero padding to align to the next block size."""
473
+ current_pos = stream.tell()
474
+ padding_needed = (block_size - (current_pos % block_size)) % block_size
475
+ if padding_needed > 0:
476
+ stream.write(b"\0" * padding_needed)
477
+
478
+
479
def _write_metadata(builder: flatbuffers.Builder, metadata: Metadata) -> int:
  """Writes a FlatBuffers KeyValuePair for a single Metadata entry.

  Args:
    builder: The flatbuffers builder to append to.
    metadata: The entry to serialize; `metadata.dtype` selects which value
      union table is written.

  Returns:
    The builder offset of the finished KeyValuePair table.

  Raises:
    ValueError: If `metadata.dtype` is not a supported DType.
  """
  # FlatBuffers strings must be created before any table is started.
  key_offset = builder.CreateString(metadata.key)

  if metadata.dtype == DType.STRING:
    # String values also need their payload string created up-front.
    value_offset_str = builder.CreateString(str(metadata.value))
    schema.StringValueStart(builder)
    schema.StringValueAddValue(builder, value_offset_str)
    value_offset = schema.StringValueEnd(builder)
    value_type = schema.VData.StringValue
  else:
    # Every scalar dtype follows the same Start/AddValue/End pattern; map
    # each dtype to its schema helpers and its VData union discriminator.
    scalar_writers = {
        DType.BOOL: (schema.BoolStart, schema.BoolAddValue, schema.BoolEnd,
                     schema.VData.Bool),
        DType.INT8: (schema.Int8Start, schema.Int8AddValue, schema.Int8End,
                     schema.VData.Int8),
        DType.INT16: (schema.Int16Start, schema.Int16AddValue,
                      schema.Int16End, schema.VData.Int16),
        DType.INT32: (schema.Int32Start, schema.Int32AddValue,
                      schema.Int32End, schema.VData.Int32),
        DType.INT64: (schema.Int64Start, schema.Int64AddValue,
                      schema.Int64End, schema.VData.Int64),
        DType.UINT8: (schema.UInt8Start, schema.UInt8AddValue,
                      schema.UInt8End, schema.VData.UInt8),
        DType.UINT16: (schema.UInt16Start, schema.UInt16AddValue,
                       schema.UInt16End, schema.VData.UInt16),
        DType.UINT32: (schema.UInt32Start, schema.UInt32AddValue,
                       schema.UInt32End, schema.VData.UInt32),
        DType.UINT64: (schema.UInt64Start, schema.UInt64AddValue,
                       schema.UInt64End, schema.VData.UInt64),
        DType.FLOAT32: (schema.Float32Start, schema.Float32AddValue,
                        schema.Float32End, schema.VData.Float32),
        DType.DOUBLE: (schema.DoubleStart, schema.DoubleAddValue,
                       schema.DoubleEnd, schema.VData.Double),
    }
    if metadata.dtype not in scalar_writers:
      raise ValueError(f"Unsupported dtype: {metadata.dtype}")
    start_fn, add_value_fn, end_fn, value_type = scalar_writers[metadata.dtype]
    start_fn(builder)
    add_value_fn(builder, metadata.value)
    value_offset = end_fn(builder)

  schema.KeyValuePairStart(builder)
  schema.KeyValuePairAddKey(builder, key_offset)
  schema.KeyValuePairAddValueType(builder, value_type)
  schema.KeyValuePairAddValue(builder, value_offset)
  return schema.KeyValuePairEnd(builder)
552
+
553
+
554
def _write_section_object(
    builder: flatbuffers.Builder,
    section_metadata: list[Metadata],
    section_offset: tuple[int, int],
    section_type: schema.AnySectionDataType,
) -> int:
  """Writes a FlatBuffers SectionObject.

  Args:
    builder: The flatbuffers builder to append to.
    section_metadata: Per-section key/value metadata entries.
    section_offset: The (begin, end) byte range of the section payload.
    section_type: The AnySectionDataType discriminator for the payload.

  Returns:
    The builder offset of the finished SectionObject table.
  """
  begin_offset, end_offset = section_offset
  # Child KeyValuePair tables must be finished before the vector is started.
  item_offsets = [_write_metadata(builder, entry) for entry in section_metadata]
  schema.SectionObjectStartItemsVector(builder, len(item_offsets))
  # FlatBuffers vectors are built back-to-front.
  for item in reversed(item_offsets):
    builder.PrependUOffsetTRelative(item)
  items_vector = builder.EndVector()
  schema.SectionObjectStart(builder)
  schema.SectionObjectAddItems(builder, items_vector)
  schema.SectionObjectAddBeginOffset(builder, begin_offset)
  schema.SectionObjectAddEndOffset(builder, end_offset)
  schema.SectionObjectAddDataType(builder, section_type)
  return schema.SectionObjectEnd(builder)
@@ -0,0 +1,58 @@
1
+ # Copyright 2025 The ODML Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """Core library with shared constants and utilities for LiteRT-LM tools."""
16
+
17
+ # TODO(b/445163709): Remove this module once litert_lm publishes a pypi package.
18
+
19
+ import os # pylint: disable=unused-import
20
+
21
+ from ai_edge_litert.internal import litertlm_header_schema_py_generated as schema
22
+
23
# --- File Format Constants ---
# LINT.IfChange(litertlm_version_constants) # copybara:comment
# Version of the LiteRT-LM file format produced by the builder. Must stay in
# sync with the schema definitions referenced below.
LITERTLM_MAJOR_VERSION = 1
LITERTLM_MINOR_VERSION = 4
LITERTLM_PATCH_VERSION = 0
# copybara:comment_begin(google-only)
# LINT.ThenChange(
#   litert_lm/schema/core/litertlm_header.h:litertlm_version_constants,
#   litert_lm/schema/py/litertlm_core.py:litertlm_version_constants
# )
# copybara:comment_end(google-only)
# Sections and the reserved header block are padded to 16 KiB boundaries.
BLOCK_SIZE = 16 * 1024
# Byte offset where the FlatBuffers header data begins: the 8-byte magic plus
# three 4-byte version fields occupy 20 bytes, padded up to 32.
HEADER_BEGIN_BYTE_OFFSET = 32
# Byte offset of the 8-byte little-endian integer recording where the header
# data ends.
HEADER_END_LOCATION_BYTE_OFFSET = 24

# Reverse lookup from AnySectionDataType enum value to its attribute name.
# NOTE(review): this iterates the generated class __dict__, so non-enum
# attributes (e.g. dunders) are also inserted keyed by their values —
# harmless for integer lookups, but worth confirming against the generated
# schema module.
SECTION_DATA_TYPE_TO_STRING_MAP = {
    v: k for k, v in schema.AnySectionDataType.__dict__.items()
}
41
+
42
+
43
def any_section_data_type_to_string(data_type: int):
  """Converts AnySectionDataType enum to its string representation.

  Raises:
    ValueError: If `data_type` is not a known AnySectionDataType value.
  """
  name = SECTION_DATA_TYPE_TO_STRING_MAP.get(data_type)
  if name is None:
    raise ValueError(f"Unknown AnySectionDataType value: {data_type}")
  return name
49
+
50
+
51
def path_exists(file_path: str) -> bool:
  """Returns True when `file_path` refers to an existing filesystem entry."""
  return os.path.exists(file_path)
54
+
55
+
56
def open_file(file_path: str, mode: str = "rb"):
  """Opens `file_path` in the given mode (binary read by default)."""
  return open(file_path, mode)