mediapipe-nightly 0.10.11.post20240301__cp311-cp311-macosx_11_0_x86_64.whl → 0.10.11.post20240302__cp311-cp311-macosx_11_0_x86_64.whl
- mediapipe/__init__.py +1 -1
- mediapipe/python/_framework_bindings.cpython-311-darwin.so +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/__init__.py +0 -0
- mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py +27 -0
- mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py +31 -0
- mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py +29 -0
- mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py +32 -0
- mediapipe/tasks/python/genai/converter/converter_base.py +4 -1
- mediapipe/tasks/python/genai/converter/llm_converter.py +15 -12
- mediapipe/tasks/python/genai/converter/pytorch_converter.py +7 -4
- mediapipe/tasks/python/genai/converter/pytorch_converter_test.py +1 -1
- mediapipe/tasks/python/genai/converter/safetensors_converter.py +5 -4
- mediapipe/tasks/python/genai/converter/safetensors_converter_test.py +1 -1
- {mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/METADATA +1 -1
- {mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/RECORD +18 -13
- {mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/LICENSE +0 -0
- {mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/WHEEL +0 -0
- {mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/top_level.txt +0 -0

mediapipe/__init__.py CHANGED

mediapipe/python/_framework_bindings.cpython-311-darwin.so CHANGED
Binary file

mediapipe/tasks/cc/genai/inference/calculators/__init__.py ADDED
File without changes

mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py ADDED
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator.proto
+# Protobuf Python Version: 4.25.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nKmediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator.proto\x12\x10odml.infra.proto\"q\n\x1c\x44\x65tokenizerCalculatorOptions\x12\x16\n\x0espm_model_file\x18\x01 \x01(\t\x12\x13\n\x0bstop_tokens\x18\x04 \x03(\t\x12\x18\n\x10num_output_heads\x18\x05 \x01(\x05J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\x42@\n\x1b\x63om.google.odml.infra.protoB!DetokenizerCalculatorOptionsProtob\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mediapipe.tasks.cc.genai.inference.calculators.detokenizer_calculator_pb2', _globals)
+if _descriptor._USE_C_DESCRIPTORS == False:
+  _globals['DESCRIPTOR']._options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\n\033com.google.odml.infra.protoB!DetokenizerCalculatorOptionsProto'
+  _globals['_DETOKENIZERCALCULATOROPTIONS']._serialized_start=97
+  _globals['_DETOKENIZERCALCULATOROPTIONS']._serialized_end=210
+# @@protoc_insertion_point(module_scope)

mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py ADDED
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator.proto
+# Protobuf Python Version: 4.25.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from mediapipe.tasks.cc.genai.inference.proto import llm_file_metadata_pb2 as mediapipe_dot_tasks_dot_cc_dot_genai_dot_inference_dot_proto_dot_llm__file__metadata__pb2
+from mediapipe.tasks.cc.genai.inference.proto import llm_params_pb2 as mediapipe_dot_tasks_dot_cc_dot_genai_dot_inference_dot_proto_dot_llm__params__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nGmediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator.proto\x12\x10odml.infra.proto\x1a@mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata.proto\x1a\x39mediapipe/tasks/cc/genai/inference/proto/llm_params.proto\"\xf4\x04\n\x17LlmGpuCalculatorOptions\x12\x13\n\x0bweight_path\x18\x01 \x01(\t\x12N\n\x0egpu_model_info\x18\n \x01(\x0b\x32\x36.odml.infra.proto.LlmGpuCalculatorOptions.GpuModelInfo\x12\x19\n\x11num_decode_tokens\x18\x0c \x01(\x05\x12\x1b\n\x13sequence_batch_size\x18\x0e \x01(\x05\x12\x11\n\tlora_path\x18\x13 \x01(\t\x12\x37\n\x0ellm_parameters\x18\x14 \x01(\x0b\x32\x1f.odml.infra.proto.LlmParameters\x12\x18\n\x10num_output_heads\x18\x16 \x01(\x05\x12\x0c\n\x04topk\x18\x17 \x01(\x05\x12\x13\n\x0btemperature\x18\x18 \x01(\x02\x12\x18\n\x0brandom_seed\x18\x19 \x01(\rH\x00\x88\x01\x01\x1a\xc0\x01\n\x0cGpuModelInfo\x12\x1c\n\x14\x61llow_precision_loss\x18\x01 \x01(\x08\x12\x1a\n\x12\x65nable_fast_tuning\x18\x02 \x01(\x08\x12\x1b\n\x13\x65nable_winograd_opt\x18\x03 \x01(\x08\x12\x15\n\ruse_low_power\x18\x04 \x01(\x08\x12\x1e\n\x16prefer_texture_weights\x18\x05 \x01(\x08\x12\"\n\x1a\x65nable_host_mapped_pointer\x18\x06 \x01(\x08\x42\x0e\n\x0c_random_seedJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08\x07\x10\x08J\x04\x08\x08\x10\tJ\x04\x08\t\x10\nJ\x04\x08\x0b\x10\x0cJ\x04\x08\r\x10\x0eJ\x04\x08\x1a\x10\x1bJ\x04\x08\x1b\x10\x1c\x42;\n\x1b\x63om.google.odml.infra.protoB\x1cLlmGpuCalculatorOptionsProtob\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mediapipe.tasks.cc.genai.inference.calculators.llm_gpu_calculator_pb2', _globals)
+if _descriptor._USE_C_DESCRIPTORS == False:
+  _globals['DESCRIPTOR']._options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\n\033com.google.odml.infra.protoB\034LlmGpuCalculatorOptionsProto'
+  _globals['_LLMGPUCALCULATOROPTIONS']._serialized_start=219
+  _globals['_LLMGPUCALCULATOROPTIONS']._serialized_end=847
+  _globals['_LLMGPUCALCULATOROPTIONS_GPUMODELINFO']._serialized_start=567
+  _globals['_LLMGPUCALCULATOROPTIONS_GPUMODELINFO']._serialized_end=759
+# @@protoc_insertion_point(module_scope)

mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py ADDED
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator.proto
+# Protobuf Python Version: 4.25.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nImediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator.proto\x12\x10odml.infra.proto\"\xb7\x02\n\x1aTokenizerCalculatorOptions\x12\x12\n\nmax_tokens\x18\x01 \x01(\x05\x12\x18\n\x0espm_model_file\x18\x02 \x01(\tH\x00\x12Y\n\x11tflite_model_file\x18\x04 \x01(\x0b\x32<.odml.infra.proto.TokenizerCalculatorOptions.TfLiteModelFileH\x00\x12\x16\n\x0estart_token_id\x18\x03 \x01(\x05\x12 \n\x18\x62ytes_to_unicode_mapping\x18\x05 \x01(\x08\x1aH\n\x0fTfLiteModelFile\x12\x12\n\nmodel_file\x18\x01 \x01(\t\x12!\n\x19spm_model_key_in_metadata\x18\x02 \x01(\tB\x0c\n\nmodel_fileB>\n\x1b\x63om.google.odml.infra.protoB\x1fTokenizerCalculatorOptionsProtob\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mediapipe.tasks.cc.genai.inference.calculators.tokenizer_calculator_pb2', _globals)
+if _descriptor._USE_C_DESCRIPTORS == False:
+  _globals['DESCRIPTOR']._options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\n\033com.google.odml.infra.protoB\037TokenizerCalculatorOptionsProto'
+  _globals['_TOKENIZERCALCULATOROPTIONS']._serialized_start=96
+  _globals['_TOKENIZERCALCULATOROPTIONS']._serialized_end=407
+  _globals['_TOKENIZERCALCULATOROPTIONS_TFLITEMODELFILE']._serialized_start=321
+  _globals['_TOKENIZERCALCULATOROPTIONS_TFLITEMODELFILE']._serialized_end=393
+# @@protoc_insertion_point(module_scope)

mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py ADDED
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata.proto
+# Protobuf Python Version: 4.25.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from mediapipe.tasks.cc.genai.inference.proto import llm_params_pb2 as mediapipe_dot_tasks_dot_cc_dot_genai_dot_inference_dot_proto_dot_llm__params__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n@mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata.proto\x12\x10odml.infra.proto\x1a\x39mediapipe/tasks/cc/genai/inference/proto/llm_params.proto\"\xe8\x02\n\x0fLlmFileMetadata\x12=\n\x07tensors\x18\x01 \x03(\x0b\x32,.odml.infra.proto.LlmFileMetadata.TensorInfo\x12\x35\n\x0cmodel_params\x18\x02 \x01(\x0b\x32\x1f.odml.infra.proto.LlmParameters\x12\x11\n\tlora_rank\x18\x03 \x01(\x05\x1a\xcb\x01\n\nTensorInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06offset\x18\x02 \x01(\x04\x12\x0c\n\x04size\x18\x03 \x01(\x04\x12H\n\tdata_type\x18\x04 \x01(\x0e\x32\x35.odml.infra.proto.LlmFileMetadata.TensorInfo.DataType\"G\n\x08\x44\x61taType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07\x46LOAT32\x10\x01\x12\x08\n\x04INT8\x10\x02\x12\x08\n\x04INT4\x10\x03\x12\t\n\x05UINT4\x10\x04\x42\x33\n\x1b\x63om.google.odml.infra.protoB\x14LlmFileMetadataProtob\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'mediapipe.tasks.cc.genai.inference.proto.llm_file_metadata_pb2', _globals)
+if _descriptor._USE_C_DESCRIPTORS == False:
+  _globals['DESCRIPTOR']._options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\n\033com.google.odml.infra.protoB\024LlmFileMetadataProto'
+  _globals['_LLMFILEMETADATA']._serialized_start=146
+  _globals['_LLMFILEMETADATA']._serialized_end=506
+  _globals['_LLMFILEMETADATA_TENSORINFO']._serialized_start=303
+  _globals['_LLMFILEMETADATA_TENSORINFO']._serialized_end=506
+  _globals['_LLMFILEMETADATA_TENSORINFO_DATATYPE']._serialized_start=435
+  _globals['_LLMFILEMETADATA_TENSORINFO_DATATYPE']._serialized_end=506
+# @@protoc_insertion_point(module_scope)

mediapipe/tasks/python/genai/converter/converter_base.py CHANGED
@@ -14,6 +14,7 @@
 
 """Defines a couple base classes for the conversion/quantization process."""
 
+from typing import Iterator
 import os
 from typing import Dict, List, Optional, Tuple
 import numpy as np
@@ -105,7 +106,9 @@ class CkptLoaderBase:
     self._feedforward_quant_bits = feedforward_quant_bits
     self._embedding_quant_bits = embedding_quant_bits
 
-  def load_to_actions(
+  def load_to_actions(
+      self,
+  ) -> Iterator[Optional[List[QuantizationAction]]]:
     """Loads the checkpoint and returns the quantization actions."""
     raise NotImplementedError("The load_to_actions method is not implemented.")
 
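
Note on the hunk above: CkptLoaderBase.load_to_actions now declares an Iterator of per-tensor action lists rather than a single list. The snippet below is an illustrative sketch only, not MediaPipe code (the _ToyAction and _ToyLoader names are made up), showing the shape of the new generator-style contract and how a caller iterates it:

from typing import Iterator, List, Optional


class _ToyAction:
  """Stand-in for converter_base.QuantizationAction."""

  def __init__(self, tensor_name: str):
    self.tensor_name = tensor_name


class _ToyLoader:
  """Stand-in for a CkptLoaderBase subclass."""

  def __init__(self, tensor_names: List[str]):
    self._tensor_names = tensor_names

  def load_to_actions(self) -> Iterator[Optional[List[_ToyAction]]]:
    # Yield one small batch of actions per tensor instead of materializing
    # every action for the whole checkpoint at once.
    for name in self._tensor_names:
      yield [_ToyAction(name)]


if __name__ == "__main__":
  for batch in _ToyLoader(["embedder.weight", "layer_0.q_proj"]).load_to_actions():
    print([a.tensor_name for a in batch])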

mediapipe/tasks/python/genai/converter/llm_converter.py CHANGED
@@ -193,18 +193,21 @@ def convert_checkpoint(config: ConversionConfig) -> None:
   )
   actions = loader.load_to_actions()
 
-
-
-
-
-
-
-
-
-
-
-
-
+  for action in actions:
+    # Quantize the weight.
+    quantized_tensors = quantize_by_actions(
+        action, config.backend, config.is_symmetric
+    )
+    del action
+    # Write the quantized tensors into file(s).
+    writer = converter_factory.create_writer(
+        writer_type='weight_bins',
+        output_dir=config.output_dir,
+        backend=config.backend,
+    )
+    writer.write_variables(quantized_tensors)
+    del quantized_tensors
+    del writer
 
   combined_weight_bins_to_tflite(
       config.model_type,
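
For context on the hunk above: convert_checkpoint now quantizes and writes one action at a time and drops each intermediate with del, instead of processing everything after all actions are collected (the removed lines' text was not preserved in this listing). A rough illustrative sketch of that memory pattern, using made-up stand-ins for the loader, quantizer, and writer:

import numpy as np


def _toy_actions():
  # Stand-in for loader.load_to_actions(): lazily produce large float tensors.
  for i in range(3):
    yield {"name": f"tensor_{i}", "weights": np.ones((1024, 1024), np.float32)}


def _toy_quantize(action):
  # Stand-in for quantize_by_actions(): int8 values plus a scale factor.
  return {action["name"]: (action["weights"].astype(np.int8), np.float32(0.01))}


for action in _toy_actions():
  quantized = _toy_quantize(action)
  del action       # drop the float weights as soon as they are quantized
  # A real writer would flush `quantized` to the weight-bin files here.
  del quantized    # drop the quantized copy before the next tensor is loaded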

mediapipe/tasks/python/genai/converter/pytorch_converter.py CHANGED
@@ -14,6 +14,7 @@
 
 """CkptLoader implementation for loading the Pytorch file."""
 
+from typing import Iterator
 import enum
 import os
 from typing import List, Optional
@@ -306,10 +307,12 @@ class PytorchCkptLoader(converter_base.CkptLoaderBase):
     else:
       raise ValueError(f"Unknown special model: {special_model}")
 
-  def load_to_actions(
+  def load_to_actions(
+      self,
+  ) -> Iterator[List[converter_base.QuantizationAction]]:
     tensor_names = self._reader.get_tensor_names()
-    actions = []
     for tensor_name in tensor_names:
       tensor_actions = self.mapper.map_to_actions(tensor_name)
-
-
+      if tensor_actions is None:
+        continue
+      yield tensor_actions

mediapipe/tasks/python/genai/converter/pytorch_converter_test.py CHANGED
@@ -79,7 +79,7 @@ class PytorchConverterTest(parameterized.TestCase):
     actions = loader.load_to_actions()
     # There are 16 layers in the model, but qkv weight and bias would be
     # decomposed to q, k, v tensors, so there would be 20 quantization actions.
-    self.
+    self.assertEqual(sum(len(action) for action in actions), 20)
 
 
 if __name__ == '__main__':

mediapipe/tasks/python/genai/converter/safetensors_converter.py CHANGED
@@ -15,6 +15,7 @@
 """CkptLoader implementation for loading the Safetensors."""
 
 import array
+from typing import Iterator
 import enum
 import glob
 import json
@@ -510,12 +511,12 @@ class SafetensorsCkptLoader(converter_base.CkptLoaderBase):
     else:
      raise ValueError(f"Unknown special model: {special_model}")
 
-  def load_to_actions(
+  def load_to_actions(
+      self,
+  ) -> Iterator[List[converter_base.QuantizationAction]]:
     tensor_names = self._reader.get_tensor_names()
-    actions = []
     for tensor_name in tensor_names:
       tensor_actions = self.mapper.map_to_actions(tensor_name)
       if tensor_actions is None:
         continue
-
-    return actions
+      yield tensor_actions
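
One practical consequence of the generator-based load_to_actions in the two loaders above (not stated in the diff itself, but worth keeping in mind when reading the test change): the returned iterator can only be consumed once, so callers that need several passes must materialize it first. A minimal illustration with a made-up generator:

def _toy_load_to_actions():
  # Stand-in for the new generator-style load_to_actions().
  for name in ("w0", "w1", "w2"):
    yield [name]


actions = _toy_load_to_actions()
print(sum(len(batch) for batch in actions))  # 3 -- the first pass consumes the iterator
print(sum(len(batch) for batch in actions))  # 0 -- a second pass sees nothing

# To iterate more than once, materialize the batches up front:
actions = list(_toy_load_to_actions())
print(sum(len(batch) for batch in actions))  # 3
print(sum(len(batch) for batch in actions))  # 3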

{mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mediapipe-nightly
-Version: 0.10.11.
+Version: 0.10.11.post20240302
 Summary: MediaPipe is the simplest way for researchers and developers to build world-class ML solutions and applications for mobile, edge, cloud and the web.
 Home-page: https://github.com/google/mediapipe
 Author: The MediaPipe Authors

{mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/RECORD
@@ -1,9 +1,9 @@
-mediapipe_nightly-0.10.11.
-mediapipe_nightly-0.10.11.
-mediapipe_nightly-0.10.11.
-mediapipe_nightly-0.10.11.
-mediapipe_nightly-0.10.11.
-mediapipe/__init__.py,sha256=
+mediapipe_nightly-0.10.11.post20240302.dist-info/RECORD,,
+mediapipe_nightly-0.10.11.post20240302.dist-info/LICENSE,sha256=hwfu8FM5h-_FsVXWR2HutuIHk_ULm9Gmja0c9HGdDtg,12331
+mediapipe_nightly-0.10.11.post20240302.dist-info/WHEEL,sha256=_dGy9EKu13jbT4E4ph0EWQuZFac2dCXB7_Dw2ZQDLNI,111
+mediapipe_nightly-0.10.11.post20240302.dist-info/top_level.txt,sha256=LG-epD1oIiiHFRqLp--7jacjB3dbx2RfMcLYjCIhmxU,175
+mediapipe_nightly-0.10.11.post20240302.dist-info/METADATA,sha256=Z_ixYoKtEPje2xOmQuHu44KTR17vwvA_rhw1LA27h1E,9715
+mediapipe/__init__.py,sha256=FG3p8a-sPsr3_nru1WF1PkqKrAHldPzVed4uOfnptR8,816
 mediapipe/tasks/__init__.py,sha256=sVJS2p8J2PNVl8DLRPVY6KLpHenP_z3QVPRU0x_iL5g,571
 mediapipe/tasks/python/__init__.py,sha256=wIM_WOWboOVI1MeehN8fkN_DjoA0MEBVw5mShAd8AS4,858
 mediapipe/tasks/python/benchmark/__init__.py,sha256=epEucluzX0HinwBZoS7Tgb19j_qgfTuBf-vBkqemch8,587
@@ -86,16 +86,16 @@ mediapipe/tasks/python/text/text_classifier.py,sha256=AJbYep6iL8vkf6JKRrGArr9sNd
 mediapipe/tasks/python/text/core/base_text_task_api.py,sha256=OHt7j_0n5c3HBdOrCb_BGWCdKWMKvDULp6tKA5mDZAc,1822
 mediapipe/tasks/python/text/core/__init__.py,sha256=ZKC2XRtShVe6k6u6LxDt1pG7DQIn5nZnjurs6Pcvm6A,593
 mediapipe/tasks/python/genai/__init__.py,sha256=7rri6fT6wNurla8O2c5yKiLs9_3qIY0vKkyVAUDe-18,620
-mediapipe/tasks/python/genai/converter/pytorch_converter_test.py,sha256=
-mediapipe/tasks/python/genai/converter/safetensors_converter_test.py,sha256=
+mediapipe/tasks/python/genai/converter/pytorch_converter_test.py,sha256=y_Mg9pOQtlUDh6uVmkz5LcUbk-pmDLA9L3KcxKR-OaA,3041
+mediapipe/tasks/python/genai/converter/safetensors_converter_test.py,sha256=oCk4FnsjBJkEPlXtv8fdq9dn3I06LsSQRMi0BV_9mew,2802
 mediapipe/tasks/python/genai/converter/quantization_util.py,sha256=B6i13GqRRIwMabEJWO8rFHPMBjIgdOhFpHiwMD4GzRc,17196
-mediapipe/tasks/python/genai/converter/safetensors_converter.py,sha256=
+mediapipe/tasks/python/genai/converter/safetensors_converter.py,sha256=ttCpqyMeHkIFzppP8G8uMG5VsslK5-ULFT5U3R_qif0,18713
 mediapipe/tasks/python/genai/converter/weight_bins_writer_test.py,sha256=6qgNYXODNOsbveZ0ighEW4JBdawil9mPcC16MZ0mdm8,1994
-mediapipe/tasks/python/genai/converter/converter_base.py,sha256=
-mediapipe/tasks/python/genai/converter/pytorch_converter.py,sha256=
+mediapipe/tasks/python/genai/converter/converter_base.py,sha256=1nBgvcY5xaI0ZPjBJTVdViDFraVyJUZHLXBT0dYOX9c,6568
+mediapipe/tasks/python/genai/converter/pytorch_converter.py,sha256=b-GWYOzgD-ZRGgyqcXE9LG_JOL0Mqba4q0pc_imrbrg,10771
 mediapipe/tasks/python/genai/converter/__init__.py,sha256=jfUkinDJR5BVldnbJMbo5vIr2Xc5Z4TTnaCJTNoAUvg,893
 mediapipe/tasks/python/genai/converter/converter_factory.py,sha256=2K16PZBQym0WhXM2HOdBMHMugykohoD4OTaOIo-UKko,2928
-mediapipe/tasks/python/genai/converter/llm_converter.py,sha256=
+mediapipe/tasks/python/genai/converter/llm_converter.py,sha256=OJqrYEDPQYwoOftBw-Ajsqtkcj9uRHBAb_Oo_RCEKa4,7896
 mediapipe/tasks/python/genai/converter/quantization_util_test.py,sha256=ICujhTFeREGuHGmNk1PlBpf1AUThFvv-Wl5UuZ-xWAk,9060
 mediapipe/tasks/python/genai/converter/weight_bins_writer.py,sha256=qrsjTWw99u-VDNhtHbnbDfMypx4sGfGyY8mMBXYnQtA,4347
 mediapipe/tasks/python/metadata/metadata.py,sha256=EECQnM-Af0angD60jaBBOuNMgt7HExH6SqVtVMFNHGc,33763
@@ -258,10 +258,15 @@ mediapipe/tasks/cc/genai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
 mediapipe/tasks/cc/genai/inference/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mediapipe/tasks/cc/genai/inference/proto/llm_params_pb2.py,sha256=zGZQREcWs5jzbMipVcGmXOrJFMeq5lgQNcA_U1XkOoE,2311
 mediapipe/tasks/cc/genai/inference/proto/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mediapipe/tasks/cc/genai/inference/proto/llm_file_metadata_pb2.py,sha256=MdDe96Ruxqcnm4vrixmyLqPmBWpXn207W50TX5UYMCo,2454
 mediapipe/tasks/cc/genai/inference/proto/transformer_params_pb2.py,sha256=-KHfCIuevXnBJusJ2o3_jaf4GHSq0no-o1BhR2jRPTk,5080
 mediapipe/tasks/cc/genai/inference/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mediapipe/tasks/cc/genai/inference/utils/llm_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mediapipe/tasks/cc/genai/inference/utils/xnn_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mediapipe/tasks/cc/genai/inference/calculators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mediapipe/tasks/cc/genai/inference/calculators/tokenizer_calculator_pb2.py,sha256=m6mFdMkEGTeGJOa5o00kQU7rVZRDmnBhum0kfEmQJ_8,2038
+mediapipe/tasks/cc/genai/inference/calculators/llm_gpu_calculator_pb2.py,sha256=vm7Oi2HiCZLI1fx6GDUOgfLcwfKebMGZOk3nFl_X5R0,3191
+mediapipe/tasks/cc/genai/inference/calculators/detokenizer_calculator_pb2.py,sha256=-VpQshysnt83CsxDB5AIUEb9qw6KaArkek1MR7xbJxI,1625
 mediapipe/tasks/cc/genai/inference/c/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mediapipe/tasks/cc/metadata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mediapipe/tasks/cc/metadata/python/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -314,7 +319,7 @@ mediapipe/util/sequence/media_sequence_util_test.py,sha256=nuN9-HW3kw2kZbraCH76q
 mediapipe/util/analytics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mediapipe/util/analytics/mediapipe_log_extension_pb2.py,sha256=UX1x8jckc8NQbUz-SXOKkb8co3JNIRX3rfaMs4_Dz8g,4319
 mediapipe/util/analytics/mediapipe_logging_enums_pb2.py,sha256=9pxs-DNSQnXQe7E_LKokGDbD_G1FpmzzJw1jzex78lU,3781
-mediapipe/python/_framework_bindings.cpython-311-darwin.so,sha256=
+mediapipe/python/_framework_bindings.cpython-311-darwin.so,sha256=IlYGT68y4YVPsQx8ZeHDSDPeDoyzQcl2D0hen0QWmxc,78906003
 mediapipe/python/solution_base.py,sha256=nEIqsho9DlutfvWWzdSxCOpJ2QzN7n2938WLDmFzn38,26072
 mediapipe/python/timestamp_test.py,sha256=oWKTZMsV586jH57OBV30rihcymETyGC29VbYURNLJQQ,2528
 mediapipe/python/image_frame_test.py,sha256=ZSjdE-an2t8i6MiA4_Xri91VMH5_CCx45fjhWUQptMY,8602

{mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/LICENSE
File without changes

{mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/WHEEL
File without changes

{mediapipe_nightly-0.10.11.post20240301.dist-info → mediapipe_nightly-0.10.11.post20240302.dist-info}/top_level.txt
File without changes