ai-edge-torch-nightly 0.4.0.dev20250329__py3-none-any.whl → 0.4.0.dev20250331__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- ai_edge_torch/generative/examples/amd_llama_135m/convert_to_tflite.py +7 -43
- ai_edge_torch/generative/examples/deepseek/convert_to_tflite.py +7 -42
- ai_edge_torch/generative/examples/gemma/convert_gemma1_to_tflite.py +7 -45
- ai_edge_torch/generative/examples/gemma/convert_gemma2_to_tflite.py +7 -44
- ai_edge_torch/generative/examples/gemma3/convert_gemma3_to_tflite.py +10 -45
- ai_edge_torch/generative/examples/gemma3/verify_gemma3.py +90 -0
- ai_edge_torch/generative/examples/gemma3/verify_util.py +247 -0
- ai_edge_torch/generative/examples/llama/convert_to_tflite.py +9 -43
- ai_edge_torch/generative/examples/openelm/convert_to_tflite.py +7 -44
- ai_edge_torch/generative/examples/paligemma/convert_to_tflite.py +8 -39
- ai_edge_torch/generative/examples/phi/convert_phi3_to_tflite.py +7 -44
- ai_edge_torch/generative/examples/phi/convert_phi4_to_tflite.py +7 -44
- ai_edge_torch/generative/examples/phi/convert_to_tflite.py +7 -42
- ai_edge_torch/generative/examples/qwen/convert_to_tflite.py +8 -45
- ai_edge_torch/generative/examples/qwen_vl/convert_to_tflite.py +8 -39
- ai_edge_torch/generative/examples/smollm/convert_to_tflite.py +8 -43
- ai_edge_torch/generative/examples/smollm/convert_v2_to_tflite.py +8 -43
- ai_edge_torch/generative/examples/tiny_llama/convert_to_tflite.py +7 -44
- ai_edge_torch/generative/utilities/converter.py +45 -0
- ai_edge_torch/version.py +1 -1
- {ai_edge_torch_nightly-0.4.0.dev20250329.dist-info → ai_edge_torch_nightly-0.4.0.dev20250331.dist-info}/METADATA +1 -1
- {ai_edge_torch_nightly-0.4.0.dev20250329.dist-info → ai_edge_torch_nightly-0.4.0.dev20250331.dist-info}/RECORD +25 -23
- {ai_edge_torch_nightly-0.4.0.dev20250329.dist-info → ai_edge_torch_nightly-0.4.0.dev20250331.dist-info}/LICENSE +0 -0
- {ai_edge_torch_nightly-0.4.0.dev20250329.dist-info → ai_edge_torch_nightly-0.4.0.dev20250331.dist-info}/WHEEL +0 -0
- {ai_edge_torch_nightly-0.4.0.dev20250329.dist-info → ai_edge_torch_nightly-0.4.0.dev20250331.dist-info}/top_level.txt +0 -0
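The recurring change across the example converters is the removal of per-script absl flag definitions in favor of a shared helper, converter.define_conversion_flags(), added to ai_edge_torch/generative/utilities/converter.py. A minimal sketch of the new pattern, using only names that appear in the hunks below (illustrative, not a complete conversion script):

    from ai_edge_torch.generative.utilities import converter

    # Registers the shared flags once: checkpoint_path, output_path,
    # output_name_prefix, prefill_seq_lens, kv_cache_max_len, quantize, lora_ranks.
    flags = converter.define_conversion_flags('qwen')

    # Values are then read through flags.FLAGS instead of per-script constants,
    # e.g. flags.FLAGS.checkpoint_path or flags.FLAGS.prefill_seq_lens.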
ai_edge_torch/generative/examples/qwen/convert_to_tflite.py
CHANGED
@@ -16,56 +16,20 @@
 """Example of converting Qwen 2.5 models to multi-signature tflite model."""
 
 import os
-import pathlib
-
 from absl import app
 from absl import flags
 from ai_edge_torch.generative.examples.qwen import qwen
 from ai_edge_torch.generative.utilities import converter
 from ai_edge_torch.generative.utilities.model_builder import ExportConfig
 
+flags = converter.define_conversion_flags('qwen')
+
 _MODEL_SIZE = flags.DEFINE_enum(
     'model_size',
     '3b',
     ['0.5b', '1.5b', '3b'],
     'The size of the model to convert.',
 )
-_CHECKPOINT_PATH = flags.DEFINE_string(
-    'checkpoint_path',
-    os.path.join(pathlib.Path.home(), 'Downloads/llm_data/qwen'),
-    'The path to the model checkpoint, or directory holding the checkpoint.',
-)
-_OUTPUT_PATH = flags.DEFINE_string(
-    'output_path',
-    '/tmp/',
-    'The path to export the tflite model.',
-)
-_OUTPUT_NAME_PREFIX = flags.DEFINE_string(
-    'output_name_prefix',
-    'qwen',
-    'The prefix of the output tflite model name.',
-)
-_PREFILL_SEQ_LENS = flags.DEFINE_multi_integer(
-    'prefill_seq_lens',
-    (8, 64, 128, 256, 512, 1024),
-    'List of the maximum sizes of prefill input tensors.',
-)
-_KV_CACHE_MAX_LEN = flags.DEFINE_integer(
-    'kv_cache_max_len',
-    1280,
-    'The maximum size of KV cache buffer, including both prefill and decode.',
-)
-_QUANTIZE = flags.DEFINE_bool(
-    'quantize',
-    True,
-    'Whether the model should be quantized.',
-)
-_LORA_RANKS = flags.DEFINE_multi_integer(
-    'lora_ranks',
-    None,
-    'If set, the model will be converted with the provided list of LoRA ranks.',
-)
-
 
 _BUILDER = {
     '0.5b': qwen.build_0_5b_model,
@@ -73,18 +37,17 @@ _BUILDER = {
     '3b': qwen.build_3b_model,
 }
 
-
 def main(_):
   pytorch_model = _BUILDER[_MODEL_SIZE.value](
-
+      flags.FLAGS.checkpoint_path, kv_cache_max_len=flags.FLAGS.kv_cache_max_len
   )
   converter.convert_to_tflite(
       pytorch_model,
-      output_path=
-      output_name_prefix=
-      prefill_seq_len=
-      quantize=
-      lora_ranks=
+      output_path=flags.FLAGS.output_path,
+      output_name_prefix=flags.FLAGS.output_name_prefix,
+      prefill_seq_len=flags.FLAGS.prefill_seq_lens,
+      quantize=flags.FLAGS.quantize,
+      lora_ranks=flags.FLAGS.lora_ranks,
       export_config=ExportConfig(),
   )
 
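Model-specific options remain defined locally on top of the shared set; because define_conversion_flags() returns the absl flags module, a script such as the Qwen converter still declares its own flags on it. A short sketch restating the hunk above:

    flags = converter.define_conversion_flags('qwen')

    # Model-specific flag, declared on the same absl flags module the helper returns.
    _MODEL_SIZE = flags.DEFINE_enum(
        'model_size',
        '3b',
        ['0.5b', '1.5b', '3b'],
        'The size of the model to convert.',
    )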
ai_edge_torch/generative/examples/qwen_vl/convert_to_tflite.py
CHANGED
@@ -16,39 +16,14 @@
 """Example of converting a Qwen 2.5 VL model to multi-signature tflite model."""
 
 import os
-import pathlib
-
 from absl import app
 from absl import flags
 from ai_edge_torch.generative.examples.qwen_vl import qwen_vl
 from ai_edge_torch.generative.utilities import converter
 from ai_edge_torch.generative.utilities.model_builder import ExportConfig
 
-
-
-    os.path.join(pathlib.Path.home(), 'Downloads/llm_data/qwen-vl'),
-    'The path to the model checkpoint, or directory holding the checkpoint.',
-)
-_OUTPUT_PATH = flags.DEFINE_string(
-    'output_path',
-    '/tmp/',
-    'The path to export the tflite model.',
-)
-_OUTPUT_NAME_PREFIX = flags.DEFINE_string(
-    'output_name_prefix',
-    'qwen_vl',
-    'The prefix of the output tflite model name.',
-)
-_PREFILL_SEQ_LEN = flags.DEFINE_integer(
-    'prefill_seq_len',
-    1024,
-    'The maximum size of prefill input tensor.',
-)
-_KV_CACHE_MAX_LEN = flags.DEFINE_integer(
-    'kv_cache_max_len',
-    1280,
-    'The maximum size of KV cache buffer, including both prefill and decode.',
-)
+flags = converter.define_conversion_flags('qwen_vl')
+
 _IMAGE_HEIGHT = flags.DEFINE_integer(
     'image_height',
     34 * 14,
@@ -59,30 +34,24 @@ _IMAGE_WIDTH = flags.DEFINE_integer(
     46 * 14,
     'The width of image.',
 )
-_QUANTIZE = flags.DEFINE_bool(
-    'quantize',
-    True,
-    'Whether the model should be quantized.',
-)
-
 
 def main(_):
   pytorch_model = qwen_vl.build_model(
-
-      kv_cache_max_len=
+      flags.FLAGS.checkpoint_path,
+      kv_cache_max_len=flags.FLAGS.kv_cache_max_len,
       image_size=(_IMAGE_HEIGHT.value, _IMAGE_WIDTH.value),
   )
 
   grid_thw = pytorch_model.image_encoder.get_grid_thw()
   converter.convert_to_tflite(
       pytorch_model,
-      output_path=
-      output_name_prefix=
-      prefill_seq_len=
+      output_path=flags.FLAGS.output_path,
+      output_name_prefix=flags.FLAGS.output_name_prefix,
+      prefill_seq_len=flags.FLAGS.prefill_seq_lens,
       pixel_values_size=(
           pytorch_model.image_encoder.get_pixel_values_size(grid_thw)
       ),
-      quantize=
+      quantize=flags.FLAGS.quantize,
       config=pytorch_model.config.decoder_config,
       export_config=ExportConfig(),
   )
ai_edge_torch/generative/examples/smollm/convert_to_tflite.py
CHANGED
@@ -16,49 +16,14 @@
 """Example of converting SmolLM model to multi-signature tflite model."""
 
 import os
-import pathlib
-
 from absl import app
 from absl import flags
 from ai_edge_torch.generative.examples.smollm import smollm
 from ai_edge_torch.generative.utilities import converter
 from ai_edge_torch.generative.utilities import model_builder
 
-
-
-    os.path.join(pathlib.Path.home(), 'Downloads/llm_data/smollm'),
-    'The path to the model checkpoint, or directory holding the checkpoint.',
-)
-_OUTPUT_PATH = flags.DEFINE_string(
-    'output_path',
-    '/tmp/',
-    'The path to export the tflite model.',
-)
-_OUTPUT_NAME_PREFIX = flags.DEFINE_string(
-    'output_name_prefix',
-    'smollm',
-    'The prefix of the output tflite model name.',
-)
-_PREFILL_SEQ_LENS = flags.DEFINE_multi_integer(
-    'prefill_seq_lens',
-    (8, 64, 128, 256, 512, 1024),
-    'List of the maximum sizes of prefill input tensors.',
-)
-_KV_CACHE_MAX_LEN = flags.DEFINE_integer(
-    'kv_cache_max_len',
-    1280,
-    'The maximum size of KV cache buffer, including both prefill and decode.',
-)
-_QUANTIZE = flags.DEFINE_bool(
-    'quantize',
-    True,
-    'Whether the model should be quantized.',
-)
-_LORA_RANKS = flags.DEFINE_multi_integer(
-    'lora_ranks',
-    None,
-    'If set, the model will be converted with the provided list of LoRA ranks.',
-)
+flags = converter.define_conversion_flags('smollm')
+
 _DECODE_BATCH_SIZE = flags.DEFINE_integer(
     'decode_batch_size',
     1,
@@ -68,15 +33,15 @@ _DECODE_BATCH_SIZE = flags.DEFINE_integer(
 
 def main(_):
   pytorch_model = smollm.build_model(
-
+      flags.FLAGS.checkpoint_path, kv_cache_max_len=flags.FLAGS.kv_cache_max_len
   )
   converter.convert_to_tflite(
       pytorch_model,
-      output_path=
-      output_name_prefix=
-      prefill_seq_len=
-      quantize=
-      lora_ranks=
+      output_path=flags.FLAGS.output_path,
+      output_name_prefix=flags.FLAGS.output_name_prefix,
+      prefill_seq_len=flags.FLAGS.prefill_seq_lens,
+      quantize=flags.FLAGS.quantize,
+      lora_ranks=flags.FLAGS.lora_ranks,
       export_config=model_builder.ExportConfig(
           decode_batch_size=_DECODE_BATCH_SIZE.value
       ),
ai_edge_torch/generative/examples/smollm/convert_v2_to_tflite.py
CHANGED
@@ -16,49 +16,14 @@
 """Example of converting SmolLM2 model to multi-signature tflite model."""
 
 import os
-import pathlib
-
 from absl import app
 from absl import flags
 from ai_edge_torch.generative.examples.smollm import smollm
 from ai_edge_torch.generative.utilities import converter
 from ai_edge_torch.generative.utilities import model_builder
 
-
-
-    os.path.join(pathlib.Path.home(), 'Downloads/llm_data/smollm2'),
-    'The path to the model checkpoint, or directory holding the checkpoint.',
-)
-_OUTPUT_PATH = flags.DEFINE_string(
-    'output_path',
-    '/tmp/',
-    'The path to export the tflite model.',
-)
-_OUTPUT_NAME_PREFIX = flags.DEFINE_string(
-    'output_name_prefix',
-    'smollm2',
-    'The prefix of the output tflite model name.',
-)
-_PREFILL_SEQ_LENS = flags.DEFINE_multi_integer(
-    'prefill_seq_lens',
-    (8, 64, 128, 256, 512, 1024),
-    'List of the maximum sizes of prefill input tensors.',
-)
-_KV_CACHE_MAX_LEN = flags.DEFINE_integer(
-    'kv_cache_max_len',
-    1280,
-    'The maximum size of KV cache buffer, including both prefill and decode.',
-)
-_QUANTIZE = flags.DEFINE_bool(
-    'quantize',
-    True,
-    'Whether the model should be quantized.',
-)
-_LORA_RANKS = flags.DEFINE_multi_integer(
-    'lora_ranks',
-    None,
-    'If set, the model will be converted with the provided list of LoRA ranks.',
-)
+flags = converter.define_conversion_flags('smollm2')
+
 _DECODE_BATCH_SIZE = flags.DEFINE_integer(
     'decode_batch_size',
     1,
@@ -68,16 +33,16 @@ _DECODE_BATCH_SIZE = flags.DEFINE_integer(
 
 def main(_):
   pytorch_model = smollm.build_model_v2(
-
+      flags.FLAGS.checkpoint_path, kv_cache_max_len=flags.FLAGS.kv_cache_max_len
   )
 
   converter.convert_to_tflite(
       pytorch_model,
-      output_path=
-      output_name_prefix=
-      prefill_seq_len=
-      quantize=
-      lora_ranks=
+      output_path=flags.FLAGS.output_path,
+      output_name_prefix=flags.FLAGS.output_name_prefix,
+      prefill_seq_len=flags.FLAGS.prefill_seq_lens,
+      quantize=flags.FLAGS.quantize,
+      lora_ranks=flags.FLAGS.lora_ranks,
       export_config=model_builder.ExportConfig(
           decode_batch_size=_DECODE_BATCH_SIZE.value
       ),
ai_edge_torch/generative/examples/tiny_llama/convert_to_tflite.py
CHANGED
@@ -16,62 +16,25 @@
 """Example of converting TinyLlama model to multi-signature tflite model."""
 
 import os
-import pathlib
-
 from absl import app
 from absl import flags
 from ai_edge_torch.generative.examples.tiny_llama import tiny_llama
 from ai_edge_torch.generative.utilities import converter
 from ai_edge_torch.generative.utilities.model_builder import ExportConfig
 
-
-    'checkpoint_path',
-    os.path.join(pathlib.Path.home(), 'Downloads/llm_data/tiny_llama'),
-    'The path to the model checkpoint, or directory holding the checkpoint.',
-)
-_OUTPUT_PATH = flags.DEFINE_string(
-    'output_path',
-    '/tmp/',
-    'The path to export the tflite model.',
-)
-_OUTPUT_NAME_PREFIX = flags.DEFINE_string(
-    'output_name_prefix',
-    'tinyllama',
-    'The prefix of the output tflite model name.',
-)
-_PREFILL_SEQ_LENS = flags.DEFINE_multi_integer(
-    'prefill_seq_lens',
-    (8, 64, 128, 256, 512, 1024),
-    'List of the maximum sizes of prefill input tensors.',
-)
-_KV_CACHE_MAX_LEN = flags.DEFINE_integer(
-    'kv_cache_max_len',
-    1280,
-    'The maximum size of KV cache buffer, including both prefill and decode.',
-)
-_QUANTIZE = flags.DEFINE_bool(
-    'quantize',
-    True,
-    'Whether the model should be quantized.',
-)
-_LORA_RANKS = flags.DEFINE_multi_integer(
-    'lora_ranks',
-    None,
-    'If set, the model will be converted with the provided list of LoRA ranks.',
-)
-
+flags = converter.define_conversion_flags("tiny_llama")
 
 def main(_):
   pytorch_model = tiny_llama.build_model(
-
+      flags.FLAGS.checkpoint_path, kv_cache_max_len=flags.FLAGS.kv_cache_max_len
  )
   converter.convert_to_tflite(
       pytorch_model,
-      output_path=
-      output_name_prefix=
-      prefill_seq_len=
-      quantize=
-      lora_ranks=
+      output_path=flags.FLAGS.output_path,
+      output_name_prefix=flags.FLAGS.output_name_prefix,
+      prefill_seq_len=flags.FLAGS.prefill_seq_lens,
+      quantize=flags.FLAGS.quantize,
+      lora_ranks=flags.FLAGS.lora_ranks,
       export_config=ExportConfig(),
   )
 
ai_edge_torch/generative/utilities/converter.py
CHANGED
@@ -16,7 +16,9 @@
 """Common utility functions for model conversion."""
 
 import os
+import pathlib
 from typing import Optional, Union
+from absl import flags
 from ai_edge_torch._convert import converter as converter_utils
 from ai_edge_torch.generative.layers import lora as lora_utils
 import ai_edge_torch.generative.layers.model_config as cfg
@@ -37,6 +39,49 @@ class ExportableModule(torch.nn.Module):
     return self.module(*export_args, **full_kwargs)
 
 
+def define_conversion_flags(model_name: str):
+  """Defines common flags used for model conversion."""
+
+  flags.DEFINE_string(
+      'checkpoint_path',
+      os.path.join(pathlib.Path.home(), f'Downloads/llm_data/{model_name}'),
+      'The path to the model checkpoint, or directory holding the checkpoint.',
+  )
+  flags.DEFINE_string(
+      'output_path',
+      '/tmp/',
+      'The path to export the tflite model.',
+  )
+  flags.DEFINE_string(
+      'output_name_prefix',
+      'qwen',
+      'The prefix of the output tflite model name.',
+  )
+  flags.DEFINE_multi_integer(
+      'prefill_seq_lens',
+      (8, 64, 128, 256, 512, 1024),
+      'List of the maximum sizes of prefill input tensors.',
+  )
+  flags.DEFINE_integer(
+      'kv_cache_max_len',
+      1280,
+      'The maximum size of KV cache buffer, including both prefill and decode.',
+  )
+  flags.DEFINE_bool(
+      'quantize',
+      True,
+      'Whether the model should be quantized.',
+  )
+  flags.DEFINE_multi_integer(
+      'lora_ranks',
+      None,
+      'If set, the model will be converted with the provided list of LoRA'
+      ' ranks.',
+  )
+
+  return flags
+
+
 def convert_to_tflite(
     pytorch_model: torch.nn.Module,
     output_path: str,
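Because the helper only registers flags on absl's global registry and returns the flags module, its defaults can be inspected without running a conversion. A minimal sketch under that assumption (the mark_as_parsed() call is needed only because no app.run() parses argv here; the printed defaults are taken from the hunk above):

    from ai_edge_torch.generative.utilities import converter

    flags = converter.define_conversion_flags('tiny_llama')
    flags.FLAGS.mark_as_parsed()  # allow reading defaults outside app.run()

    print(flags.FLAGS.checkpoint_path)     # <home>/Downloads/llm_data/tiny_llama
    print(flags.FLAGS.prefill_seq_lens)    # [8, 64, 128, 256, 512, 1024]
    print(flags.FLAGS.kv_cache_max_len)    # 1280
    print(flags.FLAGS.quantize)            # True
    print(flags.FLAGS.output_name_prefix)  # 'qwen' (shared default, regardless of model_name)

Note that output_name_prefix defaults to 'qwen' for every model, so callers that want a different file name prefix must pass --output_name_prefix explicitly.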
ai_edge_torch/version.py
CHANGED
{ai_edge_torch_nightly-0.4.0.dev20250329.dist-info → ai_edge_torch_nightly-0.4.0.dev20250331.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-edge-torch-nightly
-Version: 0.4.0.dev20250329
+Version: 0.4.0.dev20250331
 Summary: Supporting PyTorch models with the Google AI Edge TFLite runtime.
 Home-page: https://github.com/google-ai-edge/ai-edge-torch
 Keywords: On-Device ML,AI,Google,TFLite,PyTorch,LLMs,GenAI
{ai_edge_torch_nightly-0.4.0.dev20250329.dist-info → ai_edge_torch_nightly-0.4.0.dev20250331.dist-info}/RECORD
CHANGED
@@ -2,7 +2,7 @@ ai_edge_torch/__init__.py,sha256=8sPR_5uXJA4NEE0nIwNdSl-ADOJEoR8hAgYvBQDY70Y,120
 ai_edge_torch/_config.py,sha256=AiqhbcheF7j_ozIGDLC89k1we95aVgFDa-tR6h7UI0s,2529
 ai_edge_torch/conftest.py,sha256=r0GTrhMRhlmOGrrkvumHN8hkmyug6WvF60vWq8wRIBI,758
 ai_edge_torch/model.py,sha256=N-pNpTxzhaFGhWhnSGd70lBzb9VlEhTOq5mddU7bvvI,5542
-ai_edge_torch/version.py,sha256=
+ai_edge_torch/version.py,sha256=n6t63-ZajgC0tcWwi4U40LTERad0h0oOButBq4PVu4M,706
 ai_edge_torch/_convert/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/_convert/conversion.py,sha256=gpXQnifODU-mWxkUZw_3ov1lEYBw1SPVIcqj5k7pTGo,5550
 ai_edge_torch/_convert/conversion_utils.py,sha256=Sr8qXVcTwc-ZnZmK7yxVrIOOp1S_vNrwzC0zUvLTI2o,2160
@@ -50,38 +50,40 @@ ai_edge_torch/generative/custom_ops/dynamic_update_slice.py,sha256=ZGAq2CfWZsfef
 ai_edge_torch/generative/examples/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/amd_llama_135m/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/amd_llama_135m/amd_llama_135m.py,sha256=urNif89PyCXbdXT5spOeDvdM5luJ-a5HaXHM86v4JnU,2766
-ai_edge_torch/generative/examples/amd_llama_135m/convert_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/amd_llama_135m/convert_to_tflite.py,sha256=aHWsVo_oKpbkEfpQpyPb5DiE9OsUNPBnyjGRHRsVQvs,1589
 ai_edge_torch/generative/examples/amd_llama_135m/verify.py,sha256=o13NkFlBgawBsjdJup05VMUjAPvDRAmig6VyEkX8q6U,2426
 ai_edge_torch/generative/examples/deepseek/__init__.py,sha256=JaAnrFoXTl3RJX97XspklkTyqOHVyAgRJsZtzNDd10c,671
-ai_edge_torch/generative/examples/deepseek/convert_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/deepseek/convert_to_tflite.py,sha256=0RY9zQJX2RFD0HdagcfACYN2tYmQACJqHafgbm2vJZU,1573
 ai_edge_torch/generative/examples/deepseek/deepseek.py,sha256=AOAJ7ltXwY5IbmcCP2nVHW9FmRwexzfNxnoDlR-sW9c,2885
 ai_edge_torch/generative/examples/deepseek/verify.py,sha256=iYldze-pvZGvPkkqr6zA7EmitPnH9sXkzjNVx353IcE,2403
 ai_edge_torch/generative/examples/gemma/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/gemma/convert_gemma1_to_tflite.py,sha256=
-ai_edge_torch/generative/examples/gemma/convert_gemma2_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/gemma/convert_gemma1_to_tflite.py,sha256=ylXh2lyu9fRADCV1AEc63dwu_AhoW3MBDrSOdPSzWfQ,1532
+ai_edge_torch/generative/examples/gemma/convert_gemma2_to_tflite.py,sha256=BPtCVR4aaRxUeBHsp--nZH1pzmIrisajiIM1pDIB6gw,1556
 ai_edge_torch/generative/examples/gemma/gemma1.py,sha256=w8oWYibZzvEvCDyp39EYyAWmjgJljhzdYPyFCfAWxZA,3497
 ai_edge_torch/generative/examples/gemma/gemma2.py,sha256=lR-T25GkjCfd_sN8mAKY_0XNA0MEkMgsj4ZBQnnytHo,11465
 ai_edge_torch/generative/examples/gemma/verify_gemma1.py,sha256=ip-Gmk4CI5f0GWSdAIdrectxQWJ0t328KCsA4nfHuGg,1736
 ai_edge_torch/generative/examples/gemma/verify_gemma2.py,sha256=IoBhEMwH07-tFm5-U6F2hpCsI8xynglhq1x9tIOdaPQ,1322
 ai_edge_torch/generative/examples/gemma/verify_util.py,sha256=tR8RflXocDZqvuStyw9aFlzuiTllEC8rNnjrxms6_Is,5727
 ai_edge_torch/generative/examples/gemma3/__init__.py,sha256=JaAnrFoXTl3RJX97XspklkTyqOHVyAgRJsZtzNDd10c,671
-ai_edge_torch/generative/examples/gemma3/convert_gemma3_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/gemma3/convert_gemma3_to_tflite.py,sha256=i_3kpz2wT2La9047ISoOQigUw7JOhpg2QMHrXp0J6Ic,2955
 ai_edge_torch/generative/examples/gemma3/decoder.py,sha256=4Vf1zA94qLyNzj9iLU0jrd3kzFFZXft4uiItoIBjKyM,15632
 ai_edge_torch/generative/examples/gemma3/gemma3.py,sha256=NQzqZ55cmC8tGlZ1SKkDeD0Su8mZ79KiazCS8X08xUY,6473
 ai_edge_torch/generative/examples/gemma3/image_encoder.py,sha256=uRoLoBWzFtQz5wFZfPCxbkvZsgPAqSkUUsV3977GbYc,5184
+ai_edge_torch/generative/examples/gemma3/verify_gemma3.py,sha256=v8oNXFICmVOtQxfO7IhZ8GnbvotEkDi9lzYHjoQyOso,2464
+ai_edge_torch/generative/examples/gemma3/verify_util.py,sha256=u30qiZu3HJCTt5noWqtf9PgGLKQ87ke4Zpa4cpG6-As,8883
 ai_edge_torch/generative/examples/llama/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/llama/convert_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/llama/convert_to_tflite.py,sha256=euNMhQ-O7PUQ0v2ootyCbh7yra378toBdCP3TRjNMiQ,1765
 ai_edge_torch/generative/examples/llama/llama.py,sha256=UKvMO85_5z1vEY5MVu6QBW_vpQYA8LWHbJI4Yx6BrCc,6592
 ai_edge_torch/generative/examples/llama/verify.py,sha256=X7oKQi85M789ugBrOlMvzk8eSRR3Kf1Mprfl-U-WIpo,2842
 ai_edge_torch/generative/examples/moonshine/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/moonshine/convert_moonshine_to_tflite.py,sha256=7m3rYRzThRDYb-7pGnpLr3ACi4PWX07Mg20Q98ArPc4,1714
 ai_edge_torch/generative/examples/moonshine/moonshine.py,sha256=nZ2b8u4TmsB5sgdClgAuH8E78bcTv9RCnF9666HqP2M,3394
 ai_edge_torch/generative/examples/openelm/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/openelm/convert_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/openelm/convert_to_tflite.py,sha256=tlBBAQ33T4CHgZOclPf735gLUxnkTkXpruLObu-_RDg,1554
 ai_edge_torch/generative/examples/openelm/openelm.py,sha256=sIJ8Ie1oxFrJM-1jvv2ukiJbQOTIUGuMEZvmwZbt3n0,4556
 ai_edge_torch/generative/examples/openelm/verify.py,sha256=4W26ZtPF5Cb9mpHYuRM4b2QB_4W76zf4WV36KzexVjs,2446
 ai_edge_torch/generative/examples/paligemma/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/paligemma/convert_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/paligemma/convert_to_tflite.py,sha256=H8PKHHxeoVSF5KmDVcdfnoFU4loMQXrpvdvXt1ATSOg,1959
 ai_edge_torch/generative/examples/paligemma/decoder.py,sha256=z658dW_D0Iqvo6xnh4vG7_o17-Fufndyis8Rq5yafJY,5439
 ai_edge_torch/generative/examples/paligemma/decoder2.py,sha256=GZa0Ou_DvOijB2nTL_jRvGbn0_dvJPosQAPf47yqicw,5988
 ai_edge_torch/generative/examples/paligemma/image_encoder.py,sha256=SvuR97sjkBtfkerH7Hu1UXB8kCFLpEATNbPfCbNAyfo,5614
@@ -91,9 +93,9 @@ ai_edge_torch/generative/examples/paligemma/verify_decoder.py,sha256=al5wMPWri4I
 ai_edge_torch/generative/examples/paligemma/verify_decoder2.py,sha256=tm-UfLr0YeBRVcQsWLBOMWI9JUzHmtPEbYK2vpITpqY,2534
 ai_edge_torch/generative/examples/paligemma/verify_image_encoder.py,sha256=vNm-wTT8BD6zbX6GocfP1QrVoHl0zSvuVxoXN36eeiU,3540
 ai_edge_torch/generative/examples/phi/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/phi/convert_phi3_to_tflite.py,sha256=
-ai_edge_torch/generative/examples/phi/convert_phi4_to_tflite.py,sha256=
-ai_edge_torch/generative/examples/phi/convert_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/phi/convert_phi3_to_tflite.py,sha256=OyblvnMpEPmHQ-ifBPiQUbECbB-3BkoiHebwL_6bB_k,1543
+ai_edge_torch/generative/examples/phi/convert_phi4_to_tflite.py,sha256=3xwSUl-eA1-2hJo2bgh6BRDWUz9YTcDIZ_3h8CsW5ys,1541
+ai_edge_torch/generative/examples/phi/convert_to_tflite.py,sha256=M2m359q2jNXzBIlAn1Jf9knGoYT3XNAYzzUQFf6LzMI,1557
 ai_edge_torch/generative/examples/phi/phi2.py,sha256=c6PYCky7yJn6MVIYOCTx8S_CH27kOPmJbRZcI95nbZs,3477
 ai_edge_torch/generative/examples/phi/phi3.py,sha256=ddo52Inl5ub81q460cEyKhnsC3txellRErut-_qtBbM,6949
 ai_edge_torch/generative/examples/phi/phi4.py,sha256=OkMwLGe8l2JEAgOFi19AdbNBl1xp1djZBZo8MJP58ho,5732
@@ -101,11 +103,11 @@ ai_edge_torch/generative/examples/phi/verify.py,sha256=YPFCdbnfmvq38fbpBNr0kHPfS
 ai_edge_torch/generative/examples/phi/verify_phi3.py,sha256=kVYaBVvddfQng0IyZGxyTJEzhiPO0G4VFJm2WOc2Q94,2360
 ai_edge_torch/generative/examples/phi/verify_phi4.py,sha256=BoCa5kUBRHtMQ-5ql6yD4pG4xHJMyUiQlpMOWVx-JgY,2356
 ai_edge_torch/generative/examples/qwen/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/qwen/convert_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/qwen/convert_to_tflite.py,sha256=vx4gsuqsHTUiME68Q6Xkqs4lwvhBy8Cm9dQWVmqZEbY,1805
 ai_edge_torch/generative/examples/qwen/qwen.py,sha256=Zi_qiQ1JPokXZ95jgSEnQp3F-LKzFCvWvFLKhJjnASo,4199
 ai_edge_torch/generative/examples/qwen/verify.py,sha256=9_AyEJTeUfvhhID64Rto2bflFPyXMFokdQLsseLUMiI,2775
 ai_edge_torch/generative/examples/qwen_vl/__init__.py,sha256=JaAnrFoXTl3RJX97XspklkTyqOHVyAgRJsZtzNDd10c,671
-ai_edge_torch/generative/examples/qwen_vl/convert_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/qwen_vl/convert_to_tflite.py,sha256=5r3jOXMYooS5pfc9-RtceUa4o5FFvDEztK-i_y1WLZE,1998
 ai_edge_torch/generative/examples/qwen_vl/decoder.py,sha256=0x4iDg2cBe3PFnjVce3nj7g2rjagGHcKqRCfbASNxA8,4402
 ai_edge_torch/generative/examples/qwen_vl/image_encoder.py,sha256=nHzBe_YSPnUe1d5i09v4bePQomVifzJNeUjRfprmxC0,14878
 ai_edge_torch/generative/examples/qwen_vl/qwen_vl.py,sha256=rcYHkpO-NbF4F1Da7q2xNiTng9NHiLx59HyuOgQX5W0,7753
@@ -113,8 +115,8 @@ ai_edge_torch/generative/examples/qwen_vl/verify.py,sha256=JUwHoC_zvcC3RC3wZ3e3e
 ai_edge_torch/generative/examples/qwen_vl/verify_decoder.py,sha256=xPWoOBLh2eK12KEhELLYymfL7xvc0chmYC98c6x37oo,2602
 ai_edge_torch/generative/examples/qwen_vl/verify_image_encoder.py,sha256=PZ392nDoJG2OmHZ_7Jet3Zu1JkN6QErxKcDc7a-PPds,3126
 ai_edge_torch/generative/examples/smollm/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/smollm/convert_to_tflite.py,sha256=
-ai_edge_torch/generative/examples/smollm/convert_v2_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/smollm/convert_to_tflite.py,sha256=bGeS_emY_TTYzBKwe4BIzUQRTf85pJHXNUIIFUtwMks,1737
+ai_edge_torch/generative/examples/smollm/convert_v2_to_tflite.py,sha256=5MZha6sFuIOG27h7Af44I0TvEpHfUjcWkHTgUyqeBiU,1743
 ai_edge_torch/generative/examples/smollm/smollm.py,sha256=3uUltb6D3Q1aHpndcYTJrsWM_RBwLAraKDniH8ZZous,3779
 ai_edge_torch/generative/examples/smollm/verify.py,sha256=KpYxVz_lv61YWy6HLfwT68n0owZMvty5Rr3W7ZNWWSw,2702
 ai_edge_torch/generative/examples/stable_diffusion/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
@@ -141,7 +143,7 @@ ai_edge_torch/generative/examples/test_models/convert_toy_model.py,sha256=6-WaNH
 ai_edge_torch/generative/examples/test_models/toy_model.py,sha256=Crpj-vOwSViHpblXOrRJmsIn4DrHyuB3XZ8kHifb7LA,5203
 ai_edge_torch/generative/examples/test_models/toy_model_with_kv_cache.py,sha256=Ab_N9xc-4DImA-Pvevr-nnnslBXScXVo4Pw7L3_OlhI,4732
 ai_edge_torch/generative/examples/tiny_llama/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/tiny_llama/convert_to_tflite.py,sha256=
+ai_edge_torch/generative/examples/tiny_llama/convert_to_tflite.py,sha256=YwI3JWoTFcsBQ3nLLbi3ig94p50sJOMguyguvBCpE-o,1568
 ai_edge_torch/generative/examples/tiny_llama/tiny_llama.py,sha256=mhJ18rb9sxrYRzv1YSzhbNs97oUZck99avZDcUO2oV8,2800
 ai_edge_torch/generative/examples/tiny_llama/verify.py,sha256=LRu6PSw7Lqu6HGbv1tO2i0nUCqe-VkRgboA10VZ7KNg,2431
 ai_edge_torch/generative/fx_passes/__init__.py,sha256=4rFrppMRKlTwwZeX1ON_cdp4yUqoTOES161IZQkJF6c,1143
@@ -183,7 +185,7 @@ ai_edge_torch/generative/test/test_model_conversion_large.py,sha256=-v2Vj7Qdd3Gy
 ai_edge_torch/generative/test/test_quantize.py,sha256=bEJMhpQ9bIDUZVBXTW888728FcH-i3SyE4JSZZUgU0A,6071
 ai_edge_torch/generative/test/utils.py,sha256=tF6aCfAGJnc9dmzCnZCEOuKNVimfWOqscv9og0DDLHU,2656
 ai_edge_torch/generative/utilities/__init__.py,sha256=-_jxnnFnCgnTU4oTm4MnRsvL5lqhomBNdFBbqfmfHPo,720
-ai_edge_torch/generative/utilities/converter.py,sha256=
+ai_edge_torch/generative/utilities/converter.py,sha256=Ol-CCztmjAaSeRGkUaKxA9-5ihATYuLS5kwZo1hpA-A,9577
 ai_edge_torch/generative/utilities/loader.py,sha256=KmbjlKpSJEYaYCy5gxOhiaFj6aVAniaBl-kALv_qsGs,13546
 ai_edge_torch/generative/utilities/model_builder.py,sha256=eY3qAcBhupIn955YnWuzUi9hoWYvl4ntRWA6PBudzMo,6888
 ai_edge_torch/generative/utilities/moonshine_loader.py,sha256=_RpFabSqtGH5PHiP3_1f6QfO14qMADUxr_HGRlVDFB0,4891
@@ -240,8 +242,8 @@ ai_edge_torch/testing/__init__.py,sha256=_yGgvnBZWb7T3IN3mc4x1sS4vM96HZwM8pwIcPG
 ai_edge_torch/testing/export.py,sha256=dguMa-aEi-WDPnmGBUs2IPdEmt2IVmHOELH19uiJ1uU,3014
 ai_edge_torch/testing/model_coverage/__init__.py,sha256=5P8J6Zk5YYtDvTBucFvB9NGSRI7Gw_24WnrbhXgycEE,765
 ai_edge_torch/testing/model_coverage/model_coverage.py,sha256=UPB448aMDUyC0HNYVqio2rcJPnDN0tBQMP08J6vPYew,4718
-ai_edge_torch_nightly-0.4.0.
-ai_edge_torch_nightly-0.4.0.
-ai_edge_torch_nightly-0.4.0.
-ai_edge_torch_nightly-0.4.0.
-ai_edge_torch_nightly-0.4.0.
+ai_edge_torch_nightly-0.4.0.dev20250331.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ai_edge_torch_nightly-0.4.0.dev20250331.dist-info/METADATA,sha256=rLjK4T1BNhrLkyWhYs1VM2yFinL94LN3xs0Xu3cTtOU,1966
+ai_edge_torch_nightly-0.4.0.dev20250331.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ai_edge_torch_nightly-0.4.0.dev20250331.dist-info/top_level.txt,sha256=5KXRaF2hwkApYxf7Y8y_tVb9aulGTlbOoNdbx1aKRkE,14
+ai_edge_torch_nightly-0.4.0.dev20250331.dist-info/RECORD,,