ai-edge-torch-nightly 0.6.0.dev20250530__py3-none-any.whl → 0.6.0.dev20250531__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ai_edge_torch/generative/examples/amd_llama_135m/verify_util.py CHANGED
@@ -16,11 +16,13 @@
 import logging
 import os
 import pathlib
+from typing import Callable, Dict
 
 from ai_edge_torch.generative.examples.amd_llama_135m import amd_llama_135m
 from ai_edge_torch.generative.utilities import loader
 from ai_edge_torch.generative.utilities import transformers_verifier
 from ai_edge_torch.generative.utilities import verifier
+import torch
 import transformers
 
 
@@ -31,8 +33,9 @@ def verify_amd_llama_135m(
     checkpoint_dir: str,
     weight_filename: str = "model.safetensors",
     max_new_tokens: int = 30,
-    initialize_from_local: bool = True,
     prompts: list[str] | None = None,
+    initialize_from_local: bool = True,
+    custom_loader: Callable[[str], Dict[str, torch.Tensor]] | None = None,
 ) -> bool:
   """Verifies the reauthored AMD-Llama-135M model with a custom loader."""
   logging.info("Loading the original model from: %s", checkpoint_dir)
@@ -41,11 +44,8 @@ def verify_amd_llama_135m(
   )
 
   logging.info("Building the reauthored model from: %s", checkpoint_dir)
-  custom_loader = (
-      None
-      if initialize_from_local
-      else loader.get_custom_loader("", "safetensors")
-  )
+  if custom_loader is None and not initialize_from_local:
+    custom_loader = loader.get_custom_loader("", "safetensors")
 
   if initialize_from_local:
     # Locate the cached dir.
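The change promotes `custom_loader` from a locally derived variable to a keyword argument, so callers can inject their own weight-reading function; the old `initialize_from_local` fallback now only applies when no loader is passed. The same edit is applied to each verify_util.py below. A minimal usage sketch of the new signature, with a hypothetical checkpoint directory (safetensors' `load_file` already matches the expected `Callable[[str], Dict[str, torch.Tensor]]` shape):

    from safetensors.torch import load_file
    from ai_edge_torch.generative.examples.amd_llama_135m import verify_util

    # Hypothetical checkpoint directory holding model.safetensors.
    ckpt_dir = "/tmp/amd-llama-135m"

    verify_util.verify_amd_llama_135m(
        checkpoint_dir=ckpt_dir,
        initialize_from_local=False,  # skip the HF cache lookup
        custom_loader=load_file,      # used instead of the safetensors default
    )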
ai_edge_torch/generative/examples/deepseek/verify_util.py CHANGED
@@ -16,11 +16,13 @@
 import logging
 import os
 import pathlib
+from typing import Callable, Dict
 
 from ai_edge_torch.generative.examples.deepseek import deepseek
 from ai_edge_torch.generative.utilities import loader
 from ai_edge_torch.generative.utilities import transformers_verifier
 from ai_edge_torch.generative.utilities import verifier
+import torch
 import transformers
 
 
@@ -31,8 +33,9 @@ def verify_deepseek_r1_distill_1_5b(
     checkpoint_dir: str,
     weight_filename: str = "model.safetensors",
     max_new_tokens: int = 30,
-    initialize_from_local: bool = True,
     prompts: list[str] | None = None,
+    initialize_from_local: bool = True,
+    custom_loader: Callable[[str], Dict[str, torch.Tensor]] | None = None,
 ) -> bool:
   """Verifies the reauthored DeepSeek R1 distilled 1.5B model with a custom loader."""
   logging.info("Loading the original model from: %s", checkpoint_dir)
@@ -41,11 +44,8 @@ def verify_deepseek_r1_distill_1_5b(
   )
 
   logging.info("Building the reauthored model from: %s", checkpoint_dir)
-  custom_loader = (
-      None
-      if initialize_from_local
-      else loader.get_custom_loader("", "safetensors")
-  )
+  if custom_loader is None and not initialize_from_local:
+    custom_loader = loader.get_custom_loader("", "safetensors")
 
   if initialize_from_local:
     # Locate the cached dir.
ai_edge_torch/generative/examples/llama/verify_util.py CHANGED
@@ -16,11 +16,13 @@
 import logging
 import os
 import pathlib
+from typing import Callable, Dict
 
 from ai_edge_torch.generative.examples.llama import llama
 from ai_edge_torch.generative.utilities import loader
 from ai_edge_torch.generative.utilities import transformers_verifier
 from ai_edge_torch.generative.utilities import verifier
+import torch
 import transformers
 
 _BUILDER = {
@@ -36,8 +38,9 @@ def verify_llama_3_2(
     checkpoint_dir: str,
     weight_filename: str = "model.safetensors",
     max_new_tokens: int = 30,
-    initialize_from_local: bool = True,
     prompts: list[str] | None = None,
+    initialize_from_local: bool = True,
+    custom_loader: Callable[[str], Dict[str, torch.Tensor]] | None = None,
 ) -> bool:
   """Verifies the reauthored Llama 3.2 model with a custom loader."""
   logging.info("Loading the original model from: %s", checkpoint_dir)
@@ -46,11 +49,8 @@ def verify_llama_3_2(
   )
 
   logging.info("Building the reauthored model from: %s", checkpoint_dir)
-  custom_loader = (
-      None
-      if initialize_from_local
-      else loader.get_custom_loader("", "safetensors")
-  )
+  if custom_loader is None and not initialize_from_local:
+    custom_loader = loader.get_custom_loader("", "safetensors")
 
   if initialize_from_local:
     # Locate the cached dir.
ai_edge_torch/generative/examples/openelm/verify_util.py CHANGED
@@ -16,11 +16,13 @@
 import logging
 import os
 import pathlib
+from typing import Callable, Dict
 
 from ai_edge_torch.generative.examples.openelm import openelm
 from ai_edge_torch.generative.utilities import loader
 from ai_edge_torch.generative.utilities import transformers_verifier
 from ai_edge_torch.generative.utilities import verifier
+import torch
 import transformers
 
 
@@ -31,8 +33,9 @@ def verify_openelm(
     checkpoint_dir: str,
     weight_filename: str = "model.safetensors",
     max_new_tokens: int = 30,
-    initialize_from_local: bool = True,
     prompts: list[str] | None = None,
+    initialize_from_local: bool = True,
+    custom_loader: Callable[[str], Dict[str, torch.Tensor]] | None = None,
 ) -> bool:
   """Verifies the reauthored OpenELM model with a custom loader."""
   logging.info("Loading the original model from: %s", checkpoint_dir)
@@ -41,11 +44,8 @@ def verify_openelm(
   )
 
   logging.info("Building the reauthored model from: %s", checkpoint_dir)
-  custom_loader = (
-      None
-      if initialize_from_local
-      else loader.get_custom_loader("", "safetensors")
-  )
+  if custom_loader is None and not initialize_from_local:
+    custom_loader = loader.get_custom_loader("", "safetensors")
 
   if initialize_from_local:
     # Locate the cached dir.
ai_edge_torch/generative/examples/phi/verify_util.py CHANGED
@@ -13,14 +13,17 @@
 # limitations under the License.
 # ==============================================================================
 """Utils for verifying the Phi model."""
+
 import logging
 import os
 import pathlib
+from typing import Callable, Dict
 
 from ai_edge_torch.generative.examples.phi import phi2, phi3, phi4
 from ai_edge_torch.generative.utilities import loader
 from ai_edge_torch.generative.utilities import transformers_verifier
 from ai_edge_torch.generative.utilities import verifier
+import torch
 import transformers
 
 
@@ -38,9 +41,10 @@ def verify_phi(
     checkpoint_dir: str,
     weight_filename: str = "model.safetensors",
     max_new_tokens: int = 30,
-    initialize_from_local: bool = True,
     prompts: list[str] | None = None,
     atol: float = 1e-04,
+    initialize_from_local: bool = True,
+    custom_loader: Callable[[str], Dict[str, torch.Tensor]] | None = None,
 ) -> bool:
   """Verifies the reauthored Phi model with a custom loader."""
   logging.info("Loading the original model from: %s", checkpoint_dir)
@@ -49,11 +53,8 @@ def verify_phi(
   )
 
   logging.info("Building the reauthored model from: %s", checkpoint_dir)
-  custom_loader = (
-      None
-      if initialize_from_local
-      else loader.get_custom_loader("", "safetensors")
-  )
+  if custom_loader is None and not initialize_from_local:
+    custom_loader = loader.get_custom_loader("", "safetensors")
 
   if initialize_from_local:
     # Locate the cached dir.
ai_edge_torch/generative/examples/qwen/verify_util.py CHANGED
@@ -13,14 +13,17 @@
 # limitations under the License.
 # ==============================================================================
 """Utils for verifying the Qwen model."""
+
 import logging
 import os
 import pathlib
+from typing import Callable, Dict
 
 from ai_edge_torch.generative.examples.qwen import qwen, qwen3
 from ai_edge_torch.generative.utilities import loader
 from ai_edge_torch.generative.utilities import transformers_verifier
 from ai_edge_torch.generative.utilities import verifier
+import torch
 import transformers
 
 
@@ -50,8 +53,9 @@ def verify_qwen(
     checkpoint_dir: str,
     weight_filename: str = "model.safetensors",
     max_new_tokens: int = 30,
-    initialize_from_local: bool = True,
     prompts: list[str] | None = None,
+    initialize_from_local: bool = True,
+    custom_loader: Callable[[str], Dict[str, torch.Tensor]] | None = None,
 ) -> bool:
   """Verifies the reauthored Llama 3.2 model with a custom loader."""
   logging.info("Loading the original model from: %s", checkpoint_dir)
@@ -60,11 +64,8 @@ def verify_qwen(
   )
 
   logging.info("Building the reauthored model from: %s", checkpoint_dir)
-  custom_loader = (
-      None
-      if initialize_from_local
-      else loader.get_custom_loader("", "safetensors")
-  )
+  if custom_loader is None and not initialize_from_local:
+    custom_loader = loader.get_custom_loader("", "safetensors")
 
   if initialize_from_local:
     # Locate the cached dir.
ai_edge_torch/generative/examples/smollm/verify_util.py CHANGED
@@ -16,11 +16,12 @@
 import logging
 import os
 import pathlib
-
+from typing import Callable, Dict
 from ai_edge_torch.generative.examples.smollm import smollm
 from ai_edge_torch.generative.utilities import loader
 from ai_edge_torch.generative.utilities import transformers_verifier
 from ai_edge_torch.generative.utilities import verifier
+import torch
 import transformers
 
 _BUILDER = {
@@ -36,8 +37,9 @@ def verify_smollm_135m(
     checkpoint_dir: str,
     weight_filename: str = "model.safetensors",
     max_new_tokens: int = 30,
-    initialize_from_local: bool = True,
     prompts: list[str] | None = None,
+    initialize_from_local: bool = True,
+    custom_loader: Callable[[str], Dict[str, torch.Tensor]] | None = None,
 ) -> bool:
   """Verifies the reauthored SmoLLM model with a custom loader."""
   logging.info("Loading the original model from: %s", checkpoint_dir)
@@ -46,11 +48,9 @@ def verify_smollm_135m(
   )
 
   logging.info("Building the reauthored model from: %s", checkpoint_dir)
-  custom_loader = (
-      None
-      if initialize_from_local
-      else loader.get_custom_loader("", "safetensors")
-  )
+
+  if custom_loader is None and not initialize_from_local:
+    custom_loader = loader.get_custom_loader("", "safetensors")
 
   if initialize_from_local:
     # Locate the cached dir.
ai_edge_torch/generative/examples/tiny_llama/verify_util.py CHANGED
@@ -13,14 +13,17 @@
 # limitations under the License.
 # ==============================================================================
 """Utils for verifying the TinyLlama model."""
+
 import logging
 import os
 import pathlib
+from typing import Callable, Dict
 
 from ai_edge_torch.generative.examples.tiny_llama import tiny_llama
 from ai_edge_torch.generative.utilities import loader
 from ai_edge_torch.generative.utilities import transformers_verifier
 from ai_edge_torch.generative.utilities import verifier
+import torch
 import transformers
 
 
@@ -31,8 +34,9 @@ def verify_tiny_llama(
     checkpoint_dir: str,
     weight_filename: str = "model.safetensors",
     max_new_tokens: int = 30,
-    initialize_from_local: bool = True,
     prompts: list[str] | None = None,
+    initialize_from_local: bool = True,
+    custom_loader: Callable[[str], Dict[str, torch.Tensor]] | None = None,
 ) -> bool:
   """Verifies the reauthored TinyLlama model with a custom loader."""
   logging.info("Loading the original model from: %s", checkpoint_dir)
@@ -41,11 +45,8 @@ def verify_tiny_llama(
   )
 
   logging.info("Building the reauthored model from: %s", checkpoint_dir)
-  custom_loader = (
-      None
-      if initialize_from_local
-      else loader.get_custom_loader("", "safetensors")
-  )
+  if custom_loader is None and not initialize_from_local:
+    custom_loader = loader.get_custom_loader("", "safetensors")
 
   if initialize_from_local:
     # Locate the cached dir.
ai_edge_torch/generative/utilities/loader.py CHANGED
@@ -23,14 +23,13 @@ from safetensors import safe_open
 from safetensors.torch import load_file
 import torch
 
-
 def get_custom_loader(
     checkpoint_path: str,
     checkpoint_format: Optional[str] = None,
 ) -> Callable[[str], Dict[str, torch.Tensor]]:
   """Returns a custom loader for the given checkpoint path.
 
-  Those customer loaders can either support state dictionary or safetensors, and
+  Those custome loaders can either support state dictionary or safetensors, and
   the actual data might be fetched from a remote source.
 
   Args:
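For reference, the loader returned by `get_custom_loader` maps a checkpoint path to a state dictionary, per the `Callable[[str], Dict[str, torch.Tensor]]` annotation above. A minimal sketch under that signature, using the same arguments the verify_util.py fallbacks pass and a hypothetical checkpoint path:

    import torch
    from ai_edge_torch.generative.utilities import loader

    # Request a safetensors-style loader; checkpoint_format is optional
    # per the signature, but is passed explicitly here.
    load_weights = loader.get_custom_loader("", "safetensors")

    # The returned callable maps a path to a Dict[str, torch.Tensor].
    tensors = load_weights("/tmp/amd-llama-135m/model.safetensors")  # hypothetical path
    assert all(isinstance(t, torch.Tensor) for t in tensors.values())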
ai_edge_torch/version.py CHANGED
@@ -15,4 +15,4 @@
 
 # The next version of ai-edge-torch.
 # The minor version code should be bumped after every release.
-__version__ = "0.6.0.dev20250530"
+__version__ = "0.6.0.dev20250531"
ai_edge_torch_nightly-0.6.0.dev20250531.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-edge-torch-nightly
-Version: 0.6.0.dev20250530
+Version: 0.6.0.dev20250531
 Summary: Supporting PyTorch models with the Google AI Edge TFLite runtime.
 Home-page: https://github.com/google-ai-edge/ai-edge-torch
 Keywords: On-Device ML,AI,Google,TFLite,PyTorch,LLMs,GenAI
ai_edge_torch_nightly-0.6.0.dev20250531.dist-info/RECORD CHANGED
@@ -2,7 +2,7 @@ ai_edge_torch/__init__.py,sha256=lemyLCNoGYRnJsmDuGZu7qOqLbLqG6CGDFtu3ue1syU,129
 ai_edge_torch/_config.py,sha256=AiqhbcheF7j_ozIGDLC89k1we95aVgFDa-tR6h7UI0s,2529
 ai_edge_torch/conftest.py,sha256=r0GTrhMRhlmOGrrkvumHN8hkmyug6WvF60vWq8wRIBI,758
 ai_edge_torch/model.py,sha256=A7loFu8jE9CsXsfMmHYZ-KDFJiaD8Kkqwm_9d3IVzk0,5638
-ai_edge_torch/version.py,sha256=B-sOsG_3lPrDKxH_MJPNpivWVftaRufBHKPbBig2z3E,806
+ai_edge_torch/version.py,sha256=Z8PdNXw5sVjFtOY2IFehsJi1BwTO9Mm-mSE2MZWv8Hg,806
 ai_edge_torch/_convert/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/_convert/conversion.py,sha256=iQk3R-pLq4c1nfLqPB4xTRj78gghxPGzJCJtILLdg5o,6123
 ai_edge_torch/_convert/conversion_utils.py,sha256=Sr8qXVcTwc-ZnZmK7yxVrIOOp1S_vNrwzC0zUvLTI2o,2160
@@ -56,12 +56,12 @@ ai_edge_torch/generative/examples/amd_llama_135m/__init__.py,sha256=hHLluseD2R0H
 ai_edge_torch/generative/examples/amd_llama_135m/amd_llama_135m.py,sha256=PTKcl-CHQnzExQSfrwG9YC0KPc8zomG7WlPabXtZLx4,2910
 ai_edge_torch/generative/examples/amd_llama_135m/convert_to_tflite.py,sha256=s2f5TJos6rSgogqeFk0qsOpI30qsR04umk9hAAZ5918,1782
 ai_edge_torch/generative/examples/amd_llama_135m/verify.py,sha256=uyBg5-trxQEjEHDZMX4qojkcsZgERUiPqIgR9n0_AY4,1311
-ai_edge_torch/generative/examples/amd_llama_135m/verify_util.py,sha256=_d4r1WgqyUqb7nPIhba8hZsrqqOEVc2AF30j1mTjQBw,2838
+ai_edge_torch/generative/examples/amd_llama_135m/verify_util.py,sha256=OJTjULdz-8LVd5DhmX8isxUI1PzhE-dQLj7--5DHfJ8,2961
 ai_edge_torch/generative/examples/deepseek/__init__.py,sha256=JaAnrFoXTl3RJX97XspklkTyqOHVyAgRJsZtzNDd10c,671
 ai_edge_torch/generative/examples/deepseek/convert_to_tflite.py,sha256=xTPfT3Mt_4bMfGkrqDKatLecZOuaE0WhxXs3uAsO_uU,1749
 ai_edge_torch/generative/examples/deepseek/deepseek.py,sha256=9gUnK1IOifQyYpm03f64Mzg-afwbYY9kVWz6-ynq8zY,3014
 ai_edge_torch/generative/examples/deepseek/verify.py,sha256=HkvgEyGb-V_f6mWfyeN7Ai5uADAVQNzWvkygaKJiLAc,1344
-ai_edge_torch/generative/examples/deepseek/verify_util.py,sha256=WIaDA0Iw_AM1tzligxY9hnJpaYljoqWQ2d0UrUHppMM,2848
+ai_edge_torch/generative/examples/deepseek/verify_util.py,sha256=jui_16J0C0VhICGBJDiO7Br5l5QCrhm-AohXYuUyyqQ,2971
 ai_edge_torch/generative/examples/gemma/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/gemma/convert_gemma1_to_tflite.py,sha256=t2qZTjyM2imPenb14fzbQ-CHj5Cejw4M5xfEZpgX6Uc,1748
 ai_edge_torch/generative/examples/gemma/convert_gemma2_to_tflite.py,sha256=Yj-b4S9BNxArnGjruRIymCiWrlf7ZvwiG6keTVGldk4,1816
@@ -86,7 +86,7 @@ ai_edge_torch/generative/examples/llama/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIX
 ai_edge_torch/generative/examples/llama/convert_to_tflite.py,sha256=4qnMyvJHqhqf9k01wEsO23BKo6tSy2KD7sHdTGimKGg,1957
 ai_edge_torch/generative/examples/llama/llama.py,sha256=eWPFnuSxhjuk5XZmvtndu_Z1-e9NlZg7-uFfiOqJXfw,6952
 ai_edge_torch/generative/examples/llama/verify.py,sha256=XoF_-kxdryjt0Bt_YeHnIbLfjwFxSVioTSEG75moDr8,1581
-ai_edge_torch/generative/examples/llama/verify_util.py,sha256=kDFRkly3wz0S_SIKAMAJkFuKciX3lJWj4c_4DwzV-J8,2896
+ai_edge_torch/generative/examples/llama/verify_util.py,sha256=JIAkG7TEaR_5scibyM-zgS9G2KrIfUBgX4vHDJwmaBo,3019
 ai_edge_torch/generative/examples/moonshine/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/moonshine/convert_moonshine_to_tflite.py,sha256=_GkaSkregS3NWN38UGXxj4pED5gtQGaaPZx5_CZ0TVM,1657
 ai_edge_torch/generative/examples/moonshine/moonshine.py,sha256=nZ2b8u4TmsB5sgdClgAuH8E78bcTv9RCnF9666HqP2M,3394
@@ -94,7 +94,7 @@ ai_edge_torch/generative/examples/openelm/__init__.py,sha256=hHLluseD2R0Hh4W6XZR
 ai_edge_torch/generative/examples/openelm/convert_to_tflite.py,sha256=S7OP8PJcOQbm8AHvi_Tc3qnQuVOtjMFNlwaZQ_oirUM,1747
 ai_edge_torch/generative/examples/openelm/openelm.py,sha256=R_E0hXsg6l8ANEgBBy0R8egz3p4ONJvBmPWs6sXx63M,4692
 ai_edge_torch/generative/examples/openelm/verify.py,sha256=kRoNEUEsrz51PFSeTPcrYsPBQRLtUmYM3t_-Jl0oFqM,1300
-ai_edge_torch/generative/examples/openelm/verify_util.py,sha256=VA08XH1sDCqozY7CTlOnMz_UT_eyObll-LO1Q60RCRs,2790
+ai_edge_torch/generative/examples/openelm/verify_util.py,sha256=arX-ZGWVFfsyZ7n_V8ivaw8fqpmK3CmcxfsS0b1pOBY,2913
 ai_edge_torch/generative/examples/paligemma/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/paligemma/convert_to_tflite.py,sha256=Fl4k-lcpiUaJS0A1E7HVVUW7iTcZAU4FbA4KcSkO5SQ,2212
 ai_edge_torch/generative/examples/paligemma/decoder.py,sha256=GLlfbJr3ZIzmH643IwXyrG54qKEYMPRsvhU6gXXi7yg,5490
@@ -115,7 +115,7 @@ ai_edge_torch/generative/examples/phi/phi4.py,sha256=ZHA0Rq7ifgxiHC_8PJf-y7WCA7i
 ai_edge_torch/generative/examples/phi/verify.py,sha256=fIWgqypLQ3uOQ1u5uuklYiQSJPhKCTYIBACjrp7DbMA,1346
 ai_edge_torch/generative/examples/phi/verify_phi3.py,sha256=TwIu2xUPQyMUTFdz29E2y75wfq4c1fGJnT3QfA3eS1s,1347
 ai_edge_torch/generative/examples/phi/verify_phi4.py,sha256=2MlgQrfRkhE7Dya8MIixGwpqEZYdPjQkUGB47Mt1hSI,1343
-ai_edge_torch/generative/examples/phi/verify_util.py,sha256=kRREOMSikn_BRbTDkQiXBllPZwmWHa9KUk-kK5lCkbU,2945
+ai_edge_torch/generative/examples/phi/verify_util.py,sha256=yIfeGP-VJ9CpeOxzhUe0tDCwMSriQVMGOWXEBxJRYcs,3069
 ai_edge_torch/generative/examples/qwen/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/qwen/convert_to_tflite.py,sha256=TnzyARHQgmWeOdYsV9WpRj5vhKGBH0kAbp3tMj8ZCYw,1998
 ai_edge_torch/generative/examples/qwen/convert_v3_to_tflite.py,sha256=GVV8CVj3rdgt_ZTOlpLSa6AD1pMMpMnZEuowzN2AIGM,2004
@@ -123,7 +123,7 @@ ai_edge_torch/generative/examples/qwen/qwen.py,sha256=EcIHVeBcJLc290TiPkPfE7jdG_
 ai_edge_torch/generative/examples/qwen/qwen3.py,sha256=g6aVHjnlPo4YhLjSdXxONaDcKT3fZOh8cewlvf3cfoQ,5554
 ai_edge_torch/generative/examples/qwen/verify_qwen2.py,sha256=ry-c2QesH-0KnrSQygfjUFs6d4kOFvJz2ts_8mP156I,1659
 ai_edge_torch/generative/examples/qwen/verify_qwen3.py,sha256=hmE0gdyzgcDpEDcWiwOzKQcxt4XeAe9DPRspy_I-lc8,1628
-ai_edge_torch/generative/examples/qwen/verify_util.py,sha256=vPROwLRABTChMGo5yWJkZURXP6TKWgh5FJj1Z3Zs6HU,3153
+ai_edge_torch/generative/examples/qwen/verify_util.py,sha256=9XhDQyIHFoJ6BYota1lIzwsBrN7t_Mtugpgbjgv17Gg,3277
 ai_edge_torch/generative/examples/qwen_vl/__init__.py,sha256=JaAnrFoXTl3RJX97XspklkTyqOHVyAgRJsZtzNDd10c,671
 ai_edge_torch/generative/examples/qwen_vl/convert_to_tflite.py,sha256=BM-ed7KrmPwzI3MvDs2R7P-kJgE1SK_cNVqIfXhtJjs,2411
 ai_edge_torch/generative/examples/qwen_vl/decoder.py,sha256=plOi-3LltxReW_HVxhxwee_rYCQq-gsOwbGZtRsM8N8,4443
@@ -137,7 +137,7 @@ ai_edge_torch/generative/examples/smollm/convert_to_tflite.py,sha256=QVRX_ovqBQi
 ai_edge_torch/generative/examples/smollm/convert_v2_to_tflite.py,sha256=rOVYSaS68_otJcGewQSconBCPD4GhDEIIyquD4dSUWc,1979
 ai_edge_torch/generative/examples/smollm/smollm.py,sha256=nQRiq6phJbtl3GAEEsJ_bPP_zrpQmiPumNEWCRrECn0,4028
 ai_edge_torch/generative/examples/smollm/verify.py,sha256=tXiAnwOnqgwyoa8dI4tCBiGUXkOMfdE9MUkkY_Bc4Ig,1603
-ai_edge_torch/generative/examples/smollm/verify_util.py,sha256=KT-eGsHFqtmu30ukC3jupXbF_qS3qx62hjLZfZt9ea8,2896
+ai_edge_torch/generative/examples/smollm/verify_util.py,sha256=_Z5jtNzQnULeWrXWC1LIpEawKwL_wA1_FtRc6A4-s4k,3019
 ai_edge_torch/generative/examples/stable_diffusion/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/stable_diffusion/attention.py,sha256=kDWG6MlIGa89zC5KSRcJlw2c4ITuw8KcchtfmF55f4g,3545
 ai_edge_torch/generative/examples/stable_diffusion/clip.py,sha256=lSCRZsoLjH_kqasRMwCy5IogkhyJdwcHKsPEfyxsXCQ,6112
@@ -165,7 +165,7 @@ ai_edge_torch/generative/examples/tiny_llama/__init__.py,sha256=hHLluseD2R0Hh4W6
 ai_edge_torch/generative/examples/tiny_llama/convert_to_tflite.py,sha256=urWkWjOaGzV2gwMXoGEs1mfHNEXfEKgwuXmQ0lrWcbM,1761
 ai_edge_torch/generative/examples/tiny_llama/tiny_llama.py,sha256=cVNP_a_3UBML0j9ITtcITeVXqCdcC7U1JoYwir09Dk8,2936
 ai_edge_torch/generative/examples/tiny_llama/verify.py,sha256=qzUsW8tJlAD9mqRxDSAcz5xSUKNlBz_DykA3PwUHMwc,1336
-ai_edge_torch/generative/examples/tiny_llama/verify_util.py,sha256=_zYGqP4HO_Stci14C7PXBNnQIT9TBa5uLUEcGfzxFvQ,2813
+ai_edge_torch/generative/examples/tiny_llama/verify_util.py,sha256=AxJeLbpDfKN-ztHYTr2qjoTpWQ-n9yeqYKhorFEBzLs,2937
 ai_edge_torch/generative/fx_passes/__init__.py,sha256=PFSMsA1vfBfrV9ssBCkYJNl8Hx_bLdWjN01iyjPM5jE,1094
 ai_edge_torch/generative/fx_passes/remove_sdpa_zero_mask_pass.py,sha256=myGjal5A8yIBoqgArd2k40rZmCgD1Ya369KR7182bhI,2129
 ai_edge_torch/generative/layers/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
@@ -210,7 +210,7 @@ ai_edge_torch/generative/test/utils.py,sha256=tF6aCfAGJnc9dmzCnZCEOuKNVimfWOqscv
 ai_edge_torch/generative/utilities/__init__.py,sha256=-_jxnnFnCgnTU4oTm4MnRsvL5lqhomBNdFBbqfmfHPo,720
 ai_edge_torch/generative/utilities/converter.py,sha256=DuoPb8Uhbxa32uUvr6grV5lssmUJdx298QwYz8cG_1Y,15512
 ai_edge_torch/generative/utilities/export_config.py,sha256=qjkEbjcvi2AgQikZS5qfgR95Z5z9pm07KX-RN5ibfNE,2280
-ai_edge_torch/generative/utilities/loader.py,sha256=oGgEc2tHRsVqSN3mgvcngXQrpV0a7cwTpJ3LmMVnyF0,15954
+ai_edge_torch/generative/utilities/loader.py,sha256=drgKBmNibuc3PCdc0kU0pVcp2Nt1_mjLYh67RyXOn7U,15952
 ai_edge_torch/generative/utilities/model_builder.py,sha256=tBfOcsI_NcneggHqkCSydYN3ZgmkzPc6nW0AJrA81wI,6461
 ai_edge_torch/generative/utilities/moonshine_loader.py,sha256=_RpFabSqtGH5PHiP3_1f6QfO14qMADUxr_HGRlVDFB0,4891
 ai_edge_torch/generative/utilities/stable_diffusion_loader.py,sha256=dqPD9qRXEWtU3ombslOC-BE2l_dMwHoCNu7NsIJhsso,36158
@@ -268,8 +268,8 @@ ai_edge_torch/testing/__init__.py,sha256=_yGgvnBZWb7T3IN3mc4x1sS4vM96HZwM8pwIcPG
 ai_edge_torch/testing/export.py,sha256=k5mGDGzwc23Z4zaIVDs8CNh-oOt64gsf9MS9NjhbPy4,3293
 ai_edge_torch/testing/model_coverage/__init__.py,sha256=5P8J6Zk5YYtDvTBucFvB9NGSRI7Gw_24WnrbhXgycEE,765
 ai_edge_torch/testing/model_coverage/model_coverage.py,sha256=UPB448aMDUyC0HNYVqio2rcJPnDN0tBQMP08J6vPYew,4718
-ai_edge_torch_nightly-0.6.0.dev20250530.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-ai_edge_torch_nightly-0.6.0.dev20250530.dist-info/METADATA,sha256=7oeZ6wSsBUuvNXH20tOHtlWkw_Rfmmr0EADK7hSt6AQ,2074
-ai_edge_torch_nightly-0.6.0.dev20250530.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-ai_edge_torch_nightly-0.6.0.dev20250530.dist-info/top_level.txt,sha256=5KXRaF2hwkApYxf7Y8y_tVb9aulGTlbOoNdbx1aKRkE,14
-ai_edge_torch_nightly-0.6.0.dev20250530.dist-info/RECORD,,
+ai_edge_torch_nightly-0.6.0.dev20250531.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ai_edge_torch_nightly-0.6.0.dev20250531.dist-info/METADATA,sha256=hxE-vwPFf28jr_h3KH5MnCqXgvpr6GWwh8IetZ2oK8Q,2074
+ai_edge_torch_nightly-0.6.0.dev20250531.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ai_edge_torch_nightly-0.6.0.dev20250531.dist-info/top_level.txt,sha256=5KXRaF2hwkApYxf7Y8y_tVb9aulGTlbOoNdbx1aKRkE,14
+ai_edge_torch_nightly-0.6.0.dev20250531.dist-info/RECORD,,