ai-edge-torch-nightly 0.3.0.dev20240909__py3-none-any.whl → 0.3.0.dev20240913__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. ai_edge_torch/_convert/test/test_convert.py +35 -13
  2. ai_edge_torch/generative/examples/gemma/convert_gemma2_to_tflite.py +31 -12
  3. ai_edge_torch/generative/examples/gemma/convert_to_tflite.py +25 -6
  4. ai_edge_torch/generative/examples/gemma/gemma.py +50 -30
  5. ai_edge_torch/generative/examples/gemma/gemma2.py +85 -58
  6. ai_edge_torch/generative/examples/{experimental/phi → phi}/convert_to_tflite.py +11 -12
  7. ai_edge_torch/generative/examples/{experimental/phi → phi}/phi2.py +46 -43
  8. ai_edge_torch/generative/examples/{experimental/gemma → smallm}/convert_to_tflite.py +12 -14
  9. ai_edge_torch/generative/examples/smallm/smallm.py +122 -0
  10. ai_edge_torch/generative/examples/stable_diffusion/clip.py +11 -5
  11. ai_edge_torch/generative/examples/t5/t5.py +35 -22
  12. ai_edge_torch/generative/examples/t5/t5_attention.py +18 -13
  13. ai_edge_torch/generative/examples/test_models/toy_model.py +15 -13
  14. ai_edge_torch/generative/examples/test_models/toy_model_with_kv_cache.py +74 -33
  15. ai_edge_torch/generative/examples/tiny_llama/convert_to_tflite.py +25 -6
  16. ai_edge_torch/generative/examples/tiny_llama/tiny_llama.py +55 -34
  17. ai_edge_torch/generative/layers/attention.py +77 -73
  18. ai_edge_torch/generative/layers/builder.py +5 -3
  19. ai_edge_torch/generative/layers/kv_cache.py +163 -51
  20. ai_edge_torch/generative/layers/model_config.py +38 -19
  21. ai_edge_torch/generative/layers/normalization.py +158 -0
  22. ai_edge_torch/generative/layers/unet/blocks_2d.py +0 -2
  23. ai_edge_torch/generative/test/{test_experimental_ekv.py → test_kv_cache.py} +12 -24
  24. ai_edge_torch/generative/test/test_loader.py +1 -1
  25. ai_edge_torch/generative/test/test_model_conversion.py +72 -34
  26. ai_edge_torch/generative/test/test_model_conversion_large.py +51 -23
  27. ai_edge_torch/generative/test/utils.py +54 -0
  28. ai_edge_torch/generative/utilities/loader.py +15 -15
  29. ai_edge_torch/generative/utilities/t5_loader.py +21 -20
  30. ai_edge_torch/odml_torch/lowerings/__init__.py +1 -0
  31. ai_edge_torch/odml_torch/lowerings/_convolution.py +196 -74
  32. ai_edge_torch/odml_torch/lowerings/_jax_lowerings.py +0 -2
  33. ai_edge_torch/odml_torch/lowerings/_layer_norm.py +78 -0
  34. ai_edge_torch/version.py +1 -1
  35. {ai_edge_torch_nightly-0.3.0.dev20240909.dist-info → ai_edge_torch_nightly-0.3.0.dev20240913.dist-info}/METADATA +1 -1
  36. {ai_edge_torch_nightly-0.3.0.dev20240909.dist-info → ai_edge_torch_nightly-0.3.0.dev20240913.dist-info}/RECORD +41 -47
  37. ai_edge_torch/generative/examples/experimental/gemma/gemma.py +0 -219
  38. ai_edge_torch/generative/examples/experimental/phi/__init__.py +0 -14
  39. ai_edge_torch/generative/examples/experimental/tiny_llama/__init__.py +0 -14
  40. ai_edge_torch/generative/examples/experimental/tiny_llama/convert_to_tflite.py +0 -87
  41. ai_edge_torch/generative/examples/experimental/tiny_llama/tiny_llama.py +0 -205
  42. ai_edge_torch/generative/examples/phi2/__init__.py +0 -14
  43. ai_edge_torch/generative/examples/phi2/convert_to_tflite.py +0 -67
  44. ai_edge_torch/generative/examples/phi2/phi2.py +0 -189
  45. ai_edge_torch/generative/examples/test_models/toy_model_with_external_kv_cache.py +0 -176
  46. /ai_edge_torch/generative/examples/{experimental → phi}/__init__.py +0 -0
  47. /ai_edge_torch/generative/examples/{experimental/gemma → smallm}/__init__.py +0 -0
  48. {ai_edge_torch_nightly-0.3.0.dev20240909.dist-info → ai_edge_torch_nightly-0.3.0.dev20240913.dist-info}/LICENSE +0 -0
  49. {ai_edge_torch_nightly-0.3.0.dev20240909.dist-info → ai_edge_torch_nightly-0.3.0.dev20240913.dist-info}/WHEEL +0 -0
  50. {ai_edge_torch_nightly-0.3.0.dev20240909.dist-info → ai_edge_torch_nightly-0.3.0.dev20240913.dist-info}/top_level.txt +0 -0
ai_edge_torch/odml_torch/lowerings/_convolution.py CHANGED
@@ -12,22 +12,171 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Provides lowering for coreaten to mlir stablehlo op: Convolution"""
+"""Provides lowering for coreaten to stablehlo for Convolution."""
 
 import math
 from typing import Optional
 
+from ai_edge_torch.odml_torch.lowerings import registry
 from jax._src.lib.mlir import ir
 from jax._src.lib.mlir.dialects import hlo as stablehlo
 import torch
 
-from .registry import lower
+
+def make_padding(padding):
+  """Change the padding from pytorch to stablehlo style.
+
+  Stablehlo allows start and end padding for each dimension while aten only
+  allows symmetric padding and so only has one number per dimension.
+
+  Args:
+    padding: The padding of the convolution
+
+  Returns:
+    The padding in stablehlo style
+  """
+  return tuple((p, p) for p in padding)
+
+
+def create_conv_dimension_numbers(lhs, transposed: bool = False):
+  """Create the dimension numbers for the convolution.
+
+  Args:
+    lhs: The input tensor
+    transposed: Whether the convolution is transposed
+
+  Returns:
+    The dimension numbers for the convolution
+  """
+  num_spatial_dims = len(lhs.type.shape) - 2
+  spatial_dimensions = []
+  for i in range(0, num_spatial_dims):
+    spatial_dimensions.append(i + 2)
+
+  # Regular kernels are OIHW
+  # TransposedConv kernels are IOHW
+  dimension_numbers = stablehlo.ConvDimensionNumbers.get(
+      input_batch_dimension=0,
+      input_feature_dimension=1,
+      input_spatial_dimensions=spatial_dimensions,
+      kernel_input_feature_dimension=0 if transposed else 1,
+      kernel_output_feature_dimension=1 if transposed else 0,
+      kernel_spatial_dimensions=spatial_dimensions,
+      output_batch_dimension=0,
+      output_feature_dimension=1,
+      output_spatial_dimensions=spatial_dimensions,
+  )
+  return dimension_numbers
+
+
+def infer_output_shape(
+    lhs,
+    rhs,
+    stride,
+    dilation,
+    padding,
+    transposed: bool = False,
+    output_padding: list[int] = 0,
+):
+  """Infer the output shape of the convolution.
+
+  Args:
+    lhs: The input tensor
+    rhs: The kernel tensor
+    stride: The stride of the convolution (dilation of input in transposed
+      conv)
+    dilation: The kernel dilation of the convolution
+    padding: The padding of the convolution
+    transposed: Whether the convolution is transposed
+    output_padding: The output padding of the convolution
+
+  Returns:
+    The output shape of the convolution
+  """
+  lhs_type: ir.RankedTensorType = lhs.type
+  lhs_shape: list[int] = lhs_type.shape
+  rhs_shape: list[int] = rhs.type.shape
+
+  # Input layout is: (N)CHW and Kernel layout is: (O)IHW for regular conv
+  # Input layout is: (N)CHW and Kernel layout is: I(O)HW for transposed conv
+  output_shape = (
+      [lhs_shape[0], rhs_shape[1]]
+      if transposed
+      else [lhs_shape[0], rhs_shape[0]]
+  )
+  num_spatial_dims = len(lhs.type.shape) - 2
+
+  # looping over the spatial dims (skipping the first 2 dims which are
+  # batch and features)
+  for spatial_dim in range(0, num_spatial_dims):
+    dim = spatial_dim + 2
+    dim_size = lhs_shape[dim]
+    kernel_dim_size = rhs_shape[dim]
+
+    if transposed:
+      output_dim_size = (
+          (dim_size - 1) * stride[spatial_dim]
+          - 2 * padding[spatial_dim]
+          + dilation[spatial_dim] * (kernel_dim_size - 1)
+          + output_padding[spatial_dim]
+          + 1
+      )
+    else:
+      output_dim_size = (
+          math.floor(
+              (
+                  dim_size
+                  + 2 * padding[spatial_dim]
+                  - dilation[spatial_dim] * (kernel_dim_size - 1)
+                  - 1
+              )
+              / stride[spatial_dim]
+          )
+          + 1
+      )
+
+    output_shape.append(output_dim_size)
+
+  return output_shape
+
+
+def build_transpose_conv(
+    lctx,
+    output_type: ir.RankedTensorType,
+    lhs: ir.Value,
+    rhs: ir.Value,
+    stride: list[int],
+    padding: list[int],
+    dilation: list[int],
+    output_padding: list[int],
+    groups: int,
+):
+  lhs_type: ir.RankedTensorType = lhs.type
+  num_spatial_dims = len(lhs_type.shape) - 2
+  rhs = stablehlo.reverse(rhs, list(range(2, 2 + num_spatial_dims)))
+
+  kernel_size = rhs.type.shape
+  # We need additional padding on the input to get the right output size.
+  adjusted_padding = [
+      dilation[dim] * (kernel_size[dim + 2] - 1) - padding[dim]
+      for dim in range(num_spatial_dims)
+  ]
+  return stablehlo.convolution(
+      result=output_type,
+      lhs=lhs,
+      rhs=rhs,
+      dimension_numbers=create_conv_dimension_numbers(lhs, True),
+      feature_group_count=groups,
+      batch_group_count=1,
+      padding=make_padding(adjusted_padding),
+      lhs_dilation=stride,
+      rhs_dilation=dilation,
+  )
 
 
 # convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride,
 # SymInt[] padding, SymInt[] dilation, bool transposed,
 # SymInt[] output_padding, SymInt groups) -> Tensor
-# @lower(torch.ops.aten.convolution)
+@registry.lower(torch.ops.aten.convolution)
 def _aten_convolution(
     lctx,
     lhs: ir.Value,
@@ -40,80 +189,53 @@ def _aten_convolution(
     output_padding: list[int],
     groups: int,
 ):
-  if transposed:
-    raise NotImplementedError("Transposed convolution is not implemented.")
 
-  if bias is not None:
-    raise NotImplementedError("Bias on convolution is not implemented.")
-
-  # Stablehlo allows start and end padding for each dimension while aten only
-  # allows symmetric padding and so only has one number per dimension.
-  def make_padding(padding):
-    return tuple((p, p) for p in padding)
-
-  def create_conv_dimension_numbers():
-    num_spatial_dims = len(lhs.type.shape) - 2
-    spatial_dimensions = []
-    for i in range(0, num_spatial_dims):
-      spatial_dimensions.append(i + 2)
-
-    dimension_numbers = stablehlo.ConvDimensionNumbers.get(
-        input_batch_dimension=0,
-        input_feature_dimension=1,
-        input_spatial_dimensions=spatial_dimensions,
-        kernel_input_feature_dimension=1,
-        kernel_output_feature_dimension=0,
-        kernel_spatial_dimensions=spatial_dimensions,
-        output_batch_dimension=0,
-        output_feature_dimension=1,
-        output_spatial_dimensions=spatial_dimensions,
+  # TODO(b/365559296) Add support for output_padding
+  if any(output_padding):
+    raise NotImplementedError(
+        "Output padding on convolution is not implemented."
     )
-    return dimension_numbers
-
-  def infer_output_shape():
-    lhs_type: ir.RankedTensorType = lhs.type
-    lhs_shape: list[int] = lhs_type.shape
-    rhs_shape: list[int] = rhs.type.shape
-
-    # Input layout is: (N)CHW and Kernel layout is: (O)IHW
-    output_shape = [lhs_shape[0], rhs_shape[0]]
-    num_spatial_dims = len(lhs.type.shape) - 2
-
-    # looping over the spatial dims (skipping the first 2 dims which are
-    # batch and features)
-    for spatial_dim in range(0, num_spatial_dims):
-      dim_size = lhs_shape[spatial_dim + 2]
-      kernel_dim_size = rhs_shape[spatial_dim + 2]
-
-      # for example, a dilation of 2 increases the dimension size by 2
-      dim_size *= dilation[spatial_dim]
-
-      # padding added to both sides
-      dim_size += 2 * padding[spatial_dim]
-
-      output_dim_size = math.ceil(
-          (dim_size - kernel_dim_size + 1) / stride[spatial_dim]
-      )
-
-      output_shape.append(output_dim_size)
-
-    return output_shape
 
   lhs_type: ir.RankedTensorType = lhs.type
-
-  op = stablehlo.ConvolutionOp(
-      result=ir.RankedTensorType.get(
-          infer_output_shape(), lhs_type.element_type
-      ),
-      lhs=lhs,
-      rhs=rhs,
-      dimension_numbers=create_conv_dimension_numbers(),
-      feature_group_count=groups,
-      batch_group_count=1,
-      window_strides=stride,
-      padding=make_padding(padding),
-      lhs_dilation=(1,) * len(stride),
-      rhs_dilation=dilation,
+  output_shape = infer_output_shape(
+      lhs, rhs, stride, dilation, padding, transposed, output_padding
+  )
+  output_type = ir.RankedTensorType.get(
+      output_shape,
+      lhs_type.element_type,
   )
 
-  return op.result
+  if transposed:
+    res = build_transpose_conv(
+        lctx,
+        output_type,
+        lhs,
+        rhs,
+        stride,
+        padding,
+        dilation,
+        output_padding,
+        groups,
+    )
+  else:
+    res = stablehlo.convolution(
+        result=output_type,
+        lhs=lhs,
+        rhs=rhs,
+        dimension_numbers=create_conv_dimension_numbers(lhs),
+        feature_group_count=groups,
+        batch_group_count=1,
+        window_strides=stride,
+        padding=make_padding(padding),
+        rhs_dilation=dilation,
    )
+
+  if bias is not None:
+    # broadcast [C] to [NCHW]
+    broadcasted_bias = stablehlo.broadcast_in_dim(output_type, bias, [1])
+    res = stablehlo.add(
+        lhs=res,
+        rhs=broadcasted_bias,
+    )
+
+  return res
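
Note: the new `infer_output_shape` implements the standard convolution arithmetic, `floor((in + 2*pad - dilation*(kernel - 1) - 1) / stride) + 1` for regular convolutions and `(in - 1)*stride - 2*pad + dilation*(kernel - 1) + output_padding + 1` for transposed ones, replacing the previous formula that dilated the input size rather than the kernel. A quick sanity check of both formulas against PyTorch's own shape rules (sizes below are arbitrary, chosen only for illustration):

import math

import torch

# Arbitrary illustrative sizes: NCHW input, square kernel.
n, c_in, c_out, h, k = 1, 3, 8, 16, 3
stride, pad, dil, out_pad = 2, 1, 1, 1

x = torch.randn(n, c_in, h, h)

# Regular conv: floor((in + 2*pad - dil*(k - 1) - 1) / stride) + 1
expected = math.floor((h + 2 * pad - dil * (k - 1) - 1) / stride) + 1
out = torch.nn.functional.conv2d(
    x, torch.randn(c_out, c_in, k, k), stride=stride, padding=pad, dilation=dil
)
assert out.shape[2] == expected == 8

# Transposed conv: (in - 1)*stride - 2*pad + dil*(k - 1) + out_pad + 1
expected_t = (h - 1) * stride - 2 * pad + dil * (k - 1) + out_pad + 1
out_t = torch.nn.functional.conv_transpose2d(
    x,
    torch.randn(c_in, c_out, k, k),  # transposed kernels are IOHW
    stride=stride,
    padding=pad,
    dilation=dil,
    output_padding=out_pad,
)
assert out_t.shape[2] == expected_t == 32

The bias path matches PyTorch semantics as well: the rank-1 [C] bias is broadcast along dimension 1 of the NCHW output (`broadcast_in_dim(..., [1])`) and added to the convolution result.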
ai_edge_torch/odml_torch/lowerings/_jax_lowerings.py CHANGED
@@ -105,7 +105,6 @@ lower_by_torch_xla2(torch.ops.aten.clamp.default)
 lower_by_torch_xla2(torch.ops.aten.clone)
 lower_by_torch_xla2(torch.ops.aten.clone.default)
 lower_by_torch_xla2(torch.ops.aten.constant_pad_nd)
-lower_by_torch_xla2(torch.ops.aten.convolution)
 lower_by_torch_xla2(torch.ops.aten.cos)
 lower_by_torch_xla2(torch.ops.aten.cosh)
 lower_by_torch_xla2(torch.ops.aten.cumsum)
@@ -168,7 +167,6 @@ lower_by_torch_xla2(torch.ops.aten.mul.Scalar)
 lower_by_torch_xla2(torch.ops.aten.mul.Tensor)
 lower_by_torch_xla2(torch.ops.aten.native_batch_norm)
 lower_by_torch_xla2(torch.ops.aten.native_group_norm)
-lower_by_torch_xla2(torch.ops.aten.native_layer_norm)
 lower_by_torch_xla2(torch.ops.aten.native_layer_norm_backward)
 lower_by_torch_xla2(torch.ops.aten.ne)
 lower_by_torch_xla2(torch.ops.aten.neg)
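
Note: these two entries leave the torch-xla2 fallback table because dedicated StableHLO lowerings now own `aten.convolution` and `aten.native_layer_norm`, registered with `@registry.lower` in `_convolution.py` above and `_layer_norm.py` below. For readers unfamiliar with the codebase, a minimal sketch of how such a decorator-based registry typically works (illustrative only; the real implementation lives in `ai_edge_torch/odml_torch/lowerings/registry.py` and may differ):

# Illustrative sketch of a decorator-based lowering registry; names and
# structure here are assumptions, not the actual registry.py implementation.
from typing import Any, Callable

_lowerings: dict[Any, Callable] = {}

def lower(op):
  """Returns a decorator registering `fn` as the lowering for `op`."""
  def decorator(fn: Callable) -> Callable:
    _lowerings[op] = fn  # keyed by the ATen op object
    return fn
  return decorator

def lookup(op):
  """Fetches the registered lowering, or None if the op has no entry."""
  return _lowerings.get(op)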
ai_edge_torch/odml_torch/lowerings/_layer_norm.py ADDED
@@ -0,0 +1,78 @@
+# Copyright 2024 The AI Edge Torch Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Provides lowering for coreaten to stablehlo for LayerNorm."""
+
+import math
+from typing import Optional
+from ai_edge_torch.odml_torch.lowerings import registry
+from ai_edge_torch.odml_torch.lowerings import utils
+from jax._src.lib.mlir import ir
+from jax._src.lib.mlir.dialects import hlo as stablehlo
+import torch
+
+
+# native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight,
+# Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)
+@registry.lower(torch.ops.aten.native_layer_norm)
+def _aten_native_layer_norm(
+    lctx,
+    data: ir.Value,
+    normalized_shape: list[int],
+    weight: Optional[ir.Value],
+    bias: Optional[ir.Value],
+    eps: float,
+):
+  data_type: ir.RankedTensorType = data.type
+  unnormalized_count = math.prod(data_type.shape) // math.prod(normalized_shape)
+  dest_shape = [
+      1,
+      unnormalized_count,
+      math.prod(normalized_shape),
+  ]
+  dest_type = ir.RankedTensorType.get(dest_shape, data_type.element_type)
+
+  reshaped_data = stablehlo.reshape(dest_type, data)
+
+  one = utils.splat(1, data_type.element_type, [unnormalized_count])
+  zero = utils.splat(0, data_type.element_type, [unnormalized_count])
+  output, mean, var = stablehlo.batch_norm_training(
+      reshaped_data, one, zero, eps, 1
+  )
+  eps_splat = utils.splat(eps, var.type.element_type, var.type.shape)
+  rstd = stablehlo.rsqrt(stablehlo.add(var, eps_splat))
+
+  stats_shape = data_type.shape[: -1 * len(normalized_shape)] + [1] * len(
+      normalized_shape
+  )
+  stats_type = ir.RankedTensorType.get(stats_shape, data_type.element_type)
+  mean = stablehlo.reshape(stats_type, mean)
+  rstd = stablehlo.reshape(stats_type, rstd)
+
+  output = stablehlo.reshape(data_type, output)
+
+  data_rank = len(data_type.shape)
+  normalized_rank = len(normalized_shape)
+  if weight is not None:
+    weight = stablehlo.broadcast_in_dim(
+        data_type, weight, list(range(data_rank - normalized_rank, data_rank))
+    )
+    output = stablehlo.multiply(weight, output)
+  if bias is not None:
+    bias = stablehlo.broadcast_in_dim(
+        data_type, bias, list(range(data_rank - normalized_rank, data_rank))
+    )
+    output = stablehlo.add(bias, output)
+
+  return output, mean, rstd
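
Note: this new lowering expresses layer norm through `batch_norm_training`: the input is reshaped to `[1, unnormalized_count, prod(normalized_shape)]` so that each normalization group becomes one batch-norm "feature", normalized with scale 1 and offset 0, and the returned `rstd` is recovered as `rsqrt(var + eps)`. A numeric check of that decomposition in plain PyTorch (the shape below is arbitrary; we normalize over the last two dims):

import torch

x = torch.randn(2, 4, 8, 8)  # arbitrary illustrative shape
normalized_shape = [8, 8]
eps = 1e-5

# Same trick as the lowering: collapse each normalization group into one
# "feature" of a [1, groups, features] tensor and normalize per group.
features = 8 * 8
groups = x.numel() // features
flat = x.reshape(1, groups, features)
mean = flat.mean(dim=-1, keepdim=True)
var = flat.var(dim=-1, unbiased=False, keepdim=True)
out = ((flat - mean) * torch.rsqrt(var + eps)).reshape(x.shape)

ref = torch.nn.functional.layer_norm(x, normalized_shape, eps=eps)
assert torch.allclose(out, ref, atol=1e-5)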
ai_edge_torch/version.py CHANGED
@@ -13,4 +13,4 @@
 # limitations under the License.
 # ==============================================================================
 
-__version__ = "0.3.0.dev20240909"
+__version__ = "0.3.0.dev20240913"
{ai_edge_torch_nightly-0.3.0.dev20240909.dist-info → ai_edge_torch_nightly-0.3.0.dev20240913.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-edge-torch-nightly
-Version: 0.3.0.dev20240909
+Version: 0.3.0.dev20240913
 Summary: Supporting PyTorch models with the Google AI Edge TFLite runtime.
 Home-page: https://github.com/google-ai-edge/ai-edge-torch
 Keywords: On-Device ML,AI,Google,TFLite,PyTorch,LLMs,GenAI
{ai_edge_torch_nightly-0.3.0.dev20240909.dist-info → ai_edge_torch_nightly-0.3.0.dev20240913.dist-info}/RECORD CHANGED
@@ -2,7 +2,7 @@ ai_edge_torch/__init__.py,sha256=48qP37uHT90YPs4eIUQxCiWVwqGEX3idCUs6mQKvX1U,116
 ai_edge_torch/config.py,sha256=PCd9PVrbUNeVIUDFUCnW4goDWU4bjouK28yMYU6VOi0,877
 ai_edge_torch/conftest.py,sha256=r0GTrhMRhlmOGrrkvumHN8hkmyug6WvF60vWq8wRIBI,758
 ai_edge_torch/model.py,sha256=NYV6Mkaje_ditIEI_s_7nLP_-8i4kbGM8nRzieVkbUI,5397
-ai_edge_torch/version.py,sha256=r0y6crIySNGhJqtljkzyHxb1XMvLji2VLajLfUjW8b4,706
+ai_edge_torch/version.py,sha256=2_ahYhvytovu9mWRifMKeqx6-0JbD7-iV5FXU890d7Y,706
 ai_edge_torch/_convert/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/_convert/conversion.py,sha256=kcv_QgNgeyDmrqwdzHicGNP68w6zF7GJg7YkMEIXp4Q,3759
 ai_edge_torch/_convert/conversion_utils.py,sha256=Sr8qXVcTwc-ZnZmK7yxVrIOOp1S_vNrwzC0zUvLTI2o,2160
@@ -26,7 +26,7 @@ ai_edge_torch/_convert/fx_passes/optimize_layout_transposes_pass/layout_partitio
 ai_edge_torch/_convert/fx_passes/optimize_layout_transposes_pass/layout_partitioners/greedy.py,sha256=L_x8BrF7UDah-SYl-pG11I6CIckdU9kBTUHcmwW4cts,2420
 ai_edge_torch/_convert/fx_passes/optimize_layout_transposes_pass/layout_partitioners/min_cut.py,sha256=mzfL9cf0qBnpmxM_OlMQFvQsEZV2B_Mia9yEJV4J7rI,7135
 ai_edge_torch/_convert/test/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/_convert/test/test_convert.py,sha256=pUYSXuqFg8CAeJ8JkoYf7S0RDLRPVuZUwVOd0xObM6w,14411
+ai_edge_torch/_convert/test/test_convert.py,sha256=FSufFZEeTLBpUnzE1Iy-LvNN0mhDynWMNg7Mei8RpLQ,14973
 ai_edge_torch/_convert/test/test_convert_composites.py,sha256=BCIODgxMI_3MxMLfNWYMGjcz-al-J3z5eDHCiZJXNwY,7992
 ai_edge_torch/_convert/test/test_convert_multisig.py,sha256=6_C2R9--KyNR7_oezZIAfyTSR97tOeEWy4XGcbSxBDE,5778
 ai_edge_torch/_convert/test/test_to_channel_last_io.py,sha256=1o-gUiwzIuO67FNAJ8DeyKv8fVUeZVNNNwofNVDjYeU,3024
@@ -39,27 +39,20 @@ ai_edge_torch/debug/test/test_search_model.py,sha256=-RuU0QsjqkfzZF2IbeA55MoeVOa
 ai_edge_torch/experimental/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/experimental/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/experimental/gemma/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/experimental/gemma/convert_to_tflite.py,sha256=lpiPFSh3SJd6WwuZ0QegSva3__iSz2tUD7L7QfkAe4I,3085
-ai_edge_torch/generative/examples/experimental/gemma/gemma.py,sha256=aCoD86pf4nuquUMk7MOR-jsN5FqvySSEuMx9Psxjblk,7261
-ai_edge_torch/generative/examples/experimental/phi/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/experimental/phi/convert_to_tflite.py,sha256=DavrdGmqUgoThsGNRv3LXMW5tvJdYEvj66Hf1XRqkXU,3055
-ai_edge_torch/generative/examples/experimental/phi/phi2.py,sha256=Jxf3ZyYDpS78l6uh4_LGGIcHawrOhZ1vHoHFVxRaK40,6789
-ai_edge_torch/generative/examples/experimental/tiny_llama/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/experimental/tiny_llama/convert_to_tflite.py,sha256=xPVvHQjLJHFiRv_-Fy2sDm0Aft7SG8SXiV6o3rF03cQ,3108
-ai_edge_torch/generative/examples/experimental/tiny_llama/tiny_llama.py,sha256=nUm0SQbCTmNAc5u-C9gbQRFPt7GDvUt6UjH6doTvH-I,6817
 ai_edge_torch/generative/examples/gemma/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/gemma/convert_gemma2_to_tflite.py,sha256=pseJExH35lSAK0ZtzSHB1sFtRtF_EuT2xcSpGU0gKVI,2524
-ai_edge_torch/generative/examples/gemma/convert_to_tflite.py,sha256=w589IJETATd6Z9_1XCIWbrlCV3E92X_5ac3VVCVFXG0,2522
-ai_edge_torch/generative/examples/gemma/gemma.py,sha256=lc1-CfIObHj9D5VJy78BOtGTrQM4TYMI6NfVi8KM5qA,6747
-ai_edge_torch/generative/examples/gemma/gemma2.py,sha256=OcUQLFR136e3QRVXRnmtYnRHXyHJS9EYEFlJ1ymXyRY,8859
-ai_edge_torch/generative/examples/phi2/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/phi2/convert_to_tflite.py,sha256=ON6zLO-nFS8eJ2yhyWzT5x2Somr-Ca-VjpjT7OGFU10,2506
-ai_edge_torch/generative/examples/phi2/phi2.py,sha256=FFnhv1kx4fHRhSeOreLGj8kAqPnmkz9pD1RRSDVlM_w,6332
+ai_edge_torch/generative/examples/gemma/convert_gemma2_to_tflite.py,sha256=ZJvw8uFVu7FEJ7eXfpzn-pPKgPELoxkGz4Zg7LKKMSI,3048
+ai_edge_torch/generative/examples/gemma/convert_to_tflite.py,sha256=hM-fwjZG53p1UE_lkovLMmHRDHleJsb6_0ib0_k0v54,3040
+ai_edge_torch/generative/examples/gemma/gemma.py,sha256=uejk9Mi85uRuFYIUi5XI58rf4K7TFeE5cZ1flejF8EE,7473
+ai_edge_torch/generative/examples/gemma/gemma2.py,sha256=H0scyAdqRyV2wwaFx1LAa3A5oYn1C5tTdPWvbDTd_SQ,10256
+ai_edge_torch/generative/examples/phi/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
+ai_edge_torch/generative/examples/phi/convert_to_tflite.py,sha256=vqEpZVmB0_wMKcAl6RXm7W57DqPTzEdVVN6W2Z-QYzI,3011
+ai_edge_torch/generative/examples/phi/phi2.py,sha256=wjTLCfCUDcLqvVsrPH-Wx04pOKeuigZCWHO3gL1WOEA,7072
+ai_edge_torch/generative/examples/smallm/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
+ai_edge_torch/generative/examples/smallm/convert_to_tflite.py,sha256=aqqxQMBBO_dtGB1iZ1tpF8hbGpdZkx0VIz62ZqfVMCc,3036
+ai_edge_torch/generative/examples/smallm/smallm.py,sha256=mzlbXxCCB10FN03QDRoPXw-cbucQM_O_Hs8hqLZAvck,4002
 ai_edge_torch/generative/examples/stable_diffusion/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/stable_diffusion/attention.py,sha256=kDWG6MlIGa89zC5KSRcJlw2c4ITuw8KcchtfmF55f4g,3545
-ai_edge_torch/generative/examples/stable_diffusion/clip.py,sha256=0WniBWQ6_NcQc5WycX3YRRX7Os9AGQSxfc1m2HKBqg8,4479
+ai_edge_torch/generative/examples/stable_diffusion/clip.py,sha256=evl5Rn_Hlp9-BsNmcf6liXa2syET3-Fz-zVaWjqPKx8,4657
 ai_edge_torch/generative/examples/stable_diffusion/convert_to_tflite.py,sha256=7ra36nM5tQwSw-vi6QCFLx5IssZhT-6yVK4H3XsAc4w,5044
 ai_edge_torch/generative/examples/stable_diffusion/decoder.py,sha256=slieF2-QcDCwd4DRZ7snsZIphT97IXpp4plRRsRSwL8,13983
 ai_edge_torch/generative/examples/stable_diffusion/diffusion.py,sha256=7oUIJ6HO0vmlhFdkXpqGm9KTB-eM4Ob9VrHSDlIGFOg,30926
@@ -74,29 +67,28 @@ ai_edge_torch/generative/examples/stable_diffusion/samplers/k_lms.py,sha256=ZE6H
 ai_edge_torch/generative/examples/stable_diffusion/samplers/sampler.py,sha256=RxR5rw0wFFm_5CfAY-3-EIz83vhM9EKye8Bb5zBb0Ok,1341
 ai_edge_torch/generative/examples/t5/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/t5/convert_to_tflite.py,sha256=CZVuNEL8OHPkdsz70WOvNpTJ9LFkiDnlwgJiXfUZCVk,4548
-ai_edge_torch/generative/examples/t5/t5.py,sha256=Zobw5BV-PC0nlU9Z6fzb2O07rMeU8vGIk-KtKp9D_H0,20871
-ai_edge_torch/generative/examples/t5/t5_attention.py,sha256=1lvbSlzyBwmd5Bs7-Up_v4iJQkCPIJx2RmMkLgy7l2Q,8508
+ai_edge_torch/generative/examples/t5/t5.py,sha256=Ekg92OwIXSkSRii9OY-mp3-SExtsxOdoIDTFxm25hso,21304
+ai_edge_torch/generative/examples/t5/t5_attention.py,sha256=l01oYyJo77INzRwN4xqXquaFQPvCFBFF5zOnmGVb3Hg,8731
 ai_edge_torch/generative/examples/test_models/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/test_models/toy_model.py,sha256=5wj2RmQRIwD6O_R_pp-A_7gKGSdHWDSXyis97r1ELVI,5622
-ai_edge_torch/generative/examples/test_models/toy_model_with_external_kv_cache.py,sha256=l9swUKTcDtnTibNSNExaMgLvDeJ4Er2tVh5ZW1EtRgk,5809
-ai_edge_torch/generative/examples/test_models/toy_model_with_kv_cache.py,sha256=mQkcpSe6HlRLMkIRCEHc9ZXL7jxEp9RWSGUQjjd-r2w,4841
+ai_edge_torch/generative/examples/test_models/toy_model.py,sha256=QyLeCqDnk71WvvFH68g9UeF-HytonSk1ItGF9dc7Zj8,5854
+ai_edge_torch/generative/examples/test_models/toy_model_with_kv_cache.py,sha256=oX_D_kU9PegBX3Fx9z_J3a1Oh2PF05F0nwZNxyLgQNA,5880
 ai_edge_torch/generative/examples/tiny_llama/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/examples/tiny_llama/convert_to_tflite.py,sha256=CLRqO7ycMbpy7J3_Czp1sLx6hcdwGD9zVq04yRba0e8,2550
-ai_edge_torch/generative/examples/tiny_llama/tiny_llama.py,sha256=4ku0ni3MOWamhPrzLap0BmtdNFk7CH0hwjPNoRAKpvQ,6278
+ai_edge_torch/generative/examples/tiny_llama/convert_to_tflite.py,sha256=y4LiWhwgflqrg4WWh3wq5ei3VOT_cV0A62x62qptQiM,3070
+ai_edge_torch/generative/examples/tiny_llama/tiny_llama.py,sha256=Mnn_aMImR1CpC_T0CMKlp3XgoLyR7N56VR3blVSnMHQ,7007
 ai_edge_torch/generative/fx_passes/__init__.py,sha256=fmNNXawJ722M4cTUuTx289rT0NHxBEsOy_k8baqCOms,1173
 ai_edge_torch/generative/fx_passes/remove_sdpa_zero_mask_pass.py,sha256=sXis0U4u-RoIp_NyrmWJNnqFqpqRuZOrhfsJIO6rMps,2028
 ai_edge_torch/generative/layers/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/layers/attention.py,sha256=2UujQePRJ1LK02PN-hGcuMu0ooCJC6ETfPvzEYVFyho,12284
+ai_edge_torch/generative/layers/attention.py,sha256=d9yLaqxPCtClhNUmauOEFBKxhLnsXdN3NiYy1WspIPI,12826
 ai_edge_torch/generative/layers/attention_utils.py,sha256=68GXGR2HSWBFViTxX7cHifzVG-kcLS2IL2tQJPIpupg,7344
-ai_edge_torch/generative/layers/builder.py,sha256=xb7rjADv3Jm4qfmlYtg6oLLe7ReDE9UjsEqiejPpDD8,4346
+ai_edge_torch/generative/layers/builder.py,sha256=6jDNaa_djF32AjxIJtaDGBzlj3zlvl1yZivK3gC4j94,4424
 ai_edge_torch/generative/layers/feed_forward.py,sha256=uto7xtwx6jPkk1GZ2x7pSTentQzRrPSKw4_PSE12ahA,3525
-ai_edge_torch/generative/layers/kv_cache.py,sha256=Ob8QeXWW5xt-6hcGA0uoC48eRQ8lfvKca8JbWtFx2CE,3082
-ai_edge_torch/generative/layers/model_config.py,sha256=WpZ9djUBAZddyeSODHDaVMG37EQqfzGGrlMPi8AA-Hc,5752
-ai_edge_torch/generative/layers/normalization.py,sha256=u8lv0p-ktKcRqCDlOqZQa9WQcfDK9JM2IaUQFQdn7xs,1860
+ai_edge_torch/generative/layers/kv_cache.py,sha256=FveTTO0z_yi0-ZdGMuamzSvuInn6B4lesKZ4PHT2Vmg,6088
+ai_edge_torch/generative/layers/model_config.py,sha256=mil4RkGuNFBDKo3gPd9QnfGKLKPZWX9Gz2_q9hX8sNU,6407
+ai_edge_torch/generative/layers/normalization.py,sha256=iod9oNkoDS5m-yFY_Y_XMyvCU5a88ESd_s5WY34ErKA,6129
 ai_edge_torch/generative/layers/rotary_position_embedding.py,sha256=CZqOoibLcHvUgrgaIIWAlmk3XgE2inzx340MN-npLoU,1347
 ai_edge_torch/generative/layers/scaled_dot_product_attention.py,sha256=VW-VP8e7FTSPCdu-6DVxpwNrIdgX0R_kq6F6MSEiyXE,3848
 ai_edge_torch/generative/layers/unet/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/layers/unet/blocks_2d.py,sha256=V4zUAqjWeBseMPG9B-93LDv1LM3Dds6Q-H0NxY0koSA,27212
+ai_edge_torch/generative/layers/unet/blocks_2d.py,sha256=cpygyJccLq6KHKxV7oz4YKh529YLjC9isupnsVmPi0A,27190
 ai_edge_torch/generative/layers/unet/builder.py,sha256=zAqWXdimmMrQRhmE_t9XkS68mh6PSrzwb-2NZZXrR5I,1901
 ai_edge_torch/generative/layers/unet/model_config.py,sha256=NvBJj09a7ZC-ChGE_ex-_kLnE_fjzrY6txbLSh1pMKA,9208
 ai_edge_torch/generative/quantize/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
@@ -107,15 +99,16 @@ ai_edge_torch/generative/quantize/quant_recipe_utils.py,sha256=4fgmP_GgeiFUOkIaC
 ai_edge_torch/generative/quantize/quant_recipes.py,sha256=0Kvr_o7pbMnE8VMe6Ml0FBxkHM6RJ3C14B2I1mjItjc,2030
 ai_edge_torch/generative/quantize/supported_schemes.py,sha256=FjdycEOvxRgBmQdZVufetPvkDoD7rUowIOSKV9oV5Kk,1418
 ai_edge_torch/generative/test/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/test/test_experimental_ekv.py,sha256=8qv_eVtJW9GPvBEf2hPQe3tpdJ33XShya6MCX1FqrZM,4355
-ai_edge_torch/generative/test/test_loader.py,sha256=_y5EHGgoNOmCuYonsB81UJScHVsTAQXUVd44czMAw6k,3379
-ai_edge_torch/generative/test/test_model_conversion.py,sha256=b3InJ8Rx03YtHpE9h-j0pSXAY1cCf-dLlx4Y5LSJnRQ,5174
-ai_edge_torch/generative/test/test_model_conversion_large.py,sha256=9JXcd-rX8MpsYeEWUFEXf783GOwYOLY64KzDfFdmRJ8,4484
+ai_edge_torch/generative/test/test_kv_cache.py,sha256=W6Bh0gYDzmwb0j9HdD5_D7Z7FPToP2HSyFrmwIXuFqo,3793
+ai_edge_torch/generative/test/test_loader.py,sha256=8y74ChO3CZCfEi1eCf3-w47kRgAI4qPYCXpi8rTQXMA,3378
+ai_edge_torch/generative/test/test_model_conversion.py,sha256=SIv7_sc5qHvbHFN8SbAfY00iXGvH7J6cJLkERU_cd5k,5888
+ai_edge_torch/generative/test/test_model_conversion_large.py,sha256=F3q3K9ZgWBzlLy4WpE8-w6UWSuJ-UoJwMm3N6Zb3Y14,5016
 ai_edge_torch/generative/test/test_quantize.py,sha256=kY_NRpF-v1i4clqI1CFFWEagJv-5PzBDkeJ2fInl9_w,5913
+ai_edge_torch/generative/test/utils.py,sha256=YvEhO2HIj1LkBs5du1UxY-cGRW9HMyAYsOUhgsTrTpA,1796
 ai_edge_torch/generative/utilities/__init__.py,sha256=-_jxnnFnCgnTU4oTm4MnRsvL5lqhomBNdFBbqfmfHPo,720
-ai_edge_torch/generative/utilities/loader.py,sha256=6J0aAP6-6LySeqeYIHKcchr5T9cVtSO34aoDr3V9gxY,12726
+ai_edge_torch/generative/utilities/loader.py,sha256=kn4TCgGAG8s4mdvPITimOBCaVyn04Ksz4gZIleFYF1o,12754
 ai_edge_torch/generative/utilities/stable_diffusion_loader.py,sha256=pKp3AMSbS3otCvgwJRF5M1l4JRNKk-aCKimXzIMSrds,35679
-ai_edge_torch/generative/utilities/t5_loader.py,sha256=_UXcc1QKT-S92hikfo-fTBFhnYLzROqcyRqKonVsqj4,16885
+ai_edge_torch/generative/utilities/t5_loader.py,sha256=tEsfy8-ymzbbjOIc-oesXF3yGyyWtJgFXn2s7VOavt8,16961
 ai_edge_torch/hlfb/__init__.py,sha256=sH4um75na-O8tzxN6chFyp6Y4xnexsE7kUQpZySv6dE,735
 ai_edge_torch/hlfb/mark_pattern/__init__.py,sha256=cjTprggj_cuktSCm7-A25e7Shop3k63ylp7sdZmtZ8o,4790
 ai_edge_torch/hlfb/mark_pattern/passes.py,sha256=pjkKcI1nHECPluAt87cFBrt1DP0f3ge7rHq1NhCkBIE,1936
@@ -145,11 +138,12 @@ ai_edge_torch/odml_torch/debuginfo/_op_polyfill.py,sha256=IvOBQyROI9WHS3umHRxsDW
 ai_edge_torch/odml_torch/jax_bridge/__init__.py,sha256=Jco5zvejxuyl9xHQxZICAKbkgH7x38qPlwUUpD7S15Q,730
 ai_edge_torch/odml_torch/jax_bridge/_wrap.py,sha256=drN3L0uTsSjkluKgt6Ngq7b5HLReE_7iAitHpZ9PKqE,5428
 ai_edge_torch/odml_torch/jax_bridge/utils.py,sha256=T8isGc896VrHZ6c_L5pYmLpolQ7ibcOlgWfPuVFPzIg,2264
-ai_edge_torch/odml_torch/lowerings/__init__.py,sha256=GqYk6oBJw7KWeG4_6gxSu_OvYhjJcC2FpGzWPPEdH6w,933
+ai_edge_torch/odml_torch/lowerings/__init__.py,sha256=dE_qzh-OnCNjWzqs1-PHs5PNlRF726qMQKM3tkwAzEs,959
 ai_edge_torch/odml_torch/lowerings/_basic.py,sha256=wV8AUK8dvjLUy3qjqw_IxpiYVDWUMPNZRfi3XYE_hDs,6972
 ai_edge_torch/odml_torch/lowerings/_batch_norm.py,sha256=PaLI0BB6pdBW1VyfW8VTOT_Be-ZcqYdNOsyfzKfq8Cg,2064
-ai_edge_torch/odml_torch/lowerings/_convolution.py,sha256=B6BILeu-UlwGB1O6g7111X1TaIFznsfxXrB72ygBsBA,3885
-ai_edge_torch/odml_torch/lowerings/_jax_lowerings.py,sha256=I0Y4IK7Zap8m6xfxMw7DfQ9Mg4htKOoypdHVAMHqx9c,10669
+ai_edge_torch/odml_torch/lowerings/_convolution.py,sha256=v1VdKmL8YLJv3PR9VgyNghO83A25PpTzY2ZUAJqlq3Q,6847
+ai_edge_torch/odml_torch/lowerings/_jax_lowerings.py,sha256=Ii1akrKLhRTkZ715JxXBBGKv3jGfXReXMQCYNzSnxmM,10567
+ai_edge_torch/odml_torch/lowerings/_layer_norm.py,sha256=1ePJs7oIdUkVdMddFsXMc53qTkEKqGz0ZhQQoNzBa10,2862
 ai_edge_torch/odml_torch/lowerings/context.py,sha256=jslcCv7r_HtImSRTxJwHAUV_QCu9Jub51lovmoBkmFA,1295
 ai_edge_torch/odml_torch/lowerings/registry.py,sha256=ES3x_RJ22T5rlmMrlomex2DdcZbhlyVJ7_HS3rjz3Uk,2851
 ai_edge_torch/odml_torch/lowerings/utils.py,sha256=NczqpsSd3Fn7yVcPC3qllemiZxxDAZgcW1T5l8-W9fE,5593
@@ -161,8 +155,8 @@ ai_edge_torch/quantize/quant_config.py,sha256=U0KisSW-uZkoMJcy-ZP9W57p3tsa594fr9
 ai_edge_torch/testing/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/testing/model_coverage/__init__.py,sha256=5P8J6Zk5YYtDvTBucFvB9NGSRI7Gw_24WnrbhXgycEE,765
 ai_edge_torch/testing/model_coverage/model_coverage.py,sha256=UPB448aMDUyC0HNYVqio2rcJPnDN0tBQMP08J6vPYew,4718
-ai_edge_torch_nightly-0.3.0.dev20240909.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-ai_edge_torch_nightly-0.3.0.dev20240909.dist-info/METADATA,sha256=s7SAIUvFciy8peNKMHvyhoNQWYx67Jerz4foeV7KiE0,1859
-ai_edge_torch_nightly-0.3.0.dev20240909.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
-ai_edge_torch_nightly-0.3.0.dev20240909.dist-info/top_level.txt,sha256=5KXRaF2hwkApYxf7Y8y_tVb9aulGTlbOoNdbx1aKRkE,14
-ai_edge_torch_nightly-0.3.0.dev20240909.dist-info/RECORD,,
+ai_edge_torch_nightly-0.3.0.dev20240913.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ai_edge_torch_nightly-0.3.0.dev20240913.dist-info/METADATA,sha256=ahbsMN1e0Tuq_LmrkB6NE-VgVTC65KEiZX3VVmTbcWQ,1859
+ai_edge_torch_nightly-0.3.0.dev20240913.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+ai_edge_torch_nightly-0.3.0.dev20240913.dist-info/top_level.txt,sha256=5KXRaF2hwkApYxf7Y8y_tVb9aulGTlbOoNdbx1aKRkE,14
+ai_edge_torch_nightly-0.3.0.dev20240913.dist-info/RECORD,,