ai-edge-torch-nightly 0.3.0.dev20241028__py3-none-any.whl → 0.3.0.dev20241030__py3-none-any.whl
- ai_edge_torch/generative/examples/stable_diffusion/convert_to_tflite.py +14 -2
- ai_edge_torch/generative/examples/stable_diffusion/decoder.py +21 -7
- ai_edge_torch/generative/examples/stable_diffusion/diffusion.py +51 -19
- ai_edge_torch/generative/layers/unet/model_config.py +3 -0
- ai_edge_torch/generative/quantize/example.py +5 -2
- ai_edge_torch/version.py +1 -1
- {ai_edge_torch_nightly-0.3.0.dev20241028.dist-info → ai_edge_torch_nightly-0.3.0.dev20241030.dist-info}/METADATA +1 -1
- {ai_edge_torch_nightly-0.3.0.dev20241028.dist-info → ai_edge_torch_nightly-0.3.0.dev20241030.dist-info}/RECORD +11 -11
- {ai_edge_torch_nightly-0.3.0.dev20241028.dist-info → ai_edge_torch_nightly-0.3.0.dev20241030.dist-info}/LICENSE +0 -0
- {ai_edge_torch_nightly-0.3.0.dev20241028.dist-info → ai_edge_torch_nightly-0.3.0.dev20241030.dist-info}/WHEEL +0 -0
- {ai_edge_torch_nightly-0.3.0.dev20241028.dist-info → ai_edge_torch_nightly-0.3.0.dev20241030.dist-info}/top_level.txt +0 -0
ai_edge_torch/generative/examples/stable_diffusion/convert_to_tflite.py CHANGED
@@ -61,6 +61,14 @@ _QUANTIZE = flags.DEFINE_bool(
     default=True,
 )
 
+_DEVICE_TYPE = flags.DEFINE_string(
+    'device_type',
+    None,
+    help='The device type of the model. Currently supported: cpu, gpu.',
+    default='cpu',
+    required=True,
+)
+
 
 @torch.inference_mode
 def convert_stable_diffusion_to_tflite(
@@ -80,13 +88,17 @@ def convert_stable_diffusion_to_tflite(
   )
   loader.load(clip_model, strict=False)
 
-  diffusion_model = diffusion.Diffusion(
+  diffusion_model = diffusion.Diffusion(
+      diffusion.get_model_config(batch_size=2, device_type=_DEVICE_TYPE.value)
+  )
   diffusion_loader = stable_diffusion_loader.DiffusionModelLoader(
       diffusion_ckpt_path, diffusion.TENSOR_NAMES
   )
   diffusion_loader.load(diffusion_model, strict=False)
 
-  decoder_model = decoder.Decoder(
+  decoder_model = decoder.Decoder(
+      decoder.get_model_config(device_type=_DEVICE_TYPE.value)
+  )
   decoder_loader = stable_diffusion_loader.AutoEncoderModelLoader(
       decoder_ckpt_path, decoder.TENSOR_NAMES
   )
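Net effect of these two hunks: the converter threads a single `--device_type` flag into both the diffusion and decoder model configs. A minimal sketch of the resulting flow (not the script itself; the literal 'gpu' value stands in for the parsed flag, and the guidance-batch remark is my reading of `batch_size=2`):

from ai_edge_torch.generative.examples.stable_diffusion import decoder, diffusion

device_type = 'gpu'  # stands in for _DEVICE_TYPE.value after flag parsing

# batch_size=2 matches the converter above, presumably one slot each for the
# conditional and unconditional (classifier-free guidance) prompt.
diffusion_model = diffusion.Diffusion(
    diffusion.get_model_config(batch_size=2, device_type=device_type)
)
decoder_model = decoder.Decoder(
    decoder.get_model_config(device_type=device_type)
)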
ai_edge_torch/generative/examples/stable_diffusion/decoder.py CHANGED
@@ -270,8 +270,8 @@ class Decoder(nn.Module):
     return x
 
 
-def get_model_config() -> unet_cfg.AutoEncoderConfig:
-  """Get configs for the Decoder of Stable Diffusion v1.5"""
+def get_model_config(device_type: str = "cpu") -> unet_cfg.AutoEncoderConfig:
+  """Get configs for the Decoder of Stable Diffusion v1.5."""
   in_channels = 3
   latent_channels = 4
   out_channels = 3
@@ -279,8 +279,14 @@ def get_model_config() -> unet_cfg.AutoEncoderConfig:
   scaling_factor = 0.18215
   layers_per_block = 3
 
+  # For now, only turns on StableHLO composite ops on GPU backend for better
+  # performance. CPU should also switch to it once the support is done.
+  enable_hlfb = True if device_type == "gpu" else False
+
   norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.GROUP_NORM,
+      layers_cfg.NormalizationType.GROUP_NORM,
+      group_num=32,
+      enable_hlfb=enable_hlfb,
   )
 
   att_config = unet_cfg.AttentionBlock2DConfig(
@@ -298,7 +304,7 @@ def get_model_config() -> unet_cfg.AutoEncoderConfig:
         rotary_base=0,
         rotary_percentage=0.0,
     ),
-    enable_hlfb=
+    enable_hlfb=enable_hlfb,
   )
 
   mid_block_config = unet_cfg.MidBlock2DConfig(
@@ -327,7 +333,9 @@ def get_model_config() -> unet_cfg.AutoEncoderConfig:
   return config
 
 
-def get_fake_model_config() -> unet_cfg.AutoEncoderConfig:
+def get_fake_model_config(
+    device_type: str = "cpu",
+) -> unet_cfg.AutoEncoderConfig:
   """Get fake configs for the Decoder of Stable Diffusion v1.5 for testing."""
   in_channels = 3
   latent_channels = 4
@@ -336,8 +344,14 @@ def get_fake_model_config() -> unet_cfg.AutoEncoderConfig:
   scaling_factor = 0.18215
   layers_per_block = 2
 
+  # For now, only turns on StableHLO composite ops on GPU backend for better
+  # performance. CPU should also switch to it once the support is done.
+  enable_hlfb = True if device_type == "gpu" else False
+
   norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.GROUP_NORM,
+      layers_cfg.NormalizationType.GROUP_NORM,
+      group_num=2,
+      enable_hlfb=enable_hlfb,
   )
 
   att_config = unet_cfg.AttentionBlock2DConfig(
@@ -355,7 +369,7 @@ def get_fake_model_config() -> unet_cfg.AutoEncoderConfig:
        rotary_base=0,
        rotary_percentage=0.0,
     ),
-    enable_hlfb=
+    enable_hlfb=enable_hlfb,
   )
 
   mid_block_config = unet_cfg.MidBlock2DConfig(
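The same three-line pattern recurs throughout decoder.py (and diffusion.py below): derive one `enable_hlfb` boolean from `device_type`, then thread it through every normalization and attention config. A minimal standalone sketch of that gate (the helper name is mine, not the library's):

def _use_hlfb(device_type: str) -> bool:
  # Same predicate as the diff's `True if device_type == "gpu" else False`;
  # the bare comparison is the idiomatic equivalent.
  return device_type == "gpu"

assert _use_hlfb("gpu") and not _use_hlfb("cpu")

HLFB (high-level function boundary) marks these layers so they lower to StableHLO composite ops; per the in-diff comment, that path is only wired up for the GPU backend so far.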
ai_edge_torch/generative/examples/stable_diffusion/diffusion.py CHANGED
@@ -333,7 +333,7 @@ class Diffusion(nn.Module):
               dim=output_channel,
               num_query_groups=config.transformer_num_attention_heads,
             ),
-            enable_hlfb=
+            enable_hlfb=config.enable_hlfb,
           ),
           cross_attention_block_config=unet_cfg.CrossAttentionBlock2DConfig(
               query_dim=output_channel,
@@ -347,7 +347,7 @@ class Diffusion(nn.Module):
               dim=output_channel,
               num_query_groups=config.transformer_num_attention_heads,
             ),
-            enable_hlfb=
+            enable_hlfb=config.enable_hlfb,
           ),
           pre_conv_normalization_config=config.transformer_pre_conv_norm_config,
           feed_forward_block_config=unet_cfg.FeedForwardBlock2DConfig(
@@ -405,7 +405,7 @@ class Diffusion(nn.Module):
               dim=mid_block_channels,
               num_query_groups=config.transformer_num_attention_heads,
             ),
-            enable_hlfb=
+            enable_hlfb=config.enable_hlfb,
           ),
           cross_attention_block_config=unet_cfg.CrossAttentionBlock2DConfig(
               query_dim=mid_block_channels,
@@ -419,7 +419,7 @@ class Diffusion(nn.Module):
               dim=mid_block_channels,
               num_query_groups=config.transformer_num_attention_heads,
             ),
-            enable_hlfb=
+            enable_hlfb=config.enable_hlfb,
           ),
           pre_conv_normalization_config=config.transformer_pre_conv_norm_config,
           feed_forward_block_config=unet_cfg.FeedForwardBlock2DConfig(
@@ -478,7 +478,7 @@ class Diffusion(nn.Module):
               dim=output_channel,
               num_query_groups=config.transformer_num_attention_heads,
             ),
-            enable_hlfb=
+            enable_hlfb=config.enable_hlfb,
           ),
           cross_attention_block_config=unet_cfg.CrossAttentionBlock2DConfig(
               query_dim=output_channel,
@@ -492,7 +492,7 @@ class Diffusion(nn.Module):
               dim=output_channel,
               num_query_groups=config.transformer_num_attention_heads,
             ),
-            enable_hlfb=
+            enable_hlfb=config.enable_hlfb,
           ),
           pre_conv_normalization_config=config.transformer_pre_conv_norm_config,
           feed_forward_block_config=unet_cfg.FeedForwardBlock2DConfig(
@@ -581,13 +581,16 @@ class Diffusion(nn.Module):
     return x
 
 
-def get_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
-  """Get configs for the Diffusion model of Stable Diffusion v1.5.
+def get_model_config(
+    batch_size: int, device_type: str = "cpu"
+) -> unet_cfg.DiffusionModelConfig:
+  """Get configs for the Diffusion model of Stable Diffusion v1.5.
 
   Args:
     batch_size (int): the batch size of input.
+    device_type (str): the device type of the model. Default to "cpu".
 
-  Returns:
+  Returns:
     The configuration of diffusion model of Stable Diffusion v1.5.
   """
   in_channels = 4
@@ -596,9 +599,15 @@ def get_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
   layers_per_block = 2
   downsample_padding = 1
 
+  # For now, only turns on StableHLO composite ops on GPU backend for better
+  # performance. CPU should also switch to it once the support is done.
+  enable_hlfb = True if device_type == "gpu" else False
+
   # Residual configs.
   residual_norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.GROUP_NORM,
+      layers_cfg.NormalizationType.GROUP_NORM,
+      group_num=32,
+      enable_hlfb=enable_hlfb,
   )
   residual_activation_type = layers_cfg.ActivationType.SILU
 
@@ -607,10 +616,14 @@ def get_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
   transformer_batch_size = batch_size
   transformer_cross_attention_dim = 768  # Embedding from CLIP model
   transformer_pre_conv_norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.GROUP_NORM,
+      layers_cfg.NormalizationType.GROUP_NORM,
+      epsilon=1e-6,
+      group_num=32,
+      enable_hlfb=enable_hlfb,
   )
   transformer_norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.LAYER_NORM
+      layers_cfg.NormalizationType.LAYER_NORM,
+      enable_hlfb=enable_hlfb,
   )
   transformer_ff_activation_type = layers_cfg.ActivationType.GE_GLU
 
@@ -623,7 +636,9 @@ def get_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
 
   # Finaly layer configs.
   final_norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.GROUP_NORM,
+      layers_cfg.NormalizationType.GROUP_NORM,
+      group_num=32,
+      enable_hlfb=enable_hlfb,
   )
   final_activation_type = layers_cfg.ActivationType.SILU
 
@@ -646,16 +661,20 @@ def get_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
       time_embedding_blocks_dim=time_embedding_blocks_dim,
       final_norm_config=final_norm_config,
       final_activation_type=final_activation_type,
+      enable_hlfb=enable_hlfb,
   )
 
 
-def get_fake_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
+def get_fake_model_config(
+    batch_size: int, device_type: str = "cpu"
+) -> unet_cfg.DiffusionModelConfig:
   """Get fake configs for the Diffusion model of Stable Diffusion v1.5 for testing.
 
   Args:
     batch_size (int): the batch size of input.
+    device_type (str): the device type of the model. Default to "cpu".
 
-  Returns:
+  Returns:
     The configuration of diffusion model of Stable Diffusion v1.5.
   """
   in_channels = 4
@@ -664,9 +683,15 @@ def get_fake_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
   layers_per_block = 1
   downsample_padding = 1
 
+  # For now, only turns on StableHLO composite ops on GPU backend for better
+  # performance. CPU should also switch to it once the support is done.
+  enable_hlfb = True if device_type == "gpu" else False
+
   # Residual configs.
   residual_norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.GROUP_NORM,
+      layers_cfg.NormalizationType.GROUP_NORM,
+      group_num=2,
+      enable_hlfb=enable_hlfb,
   )
   residual_activation_type = layers_cfg.ActivationType.SILU
 
@@ -675,10 +700,14 @@ def get_fake_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
   transformer_batch_size = batch_size
   transformer_cross_attention_dim = 4  # Embedding from CLIP model
   transformer_pre_conv_norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.GROUP_NORM,
+      layers_cfg.NormalizationType.GROUP_NORM,
+      epsilon=1e-6,
+      group_num=2,
+      enable_hlfb=enable_hlfb,
   )
   transformer_norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.LAYER_NORM
+      layers_cfg.NormalizationType.LAYER_NORM,
+      enable_hlfb=enable_hlfb,
   )
   transformer_ff_activation_type = layers_cfg.ActivationType.GE_GLU
 
@@ -691,7 +720,9 @@ def get_fake_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
 
   # Finaly layer configs.
   final_norm_config = layers_cfg.NormalizationConfig(
-      layers_cfg.NormalizationType.GROUP_NORM,
+      layers_cfg.NormalizationType.GROUP_NORM,
+      group_num=2,
+      enable_hlfb=enable_hlfb,
   )
   final_activation_type = layers_cfg.ActivationType.SILU
 
@@ -714,4 +745,5 @@ def get_fake_model_config(batch_size: int) -> unet_cfg.DiffusionModelConfig:
       time_embedding_blocks_dim=time_embedding_blocks_dim,
       final_norm_config=final_norm_config,
       final_activation_type=final_activation_type,
+      enable_hlfb=enable_hlfb,
   )
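With the new signatures, `device_type` selects the HLFB behavior at config-construction time and is also stored on the returned config (the new `enable_hlfb=enable_hlfb` field), which `Diffusion.__init__` reads back as `config.enable_hlfb` in the hunks above. A hedged sketch; the attribute access assumes the +3 -0 change to generative/layers/unet/model_config.py in the summary adds `enable_hlfb` as a plain field on `DiffusionModelConfig`:

from ai_edge_torch.generative.examples.stable_diffusion import diffusion

cfg_cpu = diffusion.get_model_config(batch_size=2)                     # enable_hlfb False
cfg_gpu = diffusion.get_model_config(batch_size=2, device_type="gpu")  # enable_hlfb True

# Assumed field on unet_cfg.DiffusionModelConfig (see note above).
assert cfg_gpu.enable_hlfb and not cfg_cpu.enable_hlfb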
ai_edge_torch/generative/quantize/example.py CHANGED
@@ -15,7 +15,9 @@
 
 import ai_edge_torch
 from ai_edge_torch.generative.examples.gemma import gemma1
+from ai_edge_torch.generative.layers import kv_cache as kv_utils
 from ai_edge_torch.generative.quantize import quant_recipes
+from ai_edge_torch.generative.utilities import model_builder
 import numpy as np
 import torch
 
@@ -23,11 +25,12 @@ import torch
 def main():
   # Build a PyTorch model as usual
   config = gemma1.get_fake_model_config()
-  model =
+  model = model_builder.DecoderOnlyModel(config).eval()
   idx = torch.from_numpy(np.array([[1, 2, 3, 4]]))
   tokens = torch.full((1, 10), 0, dtype=torch.int, device="cpu")
   tokens[0, :4] = idx
   input_pos = torch.arange(0, 10, dtype=torch.int)
+  kv = kv_utils.KVCache.from_model_config(config)
 
   # Create a quantization recipe to be applied to the model
   quant_config = quant_recipes.full_int8_dynamic_recipe()
@@ -35,7 +38,7 @@ def main():
 
   # Convert with quantization
   edge_model = ai_edge_torch.convert(
-      model, (tokens, input_pos), quant_config=quant_config
+      model, (tokens, input_pos, kv), quant_config=quant_config
   )
   edge_model.export("/tmp/gemma_2b_quantized.tflite")
 
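For readability, here is the updated example reassembled from the three hunks above into one script. A sketch: the inline comments and the trailing __main__ guard are editorial additions, and the removed half of the `model = ...` line is truncated in the diff view, so the reconstruction uses the added side throughout.

import ai_edge_torch
from ai_edge_torch.generative.examples.gemma import gemma1
from ai_edge_torch.generative.layers import kv_cache as kv_utils
from ai_edge_torch.generative.quantize import quant_recipes
from ai_edge_torch.generative.utilities import model_builder
import numpy as np
import torch


def main():
  # Build a PyTorch model as usual; the example now goes through the shared
  # model_builder.DecoderOnlyModel instead of a model-specific class.
  config = gemma1.get_fake_model_config()
  model = model_builder.DecoderOnlyModel(config).eval()
  idx = torch.from_numpy(np.array([[1, 2, 3, 4]]))
  tokens = torch.full((1, 10), 0, dtype=torch.int, device="cpu")
  tokens[0, :4] = idx
  input_pos = torch.arange(0, 10, dtype=torch.int)
  # New: build an explicit KV cache from the model config and pass it as a
  # third sample input, matching the externalized-cache calling convention.
  kv = kv_utils.KVCache.from_model_config(config)

  # Create a quantization recipe to be applied to the model
  quant_config = quant_recipes.full_int8_dynamic_recipe()

  # Convert with quantization
  edge_model = ai_edge_torch.convert(
      model, (tokens, input_pos, kv), quant_config=quant_config
  )
  edge_model.export("/tmp/gemma_2b_quantized.tflite")


if __name__ == "__main__":
  main()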
ai_edge_torch/version.py CHANGED
(+1 -1: the version string is bumped from "0.3.0.dev20241028" to "0.3.0.dev20241030".)

{ai_edge_torch_nightly-0.3.0.dev20241028.dist-info → ai_edge_torch_nightly-0.3.0.dev20241030.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ai-edge-torch-nightly
-Version: 0.3.0.dev20241028
+Version: 0.3.0.dev20241030
 Summary: Supporting PyTorch models with the Google AI Edge TFLite runtime.
 Home-page: https://github.com/google-ai-edge/ai-edge-torch
 Keywords: On-Device ML,AI,Google,TFLite,PyTorch,LLMs,GenAI
{ai_edge_torch_nightly-0.3.0.dev20241028.dist-info → ai_edge_torch_nightly-0.3.0.dev20241030.dist-info}/RECORD CHANGED
@@ -3,7 +3,7 @@ ai_edge_torch/config.py,sha256=FMWeCH2b7HYILBvaI1iZNnYCO4WAhDOwBZBmIE-xrF0,909
 ai_edge_torch/conftest.py,sha256=r0GTrhMRhlmOGrrkvumHN8hkmyug6WvF60vWq8wRIBI,758
 ai_edge_torch/fx_pass_base.py,sha256=SrYveglaiA_DXPoRBqSXClWM1q7853I5ujRorq_MV0M,4251
 ai_edge_torch/model.py,sha256=N-pNpTxzhaFGhWhnSGd70lBzb9VlEhTOq5mddU7bvvI,5542
-ai_edge_torch/version.py,sha256=
+ai_edge_torch/version.py,sha256=MlL0epetNoc10jxbXPvsj8gL4DSmIQTJq51OGNz2Qhc,706
 ai_edge_torch/_convert/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/_convert/conversion.py,sha256=HwzfRx_DX5TLtPqwEH1_NOm38_INvHzHl4_mX67KOdQ,5448
 ai_edge_torch/_convert/conversion_utils.py,sha256=Sr8qXVcTwc-ZnZmK7yxVrIOOp1S_vNrwzC0zUvLTI2o,2160
@@ -78,9 +78,9 @@ ai_edge_torch/generative/examples/smollm/verify.py,sha256=HXYcCjDJMylVL3Pc9HU-UX
 ai_edge_torch/generative/examples/stable_diffusion/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/examples/stable_diffusion/attention.py,sha256=kDWG6MlIGa89zC5KSRcJlw2c4ITuw8KcchtfmF55f4g,3545
 ai_edge_torch/generative/examples/stable_diffusion/clip.py,sha256=5M4auM33SgCTODt0VT8TO-EVILruqGDRiNILBPeB83Y,6072
-ai_edge_torch/generative/examples/stable_diffusion/convert_to_tflite.py,sha256=
-ai_edge_torch/generative/examples/stable_diffusion/decoder.py,sha256=
-ai_edge_torch/generative/examples/stable_diffusion/diffusion.py,sha256=
+ai_edge_torch/generative/examples/stable_diffusion/convert_to_tflite.py,sha256=Fw0ZsJSG8fM-07mEi6QLCn6LpRveGGL8vt7OIn0Av5c,5276
+ai_edge_torch/generative/examples/stable_diffusion/decoder.py,sha256=sQKQ-k6H9kG2brgwLsktjCMeN2h0POyfMP6iNsPNKWc,16271
+ai_edge_torch/generative/examples/stable_diffusion/diffusion.py,sha256=6W58LxmHHkz2ctgpknQkyoDANZAnE9Byp_svfqLpQf0,34793
 ai_edge_torch/generative/examples/stable_diffusion/encoder.py,sha256=CAPsW84A8f00nS6fLFeh_XUjCPsDCA5UxHOUsMrLfSU,3450
 ai_edge_torch/generative/examples/stable_diffusion/pipeline.py,sha256=GnY3vPZ-obrWuJifuE5bUooKLqAI7v6q71oaTuLKeBE,8778
 ai_edge_torch/generative/examples/stable_diffusion/tokenizer.py,sha256=xychak9hdLd6ieXBYEwrK2BkF8NRZWZSSCijIsESpBA,3420
@@ -117,9 +117,9 @@ ai_edge_torch/generative/layers/scaled_dot_product_attention.py,sha256=gXxh3papK
 ai_edge_torch/generative/layers/unet/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/generative/layers/unet/blocks_2d.py,sha256=JwndhL3Z31TvkdGlAoTL5PQzmKfHdRWaaE1EbaMI4Gs,27540
 ai_edge_torch/generative/layers/unet/builder.py,sha256=zAqWXdimmMrQRhmE_t9XkS68mh6PSrzwb-2NZZXrR5I,1901
-ai_edge_torch/generative/layers/unet/model_config.py,sha256=
+ai_edge_torch/generative/layers/unet/model_config.py,sha256=raYm8Ol-EFi0zs5vNqmj2ZJCFsnQW2TfwhgDcClfwFA,9356
 ai_edge_torch/generative/quantize/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
-ai_edge_torch/generative/quantize/example.py,sha256=
+ai_edge_torch/generative/quantize/example.py,sha256=1lfVNUd2cEyRUnoZ7BLbRJ9IN-FTKiWBtZNPFUzAiWE,1747
 ai_edge_torch/generative/quantize/quant_attrs.py,sha256=n1Fm8BFC8gJa_oiwwAOOghJyHtOXYZ4q-5ZRy4pHrIw,1957
 ai_edge_torch/generative/quantize/quant_recipe.py,sha256=tKnuJq6hPD23JPCB9nPAlE1UHAwdbChkgPShiVaz4CE,5156
 ai_edge_torch/generative/quantize/quant_recipe_utils.py,sha256=4fgmP_GgeiFUOkIaC9ZZXC12eO3DQZdrWDXRz5YXiwU,2270
@@ -186,8 +186,8 @@ ai_edge_torch/quantize/quant_config.py,sha256=U0KisSW-uZkoMJcy-ZP9W57p3tsa594fr9
 ai_edge_torch/testing/__init__.py,sha256=hHLluseD2R0Hh4W6XZRIXY_dRQeYudjsrKGf6LZz65g,671
 ai_edge_torch/testing/model_coverage/__init__.py,sha256=5P8J6Zk5YYtDvTBucFvB9NGSRI7Gw_24WnrbhXgycEE,765
 ai_edge_torch/testing/model_coverage/model_coverage.py,sha256=UPB448aMDUyC0HNYVqio2rcJPnDN0tBQMP08J6vPYew,4718
-ai_edge_torch_nightly-0.3.0.dev20241028.dist-info/LICENSE,sha256=
-ai_edge_torch_nightly-0.3.0.dev20241028.dist-info/METADATA,sha256=
-ai_edge_torch_nightly-0.3.0.dev20241028.dist-info/WHEEL,sha256=
-ai_edge_torch_nightly-0.3.0.dev20241028.dist-info/top_level.txt,sha256=
-ai_edge_torch_nightly-0.3.0.dev20241028.dist-info/RECORD,,
+ai_edge_torch_nightly-0.3.0.dev20241030.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ai_edge_torch_nightly-0.3.0.dev20241030.dist-info/METADATA,sha256=OmF5Xcg12MFdHCNcW_0A4l8s7mU0yUXSsjkGRHiRnBI,1897
+ai_edge_torch_nightly-0.3.0.dev20241030.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+ai_edge_torch_nightly-0.3.0.dev20241030.dist-info/top_level.txt,sha256=5KXRaF2hwkApYxf7Y8y_tVb9aulGTlbOoNdbx1aKRkE,14
+ai_edge_torch_nightly-0.3.0.dev20241030.dist-info/RECORD,,