ai-edge-torch-nightly 0.2.0.dev20240714__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ai-edge-torch-nightly might be problematic; see the registry page for details.
- ai_edge_torch/__init__.py +31 -0
- ai_edge_torch/convert/__init__.py +14 -0
- ai_edge_torch/convert/conversion.py +117 -0
- ai_edge_torch/convert/conversion_utils.py +400 -0
- ai_edge_torch/convert/converter.py +202 -0
- ai_edge_torch/convert/fx_passes/__init__.py +59 -0
- ai_edge_torch/convert/fx_passes/_pass_base.py +49 -0
- ai_edge_torch/convert/fx_passes/build_aten_composite_pass.py +225 -0
- ai_edge_torch/convert/fx_passes/build_interpolate_composite_pass.py +123 -0
- ai_edge_torch/convert/fx_passes/canonicalize_pass.py +37 -0
- ai_edge_torch/convert/fx_passes/inject_mlir_debuginfo_pass.py +73 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/__init__.py +16 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/layout_check.py +215 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/layout_mark.py +48 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/layout_partitioners/__init__.py +17 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/layout_partitioners/greedy.py +59 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/layout_partitioners/min_cut.py +215 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/layout_rewrite.py +400 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/op_func_registry.py +30 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/pass_body.py +293 -0
- ai_edge_torch/convert/fx_passes/optimize_layout_transposes_pass/utils.py +62 -0
- ai_edge_torch/convert/test/__init__.py +14 -0
- ai_edge_torch/convert/test/test_convert.py +311 -0
- ai_edge_torch/convert/test/test_convert_composites.py +192 -0
- ai_edge_torch/convert/test/test_convert_multisig.py +139 -0
- ai_edge_torch/convert/test/test_to_channel_last_io.py +96 -0
- ai_edge_torch/convert/to_channel_last_io.py +85 -0
- ai_edge_torch/debug/__init__.py +17 -0
- ai_edge_torch/debug/culprit.py +464 -0
- ai_edge_torch/debug/test/__init__.py +14 -0
- ai_edge_torch/debug/test/test_culprit.py +133 -0
- ai_edge_torch/debug/test/test_search_model.py +50 -0
- ai_edge_torch/debug/utils.py +48 -0
- ai_edge_torch/experimental/__init__.py +14 -0
- ai_edge_torch/generative/__init__.py +14 -0
- ai_edge_torch/generative/examples/__init__.py +14 -0
- ai_edge_torch/generative/examples/gemma/__init__.py +14 -0
- ai_edge_torch/generative/examples/gemma/convert_to_tflite.py +66 -0
- ai_edge_torch/generative/examples/gemma/gemma.py +174 -0
- ai_edge_torch/generative/examples/phi2/__init__.py +14 -0
- ai_edge_torch/generative/examples/phi2/convert_to_tflite.py +64 -0
- ai_edge_torch/generative/examples/phi2/phi2.py +164 -0
- ai_edge_torch/generative/examples/stable_diffusion/__init__.py +14 -0
- ai_edge_torch/generative/examples/stable_diffusion/attention.py +106 -0
- ai_edge_torch/generative/examples/stable_diffusion/clip.py +115 -0
- ai_edge_torch/generative/examples/stable_diffusion/convert_to_tflite.py +142 -0
- ai_edge_torch/generative/examples/stable_diffusion/decoder.py +317 -0
- ai_edge_torch/generative/examples/stable_diffusion/diffusion.py +573 -0
- ai_edge_torch/generative/examples/stable_diffusion/encoder.py +118 -0
- ai_edge_torch/generative/examples/stable_diffusion/pipeline.py +222 -0
- ai_edge_torch/generative/examples/stable_diffusion/samplers/__init__.py +19 -0
- ai_edge_torch/generative/examples/stable_diffusion/samplers/k_euler.py +61 -0
- ai_edge_torch/generative/examples/stable_diffusion/samplers/k_euler_ancestral.py +65 -0
- ai_edge_torch/generative/examples/stable_diffusion/samplers/k_lms.py +73 -0
- ai_edge_torch/generative/examples/stable_diffusion/samplers/sampler.py +38 -0
- ai_edge_torch/generative/examples/stable_diffusion/tokenizer.py +108 -0
- ai_edge_torch/generative/examples/stable_diffusion/util.py +71 -0
- ai_edge_torch/generative/examples/t5/__init__.py +14 -0
- ai_edge_torch/generative/examples/t5/convert_to_tflite.py +135 -0
- ai_edge_torch/generative/examples/t5/t5.py +608 -0
- ai_edge_torch/generative/examples/t5/t5_attention.py +231 -0
- ai_edge_torch/generative/examples/test_models/__init__.py +14 -0
- ai_edge_torch/generative/examples/test_models/toy_model.py +122 -0
- ai_edge_torch/generative/examples/test_models/toy_model_with_external_kv_cache.py +161 -0
- ai_edge_torch/generative/examples/test_models/toy_model_with_kv_cache.py +143 -0
- ai_edge_torch/generative/examples/tiny_llama/__init__.py +0 -0
- ai_edge_torch/generative/examples/tiny_llama/convert_to_tflite.py +66 -0
- ai_edge_torch/generative/examples/tiny_llama/tiny_llama.py +164 -0
- ai_edge_torch/generative/fx_passes/__init__.py +31 -0
- ai_edge_torch/generative/fx_passes/remove_sdpa_zero_mask_pass.py +47 -0
- ai_edge_torch/generative/layers/__init__.py +14 -0
- ai_edge_torch/generative/layers/attention.py +354 -0
- ai_edge_torch/generative/layers/attention_utils.py +169 -0
- ai_edge_torch/generative/layers/builder.py +131 -0
- ai_edge_torch/generative/layers/feed_forward.py +95 -0
- ai_edge_torch/generative/layers/kv_cache.py +83 -0
- ai_edge_torch/generative/layers/model_config.py +158 -0
- ai_edge_torch/generative/layers/normalization.py +62 -0
- ai_edge_torch/generative/layers/rotary_position_embedding.py +36 -0
- ai_edge_torch/generative/layers/scaled_dot_product_attention.py +117 -0
- ai_edge_torch/generative/layers/unet/__init__.py +14 -0
- ai_edge_torch/generative/layers/unet/blocks_2d.py +711 -0
- ai_edge_torch/generative/layers/unet/builder.py +47 -0
- ai_edge_torch/generative/layers/unet/model_config.py +269 -0
- ai_edge_torch/generative/quantize/__init__.py +14 -0
- ai_edge_torch/generative/quantize/ai_edge_quantizer_glue/__init__.py +0 -0
- ai_edge_torch/generative/quantize/ai_edge_quantizer_glue/translate_recipe.py +148 -0
- ai_edge_torch/generative/quantize/example.py +45 -0
- ai_edge_torch/generative/quantize/quant_attrs.py +68 -0
- ai_edge_torch/generative/quantize/quant_recipe.py +151 -0
- ai_edge_torch/generative/quantize/quant_recipe_utils.py +51 -0
- ai_edge_torch/generative/quantize/quant_recipes.py +48 -0
- ai_edge_torch/generative/quantize/supported_schemes.py +32 -0
- ai_edge_torch/generative/test/__init__.py +14 -0
- ai_edge_torch/generative/test/loader_test.py +80 -0
- ai_edge_torch/generative/test/test_model_conversion.py +235 -0
- ai_edge_torch/generative/test/test_quantize.py +162 -0
- ai_edge_torch/generative/utilities/__init__.py +15 -0
- ai_edge_torch/generative/utilities/loader.py +328 -0
- ai_edge_torch/generative/utilities/stable_diffusion_loader.py +924 -0
- ai_edge_torch/generative/utilities/t5_loader.py +483 -0
- ai_edge_torch/hlfb/__init__.py +16 -0
- ai_edge_torch/hlfb/mark_pattern/__init__.py +139 -0
- ai_edge_torch/hlfb/mark_pattern/passes.py +42 -0
- ai_edge_torch/hlfb/mark_pattern/pattern.py +273 -0
- ai_edge_torch/hlfb/test/__init__.py +14 -0
- ai_edge_torch/hlfb/test/test_mark_pattern.py +133 -0
- ai_edge_torch/hlfb/test/test_stablehlo_composite_builder.py +270 -0
- ai_edge_torch/model.py +142 -0
- ai_edge_torch/quantize/__init__.py +16 -0
- ai_edge_torch/quantize/pt2e_quantizer.py +438 -0
- ai_edge_torch/quantize/pt2e_quantizer_utils.py +1041 -0
- ai_edge_torch/quantize/quant_config.py +81 -0
- ai_edge_torch/testing/__init__.py +14 -0
- ai_edge_torch/testing/model_coverage/__init__.py +16 -0
- ai_edge_torch/testing/model_coverage/model_coverage.py +132 -0
- ai_edge_torch_nightly-0.2.0.dev20240714.dist-info/LICENSE +202 -0
- ai_edge_torch_nightly-0.2.0.dev20240714.dist-info/METADATA +38 -0
- ai_edge_torch_nightly-0.2.0.dev20240714.dist-info/RECORD +121 -0
- ai_edge_torch_nightly-0.2.0.dev20240714.dist-info/WHEEL +5 -0
- ai_edge_torch_nightly-0.2.0.dev20240714.dist-info/top_level.txt +1 -0
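For orientation, a minimal end-to-end conversion sketch (not from this diff), assuming the package's documented top-level API: ai_edge_torch.convert returns an edge model exposing export, per converter.py and model.py in the manifest above. The torchvision model choice is illustrative only.

import torch
import torchvision
import ai_edge_torch

# Trace a PyTorch module with sample inputs and serialize it as a
# TFLite flatbuffer (API assumed from the package's public docs).
resnet = torchvision.models.resnet18(weights=None).eval()
sample_inputs = (torch.randn(1, 3, 224, 224),)
edge_model = ai_edge_torch.convert(resnet, sample_inputs)
edge_model.export("resnet18.tflite")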
+++ ai_edge_torch/generative/layers/rotary_position_embedding.py
@@ -0,0 +1,36 @@
+# Copyright 2024 The AI Edge Torch Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# Implementation for Rotary Position embedding. https://arxiv.org/pdf/2104.09864.pdf
+import torch
+
+
+def apply_rope(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
+  """Computes rotary positional embedding.
+
+  Args:
+    x(torch.Tensor): the input tensor.
+    cos(torch.Tensor): cosine value for the rope.
+    sin(torch.Tensor): sin value for the rope.
+
+  Returns:
+    output tensor of RoPE.
+  """
+  x = x.transpose(1, 2)
+  head_size = x.size(-1)
+  x1 = x[..., : head_size // 2]  # (B, nh, T, hs/2)
+  x2 = x[..., head_size // 2 :]  # (B, nh, T, hs/2)
+  rotated = torch.cat((-x2, x1), dim=-1)  # (B, nh, T, hs)
+  roped = (x * cos) + (rotated * sin)
+  return roped.transpose(1, 2).type_as(x)
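For reference, a minimal usage sketch of apply_rope (not part of the diff). The input layout (B, T, nh, hs) follows the shape comments in the function body, and the cos/sin cache below is the standard RoPE construction, assumed here for illustration; the package presumably builds its own cache in ai_edge_torch/generative/layers/attention_utils.py.

import torch

B, T, nh, hs = 1, 8, 4, 16     # batch, sequence length, num heads, head size
x = torch.randn(B, T, nh, hs)

# Standard RoPE angle cache (assumed construction, base theta = 10000):
# one frequency per channel pair, duplicated across both halves to match
# the rotate-half scheme used by apply_rope.
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, hs, 2).float() / hs))
angles = torch.outer(torch.arange(T).float(), inv_freq)  # (T, hs/2)
angles = torch.cat((angles, angles), dim=-1)             # (T, hs)
cos, sin = angles.cos(), angles.sin()  # broadcast against (B, nh, T, hs)

out = apply_rope(x, cos, sin)
assert out.shape == x.shape  # the rotation preserves the input shape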
+++ ai_edge_torch/generative/layers/scaled_dot_product_attention.py
@@ -0,0 +1,117 @@
+# Copyright 2024 The AI Edge Torch Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# Implements scaled dot product attention.
+
+import math
+from typing import Optional
+
+import torch
+import torch.nn.functional as F
+
+from ai_edge_torch.hlfb import StableHLOCompositeBuilder
+
+
+def scaled_dot_product_attention(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    v: torch.Tensor,
+    head_size: int,
+    mask: Optional[torch.Tensor] = None,
+    scale: Optional[float] = None,
+):
+  """Scaled dot product attention.
+
+  Args:
+    q (torch.Tensor): Query tensor, with shape [B, T, N, H].
+    k (torch.Tensor): Key tensor, with shape [B, T, KV_LEN, H].
+    v (torch.Tensor): Value tensor, with shape [B, T, KV_LEN, H].
+    head_size (int): head dimension.
+    mask (torch.Tensor): the optional mask tensor.
+
+  Returns:
+    The output tensor of scaled_dot_product_attention.
+  """
+
+  if scale is None:
+    scale = 1.0 / math.sqrt(head_size)
+
+  q = q.transpose(1, 2)
+  k = k.transpose(1, 2)
+  v = v.transpose(1, 2)
+  if q.size() != k.size():
+    # Handle the GQA case, where q.shape[1] % k.shape[1] == 0.
+    k = k.repeat_interleave(q.shape[1] // k.shape[1], dim=1)
+    v = v.repeat_interleave(q.shape[1] // v.shape[1], dim=1)
+  y = F.scaled_dot_product_attention(
+      q,
+      k,
+      v,
+      attn_mask=mask,
+      dropout_p=0.0,
+      is_causal=mask is None,
+      scale=scale,
+  )
+  return y.transpose(1, 2)
+
+
+def scaled_dot_product_attention_with_hlfb(
+    q: torch.Tensor,
+    k: torch.Tensor,
+    v: torch.Tensor,
+    head_size: int,
+    mask: Optional[torch.Tensor] = None,
+    scale: Optional[float] = None,
+):
+  """Scaled dot product attention with high-level function boundary enabled.
+
+  Args:
+    q (torch.Tensor): Query tensor, with shape [B, T, N, H].
+    k (torch.Tensor): Key tensor, with shape [B, T, KV_LEN, H].
+    v (torch.Tensor): Value tensor, with shape [B, T, KV_LEN, H].
+    head_size (int): head dimension.
+    mask (torch.Tensor): the optional mask tensor.
+
+  Returns:
+    The output tensor of scaled_dot_product_attention.
+  """
+
+  if scale is None:
+    scale = 1.0 / math.sqrt(head_size)
+
+  builder = StableHLOCompositeBuilder(
+      name="odml.scaled_dot_product_attention", attr={"scale": scale}
+  )
+  q, k, v, mask = builder.mark_inputs(q, k, v, mask)
+
+  q = q.transpose(1, 2)
+  k = k.transpose(1, 2)
+  v = v.transpose(1, 2)
+  if q.size() != k.size():
+    # Handle the GQA case, where q.shape[1] % k.shape[1] == 0.
+    k = k.repeat_interleave(q.shape[1] // k.shape[1], dim=1)
+    v = v.repeat_interleave(q.shape[1] // v.shape[1], dim=1)
+  y = F.scaled_dot_product_attention(
+      q,
+      k,
+      v,
+      attn_mask=mask,
+      dropout_p=0.0,
+      is_causal=mask is None,
+      scale=scale,
+  )
+
+  result = y.transpose(1, 2)
+  result = builder.mark_outputs(result)
+  return result
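Both SDPA variants are called identically; the sketch below (not part of the diff) exercises the grouped-query-attention branch of scaled_dot_product_attention, where four query heads share two KV heads and repeat_interleave expands k and v before the fused attention call. Leaving mask=None makes the call causal, since is_causal=mask is None above. The _with_hlfb variant adds no new arguments; it wraps the same computation in a StableHLO composite named "odml.scaled_dot_product_attention" so downstream tooling can match and lower it as one fused op.

import torch

B, T, H = 1, 8, 16           # batch, sequence length, head size
q = torch.randn(B, T, 4, H)  # 4 query heads, layout [B, T, N, H]
k = torch.randn(B, T, 2, H)  # 2 KV heads; 4 % 2 == 0, so the GQA branch fires
v = torch.randn(B, T, 2, H)

out = scaled_dot_product_attention(q, k, v, head_size=H)  # causal (mask=None)
assert out.shape == (B, T, 4, H)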
@@ -0,0 +1,14 @@
+# Copyright 2024 The AI Edge Torch Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================