femtocrux 0.5.3__py3-none-any.whl → 0.6.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- femtocrux/VERSION +1 -1
- femtocrux/femtostack/__init__.py +7 -0
- femtocrux/femtostack/common/__init__.py +10 -0
- femtocrux/femtostack/common/frontend.py +109 -0
- femtocrux/femtostack/common/metrics.py +184 -0
- femtocrux/femtostack/common/sim_io.py +470 -0
- femtocrux/femtostack/tflite_api/__init__.py +6 -0
- femtocrux/femtostack/tflite_api/tflite_frontend.py +6 -0
- femtocrux/femtostack/torch_api/__init__.py +0 -0
- femtocrux/femtostack/torch_api/frontend.py +136 -0
- femtocrux/grpc/compiler_service_pb2_grpc.py +3 -3
- femtocrux/server/server.py +4 -4
- {femtocrux-0.5.3.dist-info → femtocrux-0.6.1.dist-info}/METADATA +1 -1
- femtocrux-0.6.1.dist-info/RECORD +30 -0
- {femtocrux-0.5.3.dist-info → femtocrux-0.6.1.dist-info}/WHEEL +1 -1
- femtocrux-0.5.3.dist-info/RECORD +0 -21
- {femtocrux-0.5.3.dist-info → femtocrux-0.6.1.dist-info}/LICENSE +0 -0
- {femtocrux-0.5.3.dist-info → femtocrux-0.6.1.dist-info}/top_level.txt +0 -0
femtocrux/VERSION
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
0.
|
|
1
|
+
0.6.1
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Copyright Femtosense 2024
|
|
3
|
+
|
|
4
|
+
By using this software package, you agree to abide by the terms and conditions
|
|
5
|
+
in the license agreement found at https://femtosense.ai/legal/eula/
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from .frontend import CompilerFrontend # noqa: F401
|
|
9
|
+
from .metrics import SimMetrics # noqa: F401
|
|
10
|
+
from .sim_io import IOConfig, SimIOWrapper # noqa: F401
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from .sim_io import SimIOWrapper
|
|
3
|
+
import os
|
|
4
|
+
from femtobehav.fasmir import FASMIR
|
|
5
|
+
from femtobehav.sim import SimRunner
|
|
6
|
+
import tempfile
|
|
7
|
+
from typing import Tuple, Any
|
|
8
|
+
import zipfile
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class CompilerFrontend:
    """A generic compiler frontend; must be subclassed for each input IR/framework.

    Subclasses implement :meth:`_compile`, which lowers the framework-specific
    input IR to FASMIR and builds a SimIOWrapper describing how user tensors
    map onto simulator inputs/outputs.
    """

    def __init__(
        self, input_ir: Any, fasmir: FASMIR = None, io_wrapper: SimIOWrapper = None
    ):
        # The framework-specific IR (e.g. an FQIR graph or TFLite model).
        self.input_ir = input_ir
        # Compilation artifacts; both remain None until compile() has run.
        self.fasmir = fasmir
        self.io_wrapper = io_wrapper

    @property
    def is_compiled(self) -> bool:
        """True once both compilation artifacts (FASMIR + IO wrapper) exist."""
        return self.fasmir is not None and self.io_wrapper is not None

    def _compile(self, input_ir: Any, options: dict) -> Tuple[FASMIR, SimIOWrapper]:
        """
        Runs FM compiler to generate FASMIR, and encode io information in a
        SimIOWrapper object.

        Must be implemented for each frontend subclass.

        Must return a tuple pair (FASMIR, SimIOWrapper)
        """
        raise NotImplementedError(
            "Subclasses need to implement this based on their input ir"
        )

    def compile(self, options: dict = None):
        """Compile the input IR; a no-op if already compiled.

        Arguments:
            options (dict, optional): compiler options forwarded to _compile.
                Defaults to an empty dict. (None is used as the default
                instead of a mutable `{}` to avoid cross-call sharing.)
        """
        if options is None:
            options = {}
        if not self.is_compiled:
            self.fasmir, self.io_wrapper = self._compile(self.input_ir, options)

    def dump_bitfile(self, encrypt: bool = True) -> bytes:
        """Dumps a bitfile used to program the SPU.

        The FASMIR memory images are written to a temporary directory,
        zip-archived (DEFLATE), and returned as raw bytes.

        Arguments:
            encrypt (bool): whether the memory images are encrypted.

        Raises:
            RuntimeError: if the model has not been compiled yet.
        """
        if not self.is_compiled:
            raise RuntimeError("Model must be compiled before dumping bitfile")

        with tempfile.TemporaryFile() as tmpfile:
            with tempfile.TemporaryDirectory() as dirname:
                # Dump memory files to a directory; reset/finish performs the
                # dump without running any inputs.
                runner = SimRunner(self.fasmir, data_dir=dirname, encrypt=encrypt)
                runner.reset()
                runner.finish()

                # Archive the directory
                with zipfile.ZipFile(
                    tmpfile, mode="w", compression=zipfile.ZIP_DEFLATED
                ) as archive:
                    for relpath in os.listdir(dirname):
                        abspath = os.path.join(dirname, relpath)
                        archive.write(abspath, arcname=relpath)

            # Read out the bytes in the archive
            tmpfile.seek(0)
            bitfile = tmpfile.read()

        return bitfile

    def _get_padded_len(self, fasmir: FASMIR, name: str):
        """Look up the padded (word-aligned) length of a FASMIR variable.

        Raises:
            ValueError: if no FASMIR variable matches `name`.
        """
        try:
            fasmir_var = fasmir.data_vars[name]
        except KeyError as exc:
            # Chain the original KeyError so the lookup failure is visible.
            raise ValueError(
                "Failed to find FASMIR variable corresponding to name %s" % name
            ) from exc
        return fasmir_var.numpy.shape[0]

    def run_behavioral_simulator(
        self,
        *args: np.ndarray,
        input_period: float = None,
        quantize_inputs=True,
        dequantize_outputs=True,
        **kwargs
    ):
        """
        Runs the behavioral simulator and returns outputs and metrics.

        Arguments:
            args (np.ndarray): Input tensors to the simulator, as numpy arrays. Either
                floating-point or integer (see `quantize_inputs` for
                more detail on input datatypes).
            input_period (float, optional): total simulation time.
            quantize_inputs (bool, optional): If True, then floating-point inputs will
                be quantized to integer before passing into the simulator.
                Otherwise, the simulator expects that the inputs will
                already be in integer format. Default True.
            dequantize_outputs (bool, optional): If True, the integer outputs from the
                simulator will be cast back to the original
                floating-point domain. Otherwise, the outputs will be
                returned as integers. Default True.

        Raises:
            RuntimeError: if the model has not been compiled yet.
        """
        # Guard added for consistency with dump_bitfile: previously an
        # uncompiled model surfaced as AttributeError on a None io_wrapper.
        if not self.is_compiled:
            raise RuntimeError("Model must be compiled before running the simulator")

        return self.io_wrapper.run(
            *args,
            input_period=input_period,
            quantize_inputs=quantize_inputs,
            dequantize_outputs=dequantize_outputs,
            **kwargs
        )
|
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import tabulate
|
|
3
|
+
from collections import defaultdict
|
|
4
|
+
from typing import List, Union, Dict
|
|
5
|
+
|
|
6
|
+
# SI prefixes keyed by their scale factor, from femto (1e-15) to giga (1e9).
PREFIXES = {
    1e-15: "f",
    1e-12: "p",
    1e-9: "n",
    1e-6: "µ",
    1e-3: "m",
    1: "",
    1e3: "k",
    1e6: "M",
    1e9: "G",
}


def to_sci(x):
    """Convert a float to a scientific-prefix formatted string

    e.g. 0.004531 -> '4.53 m'

    Negative values are formatted by magnitude, keeping their sign
    (e.g. -0.004531 -> '-4.53 m'). Zero, or values smaller than the
    smallest prefix, are formatted without a prefix.
    """
    ks = np.array(list(PREFIXES.keys()))
    # Select the largest prefix scale not exceeding |x|. The magnitude is
    # used consistently here: the previous code filtered on abs(x) but then
    # took np.max over ks[ks <= x], which is empty for any negative x and
    # raised "zero-size array to reduction operation" errors.
    ks_cut = ks[ks <= np.abs(x)]
    if len(ks_cut) > 0:
        k = np.max(ks_cut)
        pref = PREFIXES[k]
        return f"{x / k:.3g} {pref}"
    else:
        return f"{x:.3g} "
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _merge_dicts(dicts: List[dict]):
|
|
36
|
+
"""Merges list of dicts into dict of lists"""
|
|
37
|
+
output = defaultdict(list)
|
|
38
|
+
for d in dicts:
|
|
39
|
+
for k, v in d.items():
|
|
40
|
+
output[k].append(v)
|
|
41
|
+
for k, v in output.items():
|
|
42
|
+
if isinstance(v[0], dict):
|
|
43
|
+
output[k] = _merge_dicts(v)
|
|
44
|
+
return dict(output)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# Metric keys SimMetrics knows how to summarize; anything else is dropped.
ALLOWED_KEYS = [
    "Dynamic Energy/Frame (J)",
    "Total Energy/Frame (J)",
    "Latency (s)",
    "Static Energy/Frame (J)",
    "Memory",
    "Frames Simulated",
    "Power (W)",
]


class SimMetrics:
    """
    Object storing hardware simulator metrics.

    Arguments:
        metrics (list[dict] or dict[list]): measured metrics per batch
            from simulator. A list of per-batch dicts is merged into a
            dict of lists; keys outside ALLOWED_KEYS are discarded.
        dt (float, optional): total duration of the simulation, in seconds.
            If not provided, will use the total active time of the sim, but this
            will overlook the time spent sleeping.
        reduction_mode (str): how per-batch values are reduced: 'mean' or
            'sum'. Any other value raises ValueError.

    Attributes:
        total_energy: average total energy in Joules
        total_active_time: average time spent processing in seconds
        latency: active time divided by the number of processed input frames
        power: average power consumption, in Watts
        metrics: detailed metrics dictionary
    """

    def __init__(
        self,
        metrics: Union[List[Dict[str, float]], Dict[str, List[float]]],
        dt=None,
        reduction_mode: str = "mean",
    ):
        if isinstance(metrics, list):
            metrics = _merge_dicts(metrics)

        # Discard metrics we don't know how to summarize or report on.
        for k in list(metrics.keys()):
            if k not in ALLOWED_KEYS:
                metrics.pop(k)

        self.metrics = metrics
        self.dt = dt

        self.reduction_mode = reduction_mode
        if reduction_mode == "mean":
            self.reduce = np.mean
        elif reduction_mode == "sum":
            self.reduce = np.sum
        else:
            # Fail fast: previously an unrecognized mode left self.reduce
            # unset, surfacing later as a confusing AttributeError.
            raise ValueError(
                "Unknown reduction_mode %r; expected 'mean' or 'sum'" % reduction_mode
            )

    @property
    def num_frames(self):
        """Reduced count of input frames simulated per batch element."""
        return self.reduce(self.metrics["Frames Simulated"])

    @property
    def total_energy(self):
        """Total energy (J): reduced per-frame energy times the frame count."""
        return self.reduce(self.metrics["Total Energy/Frame (J)"]) * self.num_frames

    @property
    def total_dynamic_energy(self):
        """Total dynamic (switching) energy in Joules."""
        return self.reduce(self.metrics["Dynamic Energy/Frame (J)"]) * self.num_frames

    @property
    def total_static_energy(self):
        """Total static (leakage) energy in Joules."""
        return self.reduce(self.metrics["Static Energy/Frame (J)"]) * self.num_frames

    @property
    def total_active_time(self):
        """Total time (s) spent actively processing (excludes sleep)."""
        return self.reduce(self.metrics["Latency (s)"]) * self.num_frames

    @property
    def latency(self):
        """Reduced per-frame processing latency in seconds."""
        return self.reduce(self.metrics["Latency (s)"])

    @property
    def total_time(self):
        """Wall-clock duration (s).

        When an input period dt was supplied, this is dt * frames (but never
        less than the active time); otherwise just the active time.
        """
        if self.dt is not None:
            dt = self.dt * self.metrics["Frames Simulated"][0]
            return max(dt, self.total_active_time)
        else:
            return self.total_active_time

    @property
    def power(self):
        """Reduced average power draw in Watts."""
        return self.reduce(self.metrics["Power (W)"])

    def performance_report(self):
        """Return a tabulated energy/timing summary as a string."""
        report = [
            ["total energy", f"{to_sci(self.total_energy)}J"],
            ["total dynamic energy", f"{to_sci(self.total_dynamic_energy)}J"],
            ["total static energy", f"{to_sci(self.total_static_energy)}J"],
            ["power", f"{to_sci(self.power)}W"],
            ["total active time", f"{to_sci(self.total_active_time)}s"],
            ["total time", f"{to_sci(self.total_time)}s"],
            ["latency/frame", f"{to_sci(self.latency)}s"],
        ]
        output = tabulate.tabulate(report)
        return output

    def memory_report(self):
        """Return a tabulated used/capacity breakdown per memory type."""
        mem = self.metrics["Memory"]

        # Memory figures are identical across the batch, so index [0].
        out = {}
        for mem_type in ["Data Mem", "Instr Mem", "Table Mem"]:
            k = f"{mem_type} (B)"
            out[mem_type] = {
                "Used": int(mem["Used"][k][0]),
                "Capacity": int(mem["Capacity"][k][0]),
            }

        full_names = {
            "Data Mem": "Data Memory",
            "Instr Mem": "Instruction Memory",
            "Table Mem": "Table Memory",
        }

        report = [["Memory Type", "Used", "Capacity", "Percentage"]]
        for key, name in full_names.items():
            used = out[key]["Used"]
            cap = out[key]["Capacity"]
            pct = 100 * used / cap
            report.append([name, f"{to_sci(used)}B", f"{to_sci(cap)}B", f"{pct:.1f}%"])
        return tabulate.tabulate(report, headers="firstrow")

    def __repr__(self):
        perf = self.performance_report()
        mem = self.memory_report()
        output = f"Behavioral Simulator Metrics, {self.reduction_mode} over batches"
        output += f"\n\n{perf}"
        output += f"\n\n{mem}"

        # Frame the report with horizontal rules for readability.
        output = ("-" * 60 + "\n") + output + ("\n" + "-" * 60)
        return output
|
|
@@ -0,0 +1,470 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from femtobehav.fasmir import FASMIR
|
|
3
|
+
from femtobehav.sim import SimRunner
|
|
4
|
+
|
|
5
|
+
# from femtobehav.sim.runner import _yamlify_nested_dict
|
|
6
|
+
from collections import defaultdict
|
|
7
|
+
from typing import List, Dict, Union
|
|
8
|
+
import yaml
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Quantizer:
    """Converts a floating-point input to integer

    Arguments:
        precision (str): Output integer precision; 'i8' or 'STD' for int-8,
            'i16' or 'DBL' for int-16
        scale (float): scale value, a positive real number
        zero_point (int): zero-point, an integer
    """

    def __init__(self, precision: str, scale: float, zero_point: int = 0):
        # Normalize the legacy case-insensitive aliases to canonical names.
        alias = precision.upper()
        if alias == "STD":
            precision = "i8"
        elif alias == "DBL":
            precision = "i16"

        assert precision in ["i8", "i16"]
        self.scale = scale
        self.zero_point = zero_point
        self.precision = precision

    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Quantize x as round(x / scale + zero_point), clipped to the
        integer range of the configured precision."""
        bits, dtype = (8, np.int8) if self.precision == "i8" else (16, np.int16)
        lo = -(2 ** (bits - 1))
        hi = 2 ** (bits - 1) - 1
        quantized = np.round(x / self.scale + self.zero_point)
        return np.clip(quantized, lo, hi).astype(dtype)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _force_ints(x):
|
|
47
|
+
"""
|
|
48
|
+
GRPC turns everything to floats on the way in
|
|
49
|
+
cast to int, see if it matches, get angry otherwise
|
|
50
|
+
takes the place of a quantizer, but does nothing
|
|
51
|
+
"""
|
|
52
|
+
x_int = x.astype(int)
|
|
53
|
+
if not (x == x_int).all():
|
|
54
|
+
raise ValueError("trying to pass float into simulator without quantization")
|
|
55
|
+
return x_int
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class DeQuantizer:
    """Converts integer simulator outputs back to floating-point.

    Arguments:
        scale (float): scale value, a positive real number
        zero_point (int): zero-point, an integer

    Calling an instance returns a float32 numpy array computed as
    (x - zero_point) * scale.
    """

    def __init__(self, scale: float, zero_point: int = 0):
        self.scale = scale
        self.zero_point = zero_point

    def __call__(self, x: np.ndarray) -> np.ndarray:
        # Shift by the zero-point in float32, then rescale to the float domain.
        shifted = x.astype(np.float32) - self.zero_point
        return shifted * self.scale
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class Padder:
    """Applies padding to a single axis of a tensor

    Arguments:
        name (str): the input name, for error handling
        true_length (int): length of the axis before padding; the original
            length in the high-level IR
        padded_length (int): length of the axis after padding; padding is
            the result of the compiler fitting variables into integer multiples
            of the word-size
        axis (int, default -1): axis to apply padding to
    """

    def __init__(self, name: str, true_length: int, padded_length: int, axis: int = -1):
        self.name = name
        self.true_length = true_length
        self.padded_length = padded_length
        self.axis = axis

    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Zero-pad `x` along the configured axis up to padded_length.

        Raises:
            ValueError: if the axis length does not match true_length.
        """
        # Verify the input shape. (A redundant assert that re-checked the
        # same condition after this raise has been removed.)
        input_len = x.shape[self.axis]
        if input_len != self.true_length:
            raise ValueError(
                "Received unexpected shape for input %s. "
                "Expected a vector of length %d. (Received: %d)"
                % (self.name, self.true_length, input_len)
            )

        # Append zeros of the same dtype along the padded axis.
        pad_shape = list(x.shape)
        pad_shape[self.axis] = self.padded_length - self.true_length
        return np.concatenate([x, np.zeros(pad_shape, dtype=x.dtype)], axis=self.axis)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
class DePadder:
    """Removes padding from a single axis of a tensor

    Arguments:
        true_length (int): length of the axis before padding; the original
            length in the high-level IR
        padded_length (int): length of the axis after padding; padding is
            the result of the compiler fitting variables into integer multiples
            of the word-size
        axis (int, default -1): axis to remove padding from
    """

    def __init__(self, true_length: int, padded_length: int, axis: int = -1):
        self.true_length = true_length
        self.padded_length = padded_length
        self.axis = axis

    def __call__(self, x: np.ndarray) -> np.ndarray:
        # Keep only the first true_length entries of the padded axis,
        # leaving every other axis untouched.
        assert x.shape[self.axis] == self.padded_length
        index = [slice(None)] * x.ndim
        index[self.axis] = slice(self.true_length)
        return x[tuple(index)]
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def _standardize_dim(d, ndim):
|
|
144
|
+
if d is not None:
|
|
145
|
+
return d % ndim
|
|
146
|
+
return d
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class BatchSlicer:
    """Converts input multidimensional tensor into slices that iterate
    over batch and sequential dimensions
    """

    def __init__(self, batch_dim=None, seq_dim=None):
        # Positions of the batch/sequence axes in the user's array layout.
        # Either may be None (axis absent) or negative (counted from the end).
        self.batch_dim = batch_dim
        self.seq_dim = seq_dim

    def __call__(self, x: np.ndarray) -> List[List[np.ndarray]]:
        # first, standardize the input to shape (batch, time, features)
        # inserting unary dims if needed

        # handle negatively indexed dimensions
        ndim = x.ndim
        batch_dim, seq_dim = map(
            lambda u: _standardize_dim(u, ndim), [self.batch_dim, self.seq_dim]
        )

        # expand dims as needed
        if batch_dim is None:
            if seq_dim is None:
                # neither axis present: prepend singleton batch and time axes
                x = np.expand_dims(x, axis=[0, 1])
                batch_dim = 0
                seq_dim = 1
            else:
                # only seq present: prepend a singleton batch axis, which
                # shifts the existing seq axis right by one
                x = np.expand_dims(x, axis=0)
                batch_dim = 0
                seq_dim += 1
        elif seq_dim is None:
            # only batch present: a singleton axis is inserted at the front
            # and taken as the seq axis, shifting the batch axis right.
            # NOTE(review): here the new axis precedes batch (seq_dim=0),
            # unlike the other branches — confirm this asymmetry is intended.
            x = np.expand_dims(x, axis=0)
            seq_dim = 0
            batch_dim += 1

        # the single remaining axis of the three is the feature axis
        feature_dim = set((0, 1, 2))
        feature_dim.remove(batch_dim)
        feature_dim.remove(seq_dim)
        feature_dim = list(feature_dim)[0]

        # apply transposes
        x = np.transpose(x, [batch_dim, seq_dim, feature_dim])

        # convert to List[array( time, feature )]:
        out = [batch for batch in x]
        return out
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
def _get_btf(batch_dim, seq_dim):
|
|
197
|
+
ndim = 1 + int(batch_dim is not None) + int(seq_dim is not None)
|
|
198
|
+
batch_dim = _standardize_dim(batch_dim, ndim)
|
|
199
|
+
seq_dim = _standardize_dim(seq_dim, ndim)
|
|
200
|
+
|
|
201
|
+
feat_dim = set(range(ndim))
|
|
202
|
+
if batch_dim is not None:
|
|
203
|
+
feat_dim.remove(batch_dim)
|
|
204
|
+
if seq_dim is not None:
|
|
205
|
+
feat_dim.remove(seq_dim)
|
|
206
|
+
feat_dim = list(feat_dim)[0]
|
|
207
|
+
|
|
208
|
+
return batch_dim, seq_dim, feat_dim
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def _get_inverse_perm(perm):
|
|
212
|
+
out = [None] * len(perm)
|
|
213
|
+
for i, p in enumerate(perm):
|
|
214
|
+
out[p] = i
|
|
215
|
+
return out
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
def _inv_transpose(x, forward_perm):
|
|
219
|
+
inverse_perm = _get_inverse_perm(forward_perm)
|
|
220
|
+
return np.transpose(x, inverse_perm)
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
class BatchStacker:
    """Converts a list of list of tensors into a single tensor,
    given the desired batch/seq dim ordering
    """

    def __init__(self, batch_dim=None, seq_dim=None):
        # Target positions of the batch/seq axes in the stacked output;
        # None means that axis was absent in the original layout.
        self.batch_dim = batch_dim
        self.seq_dim = seq_dim

    def __call__(self, x: List[np.ndarray]) -> np.ndarray:
        # Stack the per-batch (time, feature) arrays into (batch, time, feature).
        x = np.stack([batch for batch in x], 0)

        batch_dim, seq_dim, feat_dim = _get_btf(self.batch_dim, self.seq_dim)

        if batch_dim is not None:
            if seq_dim is not None:
                # both axes requested: invert the (batch, seq, feat) transpose
                return _inv_transpose(x, [batch_dim, seq_dim, feat_dim])
            else:
                # no seq axis: drop the singleton time dimension first
                x = np.squeeze(x, 1)
                return _inv_transpose(x, [batch_dim, feat_dim])
        else:
            if seq_dim is not None:
                # no batch axis: drop the singleton batch dimension first
                x = np.squeeze(x, 0)
                return _inv_transpose(x, [seq_dim, feat_dim])
            else:
                # neither axis: the result is a single (feature,) vector
                return x[0, 0]
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
class IOConfig:
    """
    Per-tensor transform pipeline between user arrays and the simulator's
    integer format.

    For inputs the pipeline applied by __call__ is:
        quantize -> pad -> slice into per-batch arrays
    For outputs it is:
        stack per-batch arrays -> dequantize -> de-pad

    Any stage may be None, in which case it is skipped.

    recommended to use the get_input_io and get_output_io factory methods
    instead of constructing from init.
    """

    def __init__(
        self,
        name: str,
        quantizer: Union[Quantizer, DeQuantizer] = None,
        padder: Union[Padder, DePadder] = None,
        stacker: BatchStacker = None,
        slicer: BatchSlicer = None,
    ):
        # Pipeline stages; an input config uses quantizer/padder/slicer,
        # an output config uses stacker/quantizer/padder.
        self.name = name
        self.quantizer = quantizer
        self.padder = padder
        self.stacker = stacker
        self.slicer = slicer

    @classmethod
    def get_input_io(
        cls,
        name: str,
        precision: str,
        scale: float = None,
        zero_point: int = None,
        feature_len: int = None,
        padded_feature_len: int = None,
        batch_dim: int = None,
        seq_dim: int = None,
    ):
        """
        Creates an IO pipeline of:
            Quantize -> Pad -> Slice
        """

        batch_dim, seq_dim, feat_dim = _get_btf(batch_dim, seq_dim)

        # get input quantizer
        # Quantization is enabled when either scale or zero_point is given;
        # the missing value defaults to the identity (scale=1, zero_point=0).
        if (scale is not None) or (zero_point is not None):
            if scale is None:
                scale = 1
            if zero_point is None:
                zero_point = 0
            quantizer = Quantizer(precision, scale, zero_point)
        else:
            quantizer = None

        # get input padder (only when both lengths are known)
        if (feature_len is not None) and (padded_feature_len is not None):
            padder = Padder(name, feature_len, padded_feature_len, axis=feat_dim)
        else:
            padder = None

        # get input slicer
        slicer = BatchSlicer(batch_dim, seq_dim)

        return cls(name, quantizer, padder, slicer=slicer)

    @classmethod
    def get_output_io(
        cls,
        name: str,
        scale: float = None,
        zero_point: int = None,
        feature_len: int = None,
        padded_feature_len: int = None,
        batch_dim: int = None,
        seq_dim: int = None,
    ):
        """
        Creates an IO pipeline of:
            Stack -> DeQuantize -> DePad
        """
        batch_dim, seq_dim, feat_dim = _get_btf(batch_dim, seq_dim)

        # get stacker
        stacker = BatchStacker(batch_dim, seq_dim)

        # get dequantizer (enabled when either scale or zero_point is given)
        if (scale is not None) or (zero_point is not None):
            if scale is None:
                scale = 1
            if zero_point is None:
                zero_point = 0
            quantizer = DeQuantizer(scale, zero_point)
        else:
            quantizer = None

        # get de-padder (only when both lengths are known)
        if (feature_len is not None) and (padded_feature_len is not None):
            padder = DePadder(feature_len, padded_feature_len, feat_dim)
        else:
            padder = None

        return cls(name, quantizer, padder, stacker=stacker)

    def __call__(
        self, x: Union[np.ndarray, List[List[np.ndarray]]], quant=True
    ) -> Union[np.ndarray, List[List[np.ndarray]]]:
        """Apply the configured stages to x.

        Arguments:
            x: a user array (input path) or list of per-batch arrays
                (output path, when a stacker is configured).
            quant (bool): when False, skip (de)quantization; the values are
                instead cast to int with a losslessness check.
        """
        # Output path: first merge the per-batch results into one array.
        if self.stacker is not None:
            x = self.stacker(x)

        if not quant:  # GRPC workaround
            x = _force_ints(x)
        elif quant and self.quantizer is not None:
            x = self.quantizer(x)

        if self.padder is not None:
            x = self.padder(x)
        # Input path ends by slicing into per-batch-element arrays.
        if self.slicer is not None:
            x = self.slicer(x)

        return x
|
|
368
|
+
|
|
369
|
+
|
|
370
|
+
class SimIOWrapper:
    """
    Wraps execution of a SimRunner with IOConfigs.

    Inputs are preprocessed (quantized, padded, sliced into batch elements)
    per their registered IOConfig, the simulator is run once per batch
    element, and outputs are postprocessed (stacked, dequantized, de-padded).
    """

    def __init__(self, fasmir: FASMIR):
        # Per-tensor IO pipelines, in the positional order callers use
        # when invoking run().
        self.input_configs: List[IOConfig] = []
        self.output_configs: List[IOConfig] = []
        self.fasmir = fasmir

    def add_input(self, cfg: IOConfig):
        """Register the IOConfig for the next positional input."""
        self.input_configs.append(cfg)

    def add_output(self, cfg: IOConfig):
        """Register the IOConfig for the next output."""
        self.output_configs.append(cfg)

    def _preprocess_inputs(self, *args: np.ndarray, quant=True):
        """Apply each input's IOConfig.

        Returns:
            dict mapping input name -> list of per-batch-element arrays.

        Raises:
            ValueError: if the argument count does not match the configs.
        """
        # Verify the number of inputs.
        num_inputs = len(args)
        expected_num_inputs = len(self.input_configs)
        if num_inputs != expected_num_inputs:
            # Fixed: the expected/got values were previously interpolated in
            # swapped order, making the message misleading. (A redundant
            # assert re-checking the same condition was also removed.)
            raise ValueError(
                "Unexpected number of inputs.\nExpected: %s\nGot: %s"
                % (expected_num_inputs, num_inputs)
            )

        # Assign each input to a config.
        inputs: Dict[str, List[np.ndarray]] = {}
        for arg, cfg in zip(args, self.input_configs):
            inputs[cfg.name] = cfg(arg, quant=quant)

        return inputs

    def _postprocess_outputs(
        self, outputs: Dict[str, List[np.ndarray]], quant=True
    ) -> List[np.ndarray]:
        """Apply each output's IOConfig, in output-declaration order."""
        post_outputs = []
        for cfg in self.output_configs:
            pre_out = outputs[cfg.name]
            post_outputs.append(cfg(pre_out, quant=quant))
        return post_outputs

    def _run_sim_once(self, inputs: Dict[str, np.ndarray], input_period=None, **kwargs):
        """Run a single batch element; returns (outputs dict, metrics)."""
        runner = SimRunner(self.fasmir, **kwargs)
        runner.reset()
        outputs, __, __ = runner.run(inputs)
        metrics = runner.get_metrics(input_period, concise=True, as_yamlable=True)
        runner.finish()
        return outputs, metrics

    def run(
        self,
        *args,
        input_period=None,
        quantize_inputs=True,
        dequantize_outputs=True,
        **kwargs
    ):
        """Run the simulator over all batch elements.

        Returns:
            (outputs, metrics_str): postprocessed output arrays, and a YAML
            string of metrics (first batch element only; a WARNING entry is
            prepended when more than one element was simulated).

        Raises:
            ValueError: if no inputs were provided, or batch sizes disagree.
        """
        inputs = self._preprocess_inputs(*args, quant=quantize_inputs)

        # All inputs must agree on the batch size. (These were asserts;
        # raising keeps the checks under `python -O` as well.)
        B = None
        for x in inputs.values():
            b = len(x)
            if B is None:
                B = b
            if b != B:
                raise ValueError("Provided inputs did not have matching batch-sizes")
        if B is None:
            raise ValueError("No inputs were provided to the simulator")

        outputs = defaultdict(list)
        metrics = []

        # Run once per batch element, collecting per-name output lists.
        for b in range(B):
            input_b = {k: v[b] for k, v in inputs.items()}
            output_b, metric_b = self._run_sim_once(
                input_b, input_period=input_period, **kwargs
            )
            metrics.append(metric_b)
            for k, v in output_b.items():
                outputs[k].append(v)

        # clean up output formatting
        outputs = self._postprocess_outputs(outputs, quant=dequantize_outputs)

        # warn the user that batches aren't really supported for metrics purposes
        # want to put the WARNING in first, so it shows up at the top
        if B > 1:
            # (Fixed a missing space at the concatenation seam of this
            # user-facing warning message.)
            ret_metrics = {
                "WARNING": "simulation was over a batch:"
                + " metrics here are only for the first batch element"
            }
            for k, v in metrics[0].items():
                ret_metrics[k] = v
        else:
            ret_metrics = metrics[0]

        # turn the dict into a yaml
        metrics_str = yaml.dump(ret_metrics, sort_keys=False)

        return outputs, metrics_str
|
|
File without changes
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
from fmot import ConvertedModel
|
|
2
|
+
from fmot.fqir import GraphProto
|
|
3
|
+
from femtocrux.femtostack.common import CompilerFrontend, SimIOWrapper, IOConfig
|
|
4
|
+
from femtomapper import MapperConf, Mapper, MapperState
|
|
5
|
+
from femtobehav.fasmir import FASMIR
|
|
6
|
+
import torch
|
|
7
|
+
from typing import Tuple
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _compile_fqir(graph: GraphProto, options: dict) -> FASMIR:
    """Lower an FQIR graph to FASMIR via the femtomapper pipeline.

    Arguments:
        graph: the traced FQIR graph to compile
        options: keyword options forwarded to MapperConf

    Returns:
        The FASMIR extracted from the final mapper state.
    """
    conf = MapperConf(**options)
    initial_state = MapperState(fqir=graph)
    # compile, then extract fasmir from the resulting state
    final_state = Mapper(conf).do(initial_state)
    return final_state.fasmir
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class TorchCompiler(CompilerFrontend):
    """Compiler frontend for torch/fmot models, entered via FQIR graphs.

    Construct directly from an FQIR GraphProto, or via the from_fqir,
    from_converted_model, or from_torch_module classmethods.
    """

    def __init__(self, graph: GraphProto, batch_dim: int = None, seq_dim: int = None):
        assert isinstance(graph, GraphProto)

        super().__init__(input_ir=graph)
        # Axis positions of batch/sequence dims in user-supplied arrays;
        # None means that axis is absent.
        self.batch_dim = batch_dim
        self.seq_dim = seq_dim

    def _compile(
        self, input_ir: GraphProto, options: dict
    ) -> Tuple[FASMIR, SimIOWrapper]:
        """Lower FQIR to FASMIR, then derive the IO wrapper from both IRs."""
        fasmir = _compile_fqir(input_ir, options)
        wrapper = self._get_fqir_iowrapper(input_ir, fasmir)
        return fasmir, wrapper

    def _get_fqir_iowrapper(self, graph: GraphProto, fasmir: FASMIR) -> SimIOWrapper:
        """Build a SimIOWrapper with quantization/padding configs for every
        input and output of the graph's ARITH subgraph."""
        wrapper = SimIOWrapper(fasmir)
        arith: GraphProto = graph.subgraphs["ARITH"]

        # add input io
        for x in arith.inputs:
            name = x.name

            # get quantization config
            # fixed-point scale is 2**quanta; zero-point is always 0 here
            # (symmetric quantization)
            scale = 2**x.quanta
            zp = 0

            # dtype string ending in "8" means int8; everything else int16
            if x.dtype.endswith("8"):
                prec = "i8"
            else:
                prec = "i16"

            # get padding config
            true_len = x.shape[0]
            # NOTE(review): this reads fasmir.data_vars directly, while the
            # output loop below uses self._get_padded_len (the same lookup
            # with error handling) — presumably both should use the helper.
            padded_len = fasmir.data_vars[name].numpy.shape[0]

            # add input to wrapper
            wrapper.add_input(
                IOConfig.get_input_io(
                    name,
                    prec,
                    scale=scale,
                    zero_point=zp,
                    feature_len=true_len,
                    padded_feature_len=padded_len,
                    batch_dim=self.batch_dim,
                    seq_dim=self.seq_dim,
                )
            )

        # add output io
        for x in arith.outputs:
            name = x.name

            # get quantization config
            scale = 2**x.quanta
            zp = 0

            # get padding config
            true_len = x.shape[0]
            padded_len = self._get_padded_len(fasmir, name)

            # add output to the wrapper
            wrapper.add_output(
                IOConfig.get_output_io(
                    name,
                    scale=scale,
                    zero_point=zp,
                    feature_len=true_len,
                    padded_feature_len=padded_len,
                    batch_dim=self.batch_dim,
                    seq_dim=self.seq_dim,
                )
            )

        # done!
        return wrapper

    @classmethod
    def from_fqir(cls, graph: GraphProto, batch_dim: int = None, seq_dim: int = None):
        """Alternate constructor from an already-traced FQIR graph."""
        assert isinstance(graph, GraphProto)
        return cls(graph, batch_dim, seq_dim)

    @classmethod
    def from_converted_model(
        cls,
        model: ConvertedModel,
        batch_dim: int = None,
        seq_dim: int = None,
        experimental_tracing=False,
    ):
        """Alternate constructor: trace an fmot ConvertedModel to FQIR first."""
        assert isinstance(model, ConvertedModel)
        graph = model.trace(experimental_hybrid_tracing=experimental_tracing)
        return cls(graph, batch_dim, seq_dim)

    @classmethod
    def from_torch_module(
        cls,
        module: torch.nn.Module,
        calibration_data,
        precision: str = "double",
        batch_dim: int = None,
        seq_dim: int = None,
        experimental_tracing=False,
        conversion_kwargs: dict = {},
    ):
        """Alternate constructor: convert a raw torch module with fmot,
        quantize it against calibration_data, and trace it to FQIR.

        NOTE(review): conversion_kwargs has a mutable `{}` default — safe
        only as long as it is never mutated here; consider a None default.
        """
        cmodel = ConvertedModel(
            module, precision, batch_dim=batch_dim, seq_dim=seq_dim, **conversion_kwargs
        )
        cmodel.quantize(calibration_data)

        return TorchCompiler.from_converted_model(
            cmodel, batch_dim, seq_dim, experimental_tracing
        )
|
|
@@ -6,10 +6,10 @@ import warnings
|
|
|
6
6
|
import compiler_service_pb2 as compiler__service__pb2
|
|
7
7
|
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
|
|
8
8
|
|
|
9
|
-
GRPC_GENERATED_VERSION = '1.
|
|
9
|
+
GRPC_GENERATED_VERSION = '1.65.1'
|
|
10
10
|
GRPC_VERSION = grpc.__version__
|
|
11
|
-
EXPECTED_ERROR_RELEASE = '1.
|
|
12
|
-
SCHEDULED_RELEASE_DATE = '
|
|
11
|
+
EXPECTED_ERROR_RELEASE = '1.66.0'
|
|
12
|
+
SCHEDULED_RELEASE_DATE = 'August 6, 2024'
|
|
13
13
|
_version_not_supported = False
|
|
14
14
|
|
|
15
15
|
try:
|
femtocrux/server/server.py
CHANGED
|
@@ -7,9 +7,9 @@ import logging
|
|
|
7
7
|
import pickle
|
|
8
8
|
import sys
|
|
9
9
|
|
|
10
|
-
from femtostack.common import CompilerFrontend
|
|
11
|
-
from femtostack.tflite_api.tflite_frontend import TFLiteCompiler
|
|
12
|
-
from femtostack.torch_api.frontend import TorchCompiler
|
|
10
|
+
from femtocrux.femtostack.common import CompilerFrontend
|
|
11
|
+
from femtocrux.femtostack.tflite_api.tflite_frontend import TFLiteCompiler
|
|
12
|
+
from femtocrux.femtostack.torch_api.frontend import TorchCompiler
|
|
13
13
|
|
|
14
14
|
from femtocrux.server.exceptions import format_exception, format_exception_from_exc
|
|
15
15
|
from femtocrux.util.utils import (
|
|
@@ -173,7 +173,7 @@ class CompileServicer(cs_pb2_grpc.CompileServicer):
|
|
|
173
173
|
# Respond with output data
|
|
174
174
|
yield cs_pb2.simulation_output(
|
|
175
175
|
data=[numpy_to_ndarray(x) for x in outputs],
|
|
176
|
-
report=metrics
|
|
176
|
+
report=metrics,
|
|
177
177
|
status=cs_pb2.status(success=True),
|
|
178
178
|
)
|
|
179
179
|
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
femtocrux/ENV_REQUIREMENTS.sh,sha256=t_O1B4hJAMgxvH9gwp1qls6eVFmhSYBJe64KmuK_H-4,1389
|
|
2
|
+
femtocrux/PY_REQUIREMENTS,sha256=lELTbw5r4KCzeFn0TjLp0QpbOuHnl4XjGKwyL9yVBrc,285
|
|
3
|
+
femtocrux/VERSION,sha256=h7JBsnXFkWlIRlYOmHn1DJ2jFQ-FTv2r14LFOXcvMDM,6
|
|
4
|
+
femtocrux/__init__.py,sha256=yIWd9I2PEXCn_PKIILAN3mkWeTf0tgtVualeTIHNxfQ,342
|
|
5
|
+
femtocrux/version.py,sha256=uNg2kHxQo6oUN1ah7s9_85rCZVRoTHGPD1GAQPZW4lw,164
|
|
6
|
+
femtocrux/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
7
|
+
femtocrux/client/client.py,sha256=PNaXL_XTe_7_r14WWKmp1RnvAW-kvYzDXcimseA8sEo,22726
|
|
8
|
+
femtocrux/femtostack/__init__.py,sha256=75hXE1ODNiLmiFrkEtfVfhA3O0ZVH-Ub0KoXdJu3n5g,171
|
|
9
|
+
femtocrux/femtostack/common/__init__.py,sha256=Ig25arHQZ05IiLWSlkgR9EmvguEsa5bZF8V8YapobzY,343
|
|
10
|
+
femtocrux/femtostack/common/frontend.py,sha256=OutJsOvALoY5xdf7Je2ixYE1iuf7BrtacwXoaBaFV9c,4069
|
|
11
|
+
femtocrux/femtostack/common/metrics.py,sha256=gmy30EmDI2XkM-Kh_j8fifPQpyM2O1y_cGBMb9SJt_I,5324
|
|
12
|
+
femtocrux/femtostack/common/sim_io.py,sha256=sqU_qaR1pTfmNQfU_ns3w5bxJWhEKzEgUouLCvljHcw,14401
|
|
13
|
+
femtocrux/femtostack/tflite_api/__init__.py,sha256=SKq1KoXWRNMGQSf9RqBa5BUY-0JCMLW4T_UZmJJBSlE,186
|
|
14
|
+
femtocrux/femtostack/tflite_api/tflite_frontend.py,sha256=CsvurJ2RJPnUgjfwQ8ru6jL5sNmb-AbTF1TIwZuHuBY,210
|
|
15
|
+
femtocrux/femtostack/torch_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
16
|
+
femtocrux/femtostack/torch_api/frontend.py,sha256=-12zz0NpvQgpPNqBMg7EJQdomtRJPNffrVKC6rBqfBE,4064
|
|
17
|
+
femtocrux/grpc/__init__.py,sha256=uiMHQt5I2eAKJqI3Zh0h1Gm7cmPR4PbaGS71nCJQCGw,169
|
|
18
|
+
femtocrux/grpc/compiler_service_pb2.py,sha256=Fu4wOuUsu4lN1Ig3GacvuzYJ7MPRq-hTbqSzrCDqRAc,4460
|
|
19
|
+
femtocrux/grpc/compiler_service_pb2_grpc.py,sha256=Sat3uoVCwQIEVx99aYTD2YExn5G4bF9Lhv3QCGgxQtM,8738
|
|
20
|
+
femtocrux/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
21
|
+
femtocrux/server/exceptions.py,sha256=lI6n471n5QKf5G3aL_1kuBVEItD-jBgithVVpPDwNYc,609
|
|
22
|
+
femtocrux/server/healthcheck.py,sha256=ehqAwnv0D0zpy-AUZAPwv8rp874DZCwUmP8nzdXzZvI,1565
|
|
23
|
+
femtocrux/server/server.py,sha256=fpwL1GtFvdw7E0inkMDbDKd-Hu3QAuEgvlqFP7vtZhY,7928
|
|
24
|
+
femtocrux/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
25
|
+
femtocrux/util/utils.py,sha256=FZ8cssDom4B3FDbVU_ew4Cf3wOWjo2w1jwcbnLzoYnM,1003
|
|
26
|
+
femtocrux-0.6.1.dist-info/LICENSE,sha256=eN9ZI1xHjUmFvN3TEeop5kBGXRUBfbsl55KBNBYYFqI,36
|
|
27
|
+
femtocrux-0.6.1.dist-info/METADATA,sha256=Vy_KiaSGwAXRK7LMcO9lR5ha4VGq7d5fkEFOEQ4P8sk,3561
|
|
28
|
+
femtocrux-0.6.1.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
|
|
29
|
+
femtocrux-0.6.1.dist-info/top_level.txt,sha256=BkTttlioC3je__8577wxRieZqY3Abu7FOOdMnmYbcNI,10
|
|
30
|
+
femtocrux-0.6.1.dist-info/RECORD,,
|
femtocrux-0.5.3.dist-info/RECORD
DELETED
|
@@ -1,21 +0,0 @@
|
|
|
1
|
-
femtocrux/ENV_REQUIREMENTS.sh,sha256=t_O1B4hJAMgxvH9gwp1qls6eVFmhSYBJe64KmuK_H-4,1389
|
|
2
|
-
femtocrux/PY_REQUIREMENTS,sha256=lELTbw5r4KCzeFn0TjLp0QpbOuHnl4XjGKwyL9yVBrc,285
|
|
3
|
-
femtocrux/VERSION,sha256=J-z_H82vZatGwbuZwQctbRNKDo3Zfdlc-ohpa57UU7I,6
|
|
4
|
-
femtocrux/__init__.py,sha256=yIWd9I2PEXCn_PKIILAN3mkWeTf0tgtVualeTIHNxfQ,342
|
|
5
|
-
femtocrux/version.py,sha256=uNg2kHxQo6oUN1ah7s9_85rCZVRoTHGPD1GAQPZW4lw,164
|
|
6
|
-
femtocrux/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
7
|
-
femtocrux/client/client.py,sha256=PNaXL_XTe_7_r14WWKmp1RnvAW-kvYzDXcimseA8sEo,22726
|
|
8
|
-
femtocrux/grpc/__init__.py,sha256=uiMHQt5I2eAKJqI3Zh0h1Gm7cmPR4PbaGS71nCJQCGw,169
|
|
9
|
-
femtocrux/grpc/compiler_service_pb2.py,sha256=Fu4wOuUsu4lN1Ig3GacvuzYJ7MPRq-hTbqSzrCDqRAc,4460
|
|
10
|
-
femtocrux/grpc/compiler_service_pb2_grpc.py,sha256=SgL_HC0nLzuwjpm8U6CdRPb2sJjVOLQrhKHpp0D-gHk,8737
|
|
11
|
-
femtocrux/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
12
|
-
femtocrux/server/exceptions.py,sha256=lI6n471n5QKf5G3aL_1kuBVEItD-jBgithVVpPDwNYc,609
|
|
13
|
-
femtocrux/server/healthcheck.py,sha256=ehqAwnv0D0zpy-AUZAPwv8rp874DZCwUmP8nzdXzZvI,1565
|
|
14
|
-
femtocrux/server/server.py,sha256=tmXVleZQB59oFzdmut3na4NnDvr0gmxphXF3N3MQx6I,7919
|
|
15
|
-
femtocrux/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
16
|
-
femtocrux/util/utils.py,sha256=FZ8cssDom4B3FDbVU_ew4Cf3wOWjo2w1jwcbnLzoYnM,1003
|
|
17
|
-
femtocrux-0.5.3.dist-info/LICENSE,sha256=eN9ZI1xHjUmFvN3TEeop5kBGXRUBfbsl55KBNBYYFqI,36
|
|
18
|
-
femtocrux-0.5.3.dist-info/METADATA,sha256=xWND9JZM8p1LP2H7tfbmd5hF0GvZicv09dRdOKSKTEg,3561
|
|
19
|
-
femtocrux-0.5.3.dist-info/WHEEL,sha256=Z4pYXqR_rTB7OWNDYFOm1qRk0RX6GFP2o8LgvP453Hk,91
|
|
20
|
-
femtocrux-0.5.3.dist-info/top_level.txt,sha256=BkTttlioC3je__8577wxRieZqY3Abu7FOOdMnmYbcNI,10
|
|
21
|
-
femtocrux-0.5.3.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|