femtocrux 1.2.1-py3-none-any.whl → 2.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,470 +0,0 @@
- import numpy as np
- from femtobehav.fasmir import FASMIR
- from femtobehav.sim import SimRunner
-
- # from femtobehav.sim.runner import _yamlify_nested_dict
- from collections import defaultdict
- from typing import List, Dict, Union
- import yaml
-
-
- class Quantizer:
-     """Converts a floating-point input to integer
-
-     Arguments:
-         precision (str): Output integer precision; 'i8' or 'STD' for int-8,
-             'i16' or 'DBL' for int-16
-         scale (float): scale value, a positive real number
-         zero_point (int): zero-point, an integer
-     """
-
-     def __init__(self, precision: str, scale: float, zero_point: int = 0):
-         if precision.upper() == "STD":
-             precision = "i8"
-         elif precision.upper() == "DBL":
-             precision = "i16"
-
-         assert precision in ["i8", "i16"]
-         self.scale = scale
-         self.zero_point = zero_point
-         self.precision = precision
-
-     def __call__(self, x: np.ndarray) -> np.ndarray:
-         if self.precision == "i8":
-             bits = 8
-             dtype = np.int8
-         else:
-             bits = 16
-             dtype = np.int16
-
-         lims = -(2 ** (bits - 1)), 2 ** (bits - 1) - 1
-         y = np.round(x / self.scale + self.zero_point)
-         y = np.clip(y, *lims).astype(dtype)
-         return y
-
-
- def _force_ints(x):
-     """
-     GRPC turns everything to floats on the way in
-     cast to int, see if it matches, get angry otherwise
-     takes the place of a quantizer, but does nothing
-     """
-     x_int = x.astype(int)
-     if not (x == x_int).all():
-         raise ValueError("trying to pass float into simulator without quantization")
-     return x_int
-
-
- class DeQuantizer:
-     """Converts from integer to floating-point
-
-     Arguments:
-         scale (float): scale value, a positive real number
-         zero_point (int): zero-point, an integer
-
-     Returns:
-         a numpy array of precision float32
-     """
-
-     def __init__(self, scale: float, zero_point: int = 0):
-         self.scale = scale
-         self.zero_point = zero_point
-
-     def __call__(self, x: np.ndarray) -> np.ndarray:
-         y = (x.astype(np.float32) - self.zero_point) * self.scale
-         return y
-
-
- class Padder:
-     """Applies padding to a single axis of a tensor
-
-     Arguments:
-         name (str): the input name, for error handling
-         true_length (int): length of the axis before padding; the original
-             length in the high-level IR
-         padded_length (int): length of the axis after padding; padding is
-             the result of the compiler fitting variables into integer multiples
-             of the word-size
-         axis (int, default -1): axis to apply padding to
-     """
-
-     def __init__(self, name: str, true_length: int, padded_length: int, axis: int = -1):
-         self.name = name
-         self.true_length = true_length
-         self.padded_length = padded_length
-         self.axis = axis
-
-     def __call__(self, x: np.ndarray) -> np.ndarray:
-         # Verify the input shape
-         input_len = x.shape[self.axis]
-         expected_len = self.true_length
-         if input_len != expected_len:
-             raise ValueError(
-                 "Received unexpected shape for input %s. "
-                 "Expected a vector of length %d. (Received: %d)"
-                 % (self.name, expected_len, input_len)
-             )
-
-         assert x.shape[self.axis] == self.true_length
-         p_shp = list(x.shape)
-         p_shp[self.axis] = self.padded_length - self.true_length
-         x = np.concatenate([x, np.zeros(p_shp, dtype=x.dtype)], axis=self.axis)
-         return x
-
-
- class DePadder:
-     """Removes padding from a single axis of a tensor
-
-     Arguments:
-         true_length (int): length of the axis before padding; the original
-             length in the high-level IR
-         padded_length (int): length of the axis after padding; padding is
-             the result of the compiler fitting variables into integer multiples
-             of the word-size
-         axis (int, default -1): axis to apply padding to
-     """
-
-     def __init__(self, true_length: int, padded_length: int, axis: int = -1):
-         self.true_length = true_length
-         self.padded_length = padded_length
-         self.axis = axis
-
-     def __call__(self, x: np.ndarray) -> np.ndarray:
-         assert x.shape[self.axis] == self.padded_length
-         ndim = x.ndim
-         slicers = [slice(0, None, 1)] * ndim
-         slicers[self.axis] = slice(0, self.true_length, 1)
-         y = x[tuple(slicers)]
-         return y
-
-
- def _standardize_dim(d, ndim):
-     if d is not None:
-         return d % ndim
-     return d
-
-
- class BatchSlicer:
-     """Converts input multidimensional tensor into slices that iterate
-     over batch and sequential dimensions
-     """
-
-     def __init__(self, batch_dim=None, seq_dim=None):
-         self.batch_dim = batch_dim
-         self.seq_dim = seq_dim
-
-     def __call__(self, x: np.ndarray) -> List[List[np.ndarray]]:
-         # first, standardize the input to shape (batch, time, features)
-         # inserting unary dims if needed
-
-         # handle negatively indexed dimensions
-         ndim = x.ndim
-         batch_dim, seq_dim = map(
-             lambda u: _standardize_dim(u, ndim), [self.batch_dim, self.seq_dim]
-         )
-
-         # expand dims as needed
-         if batch_dim is None:
-             if seq_dim is None:
-                 x = np.expand_dims(x, axis=[0, 1])
-                 batch_dim = 0
-                 seq_dim = 1
-             else:
-                 x = np.expand_dims(x, axis=0)
-                 batch_dim = 0
-                 seq_dim += 1
-         elif seq_dim is None:
-             x = np.expand_dims(x, axis=0)
-             seq_dim = 0
-             batch_dim += 1
-
-         feature_dim = set((0, 1, 2))
-         feature_dim.remove(batch_dim)
-         feature_dim.remove(seq_dim)
-         feature_dim = list(feature_dim)[0]
-
-         # apply transposes
-         x = np.transpose(x, [batch_dim, seq_dim, feature_dim])
-
-         # convert to List[array( time, feature )]:
-         out = [batch for batch in x]
-         return out
-
-
- def _get_btf(batch_dim, seq_dim):
-     ndim = 1 + int(batch_dim is not None) + int(seq_dim is not None)
-     batch_dim = _standardize_dim(batch_dim, ndim)
-     seq_dim = _standardize_dim(seq_dim, ndim)
-
-     feat_dim = set(range(ndim))
-     if batch_dim is not None:
-         feat_dim.remove(batch_dim)
-     if seq_dim is not None:
-         feat_dim.remove(seq_dim)
-     feat_dim = list(feat_dim)[0]
-
-     return batch_dim, seq_dim, feat_dim
-
-
- def _get_inverse_perm(perm):
-     out = [None] * len(perm)
-     for i, p in enumerate(perm):
-         out[p] = i
-     return out
-
-
- def _inv_transpose(x, forward_perm):
-     inverse_perm = _get_inverse_perm(forward_perm)
-     return np.transpose(x, inverse_perm)
-
-
- class BatchStacker:
-     """Converts a list of list of tensors into a single tensor,
-     given the desired batch/seq dim ordering
-     """
-
-     def __init__(self, batch_dim=None, seq_dim=None):
-         self.batch_dim = batch_dim
-         self.seq_dim = seq_dim
-
-     def __call__(self, x: List[np.ndarray]) -> np.ndarray:
-         x = np.stack([batch for batch in x], 0)
-
-         batch_dim, seq_dim, feat_dim = _get_btf(self.batch_dim, self.seq_dim)
-
-         if batch_dim is not None:
-             if seq_dim is not None:
-                 return _inv_transpose(x, [batch_dim, seq_dim, feat_dim])
-             else:
-                 x = np.squeeze(x, 1)
-                 return _inv_transpose(x, [batch_dim, feat_dim])
-         else:
-             if seq_dim is not None:
-                 x = np.squeeze(x, 0)
-                 return _inv_transpose(x, [seq_dim, feat_dim])
-             else:
-                 return x[0, 0]
-
-
- class IOConfig:
-     """
-     TODO: documentation here
-
-     It is recommended to use the get_input_io and get_output_io factory
-     methods instead of constructing from __init__ directly.
-     """
-
-     def __init__(
-         self,
-         name: str,
-         quantizer: Union[Quantizer, DeQuantizer] = None,
-         padder: Union[Padder, DePadder] = None,
-         stacker: BatchStacker = None,
-         slicer: BatchSlicer = None,
-     ):
-         self.name = name
-         self.quantizer = quantizer
-         self.padder = padder
-         self.stacker = stacker
-         self.slicer = slicer
-
-     @classmethod
-     def get_input_io(
-         cls,
-         name: str,
-         precision: str,
-         scale: float = None,
-         zero_point: int = None,
-         feature_len: int = None,
-         padded_feature_len: int = None,
-         batch_dim: int = None,
-         seq_dim: int = None,
-     ):
-         """
-         Creates an IO pipeline of:
-             Quantize -> Pad -> Slice
-         """
-
-         batch_dim, seq_dim, feat_dim = _get_btf(batch_dim, seq_dim)
-
-         # get input quantizer
-         if (scale is not None) or (zero_point is not None):
-             if scale is None:
-                 scale = 1
-             if zero_point is None:
-                 zero_point = 0
-             quantizer = Quantizer(precision, scale, zero_point)
-         else:
-             quantizer = None
-
-         # get input padder
-         if (feature_len is not None) and (padded_feature_len is not None):
-             padder = Padder(name, feature_len, padded_feature_len, axis=feat_dim)
-         else:
-             padder = None
-
-         # get input slicer
-         slicer = BatchSlicer(batch_dim, seq_dim)
-
-         return cls(name, quantizer, padder, slicer=slicer)
-
-     @classmethod
-     def get_output_io(
-         cls,
-         name: str,
-         scale: float = None,
-         zero_point: int = None,
-         feature_len: int = None,
-         padded_feature_len: int = None,
-         batch_dim: int = None,
-         seq_dim: int = None,
-     ):
-         """
-         Creates an IO pipeline of:
-             Stack -> DeQuantize -> DePad
-         """
-         batch_dim, seq_dim, feat_dim = _get_btf(batch_dim, seq_dim)
-
-         # get stacker
-         stacker = BatchStacker(batch_dim, seq_dim)
-
-         # get dequantizer
-         if (scale is not None) or (zero_point is not None):
-             if scale is None:
-                 scale = 1
-             if zero_point is None:
-                 zero_point = 0
-             quantizer = DeQuantizer(scale, zero_point)
-         else:
-             quantizer = None
-
-         # get de-padder
-         if (feature_len is not None) and (padded_feature_len is not None):
-             padder = DePadder(feature_len, padded_feature_len, feat_dim)
-         else:
-             padder = None
-
-         return cls(name, quantizer, padder, stacker=stacker)
-
-     def __call__(
-         self, x: Union[np.ndarray, List[List[np.ndarray]]], quant=True
-     ) -> Union[np.ndarray, List[List[np.ndarray]]]:
-         if self.stacker is not None:
-             x = self.stacker(x)
-
-         if not quant:  # GRPC workaround
-             x = _force_ints(x)
-         elif quant and self.quantizer is not None:
-             x = self.quantizer(x)
-
-         if self.padder is not None:
-             x = self.padder(x)
-         if self.slicer is not None:
-             x = self.slicer(x)
-
-         return x
-
-
- class SimIOWrapper:
-     """
-     Wraps execution of a SimRunner with IOConfigs
-     """
-
-     def __init__(self, fasmir: FASMIR):
-         self.input_configs: List[IOConfig] = []
-         self.output_configs: List[IOConfig] = []
-         self.fasmir = fasmir
-
-     def add_input(self, cfg: IOConfig):
-         self.input_configs.append(cfg)
-
-     def add_output(self, cfg: IOConfig):
-         self.output_configs.append(cfg)
-
-     def _preprocess_inputs(self, *args: np.ndarray, quant=True):
-         # Verify the number of inputs
-         num_inputs = len(args)
-         expected_num_inputs = len(self.input_configs)
-         if num_inputs != expected_num_inputs:
-             raise ValueError(
-                 "Unexpected number of inputs.\nExpected: %s\nGot: %s"
-                 % (expected_num_inputs, num_inputs)
-             )
-         assert len(args) == len(self.input_configs)
-
-         # Assign each input to a config
-         inputs: Dict[str, List[np.ndarray]] = {}
-         for arg, cfg in zip(args, self.input_configs):
-             inputs[cfg.name] = cfg(arg, quant=quant)
-
-         return inputs
-
-     def _postprocess_outputs(
-         self, outputs: Dict[str, List[np.ndarray]], quant=True
-     ) -> List[np.ndarray]:
-         post_outputs = []
-         for cfg in self.output_configs:
-             pre_out = outputs[cfg.name]
-             post_outputs.append(cfg(pre_out, quant=quant))
-         return post_outputs
-
-     def _run_sim_once(self, inputs: Dict[str, np.ndarray], input_period=None, **kwargs):
-         runner = SimRunner(self.fasmir, **kwargs)
-         runner.reset()
-         outputs, __, __ = runner.run(inputs)
-         metrics = runner.get_metrics(input_period, concise=True, as_yamlable=True)
-         runner.finish()
-         return outputs, metrics
-
-     def run(
-         self,
-         *args,
-         input_period=None,
-         quantize_inputs=True,
-         dequantize_outputs=True,
-         **kwargs
-     ):
-         inputs = self._preprocess_inputs(*args, quant=quantize_inputs)
-
-         B = None
-         for x in inputs.values():
-             b = len(x)
-             if B is None:
-                 B = b
-             assert b == B, "Provided inputs did not have matching batch-sizes"
-
-         outputs = defaultdict(list)
-         metrics = []
-
-         for b in range(B):
-             input_b = {}
-             for k, v in inputs.items():
-                 input_b[k] = v[b]
-             output_b, metric_b = self._run_sim_once(
-                 input_b, input_period=input_period, **kwargs
-             )
-             metrics.append(metric_b)
-             for k, v in output_b.items():
-                 outputs[k].append(v)
-
-         # clean up output formatting
-         outputs = self._postprocess_outputs(outputs, quant=dequantize_outputs)
-
-         # warn the user that batches aren't really supported for metrics purposes
-         # want to put the WARNING in first, so it shows up at the top
-         if B > 1:
-             ret_metrics = {
-                 "WARNING": "simulation was over a batch: "
-                 + "metrics here are only for the first batch element"
-             }
-             for k, v in metrics[0].items():
-                 ret_metrics[k] = v
-         else:
-             ret_metrics = metrics[0]
-
-         # turn the dict into a yaml
-         metrics_str = yaml.dump(ret_metrics, sort_keys=False)
-
-         return outputs, metrics_str
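
For reference, the Quantizer / DeQuantizer pair removed above implements a standard affine (scale and zero-point) mapping: quantization is round(x / scale + zero_point) clipped to the target integer range, and dequantization is (q - zero_point) * scale. A minimal, self-contained NumPy sketch of that round trip (the scale and sample values below are invented for illustration; real scales come from the compiled graph):

```python
import numpy as np

# hypothetical int-8 ("STD") configuration
scale, zero_point = 0.05, 0
x = np.array([-1.0, 0.0, 0.4, 2.0], dtype=np.float32)

# quantize: round(x / scale + zero_point), clipped to the int8 range [-128, 127]
q = np.clip(np.round(x / scale + zero_point), -128, 127).astype(np.int8)  # -> [-20, 0, 8, 40]

# dequantize: (q - zero_point) * scale, recovering x up to quantization error
x_hat = (q.astype(np.float32) - zero_point) * scale
```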
@@ -1,6 +0,0 @@
- """
- Copyright Femtosense 2024
-
- By using this software package, you agree to abide by the terms and conditions
- in the license agreement found at https://femtosense.ai/legal/eula/
- """
@@ -1,6 +0,0 @@
- from femtocrux.femtostack.common import CompilerFrontend
-
-
- class TFLiteCompiler(CompilerFrontend):
-     def __init__(self, *args, **kwargs):
-         raise ValueError("TFLiteCompiler deprecated in this release")
@@ -1,136 +0,0 @@
- from fmot import ConvertedModel
- from fmot.fqir import GraphProto
- from femtocrux.femtostack.common import CompilerFrontend, SimIOWrapper, IOConfig
- from femtomapper import MapperConf, Mapper, MapperState
- from femtobehav.fasmir import FASMIR
- import torch
- from typing import Tuple
-
-
- def _compile_fqir(graph: GraphProto, options: dict) -> FASMIR:
-     mapper_conf = MapperConf(**options)
-     mapper = Mapper(mapper_conf)
-     mapper_state = MapperState(fqir=graph)
-
-     # compile:
-     mapper_state = mapper.do(mapper_state)
-
-     # extract fasmir
-     fasmir = mapper_state.fasmir
-     return fasmir
-
-
- class TorchCompiler(CompilerFrontend):
-     def __init__(self, graph: GraphProto, batch_dim: int = None, seq_dim: int = None):
-         assert isinstance(graph, GraphProto)
-
-         super().__init__(input_ir=graph)
-         self.batch_dim = batch_dim
-         self.seq_dim = seq_dim
-
-     def _compile(
-         self, input_ir: GraphProto, options: dict
-     ) -> Tuple[FASMIR, SimIOWrapper]:
-         fasmir = _compile_fqir(input_ir, options)
-         wrapper = self._get_fqir_iowrapper(input_ir, fasmir)
-         return fasmir, wrapper
-
-     def _get_fqir_iowrapper(self, graph: GraphProto, fasmir: FASMIR) -> SimIOWrapper:
-         wrapper = SimIOWrapper(fasmir)
-         arith: GraphProto = graph.subgraphs["ARITH"]
-
-         # add input io
-         for x in arith.inputs:
-             name = x.name
-
-             # get quantization config
-             scale = 2**x.quanta
-             zp = 0
-
-             if x.dtype.endswith("8"):
-                 prec = "i8"
-             else:
-                 prec = "i16"
-
-             # get padding config
-             true_len = x.shape[0]
-             padded_len = fasmir.data_vars[name].numpy.shape[0]
-
-             # add input to wrapper
-             wrapper.add_input(
-                 IOConfig.get_input_io(
-                     name,
-                     prec,
-                     scale=scale,
-                     zero_point=zp,
-                     feature_len=true_len,
-                     padded_feature_len=padded_len,
-                     batch_dim=self.batch_dim,
-                     seq_dim=self.seq_dim,
-                 )
-             )
-
-         # add output io
-         for x in arith.outputs:
-             name = x.name
-
-             # get quantization config
-             scale = 2**x.quanta
-             zp = 0
-
-             # get padding config
-             true_len = x.shape[0]
-             padded_len = self._get_padded_len(fasmir, name)
-
-             # add output to wrapper
-             wrapper.add_output(
-                 IOConfig.get_output_io(
-                     name,
-                     scale=scale,
-                     zero_point=zp,
-                     feature_len=true_len,
-                     padded_feature_len=padded_len,
-                     batch_dim=self.batch_dim,
-                     seq_dim=self.seq_dim,
-                 )
-             )
-
-         # done!
-         return wrapper
-
-     @classmethod
-     def from_fqir(cls, graph: GraphProto, batch_dim: int = None, seq_dim: int = None):
-         assert isinstance(graph, GraphProto)
-         return cls(graph, batch_dim, seq_dim)
-
-     @classmethod
-     def from_converted_model(
-         cls,
-         model: ConvertedModel,
-         batch_dim: int = None,
-         seq_dim: int = None,
-         experimental_tracing=False,
-     ):
-         assert isinstance(model, ConvertedModel)
-         graph = model.trace(experimental_hybrid_tracing=experimental_tracing)
-         return cls(graph, batch_dim, seq_dim)
-
-     @classmethod
-     def from_torch_module(
-         cls,
-         module: torch.nn.Module,
-         calibration_data,
-         precision: str = "double",
-         batch_dim: int = None,
-         seq_dim: int = None,
-         experimental_tracing=False,
-         conversion_kwargs: dict = {},
-     ):
-         cmodel = ConvertedModel(
-             module, precision, batch_dim=batch_dim, seq_dim=seq_dim, **conversion_kwargs
-         )
-         cmodel.quantize(calibration_data)
-
-         return TorchCompiler.from_converted_model(
-             cmodel, batch_dim, seq_dim, experimental_tracing
-         )
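
The removed TorchCompiler tied fmot model conversion and femtomapper compilation together behind three constructors: from_fqir, from_converted_model, and from_torch_module. A hedged usage sketch against the 1.2.1 layout shown above (the model, calibration data, and dimension arguments are placeholders, not part of the package):

```python
import torch
from femtocrux.femtostack.torch_api.frontend import TorchCompiler  # path as listed in the 1.2.1 RECORD below

# placeholder model and calibration batches; any fmot-convertible module would do
model = torch.nn.Sequential(torch.nn.Linear(16, 8), torch.nn.ReLU())
calib = [torch.randn(10, 16) for _ in range(4)]

# quantizes with fmot, traces to FQIR, and builds the compiler frontend in one call
compiler = TorchCompiler.from_torch_module(
    model,
    calib,
    precision="double",  # fmot's "double" precision corresponds to 16-bit activations
    batch_dim=0,
    seq_dim=None,
)
```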
@@ -1,30 +0,0 @@
- femtocrux/ENV_REQUIREMENTS.sh,sha256=t_O1B4hJAMgxvH9gwp1qls6eVFmhSYBJe64KmuK_H-4,1389
- femtocrux/PY_REQUIREMENTS,sha256=UwXV0o3gieruZUYdN9r2bqQ0Wcf_vosjeP6LJVx6oF0,275
- femtocrux/VERSION,sha256=bPTghLR_M8mwLveSedFXgzho-PcFFBaadovjU-4yj-o,6
- femtocrux/__init__.py,sha256=yIWd9I2PEXCn_PKIILAN3mkWeTf0tgtVualeTIHNxfQ,342
- femtocrux/version.py,sha256=uNg2kHxQo6oUN1ah7s9_85rCZVRoTHGPD1GAQPZW4lw,164
- femtocrux/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- femtocrux/client/client.py,sha256=33oma7Abw64BXh2MBL79GMCTCRoNhunKEh9chhrz3qI,22738
- femtocrux/femtostack/__init__.py,sha256=75hXE1ODNiLmiFrkEtfVfhA3O0ZVH-Ub0KoXdJu3n5g,171
- femtocrux/femtostack/common/__init__.py,sha256=Ig25arHQZ05IiLWSlkgR9EmvguEsa5bZF8V8YapobzY,343
- femtocrux/femtostack/common/frontend.py,sha256=OutJsOvALoY5xdf7Je2ixYE1iuf7BrtacwXoaBaFV9c,4069
- femtocrux/femtostack/common/metrics.py,sha256=gmy30EmDI2XkM-Kh_j8fifPQpyM2O1y_cGBMb9SJt_I,5324
- femtocrux/femtostack/common/sim_io.py,sha256=sqU_qaR1pTfmNQfU_ns3w5bxJWhEKzEgUouLCvljHcw,14401
- femtocrux/femtostack/tflite_api/__init__.py,sha256=SKq1KoXWRNMGQSf9RqBa5BUY-0JCMLW4T_UZmJJBSlE,186
- femtocrux/femtostack/tflite_api/tflite_frontend.py,sha256=CsvurJ2RJPnUgjfwQ8ru6jL5sNmb-AbTF1TIwZuHuBY,210
- femtocrux/femtostack/torch_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- femtocrux/femtostack/torch_api/frontend.py,sha256=-12zz0NpvQgpPNqBMg7EJQdomtRJPNffrVKC6rBqfBE,4064
- femtocrux/grpc/__init__.py,sha256=uiMHQt5I2eAKJqI3Zh0h1Gm7cmPR4PbaGS71nCJQCGw,169
- femtocrux/grpc/compiler_service_pb2.py,sha256=s5gb9988iKDcdzI4t3b2_fGkotTW41Fy6-xWxdj_YWY,4703
- femtocrux/grpc/compiler_service_pb2_grpc.py,sha256=twzBPI2b9BFhkn75WcopLk_Z5D-GwvHheoGOid3gpVA,8501
- femtocrux/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- femtocrux/server/exceptions.py,sha256=lI6n471n5QKf5G3aL_1kuBVEItD-jBgithVVpPDwNYc,609
- femtocrux/server/healthcheck.py,sha256=ehqAwnv0D0zpy-AUZAPwv8rp874DZCwUmP8nzdXzZvI,1565
- femtocrux/server/server.py,sha256=fpwL1GtFvdw7E0inkMDbDKd-Hu3QAuEgvlqFP7vtZhY,7928
- femtocrux/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- femtocrux/util/utils.py,sha256=FZ8cssDom4B3FDbVU_ew4Cf3wOWjo2w1jwcbnLzoYnM,1003
- femtocrux-1.2.1.dist-info/licenses/LICENSE,sha256=eN9ZI1xHjUmFvN3TEeop5kBGXRUBfbsl55KBNBYYFqI,36
- femtocrux-1.2.1.dist-info/METADATA,sha256=7UWQbn2ZQBQ94vHjVKc9XOSbaSPftDa22vSrCgMeoy4,2763
- femtocrux-1.2.1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- femtocrux-1.2.1.dist-info/top_level.txt,sha256=BkTttlioC3je__8577wxRieZqY3Abu7FOOdMnmYbcNI,10
- femtocrux-1.2.1.dist-info/RECORD,,