femtocrux 2.5.2__py3-none-any.whl → 3.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
femtocrux/VERSION CHANGED
@@ -1 +1 @@
1
- 2.5.2
1
+ 3.1.0
femtocrux/__init__.py CHANGED
@@ -1,4 +1,10 @@
1
- from .client.client import CompilerClient, TFLiteModel, FQIRModel, ManagedCompilerClient
1
+ from .client.client import (
2
+ CompilerClient,
3
+ TFLiteModel,
4
+ FQIRModel,
5
+ ModelAndMetadata,
6
+ ManagedCompilerClient,
7
+ )
2
8
  from .version import __version__
3
9
 
4
10
  # PEP 8 definition of public API
@@ -7,6 +13,7 @@ __all__ = [
7
13
  "CompilerClient",
8
14
  "TFLiteModel",
9
15
  "FQIRModel",
16
+ "ModelAndMetadata",
10
17
  "__version__",
11
18
  "ManagedCompilerClient",
12
19
  ]
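
The reworked __init__.py adds ModelAndMetadata to the public API. A minimal import sketch against the 3.1.0 surface (illustrative only, not part of the package):

    # Sketch: the public names listed in __all__ above.
    from femtocrux import (
        CompilerClient,
        FQIRModel,
        ModelAndMetadata,
        TFLiteModel,
        __version__,
    )

    print(__version__)  # expected to report 3.1.0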
femtocrux/client/client.py CHANGED
@@ -15,14 +15,18 @@ from contextlib import contextmanager
15
15
  import subprocess
16
16
 
17
17
  from fmot.fqir import GraphProto
18
+ from femtorun import IOSpec
18
19
 
19
20
  from femtocrux.util.utils import (
20
21
  read_secret_raw,
21
22
  get_channel_options,
22
23
  serialize_sim_inputs_message,
23
- deserialize_simulation_output,
24
+ deserialize_simulation_data,
25
+ deserialize_simulation_data_list,
24
26
  )
25
27
 
28
+ from google.protobuf.message import Message
29
+
26
30
  # GRPC artifacts
27
31
  import femtocrux.grpc.compiler_service_pb2 as cs_pb2
28
32
  import femtocrux.grpc.compiler_service_pb2_grpc as cs_pb2_grpc
@@ -104,13 +108,16 @@ class Model:
104
108
  :class:`~femtocrux.client.client.TFLiteModel`.
105
109
  """
106
110
 
107
- def _get_message(self, options: dict = {}) -> cs_pb2.model:
111
+ def _get_message(self, user_configuration: dict = {}) -> cs_pb2.model_and_metadata:
108
112
  # Format the options
109
- options_struct = google.protobuf.struct_pb2.Struct()
110
- options_struct.update(options)
113
+ user_configuration_struct = google.protobuf.struct_pb2.Struct()
114
+ user_configuration_struct.update(user_configuration)
111
115
 
112
116
  # Construct the model with IR
113
- return cs_pb2.model(**{self._ir_name: self._get_ir()}, options=options_struct)
117
+ return cs_pb2.model_and_metadata(
118
+ **{self._ir_name: self._get_ir()},
119
+ user_configuration=user_configuration_struct,
120
+ )
114
121
 
115
122
  @property
116
123
  def _ir_name(self) -> str:
@@ -186,14 +193,54 @@ class TFLiteModel(Model):
186
193
  return cs_pb2.tflite(model=self.flatbuffer, signature_name=self.signature_name)
187
194
 
188
195
 
196
+ @dataclass
197
+ class ModelAndMetadata(Model):
198
+ """
199
+ Wraps an FQIR model and metadata to be compiled by femtocrux.
200
+
201
+ model: FQIRModel object
202
+ iospec: IOSpec object
203
+ opt_profile: (str) simple compiler configuration setting
204
+ user_configuration: (dict) fine grained compiler configuration
205
+ memory_reservations: (dict) requested memory to be reserved for IO buffering.
206
+ Currently unsupported.
207
+ """
208
+
209
+ model: FQIRModel = None
210
+ iospec: IOSpec = None
211
+ opt_profile: str | None = None
212
+ user_configuration: dict | None = None
213
+ memory_reservations: dict | None = None # Currently unsupported by the compiler
214
+
215
+ @property
216
+ def _ir_name(self) -> str:
217
+ return "fqir"
218
+
219
+ def _get_ir(self) -> Any:
220
+ # Send the serialized model
221
+ serialized_iospec = self.iospec.serialize_to_string()
222
+ return cs_pb2.model_and_metadata(
223
+ fqir=self.model._get_ir(),
224
+ iospec=serialized_iospec,
225
+ opt_profile=self.opt_profile,
226
+ user_configuration=self.user_configuration,
227
+ memory_reservations=self.memory_reservations,
228
+ )
229
+
230
+
189
231
  class Simulator:
190
232
  """
191
233
  Simulates a compiled model's behavior on the SPU.
192
234
  """
193
235
 
194
- def __init__(self, client: "CompilerClient", model: Model, options: dict = {}):
236
+ def __init__(
237
+ self,
238
+ client: "CompilerClient",
239
+ model_and_metadata: ModelAndMetadata,
240
+ options: dict = {},
241
+ ):
195
242
  self.client = client
196
- self.model = model
243
+ self.model_and_metadata = model_and_metadata
197
244
 
198
245
  # Create an event stream fed by a queue
199
246
  self.request_queue = queue.SimpleQueue()
@@ -201,8 +248,9 @@ class Simulator:
201
248
  self.response_iterator = client._simulate(request_iterator)
202
249
 
203
250
  # Compile the model with the first message
204
- model_msg = model._get_message(options)
205
- simulation_start_msg = cs_pb2.simulation_input(model=model_msg)
251
+ # model_msg = self.model._get_message(user_configuration=options)
252
+ message = model_and_metadata._get_ir()
253
+ simulation_start_msg = cs_pb2.simulation_input(model=message)
206
254
  self._send_request(simulation_start_msg)
207
255
 
208
256
  # Check compilation status
@@ -227,9 +275,9 @@ class Simulator:
227
275
 
228
276
  def simulate(
229
277
  self,
230
- inputs: Iterable[np.array],
278
+ inputs: list[dict[str, np.ndarray]] | dict[str, np.ndarray],
231
279
  input_period: float = 0.016,
232
- ) -> List[np.array]:
280
+ ) -> tuple:
233
281
  """
234
282
  Simulates the model on the given inputs.
235
283
 
@@ -238,11 +286,28 @@ class Simulator:
238
286
  "input_name1": np.ndarray(int16 types ...),
239
287
  "input_name2": np.ndarray([int16 types ...])
240
288
  }
289
+ # Array has dimensions for sequence, features
290
+
291
+ or a list of dicts
292
+ [
293
+ {"input_name1": np.ndarray(int16 types ..),
294
+ "input_name2": np.ndarray(int16 ...)},
295
+ {"input_name1": np.ndarray(int16 types ..),
296
+ "input_name2": np.ndarray(int16 ...)},
297
+ {"input_name1": np.ndarray(int16 types ..),
298
+ "input_name2": np.ndarray(int16 ...)},
299
+ ]
300
+
301
+ The sequence dimension is captured as list elements, and each
302
+ array has only a feature dimension.
303
+
241
304
  :type input_period (float, optional): Duration between each input in a sequence,
242
305
  in seconds.
243
306
 
244
- :rtype: list
245
- :return: The output tensors.
307
+ :rtype: tuple
308
+ :return: The deserialized outputs, either as a
309
+ list[dict[str, np.ndarray]] with feature-only arrays, or a
310
+ dict[str, np.ndarray] with shape (sequence, features), plus the sim report.
246
311
 
247
312
  """
248
313
 
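
The docstring above allows two input layouts. A hedged usage sketch follows; the input names and shapes are illustrative, and `graph` (an fmot GraphProto), `iospec` (a femtorun IOSpec), and the CompilerClient constructor arguments are assumed rather than shown in this diff. It also assumes CompilerClient exposes the same simulate entry point as CompilerClientImpl below:

    import numpy as np
    from femtocrux import CompilerClient, FQIRModel, ModelAndMetadata

    client = CompilerClient()  # constructor arguments omitted
    mm = ModelAndMetadata(
        model=FQIRModel(graph_proto=graph, batch_dim=0, sequence_dim=1),
        iospec=iospec,
        opt_profile="efficient",
    )
    sim = client.simulate(mm)

    # Layout 1: one dict, each array shaped (sequence, features).
    dict_inputs = {"input_name1": np.zeros((10, 32), dtype=np.int16)}
    outputs, report = sim.simulate(dict_inputs, input_period=0.016)

    # Layout 2: a list of per-step dicts, each array shaped (features,).
    list_inputs = [{"input_name1": np.zeros(32, dtype=np.int16)} for _ in range(10)]
    step_outputs, report = sim.simulate(list_inputs, input_period=0.016)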
@@ -252,7 +317,16 @@ class Simulator:
252
317
  self._send_request(simulation_request)
253
318
  response = self._get_response()
254
319
 
255
- return deserialize_simulation_output(response.outputs), response.report
320
+ which = response.WhichOneof("output_data")
321
+ if which == "outputs":
322
+ deserialized_outputs = deserialize_simulation_data(response.outputs)
323
+ elif which == "list_outputs":
324
+ deserialized_outputs = deserialize_simulation_data_list(
325
+ response.list_outputs
326
+ )
327
+ else:
328
+ raise (Exception("Didn't get output data as proto dict or list message."))
329
+ return deserialized_outputs, response.report
256
330
 
257
331
 
258
332
  class CompilerClientImpl:
@@ -291,23 +365,63 @@ class CompilerClientImpl:
291
365
  server_version,
292
366
  )
293
367
 
294
- def compile(self, model: Model, options: dict = {}) -> bytes:
368
+ def compile(
369
+ self,
370
+ model: GraphProto | Model,
371
+ iospec: IOSpec | None = None,
372
+ opt_profile: str | None = None,
373
+ user_configuration: dict | None = None,
374
+ memory_reservations: dict | None = None,
375
+ ) -> tuple[Message, IOSpec, Message]:
295
376
  """
296
377
  Compile the model to a bitstream.
297
378
 
298
- :type model: Model, required
299
- :param model: The model to be compiled.
300
-
301
- :type options: dict, optional
302
- :param options: Complier options.
303
-
304
- :rtype: bytes
305
- :return: A zip archive of compiler artifacts.
379
+ :model: FQIRModel or GraphProto of the model to compile
380
+ :iospec: IOSpec object
381
+ :opt_profile: (str) simple compiler configuration setting
382
+ :user_configuration: (dict) fine grained compiler configuration
383
+ :memory_reservations: (dict) requested memory to be reserved for IO buffering.
384
+ Currently unsupported.
385
+
386
+ :return:
387
+ ProtoMessage of a zip archive of compiler artifacts.
388
+ ProtoMessage of iospec
389
+ ProtoMessage of memory_reservations
306
390
  """
307
391
 
308
- response = self.stub.compile(model._get_message(options))
392
+ # response = self.stub.compile(model._get_message(options))
393
+
394
+ # Send the serialized model
395
+ if isinstance(model, GraphProto):
396
+ fqir_message = FQIRModel(
397
+ graph_proto=model,
398
+ batch_dim=0,
399
+ sequence_dim=1,
400
+ )
401
+ elif isinstance(model, FQIRModel):
402
+ fqir_message = model
403
+ else:
404
+ raise Exception(
405
+ f"Expected fqir GraphProto or protobuf Model message. "
406
+ f"Model allows for specifying custom batch and sequence dimensions. "
407
+ f"Got {type(model)}"
408
+ )
409
+ if iospec is None:
410
+ raise Exception(
411
+ "No iospec was provided. Please provide an iospec to the compiler."
412
+ )
413
+ serialized_iospec = iospec.serialize_to_string()
414
+ data_to_send = cs_pb2.model_and_metadata(
415
+ fqir=fqir_message._get_ir(),
416
+ iospec=serialized_iospec,
417
+ opt_profile=opt_profile,
418
+ user_configuration=user_configuration,
419
+ memory_reservations=memory_reservations,
420
+ )
421
+ response = self.stub.compile(data_to_send)
422
+ returned_iospec = IOSpec.deserialize_from_string(response.iospec)
309
423
  self._check_status(response.status)
310
- return response.bitfile
424
+ return response.bitfile, returned_iospec, response.memory_reservations
311
425
 
312
426
  def _ping(self, message: bytes) -> None:
313
427
  """Pings the server with a message."""
@@ -319,7 +433,9 @@ class CompilerClientImpl:
319
433
  """Calls the 'simulator' bidirectional streaming RPC."""
320
434
  return self.stub.simulate(in_stream)
321
435
 
322
- def simulate(self, model: Model, options: dict = {}) -> Simulator:
436
+ def simulate(
437
+ self, model_and_metadata: ModelAndMetadata, options: dict = {}
438
+ ) -> Simulator:
323
439
  """
324
440
  Get a simulator for the model.
325
441
 
@@ -332,10 +448,14 @@ class CompilerClientImpl:
332
448
  :rtype: Simulator
333
449
  :return: A simulator for the model.
334
450
  """
335
- return Simulator(client=self, model=model, options=options)
451
+ return Simulator(
452
+ client=self, model_and_metadata=model_and_metadata, options=options
453
+ )
336
454
 
337
- def get_simulator_object(self, model: Model, options: dict = {}) -> Simulator:
338
- return self.simulate(model, options)
455
+ def get_simulator_object(
456
+ self, model_and_metadata: ModelAndMetadata, options: dict = {}
457
+ ) -> Simulator:
458
+ return self.simulate(model_and_metadata, options)
339
459
 
340
460
  def _server_version(self) -> str:
341
461
  """Queries the femtocrux version running on the server."""
femtocrux/grpc/compiler_service_pb2.py CHANGED
@@ -26,43 +26,43 @@ from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
26
26
  from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
27
27
 
28
28
 
29
- DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x63ompiler_service.proto\x12\nfscompiler\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\"g\n\x04\x66qir\x12\r\n\x05model\x18\x01 \x01(\x0c\x12\x16\n\tbatch_dim\x18\x02 \x01(\x03H\x00\x88\x01\x01\x12\x19\n\x0csequence_dim\x18\x03 \x01(\x03H\x01\x88\x01\x01\x42\x0c\n\n_batch_dimB\x0f\n\r_sequence_dim\"G\n\x06tflite\x12\r\n\x05model\x18\x01 \x01(\x0c\x12\x1b\n\x0esignature_name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x11\n\x0f_signature_name\"\x90\x01\n\x05model\x12 \n\x04\x66qir\x18\x01 \x01(\x0b\x32\x10.fscompiler.fqirH\x00\x12$\n\x06tflite\x18\x02 \x01(\x0b\x32\x12.fscompiler.tfliteH\x00\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x17.google.protobuf.StructH\x01\x88\x01\x01\x42\x04\n\x02irB\n\n\x08_options\"&\n\x06status\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0b\n\x03msg\x18\x02 \x01(\t\"I\n\x12\x63ompiled_artifacts\x12\x0f\n\x07\x62itfile\x18\x01 \x01(\x0c\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.fscompiler.status\"5\n\x07ndarray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\x03\x12\r\n\x05\x64type\x18\x03 \x01(\t\"\x14\n\x04\x64\x61ta\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"\xe6\x01\n\x0fsimulation_data\x12\x37\n\x06inputs\x18\x01 \x03(\x0b\x32\'.fscompiler.simulation_data.InputsEntry\x12\x19\n\x0csim_duration\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x19\n\x0cinput_period\x18\x03 \x01(\x02H\x01\x88\x01\x01\x1a\x42\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.fscompiler.ndarray:\x02\x38\x01\x42\x0f\n\r_sim_durationB\x0f\n\r_input_period\"t\n\x10simulation_input\x12\"\n\x05model\x18\x01 \x01(\x0b\x32\x11.fscompiler.modelH\x00\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.fscompiler.simulation_dataH\x00\x42\x0f\n\rmodel_or_data\"\xc9\x01\n\x11simulation_output\x12;\n\x07outputs\x18\x01 \x03(\x0b\x32*.fscompiler.simulation_output.OutputsEntry\x12\x0e\n\x06report\x18\x02 \x01(\t\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.fscompiler.status\x1a\x43\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.fscompiler.ndarray:\x02\x38\x01\"\x1f\n\x0cversion_info\x12\x0f\n\x07version\x18\x01 \x01(\t2\x85\x02\n\x07\x43ompile\x12>\n\x07\x63ompile\x12\x11.fscompiler.model\x1a\x1e.fscompiler.compiled_artifacts\"\x00\x12,\n\x04ping\x12\x10.fscompiler.data\x1a\x10.fscompiler.data\"\x00\x12M\n\x08simulate\x12\x1c.fscompiler.simulation_input\x1a\x1d.fscompiler.simulation_output\"\x00(\x01\x30\x01\x12=\n\x07version\x12\x16.google.protobuf.Empty\x1a\x18.fscompiler.version_info\"\x00\x62\x06proto3')
29
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x63ompiler_service.proto\x12\nfscompiler\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\"g\n\x04\x66qir\x12\r\n\x05model\x18\x01 \x01(\x0c\x12\x16\n\tbatch_dim\x18\x02 \x01(\x03H\x00\x88\x01\x01\x12\x19\n\x0csequence_dim\x18\x03 \x01(\x03H\x01\x88\x01\x01\x42\x0c\n\n_batch_dimB\x0f\n\r_sequence_dim\"G\n\x06tflite\x12\r\n\x05model\x18\x01 \x01(\x0c\x12\x1b\n\x0esignature_name\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x11\n\x0f_signature_name\"\xd0\x02\n\x12model_and_metadata\x12 \n\x04\x66qir\x18\x01 \x01(\x0b\x32\x10.fscompiler.fqirH\x00\x12$\n\x06tflite\x18\x02 \x01(\x0b\x32\x12.fscompiler.tfliteH\x00\x12\x13\n\x06iospec\x18\x03 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0bopt_profile\x18\x04 \x01(\tH\x02\x88\x01\x01\x12\x38\n\x12user_configuration\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructH\x03\x88\x01\x01\x12\x39\n\x13memory_reservations\x18\x06 \x01(\x0b\x32\x17.google.protobuf.StructH\x04\x88\x01\x01\x42\x04\n\x02irB\t\n\x07_iospecB\x0e\n\x0c_opt_profileB\x15\n\x13_user_configurationB\x16\n\x14_memory_reservations\"&\n\x06status\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0b\n\x03msg\x18\x02 \x01(\t\"\xbc\x01\n\x12\x63ompiled_artifacts\x12\x0f\n\x07\x62itfile\x18\x01 \x01(\x0c\x12\x13\n\x06iospec\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x39\n\x13memory_reservations\x18\x03 \x01(\x0b\x32\x17.google.protobuf.StructH\x01\x88\x01\x01\x12\"\n\x06status\x18\x04 \x01(\x0b\x32\x12.fscompiler.statusB\t\n\x07_iospecB\x16\n\x14_memory_reservations\"5\n\x07ndarray\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\x03\x12\r\n\x05\x64type\x18\x03 \x01(\t\"\x14\n\x04\x64\x61ta\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\"\xdb\x01\n\x0fsimulation_data\x12+\n\x06inputs\x18\x01 \x01(\x0b\x32\x19.fscompiler.str_array_mapH\x00\x12\x35\n\x0blist_inputs\x18\x02 \x01(\x0b\x32\x1e.fscompiler.list_str_array_mapH\x00\x12\x19\n\x0csim_duration\x18\x03 \x01(\x02H\x01\x88\x01\x01\x12\x19\n\x0cinput_period\x18\x04 \x01(\x02H\x02\x88\x01\x01\x42\x0c\n\ninput_dataB\x0f\n\r_sim_durationB\x0f\n\r_input_period\"\x84\x01\n\rstr_array_map\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.fscompiler.str_array_map.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.fscompiler.ndarray:\x02\x38\x01\"=\n\x12list_str_array_map\x12\'\n\x04maps\x18\x01 \x03(\x0b\x32\x19.fscompiler.str_array_map\"\x81\x01\n\x10simulation_input\x12/\n\x05model\x18\x01 \x01(\x0b\x32\x1e.fscompiler.model_and_metadataH\x00\x12+\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x1b.fscompiler.simulation_dataH\x00\x42\x0f\n\rmodel_or_data\"\xbc\x01\n\x11simulation_output\x12,\n\x07outputs\x18\x01 \x01(\x0b\x32\x19.fscompiler.str_array_mapH\x00\x12\x36\n\x0clist_outputs\x18\x02 \x01(\x0b\x32\x1e.fscompiler.list_str_array_mapH\x00\x12\x0e\n\x06report\x18\x03 \x01(\t\x12\"\n\x06status\x18\x04 \x01(\x0b\x32\x12.fscompiler.statusB\r\n\x0boutput_data\"\x1f\n\x0cversion_info\x12\x0f\n\x07version\x18\x01 \x01(\t2\x92\x02\n\x07\x43ompile\x12K\n\x07\x63ompile\x12\x1e.fscompiler.model_and_metadata\x1a\x1e.fscompiler.compiled_artifacts\"\x00\x12,\n\x04ping\x12\x10.fscompiler.data\x1a\x10.fscompiler.data\"\x00\x12M\n\x08simulate\x12\x1c.fscompiler.simulation_input\x1a\x1d.fscompiler.simulation_output\"\x00(\x01\x30\x01\x12=\n\x07version\x12\x16.google.protobuf.Empty\x1a\x18.fscompiler.version_info\"\x00\x62\x06proto3')
30
30
 
31
31
  _globals = globals()
32
32
  _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
33
33
  _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'compiler_service_pb2', _globals)
34
34
  if not _descriptor._USE_C_DESCRIPTORS:
35
35
  DESCRIPTOR._loaded_options = None
36
- _globals['_SIMULATION_DATA_INPUTSENTRY']._loaded_options = None
37
- _globals['_SIMULATION_DATA_INPUTSENTRY']._serialized_options = b'8\001'
38
- _globals['_SIMULATION_OUTPUT_OUTPUTSENTRY']._loaded_options = None
39
- _globals['_SIMULATION_OUTPUT_OUTPUTSENTRY']._serialized_options = b'8\001'
36
+ _globals['_STR_ARRAY_MAP_DATAENTRY']._loaded_options = None
37
+ _globals['_STR_ARRAY_MAP_DATAENTRY']._serialized_options = b'8\001'
40
38
  _globals['_FQIR']._serialized_start=97
41
39
  _globals['_FQIR']._serialized_end=200
42
40
  _globals['_TFLITE']._serialized_start=202
43
41
  _globals['_TFLITE']._serialized_end=273
44
- _globals['_MODEL']._serialized_start=276
45
- _globals['_MODEL']._serialized_end=420
46
- _globals['_STATUS']._serialized_start=422
47
- _globals['_STATUS']._serialized_end=460
48
- _globals['_COMPILED_ARTIFACTS']._serialized_start=462
49
- _globals['_COMPILED_ARTIFACTS']._serialized_end=535
50
- _globals['_NDARRAY']._serialized_start=537
51
- _globals['_NDARRAY']._serialized_end=590
52
- _globals['_DATA']._serialized_start=592
53
- _globals['_DATA']._serialized_end=612
54
- _globals['_SIMULATION_DATA']._serialized_start=615
55
- _globals['_SIMULATION_DATA']._serialized_end=845
56
- _globals['_SIMULATION_DATA_INPUTSENTRY']._serialized_start=745
57
- _globals['_SIMULATION_DATA_INPUTSENTRY']._serialized_end=811
58
- _globals['_SIMULATION_INPUT']._serialized_start=847
59
- _globals['_SIMULATION_INPUT']._serialized_end=963
60
- _globals['_SIMULATION_OUTPUT']._serialized_start=966
61
- _globals['_SIMULATION_OUTPUT']._serialized_end=1167
62
- _globals['_SIMULATION_OUTPUT_OUTPUTSENTRY']._serialized_start=1100
63
- _globals['_SIMULATION_OUTPUT_OUTPUTSENTRY']._serialized_end=1167
64
- _globals['_VERSION_INFO']._serialized_start=1169
65
- _globals['_VERSION_INFO']._serialized_end=1200
66
- _globals['_COMPILE']._serialized_start=1203
67
- _globals['_COMPILE']._serialized_end=1464
42
+ _globals['_MODEL_AND_METADATA']._serialized_start=276
43
+ _globals['_MODEL_AND_METADATA']._serialized_end=612
44
+ _globals['_STATUS']._serialized_start=614
45
+ _globals['_STATUS']._serialized_end=652
46
+ _globals['_COMPILED_ARTIFACTS']._serialized_start=655
47
+ _globals['_COMPILED_ARTIFACTS']._serialized_end=843
48
+ _globals['_NDARRAY']._serialized_start=845
49
+ _globals['_NDARRAY']._serialized_end=898
50
+ _globals['_DATA']._serialized_start=900
51
+ _globals['_DATA']._serialized_end=920
52
+ _globals['_SIMULATION_DATA']._serialized_start=923
53
+ _globals['_SIMULATION_DATA']._serialized_end=1142
54
+ _globals['_STR_ARRAY_MAP']._serialized_start=1145
55
+ _globals['_STR_ARRAY_MAP']._serialized_end=1277
56
+ _globals['_STR_ARRAY_MAP_DATAENTRY']._serialized_start=1213
57
+ _globals['_STR_ARRAY_MAP_DATAENTRY']._serialized_end=1277
58
+ _globals['_LIST_STR_ARRAY_MAP']._serialized_start=1279
59
+ _globals['_LIST_STR_ARRAY_MAP']._serialized_end=1340
60
+ _globals['_SIMULATION_INPUT']._serialized_start=1343
61
+ _globals['_SIMULATION_INPUT']._serialized_end=1472
62
+ _globals['_SIMULATION_OUTPUT']._serialized_start=1475
63
+ _globals['_SIMULATION_OUTPUT']._serialized_end=1663
64
+ _globals['_VERSION_INFO']._serialized_start=1665
65
+ _globals['_VERSION_INFO']._serialized_end=1696
66
+ _globals['_COMPILE']._serialized_start=1699
67
+ _globals['_COMPILE']._serialized_end=1973
68
68
  # @@protoc_insertion_point(module_scope)
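
The regenerated descriptor replaces the old model message with model_and_metadata. A hedged sketch of building the new request directly, with field names and types read from the serialized descriptor above (payload values are placeholders):

    from google.protobuf import struct_pb2
    import femtocrux.grpc.compiler_service_pb2 as cs_pb2

    user_configuration = struct_pb2.Struct()
    user_configuration.update({"example_option": True})  # hypothetical key

    request = cs_pb2.model_and_metadata(
        fqir=cs_pb2.fqir(model=b"<pickled GraphProto>", batch_dim=0, sequence_dim=1),
        iospec="<serialized IOSpec string>",
        opt_profile="efficient",
        user_configuration=user_configuration,
    )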
femtocrux/grpc/compiler_service_pb2_grpc.py CHANGED
@@ -38,7 +38,7 @@ class CompileStub(object):
38
38
  """
39
39
  self.compile = channel.unary_unary(
40
40
  '/fscompiler.Compile/compile',
41
- request_serializer=compiler__service__pb2.model.SerializeToString,
41
+ request_serializer=compiler__service__pb2.model_and_metadata.SerializeToString,
42
42
  response_deserializer=compiler__service__pb2.compiled_artifacts.FromString,
43
43
  _registered_method=True)
44
44
  self.ping = channel.unary_unary(
@@ -91,7 +91,7 @@ def add_CompileServicer_to_server(servicer, server):
91
91
  rpc_method_handlers = {
92
92
  'compile': grpc.unary_unary_rpc_method_handler(
93
93
  servicer.compile,
94
- request_deserializer=compiler__service__pb2.model.FromString,
94
+ request_deserializer=compiler__service__pb2.model_and_metadata.FromString,
95
95
  response_serializer=compiler__service__pb2.compiled_artifacts.SerializeToString,
96
96
  ),
97
97
  'ping': grpc.unary_unary_rpc_method_handler(
@@ -136,7 +136,7 @@ class Compile(object):
136
136
  request,
137
137
  target,
138
138
  '/fscompiler.Compile/compile',
139
- compiler__service__pb2.model.SerializeToString,
139
+ compiler__service__pb2.model_and_metadata.SerializeToString,
140
140
  compiler__service__pb2.compiled_artifacts.FromString,
141
141
  options,
142
142
  channel_credentials,
femtocrux/server/compiler_frontend.py CHANGED
@@ -13,10 +13,11 @@ import zipfile
13
13
 
14
14
  from fmot import ConvertedModel
15
15
  from fmot.fqir import GraphProto
16
- from femtomapper import MapperConf, Mapper, MapperState
16
+ from femtomapper import compile as femtomapper_compile
17
17
  from femtobehav.fasmir import FASMIR
18
18
  from femtobehav.sim import SimRunner
19
- from typing import Any
19
+ from femtorun import IOSpec
20
+ from typing import Any, Literal
20
21
 
21
22
 
22
23
  class CompilerFrontend:
@@ -25,13 +26,15 @@ class CompilerFrontend:
25
26
  def __init__(self, input_ir: Any, fasmir: FASMIR = None):
26
27
  self.input_ir = input_ir
27
28
  self.fasmir = fasmir
29
+ self.iospec = None
30
+ self.memory_reservations = None
28
31
  # self.io_wrapper = io_wrapper
29
32
 
30
33
  @property
31
34
  def is_compiled(self):
32
35
  return self.fasmir is not None
33
36
 
34
- def _compile(self, input_ir: Any, options: dict) -> FASMIR:
37
+ def _compile(self, input_ir: Any) -> FASMIR:
35
38
  """
36
39
  Runs FM compiler to generate FASMIR, and encode io information in a
37
40
  SimIOWrapper object.
@@ -44,9 +47,19 @@ class CompilerFrontend:
44
47
  "Subclasses need to implement this based on their input ir"
45
48
  )
46
49
 
47
- def compile(self, options: dict = {}):
50
+ def compile(
51
+ self,
52
+ input_ir: GraphProto,
53
+ iospec: IOSpec | None = None,
54
+ opt_profile: Literal["efficient", "fast", "conservative"] | None = "efficient",
55
+ user_configuration: dict | None = None,
56
+ memory_reservations: dict | None = None,
57
+ ) -> tuple[FASMIR, IOSpec, dict]:
48
58
  if not self.is_compiled:
49
- self.fasmir = self._compile(self.input_ir, options)
59
+ self.fasmir, self.iospec, self.memory_reservations = self._compile(
60
+ input_ir, iospec, opt_profile, user_configuration, memory_reservations
61
+ )
62
+ return self.fasmir, self.iospec, self.memory_reservations
50
63
 
51
64
  def dump_bitfile(self, encrypt: bool = True) -> bytes:
52
65
  """Dumps a bitfile used to program the SPU."""
@@ -56,7 +69,9 @@ class CompilerFrontend:
56
69
  with tempfile.TemporaryFile() as tmpfile:
57
70
  with tempfile.TemporaryDirectory() as dirname:
58
71
  # Dump memory files to a directory
59
- runner = SimRunner(self.fasmir, data_dir=dirname, encrypt=encrypt)
72
+ runner = SimRunner(
73
+ self.fasmir, io_spec=self.iospec, data_dir=dirname, encrypt=encrypt
74
+ )
60
75
  runner.reset()
61
76
  runner.finish()
62
77
 
@@ -84,7 +99,11 @@ class CompilerFrontend:
84
99
  return fasmir_var.numpy.shape[0]
85
100
 
86
101
  def run_behavioral_simulator(
87
- self, inputs: dict[str, np.ndarray], input_period: float = None, **kwargs
102
+ self,
103
+ inputs: dict[str, np.ndarray] | list[dict[str, np.ndarray]],
104
+ input_period: float = None,
105
+ iospec: IOSpec | None = None,
106
+ **kwargs
88
107
  ):
89
108
  """
90
109
  Runs the behavioral simulator and returns outputs and metrics.
@@ -96,7 +115,7 @@ class CompilerFrontend:
96
115
  input_period (float, optional): total simulation time.
97
116
 
98
117
  """
99
- runner = SimRunner(self.fasmir, **kwargs)
118
+ runner = SimRunner(self.fasmir, io_spec=iospec, **kwargs)
100
119
  runner.reset()
101
120
  outputs, __, __ = runner.run(inputs)
102
121
  metrics = runner.get_metrics(input_period, concise=True, as_yamlable=True)
@@ -104,19 +123,6 @@ class CompilerFrontend:
104
123
  return outputs, metrics
105
124
 
106
125
 
107
- def _compile_fqir(graph: GraphProto, options: dict) -> FASMIR:
108
- mapper_conf = MapperConf(**options)
109
- mapper = Mapper(mapper_conf)
110
- mapper_state = MapperState(fqir=graph)
111
-
112
- # compile:
113
- mapper_state = mapper.do(mapper_state)
114
-
115
- # extract fasmir
116
- fasmir = mapper_state.fasmir
117
- return fasmir
118
-
119
-
120
126
  class TorchCompiler(CompilerFrontend):
121
127
  def __init__(self, graph: GraphProto, batch_dim: int = None, seq_dim: int = None):
122
128
  assert isinstance(graph, GraphProto)
@@ -125,10 +131,19 @@ class TorchCompiler(CompilerFrontend):
125
131
  self.batch_dim = batch_dim
126
132
  self.seq_dim = seq_dim
127
133
 
128
- def _compile(self, input_ir: GraphProto, options: dict) -> FASMIR:
129
- fasmir = _compile_fqir(input_ir, options)
130
- # wrapper = self._get_fqir_iowrapper(input_ir, fasmir)
131
- return fasmir
134
+ def _compile(
135
+ self,
136
+ input_ir: GraphProto,
137
+ iospec: IOSpec | None = None,
138
+ opt_profile: Literal["efficient", "fast", "conservative"] | None = "efficient",
139
+ user_configuration: dict | None = None,
140
+ memory_reservations: dict | None = None,
141
+ ) -> tuple[FASMIR, IOSpec, dict]:
142
+ mapper_state, iospec, memory_reservations = femtomapper_compile(
143
+ input_ir, iospec, opt_profile, user_configuration, memory_reservations
144
+ )
145
+ fasmir = mapper_state.fasmir
146
+ return fasmir, iospec, memory_reservations
132
147
 
133
148
  @classmethod
134
149
  def from_fqir(cls, graph: GraphProto, batch_dim: int = None, seq_dim: int = None):
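
TorchCompiler._compile now delegates to femtomapper.compile and threads the IOSpec and memory reservations through. A hedged sketch of the resulting server-side flow, assuming `graph` (a GraphProto) and `iospec` (an IOSpec) already exist:

    compiler = TorchCompiler.from_fqir(graph, batch_dim=0, seq_dim=1)
    fasmir, iospec, memory_reservations = compiler.compile(
        graph, iospec=iospec, opt_profile="efficient"
    )
    bitfile = compiler.dump_bitfile(encrypt=True)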
femtocrux/server/server.py CHANGED
@@ -1,12 +1,12 @@
1
1
  import argparse
2
2
  from collections.abc import Iterable
3
3
  import concurrent
4
- from google.protobuf.json_format import MessageToDict
5
4
  import grpc
6
5
  import logging
7
6
  import pickle
8
7
  import sys
9
8
 
9
+ from femtorun import IOSpec
10
10
  from femtocrux.server import CompilerFrontend, TorchCompiler
11
11
 
12
12
  from femtocrux.server.exceptions import format_exception, format_exception_from_exc
@@ -15,6 +15,7 @@ from femtocrux.util.utils import (
15
15
  get_channel_options,
16
16
  serialize_simulation_output,
17
17
  deserialize_simulation_data,
18
+ deserialize_simulation_data_list,
18
19
  )
19
20
 
20
21
  # Import GRPC artifacts
@@ -36,8 +37,9 @@ class CompileServicer(cs_pb2_grpc.CompileServicer):
36
37
  self.logger = logging.getLogger("CompileServicer")
37
38
  self.logger.setLevel(logging.DEBUG)
38
39
  self.logger.info("Starting compile server.")
40
+ self.iospec = None
39
41
 
40
- def _get_fqir_compiler(self, model: cs_pb2.model) -> CompilerFrontend:
42
+ def _get_fqir_compiler(self, model: cs_pb2.model_and_metadata) -> CompilerFrontend:
41
43
  """Get a Torch compiler from an FQIR a model message"""
42
44
  # Deserialize FQIR
43
45
  fqir = model.fqir
@@ -50,24 +52,36 @@ class CompileServicer(cs_pb2_grpc.CompileServicer):
50
52
  seq_dim=field_or_none(fqir, "sequence_dim"),
51
53
  )
52
54
 
53
- def _compile_model(self, model: cs_pb2.model, context) -> CompilerFrontend:
55
+ def _compile_model(
56
+ self, model_and_metadata: cs_pb2.model_and_metadata, context
57
+ ) -> CompilerFrontend:
54
58
  """Compile a model, for simulation or bitfile generation."""
55
- # Get a compiler for the model
56
- model_type_map = {
57
- "fqir": self._get_fqir_compiler,
58
- }
59
- model_type = model.WhichOneof("ir")
60
- compiler = model_type_map[model_type](model)
61
-
62
- # Get the compiler options
63
- options_struct = field_or_none(model, "options")
64
- if options_struct is None:
65
- options = {}
66
- else:
67
- options = MessageToDict(options_struct)
59
+
60
+ # Deserialize FQIR
61
+ fqir = model_and_metadata.fqir
62
+ graph_proto = pickle.loads(fqir.model)
63
+
64
+ # Compile the FQIR
65
+ compiler = TorchCompiler(
66
+ graph_proto,
67
+ batch_dim=field_or_none(fqir, "batch_dim"),
68
+ seq_dim=field_or_none(fqir, "sequence_dim"),
69
+ )
70
+
71
+ # Get the iospec
72
+ iospec = field_or_none(model_and_metadata, "iospec")
73
+ iospec = IOSpec.deserialize_from_string(iospec)
74
+ self.iospec = iospec
75
+ # Get the opt_profile
76
+ opt_profile = field_or_none(model_and_metadata, "opt_profile")
77
+ # Get the user_configuration
78
+ user_configuration = field_or_none(model_and_metadata, "user_configuration")
79
+ memory_reservations = field_or_none(model_and_metadata, "memory_reservations")
68
80
 
69
81
  # Compile the model
70
- compiler.compile(options=options)
82
+ compiler.compile(
83
+ graph_proto, iospec, opt_profile, user_configuration, memory_reservations
84
+ )
71
85
  assert compiler.is_compiled, "Expected compilation completed"
72
86
  return compiler
73
87
 
@@ -88,10 +102,15 @@ class CompileServicer(cs_pb2_grpc.CompileServicer):
88
102
  )
89
103
 
90
104
  # Return the bitfile
91
- return cs_pb2.compiled_artifacts(
92
- bitfile=bitfile, status=cs_pb2.status(success=True)
105
+ ret_message = cs_pb2.compiled_artifacts(
106
+ bitfile=bitfile,
107
+ iospec=compiler.iospec.serialize_to_string(),
108
+ memory_reservations=compiler.memory_reservations,
109
+ status=cs_pb2.status(success=True),
93
110
  )
94
111
 
112
+ return ret_message
113
+
95
114
  def ping(self, data: cs_pb2.data, context) -> cs_pb2.data:
96
115
  """Round-trip a message."""
97
116
  return data
@@ -107,7 +126,11 @@ class CompileServicer(cs_pb2_grpc.CompileServicer):
107
126
  # Check that this is a model request
108
127
  if not model_request.WhichOneof("model_or_data") == "model":
109
128
  yield cs_pb2.simulation_output(
110
- status=cs_pb2.status(success=False, msg="Expected model message.")
129
+ status=cs_pb2.status(
130
+ success=False,
131
+ msg=f"Expected model message, got "
132
+ f"{model_request.WhichOneof('model_or_data')}",
133
+ )
111
134
  )
112
135
  continue
113
136
 
@@ -143,10 +166,20 @@ class CompileServicer(cs_pb2_grpc.CompileServicer):
143
166
  try:
144
167
  if data_request.WhichOneof("model_or_data") == "data":
145
168
  ### New Path with int64 proto message holding data
146
- deserialized_inputs = deserialize_simulation_data(data.inputs)
169
+
170
+ which = data.WhichOneof("input_data")
171
+ if which == "inputs":
172
+ deserialized_inputs = deserialize_simulation_data(data.inputs)
173
+ elif which == "list_inputs":
174
+ deserialized_inputs = deserialize_simulation_data_list(
175
+ data.list_inputs
176
+ )
177
+ else:
178
+ raise (Exception("Didn't get input data as dict or list."))
147
179
  outputs, metrics = compiler.run_behavioral_simulator(
148
180
  deserialized_inputs,
149
181
  input_period=field_or_none(data, "input_period"),
182
+ iospec=self.iospec,
150
183
  )
151
184
  else:
152
185
  raise (Exception("Didn't get a model or data"))
femtocrux/util/utils.py CHANGED
@@ -31,47 +31,108 @@ def serialize_numpy_array(arr: np.ndarray) -> cs_pb2.ndarray:
31
31
  )
32
32
 
33
33
 
34
+ def prepare_array_for_serialization(array, type_of_input="dict"):
35
+ """
36
+ Check that the arrays that are sent for inputs have the correct types and shapes.
37
+
38
+ They have to be np.ndarrays and have the appropriate shapes:
39
+ List[Dict[str, np.ndarray]]: each ndarray must have only one dimension (features).
40
+ Dict[str, np.ndarray]: each ndarray may have two dimensions, one for sequence and
41
+ another for features.
42
+ """
43
+ if isinstance(array, torch.Tensor):
44
+ array_to_serialize = array.numpy()
45
+ elif isinstance(array, np.ndarray):
46
+ array_to_serialize = array
47
+ else:
48
+ raise (Exception("Input array was not of type torch.Tensor or np.ndarray"))
49
+
50
+ if not np.issubdtype(array_to_serialize.dtype, np.integer):
51
+ raise (
52
+ Exception(
53
+ "Input data is not an integer type. Please quantize your"
54
+ "data to int16 or lower."
55
+ )
56
+ )
57
+ shape_size = 2
58
+ if type_of_input == "dict":
59
+ shape_size = 2
60
+ elif type_of_input == "list":
61
+ shape_size = 1
62
+
63
+ if len(array_to_serialize.shape) > shape_size:
64
+ raise (
65
+ Exception(
66
+ f"Expected {shape_size} dimensions for input and got shape: "
67
+ f"{array_to_serialize.shape} Your input array has too many "
68
+ f"dimensions. When in inference mode, please remove any batch "
69
+ f"dimensions."
70
+ )
71
+ )
72
+ return array_to_serialize
73
+
74
+
34
75
  def serialize_sim_inputs_message(
35
- data_dict: dict, input_period: float
76
+ input_data: list | dict, input_period: float
36
77
  ) -> cs_pb2.simulation_data:
37
- """Serializes a dictionary where values are NumPy arrays."""
78
+ """
79
+ Serialize input_data which could be the list[dict[str, np.ndarray]] or
80
+ dict[str, np.ndarray] format of input into oneof.
81
+
82
+ list_str_array_map or str_array_map proto messages and create a larger
83
+ simulation_data proto message.
84
+
85
+ return simulation_data proto message
86
+ """
38
87
  message = cs_pb2.simulation_data()
39
- for key, array in data_dict.items():
40
- if isinstance(array, torch.Tensor):
41
- array_to_serialize = array.numpy()
42
- elif isinstance(array, np.ndarray):
43
- array_to_serialize = array
44
- else:
45
- raise (Exception("Input array was not of type torch.Tensor or np.ndarray"))
46
-
47
- if not np.issubdtype(array_to_serialize.dtype, np.integer):
48
- raise (
49
- Exception(
50
- "Input data is not an integer type. Please quantize your"
51
- "data to int16 or lower."
52
- )
53
- )
54
88
 
55
- if len(array_to_serialize.shape) > 2:
56
- raise (
57
- Exception(
58
- "Expected 2 dimensions for input and got shape: "
59
- "{array_to_serialize.shape} Your input array has too many "
60
- "dimensions. When in inference mode, please remove any batch "
61
- "dimensions."
89
+ if isinstance(input_data, list):
90
+ for single_entry in input_data:
91
+ input_map = message.list_inputs.maps.add()
92
+ for key, array in single_entry.items():
93
+ array_to_serialize = prepare_array_for_serialization(
94
+ array, type_of_input="list"
62
95
  )
63
- )
96
+ input_map.data[key].CopyFrom(serialize_numpy_array(array_to_serialize))
64
97
 
65
- message.inputs[key].CopyFrom(serialize_numpy_array(array_to_serialize))
98
+ elif isinstance(input_data, dict):
99
+ for key, array in input_data.items():
100
+ array_to_serialize = prepare_array_for_serialization(
101
+ array, type_of_input="dict"
102
+ )
103
+ message.inputs.data[key].CopyFrom(serialize_numpy_array(array_to_serialize))
104
+ else:
105
+ raise (
106
+ Exception(f"input_data was of type {type(input_data)}, not dict or list.")
107
+ )
66
108
  message.input_period = input_period
67
109
  return message
68
110
 
69
111
 
70
- def serialize_simulation_output(data_dict: dict, report) -> cs_pb2.simulation_output:
71
- """Serializes a dictionary where values are NumPy arrays."""
112
+ def serialize_simulation_output(
113
+ output_data: dict | list, report
114
+ ) -> cs_pb2.simulation_output:
115
+ """
116
+ Serialize the output_data and report into a proto message simulation_output.
117
+
118
+ We have both the list[dict[str, np.ndarray]] and the dict[str, np.ndarray]
119
+ as possible output formats.
120
+
121
+ We serialize whichever one we get as message.list_outputs or message.outputs
122
+
123
+ return the proto message
124
+ """
72
125
  message = cs_pb2.simulation_output()
73
- for key, array in data_dict.items():
74
- message.outputs[key].CopyFrom(serialize_numpy_array(array))
126
+
127
+ if isinstance(output_data, list):
128
+ for single_entry in output_data:
129
+ output_map = message.list_outputs.maps.add()
130
+ for key, array in single_entry.items():
131
+ output_map.data[key].CopyFrom(serialize_numpy_array(array))
132
+ elif isinstance(output_data, dict):
133
+ for key, array in output_data.items():
134
+ message.outputs.data[key].CopyFrom(serialize_numpy_array(array))
135
+
75
136
  message.report = json.dumps(report)
76
137
  message.status.CopyFrom(cs_pb2.status(success=True))
77
138
 
@@ -83,14 +144,23 @@ def deserialize_numpy_array(proto: cs_pb2.ndarray) -> np.ndarray:
83
144
  return np.frombuffer(proto.data, dtype=np.dtype(proto.dtype)).reshape(proto.shape)
84
145
 
85
146
 
86
- def deserialize_simulation_data(proto: cs_pb2.simulation_data) -> dict:
87
- """Deserializes a LargerMessage back into a dictionary of NumPy arrays."""
88
- return {key: deserialize_numpy_array(value) for key, value in proto.items()}
89
-
90
-
91
- def deserialize_simulation_output(proto: cs_pb2.simulation_output) -> dict:
147
+ def deserialize_simulation_data(proto: cs_pb2.str_array_map) -> dict:
92
148
  """Deserializes a LargerMessage back into a dictionary of NumPy arrays."""
93
- return {key: deserialize_numpy_array(value) for key, value in proto.items()}
149
+ return {key: deserialize_numpy_array(value) for key, value in proto.data.items()}
150
+
151
+
152
+ def deserialize_simulation_data_list(list_input: cs_pb2.list_str_array_map) -> list:
153
+ """
154
+ Convert proto list_str_array_map into a python list[dict[str, np.ndarray]]
155
+ """
156
+ list_of_dicts = []
157
+ for input_map in list_input.maps:
158
+ dict_entry = {}
159
+ for key, ndarray_proto in input_map.data.items():
160
+ # Convert each ndarray proto back into a NumPy array
161
+ dict_entry[key] = deserialize_numpy_array(ndarray_proto)
162
+ list_of_dicts.append(dict_entry)
163
+ return list_of_dicts
94
164
 
95
165
 
96
166
  def read_secret_raw(prompt="Secret: "):
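
A hedged round-trip sketch of the serialization helpers above; the oneof branch names follow the regenerated compiler_service_pb2, and the package plus its numpy/torch dependencies are assumed to be installed:

    import numpy as np
    from femtocrux.util.utils import (
        serialize_sim_inputs_message,
        deserialize_simulation_data,
        deserialize_simulation_data_list,
    )

    # Dict-style inputs populate the `inputs` branch of the input_data oneof...
    msg = serialize_sim_inputs_message(
        {"x": np.zeros((4, 8), dtype=np.int16)}, input_period=0.016
    )
    assert msg.WhichOneof("input_data") == "inputs"
    as_dict = deserialize_simulation_data(msg.inputs)

    # ...while list-style inputs populate `list_inputs`.
    msg = serialize_sim_inputs_message(
        [{"x": np.zeros(8, dtype=np.int16)}], input_period=0.016
    )
    as_list = deserialize_simulation_data_list(msg.list_inputs)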
femtocrux-3.1.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: femtocrux
3
- Version: 2.5.2
3
+ Version: 3.1.0
4
4
  Summary: Femtosense Compiler
5
5
  Home-page: https://github.com/femtosense/femtocrux
6
6
  Author: Femtosense
@@ -0,0 +1,22 @@
1
+ femtocrux/ENV_REQUIREMENTS.sh,sha256=t_O1B4hJAMgxvH9gwp1qls6eVFmhSYBJe64KmuK_H-4,1389
2
+ femtocrux/PY_REQUIREMENTS,sha256=UwXV0o3gieruZUYdN9r2bqQ0Wcf_vosjeP6LJVx6oF0,275
3
+ femtocrux/VERSION,sha256=svRNO24p-LG3PqRzXwBq_8TRmOH9nH1Q5zYVmx72NsY,6
4
+ femtocrux/__init__.py,sha256=VeuEHuCvQmqHgzj_ogjIbwGE_E8UFLYSJN6GTpXBfe0,409
5
+ femtocrux/version.py,sha256=uNg2kHxQo6oUN1ah7s9_85rCZVRoTHGPD1GAQPZW4lw,164
6
+ femtocrux/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
+ femtocrux/client/client.py,sha256=QgyNoLdbjWbGyznWqQQyjJOgGGfGVdgiWxz-297H5Pw,31005
8
+ femtocrux/grpc/__init__.py,sha256=uiMHQt5I2eAKJqI3Zh0h1Gm7cmPR4PbaGS71nCJQCGw,169
9
+ femtocrux/grpc/compiler_service_pb2.py,sha256=a95hVO0IOLTuDmmag-acQaW6v8FFVAsoOkB6_8hw6vo,6255
10
+ femtocrux/grpc/compiler_service_pb2_grpc.py,sha256=b_fyld4rpKE4n5MimKOoyNZ5VU4UqUS_R3EM3_daqxM,8539
11
+ femtocrux/server/__init__.py,sha256=kpm2AGLxo3AA9zo-G8N0Dm0gmWbvAd6jfhMbtkeHcxo,77
12
+ femtocrux/server/compiler_frontend.py,sha256=DtaRF8Wuzc1Ul2Hh6_e1P5SQbJVBGYhsQuLID4kXiUU,6247
13
+ femtocrux/server/exceptions.py,sha256=lI6n471n5QKf5G3aL_1kuBVEItD-jBgithVVpPDwNYc,609
14
+ femtocrux/server/healthcheck.py,sha256=ehqAwnv0D0zpy-AUZAPwv8rp874DZCwUmP8nzdXzZvI,1565
15
+ femtocrux/server/server.py,sha256=102uwMQQHGdNm9GXDqAHaKjDlpYRFkWxDl-7NOwcwP4,8721
16
+ femtocrux/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
+ femtocrux/util/utils.py,sha256=esE9DIT1__9smSmehhBGCY0dxLvequ2RXGXqfH4R24s,6491
18
+ femtocrux-3.1.0.dist-info/licenses/LICENSE,sha256=eN9ZI1xHjUmFvN3TEeop5kBGXRUBfbsl55KBNBYYFqI,36
19
+ femtocrux-3.1.0.dist-info/METADATA,sha256=SeNrpR4cnbKX9Qwjzx8rhI9f2YmavTH-FIeJZylkESQ,2763
20
+ femtocrux-3.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
21
+ femtocrux-3.1.0.dist-info/top_level.txt,sha256=BkTttlioC3je__8577wxRieZqY3Abu7FOOdMnmYbcNI,10
22
+ femtocrux-3.1.0.dist-info/RECORD,,
@@ -1,22 +0,0 @@
1
- femtocrux/ENV_REQUIREMENTS.sh,sha256=t_O1B4hJAMgxvH9gwp1qls6eVFmhSYBJe64KmuK_H-4,1389
2
- femtocrux/PY_REQUIREMENTS,sha256=UwXV0o3gieruZUYdN9r2bqQ0Wcf_vosjeP6LJVx6oF0,275
3
- femtocrux/VERSION,sha256=jQk_mQvA_Y_BKZxt3xvSA-XqddIIptOPhzOpybb0jGo,6
4
- femtocrux/__init__.py,sha256=yIWd9I2PEXCn_PKIILAN3mkWeTf0tgtVualeTIHNxfQ,342
5
- femtocrux/version.py,sha256=uNg2kHxQo6oUN1ah7s9_85rCZVRoTHGPD1GAQPZW4lw,164
6
- femtocrux/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
- femtocrux/client/client.py,sha256=eJH-CM_Pob1V-M9EuwhFyyWd1NsdLBIUiF99Psz46-8,26428
8
- femtocrux/grpc/__init__.py,sha256=uiMHQt5I2eAKJqI3Zh0h1Gm7cmPR4PbaGS71nCJQCGw,169
9
- femtocrux/grpc/compiler_service_pb2.py,sha256=grQNjlnoUg8KKcykK7JcT7Pmv_7R7sjLpsoG_EHGyoY,5494
10
- femtocrux/grpc/compiler_service_pb2_grpc.py,sha256=Hl1bGJEYA7CvdIj38FUvlEolPGCoWyK8WazqOhx4v8s,8500
11
- femtocrux/server/__init__.py,sha256=kpm2AGLxo3AA9zo-G8N0Dm0gmWbvAd6jfhMbtkeHcxo,77
12
- femtocrux/server/compiler_frontend.py,sha256=-pfhOHDkXWReoB273Bc9s2s7BVTMZoCSyGM7pudmMhY,5472
13
- femtocrux/server/exceptions.py,sha256=lI6n471n5QKf5G3aL_1kuBVEItD-jBgithVVpPDwNYc,609
14
- femtocrux/server/healthcheck.py,sha256=ehqAwnv0D0zpy-AUZAPwv8rp874DZCwUmP8nzdXzZvI,1565
15
- femtocrux/server/server.py,sha256=sumnTj63uOGkzwj4-xwTuN4wVB7CDopy8e7PLOpW64Q,7405
16
- femtocrux/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
- femtocrux/util/utils.py,sha256=v8v09aDaOtfejpSIB-LgaA--JJj9o2_BWvNwjd8wfww,4134
18
- femtocrux-2.5.2.dist-info/licenses/LICENSE,sha256=eN9ZI1xHjUmFvN3TEeop5kBGXRUBfbsl55KBNBYYFqI,36
19
- femtocrux-2.5.2.dist-info/METADATA,sha256=9lyIB-H5EOpRqClbGnqarHYEwSWJei8wJhNSHTSfB4E,2763
20
- femtocrux-2.5.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
21
- femtocrux-2.5.2.dist-info/top_level.txt,sha256=BkTttlioC3je__8577wxRieZqY3Abu7FOOdMnmYbcNI,10
22
- femtocrux-2.5.2.dist-info/RECORD,,