prefab-1.1.9-py3-none-any.whl → prefab-1.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prefab/__init__.py CHANGED
@@ -5,7 +5,7 @@ Usage:
  import prefab as pf
  """
 
- __version__ = "1.1.9"
+ __version__ = "1.3.0"
 
  from . import compare, geometry, predict, read, shapes
  from .device import BufferSpec, Device
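
The `__init__.py` change is just the version bump. A quick check of the installed release (a trivial sketch; assumes the 1.3.0 wheel is installed):

```python
import prefab as pf

# __version__ is the string bumped in this release.
print(pf.__version__)  # expected: "1.3.0"
```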
prefab/device.py CHANGED
@@ -242,7 +242,6 @@ class Device(BaseModel):
  self,
  model: Model,
  binarize: bool = False,
- gpu: bool = False,
  ) -> "Device":
  """
  Predict the nanofabrication outcome of the device using a specified model.
@@ -264,10 +263,6 @@
  If True, the predicted device geometry will be binarized using a threshold
  method. This is useful for converting probabilistic predictions into binary
  geometries. Defaults to False.
- gpu : bool
- If True, the prediction will be performed on a GPU. Defaults to False.
- Note: The GPU option has more overhead and will take longer for small
- devices, but will be faster for larger devices.
 
  Returns
  -------
@@ -285,7 +280,6 @@
  model=model,
  model_type="p",
  binarize=binarize,
- gpu=gpu,
  )
  return self.model_copy(update={"device_array": prediction_array})
 
@@ -293,7 +287,6 @@
  self,
  model: Model,
  binarize: bool = True,
- gpu: bool = False,
  ) -> "Device":
  """
  Correct the nanofabrication outcome of the device using a specified model.
@@ -317,10 +310,6 @@
  If True, the corrected device geometry will be binarized using a threshold
  method. This is useful for converting probabilistic corrections into binary
  geometries. Defaults to True.
- gpu : bool
- If True, the prediction will be performed on a GPU. Defaults to False.
- Note: The GPU option has more overhead and will take longer for small
- devices, but will be faster for larger devices.
 
  Returns
  -------
@@ -338,14 +327,12 @@
  model=model,
  model_type="c",
  binarize=binarize,
- gpu=gpu,
  )
  return self.model_copy(update={"device_array": correction_array})
 
  def semulate(
  self,
  model: Model,
- gpu: bool = False,
  ) -> "Device":
  """
  Simulate the appearance of the device as if viewed under a scanning electron
@@ -365,10 +352,6 @@
  in `models.py`. Each model is associated with a version and dataset that
  detail its creation and the data it was trained on, ensuring the SEMulation
  is tailored to specific fabrication parameters.
- gpu : bool
- If True, the prediction will be performed on a GPU. Defaults to False.
- Note: The GPU option has more overhead and will take longer for small
- devices, but will be faster for larger devices.
 
  Notes
  -----
@@ -392,11 +375,52 @@
  model=model,
  model_type="s",
  binarize=False,
- gpu=gpu,
  )
  semulated_array += np.random.normal(0, 0.03, semulated_array.shape)
  return self.model_copy(update={"device_array": semulated_array})
 
+ def segment(
+ self,
+ model: Model,
+ ) -> "Device":
+ """
+ Segment a scanning electron microscope (SEM) image into a binary mask.
+
+ This method applies a specified machine learning model to transform a grayscale
+ SEM image into a binary mask, where 1 represents the device structure and 0
+ represents the background. This is useful for extracting the device geometry
+ from experimental SEM images for analysis or comparison with design intent.
+
+ Parameters
+ ----------
+ model : Model
+ The model to use for segmentation, representing a specific fabrication
+ process and dataset. This model encapsulates details about the fabrication
+ foundry, process, material, technology, thickness, and sidewall presence, as
+ defined in `models.py`. Each model is associated with a version and dataset
+ that detail its creation and the data it was trained on, ensuring the
+ segmentation is tailored to specific fabrication parameters.
+
+ Returns
+ -------
+ Device
+ A new instance of the Device class with its geometry transformed into a
+ binary mask.
+
+ Raises
+ ------
+ RuntimeError
+ If the prediction service returns an error or if the response from the
+ service cannot be processed correctly.
+ """
+ segmented_array = predict_array(
+ device_array=self.normalize().device_array,
+ model=model,
+ model_type="b",
+ binarize=False,
+ )
+ return self.model_copy(update={"device_array": segmented_array})
+
  def to_ndarray(self) -> np.ndarray:
  """
  Converts the device geometry to an ndarray.
@@ -520,7 +544,7 @@
  gdstk.Cell
  The GDSTK cell object representing the device geometry.
  """
- print(f"Creating cell '{cell_name}'...")
+ # print(f"Creating cell '{cell_name}'...")
  gdstk_cell = self.flatten()._device_to_gdstk(
  cell_name=cell_name,
  gds_layer=gds_layer,
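
Taken together, the `device.py` changes remove the `gpu` argument from `predict`, `correct`, and `semulate`, silence the cell-creation print, and add a `segment` method that maps a grayscale SEM image to a binary mask. A minimal sketch of the updated `Device` API; how the `Device` and `Model` objects are obtained is not shown in this diff, so the placeholders below are assumptions:

```python
import prefab as pf

# Placeholders: a Model describing the fabrication process (from prefab.models)
# and Device instances built elsewhere (e.g. via pf.read or pf.shapes).
model = ...        # prefab.models.Model instance
design = ...       # pf.Device holding a designed geometry
sem_image = ...    # pf.Device holding a grayscale SEM image

prediction = design.predict(model=model, binarize=False)   # gpu= no longer accepted
correction = design.correct(model=model, binarize=True)
sem_view = design.semulate(model=model)

# New in 1.3.0: segmentation of SEM images into binary masks (model_type "b").
mask = sem_image.segment(model=model)
```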
prefab/geometry.py CHANGED
@@ -133,6 +133,7 @@ def binarize_monte_carlo(
  """
  device_array = np.squeeze(device_array)
  base_threshold = np.random.normal(loc=0.5, scale=0.1)
+ base_threshold = np.clip(base_threshold, 0.2, 0.8)
  threshold_noise = np.random.normal(
  loc=0, scale=noise_magnitude, size=device_array.shape
  )
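
The added line bounds the randomly drawn base threshold before the per-pixel noise is applied. A standalone sketch of what that clipping does to the sampled values (plain NumPy, independent of PreFab internals):

```python
import numpy as np

# Same sampling as binarize_monte_carlo's base threshold: N(0.5, 0.1).
draws = np.random.normal(loc=0.5, scale=0.1, size=10_000)
clipped = np.clip(draws, 0.2, 0.8)

# 0.2 and 0.8 sit three standard deviations from the mean, so only ~0.3% of
# draws are affected, but extreme thresholds that would binarize the device
# to nearly all-0 or all-1 can no longer occur.
print(clipped.min() >= 0.2 and clipped.max() <= 0.8)  # True
```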
prefab/predict.py CHANGED
@@ -5,19 +5,19 @@ import io
  import json
  import os
 
+ import gdstk
  import numpy as np
  import requests
  import toml
  from autograd import primitive
  from autograd.extend import defvjp
  from PIL import Image
- from tqdm import tqdm
 
  from .geometry import binarize_hard
  from .models import Model
 
  BASE_ENDPOINT_URL = "https://prefab-photonics--predict"
- ENDPOINT_VERSION = 2
+ ENDPOINT_VERSION = "3"
 
 
  def predict_array(
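
With the GPU-specific endpoint removed and `ENDPOINT_VERSION` now the string `"3"`, every request goes to a v3 Modal endpoint built from one base URL. A small sketch of the URLs the new module constructs:

```python
# Constants as defined at the top of prefab/predict.py in 1.3.0.
BASE_ENDPOINT_URL = "https://prefab-photonics--predict"
ENDPOINT_VERSION = "3"

# Endpoints used by predict_array, _predict_poly, and _predict_array_with_grad.
array_url = f"{BASE_ENDPOINT_URL}-v{ENDPOINT_VERSION}.modal.run"
poly_url = f"{BASE_ENDPOINT_URL}-poly-v{ENDPOINT_VERSION}.modal.run"
grad_url = f"{BASE_ENDPOINT_URL}-with-grad-v{ENDPOINT_VERSION}.modal.run"

print(array_url)  # https://prefab-photonics--predict-v3.modal.run
```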
@@ -25,14 +25,13 @@ def predict_array(
  model: Model,
  model_type: str,
  binarize: bool,
- gpu: bool = False,
  ) -> np.ndarray:
  """
  Predict the nanofabrication outcome of a device array using a specified model.
 
  This function sends the device array to a serverless prediction service, which uses
  a specified machine learning model to predict the outcome of the nanofabrication
- process. The prediction can be performed on a GPU if specified.
+ process.
 
  Parameters
  ----------
@@ -48,48 +47,307 @@ def predict_array(
  specific fabrication parameters.
  model_type : str
  The type of model to use (e.g., 'p' for prediction, 'c' for correction, or 's'
- for SEMulate).
+ for SEMulate, 'b' for segmentation).
  binarize : bool
  If True, the predicted device geometry will be binarized using a threshold
  method. This is useful for converting probabilistic predictions into binary
  geometries.
- gpu : bool
- If True, the prediction will be performed on a GPU. Defaults to False. Note: The
- GPU option has more startup overhead and will take longer for small devices, but
- will be faster for larger devices.
 
  Returns
  -------
  np.ndarray
- The predicted output array.
+ The predicted output array. For single-level predictions, returns shape
+ (h, w, 1). For multi-level predictions, returns shape (h, w, n) where n is the
+ number of levels.
 
  Raises
  ------
  RuntimeError
  If the request to the prediction service fails.
+ ValueError
+ If the server returns an error or invalid response.
  """
+ endpoint_url = f"{BASE_ENDPOINT_URL}-v{ENDPOINT_VERSION}.modal.run"
+ predict_data = {
+ "device_array": _encode_array(np.squeeze(device_array)),
+ "model": model.to_json(),
+ "model_type": model_type,
+ }
  headers = _prepare_headers()
- predict_data = _prepare_predict_data(device_array, model, model_type, binarize)
- endpoint_url = (
- f"{BASE_ENDPOINT_URL}-gpu-v{ENDPOINT_VERSION}.modal.run"
- if gpu
- else f"{BASE_ENDPOINT_URL}-v{ENDPOINT_VERSION}.modal.run"
- )
 
  try:
- with requests.post(
- endpoint_url,
- data=json.dumps(predict_data),
- headers=headers,
- stream=True,
- ) as response:
- response.raise_for_status()
- result = _process_response(response, model_type, binarize)
- if result is None:
- raise RuntimeError("No prediction result received.")
- return result
- except requests.RequestException as e:
+ response = requests.post(
+ url=endpoint_url, data=json.dumps(predict_data), headers=headers
+ )
+ response.raise_for_status()
+
+ if not response.content:
+ raise ValueError("Empty response received from server")
+
+ response_data = response.json()
+
+ if "error" in response_data:
+ raise ValueError(f"Prediction error: {response_data['error']}")
+
+ results = response_data["results"]
+ result_arrays = [
+ _decode_array(results[key])
+ for key in sorted(results.keys())
+ if key.startswith("result")
+ ]
+
+ prediction_array = np.stack(result_arrays, axis=-1)
+
+ if binarize:
+ prediction_array = binarize_hard(prediction_array)
+
+ return prediction_array
+
+ except requests.exceptions.RequestException as e:
  raise RuntimeError(f"Request failed: {e}") from e
+ except json.JSONDecodeError as e:
+ raise ValueError(f"JSON decode error: {e}") from e
+
+
+ def _predict_poly(
+ polygon_points: list,
+ model: Model,
+ model_type: str,
+ eta: float = 0.5,
+ ) -> list:
+ """
+ Predict the nanofabrication outcome for a geometry (list of polygons).
+
+ This function sends the device array to a serverless prediction service, which uses
+ a specified machine learning model to predict the outcome of the nanofabrication
+ process.
+
+ Parameters
+ ----------
+ polygon_points : list
+ List of polygon points, where each polygon is a list of [x, y] coordinates.
+ model : Model
+ The model to use for prediction, representing a specific fabrication process and
+ dataset. This model encapsulates details about the fabrication foundry, process,
+ material, technology, thickness, and sidewall presence, as defined in
+ `models.py`. Each model is associated with a version and dataset that detail its
+ creation and the data it was trained on, ensuring the prediction is tailored to
+ specific fabrication parameters.
+ model_type : str
+ The type of model to use ('p' for prediction, 'c' for correction).
+ eta : float
+ The threshold value for binarization. Defaults to 0.5. Because intermediate
+ values cannot be preserved in the polygon data, the predicted polygons are
+ binarized using a threshold value of eta.
+
+ Returns
+ -------
+ list
+ List of predicted polygon points with level information. Each polygon is a dict
+ with 'points' (list of coordinates) and 'level' (int) keys.
+
+ Raises
+ ------
+ RuntimeError
+ If the request to the prediction service fails.
+ ValueError
+ If the server returns an error or invalid response.
+ """
+ predict_data = {
+ "polygons": polygon_points,
+ "model": model.to_json(),
+ "model_type": model_type,
+ "eta": eta,
+ }
+
+ endpoint_url = f"{BASE_ENDPOINT_URL}-poly-v{ENDPOINT_VERSION}.modal.run"
+ headers = _prepare_headers()
+
+ try:
+ response = requests.post(
+ endpoint_url, data=json.dumps(predict_data), headers=headers
+ )
+ response.raise_for_status()
+
+ if not response.content:
+ raise ValueError("Empty response received from server")
+
+ response_data = response.json()
+
+ if "polygons" in response_data:
+ polygons = response_data["polygons"]
+ if polygons and isinstance(polygons[0], dict) and "channel" in polygons[0]:
+ return polygons
+ else:
+ return [{"points": points, "channel": 0} for points in polygons]
+ else:
+ if "error" in response_data:
+ raise ValueError(f"Prediction error: {response_data['error']}")
+ return []
+
+ except requests.exceptions.RequestException as e:
+ raise RuntimeError(f"Request failed: {e}") from e
+ except json.JSONDecodeError as e:
+ raise ValueError(f"JSON decode error: {e}") from e
+
+
+ def predict_gds(
+ gds_path: str,
+ cell_name: str,
+ model: Model,
+ model_type: str,
+ gds_layer: tuple[int, int] = (1, 0),
+ eta: float = 0.5,
+ output_path: str = None,
+ ) -> None:
+ """
+ Predict the nanofabrication outcome for a GDS file and cell.
+
+ This function loads a GDS file, extracts the specified cell, and predicts the
+ nanofabrication outcome using the specified model. The predicted cell is
+ automatically added to the original GDS library and the file is written to the
+ specified output path (or overwrites the original if no output path is provided).
+
+ Parameters
+ ----------
+ gds_path : str
+ The file path to the GDS file.
+ cell_name : str
+ The name of the cell within the GDS file to predict.
+ model : Model
+ The model to use for prediction, representing a specific fabrication process and
+ dataset. This model encapsulates details about the fabrication foundry, process,
+ material, technology, thickness, and sidewall presence, as defined in
+ `models.py`. Each model is associated with a version and dataset that detail its
+ creation and the data it was trained on, ensuring the prediction is tailored to
+ specific fabrication parameters.
+ model_type : str
+ The type of model to use ('p' for prediction, 'c' for correction).
+ gds_layer : tuple[int, int]
+ The layer and datatype to use within the GDS file. Defaults to (1, 0).
+ eta : float
+ The threshold value for binarization. Defaults to 0.5. Because intermediate
+ values cannot be preserved in the polygon data, the predicted polygons are
+ binarized using a threshold value of eta.
+ output_path : str, optional
+ The file path where the updated GDS file will be written. If None, the
+ original file will be overwritten. Defaults to None.
+
+ Raises
+ ------
+ RuntimeError
+ If the request to the prediction service fails.
+ ValueError
+ If the GDS file cannot be read, the specified cell is not found, or the server
+ returns an error or invalid response.
+ """
+ gdstk_library = gdstk.read_gds(gds_path)
+ gdstk_cell = gdstk_library[cell_name]
+
+ predicted_cell = predict_gdstk(
+ gdstk_cell=gdstk_cell,
+ model=model,
+ model_type=model_type,
+ gds_layer=gds_layer,
+ eta=eta,
+ )
+
+ base_name = predicted_cell.name
+ counter = 1
+ while predicted_cell.name in [cell.name for cell in gdstk_library.cells]:
+ predicted_cell.name = f"{base_name}_{counter}"
+ counter += 1
+
+ gdstk_library.add(predicted_cell)
+
+ write_path = output_path if output_path is not None else gds_path
+ gdstk_library.write_gds(write_path, max_points=8190)
+
+
+ def predict_gdstk(
+ gdstk_cell: gdstk.Cell,
+ model: Model,
+ model_type: str,
+ gds_layer: tuple[int, int] = (1, 0),
+ eta: float = 0.5,
+ ) -> gdstk.Cell:
+ """
+ Predict the nanofabrication outcome of a gdstk cell using a specified model.
+
+ This function extracts polygons from a gdstk cell, sends them to a serverless
+ prediction service, and returns a new cell containing the predicted polygons.
+
+ Parameters
+ ----------
+ gdstk_cell : gdstk.Cell
+ The gdstk.Cell object containing polygons to predict.
+ model : Model
+ The model to use for prediction, representing a specific fabrication process and
+ dataset. This model encapsulates details about the fabrication foundry, process,
+ material, technology, thickness, and sidewall presence, as defined in
+ `models.py`. Each model is associated with a version and dataset that detail its
+ creation and the data it was trained on, ensuring the prediction is tailored to
+ specific fabrication parameters.
+ model_type : str
+ The type of model to use ('p' for prediction, 'c' for correction).
+ gds_layer : tuple[int, int]
+ The layer and datatype to use within the GDSTK cell. Defaults to (1, 0).
+ eta : float
+ The threshold value for binarization. Defaults to 0.5. Because intermediate
+ values cannot be preserved in the polygon data, the predicted polygons are
+ binarized using a threshold value of eta.
+
+ Returns
+ -------
+ gdstk.Cell
+ A new gdstk cell containing the predicted polygons. For multi-level
+ predictions, each level's polygons will be placed on a different layer:
+ - Level 0: (layer, 99)
+ - Level 1: (layer, 100)
+
+ Raises
+ ------
+ RuntimeError
+ If the request to the prediction service fails.
+ ValueError
+ If no polygons are found in the specified layer, or the server returns an error
+ or invalid response.
+ """
+ polygons = gdstk_cell.get_polygons(layer=gds_layer[0], datatype=gds_layer[1])
+ if not polygons:
+ raise ValueError("No polygons found in the specified layer")
+
+ polygon_points = [polygon.points.tolist() for polygon in polygons]
+
+ predicted_polygon_data = _predict_poly(
+ polygon_points=polygon_points,
+ model=model,
+ model_type=model_type,
+ eta=eta,
+ )
+
+ result_cell = gdstk.Cell(f"{gdstk_cell.name}_predicted")
+
+ polygons_by_channel = {}
+ for polygon_data in predicted_polygon_data:
+ channel = polygon_data.get("channel", 0)
+ points = polygon_data.get("points", [])
+
+ if channel not in polygons_by_channel:
+ polygons_by_channel[channel] = []
+
+ polygons_by_channel[channel].append(points)
+
+ for channel, points_list in polygons_by_channel.items():
+ layer = gds_layer[0]
+ datatype = 99 + channel
+
+ for points in points_list:
+ points_array = np.array(points)
+ polygon = gdstk.Polygon(points_array, layer=layer, datatype=datatype)
+ result_cell.add(polygon)
+
+ return result_cell
 
 
  def _predict_array_with_grad(
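
The new `predict_gds` and `predict_gdstk` helpers work directly on layout data instead of rasterized device arrays. A minimal usage sketch; the file name, cell name, and model below are placeholders rather than values taken from this diff:

```python
import gdstk

from prefab.predict import predict_gds, predict_gdstk

model = ...  # placeholder: a prefab.models.Model for the target process

# File-level helper: reads the GDS, predicts the named cell on layer (1, 0),
# appends a "<cell_name>_predicted" cell, and writes the library back out.
predict_gds(
    gds_path="chip.gds",
    cell_name="top",
    model=model,
    model_type="p",              # 'p' = prediction, 'c' = correction
    gds_layer=(1, 0),
    eta=0.5,
    output_path="chip_predicted.gds",  # omit to overwrite chip.gds
)

# Cell-level helper: returns a new gdstk.Cell whose predicted polygons land on
# datatype 99 + channel, one datatype per predicted level.
library = gdstk.read_gds("chip.gds")
predicted_cell = predict_gdstk(gdstk_cell=library["top"], model=model, model_type="p")
```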
@@ -118,21 +376,49 @@ def _predict_array_with_grad(
  -------
  tuple[np.ndarray, np.ndarray]
  The predicted output array and gradient array.
+
+ Raises
+ ------
+ RuntimeError
+ If the request to the prediction service fails.
+ ValueError
+ If the server returns an error or invalid response.
  """
  headers = _prepare_headers()
- predict_data = _prepare_predict_data(device_array, model, "p", False)
+ predict_data = {
+ "device_array": _encode_array(np.squeeze(device_array)),
+ "model": model.to_json(),
+ "model_type": "p",
+ "binary": False,
+ }
  endpoint_url = f"{BASE_ENDPOINT_URL}-with-grad-v{ENDPOINT_VERSION}.modal.run"
 
- response = requests.post(
- endpoint_url, data=json.dumps(predict_data), headers=headers
- )
- prediction_array = _decode_array(response.json()["prediction_array"])
- gradient_array = _decode_array(response.json()["gradient_array"])
- gradient_min = response.json()["gradient_min"]
- gradient_max = response.json()["gradient_max"]
- gradient_range = gradient_max - gradient_min
- gradient_array = gradient_array * gradient_range + gradient_min
- return (prediction_array, gradient_array)
+ try:
+ response = requests.post(
+ endpoint_url, data=json.dumps(predict_data), headers=headers
+ )
+ response.raise_for_status()
+
+ if not response.content:
+ raise ValueError("Empty response received from server")
+
+ response_data = response.json()
+
+ if "error" in response_data:
+ raise ValueError(f"Prediction error: {response_data['error']}")
+
+ prediction_array = _decode_array(response_data["prediction_array"])
+ gradient_array = _decode_array(response_data["gradient_array"])
+ gradient_min = response_data["gradient_min"]
+ gradient_max = response_data["gradient_max"]
+ gradient_range = gradient_max - gradient_min
+ gradient_array = gradient_array * gradient_range + gradient_min
+ return (prediction_array, gradient_array)
+
+ except requests.exceptions.RequestException as e:
+ raise RuntimeError(f"Request failed: {e}") from e
+ except json.JSONDecodeError as e:
+ raise ValueError(f"JSON decode error: {e}") from e
 
 
  @primitive
@@ -156,6 +442,13 @@ def predict_array_with_grad(device_array: np.ndarray, model: Model) -> np.ndarra
  -------
  np.ndarray
  The predicted output array.
+
+ Raises
+ ------
+ RuntimeError
+ If the request to the prediction service fails.
+ ValueError
+ If the server returns an error or invalid response.
  """
  prediction_array, gradient_array = _predict_array_with_grad(
  device_array=device_array, model=model
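
Since `predict_array_with_grad` is registered as an autograd primitive (see the `@primitive` decorator and the `defvjp` import above), it can sit inside a differentiable objective, with the server-supplied gradient applied during backpropagation. A hedged sketch of that usage pattern; the objective and the placeholder inputs are illustrative only:

```python
import autograd.numpy as anp
from autograd import grad

from prefab.predict import predict_array_with_grad

model = ...          # placeholder: a prefab.models.Model instance
device_array = ...   # placeholder: 2D design array with values in [0, 1]

def objective(arr):
    # Example objective: mean predicted pixel value of the fabricated geometry.
    predicted = predict_array_with_grad(arr, model)
    return anp.mean(predicted)

design_gradient = grad(objective)(device_array)
```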
@@ -209,8 +502,8 @@ def _decode_array(encoded_png):
  return np.array(image) / 255
 
 
- def _read_tokens():
- """Read access and refresh tokens from the configuration file."""
+ def _prepare_headers():
+ """Prepare HTTP headers for a server request."""
  token_file_path = os.path.expanduser("~/.prefab.toml")
  try:
  with open(token_file_path) as file:
@@ -219,7 +512,10 @@ def _read_tokens():
  refresh_token = tokens.get("refresh_token")
  if not access_token or not refresh_token:
  raise ValueError("Tokens not found in the configuration file.")
- return access_token, refresh_token
+ return {
+ "Authorization": f"Bearer {access_token}",
+ "X-Refresh-Token": refresh_token,
+ }
  except FileNotFoundError:
  raise FileNotFoundError(
  "Could not validate user.\n"
@@ -227,111 +523,3 @@ def _read_tokens():
  "Signup/login and generate a new token.\n"
  "See https://docs.prefabphotonics.com/."
  ) from None
-
-
- def _prepare_headers():
- """Prepare HTTP headers for the request."""
- access_token, refresh_token = _read_tokens()
- return {
- "Authorization": f"Bearer {access_token}",
- "X-Refresh-Token": refresh_token,
- }
-
-
- def _prepare_predict_data(device_array, model, model_type, binarize):
- """Prepare the data payload for the prediction request."""
- return {
- "device_array": _encode_array(np.squeeze(device_array)),
- "model": model.to_json(),
- "model_type": model_type,
- "binary": binarize,
- }
-
-
- def _process_response(response, model_type, binarize):
- """Process the streaming response from the prediction request."""
- event_type = None
- model_descriptions = {
- "p": "Prediction",
- "c": "Correction",
- "s": "SEMulate",
- }
- progress_bar = tqdm(
- total=100,
- desc=model_descriptions.get(model_type, "Processing"),
- unit="%",
- colour="green",
- bar_format="{l_bar}{bar:30}{r_bar}{bar:-10b}",
- )
-
- for line in response.iter_lines():
- if line:
- decoded_line = line.decode("utf-8").strip()
- if decoded_line.startswith("event:"):
- event_type = decoded_line.split(":", 1)[1].strip()
- elif decoded_line.startswith("data:"):
- data_content = _parse_data_line(decoded_line)
- result = _handle_event(event_type, data_content, progress_bar, binarize)
- if result is not None:
- progress_bar.close()
- return result
- progress_bar.close()
-
-
- def _parse_data_line(decoded_line):
- """Parse a data line from the response stream."""
- data_line = decoded_line.split(":", 1)[1].strip()
- try:
- return json.loads(data_line)
- except json.JSONDecodeError:
- raise ValueError(f"Failed to decode JSON: {data_line}") from None
-
-
- def _handle_event(event_type, data_content, progress_bar, binarize):
- """Handle different types of events received from the server."""
- if event_type == "progress":
- _update_progress(progress_bar, data_content)
- elif event_type == "result":
- return _process_result(data_content, binarize)
- elif event_type == "end":
- print("Stream ended.")
- elif event_type == "auth":
- _update_tokens(data_content.get("auth", {}))
- elif event_type == "error":
- raise ValueError(f"{data_content['error']}")
-
-
- def _update_progress(progress_bar, data_content):
- """Update the progress bar based on the progress event."""
- progress = round(100 * data_content.get("progress", 0))
- progress_bar.update(progress - progress_bar.n)
-
-
- def _process_result(data_content, binarize):
- """Process the result event and return the prediction."""
- results = [
- _decode_array(data_content[key])
- for key in sorted(data_content.keys())
- if key.startswith("result")
- ]
- if results:
- prediction = np.stack(results, axis=-1)
- if binarize:
- prediction = binarize_hard(prediction)
- return prediction
-
-
- def _update_tokens(auth_data):
- """Update tokens if new tokens are provided in the auth event."""
- new_access_token = auth_data.get("new_access_token")
- new_refresh_token = auth_data.get("new_refresh_token")
- if new_access_token and new_refresh_token:
- prefab_file_path = os.path.expanduser("~/.prefab.toml")
- with open(prefab_file_path, "w", encoding="utf-8") as toml_file:
- toml.dump(
- {
- "access_token": new_access_token,
- "refresh_token": new_refresh_token,
- },
- toml_file,
- )
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: prefab
- Version: 1.1.9
+ Version: 1.3.0
  Summary: Artificial nanofabrication of integrated photonic circuits using deep learning
  Project-URL: Homepage, https://prefabphotonics.com
  Project-URL: Repository, https://github.com/PreFab-Photonics/PreFab
@@ -586,7 +586,7 @@ Before you can make PreFab requests, you will need to [create an account](https:
  To link your account, you will need an token. You can do this by running the following command in your terminal. This will open a browser window where you can log in and authenticate your token.
 
  ```sh
- python3 -m prefab setup
+ prefab setup
  ```
 
  ### Guides
@@ -603,4 +603,4 @@ PreFab models are hosted on a [serverless cloud platform](https://modal.com/). P
 
  ## License
 
- This project is licensed under the LGPL-2.1 license. © 2024 PreFab Photonics.
+ This project is licensed under the LGPL-2.1 license. © 2025 PreFab Photonics.
@@ -0,0 +1,14 @@
+ prefab/__init__.py,sha256=l9aFRS6H9RKpzotuXz_qmSOvW1Fdvo_tlH2vuUOa8VE,425
+ prefab/__main__.py,sha256=1eXWiEoG7eetJMm1qRbK2I5MnzuRgKIoQtBeT-Ps8es,3523
+ prefab/compare.py,sha256=0Xgp3tFuP4of-ce9Opc19p8i8lIyXkbVGLuwWBaHSeE,3486
+ prefab/device.py,sha256=7SiawFa103ebFqii3CWy_vPO6ixigVliO6A58hoOqkU,55414
+ prefab/geometry.py,sha256=fvMSJpDAqxWShhKojPGnXdB4otrBn54kbW8uk1T4Wwk,12104
+ prefab/models.py,sha256=waPNGtuISyY0f8cz7dnbD451CKYCt8EpPGt-4lSOPNU,2581
+ prefab/predict.py,sha256=jtB18QOlPINJRqUZCwnuMOfPuuwE1s2e353X5g3njd4,18349
+ prefab/read.py,sha256=WNqC3xENlndzFwXeCF2E7H3Iq2dO_6rPEPZ58DuloqY,16259
+ prefab/shapes.py,sha256=58cyXFNh1kEErq2jEbGd3dWSediU1OSmor_FWwc1V8A,25098
+ prefab-1.3.0.dist-info/METADATA,sha256=Xvbt_umsD1mZY1S9phLLABxbR18MCuw4disk81IUslY,35025
+ prefab-1.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ prefab-1.3.0.dist-info/entry_points.txt,sha256=h1_A9O9F3NAIoKXD1RPb3Eo-WCSiHhMB_AnagBi6XTQ,48
+ prefab-1.3.0.dist-info/licenses/LICENSE,sha256=IMF9i4xIpgCADf0U-V1cuf9HBmqWQd3qtI3FSuyW4zE,26526
+ prefab-1.3.0.dist-info/RECORD,,
@@ -1,14 +0,0 @@
- prefab/__init__.py,sha256=lrYoADI1MlequuC41Rly8vr1kAiFrG8zGw1PILM1rf4,425
- prefab/__main__.py,sha256=1eXWiEoG7eetJMm1qRbK2I5MnzuRgKIoQtBeT-Ps8es,3523
- prefab/compare.py,sha256=0Xgp3tFuP4of-ce9Opc19p8i8lIyXkbVGLuwWBaHSeE,3486
- prefab/device.py,sha256=bN-RkbjhUOXlrnYEJW4H2HqhyxvFrQa15EvhOmGXgkY,54603
- prefab/geometry.py,sha256=4fekWMlkdS_qlPNTdPXPhwKuQ5qdQ1Zjf8m9JKd1dA8,12049
- prefab/models.py,sha256=waPNGtuISyY0f8cz7dnbD451CKYCt8EpPGt-4lSOPNU,2581
- prefab/predict.py,sha256=h13523jasg1WbdiYbkXy43SWTGfQXjq6oEe0O8DT2U0,11731
- prefab/read.py,sha256=WNqC3xENlndzFwXeCF2E7H3Iq2dO_6rPEPZ58DuloqY,16259
- prefab/shapes.py,sha256=58cyXFNh1kEErq2jEbGd3dWSediU1OSmor_FWwc1V8A,25098
- prefab-1.1.9.dist-info/METADATA,sha256=Un--7bA5ihvvtaXK733Q1Zd_8d-EBl1ohWfcdJCdOBI,35036
- prefab-1.1.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- prefab-1.1.9.dist-info/entry_points.txt,sha256=h1_A9O9F3NAIoKXD1RPb3Eo-WCSiHhMB_AnagBi6XTQ,48
- prefab-1.1.9.dist-info/licenses/LICENSE,sha256=IMF9i4xIpgCADf0U-V1cuf9HBmqWQd3qtI3FSuyW4zE,26526
- prefab-1.1.9.dist-info/RECORD,,