prefab-1.2.0-py3-none-any.whl → prefab-1.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefab/__init__.py +1 -1
- prefab/device.py +0 -24
- prefab/geometry.py +1 -0
- prefab/predict.py +232 -213
- {prefab-1.2.0.dist-info → prefab-1.3.0.dist-info}/METADATA +1 -1
- prefab-1.3.0.dist-info/RECORD +14 -0
- prefab-1.2.0.dist-info/RECORD +0 -14
- {prefab-1.2.0.dist-info → prefab-1.3.0.dist-info}/WHEEL +0 -0
- {prefab-1.2.0.dist-info → prefab-1.3.0.dist-info}/entry_points.txt +0 -0
- {prefab-1.2.0.dist-info → prefab-1.3.0.dist-info}/licenses/LICENSE +0 -0
prefab/__init__.py
CHANGED
prefab/device.py
CHANGED

@@ -242,7 +242,6 @@ class Device(BaseModel):
         self,
         model: Model,
         binarize: bool = False,
-        gpu: bool = False,
     ) -> "Device":
         """
         Predict the nanofabrication outcome of the device using a specified model.
@@ -264,10 +263,6 @@ class Device(BaseModel):
             If True, the predicted device geometry will be binarized using a threshold
             method. This is useful for converting probabilistic predictions into binary
             geometries. Defaults to False.
-        gpu : bool
-            If True, the prediction will be performed on a GPU. Defaults to False.
-            Note: The GPU option has more overhead and will take longer for small
-            devices, but will be faster for larger devices.

         Returns
         -------
@@ -285,7 +280,6 @@ class Device(BaseModel):
             model=model,
             model_type="p",
             binarize=binarize,
-            gpu=gpu,
         )
         return self.model_copy(update={"device_array": prediction_array})

@@ -293,7 +287,6 @@ class Device(BaseModel):
         self,
         model: Model,
         binarize: bool = True,
-        gpu: bool = False,
     ) -> "Device":
         """
         Correct the nanofabrication outcome of the device using a specified model.
@@ -317,10 +310,6 @@ class Device(BaseModel):
             If True, the corrected device geometry will be binarized using a threshold
             method. This is useful for converting probabilistic corrections into binary
             geometries. Defaults to True.
-        gpu : bool
-            If True, the prediction will be performed on a GPU. Defaults to False.
-            Note: The GPU option has more overhead and will take longer for small
-            devices, but will be faster for larger devices.

         Returns
         -------
@@ -338,14 +327,12 @@ class Device(BaseModel):
             model=model,
             model_type="c",
             binarize=binarize,
-            gpu=gpu,
         )
         return self.model_copy(update={"device_array": correction_array})

     def semulate(
         self,
         model: Model,
-        gpu: bool = False,
     ) -> "Device":
         """
         Simulate the appearance of the device as if viewed under a scanning electron
@@ -365,10 +352,6 @@ class Device(BaseModel):
             in `models.py`. Each model is associated with a version and dataset that
             detail its creation and the data it was trained on, ensuring the SEMulation
             is tailored to specific fabrication parameters.
-        gpu : bool
-            If True, the prediction will be performed on a GPU. Defaults to False.
-            Note: The GPU option has more overhead and will take longer for small
-            devices, but will be faster for larger devices.

         Notes
         -----
@@ -392,7 +375,6 @@ class Device(BaseModel):
             model=model,
             model_type="s",
             binarize=False,
-            gpu=gpu,
         )
         semulated_array += np.random.normal(0, 0.03, semulated_array.shape)
         return self.model_copy(update={"device_array": semulated_array})
@@ -400,7 +382,6 @@ class Device(BaseModel):
     def segment(
         self,
         model: Model,
-        gpu: bool = False,
     ) -> "Device":
         """
         Segment a scanning electron microscope (SEM) image into a binary mask.
@@ -419,10 +400,6 @@ class Device(BaseModel):
             defined in `models.py`. Each model is associated with a version and dataset
             that detail its creation and the data it was trained on, ensuring the
             segmentation is tailored to specific fabrication parameters.
-        gpu : bool
-            If True, the prediction will be performed on a GPU. Defaults to False.
-            Note: The GPU option has more overhead and will take longer for small
-            devices, but will be faster for larger devices.

         Returns
         -------
@@ -441,7 +418,6 @@ class Device(BaseModel):
             model=model,
             model_type="b",
             binarize=False,
-            gpu=gpu,
         )
         return self.model_copy(update={"device_array": segmented_array})
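In 1.3.0 the `gpu` keyword is removed from `Device.predict`, `Device.correct`, `Device.semulate`, and `Device.segment`; callers now choose only the model and, where applicable, binarization. A minimal sketch of the updated call sites follows; the `device` and `model` objects are placeholders, since how they are constructed is not part of this diff.

```python
# Sketch of the 1.3.0 Device API after the gpu keyword was removed.
# `device` and `model` are assumed to exist already (e.g. a prefab.Device built
# from a layout and an entry from prefab.models); both are placeholders here.
device = ...  # prefab Device instance (assumed)
model = ...   # prefab Model instance (assumed)

prediction = device.predict(model=model, binarize=False)  # fabrication prediction
correction = device.correct(model=model, binarize=True)   # fabrication correction
sem_image = device.semulate(model=model)                  # SEM-style rendering
sem_mask = device.segment(model=model)                    # SEM image segmentation
```

Each call returns a new `Device` via `model_copy`, so the original geometry is left untouched.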
prefab/geometry.py
CHANGED

@@ -133,6 +133,7 @@ def binarize_monte_carlo(
     """
     device_array = np.squeeze(device_array)
     base_threshold = np.random.normal(loc=0.5, scale=0.1)
+    base_threshold = np.clip(base_threshold, 0.2, 0.8)
     threshold_noise = np.random.normal(
         loc=0, scale=noise_magnitude, size=device_array.shape
     )
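The added line clamps the randomly drawn Monte Carlo base threshold to [0.2, 0.8] before per-pixel noise is applied, so an extreme draw from N(0.5, 0.1) can no longer push the threshold toward 0 or 1 and wash out the whole geometry. A standalone sketch of that step is below; the input array, noise magnitude, and final comparison are illustrative assumptions, and only the draw-and-clip lines mirror the diff.

```python
import numpy as np

device_array = np.random.default_rng(0).random((4, 4))  # made-up probability map

# Draw a global threshold around 0.5, then clamp it to [0.2, 0.8] as in 1.3.0.
base_threshold = np.random.normal(loc=0.5, scale=0.1)
base_threshold = np.clip(base_threshold, 0.2, 0.8)

# Per-pixel noise and a hard comparison, roughly mirroring binarize_monte_carlo.
noise_magnitude = 0.05  # assumed value
threshold_noise = np.random.normal(loc=0, scale=noise_magnitude, size=device_array.shape)
binary = (device_array >= base_threshold + threshold_noise).astype(float)
```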
prefab/predict.py
CHANGED

@@ -12,13 +12,101 @@ import toml
 from autograd import primitive
 from autograd.extend import defvjp
 from PIL import Image
-from tqdm import tqdm

 from .geometry import binarize_hard
 from .models import Model

 BASE_ENDPOINT_URL = "https://prefab-photonics--predict"
-ENDPOINT_VERSION = "
+ENDPOINT_VERSION = "3"
+
+
+def predict_array(
+    device_array: np.ndarray,
+    model: Model,
+    model_type: str,
+    binarize: bool,
+) -> np.ndarray:
+    """
+    Predict the nanofabrication outcome of a device array using a specified model.
+
+    This function sends the device array to a serverless prediction service, which uses
+    a specified machine learning model to predict the outcome of the nanofabrication
+    process.
+
+    Parameters
+    ----------
+    device_array : np.ndarray
+        A 2D array representing the planar geometry of the device. This array undergoes
+        various transformations to predict the nanofabrication process.
+    model : Model
+        The model to use for prediction, representing a specific fabrication process and
+        dataset. This model encapsulates details about the fabrication foundry, process,
+        material, technology, thickness, and sidewall presence, as defined in
+        `models.py`. Each model is associated with a version and dataset that detail its
+        creation and the data it was trained on, ensuring the prediction is tailored to
+        specific fabrication parameters.
+    model_type : str
+        The type of model to use (e.g., 'p' for prediction, 'c' for correction, or 's'
+        for SEMulate, 'b' for segmentation).
+    binarize : bool
+        If True, the predicted device geometry will be binarized using a threshold
+        method. This is useful for converting probabilistic predictions into binary
+        geometries.
+
+    Returns
+    -------
+    np.ndarray
+        The predicted output array. For single-level predictions, returns shape
+        (h, w, 1). For multi-level predictions, returns shape (h, w, n) where n is the
+        number of levels.
+
+    Raises
+    ------
+    RuntimeError
+        If the request to the prediction service fails.
+    ValueError
+        If the server returns an error or invalid response.
+    """
+    endpoint_url = f"{BASE_ENDPOINT_URL}-v{ENDPOINT_VERSION}.modal.run"
+    predict_data = {
+        "device_array": _encode_array(np.squeeze(device_array)),
+        "model": model.to_json(),
+        "model_type": model_type,
+    }
+    headers = _prepare_headers()
+
+    try:
+        response = requests.post(
+            url=endpoint_url, data=json.dumps(predict_data), headers=headers
+        )
+        response.raise_for_status()
+
+        if not response.content:
+            raise ValueError("Empty response received from server")
+
+        response_data = response.json()
+
+        if "error" in response_data:
+            raise ValueError(f"Prediction error: {response_data['error']}")
+
+        results = response_data["results"]
+        result_arrays = [
+            _decode_array(results[key])
+            for key in sorted(results.keys())
+            if key.startswith("result")
+        ]
+
+        prediction_array = np.stack(result_arrays, axis=-1)
+
+        if binarize:
+            prediction_array = binarize_hard(prediction_array)
+
+        return prediction_array
+
+    except requests.exceptions.RequestException as e:
+        raise RuntimeError(f"Request failed: {e}") from e
+    except json.JSONDecodeError as e:
+        raise ValueError(f"JSON decode error: {e}") from e


 def _predict_poly(
@@ -28,10 +116,11 @@ def _predict_poly(
     eta: float = 0.5,
 ) -> list:
     """
-    Predict the nanofabrication outcome for a list of polygons.
+    Predict the nanofabrication outcome for a geometry (list of polygons).

-    This function sends
-    learning model to predict the outcome of the nanofabrication
+    This function sends the device array to a serverless prediction service, which uses
+    a specified machine learning model to predict the outcome of the nanofabrication
+    process.

     Parameters
     ----------
@@ -54,17 +143,15 @@ def _predict_poly(
     Returns
     -------
     list
-        List of predicted polygon points with
-
+        List of predicted polygon points with level information. Each polygon is a dict
+        with 'points' (list of coordinates) and 'level' (int) keys.

     Raises
     ------
-
-        If the server returns an error or empty response.
-    requests.exceptions.RequestException
+    RuntimeError
         If the request to the prediction service fails.
-
-        If the
+    ValueError
+        If the server returns an error or invalid response.
     """
     predict_data = {
         "polygons": polygon_points,
@@ -99,11 +186,82 @@ def _predict_poly(
             return []

     except requests.exceptions.RequestException as e:
-
-        raise
+        raise RuntimeError(f"Request failed: {e}") from e
     except json.JSONDecodeError as e:
-
-
+        raise ValueError(f"JSON decode error: {e}") from e
+
+
+def predict_gds(
+    gds_path: str,
+    cell_name: str,
+    model: Model,
+    model_type: str,
+    gds_layer: tuple[int, int] = (1, 0),
+    eta: float = 0.5,
+    output_path: str = None,
+) -> None:
+    """
+    Predict the nanofabrication outcome for a GDS file and cell.
+
+    This function loads a GDS file, extracts the specified cell, and predicts the
+    nanofabrication outcome using the specified model. The predicted cell is
+    automatically added to the original GDS library and the file is written to the
+    specified output path (or overwrites the original if no output path is provided).
+
+    Parameters
+    ----------
+    gds_path : str
+        The file path to the GDS file.
+    cell_name : str
+        The name of the cell within the GDS file to predict.
+    model : Model
+        The model to use for prediction, representing a specific fabrication process and
+        dataset. This model encapsulates details about the fabrication foundry, process,
+        material, technology, thickness, and sidewall presence, as defined in
+        `models.py`. Each model is associated with a version and dataset that detail its
+        creation and the data it was trained on, ensuring the prediction is tailored to
+        specific fabrication parameters.
+    model_type : str
+        The type of model to use ('p' for prediction, 'c' for correction).
+    gds_layer : tuple[int, int]
+        The layer and datatype to use within the GDS file. Defaults to (1, 0).
+    eta : float
+        The threshold value for binarization. Defaults to 0.5. Because intermediate
+        values cannot be preserved in the polygon data, the predicted polygons are
+        binarized using a threshold value of eta.
+    output_path : str, optional
+        The file path where the updated GDS file will be written. If None, the
+        original file will be overwritten. Defaults to None.
+
+    Raises
+    ------
+    RuntimeError
+        If the request to the prediction service fails.
+    ValueError
+        If the GDS file cannot be read, the specified cell is not found, or the server
+        returns an error or invalid response.
+    """
+    gdstk_library = gdstk.read_gds(gds_path)
+    gdstk_cell = gdstk_library[cell_name]
+
+    predicted_cell = predict_gdstk(
+        gdstk_cell=gdstk_cell,
+        model=model,
+        model_type=model_type,
+        gds_layer=gds_layer,
+        eta=eta,
+    )
+
+    base_name = predicted_cell.name
+    counter = 1
+    while predicted_cell.name in [cell.name for cell in gdstk_library.cells]:
+        predicted_cell.name = f"{base_name}_{counter}"
+        counter += 1
+
+    gdstk_library.add(predicted_cell)
+
+    write_path = output_path if output_path is not None else gds_path
+    gdstk_library.write_gds(write_path, max_points=8190)


 def predict_gdstk(
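With the streaming GPU path removed, the module-level helpers added above are plain request/response calls: `predict_array` posts an encoded array to the versioned Modal endpoint and stacks the returned levels, while `predict_gds` round-trips a whole GDS file through `predict_gdstk`. Both read `access_token` and `refresh_token` from `~/.prefab.toml` for the request headers. A usage sketch follows; the model handle, input array, and file names are placeholders, and only the signatures come from this diff.

```python
import numpy as np

from prefab.predict import predict_array, predict_gds

model = ...  # an entry from prefab.models (assumed placeholder)

# Raw-array prediction: returns (h, w, n), one channel per predicted level.
device_array = np.zeros((256, 256))
device_array[64:192, 64:192] = 1.0  # made-up planar geometry
prediction = predict_array(
    device_array=device_array,
    model=model,
    model_type="p",  # 'p' prediction, 'c' correction, 's' SEMulate, 'b' segmentation
    binarize=False,
)

# Whole-file prediction: reads the cell, predicts it, and writes the library to
# output_path (or over the original file when output_path is None).
predict_gds(
    gds_path="chip.gds",  # placeholder path
    cell_name="top",      # placeholder cell name
    model=model,
    model_type="p",
    gds_layer=(1, 0),
    eta=0.5,
    output_path="chip_predicted.gds",
)
```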
@@ -116,8 +274,8 @@ def predict_gdstk(
     """
     Predict the nanofabrication outcome of a gdstk cell using a specified model.

-    This function extracts polygons from a gdstk cell, sends them to
-
+    This function extracts polygons from a gdstk cell, sends them to a serverless
+    prediction service, and returns a new cell containing the predicted polygons.

     Parameters
     ----------
@@ -149,8 +307,11 @@ def predict_gdstk(

     Raises
     ------
+    RuntimeError
+        If the request to the prediction service fails.
     ValueError
-        If no polygons are found in the specified layer
+        If no polygons are found in the specified layer, or the server returns an error
+        or invalid response.
     """
     polygons = gdstk_cell.get_polygons(layer=gds_layer[0], datatype=gds_layer[1])
     if not polygons:
@@ -189,78 +350,6 @@ def predict_gdstk(
         return result_cell


-def predict_array(
-    device_array: np.ndarray,
-    model: Model,
-    model_type: str,
-    binarize: bool,
-    gpu: bool = False,
-) -> np.ndarray:
-    """
-    Predict the nanofabrication outcome of a device array using a specified model.
-
-    This function sends the device array to a serverless prediction service, which uses
-    a specified machine learning model to predict the outcome of the nanofabrication
-    process. The prediction can be performed on a GPU if specified.
-
-    Parameters
-    ----------
-    device_array : np.ndarray
-        A 2D array representing the planar geometry of the device. This array undergoes
-        various transformations to predict the nanofabrication process.
-    model : Model
-        The model to use for prediction, representing a specific fabrication process and
-        dataset. This model encapsulates details about the fabrication foundry, process,
-        material, technology, thickness, and sidewall presence, as defined in
-        `models.py`. Each model is associated with a version and dataset that detail its
-        creation and the data it was trained on, ensuring the prediction is tailored to
-        specific fabrication parameters.
-    model_type : str
-        The type of model to use (e.g., 'p' for prediction, 'c' for correction, or 's'
-        for SEMulate).
-    binarize : bool
-        If True, the predicted device geometry will be binarized using a threshold
-        method. This is useful for converting probabilistic predictions into binary
-        geometries.
-    gpu : bool
-        If True, the prediction will be performed on a GPU. Defaults to False. Note: The
-        GPU option has more startup overhead and will take longer for small devices, but
-        will be faster for larger devices.
-
-    Returns
-    -------
-    np.ndarray
-        The predicted output array.
-
-    Raises
-    ------
-    RuntimeError
-        If the request to the prediction service fails.
-    """
-    headers = _prepare_headers()
-    predict_data = _prepare_predict_data(device_array, model, model_type, binarize)
-    endpoint_url = (
-        f"{BASE_ENDPOINT_URL}-gpu-v{ENDPOINT_VERSION}.modal.run"
-        if gpu
-        else f"{BASE_ENDPOINT_URL}-v{ENDPOINT_VERSION}.modal.run"
-    )
-
-    try:
-        with requests.post(
-            endpoint_url,
-            data=json.dumps(predict_data),
-            headers=headers,
-            stream=True,
-        ) as response:
-            response.raise_for_status()
-            result = _process_response(response, model_type, binarize)
-            if result is None:
-                raise RuntimeError("No prediction result received.")
-            return result
-    except requests.RequestException as e:
-        raise RuntimeError(f"Request failed: {e}") from e
-
-
 def _predict_array_with_grad(
     device_array: np.ndarray, model: Model
 ) -> tuple[np.ndarray, np.ndarray]:
@@ -287,21 +376,49 @@ def _predict_array_with_grad(
     -------
     tuple[np.ndarray, np.ndarray]
         The predicted output array and gradient array.
+
+    Raises
+    ------
+    RuntimeError
+        If the request to the prediction service fails.
+    ValueError
+        If the server returns an error or invalid response.
     """
     headers = _prepare_headers()
-    predict_data =
+    predict_data = {
+        "device_array": _encode_array(np.squeeze(device_array)),
+        "model": model.to_json(),
+        "model_type": "p",
+        "binary": False,
+    }
     endpoint_url = f"{BASE_ENDPOINT_URL}-with-grad-v{ENDPOINT_VERSION}.modal.run"

-
-
-
-
-
-
-
-
-
-
+    try:
+        response = requests.post(
+            endpoint_url, data=json.dumps(predict_data), headers=headers
+        )
+        response.raise_for_status()
+
+        if not response.content:
+            raise ValueError("Empty response received from server")
+
+        response_data = response.json()
+
+        if "error" in response_data:
+            raise ValueError(f"Prediction error: {response_data['error']}")
+
+        prediction_array = _decode_array(response_data["prediction_array"])
+        gradient_array = _decode_array(response_data["gradient_array"])
+        gradient_min = response_data["gradient_min"]
+        gradient_max = response_data["gradient_max"]
+        gradient_range = gradient_max - gradient_min
+        gradient_array = gradient_array * gradient_range + gradient_min
+        return (prediction_array, gradient_array)
+
+    except requests.exceptions.RequestException as e:
+        raise RuntimeError(f"Request failed: {e}") from e
+    except json.JSONDecodeError as e:
+        raise ValueError(f"JSON decode error: {e}") from e


 @primitive
@@ -325,6 +442,13 @@ def predict_array_with_grad(device_array: np.ndarray, model: Model) -> np.ndarra
     -------
     np.ndarray
         The predicted output array.
+
+    Raises
+    ------
+    RuntimeError
+        If the request to the prediction service fails.
+    ValueError
+        If the server returns an error or invalid response.
     """
     prediction_array, gradient_array = _predict_array_with_grad(
         device_array=device_array, model=model
@@ -378,8 +502,8 @@ def _decode_array(encoded_png):
     return np.array(image) / 255


-def
-    """
+def _prepare_headers():
+    """Prepare HTTP headers for a server request."""
     token_file_path = os.path.expanduser("~/.prefab.toml")
     try:
         with open(token_file_path) as file:
@@ -388,7 +512,10 @@ def _read_tokens():
             refresh_token = tokens.get("refresh_token")
             if not access_token or not refresh_token:
                 raise ValueError("Tokens not found in the configuration file.")
-            return
+            return {
+                "Authorization": f"Bearer {access_token}",
+                "X-Refresh-Token": refresh_token,
+            }
     except FileNotFoundError:
         raise FileNotFoundError(
             "Could not validate user.\n"
@@ -396,111 +523,3 @@ def _read_tokens():
         "Signup/login and generate a new token.\n"
         "See https://docs.prefabphotonics.com/."
     ) from None
-
-
-def _prepare_headers():
-    """Prepare HTTP headers for the request."""
-    access_token, refresh_token = _read_tokens()
-    return {
-        "Authorization": f"Bearer {access_token}",
-        "X-Refresh-Token": refresh_token,
-    }
-
-
-def _prepare_predict_data(device_array, model, model_type, binarize):
-    """Prepare the data payload for the prediction request."""
-    return {
-        "device_array": _encode_array(np.squeeze(device_array)),
-        "model": model.to_json(),
-        "model_type": model_type,
-        "binary": binarize,
-    }
-
-
-def _process_response(response, model_type, binarize):
-    """Process the streaming response from the prediction request."""
-    event_type = None
-    model_descriptions = {
-        "p": "Prediction",
-        "c": "Correction",
-        "s": "SEMulate",
-    }
-    progress_bar = tqdm(
-        total=100,
-        desc=model_descriptions.get(model_type, "Processing"),
-        unit="%",
-        colour="green",
-        bar_format="{l_bar}{bar:30}{r_bar}{bar:-10b}",
-    )
-
-    for line in response.iter_lines():
-        if line:
-            decoded_line = line.decode("utf-8").strip()
-            if decoded_line.startswith("event:"):
-                event_type = decoded_line.split(":", 1)[1].strip()
-            elif decoded_line.startswith("data:"):
-                data_content = _parse_data_line(decoded_line)
-                result = _handle_event(event_type, data_content, progress_bar, binarize)
-                if result is not None:
-                    progress_bar.close()
-                    return result
-    progress_bar.close()
-
-
-def _parse_data_line(decoded_line):
-    """Parse a data line from the response stream."""
-    data_line = decoded_line.split(":", 1)[1].strip()
-    try:
-        return json.loads(data_line)
-    except json.JSONDecodeError:
-        raise ValueError(f"Failed to decode JSON: {data_line}") from None
-
-
-def _handle_event(event_type, data_content, progress_bar, binarize):
-    """Handle different types of events received from the server."""
-    if event_type == "progress":
-        _update_progress(progress_bar, data_content)
-    elif event_type == "result":
-        return _process_result(data_content, binarize)
-    elif event_type == "end":
-        print("Stream ended.")
-    elif event_type == "auth":
-        _update_tokens(data_content.get("auth", {}))
-    elif event_type == "error":
-        raise ValueError(f"{data_content['error']}")
-
-
-def _update_progress(progress_bar, data_content):
-    """Update the progress bar based on the progress event."""
-    progress = round(100 * data_content.get("progress", 0))
-    progress_bar.update(progress - progress_bar.n)
-
-
-def _process_result(data_content, binarize):
-    """Process the result event and return the prediction."""
-    results = [
-        _decode_array(data_content[key])
-        for key in sorted(data_content.keys())
-        if key.startswith("result")
-    ]
-    if results:
-        prediction = np.stack(results, axis=-1)
-        if binarize:
-            prediction = binarize_hard(prediction)
-        return prediction
-
-
-def _update_tokens(auth_data):
-    """Update tokens if new tokens are provided in the auth event."""
-    new_access_token = auth_data.get("new_access_token")
-    new_refresh_token = auth_data.get("new_refresh_token")
-    if new_access_token and new_refresh_token:
-        prefab_file_path = os.path.expanduser("~/.prefab.toml")
-        with open(prefab_file_path, "w", encoding="utf-8") as toml_file:
-            toml.dump(
-                {
-                    "access_token": new_access_token,
-                    "refresh_token": new_refresh_token,
-                },
-                toml_file,
-            )
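`_predict_array_with_grad` now returns the prediction together with a gradient rescaled from the server's normalized encoding (`gradient_min`/`gradient_max`), and `predict_array_with_grad` remains an autograd `@primitive` with a VJP registered via `defvjp`, so the remote prediction can sit inside a differentiable objective. A sketch of that use follows, assuming a valid token file; the model handle, target pattern, and loss function are illustrative, not part of this diff.

```python
import autograd.numpy as anp
from autograd import grad

from prefab.predict import predict_array_with_grad

model = ...   # an entry from prefab.models (assumed placeholder)
target = ...  # desired fabricated geometry, same shape as the prediction (assumed)

def fabrication_loss(device_array):
    # predict_array_with_grad is an autograd primitive, so grad() pulls its
    # vector-Jacobian product from the gradient array returned by the server.
    predicted = predict_array_with_grad(device_array, model)
    return anp.mean((predicted - target) ** 2)

loss_grad = grad(fabrication_loss)
# gradient = loss_grad(initial_device_array)  # ndarray matching the input shape
```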
{prefab-1.2.0.dist-info → prefab-1.3.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prefab
-Version: 1.2.0
+Version: 1.3.0
 Summary: Artificial nanofabrication of integrated photonic circuits using deep learning
 Project-URL: Homepage, https://prefabphotonics.com
 Project-URL: Repository, https://github.com/PreFab-Photonics/PreFab
prefab-1.3.0.dist-info/RECORD
ADDED

@@ -0,0 +1,14 @@
+prefab/__init__.py,sha256=l9aFRS6H9RKpzotuXz_qmSOvW1Fdvo_tlH2vuUOa8VE,425
+prefab/__main__.py,sha256=1eXWiEoG7eetJMm1qRbK2I5MnzuRgKIoQtBeT-Ps8es,3523
+prefab/compare.py,sha256=0Xgp3tFuP4of-ce9Opc19p8i8lIyXkbVGLuwWBaHSeE,3486
+prefab/device.py,sha256=7SiawFa103ebFqii3CWy_vPO6ixigVliO6A58hoOqkU,55414
+prefab/geometry.py,sha256=fvMSJpDAqxWShhKojPGnXdB4otrBn54kbW8uk1T4Wwk,12104
+prefab/models.py,sha256=waPNGtuISyY0f8cz7dnbD451CKYCt8EpPGt-4lSOPNU,2581
+prefab/predict.py,sha256=jtB18QOlPINJRqUZCwnuMOfPuuwE1s2e353X5g3njd4,18349
+prefab/read.py,sha256=WNqC3xENlndzFwXeCF2E7H3Iq2dO_6rPEPZ58DuloqY,16259
+prefab/shapes.py,sha256=58cyXFNh1kEErq2jEbGd3dWSediU1OSmor_FWwc1V8A,25098
+prefab-1.3.0.dist-info/METADATA,sha256=Xvbt_umsD1mZY1S9phLLABxbR18MCuw4disk81IUslY,35025
+prefab-1.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+prefab-1.3.0.dist-info/entry_points.txt,sha256=h1_A9O9F3NAIoKXD1RPb3Eo-WCSiHhMB_AnagBi6XTQ,48
+prefab-1.3.0.dist-info/licenses/LICENSE,sha256=IMF9i4xIpgCADf0U-V1cuf9HBmqWQd3qtI3FSuyW4zE,26526
+prefab-1.3.0.dist-info/RECORD,,
prefab-1.2.0.dist-info/RECORD
DELETED

@@ -1,14 +0,0 @@
-prefab/__init__.py,sha256=1aWNs6J8s6UlWOvSlZscZ1W_uZboUlkhVkmJCWvJ7RU,425
-prefab/__main__.py,sha256=1eXWiEoG7eetJMm1qRbK2I5MnzuRgKIoQtBeT-Ps8es,3523
-prefab/compare.py,sha256=0Xgp3tFuP4of-ce9Opc19p8i8lIyXkbVGLuwWBaHSeE,3486
-prefab/device.py,sha256=ZuppXLmDPSfym37U25hSsNu6JNM8ujrr4Bxh3VH4-4s,56582
-prefab/geometry.py,sha256=4fekWMlkdS_qlPNTdPXPhwKuQ5qdQ1Zjf8m9JKd1dA8,12049
-prefab/models.py,sha256=waPNGtuISyY0f8cz7dnbD451CKYCt8EpPGt-4lSOPNU,2581
-prefab/predict.py,sha256=I0gdO0nNAdNQn_ALrHxrDjmBgxIGpPyU_hQYtnF9hYU,17733
-prefab/read.py,sha256=WNqC3xENlndzFwXeCF2E7H3Iq2dO_6rPEPZ58DuloqY,16259
-prefab/shapes.py,sha256=58cyXFNh1kEErq2jEbGd3dWSediU1OSmor_FWwc1V8A,25098
-prefab-1.2.0.dist-info/METADATA,sha256=rd9O4Q3C0adI7BYkkuiEwlqkGw6UlQMpSAFA5Qbuwd0,35025
-prefab-1.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-prefab-1.2.0.dist-info/entry_points.txt,sha256=h1_A9O9F3NAIoKXD1RPb3Eo-WCSiHhMB_AnagBi6XTQ,48
-prefab-1.2.0.dist-info/licenses/LICENSE,sha256=IMF9i4xIpgCADf0U-V1cuf9HBmqWQd3qtI3FSuyW4zE,26526
-prefab-1.2.0.dist-info/RECORD,,
File without changes
|
|
File without changes
|
|
File without changes
|