prefab 1.3.0__py3-none-any.whl → 1.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prefab/predict.py CHANGED
@@ -1,12 +1,22 @@
- """Prediction functions for ndarrays of device geometries."""
+ """
+ Serverless prediction interface for nanofabrication modeling.
+
+ This module provides functions for predicting nanofabrication outcomes using machine
+ learning models hosted on a serverless platform. It supports multiple input formats
+ (ndarrays, polygons, GDSII files) and model types (prediction, correction,
+ segmentation). Gradient computation is available for inverse design applications
+ using automatic differentiation.
+ """

  import base64
  import io
  import json
  import os
+ from typing import Any

  import gdstk
  import numpy as np
+ import numpy.typing as npt
  import requests
  import toml
  from autograd import primitive
@@ -21,11 +31,11 @@ ENDPOINT_VERSION = "3"


  def predict_array(
- device_array: np.ndarray,
+ device_array: npt.NDArray[Any],
  model: Model,
  model_type: str,
  binarize: bool,
- ) -> np.ndarray:
+ ) -> npt.NDArray[Any]:
  """
  Predict the nanofabrication outcome of a device array using a specified model.

@@ -40,14 +50,12 @@ def predict_array(
  various transformations to predict the nanofabrication process.
  model : Model
  The model to use for prediction, representing a specific fabrication process and
- dataset. This model encapsulates details about the fabrication foundry, process,
- material, technology, thickness, and sidewall presence, as defined in
- `models.py`. Each model is associated with a version and dataset that detail its
- creation and the data it was trained on, ensuring the prediction is tailored to
- specific fabrication parameters.
+ dataset. This model encapsulates details about the fabrication foundry and
+ process, as defined in `models.py`. Each model is associated with a version and
+ dataset that detail its creation and the data it was trained on, ensuring the
+ prediction is tailored to specific fabrication parameters.
  model_type : str
- The type of model to use (e.g., 'p' for prediction, 'c' for correction, or 's'
- for SEMulate, 'b' for segmentation).
+ The type of model to use (e.g., 'p' for prediction or 'c' for correction).
  binarize : bool
  If True, the predicted device geometry will be binarized using a threshold
  method. This is useful for converting probabilistic predictions into binary
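
For orientation, a minimal usage sketch of the updated `predict_array` signature follows; the import path for `Model` is assumed from the `models.py` reference above, and the square-pad geometry is made up for illustration rather than taken from this diff.

    import numpy as np
    import numpy.typing as npt
    from prefab.models import Model  # assumed import path for the `Model` referenced above
    from prefab.predict import predict_array

    def predict_square(model: Model) -> npt.NDArray:
        """Predict fabrication of a simple square pad with whichever model is passed in."""
        device = np.zeros((256, 256))
        device[64:192, 64:192] = 1.0  # 1 = material, 0 = no material
        return predict_array(
            device_array=device,
            model=model,
            model_type="p",  # 'p' = prediction, 'c' = correction
            binarize=True,   # threshold the probabilistic output into a 0/1 geometry
        )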
@@ -110,11 +118,11 @@ def predict_array(


  def _predict_poly(
- polygon_points: list,
+ polygon_points: list[Any],
  model: Model,
  model_type: str,
  eta: float = 0.5,
- ) -> list:
+ ) -> list[Any]:
  """
  Predict the nanofabrication outcome for a geometry (list of polygons).

@@ -128,13 +136,12 @@ def _predict_poly(
  List of polygon points, where each polygon is a list of [x, y] coordinates.
  model : Model
  The model to use for prediction, representing a specific fabrication process and
- dataset. This model encapsulates details about the fabrication foundry, process,
- material, technology, thickness, and sidewall presence, as defined in
- `models.py`. Each model is associated with a version and dataset that detail its
- creation and the data it was trained on, ensuring the prediction is tailored to
- specific fabrication parameters.
+ dataset. This model encapsulates details about the fabrication foundry and
+ process, as defined in `models.py`. Each model is associated with a version and
+ dataset that detail its creation and the data it was trained on, ensuring the
+ prediction is tailored to specific fabrication parameters.
  model_type : str
- The type of model to use ('p' for prediction, 'c' for correction).
+ The type of model to use (e.g., 'p' for prediction or 'c' for correction).
  eta : float
  The threshold value for binarization. Defaults to 0.5. Because intermediate
  values cannot be preserved in the polygon data, the predicted polygons are
@@ -198,7 +205,7 @@ def predict_gds(
  model_type: str,
  gds_layer: tuple[int, int] = (1, 0),
  eta: float = 0.5,
- output_path: str = None,
+ output_path: str | None = None,
  ) -> None:
  """
  Predict the nanofabrication outcome for a GDS file and cell.
@@ -216,13 +223,12 @@ def predict_gds(
  The name of the cell within the GDS file to predict.
  model : Model
  The model to use for prediction, representing a specific fabrication process and
- dataset. This model encapsulates details about the fabrication foundry, process,
- material, technology, thickness, and sidewall presence, as defined in
- `models.py`. Each model is associated with a version and dataset that detail its
- creation and the data it was trained on, ensuring the prediction is tailored to
- specific fabrication parameters.
+ dataset. This model encapsulates details about the fabrication foundry and
+ process, as defined in `models.py`. Each model is associated with a version and
+ dataset that detail its creation and the data it was trained on, ensuring the
+ prediction is tailored to specific fabrication parameters.
  model_type : str
- The type of model to use ('p' for prediction, 'c' for correction).
+ The type of model to use (e.g., 'p' for prediction or 'c' for correction).
  gds_layer : tuple[int, int]
  The layer and datatype to use within the GDS file. Defaults to (1, 0).
  eta : float
@@ -242,7 +248,14 @@ def predict_gds(
  returns an error or invalid response.
  """
  gdstk_library = gdstk.read_gds(gds_path)
- gdstk_cell = gdstk_library[cell_name]
+ cells = [
+ cell
+ for cell in gdstk_library.cells
+ if isinstance(cell, gdstk.Cell) and cell.name == cell_name
+ ]
+ if not cells:
+ raise ValueError(f"Cell '{cell_name}' not found in GDS file")
+ gdstk_cell = cells[0]

  predicted_cell = predict_gdstk(
  gdstk_cell=gdstk_cell,
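
The reworked lookup above fails loudly with a ValueError when the requested cell is missing, so callers can catch it. A hedged calling sketch; the file names, cell name, and model object are placeholders, not taken from this diff:

    from prefab.predict import predict_gds

    def correct_layout(model) -> None:
        """Run a correction model on one cell of a layout; names here are illustrative."""
        try:
            predict_gds(
                gds_path="layout.gds",               # hypothetical input file
                cell_name="ring_resonator",          # hypothetical cell name
                model=model,                         # a Model instance from models.py
                model_type="c",                      # correction; output cell gets a "_corrected" suffix
                gds_layer=(1, 0),
                eta=0.5,
                output_path="layout_corrected.gds",  # None would overwrite gds_path
            )
        except ValueError as err:
            # Raised by the new lookup when the named cell is absent from the GDS file.
            print(f"Skipping correction: {err}")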
@@ -261,6 +274,7 @@ def predict_gds(
  gdstk_library.add(predicted_cell)

  write_path = output_path if output_path is not None else gds_path
+ print(f"Writing to {write_path}")
  gdstk_library.write_gds(write_path, max_points=8190)


@@ -283,13 +297,12 @@ def predict_gdstk(
  The gdstk.Cell object containing polygons to predict.
  model : Model
  The model to use for prediction, representing a specific fabrication process and
- dataset. This model encapsulates details about the fabrication foundry, process,
- material, technology, thickness, and sidewall presence, as defined in
- `models.py`. Each model is associated with a version and dataset that detail its
- creation and the data it was trained on, ensuring the prediction is tailored to
- specific fabrication parameters.
+ dataset. This model encapsulates details about the fabrication foundry and
+ process, as defined in `models.py`. Each model is associated with a version and
+ dataset that detail its creation and the data it was trained on, ensuring the
+ prediction is tailored to specific fabrication parameters.
  model_type : str
- The type of model to use ('p' for prediction, 'c' for correction).
+ The type of model to use (e.g., 'p' for prediction or 'c' for correction).
  gds_layer : tuple[int, int]
  The layer and datatype to use within the GDSTK cell. Defaults to (1, 0).
  eta : float
@@ -317,7 +330,7 @@ def predict_gdstk(
  if not polygons:
  raise ValueError("No polygons found in the specified layer")

- polygon_points = [polygon.points.tolist() for polygon in polygons]
+ polygon_points = [polygon.points.tolist() for polygon in polygons] # pyright: ignore[reportAttributeAccessIssue]

  predicted_polygon_data = _predict_poly(
  polygon_points=polygon_points,
@@ -326,7 +339,8 @@ def predict_gdstk(
  eta=eta,
  )

- result_cell = gdstk.Cell(f"{gdstk_cell.name}_predicted")
+ suffix = "corrected" if model_type == "c" else "predicted"
+ result_cell = gdstk.Cell(f"{gdstk_cell.name}_{suffix}")

  polygons_by_channel = {}
  for polygon_data in predicted_polygon_data:
@@ -344,15 +358,15 @@ def predict_gdstk(

  for points in points_list:
  points_array = np.array(points)
- polygon = gdstk.Polygon(points_array, layer=layer, datatype=datatype)
+ polygon = gdstk.Polygon(points_array, layer=layer, datatype=datatype) # pyright: ignore[reportArgumentType]
  result_cell.add(polygon)

  return result_cell


  def _predict_array_with_grad(
- device_array: np.ndarray, model: Model
- ) -> tuple[np.ndarray, np.ndarray]:
+ device_array: npt.NDArray[Any], model: Model
+ ) -> tuple[npt.NDArray[Any], npt.NDArray[Any]]:
  """
  Predict the nanofabrication outcome of a device array and compute its gradient.

@@ -360,11 +374,6 @@ def _predict_array_with_grad(
  device array using a specified model. It also computes the gradient of the
  prediction with respect to the input device array.

- Notes
- -----
- This function is currently not used in the main `predict_array` function as
- the main `predict_array` function (e.g., GPU support and progress bar) for now.
-
  Parameters
  ----------
  device_array : np.ndarray
@@ -422,7 +431,9 @@ def _predict_array_with_grad(


  @primitive
- def predict_array_with_grad(device_array: np.ndarray, model: Model) -> np.ndarray:
+ def predict_array_with_grad(
+ device_array: npt.NDArray[Any], model: Model
+ ) -> npt.NDArray[Any]:
  """
  Predict the nanofabrication outcome of a device array and compute its gradient.

@@ -453,11 +464,13 @@ def predict_array_with_grad(device_array: np.ndarray, model: Model) -> np.ndarra
  prediction_array, gradient_array = _predict_array_with_grad(
  device_array=device_array, model=model
  )
- predict_array_with_grad.gradient_array = gradient_array # type: ignore
+ predict_array_with_grad.gradient_array = gradient_array # pyright: ignore[reportFunctionMemberAccess]
  return prediction_array


- def predict_array_with_grad_vjp(ans: np.ndarray, device_array: np.ndarray, *args):
+ def predict_array_with_grad_vjp(
+ ans: npt.NDArray[Any], device_array: npt.NDArray[Any], *args: Any
+ ) -> Any:
  """
  Define the vector-Jacobian product (VJP) for the prediction function.

@@ -475,10 +488,10 @@ def predict_array_with_grad_vjp(ans: np.ndarray, device_array: np.ndarray, *args
  function
  A function that computes the VJP given an upstream gradient `g`.
  """
- grad_x = predict_array_with_grad.gradient_array # type: ignore
+ grad_x = predict_array_with_grad.gradient_array # pyright: ignore[reportFunctionMemberAccess]

- def vjp(g: np.ndarray) -> np.ndarray:
- return g * grad_x
+ def vjp(g: npt.NDArray[Any]) -> npt.NDArray[Any]:
+ return g * grad_x # type: ignore[no-any-return]

  return vjp
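
For reviewers unfamiliar with the autograd hooks being re-typed here, the pattern is: mark the forward call as a primitive, stash the server-computed gradient on the function object, and register a VJP that multiplies it by the upstream gradient. A self-contained toy version of the same mechanism (not prefab's model call) behaves like this:

    import autograd.numpy as anp
    from autograd import grad
    from autograd.extend import defvjp, primitive

    @primitive
    def quadruple_square(x):
        # Stand-in for the opaque server round trip: f(x) = (2x)^2 = 4x^2.
        return (2.0 * x) ** 2

    def quadruple_square_vjp(ans, x):
        # Close over the local derivative (8x) and scale the upstream gradient g by it,
        # just as vjp() above returns g * grad_x.
        return lambda g: g * (8.0 * x)

    defvjp(quadruple_square, quadruple_square_vjp)

    loss = lambda x: anp.sum(quadruple_square(x))
    print(grad(loss)(anp.array([1.0, 2.0])))  # -> [ 8. 16.]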
 
@@ -486,7 +499,7 @@ def predict_array_with_grad_vjp(ans: np.ndarray, device_array: np.ndarray, *args
  defvjp(predict_array_with_grad, predict_array_with_grad_vjp)


- def _encode_array(array):
+ def _encode_array(array: npt.NDArray[Any]) -> str:
  """Encode an ndarray as a base64 encoded image for transmission."""
  image = Image.fromarray(np.uint8(array * 255))
  buffered = io.BytesIO()
@@ -495,14 +508,14 @@ def _encode_array(array):
  return encoded_png


- def _decode_array(encoded_png):
+ def _decode_array(encoded_png: str) -> npt.NDArray[Any]:
  """Decode a base64 encoded image and return an ndarray."""
  binary_data = base64.b64decode(encoded_png)
  image = Image.open(io.BytesIO(binary_data))
- return np.array(image) / 255
+ return np.array(image) / 255 # type: ignore[no-any-return]


- def _prepare_headers():
+ def _prepare_headers() -> dict[str, str]:
  """Prepare HTTP headers for a server request."""
  token_file_path = os.path.expanduser("~/.prefab.toml")
  try:
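
The `_encode_array`/`_decode_array` pair above ships device arrays as base64-encoded PNGs. A standalone sketch of that round trip, with the PNG save/encode steps that fall between the hunks shown here filled in as an assumption (helper names are illustrative):

    import base64
    import io

    import numpy as np
    from PIL import Image

    def encode_array(array: np.ndarray) -> str:
        image = Image.fromarray(np.uint8(array * 255))  # [0, 1] floats -> 8-bit grayscale
        buffered = io.BytesIO()
        image.save(buffered, format="PNG")              # assumed step between the shown lines
        return base64.b64encode(buffered.getvalue()).decode("utf-8")

    def decode_array(encoded_png: str) -> np.ndarray:
        image = Image.open(io.BytesIO(base64.b64decode(encoded_png)))
        return np.array(image) / 255                    # back to [0, 1] floats

    original = np.random.rand(32, 32)
    restored = decode_array(encode_array(original))
    print(float(np.abs(original - restored).max()))     # bounded by ~1/255 quantization error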
@@ -519,7 +532,7 @@ def _prepare_headers():
  except FileNotFoundError:
  raise FileNotFoundError(
  "Could not validate user.\n"
- "Please update prefab using: pip install --upgrade prefab.\n"
- "Signup/login and generate a new token.\n"
- "See https://docs.prefabphotonics.com/."
+ + "Please update prefab using: pip install --upgrade prefab.\n"
+ + "Signup/login and generate a new token.\n"
+ + "See https://docs.prefabphotonics.com/."
  ) from None
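
Tying the typing work back to the module docstring's inverse-design claim: because `predict_array_with_grad` is registered as an autograd primitive with the VJP above, it can sit inside a differentiated objective. A hedged sketch only; the model object and the toy objective are placeholders, not part of this release:

    import autograd.numpy as anp
    from autograd import grad
    from prefab.predict import predict_array_with_grad

    def make_objective(model):
        """Toy objective: total predicted material in the fabricated outcome."""
        def objective(device_array):
            prediction = predict_array_with_grad(device_array, model)
            return anp.sum(prediction)
        return objective

    # d_objective = grad(make_objective(some_model))  # gradients flow through the server call
    # device -= step_size * d_objective(device)       # e.g. one gradient-descent update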
prefab/py.typed ADDED
File without changes