prefab 1.2.0.tar.gz → 1.3.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. {prefab-1.2.0 → prefab-1.3.0}/PKG-INFO +1 -1
  2. {prefab-1.2.0 → prefab-1.3.0}/docs/CHANGELOG.md +6 -0
  3. {prefab-1.2.0 → prefab-1.3.0}/mkdocs.yml +1 -2
  4. {prefab-1.2.0 → prefab-1.3.0}/prefab/__init__.py +1 -1
  5. {prefab-1.2.0 → prefab-1.3.0}/prefab/device.py +0 -24
  6. {prefab-1.2.0 → prefab-1.3.0}/prefab/geometry.py +1 -0
  7. {prefab-1.2.0 → prefab-1.3.0}/prefab/predict.py +232 -213
  8. {prefab-1.2.0 → prefab-1.3.0}/pyproject.toml +1 -1
  9. {prefab-1.2.0 → prefab-1.3.0}/.gitattributes +0 -0
  10. {prefab-1.2.0 → prefab-1.3.0}/.github/workflows/docs-deploy.yml +0 -0
  11. {prefab-1.2.0 → prefab-1.3.0}/.github/workflows/python-publish.yml +0 -0
  12. {prefab-1.2.0 → prefab-1.3.0}/.gitignore +0 -0
  13. {prefab-1.2.0 → prefab-1.3.0}/LICENSE +0 -0
  14. {prefab-1.2.0 → prefab-1.3.0}/README.md +0 -0
  15. {prefab-1.2.0 → prefab-1.3.0}/docs/.markdownlint.json +0 -0
  16. {prefab-1.2.0 → prefab-1.3.0}/docs/CNAME +0 -0
  17. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/blog-cards/python-for-photonics.png +0 -0
  18. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/favicon.ico +0 -0
  19. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/images/flux_ID_nopredict.png +0 -0
  20. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/images/flux_ID_predict.png +0 -0
  21. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/logo-white.png +0 -0
  22. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/logo.png +0 -0
  23. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/prefab_logo_box.svg +0 -0
  24. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/prefab_logo_white.svg +0 -0
  25. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/promo_c.png +0 -0
  26. {prefab-1.2.0 → prefab-1.3.0}/docs/assets/promo_p.png +0 -0
  27. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/.authors.yml +0 -0
  28. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/index.md +0 -0
  29. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0.md +0 -0
  30. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_11_1.png +0 -0
  31. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_11_2.png +0 -0
  32. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_13_2.png +0 -0
  33. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_13_3.png +0 -0
  34. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_13_4.png +0 -0
  35. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_3_1.png +0 -0
  36. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_3_2.png +0 -0
  37. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_3_3.png +0 -0
  38. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_7_2.png +0 -0
  39. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_7_3.png +0 -0
  40. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_7_4.png +0 -0
  41. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/prefab-v1.1.0_files/prefab-v1.1.0_9_2.png +0 -0
  42. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/posts/python-for-photonics.md +0 -0
  43. {prefab-1.2.0 → prefab-1.3.0}/docs/blog/prefab-v1.1.0.ipynb +0 -0
  44. {prefab-1.2.0 → prefab-1.3.0}/docs/examples/1_prediction.ipynb +0 -0
  45. {prefab-1.2.0 → prefab-1.3.0}/docs/examples/2_correction.ipynb +0 -0
  46. {prefab-1.2.0 → prefab-1.3.0}/docs/examples/3_sidewall_angle.ipynb +0 -0
  47. {prefab-1.2.0 → prefab-1.3.0}/docs/examples/4_SEM_generation.ipynb +0 -0
  48. {prefab-1.2.0 → prefab-1.3.0}/docs/examples/5_prediction_simulation.ipynb +0 -0
  49. {prefab-1.2.0 → prefab-1.3.0}/docs/examples/6_fabrication-aware_inverse_design.ipynb +0 -0
  50. {prefab-1.2.0 → prefab-1.3.0}/docs/examples/7_sidewall_simulation.ipynb +0 -0
  51. {prefab-1.2.0 → prefab-1.3.0}/docs/fab_aware_design.md +0 -0
  52. {prefab-1.2.0 → prefab-1.3.0}/docs/index.md +0 -0
  53. {prefab-1.2.0 → prefab-1.3.0}/docs/models.md +0 -0
  54. {prefab-1.2.0 → prefab-1.3.0}/docs/overrides/main.html +0 -0
  55. {prefab-1.2.0 → prefab-1.3.0}/docs/reference/compare.md +0 -0
  56. {prefab-1.2.0 → prefab-1.3.0}/docs/reference/device.md +0 -0
  57. {prefab-1.2.0 → prefab-1.3.0}/docs/reference/geometry.md +0 -0
  58. {prefab-1.2.0 → prefab-1.3.0}/docs/reference/models.md +0 -0
  59. {prefab-1.2.0 → prefab-1.3.0}/docs/reference/predict.md +0 -0
  60. {prefab-1.2.0 → prefab-1.3.0}/docs/reference/read.md +0 -0
  61. {prefab-1.2.0 → prefab-1.3.0}/docs/reference/shapes.md +0 -0
  62. {prefab-1.2.0 → prefab-1.3.0}/docs/setup.md +0 -0
  63. {prefab-1.2.0 → prefab-1.3.0}/docs/stylesheets/extra.css +0 -0
  64. {prefab-1.2.0 → prefab-1.3.0}/prefab/__main__.py +0 -0
  65. {prefab-1.2.0 → prefab-1.3.0}/prefab/compare.py +0 -0
  66. {prefab-1.2.0 → prefab-1.3.0}/prefab/models.py +0 -0
  67. {prefab-1.2.0 → prefab-1.3.0}/prefab/read.py +0 -0
  68. {prefab-1.2.0 → prefab-1.3.0}/prefab/shapes.py +0 -0
  69. {prefab-1.2.0 → prefab-1.3.0}/site/404.html +0 -0
  70. {prefab-1.2.0 → prefab-1.3.0}/site/assets/_mkdocstrings.css +0 -0
  71. {prefab-1.2.0 → prefab-1.3.0}/site/assets/favicon.ico +0 -0
  72. {prefab-1.2.0 → prefab-1.3.0}/site/assets/images/favicon.png +0 -0
  73. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/bundle.a7c05c9e.min.js +0 -0
  74. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/bundle.a7c05c9e.min.js.map +0 -0
  75. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.ar.min.js +0 -0
  76. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.da.min.js +0 -0
  77. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.de.min.js +0 -0
  78. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.du.min.js +0 -0
  79. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.el.min.js +0 -0
  80. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.es.min.js +0 -0
  81. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.fi.min.js +0 -0
  82. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.fr.min.js +0 -0
  83. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.he.min.js +0 -0
  84. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.hi.min.js +0 -0
  85. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.hu.min.js +0 -0
  86. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.hy.min.js +0 -0
  87. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.it.min.js +0 -0
  88. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.ja.min.js +0 -0
  89. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.jp.min.js +0 -0
  90. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.kn.min.js +0 -0
  91. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.ko.min.js +0 -0
  92. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.multi.min.js +0 -0
  93. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.nl.min.js +0 -0
  94. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.no.min.js +0 -0
  95. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.pt.min.js +0 -0
  96. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.ro.min.js +0 -0
  97. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.ru.min.js +0 -0
  98. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.sa.min.js +0 -0
  99. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.stemmer.support.min.js +0 -0
  100. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.sv.min.js +0 -0
  101. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.ta.min.js +0 -0
  102. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.te.min.js +0 -0
  103. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.th.min.js +0 -0
  104. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.tr.min.js +0 -0
  105. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.vi.min.js +0 -0
  106. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/min/lunr.zh.min.js +0 -0
  107. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/tinyseg.js +0 -0
  108. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/lunr/wordcut.js +0 -0
  109. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/workers/search.b8dbb3d2.min.js +0 -0
  110. {prefab-1.2.0 → prefab-1.3.0}/site/assets/javascripts/workers/search.b8dbb3d2.min.js.map +0 -0
  111. {prefab-1.2.0 → prefab-1.3.0}/site/assets/logo.png +0 -0
  112. {prefab-1.2.0 → prefab-1.3.0}/site/assets/logo_neue.svg +0 -0
  113. {prefab-1.2.0 → prefab-1.3.0}/site/assets/promo_c.png +0 -0
  114. {prefab-1.2.0 → prefab-1.3.0}/site/assets/promo_p.png +0 -0
  115. {prefab-1.2.0 → prefab-1.3.0}/site/assets/stylesheets/main.66ac8b77.min.css +0 -0
  116. {prefab-1.2.0 → prefab-1.3.0}/site/assets/stylesheets/main.66ac8b77.min.css.map +0 -0
  117. {prefab-1.2.0 → prefab-1.3.0}/site/assets/stylesheets/palette.06af60db.min.css +0 -0
  118. {prefab-1.2.0 → prefab-1.3.0}/site/assets/stylesheets/palette.06af60db.min.css.map +0 -0
  119. {prefab-1.2.0 → prefab-1.3.0}/site/css/ansi-colours.css +0 -0
  120. {prefab-1.2.0 → prefab-1.3.0}/site/css/jupyter-cells.css +0 -0
  121. {prefab-1.2.0 → prefab-1.3.0}/site/css/pandas-dataframe.css +0 -0
  122. {prefab-1.2.0 → prefab-1.3.0}/site/examples/1_prediction/index.html +0 -0
  123. {prefab-1.2.0 → prefab-1.3.0}/site/examples/2_correction/index.html +0 -0
  124. {prefab-1.2.0 → prefab-1.3.0}/site/index.html +0 -0
  125. {prefab-1.2.0 → prefab-1.3.0}/site/models/index.html +0 -0
  126. {prefab-1.2.0 → prefab-1.3.0}/site/objects.inv +0 -0
  127. {prefab-1.2.0 → prefab-1.3.0}/site/reference/compare/index.html +0 -0
  128. {prefab-1.2.0 → prefab-1.3.0}/site/reference/device/index.html +0 -0
  129. {prefab-1.2.0 → prefab-1.3.0}/site/reference/geometry/index.html +0 -0
  130. {prefab-1.2.0 → prefab-1.3.0}/site/reference/models/index.html +0 -0
  131. {prefab-1.2.0 → prefab-1.3.0}/site/reference/read/index.html +0 -0
  132. {prefab-1.2.0 → prefab-1.3.0}/site/search/search_index.json +0 -0
  133. {prefab-1.2.0 → prefab-1.3.0}/site/sitemap.xml +0 -0
  134. {prefab-1.2.0 → prefab-1.3.0}/site/sitemap.xml.gz +0 -0
{prefab-1.2.0 → prefab-1.3.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prefab
-Version: 1.2.0
+Version: 1.3.0
 Summary: Artificial nanofabrication of integrated photonic circuits using deep learning
 Project-URL: Homepage, https://prefabphotonics.com
 Project-URL: Repository, https://github.com/PreFab-Photonics/PreFab

{prefab-1.2.0 → prefab-1.3.0}/docs/CHANGELOG.md
@@ -1,5 +1,11 @@
 # Changelog
 
+## [1.3.0](https://github.com/PreFab-Photonics/PreFab/releases/tag/v1.3.0) - 2025-07-15
+
+- Many changes on the server-side to significantly improve performance and accuracy. Some client-side changes to the prediction functions in `prefab.predict` to handle the updated server-side changes.
+- Added `predict.predict_gds` to predict directly on a GDS file.
+- Add small amount of clipping to binarization with roughness to avoid artifacts.
+
 ## [1.2.0](https://github.com/PreFab-Photonics/PreFab/releases/tag/v1.2.0) - 2025-03-19
 
 - Added `Device.segment` method to segment an SEM image into a binary mask. Some more testing to be done.

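The `predict.predict_gds` item above corresponds to the new function added to `prefab/predict.py` later in this diff. A minimal, hypothetical usage sketch (the GDS path, cell name, and model below are placeholders, not values taken from this release):

```python
import prefab as pf

# Placeholders: the GDS path and cell name are illustrative only; a real Model
# instance comes from prefab.models for your fabrication process.
model = ...

pf.predict.predict_gds(
    gds_path="chip.gds",               # hypothetical input layout
    cell_name="top",                   # hypothetical cell to predict
    model=model,
    model_type="p",                    # 'p' = prediction, 'c' = correction
    gds_layer=(1, 0),
    eta=0.5,
    output_path="chip_predicted.gds",  # omit to overwrite the input file
)
```
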
{prefab-1.2.0 → prefab-1.3.0}/mkdocs.yml
@@ -102,9 +102,8 @@ plugins:
   - mkdocstrings:
       handlers:
         python:
-          rendering:
-            show_source: true
           options:
+            show_source: true
             docstring_style: numpy
             show_docstring_examples: true
   - mkdocs-jupyter:

{prefab-1.2.0 → prefab-1.3.0}/prefab/__init__.py
@@ -5,7 +5,7 @@ Usage:
     import prefab as pf
 """
 
-__version__ = "1.2.0"
+__version__ = "1.3.0"
 
 from . import compare, geometry, predict, read, shapes
 from .device import BufferSpec, Device

{prefab-1.2.0 → prefab-1.3.0}/prefab/device.py
@@ -242,7 +242,6 @@ class Device(BaseModel):
         self,
         model: Model,
         binarize: bool = False,
-        gpu: bool = False,
     ) -> "Device":
         """
         Predict the nanofabrication outcome of the device using a specified model.
@@ -264,10 +263,6 @@ class Device(BaseModel):
            If True, the predicted device geometry will be binarized using a threshold
            method. This is useful for converting probabilistic predictions into binary
            geometries. Defaults to False.
-        gpu : bool
-            If True, the prediction will be performed on a GPU. Defaults to False.
-            Note: The GPU option has more overhead and will take longer for small
-            devices, but will be faster for larger devices.
 
         Returns
         -------
@@ -285,7 +280,6 @@ class Device(BaseModel):
             model=model,
             model_type="p",
             binarize=binarize,
-            gpu=gpu,
         )
         return self.model_copy(update={"device_array": prediction_array})
 
@@ -293,7 +287,6 @@ class Device(BaseModel):
         self,
         model: Model,
         binarize: bool = True,
-        gpu: bool = False,
     ) -> "Device":
         """
         Correct the nanofabrication outcome of the device using a specified model.
@@ -317,10 +310,6 @@ class Device(BaseModel):
            If True, the corrected device geometry will be binarized using a threshold
            method. This is useful for converting probabilistic corrections into binary
            geometries. Defaults to True.
-        gpu : bool
-            If True, the prediction will be performed on a GPU. Defaults to False.
-            Note: The GPU option has more overhead and will take longer for small
-            devices, but will be faster for larger devices.
 
         Returns
         -------
@@ -338,14 +327,12 @@ class Device(BaseModel):
             model=model,
             model_type="c",
             binarize=binarize,
-            gpu=gpu,
         )
         return self.model_copy(update={"device_array": correction_array})
 
     def semulate(
         self,
         model: Model,
-        gpu: bool = False,
     ) -> "Device":
         """
         Simulate the appearance of the device as if viewed under a scanning electron
@@ -365,10 +352,6 @@ class Device(BaseModel):
            in `models.py`. Each model is associated with a version and dataset that
            detail its creation and the data it was trained on, ensuring the SEMulation
            is tailored to specific fabrication parameters.
-        gpu : bool
-            If True, the prediction will be performed on a GPU. Defaults to False.
-            Note: The GPU option has more overhead and will take longer for small
-            devices, but will be faster for larger devices.
 
         Notes
         -----
@@ -392,7 +375,6 @@ class Device(BaseModel):
             model=model,
             model_type="s",
             binarize=False,
-            gpu=gpu,
         )
         semulated_array += np.random.normal(0, 0.03, semulated_array.shape)
         return self.model_copy(update={"device_array": semulated_array})
@@ -400,7 +382,6 @@ class Device(BaseModel):
     def segment(
         self,
         model: Model,
-        gpu: bool = False,
     ) -> "Device":
         """
         Segment a scanning electron microscope (SEM) image into a binary mask.
@@ -419,10 +400,6 @@ class Device(BaseModel):
            defined in `models.py`. Each model is associated with a version and dataset
            that detail its creation and the data it was trained on, ensuring the
            segmentation is tailored to specific fabrication parameters.
-        gpu : bool
-            If True, the prediction will be performed on a GPU. Defaults to False.
-            Note: The GPU option has more overhead and will take longer for small
-            devices, but will be faster for larger devices.
 
         Returns
         -------
@@ -441,7 +418,6 @@ class Device(BaseModel):
             model=model,
             model_type="b",
             binarize=False,
-            gpu=gpu,
         )
         return self.model_copy(update={"device_array": segmented_array})
 

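Taken together, the `device.py` changes remove the `gpu` flag from `Device.predict`, `correct`, `semulate`, and `segment`, so each method now takes just the model (plus `binarize` where applicable). A sketch of the updated call signatures, with the device and model left as placeholders:

```python
# Placeholders: `device` is a prefab.Device (e.g., built via prefab.read or
# prefab.shapes) and `model` is a prefab.models.Model; neither is defined here.
device = ...
model = ...

# No gpu= argument in 1.3.0; signatures otherwise match the diff above.
predicted = device.predict(model=model, binarize=False)
corrected = device.correct(model=model)   # binarize defaults to True
sem_like = device.semulate(model=model)
mask = device.segment(model=model)
```
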
{prefab-1.2.0 → prefab-1.3.0}/prefab/geometry.py
@@ -133,6 +133,7 @@ def binarize_monte_carlo(
     """
     device_array = np.squeeze(device_array)
     base_threshold = np.random.normal(loc=0.5, scale=0.1)
+    base_threshold = np.clip(base_threshold, 0.2, 0.8)
     threshold_noise = np.random.normal(
         loc=0, scale=noise_magnitude, size=device_array.shape
     )

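The one-line `geometry.py` change bounds the randomly drawn Monte Carlo threshold, which is the "small amount of clipping" noted in the changelog; an extreme draw from N(0.5, 0.1) can no longer land near 0 or 1. Shown in isolation:

```python
import numpy as np

rng = np.random.default_rng(seed=0)

# Mirror of the added line: the sampled threshold stays inside [0.2, 0.8], so a
# rare draw cannot push the roughness binarization toward all-solid or all-empty.
base_threshold = rng.normal(loc=0.5, scale=0.1)
base_threshold = float(np.clip(base_threshold, 0.2, 0.8))
assert 0.2 <= base_threshold <= 0.8
```
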
{prefab-1.2.0 → prefab-1.3.0}/prefab/predict.py
@@ -12,13 +12,101 @@ import toml
 from autograd import primitive
 from autograd.extend import defvjp
 from PIL import Image
-from tqdm import tqdm
 
 from .geometry import binarize_hard
 from .models import Model
 
 BASE_ENDPOINT_URL = "https://prefab-photonics--predict"
-ENDPOINT_VERSION = "2"
+ENDPOINT_VERSION = "3"
+
+
+def predict_array(
+    device_array: np.ndarray,
+    model: Model,
+    model_type: str,
+    binarize: bool,
+) -> np.ndarray:
+    """
+    Predict the nanofabrication outcome of a device array using a specified model.
+
+    This function sends the device array to a serverless prediction service, which uses
+    a specified machine learning model to predict the outcome of the nanofabrication
+    process.
+
+    Parameters
+    ----------
+    device_array : np.ndarray
+        A 2D array representing the planar geometry of the device. This array undergoes
+        various transformations to predict the nanofabrication process.
+    model : Model
+        The model to use for prediction, representing a specific fabrication process and
+        dataset. This model encapsulates details about the fabrication foundry, process,
+        material, technology, thickness, and sidewall presence, as defined in
+        `models.py`. Each model is associated with a version and dataset that detail its
+        creation and the data it was trained on, ensuring the prediction is tailored to
+        specific fabrication parameters.
+    model_type : str
+        The type of model to use (e.g., 'p' for prediction, 'c' for correction, or 's'
+        for SEMulate, 'b' for segmentation).
+    binarize : bool
+        If True, the predicted device geometry will be binarized using a threshold
+        method. This is useful for converting probabilistic predictions into binary
+        geometries.
+
+    Returns
+    -------
+    np.ndarray
+        The predicted output array. For single-level predictions, returns shape
+        (h, w, 1). For multi-level predictions, returns shape (h, w, n) where n is the
+        number of levels.
+
+    Raises
+    ------
+    RuntimeError
+        If the request to the prediction service fails.
+    ValueError
+        If the server returns an error or invalid response.
+    """
+    endpoint_url = f"{BASE_ENDPOINT_URL}-v{ENDPOINT_VERSION}.modal.run"
+    predict_data = {
+        "device_array": _encode_array(np.squeeze(device_array)),
+        "model": model.to_json(),
+        "model_type": model_type,
+    }
+    headers = _prepare_headers()
+
+    try:
+        response = requests.post(
+            url=endpoint_url, data=json.dumps(predict_data), headers=headers
+        )
+        response.raise_for_status()
+
+        if not response.content:
+            raise ValueError("Empty response received from server")
+
+        response_data = response.json()
+
+        if "error" in response_data:
+            raise ValueError(f"Prediction error: {response_data['error']}")
+
+        results = response_data["results"]
+        result_arrays = [
+            _decode_array(results[key])
+            for key in sorted(results.keys())
+            if key.startswith("result")
+        ]
+
+        prediction_array = np.stack(result_arrays, axis=-1)
+
+        if binarize:
+            prediction_array = binarize_hard(prediction_array)
+
+        return prediction_array
+
+    except requests.exceptions.RequestException as e:
+        raise RuntimeError(f"Request failed: {e}") from e
+    except json.JSONDecodeError as e:
+        raise ValueError(f"JSON decode error: {e}") from e
 
 
 def _predict_poly(
@@ -28,10 +116,11 @@ def _predict_poly(
     eta: float = 0.5,
 ) -> list:
     """
-    Predict the nanofabrication outcome for a list of polygons.
+    Predict the nanofabrication outcome for a geometry (list of polygons).
 
-    This function sends polygon data to the server, which uses a specified machine
-    learning model to predict the outcome of the nanofabrication process.
+    This function sends the device array to a serverless prediction service, which uses
+    a specified machine learning model to predict the outcome of the nanofabrication
+    process.
 
     Parameters
     ----------
@@ -54,17 +143,15 @@ def _predict_poly(
     Returns
     -------
     list
-        List of predicted polygon points with channel information. Each polygon is a
-        dict with 'points' (list of coordinates) and 'channel' (int) keys.
+        List of predicted polygon points with level information. Each polygon is a dict
+        with 'points' (list of coordinates) and 'level' (int) keys.
 
     Raises
     ------
-    ValueError
-        If the server returns an error or empty response.
-    requests.exceptions.RequestException
+    RuntimeError
         If the request to the prediction service fails.
-    json.JSONDecodeError
-        If the response cannot be parsed as JSON.
+    ValueError
+        If the server returns an error or invalid response.
     """
     predict_data = {
         "polygons": polygon_points,
@@ -99,11 +186,82 @@ def _predict_poly(
         return []
 
     except requests.exceptions.RequestException as e:
-        print(f"Request failed: {str(e)}")
-        raise
+        raise RuntimeError(f"Request failed: {e}") from e
     except json.JSONDecodeError as e:
-        print(f"JSON decode error: {str(e)}")
-        raise
+        raise ValueError(f"JSON decode error: {e}") from e
+
+
+def predict_gds(
+    gds_path: str,
+    cell_name: str,
+    model: Model,
+    model_type: str,
+    gds_layer: tuple[int, int] = (1, 0),
+    eta: float = 0.5,
+    output_path: str = None,
+) -> None:
+    """
+    Predict the nanofabrication outcome for a GDS file and cell.
+
+    This function loads a GDS file, extracts the specified cell, and predicts the
+    nanofabrication outcome using the specified model. The predicted cell is
+    automatically added to the original GDS library and the file is written to the
+    specified output path (or overwrites the original if no output path is provided).
+
+    Parameters
+    ----------
+    gds_path : str
+        The file path to the GDS file.
+    cell_name : str
+        The name of the cell within the GDS file to predict.
+    model : Model
+        The model to use for prediction, representing a specific fabrication process and
+        dataset. This model encapsulates details about the fabrication foundry, process,
+        material, technology, thickness, and sidewall presence, as defined in
+        `models.py`. Each model is associated with a version and dataset that detail its
+        creation and the data it was trained on, ensuring the prediction is tailored to
+        specific fabrication parameters.
+    model_type : str
+        The type of model to use ('p' for prediction, 'c' for correction).
+    gds_layer : tuple[int, int]
+        The layer and datatype to use within the GDS file. Defaults to (1, 0).
+    eta : float
+        The threshold value for binarization. Defaults to 0.5. Because intermediate
+        values cannot be preserved in the polygon data, the predicted polygons are
+        binarized using a threshold value of eta.
+    output_path : str, optional
+        The file path where the updated GDS file will be written. If None, the
+        original file will be overwritten. Defaults to None.
+
+    Raises
+    ------
+    RuntimeError
+        If the request to the prediction service fails.
+    ValueError
+        If the GDS file cannot be read, the specified cell is not found, or the server
+        returns an error or invalid response.
+    """
+    gdstk_library = gdstk.read_gds(gds_path)
+    gdstk_cell = gdstk_library[cell_name]
+
+    predicted_cell = predict_gdstk(
+        gdstk_cell=gdstk_cell,
+        model=model,
+        model_type=model_type,
+        gds_layer=gds_layer,
+        eta=eta,
+    )
+
+    base_name = predicted_cell.name
+    counter = 1
+    while predicted_cell.name in [cell.name for cell in gdstk_library.cells]:
+        predicted_cell.name = f"{base_name}_{counter}"
+        counter += 1
+
+    gdstk_library.add(predicted_cell)
+
+    write_path = output_path if output_path is not None else gds_path
+    gdstk_library.write_gds(write_path, max_points=8190)
 
 
 def predict_gdstk(
@@ -116,8 +274,8 @@
     """
     Predict the nanofabrication outcome of a gdstk cell using a specified model.
 
-    This function extracts polygons from a gdstk cell, sends them to the prediction
-    server, and returns a new cell containing the predicted polygons.
+    This function extracts polygons from a gdstk cell, sends them to a serverless
+    prediction service, and returns a new cell containing the predicted polygons.
 
     Parameters
    ----------
@@ -149,8 +307,11 @@
 
     Raises
     ------
+    RuntimeError
+        If the request to the prediction service fails.
     ValueError
-        If no polygons are found in the specified layer.
+        If no polygons are found in the specified layer, or the server returns an error
+        or invalid response.
     """
     polygons = gdstk_cell.get_polygons(layer=gds_layer[0], datatype=gds_layer[1])
     if not polygons:
@@ -189,78 +350,6 @@
     return result_cell
 
 
-def predict_array(
-    device_array: np.ndarray,
-    model: Model,
-    model_type: str,
-    binarize: bool,
-    gpu: bool = False,
-) -> np.ndarray:
-    """
-    Predict the nanofabrication outcome of a device array using a specified model.
-
-    This function sends the device array to a serverless prediction service, which uses
-    a specified machine learning model to predict the outcome of the nanofabrication
-    process. The prediction can be performed on a GPU if specified.
-
-    Parameters
-    ----------
-    device_array : np.ndarray
-        A 2D array representing the planar geometry of the device. This array undergoes
-        various transformations to predict the nanofabrication process.
-    model : Model
-        The model to use for prediction, representing a specific fabrication process and
-        dataset. This model encapsulates details about the fabrication foundry, process,
-        material, technology, thickness, and sidewall presence, as defined in
-        `models.py`. Each model is associated with a version and dataset that detail its
-        creation and the data it was trained on, ensuring the prediction is tailored to
-        specific fabrication parameters.
-    model_type : str
-        The type of model to use (e.g., 'p' for prediction, 'c' for correction, or 's'
-        for SEMulate).
-    binarize : bool
-        If True, the predicted device geometry will be binarized using a threshold
-        method. This is useful for converting probabilistic predictions into binary
-        geometries.
-    gpu : bool
-        If True, the prediction will be performed on a GPU. Defaults to False. Note: The
-        GPU option has more startup overhead and will take longer for small devices, but
-        will be faster for larger devices.
-
-    Returns
-    -------
-    np.ndarray
-        The predicted output array.
-
-    Raises
-    ------
-    RuntimeError
-        If the request to the prediction service fails.
-    """
-    headers = _prepare_headers()
-    predict_data = _prepare_predict_data(device_array, model, model_type, binarize)
-    endpoint_url = (
-        f"{BASE_ENDPOINT_URL}-gpu-v{ENDPOINT_VERSION}.modal.run"
-        if gpu
-        else f"{BASE_ENDPOINT_URL}-v{ENDPOINT_VERSION}.modal.run"
-    )
-
-    try:
-        with requests.post(
-            endpoint_url,
-            data=json.dumps(predict_data),
-            headers=headers,
-            stream=True,
-        ) as response:
-            response.raise_for_status()
-            result = _process_response(response, model_type, binarize)
-            if result is None:
-                raise RuntimeError("No prediction result received.")
-            return result
-    except requests.RequestException as e:
-        raise RuntimeError(f"Request failed: {e}") from e
-
-
 def _predict_array_with_grad(
     device_array: np.ndarray, model: Model
 ) -> tuple[np.ndarray, np.ndarray]:
@@ -287,21 +376,49 @@
     -------
     tuple[np.ndarray, np.ndarray]
         The predicted output array and gradient array.
+
+    Raises
+    ------
+    RuntimeError
+        If the request to the prediction service fails.
+    ValueError
+        If the server returns an error or invalid response.
     """
     headers = _prepare_headers()
-    predict_data = _prepare_predict_data(device_array, model, "p", False)
+    predict_data = {
+        "device_array": _encode_array(np.squeeze(device_array)),
+        "model": model.to_json(),
+        "model_type": "p",
+        "binary": False,
+    }
     endpoint_url = f"{BASE_ENDPOINT_URL}-with-grad-v{ENDPOINT_VERSION}.modal.run"
 
-    response = requests.post(
-        endpoint_url, data=json.dumps(predict_data), headers=headers
-    )
-    prediction_array = _decode_array(response.json()["prediction_array"])
-    gradient_array = _decode_array(response.json()["gradient_array"])
-    gradient_min = response.json()["gradient_min"]
-    gradient_max = response.json()["gradient_max"]
-    gradient_range = gradient_max - gradient_min
-    gradient_array = gradient_array * gradient_range + gradient_min
-    return (prediction_array, gradient_array)
+    try:
+        response = requests.post(
+            endpoint_url, data=json.dumps(predict_data), headers=headers
+        )
+        response.raise_for_status()
+
+        if not response.content:
+            raise ValueError("Empty response received from server")
+
+        response_data = response.json()
+
+        if "error" in response_data:
+            raise ValueError(f"Prediction error: {response_data['error']}")
+
+        prediction_array = _decode_array(response_data["prediction_array"])
+        gradient_array = _decode_array(response_data["gradient_array"])
+        gradient_min = response_data["gradient_min"]
+        gradient_max = response_data["gradient_max"]
+        gradient_range = gradient_max - gradient_min
+        gradient_array = gradient_array * gradient_range + gradient_min
+        return (prediction_array, gradient_array)
+
+    except requests.exceptions.RequestException as e:
+        raise RuntimeError(f"Request failed: {e}") from e
+    except json.JSONDecodeError as e:
+        raise ValueError(f"JSON decode error: {e}") from e
 
 
 @primitive
@@ -325,6 +442,13 @@ def predict_array_with_grad(device_array: np.ndarray, model: Model) -> np.ndarray:
     -------
     np.ndarray
         The predicted output array.
+
+    Raises
+    ------
+    RuntimeError
+        If the request to the prediction service fails.
+    ValueError
+        If the server returns an error or invalid response.
     """
     prediction_array, gradient_array = _predict_array_with_grad(
         device_array=device_array, model=model
@@ -378,8 +502,8 @@ def _decode_array(encoded_png):
     return np.array(image) / 255
 
 
-def _read_tokens():
-    """Read access and refresh tokens from the configuration file."""
+def _prepare_headers():
+    """Prepare HTTP headers for a server request."""
     token_file_path = os.path.expanduser("~/.prefab.toml")
     try:
         with open(token_file_path) as file:
@@ -388,7 +512,10 @@ def _read_tokens():
            refresh_token = tokens.get("refresh_token")
            if not access_token or not refresh_token:
                raise ValueError("Tokens not found in the configuration file.")
-            return access_token, refresh_token
+            return {
+                "Authorization": f"Bearer {access_token}",
+                "X-Refresh-Token": refresh_token,
+            }
     except FileNotFoundError:
         raise FileNotFoundError(
             "Could not validate user.\n"
@@ -396,111 +523,3 @@ def _read_tokens():
             "Signup/login and generate a new token.\n"
             "See https://docs.prefabphotonics.com/."
         ) from None
-
-
-def _prepare_headers():
-    """Prepare HTTP headers for the request."""
-    access_token, refresh_token = _read_tokens()
-    return {
-        "Authorization": f"Bearer {access_token}",
-        "X-Refresh-Token": refresh_token,
-    }
-
-
-def _prepare_predict_data(device_array, model, model_type, binarize):
-    """Prepare the data payload for the prediction request."""
-    return {
-        "device_array": _encode_array(np.squeeze(device_array)),
-        "model": model.to_json(),
-        "model_type": model_type,
-        "binary": binarize,
-    }
-
-
-def _process_response(response, model_type, binarize):
-    """Process the streaming response from the prediction request."""
-    event_type = None
-    model_descriptions = {
-        "p": "Prediction",
-        "c": "Correction",
-        "s": "SEMulate",
-    }
-    progress_bar = tqdm(
-        total=100,
-        desc=model_descriptions.get(model_type, "Processing"),
-        unit="%",
-        colour="green",
-        bar_format="{l_bar}{bar:30}{r_bar}{bar:-10b}",
-    )
-
-    for line in response.iter_lines():
-        if line:
-            decoded_line = line.decode("utf-8").strip()
-            if decoded_line.startswith("event:"):
-                event_type = decoded_line.split(":", 1)[1].strip()
-            elif decoded_line.startswith("data:"):
-                data_content = _parse_data_line(decoded_line)
-                result = _handle_event(event_type, data_content, progress_bar, binarize)
-                if result is not None:
-                    progress_bar.close()
-                    return result
-    progress_bar.close()
-
-
-def _parse_data_line(decoded_line):
-    """Parse a data line from the response stream."""
-    data_line = decoded_line.split(":", 1)[1].strip()
-    try:
-        return json.loads(data_line)
-    except json.JSONDecodeError:
-        raise ValueError(f"Failed to decode JSON: {data_line}") from None
-
-
-def _handle_event(event_type, data_content, progress_bar, binarize):
-    """Handle different types of events received from the server."""
-    if event_type == "progress":
-        _update_progress(progress_bar, data_content)
-    elif event_type == "result":
-        return _process_result(data_content, binarize)
-    elif event_type == "end":
-        print("Stream ended.")
-    elif event_type == "auth":
-        _update_tokens(data_content.get("auth", {}))
-    elif event_type == "error":
-        raise ValueError(f"{data_content['error']}")
-
-
-def _update_progress(progress_bar, data_content):
-    """Update the progress bar based on the progress event."""
-    progress = round(100 * data_content.get("progress", 0))
-    progress_bar.update(progress - progress_bar.n)
-
-
-def _process_result(data_content, binarize):
-    """Process the result event and return the prediction."""
-    results = [
-        _decode_array(data_content[key])
-        for key in sorted(data_content.keys())
-        if key.startswith("result")
-    ]
-    if results:
-        prediction = np.stack(results, axis=-1)
-        if binarize:
-            prediction = binarize_hard(prediction)
-        return prediction
-
-
-def _update_tokens(auth_data):
-    """Update tokens if new tokens are provided in the auth event."""
-    new_access_token = auth_data.get("new_access_token")
-    new_refresh_token = auth_data.get("new_refresh_token")
-    if new_access_token and new_refresh_token:
-        prefab_file_path = os.path.expanduser("~/.prefab.toml")
-        with open(prefab_file_path, "w", encoding="utf-8") as toml_file:
-            toml.dump(
-                {
-                    "access_token": new_access_token,
-                    "refresh_token": new_refresh_token,
-                },
-                toml_file,
-            )

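The net effect of the `predict.py` rewrite is that `predict_array` is now a single blocking request to the v3 endpoint that raises `RuntimeError` or `ValueError`, rather than a streamed response with a tqdm progress bar and an optional GPU endpoint. A sketch of calling it directly (the array and model below are placeholders):

```python
import numpy as np
import prefab as pf

device_array = np.zeros((256, 256))  # placeholder geometry; normally from a Device
model = ...                          # placeholder prefab.models.Model

try:
    prediction = pf.predict.predict_array(
        device_array=device_array,
        model=model,
        model_type="p",
        binarize=True,
    )
except RuntimeError as err:
    # raised when the HTTP request to the prediction service fails
    print(f"request failed: {err}")
except ValueError as err:
    # raised on empty/invalid responses or server-reported errors
    print(f"bad response: {err}")
```
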
{prefab-1.2.0 → prefab-1.3.0}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "prefab"
-version = "1.2.0"
+version = "1.3.0"
 description = "Artificial nanofabrication of integrated photonic circuits using deep learning"
 authors = [{ name = "PreFab Photonics Inc.", email = "hi@prefabphotonics.com" }]
 keywords = [