py3dcal 1.0.0__tar.gz → 1.0.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. {py3dcal-1.0.0/py3DCal.egg-info → py3dcal-1.0.7}/PKG-INFO +13 -2
  2. py3dcal-1.0.7/README.md +12 -0
  3. py3dcal-1.0.7/py3DCal/__init__.py +14 -0
  4. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/data_collection/Calibrator.py +10 -8
  5. {py3dcal-1.0.0/py3DCal/data_collection/Sensors → py3dcal-1.0.7/py3DCal/data_collection/sensors}/GelsightMini/GelsightMini.py +5 -5
  6. {py3dcal-1.0.0/py3DCal/data_collection/Sensors → py3dcal-1.0.7/py3DCal/data_collection/sensors}/Sensor.py +10 -1
  7. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/datasets/DIGIT_dataset.py +4 -2
  8. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/datasets/GelSightMini_dataset.py +4 -2
  9. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/datasets/tactile_sensor_dataset.py +4 -3
  10. py3dcal-1.0.7/py3DCal/model_training/lib/annotate_dataset.py +560 -0
  11. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/lib/depthmaps.py +11 -3
  12. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/lib/fast_poisson.py +12 -12
  13. py3dcal-1.0.7/py3DCal/model_training/lib/validate_parameters.py +87 -0
  14. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/models/touchnet.py +1 -1
  15. {py3dcal-1.0.0 → py3dcal-1.0.7/py3dcal.egg-info}/PKG-INFO +13 -2
  16. {py3dcal-1.0.0/py3DCal.egg-info → py3dcal-1.0.7/py3dcal.egg-info}/SOURCES.txt +13 -18
  17. {py3dcal-1.0.0 → py3dcal-1.0.7}/setup.py +1 -1
  18. py3dcal-1.0.0/README.md +0 -1
  19. py3dcal-1.0.0/py3DCal/__init__.py +0 -12
  20. py3dcal-1.0.0/py3DCal/model_training/lib/validate_parameters.py +0 -45
  21. {py3dcal-1.0.0 → py3dcal-1.0.7}/LICENSE +0 -0
  22. {py3dcal-1.0.0 → py3dcal-1.0.7}/MANIFEST.in +0 -0
  23. {py3dcal-1.0.0/py3DCal/data_collection/Printers/Ender3 → py3dcal-1.0.7/py3DCal/data_collection}/__init__.py +0 -0
  24. {py3dcal-1.0.0/py3DCal/data_collection/Printers → py3dcal-1.0.7/py3DCal/data_collection/printers}/Ender3/Ender3.py +0 -0
  25. {py3dcal-1.0.0/py3DCal/data_collection/Printers → py3dcal-1.0.7/py3DCal/data_collection/printers/Ender3}/__init__.py +0 -0
  26. {py3dcal-1.0.0/py3DCal/data_collection/Printers → py3dcal-1.0.7/py3DCal/data_collection/printers}/Printer.py +0 -0
  27. {py3dcal-1.0.0/py3DCal/data_collection/Sensors/DIGIT → py3dcal-1.0.7/py3DCal/data_collection/printers}/__init__.py +0 -0
  28. {py3dcal-1.0.0/py3DCal/data_collection/Sensors → py3dcal-1.0.7/py3DCal/data_collection/sensors}/DIGIT/DIGIT.py +0 -0
  29. {py3dcal-1.0.0/py3DCal/data_collection/Sensors/GelsightMini → py3dcal-1.0.7/py3DCal/data_collection/sensors/DIGIT}/__init__.py +0 -0
  30. {py3dcal-1.0.0/py3DCal/data_collection/Sensors → py3dcal-1.0.7/py3DCal/data_collection/sensors}/DIGIT/default.csv +0 -0
  31. {py3dcal-1.0.0/py3DCal/data_collection/Sensors → py3dcal-1.0.7/py3DCal/data_collection/sensors/GelsightMini}/__init__.py +0 -0
  32. {py3dcal-1.0.0/py3DCal/data_collection/Sensors → py3dcal-1.0.7/py3DCal/data_collection/sensors}/GelsightMini/default.csv +0 -0
  33. {py3dcal-1.0.0/py3DCal/data_collection → py3dcal-1.0.7/py3DCal/data_collection/sensors}/__init__.py +0 -0
  34. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/__init__.py +0 -0
  35. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/datasets/__init__.py +0 -0
  36. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/datasets/split_dataset.py +0 -0
  37. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/lib/__init__.py +0 -0
  38. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/lib/add_coordinate_embeddings.py +0 -0
  39. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/lib/get_gradient_map.py +0 -0
  40. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/lib/precompute_gradients.py +0 -0
  41. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/lib/train_model.py +0 -0
  42. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/models/__init__.py +0 -0
  43. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/utils/__init__.py +0 -0
  44. {py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/utils/utils.py +0 -0
  45. {py3dcal-1.0.0/py3DCal.egg-info → py3dcal-1.0.7/py3dcal.egg-info}/dependency_links.txt +0 -0
  46. {py3dcal-1.0.0/py3DCal.egg-info → py3dcal-1.0.7/py3dcal.egg-info}/entry_points.txt +0 -0
  47. {py3dcal-1.0.0/py3DCal.egg-info → py3dcal-1.0.7/py3dcal.egg-info}/requires.txt +0 -0
  48. {py3dcal-1.0.0/py3DCal.egg-info → py3dcal-1.0.7/py3dcal.egg-info}/top_level.txt +0 -0
  49. {py3dcal-1.0.0 → py3dcal-1.0.7}/setup.cfg +0 -0
{py3dcal-1.0.0/py3DCal.egg-info → py3dcal-1.0.7}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: py3dcal
- Version: 1.0.0
+ Version: 1.0.7
  Summary: UNKNOWN
  Home-page: https://github.com/rohankotanu/py3DCal
  Author: Rohan Kota
@@ -12,5 +12,16 @@ Classifier: License :: OSI Approved :: MIT License
  Description-Content-Type: text/markdown
  License-File: LICENSE
 
- # For instructions on how to use this library, please visit https://rohankotanu.github.io/3DCal/
+ # Installing the package
+ ```python
+ pip3 install py3dcal
+ ```
+
+ <br>
+
+ # For instructions on how to use this package, please visit https://rohankotanu.github.io/3DCal/
+
+ <br>
+
+ **Note:** Project was tested with Python 3.10.4
 
py3dcal-1.0.7/README.md ADDED
@@ -0,0 +1,12 @@
+ # Installing the package
+ ```python
+ pip3 install py3dcal
+ ```
+
+ <br>
+
+ # For instructions on how to use this package, please visit https://rohankotanu.github.io/3DCal/
+
+ <br>
+
+ **Note:** Project was tested with Python 3.10.4
py3dcal-1.0.7/py3DCal/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from .data_collection.Calibrator import Calibrator
+ from .data_collection.printers.Printer import Printer
+ from .data_collection.printers.Ender3.Ender3 import Ender3
+ from .data_collection.sensors.Sensor import Sensor
+ from .data_collection.sensors.DIGIT.DIGIT import DIGIT
+ from .data_collection.sensors.GelsightMini.GelsightMini import GelsightMini
+ from .model_training import datasets, models
+ from .model_training.datasets.split_dataset import split_dataset
+ from .model_training.models.touchnet import SensorType
+ from .model_training.lib.annotate_dataset import annotate, visualize_annotations
+ from .model_training.lib.train_model import train_model
+ from .model_training.lib.depthmaps import get_depthmap, save_2d_depthmap, show_2d_depthmap
+ from .model_training.lib.fast_poisson import fast_poisson
+ from .utils.utils import list_com_ports
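With the new top-level exports above, a typical session can run entirely from the package namespace. A minimal, hypothetical sketch: only the names come from the `__init__.py` above, and the constructor arguments are assumptions, not taken from this diff.

```python
# Hypothetical usage of the 1.0.7 public API; only the names come from
# the __init__.py above. Argument lists are assumptions.
import py3DCal as p3d

p3d.list_com_ports()                          # locate the printer's serial port
printer = p3d.Ender3()                        # exported printer class
sensor = p3d.DIGIT()                          # exported sensor class
calibrator = p3d.Calibrator(printer, sensor)  # pairing is an assumption

# Annotation, training, and depthmap helpers are exported alongside:
# p3d.annotate(...), p3d.train_model(...), p3d.get_depthmap(...),
# p3d.fast_poisson(...)
```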
{py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/data_collection/Calibrator.py
@@ -6,9 +6,8 @@ from typing import Union
  from pathlib import Path
  from PIL import Image
  from tqdm import tqdm
-
- from .Printers.Printer import Printer
- from .Sensors.Sensor import Sensor
+ from .printers.Printer import Printer
+ from .sensors.Sensor import Sensor
 
  class Calibrator:
      """ Calibrator class to automatically probe a tactile sensor.
@@ -153,7 +152,7 @@ class Calibrator:
          # Connect to 3D printer if not already connected
          if not self.printer_connected:
              self.connect_printer()
-         self.printer.send_gcode("M117 Sensor Calibration In Progress")
+         self.printer.send_gcode("M117 Calibrating sensor...")
 
          # Connect to sensor
          if save_images == True:
@@ -257,11 +256,14 @@ class Calibrator:
          with open(os.path.join(data_save_path, "annotations", "probe_data.csv"), 'a', newline='') as csv_file:
              csv_writer = csv.writer(csv_file)
 
-             for j in range(int(self.calibration_points[i][3])):
+             # Flush frames to clear camera buffer
+             self.sensor.flush_frames(n=5)
+
+             for _ in range(int(self.calibration_points[i][3])):
                  frame = self.sensor.capture_image()
 
                  img_name = str(img_idx) + "_" + "X" + str(self.calibration_points[i][0]) + "Y" + str(self.calibration_points[i][1]) + "Z" + str(self.calibration_points[i][2]) + ".png"
-                 img_path = os.path.join(data_save_path, "probe_images",img_name)
+                 img_path = os.path.join(data_save_path, "probe_images", img_name)
 
                  img = Image.fromarray(frame)
                  img.save(img_path)
@@ -283,7 +285,7 @@ class Calibrator:
          print("")
 
          # Update printer display
-         self.printer.send_gcode("M117 Sensor Calibration Complete!")
+         self.printer.send_gcode("M117 Calibration Done!")
 
          # Disconnect from 3D printer
          self.disconnect_printer()
@@ -292,7 +294,7 @@ class Calibrator:
          if save_images == True:
              self.disconnect_sensor()
 
-         print("Sensor calibration procedure complete!")
+         print("Calibration procedure complete!")
          print("")
 
          return True
{py3dcal-1.0.0/py3DCal/data_collection/Sensors → py3dcal-1.0.7/py3DCal/data_collection/sensors}/GelsightMini/GelsightMini.py
@@ -10,10 +10,10 @@ except:
 
  class GelsightMini(Sensor):
      """
-     GelsightMini: A Sensor Class for the Gelsight Mini sensor
+     GelsightMini: A Sensor Class for the GelSight Mini sensor
      """
      def __init__(self):
-         self.name = "Gelsight Mini"
+         self.name = "GelSight Mini"
          self.x_offset = 108
          self.y_offset = 110
          self.z_offset = 67
@@ -23,7 +23,7 @@ class GelsightMini(Sensor):
 
      def connect(self):
          """
-         Connects to the Gelsight Mini sensor.
+         Connects to the GelSight Mini sensor.
          """
          # Code to connect to the sensor
          self.sensor = gsdevice.Camera("GelSight Mini")
@@ -31,14 +31,14 @@ class GelsightMini(Sensor):
 
      def disconnect(self):
          """
-         Disconnects from the Gelsight Mini sensor.
+         Disconnects from the GelSight Mini sensor.
          """
          # Code to disconnect from the sensor
          self.sensor.stop_video()
 
      def capture_image(self):
          """
-         Captures an image from the Gelsight Mini sensor.
+         Captures an image from the GelSight Mini sensor.
          """
          # Code to return an image from the sensor
          image = cv2.cvtColor(self.sensor.get_image(), cv2.COLOR_BGR2RGB)
{py3dcal-1.0.0/py3DCal/data_collection/Sensors → py3dcal-1.0.7/py3DCal/data_collection/sensors}/Sensor.py
@@ -32,4 +32,13 @@ class Sensor(ABC):
          Returns:
              numpy.ndarray: The image from the sensor.
          """
-         pass
+         pass
+
+     def flush_frames(self, n: int = 5):
+         """Discards the next n frames to clear camera buffer.
+
+         Args:
+             n (int): Number of frames to discard. Default is 5.
+         """
+         for _ in range(n):
+             self.capture_image()
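Because flush_frames() is implemented on the abstract base in terms of capture_image(), every Sensor subclass inherits it for free. A sketch of a hypothetical third-party sensor picking it up (the cv2-backed capture is illustrative, not package code, and any additional abstract members on Sensor would also need implementing):

```python
import cv2
import numpy as np
from py3DCal import Sensor

class WebcamSensor(Sensor):
    """Illustrative sensor backed by a plain webcam; not from the package."""
    def connect(self):
        self.cap = cv2.VideoCapture(0)

    def disconnect(self):
        self.cap.release()

    def capture_image(self) -> np.ndarray:
        _, frame = self.cap.read()
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

sensor = WebcamSensor()
sensor.connect()
sensor.flush_frames(n=5)        # inherited: discards stale buffered frames
fresh = sensor.capture_image()  # now reflects the current scene
sensor.disconnect()
```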
{py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/datasets/DIGIT_dataset.py
@@ -21,13 +21,15 @@ class DIGIT(TactileSensorDataset):
      def __init__(self, root: Union[str, Path] = Path("."), download=False, add_coordinate_embeddings=True, subtract_blank=True, transform=None):
          validate_root(root)
 
-         self.dataset_path = os.path.join(root, "digit_calibration_data")
+         self.root = root
 
-         super().__init__(root=self.dataset_path, add_coordinate_embeddings=add_coordinate_embeddings, subtract_blank=subtract_blank, transform=transform)
+         self.dataset_path = os.path.join(self.root, "digit_calibration_data")
 
          if download:
              self._download_dataset()
 
+         super().__init__(root=self.dataset_path, add_coordinate_embeddings=add_coordinate_embeddings, subtract_blank=subtract_blank, transform=transform)
+
      def _download_dataset(self):
          """
          Downloads the dataset for either the DIGIT sensor.
{py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/datasets/GelSightMini_dataset.py
@@ -21,13 +21,15 @@ class GelSightMini(TactileSensorDataset):
      def __init__(self, root: Union[str, Path] = Path("."), download=False, add_coordinate_embeddings=True, subtract_blank=True, transform=None):
          validate_root(root)
 
-         self.dataset_path = os.path.join(root, "gsmini_calibration_data")
+         self.root = root
 
-         super().__init__(root=self.dataset_path, add_coordinate_embeddings=add_coordinate_embeddings, subtract_blank=subtract_blank, transform=transform)
+         self.dataset_path = os.path.join(self.root, "gsmini_calibration_data")
 
          if download:
              self._download_dataset()
 
+         super().__init__(root=self.dataset_path, add_coordinate_embeddings=add_coordinate_embeddings, subtract_blank=subtract_blank, transform=transform)
+
      def _download_dataset(self):
          """
          Downloads the dataset for the GelSight Mini sensor.
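The reordering in both dataset classes matters on a first run: super().__init__ now calls validate_dataset() (see tactile_sensor_dataset.py below), so the download has to happen before validation. A sketch under that assumption; the root path is made up, and the exact item layout returned by indexing is not shown in this diff.

```python
# Assumes a network connection for download=True; the path is illustrative.
from py3DCal.model_training.datasets.DIGIT_dataset import DIGIT

dataset = DIGIT(root="./data", download=True)  # fetch first, validate after
print(len(dataset))  # standard torch Dataset API
```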
{py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/datasets/tactile_sensor_dataset.py
@@ -9,7 +9,7 @@ from torchvision import transforms
  from ..lib.precompute_gradients import precompute_gradients
  from ..lib.get_gradient_map import get_gradient_map
  from ..lib.add_coordinate_embeddings import add_coordinate_embeddings
- from ..lib.validate_parameters import validate_root
+ from ..lib.validate_parameters import validate_root, validate_dataset
 
  class TactileSensorDataset(Dataset):
      """
@@ -22,12 +22,13 @@ class TactileSensorDataset(Dataset):
          transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. Default: ``transforms.ToTensor()``
      """
      def __init__(self, root: Union[str, Path], add_coordinate_embeddings=True, subtract_blank=True, transform=None):
-         validate_root(root)
+         validate_dataset(root, subtract_blank)
 
          self.root = root
          self.annotation_path = os.path.join(root, "annotations", "annotations.csv")
          self.metadata_path = os.path.join(root, "annotations", "metadata.json")
-         self.blank_image_path = os.path.join(root, "blank_images", "blank.png")
+         if subtract_blank:
+             self.blank_image_path = os.path.join(root, "blank_images", "blank.png")
          self.add_coordinate_embeddings = add_coordinate_embeddings
          self.subtract_blank = subtract_blank
py3dcal-1.0.7/py3DCal/model_training/lib/annotate_dataset.py ADDED
@@ -0,0 +1,560 @@
+ import os
+ import cv2
+ import math
+ import json
+ import numpy as np
+ import pandas as pd
+ from typing import Union
+ from pathlib import Path
+ from matplotlib import pyplot as plt
+ from matplotlib.patches import Circle
+ from .validate_parameters import validate_root
+
+ def annotate(dataset_path: Union[str, Path], probe_radius_mm: Union[int, float], img_idxs=None):
+     """
+     Tool to annotate custom dataset with pixel-to-millimeter calibration.
+     Creates an annotated_data.csv file required for training.
+
+     Controls:
+         - w/s: Move circle up/down
+         - a/d: Move circle left/right
+         - r/f: Increase/decrease circle size or pixel/mm ratio
+         - q: Proceed to next step
+
+     Args:
+         dataset_path (str or pathlib.Path): Path to the dataset directory.
+         probe_radius_mm (int or float): Radius of the probe used to collect data (in mm).
+         img_idxs (tuple or list, optional): The two image indices to use for circle fitting. Default: None (auto-selects images at 25th and 75th percentile columns of middle row).
+
+     Returns:
+         Saves annotated_data.csv in the dataset_path/annotations directory.
+     """
+     validate_root(dataset_path, must_exist=True)
+     _validate_probe_radius(probe_radius_mm)
+     _validate_indices(img_idxs, dataset_path, target_length=2)
+
+     # Open probe data
+     probe_data_path = os.path.join(dataset_path, "annotations", "probe_data.csv")
+     probe_data = pd.read_csv(probe_data_path)
+
+     # Get middle row
+     middle_row = probe_data.loc[probe_data["y_mm"] == probe_data["y_mm"].median()]
+
+     # Automatically select 2 images if indices not provided
+     if img_idxs is None:
+         # Make sure there are multiple coordinates in the middle row
+         unique_x_coords = middle_row.drop_duplicates(subset='y_mm')
+
+         if len(unique_x_coords) >= 2:
+             # Get the indices of the 25th percentile and 75th percentile x-values in the middle row
+             idx1 = middle_row.loc[middle_row["x_mm"] == middle_row["x_mm"].quantile(0.25)].index[0]
+             idx2 = middle_row.loc[middle_row["x_mm"] == middle_row["x_mm"].quantile(0.75)].index[0]
+
+         else:
+             # Get unique coordinates
+             unique_coords = probe_data.drop_duplicates(subset=['x_mm', 'y_mm'])
+
+             # Sort unique coordinates by x_mm and y_mm
+             sorted_unique_data = unique_coords.sort_values(by=['y_mm', 'x_mm'])
+
+             # Get the 25th and 75th percentile indices
+             idx1 = sorted_unique_data.index[math.floor(len(sorted_unique_data) * 0.25)]
+             idx2 = sorted_unique_data.index[math.floor(len(sorted_unique_data) * 0.75)]
+     else:
+         idx1 = img_idxs[0]
+         idx2 = img_idxs[1]
+
+     if probe_data["x_mm"][idx1] == probe_data["x_mm"][idx2] and probe_data["y_mm"][idx1] == probe_data["y_mm"][idx2]:
+         raise ValueError("Selected images must have different x- and y-coordinates for annotation.")
+
+     # Get the image names and probe coordinates
+     image1_name = os.path.join(dataset_path, "probe_images", probe_data["img_name"][idx1])
+     img1_x_mm = probe_data["x_mm"][idx1]
+     img1_y_mm = probe_data["y_mm"][idx1]
+
+     image2_name = os.path.join(dataset_path, "probe_images", probe_data["img_name"][idx2])
+     img2_x_mm = probe_data["x_mm"][idx2]
+     img2_y_mm = probe_data["y_mm"][idx2]
+
+     # Blank image path
+     blank_image_path = os.path.join(dataset_path, "blank_images", "blank.png")
+
+     # Fit 2 circles
+     circle1_x, circle1_y, circle1_r = _fit_circle(image1_name, blank_image_path)
+     circle2_x, circle2_y, circle2_r = _fit_circle(image2_name, blank_image_path)
+
+     # Compute pixels/mm
+     d_mm = np.sqrt((img2_x_mm - img1_x_mm) ** 2 + (img2_y_mm - img1_y_mm) ** 2)
+     px_per_mm = np.sqrt((circle2_x - circle1_x) ** 2 + (circle2_y - circle1_y) ** 2) / d_mm
+
+     # Fine tune the fitting
+     px_per_mm, annotations = _adjust_fitting(dataset_path, anchor_idx=idx1, px_per_mm=px_per_mm, anchor_data=(circle1_x, circle1_y, circle1_r))
+
+     print("pixels per mm:", px_per_mm)
+
+     # Save metadata file
+     metadata_path = os.path.join(dataset_path, "annotations", 'metadata.json')
+     data = {"px_per_mm": px_per_mm, "probe_radius_mm": probe_radius_mm}
+     with open(metadata_path, "w") as json_file:
+         json.dump(data, json_file, indent=4)
+
+     # Create CSV file with annotated data
+     annotations_path = os.path.join(dataset_path, "annotations", "annotations.csv")
+     annotations.to_csv(annotations_path, index=False)
+
+ def _fit_circle(image_path: Union[str, Path], blank_image_path: Union[str, Path]):
+     """
+     Fits a circle to an image.
+
+     Args:
+         image_path: Path to the image.
+         blank_image_path: Path to the blank image.
+
+     Returns:
+         x: x-coordinate of the circle.
+         y: y-coordinate of the circle.
+         r: radius of the circle.
+     """
+     # Load original image (default view)
+     image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
+     blank_image = cv2.cvtColor(cv2.imread(blank_image_path), cv2.COLOR_BGR2RGB)
+     bitwise_not_blank = cv2.bitwise_not(blank_image)
+
+     # Initial circle position and radius
+     x = image.shape[1] // 2
+     y = image.shape[0] // 2
+     r = 30
+
+     # Flags for image display modes
+     subtract_blank = False
+     bitwise_not = False
+
+     # Disable Matplotlib's conflicting keymaps
+     plt.rcParams['keymap.save'] = []
+     plt.rcParams['keymap.fullscreen'] = []
+
+     # Prepare figure with two subplots: text (left), image (right)
+     fig, (ax_text, ax_img) = plt.subplots(1, 2, figsize=(14, 8), gridspec_kw={'width_ratios': [1, 3]})
+     plt.subplots_adjust(wspace=0.4, bottom=0, top=1, left=0, right=1)
+
+     fig.canvas.manager.set_window_title('Fit Circle to Generated Annotations')
+
+     # Right: Image panel
+     img_artist = ax_img.imshow(image)
+     ax_img.set_axis_off()
+     circle_artist = plt.Circle((x, y), r, color='red', fill=False, linewidth=1)
+     ax_img.add_patch(circle_artist)
+     center_artist, = ax_img.plot(x, y, marker='*', color='lime', markersize=6)
+
+     # Left: Instruction panel
+     ax_text.set_axis_off()
+     ax_text.text(
+         0.30, 0.75,
+         "Commands:\n\nw: Up\ns: Down\na: Left\nd: Right\nr: Bigger\nf: Smaller\nq: Next\n\n\n1: View 1 (RGB image)\n2: View 2 (Difference image)\n3: View 3 (Bitwise not image)",
+         fontsize=20, color='black', va='top', ha='left', wrap=True
+     )
+
+     plt.ion()
+     plt.show(block=False)
+
+     done = False
+
+     def on_key(event):
+         nonlocal x, y, r, done, subtract_blank, bitwise_not, image, blank_image, bitwise_not_blank
+
+         if event.key == 'q':
+             done = True
+         elif event.key in ('w', 'up'):
+             y -= 1
+         elif event.key in ('s', 'down'):
+             y += 1
+         elif event.key in ('a', 'left'):
+             x -= 1
+         elif event.key in ('d', 'right'):
+             x += 1
+         elif event.key == 'r':
+             r += 1
+         elif event.key == 'f':
+             r -= 1
+         elif event.key == '1':  # Normal image
+             subtract_blank = False
+             bitwise_not = False
+             img_artist.set_data(image)
+
+         elif event.key == '2':  # Difference image
+             subtract_blank = not subtract_blank
+             bitwise_not = False
+
+             if subtract_blank:
+                 diff_image = cv2.absdiff(image, blank_image)
+                 img_artist.set_data(diff_image)
+             else:
+                 img_artist.set_data(image)
+
+         elif event.key == '3':  # Bitwise not image
+             bitwise_not = not bitwise_not
+             subtract_blank = False
+
+             if bitwise_not:
+                 bitwise_not_image = cv2.addWeighted(image, 0.5, bitwise_not_blank, 0.5, 0.0)
+                 img_artist.set_data(bitwise_not_image)
+             else:
+                 img_artist.set_data(image)
+
+     fig.canvas.mpl_connect('key_press_event', on_key)
+
+     # Interactive update loop
+     while not done:
+         circle_artist.center = (x, y)
+         circle_artist.set_radius(r)
+         center_artist.set_data([x], [y])
+         fig.canvas.draw_idle()
+         plt.pause(0.01)
+
+     plt.close(fig)
+     plt.ioff()  # Turn off interactive mode
+     fig.canvas.flush_events()  # Flush any pending events
+
+     return x, y, r
+
+ def _adjust_fitting(dataset_path: Union[str, Path], anchor_idx, px_per_mm, anchor_data):
+     """
+     Scales the pixel-to-millimeter calibration using an interactive Matplotlib GUI.
+     Args:
+         dataset_path: Path to the dataset.
+         csv_path: Path to the CSV file.
+         initial_val: Initial pixel/mm ratio.
+         anchor_idx: Index of the anchor image.
+         circle_vals: Values of the anchor circle (x, y, r).
+     Returns:
+         px_per_mm: Pixel/millimeter ratio.
+         calibration_data: Updated calibration dataframe.
+     """
+
+     # Load calibration data
+     calibration_data_path = os.path.join(dataset_path, "annotations", "probe_data.csv")
+     calibration_data = pd.read_csv(calibration_data_path)
+
+     # Load anchor image
+     anchor_image_path = os.path.join(dataset_path, "probe_images", calibration_data["img_name"][anchor_idx])
+     anchor_image = cv2.cvtColor(cv2.imread(anchor_image_path), cv2.COLOR_BGR2RGB)
+     anchor_x_mm = calibration_data["x_mm"][anchor_idx]
+     anchor_y_mm = calibration_data["y_mm"][anchor_idx]
+     anchor_x_px, anchor_y_px, anchor_r_px = anchor_data
+     height, width, _ = anchor_image.shape
+
+     # Add initial annotations (pixel coordinates)
+     calibration_data['x_px'] = anchor_x_px + (calibration_data['x_mm'] - anchor_x_mm) * px_per_mm
+     calibration_data['y_px'] = anchor_y_px + (anchor_y_mm - calibration_data['y_mm']) * px_per_mm
+
+     # Load blank image
+     blank_image_path = os.path.join(dataset_path, "blank_images", "blank.png")
+     blank_image = cv2.cvtColor(cv2.imread(blank_image_path), cv2.COLOR_BGR2RGB)
+
+     # Generate blank mosaic
+     blank_mosaic = np.zeros((height * 3, width * 3, 3), dtype=np.uint8)
+
+     for row in range(3):
+         for col in range(3):
+             blank_mosaic[(row * height):((row + 1) * height),
+                          (col * width):((col + 1) * width), :] = blank_image
+
+     # Create bitwise not mosaic
+     bitwise_not_blank = cv2.bitwise_not(blank_mosaic)
+
+     # Generate 3×3 mosaic
+     image_list = [anchor_idx]
+     mosaic = np.zeros((height * 3, width * 3, 3), dtype=np.uint8)
+     mosaic[:height, :width, :] = anchor_image
+
+     idx = 1
+     while len(image_list) < 9:
+         random_row = calibration_data.sample(n=1)
+
+         # Make sure circles are within the camera's FOV
+         if random_row["x_px"].values[0] > width * 0.15 and random_row["x_px"].values[0] < width * 0.85 and random_row["y_px"].values[0] > height * 0.15 and random_row["y_px"].values[0] < height * 0.85:
+             image_path = os.path.join(dataset_path, "probe_images", random_row["img_name"].values[0])
+             image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
+
+             image_list.append(random_row.index[0])
+
+             row = math.floor(idx / 3)
+             col = idx % 3
+
+             mosaic[(height * row):(height * (row + 1)),
+                    (width * col):(width * (col + 1)), :] = image
+             idx += 1
+
+     # Flags for image display modes
+     subtract_blank = False
+     bitwise_not = False
+
+     # Initialize Matplotlib figure
+     plt.rcParams['keymap.save'] = []
+     plt.rcParams['keymap.fullscreen'] = []
+
+     fig, (ax_text, ax_img) = plt.subplots(1, 2, figsize=(14, 8), gridspec_kw={'width_ratios': [1, 3]})
+     plt.subplots_adjust(wspace=0.4, bottom=0, top=1, left=0, right=1)
+     fig.canvas.manager.set_window_title('Validate and Refine Calibration Annotations')
+
+     # Right panel: image grid
+     img_artist = ax_img.imshow(mosaic)
+     ax_img.set_axis_off()
+
+     ax_img.text(
+         width * 0.19,
+         height * 0.1,
+         "Anchor Image",
+         color='yellow',
+         fontsize=13,
+         bbox=dict(facecolor='black', alpha=0.1, boxstyle='round,pad=0.3')
+     )
+
+
+     # Overlay circles
+     circle_artists = []
+     for i in range(9):
+         row = math.floor(i / 3)
+         col = i % 3
+         idx = image_list[i]
+         x = int(calibration_data.loc[idx, 'x_px']) + col * width
+         y = int(calibration_data.loc[idx, 'y_px']) + row * height
+         circ = Circle((x, y), anchor_r_px, color='red', fill=False, lw=1)
+         ax_img.add_patch(circ)
+         circle_artists.append(circ)
+
+     # Left panel: instructions
+     ax_text.set_axis_off()
+     ax_text.text(
+         0.30, 0.75,
+         f"Commands:\n\nw: Up\ns: Down\na: Left\nd: Right\nr: Increase pixel/mm value\nf: Decrease pixel/mm value\nq: Quit\n\n\n1: View 1 (RGB image)\n2: View 2 (Difference image)\n3: View 3 (Bitwise not image)",
+         fontsize=20, color='black', va='top', ha='left', wrap=True
+     )
+
+     plt.ion()
+     plt.show(block=False)
+
+     done = False
+
+     # Keyboard event handler
+     def on_key(event):
+         nonlocal anchor_x_px, anchor_y_px, anchor_r_px, px_per_mm, done, subtract_blank, bitwise_not, mosaic, blank_mosaic, bitwise_not_blank
+
+         if event.key == 'q':
+             done = True
+         elif event.key in ('w', 'up'):
+             anchor_y_px -= 1
+         elif event.key in ('s', 'down'):
+             anchor_y_px += 1
+         elif event.key in ('a', 'left'):
+             anchor_x_px -= 1
+         elif event.key in ('d', 'right'):
+             anchor_x_px += 1
+         elif event.key == 'r':
+             px_per_mm += 1
+         elif event.key == 'f':
+             px_per_mm -= 1
+         elif event.key == '1':
+             subtract_blank = False
+             bitwise_not = False
+             img_artist.set_data(mosaic)
+         elif event.key == '2':
+             subtract_blank = not subtract_blank
+             bitwise_not = False
+
+             if subtract_blank:
+                 diff_mosaic = cv2.absdiff(mosaic, blank_mosaic)
+                 img_artist.set_data(diff_mosaic)
+             else:
+                 img_artist.set_data(mosaic)
+
+         elif event.key == '3':
+             bitwise_not = not bitwise_not
+             subtract_blank = False
+
+             if bitwise_not:
+                 bitwise_not_mosaic = cv2.addWeighted(mosaic, 0.5, bitwise_not_blank, 0.5, 0.0)
+                 img_artist.set_data(bitwise_not_mosaic)
+             else:
+                 img_artist.set_data(mosaic)
+
+         # Recompute coordinates
+         calibration_data['x_px'] = anchor_x_px + (calibration_data['x_mm'] - anchor_x_mm) * px_per_mm
+         calibration_data['y_px'] = anchor_y_px + (anchor_y_mm - calibration_data['y_mm']) * px_per_mm
+
+         for i in range(9):
+             row = math.floor(i / 3)
+             col = i % 3
+             idx = image_list[i]
+             x = int(calibration_data.loc[idx, 'x_px']) + col * width
+             y = int(calibration_data.loc[idx, 'y_px']) + row * height
+             circle_artists[i].center = (x, y)
+
+     fig.canvas.mpl_connect('key_press_event', on_key)
+
+     # Main interactive loop
+     while not done:
+         fig.canvas.draw_idle()
+         plt.pause(0.01)
+
+     plt.close(fig)
+
+     return px_per_mm, calibration_data
+
+ def visualize_annotations(dataset_path: Union[str, Path], img_idxs=None, save_path=None):
+     """
+     Visualizes precomputed circles on images from the annotated data.
+
+     Args:
+         dataset_path (str or pathlib.Path): Path to the dataset directory.
+         img_idxs (tuple or list, optional): Image indices to visualize. By default, shows 3 random images.
+         save_path (str): Optional path to save the visualization
+
+     Returns:
+         None: Displays the image(s) with circles overlaid
+     """
+     validate_root(dataset_path, must_exist=True)
+     _validate_indices(img_idxs, dataset_path)
+
+     # Get paths
+     annotation_path = os.path.join(dataset_path, "annotations", "annotations.csv")
+     metadata_path = os.path.join(dataset_path, "annotations", "metadata.json")
+     probe_images_path = os.path.join(dataset_path, "probe_images")
+
+     # Load the annotated data
+     data = pd.read_csv(annotation_path)
+
+     # Get circle radius from metadata
+     with open(metadata_path, "r") as json_file:
+         metadata = json.load(json_file)
+
+     probe_radius_mm = metadata["probe_radius_mm"]
+     px_per_mm = metadata["px_per_mm"]
+     radius = probe_radius_mm * px_per_mm
+
+     # If no indices provided, select 3 random images
+     if img_idxs is None:
+         # Get 3 random indices from data
+         img_idxs = data.sample(n=3).index.tolist()
+
+     # Create subplot layout
+     n_images = len(img_idxs)
+     if n_images == 1:
+         fig, ax = plt.subplots(1, 1, figsize=(8, 8))
+         axes = [ax]
+     else:
+         cols = min(3, n_images)
+         rows = (n_images + cols - 1) // cols
+         fig, axes = plt.subplots(rows, cols, figsize=(6*cols, 6*rows))
+         if rows == 1:
+             axes = axes if n_images > 1 else [axes]
+         else:
+             axes = axes.flatten()
+
+     for i, idx in enumerate(img_idxs):
+         # Get image info
+         img_name = data.iloc[idx]['img_name']
+         x_px = data.iloc[idx]['x_px']
+         y_px = data.iloc[idx]['y_px']
+         x_mm = data.iloc[idx]['x_mm']
+         y_mm = data.iloc[idx]['y_mm']
+         depth_mm = data.iloc[idx]['penetration_depth_mm']
+
+         # Load image
+         img_path = os.path.join(probe_images_path, img_name)
+         if not os.path.exists(img_path):
+             print(f"Image not found: {img_path}")
+             continue
+
+         # Read image using OpenCV and convert to RGB
+         img = cv2.imread(img_path)
+         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+         # Draw circle on image
+         img_with_circle = img_rgb.copy()
+         # Keep your float coordinates
+         center_x = float(x_px)
+         center_y = float(y_px)
+         print("center", center_x, center_y)
+         # Draw circle using Matplotlib (subpixel precision preserved)
+         circle = Circle((center_x, center_y), radius, color='red', fill=False, linewidth=2)
+         axes[i].imshow(img_rgb)
+         axes[i].add_patch(circle)
+
+         # Draw the exact center as a green dot
+         axes[i].plot(center_x, center_y, '*', color='lime', markersize=6)
+
+         # Display in subplot
+         axes[i].imshow(img_with_circle)
+         axes[i].set_title(f'{img_name}\n'
+                           f'Position (mm): ({x_mm}, {y_mm})\n'
+                           f'Pixels (px): ({float(x_px):.1f}, {float(y_px):.1f})')
+         axes[i].axis('off')
+
+     # Hide unused subplots
+     for i in range(len(img_idxs), len(axes)):
+         axes[i].axis('off')
+
+
+     fig.set_size_inches(14, 9)
+     plt.tight_layout()
+
+     if save_path:
+         plt.savefig(save_path, dpi=150, bbox_inches='tight')
+         print(f"Visualization saved to: {save_path}")
+
+     plt.show()
+
+ def _validate_probe_radius(probe_radius_mm):
+     """
+     Validates the probe radius specified by the user.
+
+     Args:
+         probe_radius_mm: Probe radius specified by the user.
+     Returns:
+         None.
+     Raises:
+         ValueError: If the probe radius is not specified or invalid.
+     """
+     if probe_radius_mm is None:
+         raise ValueError(
+             "Probe radius cannot be None.\n"
+         )
+     if not isinstance(probe_radius_mm, (int, float)) or probe_radius_mm <= 0:
+         raise ValueError(
+             "Probe radius must be a positive number (int or float).\n"
+         )
+
+ def _validate_indices(idxs, dataset_path, target_length=None):
+     """
+     Validates the image indices specified by the user.
+
+     Args:
+         idxs: Tuple of indices specified by the user.
+     Returns:
+         None.
+     Raises:
+         ValueError: If the indices are not specified or invalid.
+     """
+     if idxs is not None:
+         # Check if data type is correct
+         if target_length is not None:
+             if not (isinstance(idxs, (tuple, list)) and len(idxs) == target_length and all(isinstance(i, int) for i in idxs)):
+                 raise ValueError(
+                     f"Image indices must be a tuple or list of {target_length} integers.\n"
+                 )
+
+         else:
+             if not (isinstance(idxs, (tuple, list)) and all(isinstance(i, int) for i in idxs)):
+                 raise ValueError(
+                     "Image indices must be a tuple or list of integers.\n"
+                 )
+
+         # Check if indices are within range
+         annotation_path = os.path.join(dataset_path, "annotations", "annotations.csv")
+         data = pd.read_csv(annotation_path)
+         max_index = len(data) - 1
+
+         for idx in idxs:
+             if idx < 0 or idx > max_index:
+                 raise ValueError(f"Image index {idx} is out of range. Valid range is 0 to {max_index}.")
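annotate() and visualize_annotations() are both exported at the top level. A hypothetical end-to-end annotation pass; the dataset path and probe radius are made up, while the directory layout (annotations/, probe_images/, blank_images/) comes from the code above.

```python
from py3DCal import annotate, visualize_annotations

# Opens the interactive circle-fitting and refinement GUIs, then writes
# annotations.csv and metadata.json under ./digit_data/annotations/.
annotate("./digit_data", probe_radius_mm=1.5)

# Spot-check three random annotated images against the fitted circles.
visualize_annotations("./digit_data")
```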
{py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/lib/depthmaps.py
@@ -1,6 +1,7 @@
  from pyexpat import model
  import numpy as np
  import torch
+ from torch import nn
  from pathlib import Path
  from typing import Union
  from PIL import Image
@@ -10,7 +11,7 @@ from .validate_parameters import validate_device
  from .add_coordinate_embeddings import add_coordinate_embeddings
  from .fast_poisson import fast_poisson
 
- def get_depthmap(model, image_path: Union[str, Path], blank_image_path: Union[str, Path], device='cpu') -> np.ndarray:
+ def get_depthmap(model: nn.Module, image_path: Union[str, Path], blank_image_path: Union[str, Path], device='cpu') -> np.ndarray:
@@ -18,6 +19,7 @@ def get_depthmap(model, image_path: Union[str, Path], blank_image_path: Union[st
          image_path (str or pathlib.Path): Path to the input image.
          blank_image_path (str or pathlib.Path): Path to the blank image.
          device (str, optional): Device to run the model on. Defaults to 'cpu'.
+
      Returns:
          depthmap (numpy.ndarray): The computed depthmap.
      """
@@ -44,12 +46,15 @@ def get_depthmap(model, image_path: Union[str, Path], blank_image_path: Union[st
 
      return depthmap
 
- def save_2d_depthmap(model, image_path: Union[str, Path], blank_image_path: Union[str, Path], device='cpu', save_path: Union[str, Path] = Path("depthmap.png")):
+ def save_2d_depthmap(model: nn.Module, image_path: Union[str, Path], blank_image_path: Union[str, Path], device='cpu', save_path: Union[str, Path] = Path("depthmap.png")):
      """
      Save an image of the depthmap for a given input image.
      Args:
+         model (nn.Module): A model which takes in an image and outputs gradient maps.
          image_path (str): Path to the input image.
          save_path (str or pathlib.Path): Path to save the depthmap image.
+         blank_image_path (str): Path to the blank image.
+         device (str, optional): Device to run the model on. Defaults to 'cpu'.
 
      Returns:
          None.
@@ -58,12 +63,15 @@ def save_2d_depthmap(model, image_path: Union[str, Path], blank_image_path: Unio
 
      plt.imsave(save_path, depthmap, cmap='viridis')
 
- def show_2d_depthmap(model, image_path: Union[str, Path], blank_image_path: Union[str, Path], device='cpu'):
+ def show_2d_depthmap(model: nn.Module, image_path: Union[str, Path], blank_image_path: Union[str, Path], device='cpu'):
      """
      Show the depthmap for a given input image.
 
      Args:
+         model (nn.Module): A model which takes in an image and outputs gradient maps.
          image_path (str): Path to the input image.
+         blank_image_path (str): Path to the blank image.
+         device (str, optional): Device to run the model on. Defaults to 'cpu'.
 
      Returns:
          None.
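The signatures above now document all four parameters. A sketch of how the depthmap helpers might be driven; TouchNet's constructor keywords and the SensorType member are assumptions inferred from the touchnet.py hunk later in this diff, and the image paths are made up.

```python
from py3DCal import get_depthmap, show_2d_depthmap, SensorType
from py3DCal.model_training.models.touchnet import TouchNet

# Constructor arguments are assumptions based on the touchnet.py hunk.
model = TouchNet(load_pretrained=True, sensor_type=SensorType.DIGIT)

depth = get_depthmap(model, "probe.png", "blank.png", device="cpu")
show_2d_depthmap(model, "probe.png", "blank.png", device="cpu")
```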
{py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/lib/fast_poisson.py
@@ -2,25 +2,25 @@ import numpy as np
  from scipy.fftpack import dst
  from scipy.fftpack import idst
 
- def fast_poisson(Fx, Fy):
+ def fast_poisson(Gx, Gy):
      """
      Fast Poisson solver for 2D images.
      Args:
-         Fx: 2D array of x-derivatives
-         Fy: 2D array of y-derivatives
+         Gx (np.ndarray): 2D array of x-derivatives
+         Gy (np.ndarray): 2D array of y-derivatives
      Returns:
-         img: 2D array of the solution to the Poisson equation
+         depthmap (np.ndarray): 2D array of the solution to the Poisson equation
      """
 
-     height, width = Fx.shape
+     height, width = Gx.shape
 
-     # Compute the difference of the Fx array in the x-direction to approximate the second derivative in the x-direction (only for interior)
-     Fxx = Fx[1:-1,1:-1] - Fx[1:-1,:-2]
-     # Compute the difference of the Fy array in the y-direction to approximate the second derivative in the y-direction (only for interior)
-     Fyy = Fy[1:-1,1:-1] - Fy[:-2,1:-1]
+     # Compute the difference of the Gx array in the x-direction to approximate the second derivative in the x-direction (only for interior)
+     Gxx = Gx[1:-1,1:-1] - Gx[1:-1,:-2]
+     # Compute the difference of the Gy array in the y-direction to approximate the second derivative in the y-direction (only for interior)
+     Gyy = Gy[1:-1,1:-1] - Gy[:-2,1:-1]
 
      # Combine the two second derivatives to form the source term for the Poisson equation, g
-     g = Fxx + Fyy
+     g = Gxx + Gyy
 
      # Apply the Discrete Sine Transform (DST) to the 2D array g (row-wise transform)
      g_sinx = dst(g, norm='ortho')
@@ -46,6 +46,6 @@ def fast_poisson(Fx, Fy):
      # Note: The norm='ortho' option in the DST and IDST ensures that the transforms are orthonormal, maintaining energy conservation in the transforms
 
      # Pad the result (which is only for the interior) with 0's at the border because we are assuming fixed boundary conditions
-     img = np.pad(g_xy, pad_width=1, mode='constant')
+     depthmap = np.pad(g_xy, pad_width=1, mode='constant')
 
-     return img
+     return depthmap
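The rename from Fx/Fy to Gx/Gy is cosmetic; behavior is unchanged. A self-contained check of the solver on a synthetic surface (this example is not from the package; the surface vanishing on the border matches the solver's fixed-boundary assumption):

```python
import numpy as np
from py3DCal import fast_poisson

# Smooth test surface that is zero on the boundary.
h, w = 64, 64
y, x = np.mgrid[0:h, 0:w]
z = np.sin(np.pi * x / (w - 1)) * np.sin(np.pi * y / (h - 1))

# Backward-difference gradient estimates serve as Gx, Gy.
Gx = np.zeros_like(z)
Gx[:, 1:] = z[:, 1:] - z[:, :-1]
Gy = np.zeros_like(z)
Gy[1:, :] = z[1:, :] - z[:-1, :]

depth = fast_poisson(Gx, Gy)
# Up to discretization error, the surface is recovered.
print(np.abs(depth - z).mean())
```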
py3dcal-1.0.7/py3DCal/model_training/lib/validate_parameters.py ADDED
@@ -0,0 +1,87 @@
+ import os
+ import torch
+ from typing import Union
+ from pathlib import Path
+
+ def validate_device(device: str):
+     """
+     Validates the device by converting it to a torch.device object.
+     Args:
+         device (str): Device to run the model on.
+     Returns:
+         None.
+     Raises:
+         ValueError: If the device is not specified or invalid.
+     """
+     try:
+         device = torch.device(device)
+     except Exception as e:
+         raise ValueError(
+             f"Invalid device '{device}'. Valid options include:\n"
+             " - 'cpu': CPU processing\n"
+             " - 'cuda' or 'cuda:0': NVIDIA GPU\n"
+             " - 'mps': Apple Silicon GPU\n"
+             "See: https://pytorch.org/docs/stable/tensor_attributes.html#torch.device"
+         ) from e
+
+ def validate_root(root, must_exist=False):
+     """
+     Validates the root path specified by the user.
+
+     Args:
+         root: root path specified by the user.
+     Returns:
+         None.
+     Raises:
+         ValueError: If the root is not specified or invalid.
+     """
+     if root is None:
+         raise ValueError(
+             "root directory cannot be None.\n"
+         )
+
+     if not isinstance(root, (str, Path)):
+         raise ValueError(
+             "root directory must be a valid file system path as a string or pathlib.Path object\n"
+         )
+
+     if must_exist and not os.path.exists(root):
+         raise ValueError(
+             f"root directory '{root}' does not exist.\n"
+         )
+
+ def validate_dataset(root, subtract_blank: bool):
+     """
+     Validates the dataset path specified by the user.
+
+     Args:
+         root: Dataset path specified by the user.
+         subtract_blank (bool): Whether to subtract blank image.
+     Returns:
+         None.
+     Raises:
+         FileNotFoundError: If necessary files do not exist.
+     """
+     validate_root(root)
+
+     annotation_path = os.path.join(root, "annotations", "annotations.csv")
+     metadata_path = os.path.join(root, "annotations", "metadata.json")
+     probe_images_path = os.path.join(root, "probe_images")
+     blank_image_path = os.path.join(root, "blank_images", "blank.png")
+
+     # Check if root directory exists
+     if not os.path.exists(root):
+         raise FileNotFoundError(f"Dataset root directory '{root}' does not exist.")
+
+     # Check if all the necessary files exist
+     if not os.path.exists(annotation_path):
+         raise FileNotFoundError(f"annotations.csv file not found in annotations/ directory. Use py3DCal.annotations() function to create it.")
+
+     if not os.path.exists(metadata_path):
+         raise FileNotFoundError(f"metadata.json file not found in annotations/ directory. Use py3DCal.annotations() function to create it.")
+
+     if not os.path.exists(probe_images_path):
+         raise FileNotFoundError(f"probe_images/ directory not found in dataset root.")
+
+     if subtract_blank and not os.path.exists(blank_image_path):
+         raise FileNotFoundError(f"blank.png file not found in blank_images/ directory.")
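Assumed behavior based on the code above: validate_dataset() reports the first missing artifact, so a dataset problem surfaces as a readable FileNotFoundError instead of a downstream crash. Illustrative only; the directory name is made up.

```python
from py3DCal.model_training.lib.validate_parameters import validate_dataset

try:
    validate_dataset("./not_a_dataset", subtract_blank=True)
except FileNotFoundError as err:
    print(err)  # e.g. "Dataset root directory './not_a_dataset' does not exist."
```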
{py3dcal-1.0.0 → py3dcal-1.0.7}/py3DCal/model_training/models/touchnet.py
@@ -111,7 +111,7 @@ class TouchNet(nn.Module):
          if not load_pretrained and sensor_type is not None:
              print("Warning: sensor_type parameter is ignored when load_pretrained is False.")
 
-         if not load_pretrained and root is not ".":
+         if not load_pretrained and root != ".":
              print("Warning: root parameter is ignored when load_pretrained is False.")
 
      def _load_pretrained_model(self, root, sensor_type):
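The change above is a real bug fix, not cosmetics: `is not` tests object identity, and CPython gives no guarantee that a runtime-built string is the same object as a "." literal, so the warning could fire (or fail to fire) unpredictably. A quick standalone demonstration of the pitfall, not package code:

```python
a = "py3dcal"
b = "".join(["py3", "dcal"])  # equal value, usually a distinct object
print(a == b)  # True  (equality compares values)
print(a is b)  # False in CPython (identity compares objects)
```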
{py3dcal-1.0.0 → py3dcal-1.0.7/py3dcal.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: py3dcal
- Version: 1.0.0
+ Version: 1.0.7
  Summary: UNKNOWN
  Home-page: https://github.com/rohankotanu/py3DCal
  Author: Rohan Kota
@@ -12,5 +12,16 @@ Classifier: License :: OSI Approved :: MIT License
  Description-Content-Type: text/markdown
  License-File: LICENSE
 
- # For instructions on how to use this library, please visit https://rohankotanu.github.io/3DCal/
+ # Installing the package
+ ```python
+ pip3 install py3dcal
+ ```
+
+ <br>
+
+ # For instructions on how to use this package, please visit https://rohankotanu.github.io/3DCal/
+
+ <br>
+
+ **Note:** Project was tested with Python 3.10.4
 
{py3dcal-1.0.0/py3DCal.egg-info → py3dcal-1.0.7/py3dcal.egg-info}/SOURCES.txt
@@ -3,26 +3,20 @@ MANIFEST.in
  README.md
  setup.py
  py3DCal/__init__.py
- py3DCal.egg-info/PKG-INFO
- py3DCal.egg-info/SOURCES.txt
- py3DCal.egg-info/dependency_links.txt
- py3DCal.egg-info/entry_points.txt
- py3DCal.egg-info/requires.txt
- py3DCal.egg-info/top_level.txt
  py3DCal/data_collection/Calibrator.py
  py3DCal/data_collection/__init__.py
- py3DCal/data_collection/Printers/Printer.py
- py3DCal/data_collection/Printers/__init__.py
- py3DCal/data_collection/Printers/Ender3/Ender3.py
- py3DCal/data_collection/Printers/Ender3/__init__.py
- py3DCal/data_collection/Sensors/Sensor.py
- py3DCal/data_collection/Sensors/__init__.py
- py3DCal/data_collection/Sensors/DIGIT/DIGIT.py
- py3DCal/data_collection/Sensors/DIGIT/__init__.py
- py3DCal/data_collection/Sensors/DIGIT/default.csv
- py3DCal/data_collection/Sensors/GelsightMini/GelsightMini.py
- py3DCal/data_collection/Sensors/GelsightMini/__init__.py
- py3DCal/data_collection/Sensors/GelsightMini/default.csv
+ py3DCal/data_collection/printers/Printer.py
+ py3DCal/data_collection/printers/__init__.py
+ py3DCal/data_collection/printers/Ender3/Ender3.py
+ py3DCal/data_collection/printers/Ender3/__init__.py
+ py3DCal/data_collection/sensors/Sensor.py
+ py3DCal/data_collection/sensors/__init__.py
+ py3DCal/data_collection/sensors/DIGIT/DIGIT.py
+ py3DCal/data_collection/sensors/DIGIT/__init__.py
+ py3DCal/data_collection/sensors/DIGIT/default.csv
+ py3DCal/data_collection/sensors/GelsightMini/GelsightMini.py
+ py3DCal/data_collection/sensors/GelsightMini/__init__.py
+ py3DCal/data_collection/sensors/GelsightMini/default.csv
  py3DCal/model_training/__init__.py
  py3DCal/model_training/datasets/DIGIT_dataset.py
  py3DCal/model_training/datasets/GelSightMini_dataset.py
@@ -31,6 +25,7 @@ py3DCal/model_training/datasets/split_dataset.py
  py3DCal/model_training/datasets/tactile_sensor_dataset.py
  py3DCal/model_training/lib/__init__.py
  py3DCal/model_training/lib/add_coordinate_embeddings.py
+ py3DCal/model_training/lib/annotate_dataset.py
  py3DCal/model_training/lib/depthmaps.py
  py3DCal/model_training/lib/fast_poisson.py
  py3DCal/model_training/lib/get_gradient_map.py
{py3dcal-1.0.0 → py3dcal-1.0.7}/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as f:
 
  setup(
      name='py3dcal',
-     version='1.0.0',
+     version='1.0.7',
      url="https://github.com/rohankotanu/py3DCal",
      author="Rohan Kota",
      author_email="rohankota2026@u.northwestern.edu",
py3dcal-1.0.0/README.md DELETED
@@ -1 +0,0 @@
- # For instructions on how to use this library, please visit https://rohankotanu.github.io/3DCal/
py3dcal-1.0.0/py3DCal/__init__.py DELETED
@@ -1,12 +0,0 @@
- from .data_collection.Calibrator import Calibrator
- from .data_collection.Printers.Printer import Printer
- from .data_collection.Sensors.Sensor import Sensor
- from .data_collection.Printers.Ender3.Ender3 import Ender3
- from .data_collection.Sensors.DIGIT.DIGIT import DIGIT
- from .data_collection.Sensors.GelsightMini.GelsightMini import GelsightMini
- from .model_training import datasets, models
- from .model_training.datasets.split_dataset import split_dataset
- from .model_training.models.touchnet import SensorType
- from .model_training.lib.train_model import train_model
- from .model_training.lib.depthmaps import get_depthmap, save_2d_depthmap, show_2d_depthmap
- from .utils.utils import list_com_ports
py3dcal-1.0.0/py3DCal/model_training/lib/validate_parameters.py DELETED
@@ -1,45 +0,0 @@
- import torch
- from typing import Union
- from pathlib import Path
-
- def validate_device(device: str):
-     """
-     Validates the device by converting it to a torch.device object.
-     Args:
-         device (str): Device to run the model on.
-     Returns:
-         None.
-     Raises:
-         ValueError: If the device is not specified or invalid.
-     """
-     try:
-         device = torch.device(device)
-     except Exception as e:
-         raise ValueError(
-             f"Invalid device '{device}'. Valid options include:\n"
-             " - 'cpu': CPU processing\n"
-             " - 'cuda' or 'cuda:0': NVIDIA GPU\n"
-             " - 'mps': Apple Silicon GPU\n"
-             "See: https://pytorch.org/docs/stable/tensor_attributes.html#torch.device"
-         ) from e
-
- def validate_root(root):
-     """
-     Validates the root path specified by the user.
-
-     Args:
-         root: root path specified by the user.
-     Returns:
-         None.
-     Raises:
-         ValueError: If the root is not specified or invalid.
-     """
-     if root is None:
-         raise ValueError(
-             "root directory cannot be None.\n"
-         )
-
-     if not isinstance(root, (str, Path)):
-         raise ValueError(
-             "root directory must be a valid file system path as a string or pathlib.Path object\n"
-         )