py3dcal-1.0.3-py3-none-any.whl → py3dcal-1.0.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. py3DCal/__init__.py +6 -5
  2. py3DCal/data_collection/Calibrator.py +10 -8
  3. py3DCal/data_collection/{Sensors → sensors}/GelsightMini/GelsightMini.py +5 -5
  4. py3DCal/data_collection/{Sensors → sensors}/Sensor.py +10 -1
  5. py3DCal/model_training/datasets/tactile_sensor_dataset.py +4 -3
  6. py3DCal/model_training/lib/annotate_dataset.py +560 -0
  7. py3DCal/model_training/lib/depthmaps.py +11 -3
  8. py3DCal/model_training/lib/validate_parameters.py +44 -2
  9. {py3dcal-1.0.3.dist-info → py3dcal-1.0.6.dist-info}/METADATA +1 -1
  10. py3dcal-1.0.6.dist-info/RECORD +40 -0
  11. py3DCal/model_training/lib/validate_device.py +0 -22
  12. py3DCal/model_training/touchnet/__init__.py +0 -0
  13. py3DCal/model_training/touchnet/dataset.py +0 -78
  14. py3DCal/model_training/touchnet/touchnet.py +0 -736
  15. py3DCal/model_training/touchnet/touchnet_architecture.py +0 -72
  16. py3dcal-1.0.3.dist-info/RECORD +0 -44
  17. /py3DCal/data_collection/{Printers → printers}/Ender3/Ender3.py +0 -0
  18. /py3DCal/data_collection/{Printers → printers}/Ender3/__init__.py +0 -0
  19. /py3DCal/data_collection/{Printers → printers}/Printer.py +0 -0
  20. /py3DCal/data_collection/{Printers → printers}/__init__.py +0 -0
  21. /py3DCal/data_collection/{Sensors → sensors}/DIGIT/DIGIT.py +0 -0
  22. /py3DCal/data_collection/{Sensors → sensors}/DIGIT/__init__.py +0 -0
  23. /py3DCal/data_collection/{Sensors → sensors}/DIGIT/default.csv +0 -0
  24. /py3DCal/data_collection/{Sensors → sensors}/GelsightMini/__init__.py +0 -0
  25. /py3DCal/data_collection/{Sensors → sensors}/GelsightMini/default.csv +0 -0
  26. /py3DCal/data_collection/{Sensors → sensors}/__init__.py +0 -0
  27. {py3dcal-1.0.3.dist-info → py3dcal-1.0.6.dist-info}/LICENSE +0 -0
  28. {py3dcal-1.0.3.dist-info → py3dcal-1.0.6.dist-info}/WHEEL +0 -0
  29. {py3dcal-1.0.3.dist-info → py3dcal-1.0.6.dist-info}/entry_points.txt +0 -0
  30. {py3dcal-1.0.3.dist-info → py3dcal-1.0.6.dist-info}/top_level.txt +0 -0
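The main structural change in 1.0.6 is the lowercasing of the data_collection subpackages (Sensors → sensors, Printers → printers) together with the removal of the old model_training/touchnet package. Downstream code that imported the capitalized paths would need updating; below is a minimal sketch, assuming the hardware classes keep the names suggested by their module files (DIGIT, Ender3 — not confirmed by this diff):

```python
# Hypothetical import update for the 1.0.6 directory rename.
# Old (1.0.3) paths used capitalized subpackages:
#   from py3DCal.data_collection.Sensors.DIGIT.DIGIT import DIGIT
#   from py3DCal.data_collection.Printers.Ender3.Ender3 import Ender3

# New (1.0.6) paths are lowercased; the class names are assumed unchanged:
from py3DCal.data_collection.sensors.DIGIT.DIGIT import DIGIT
from py3DCal.data_collection.printers.Ender3.Ender3 import Ender3
```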
py3dcal-1.0.6.dist-info/RECORD
@@ -0,0 +1,40 @@
+ py3DCal/__init__.py,sha256=roGUqVPYu3kB2TMhvWqGo95GyyJxH-zhlWgnJQF4TvM,836
+ py3DCal/data_collection/Calibrator.py,sha256=X9LaLrIPv2maBZR3CwZuWrtKISjsP3GxNEx8UvwVbmc,11586
+ py3DCal/data_collection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ py3DCal/data_collection/printers/Printer.py,sha256=ouqgWuJWk8PPjhTRFwolnupXbE0SzO819LIgw1ug-7s,1628
+ py3DCal/data_collection/printers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ py3DCal/data_collection/printers/Ender3/Ender3.py,sha256=pkE9Kt-mMH-fpZC5Gl6YyDPAxOdfZxp4Z7s68N7D_is,2239
+ py3DCal/data_collection/printers/Ender3/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ py3DCal/data_collection/sensors/Sensor.py,sha256=o4tdkPMU2RF27OcOoxLABE4-K7R7Cf-d7kiT-KEDwAk,1063
+ py3DCal/data_collection/sensors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ py3DCal/data_collection/sensors/DIGIT/DIGIT.py,sha256=1Jc1qXwinsOFjJ6-QUoUcXOBFjG4M6ylnUjbWMe3NSc,1268
+ py3DCal/data_collection/sensors/DIGIT/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ py3DCal/data_collection/sensors/DIGIT/default.csv,sha256=OqjTE9b5uAEAxrHyjsxS7UyUWHgjO8yWPN0gVbBj7_Q,26728
+ py3DCal/data_collection/sensors/GelsightMini/GelsightMini.py,sha256=nQ8JbiQypLCdTLM4WBNmXB_FFQ_yyxJ9gaV-OoE08Gg,1193
+ py3DCal/data_collection/sensors/GelsightMini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ py3DCal/data_collection/sensors/GelsightMini/default.csv,sha256=lavPHcJ6o4VkvMvOk7lcdRCp9dOJxg_VrPNayf9zVvM,26449
+ py3DCal/model_training/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ py3DCal/model_training/datasets/DIGIT_dataset.py,sha256=IYRqWDawbTTe4IOVjZuKVr0yVNBe1XLGC6PoDxsTMfo,3017
+ py3DCal/model_training/datasets/GelSightMini_dataset.py,sha256=H8Fr_4f3HDHLLl6KshRfqt0FP8-3d4n9XRK0xfPcH0k,3070
+ py3DCal/model_training/datasets/__init__.py,sha256=vqrB177ZXrBmqDnL472EWleJS6Y-BxYEy2Ao9hWWDHc,137
+ py3DCal/model_training/datasets/split_dataset.py,sha256=AzNJlTgcXGa9AdHJnVJYNEyv__OuNHZAMB76Haqc-io,1351
+ py3DCal/model_training/datasets/tactile_sensor_dataset.py,sha256=LuSQYNHY5vkOihlJpG1PIs9EtxYtVW5x3ac6wgRTXLQ,3314
+ py3DCal/model_training/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ py3DCal/model_training/lib/add_coordinate_embeddings.py,sha256=8wit43RIx28IKEB82SnH_Of09FAmiWM3jgOeXpamM1I,1198
+ py3DCal/model_training/lib/annotate_dataset.py,sha256=J_CD4sc0vVp9Lq8cH-GWeoI98PwgU7dEV3bRLVAsFbA,21547
+ py3DCal/model_training/lib/depthmaps.py,sha256=eW9rs4Ch64SwgwoTc1riauolX-IDxrYXmOJ0qQqQAgE,3183
+ py3DCal/model_training/lib/fast_poisson.py,sha256=wJ5MTkSCxkFU3wUx-zomvIYPcAyEpPZj-LX7JQOx8JE,2252
+ py3DCal/model_training/lib/get_gradient_map.py,sha256=IbCigrK_-6ZkeOSaHZAIhMu2pFmkSpWAaz1EjUtenCM,1438
+ py3DCal/model_training/lib/precompute_gradients.py,sha256=zc1uvishZP7PjBWYF2VSrIMCtEkLrTPtLktOTpCh9P8,1860
+ py3DCal/model_training/lib/train_model.py,sha256=fxFIfKWp3WA1Aa2IEczKBJCivVyVovj7IW2HqNw5IlE,4016
+ py3DCal/model_training/lib/validate_parameters.py,sha256=My8cbGPSWsVpMB54jydbLYgfefWPMLGvr4oiPyLImb0,3052
+ py3DCal/model_training/models/__init__.py,sha256=FTMTEyUglpRIFJ4hi3Z7y1KdScGDVVg_OxYXip4b8wg,42
+ py3DCal/model_training/models/touchnet.py,sha256=JIfhwXX0uIk6UcN2tmCLoiH9il9U2IByjbjYIjQEIqc,8079
+ py3DCal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ py3DCal/utils/utils.py,sha256=hgTyWZuBXfo9lxLnOLd0445Aw2-uARtKGXuBhZmz-Z0,995
+ py3dcal-1.0.6.dist-info/LICENSE,sha256=D95ljbgz6PW9niwHP26EWFN77QBvepSCsMKGp0mRVFM,1066
+ py3dcal-1.0.6.dist-info/METADATA,sha256=xwSfMW_oF1trG9HODqx8aokzXveNxek9vN_IuP5JbQM,882
+ py3dcal-1.0.6.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ py3dcal-1.0.6.dist-info/entry_points.txt,sha256=_N1ruxvLEyZmSAaPsCx8kEzbYSJ5bHG5S8bvpua_X5E,59
+ py3dcal-1.0.6.dist-info/top_level.txt,sha256=NbatjyXjN_E6UMifZpkx-ohahGQH_ZFvqovwmvU7FMA,8
+ py3dcal-1.0.6.dist-info/RECORD,,
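Each RECORD entry above has the form path,sha256=<digest>,<size-in-bytes>, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with trailing '=' padding stripped (all zero-byte files share the 47DEQ… digest of the empty string). A minimal verification sketch, assuming the wheel has been unpacked and the listed path is resolvable relative to the working directory:

```python
import base64
import hashlib
from pathlib import Path

def record_digest(path: str) -> str:
    """Compute a wheel RECORD-style digest: URL-safe base64 of the file's
    SHA-256 hash, with '=' padding removed."""
    digest = hashlib.sha256(Path(path).read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Hypothetical check against the first entry above:
# assert record_digest("py3DCal/__init__.py") == "roGUqVPYu3kB2TMhvWqGo95GyyJxH-zhlWgnJQF4TvM"
```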
py3DCal/model_training/lib/validate_device.py
@@ -1,22 +0,0 @@
- import torch
-
- def validate_device(device: str):
-     """
-     Validates the device by converting it to a torch.device object.
-     Args:
-         device (str): Device to run the model on.
-     Returns:
-         None.
-     Raises:
-         ValueError: If the device is not specified or invalid.
-     """
-     try:
-         device = torch.device(device)
-     except Exception as e:
-         raise ValueError(
-             f"Invalid device '{device}'. Valid options include:\n"
-             " - 'cpu': CPU processing\n"
-             " - 'cuda' or 'cuda:0': NVIDIA GPU\n"
-             " - 'mps': Apple Silicon GPU\n"
-             "See: https://pytorch.org/docs/stable/tensor_attributes.html#torch.device"
-         ) from e
py3DCal/model_training/touchnet/__init__.py (file removed without content changes)
py3DCal/model_training/touchnet/dataset.py
@@ -1,78 +0,0 @@
- import os
- import sys
- import torch
- import numpy as np
- import pandas as pd
- from PIL import Image
- from torch.utils.data import Dataset
- from ..lib.precompute_gradients import precompute_gradients
- from ..lib.get_gradient_map import get_gradient_map
-
-
- class TactileSensorDataset(Dataset):
-     """
-     Tactile Sensor dataset.
-     """
-     def __init__(self, dataset_path, annotation_path, blank_image_path, transform=None, radius=36):
-         self.dataset_path = dataset_path
-         self.annotation_path = annotation_path
-         self.blank_image_path = blank_image_path
-         self.transform = transform
-         self.csv_file = dataset_path  # Store the CSV filename
-
-         # Load the CSV data
-         self.data = pd.read_csv(annotation_path, comment='#')
-
-         self.precomputed_gradients = precompute_gradients(dataset_path, annotation_path, r=radius)
-         self.blank_tensor = self.transform(Image.open(blank_image_path).convert("RGB"))
-
-     def _add_coordinate_channels(self, image):
-         """
-         Add x and y coordinate channels to the input image.
-         X channel: column indices (1s in first column, 2s in second column, etc.)
-         Y channel: row indices (1s in first row, 2s in second row, etc.)
-         """
-         # Get image dimensions
-         _, height, width = image.shape
-
-         # Create x coordinate channel (column indices)
-         x_coords = torch.arange(1, width + 1, dtype=torch.float32).unsqueeze(0).repeat(height, 1)
-         x_channel = x_coords.unsqueeze(0)  # Add channel dimension
-
-         # Create y coordinate channel (row indices)
-         y_coords = torch.arange(1, height + 1, dtype=torch.float32).unsqueeze(1).repeat(1, width)
-         y_channel = y_coords.unsqueeze(0)  # Add channel dimension
-
-         # Concatenate original image with coordinate channels
-         image_with_coords = torch.cat([image, x_channel, y_channel], dim=0)
-
-         return image_with_coords
-
-     def __len__(self):
-         return len(self.data)  # Use the DataFrame length
-
-     def __getitem__(self, idx):
-         # Check if index is valid
-         if idx < 0 or idx >= len(self.data):
-             raise IndexError("Index out of range")
-
-         if torch.is_tensor(idx):
-             idx = idx.tolist()
-
-         img_name = os.path.join(self.dataset_path, self.data.iloc[idx, 0])
-         img = Image.open(img_name).convert("RGB")
-         image = np.array(img, copy=True)
-
-         target = get_gradient_map(idx, annotation_path=self.annotation_path, precomputed_gradients=self.precomputed_gradients)
-
-         # if self.transform:
-         image = self.transform(image)
-         target = self.transform(target)
-
-         # Subtract pre-transformed blank tensor
-         image = image - self.blank_tensor
-         # Add coordinate channels
-         image = self._add_coordinate_channels(image)
-
-         sample = (image, target)
-         return sample
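The removed touchnet/dataset.py duplicated the dataset class that lives on in py3DCal/model_training/datasets/tactile_sensor_dataset.py (modified +4/-3 in this release). A minimal usage sketch, assuming the retained module still exports a TactileSensorDataset with the constructor shown above; the paths and the ToTensor transform are hypothetical, and a transform must be supplied because __init__ applies it to the blank image unconditionally:

```python
from torch.utils.data import DataLoader
from torchvision import transforms
from py3DCal.model_training.datasets.tactile_sensor_dataset import TactileSensorDataset

# Hypothetical paths; constructor signature assumed to match the removed copy above.
dataset = TactileSensorDataset(
    dataset_path="data/digit_run1",           # directory containing the captured images
    annotation_path="data/annotations.csv",   # probe-location CSV ('#' lines are skipped)
    blank_image_path="data/blank.png",        # reference frame subtracted from every sample
    transform=transforms.ToTensor(),          # required: also applied to the blank image
    radius=36,
)

image, target = dataset[0]  # image = RGB minus blank, plus x/y coordinate channels (5 channels total)
loader = DataLoader(dataset, batch_size=8, shuffle=True)
```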