py3dcal-1.0.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of py3dcal has been flagged as possibly problematic.
- py3DCal/__init__.py +14 -0
- py3DCal/data_collection/Calibrator.py +300 -0
- py3DCal/data_collection/__init__.py +0 -0
- py3DCal/data_collection/printers/Ender3/Ender3.py +82 -0
- py3DCal/data_collection/printers/Ender3/__init__.py +0 -0
- py3DCal/data_collection/printers/Printer.py +63 -0
- py3DCal/data_collection/printers/__init__.py +0 -0
- py3DCal/data_collection/sensors/DIGIT/DIGIT.py +47 -0
- py3DCal/data_collection/sensors/DIGIT/__init__.py +0 -0
- py3DCal/data_collection/sensors/DIGIT/default.csv +1222 -0
- py3DCal/data_collection/sensors/GelsightMini/GelsightMini.py +45 -0
- py3DCal/data_collection/sensors/GelsightMini/__init__.py +0 -0
- py3DCal/data_collection/sensors/GelsightMini/default.csv +1210 -0
- py3DCal/data_collection/sensors/Sensor.py +44 -0
- py3DCal/data_collection/sensors/__init__.py +0 -0
- py3DCal/model_training/__init__.py +0 -0
- py3DCal/model_training/datasets/DIGIT_dataset.py +77 -0
- py3DCal/model_training/datasets/GelSightMini_dataset.py +75 -0
- py3DCal/model_training/datasets/__init__.py +3 -0
- py3DCal/model_training/datasets/split_dataset.py +38 -0
- py3DCal/model_training/datasets/tactile_sensor_dataset.py +83 -0
- py3DCal/model_training/lib/__init__.py +0 -0
- py3DCal/model_training/lib/add_coordinate_embeddings.py +29 -0
- py3DCal/model_training/lib/annotate_dataset.py +422 -0
- py3DCal/model_training/lib/depthmaps.py +82 -0
- py3DCal/model_training/lib/fast_poisson.py +51 -0
- py3DCal/model_training/lib/get_gradient_map.py +39 -0
- py3DCal/model_training/lib/precompute_gradients.py +61 -0
- py3DCal/model_training/lib/train_model.py +96 -0
- py3DCal/model_training/lib/validate_parameters.py +87 -0
- py3DCal/model_training/models/__init__.py +1 -0
- py3DCal/model_training/models/touchnet.py +211 -0
- py3DCal/utils/__init__.py +0 -0
- py3DCal/utils/utils.py +32 -0
- py3dcal-1.0.5.dist-info/LICENSE +21 -0
- py3dcal-1.0.5.dist-info/METADATA +29 -0
- py3dcal-1.0.5.dist-info/RECORD +40 -0
- py3dcal-1.0.5.dist-info/WHEEL +5 -0
- py3dcal-1.0.5.dist-info/entry_points.txt +3 -0
- py3dcal-1.0.5.dist-info/top_level.txt +1 -0
py3DCal/model_training/lib/train_model.py
ADDED

@@ -0,0 +1,96 @@
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from ..datasets.tactile_sensor_dataset import TactileSensorDataset
from ..datasets.split_dataset import split_dataset
from .validate_parameters import validate_device


def train_model(model: nn.Module, dataset: TactileSensorDataset, num_epochs: int = 60, batch_size: int = 64, learning_rate: float = 1e-4, train_ratio: float = 0.8, loss_fn: nn.Module = nn.MSELoss(), device='cpu'):
    """
    Trains a model on a tactile sensor dataset. By default, trains for 60 epochs
    with a batch size of 64, using an AdamW optimizer with learning rate 1e-4.

    Args:
        model (nn.Module): The PyTorch model to be trained.
        dataset (py3DCal.datasets.TactileSensorDataset): The dataset to train the model on.
        num_epochs (int): Number of epochs to train for. Defaults to 60.
        batch_size (int): Batch size. Defaults to 64.
        learning_rate (float): Learning rate. Defaults to 1e-4.
        train_ratio (float): Proportion of data to use for training. Defaults to 0.8.
        loss_fn (nn.Module): Loss function. Defaults to nn.MSELoss().
        device (str): Device to run the training on. Defaults to 'cpu'.

    Outputs:
        weights.pth: Trained model weights.
        losses.csv: Training and validation losses.
    """
    validate_device(device)
    _validate_model_and_dataset(model, dataset)

    optimizer = optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=1e-4)
    train_dataset, val_dataset = split_dataset(dataset, train_ratio=train_ratio)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=(device == "cuda"), persistent_workers=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=8, pin_memory=(device == "cuda"), persistent_workers=True)

    model.to(device)

    epoch_train_losses = []
    epoch_val_losses = []

    print("Starting training...\n")

    # Training loop
    for epoch in range(num_epochs):
        print(f"Epoch {epoch+1}/{num_epochs}")

        model.train()
        train_loss = 0.0
        for batch_idx, (inputs, targets) in enumerate(train_loader):
            inputs = inputs.to(torch.float32).to(device)
            targets = targets.to(torch.float32).to(device)
            optimizer.zero_grad()
            outputs = model(inputs)

            loss = loss_fn(outputs, targets)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()

            print(f" [Batch {batch_idx}/{len(train_loader)}] - Loss: {loss.item():.4f}")

        avg_train_loss = train_loss / len(train_loader)
        epoch_train_losses.append(avg_train_loss)
        print(f"Epoch {epoch+1}/{num_epochs} | Train Loss: {avg_train_loss:.4f}")

        # Validation loop
        model.eval()
        val_loss = 0.0

        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs = inputs.to(torch.float32).to(device)
                targets = targets.to(torch.float32).to(device)
                outputs = model(inputs)
                loss = loss_fn(outputs, targets)
                val_loss += loss.item()

        avg_val_loss = val_loss / len(val_loader)
        epoch_val_losses.append(avg_val_loss)
        print(f"VAL LOSS: {avg_val_loss:.4f}")

    # Persist per-epoch losses and the final model weights
    with open("losses.csv", "w") as f:
        f.write("epoch,train_loss,val_loss\n")
        for i in range(len(epoch_train_losses)):
            f.write(f"{i+1},{epoch_train_losses[i]},{epoch_val_losses[i]}\n")

    torch.save(model.state_dict(), "weights.pth")


def _validate_model_and_dataset(model: nn.Module, dataset: TactileSensorDataset):
    if not isinstance(model, nn.Module):
        raise ValueError("Model must be an instance of torch.nn.Module.")

    if not isinstance(dataset, TactileSensorDataset):
        raise ValueError("Dataset must be an instance of py3DCal.datasets.TactileSensorDataset.")
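A minimal usage sketch for train_model(), assuming the package layout shown in this diff. TactileSensorDataset's constructor arguments (root, subtract_blank) are an assumption inferred from validate_dataset() in validate_parameters.py below, not a confirmed signature.

from py3DCal.model_training.lib.train_model import train_model
from py3DCal.model_training.models import TouchNet
from py3DCal.model_training.datasets.tactile_sensor_dataset import TactileSensorDataset

# Hypothetical constructor arguments, inferred from validate_dataset() below.
dataset = TactileSensorDataset(root="digit_dataset", subtract_blank=True)
model = TouchNet()

# Writes weights.pth and losses.csv to the current working directory.
train_model(model, dataset, num_epochs=60, batch_size=64, learning_rate=1e-4, device="cuda")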
py3DCal/model_training/lib/validate_parameters.py
ADDED

@@ -0,0 +1,87 @@
import os
import torch
from pathlib import Path

def validate_device(device: str):
    """
    Validates the device by converting it to a torch.device object.
    Args:
        device (str): Device to run the model on.
    Returns:
        None.
    Raises:
        ValueError: If the device is not specified or invalid.
    """
    try:
        device = torch.device(device)
    except Exception as e:
        raise ValueError(
            f"Invalid device '{device}'. Valid options include:\n"
            " - 'cpu': CPU processing\n"
            " - 'cuda' or 'cuda:0': NVIDIA GPU\n"
            " - 'mps': Apple Silicon GPU\n"
            "See: https://pytorch.org/docs/stable/tensor_attributes.html#torch.device"
        ) from e

def validate_root(root, must_exist=False):
    """
    Validates the root path specified by the user.

    Args:
        root: Root path specified by the user.
    Returns:
        None.
    Raises:
        ValueError: If the root is not specified or invalid.
    """
    if root is None:
        raise ValueError(
            "root directory cannot be None.\n"
        )

    if not isinstance(root, (str, Path)):
        raise ValueError(
            "root directory must be a valid file system path as a string or pathlib.Path object\n"
        )

    if must_exist and not os.path.exists(root):
        raise ValueError(
            f"root directory '{root}' does not exist.\n"
        )

def validate_dataset(root, subtract_blank: bool):
    """
    Validates the dataset path specified by the user.

    Args:
        root: Dataset path specified by the user.
        subtract_blank (bool): Whether to subtract the blank image.
    Returns:
        None.
    Raises:
        FileNotFoundError: If necessary files do not exist.
    """
    validate_root(root)

    annotation_path = os.path.join(root, "annotations", "annotations.csv")
    metadata_path = os.path.join(root, "annotations", "metadata.json")
    probe_images_path = os.path.join(root, "probe_images")
    blank_image_path = os.path.join(root, "blank_images", "blank.png")

    # Check if root directory exists
    if not os.path.exists(root):
        raise FileNotFoundError(f"Dataset root directory '{root}' does not exist.")

    # Check if all the necessary files exist
    if not os.path.exists(annotation_path):
        raise FileNotFoundError("annotations.csv file not found in annotations/ directory. Use the py3DCal.annotations() function to create it.")

    if not os.path.exists(metadata_path):
        raise FileNotFoundError("metadata.json file not found in annotations/ directory. Use the py3DCal.annotations() function to create it.")

    if not os.path.exists(probe_images_path):
        raise FileNotFoundError("probe_images/ directory not found in dataset root.")

    if subtract_blank and not os.path.exists(blank_image_path):
        raise FileNotFoundError("blank.png file not found in blank_images/ directory.")
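validate_dataset() encodes the on-disk layout the training code expects. The sketch below reconstructs that layout from the paths the function checks; the root name "digit_dataset" is a placeholder.

from py3DCal.model_training.lib.validate_parameters import validate_dataset

# Expected layout, reconstructed from the paths checked above:
#
#   <root>/
#       annotations/
#           annotations.csv
#           metadata.json
#       probe_images/
#       blank_images/
#           blank.png    (required only when subtract_blank=True)
#
# Raises FileNotFoundError if any required file is missing.
validate_dataset("digit_dataset", subtract_blank=True)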
py3DCal/model_training/models/__init__.py
ADDED

@@ -0,0 +1 @@
from .touchnet import SensorType, TouchNet
py3DCal/model_training/models/touchnet.py
ADDED

@@ -0,0 +1,211 @@
import os
import requests
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from enum import Enum
from typing import Union
from pathlib import Path

class SensorType(Enum):
    """
    SensorType: Available sensor types with pretrained weights and compiled datasets
    """
    DIGIT = "DIGIT"
    GELSIGHTMINI = "GelSightMini"

class TouchNet(nn.Module):
    """
    TouchNet: A PyTorch neural network for producing surface normal maps from tactile sensor images.

    Args:
        load_pretrained (bool): If True, loads pretrained weights for the specified sensor type.
        sensor_type (SensorType): The type of tactile sensor. Must be specified if load_pretrained is True.
        root (str or pathlib.Path): The root directory for saving/loading the pretrained weights (.pth) file.
    """
    def __init__(self, load_pretrained: bool = False, sensor_type: SensorType = None, root: Union[str, Path] = "."):
        super().__init__()

        self._validate_parameters(load_pretrained, sensor_type, root)

        self.conv1 = nn.Conv2d(5, 32, kernel_size=7, padding=3)
        self.bn1 = nn.BatchNorm2d(32)
        self.dropout1 = nn.Dropout2d(0.2)

        self.conv2 = nn.Conv2d(32, 64, kernel_size=7, padding=3)
        self.bn2 = nn.BatchNorm2d(64)
        self.dropout2 = nn.Dropout2d(0.2)

        self.conv3 = nn.Conv2d(64, 128, kernel_size=7, padding=3)
        self.bn3 = nn.BatchNorm2d(128)
        self.dropout3 = nn.Dropout2d(0.2)

        self.conv4 = nn.Conv2d(128, 256, kernel_size=5, padding=2)
        self.bn4 = nn.BatchNorm2d(256)
        self.dropout4 = nn.Dropout2d(0.3)

        self.conv5 = nn.Conv2d(256, 256, kernel_size=5, padding=2)
        self.bn5 = nn.BatchNorm2d(256)
        self.dropout5 = nn.Dropout2d(0.3)

        self.conv6 = nn.Conv2d(256, 128, kernel_size=5, padding=2)
        self.bn6 = nn.BatchNorm2d(128)
        self.dropout6 = nn.Dropout2d(0.2)

        self.conv7 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
        self.bn7 = nn.BatchNorm2d(64)
        self.dropout7 = nn.Dropout2d(0.2)

        self.conv8 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
        self.bn8 = nn.BatchNorm2d(32)
        self.dropout8 = nn.Dropout2d(0.2)

        self.conv9 = nn.Conv2d(32, 2, kernel_size=1)

        if load_pretrained:
            self._load_pretrained_model(root, sensor_type)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = self.dropout1(x)

        x = F.relu(self.bn2(self.conv2(x)))
        x = self.dropout2(x)

        x = F.relu(self.bn3(self.conv3(x)))
        x = self.dropout3(x)

        x = F.relu(self.bn4(self.conv4(x)))
        x = self.dropout4(x)

        x = F.relu(self.bn5(self.conv5(x)))
        x = self.dropout5(x)

        x = F.relu(self.bn6(self.conv6(x)))
        x = self.dropout6(x)

        x = F.relu(self.bn7(self.conv7(x)))
        x = self.dropout7(x)

        x = F.relu(self.bn8(self.conv8(x)))
        x = self.dropout8(x)

        x = self.conv9(x)

        return x

    def _validate_parameters(self, load_pretrained, sensor_type, root):
        if load_pretrained and sensor_type is None:
            raise ValueError("sensor_type must be specified when load_pretrained is True. sensor_type must be either SensorType.DIGIT or SensorType.GELSIGHTMINI.")

        if load_pretrained and not isinstance(sensor_type, SensorType):
            raise ValueError("sensor_type must be either SensorType.DIGIT or SensorType.GELSIGHTMINI.")

        if load_pretrained and root is None:
            raise ValueError("root directory for storing/loading model cannot be None when load_pretrained is True.")

        if load_pretrained and not isinstance(root, (str, Path)):
            raise ValueError("root directory must be a valid file system path as a string or pathlib.Path object when load_pretrained is True.")

        if not load_pretrained and sensor_type is not None:
            print("Warning: sensor_type parameter is ignored when load_pretrained is False.")

        if not load_pretrained and root != ".":
            print("Warning: root parameter is ignored when load_pretrained is False.")

    def _load_pretrained_model(self, root, sensor_type):
        """
        Loads a pretrained model for either the DIGIT or GelSightMini sensor.
        Args:
            root (str or pathlib.Path): Directory for saving/loading the weights file.
            sensor_type (SensorType): The sensor whose pretrained weights should be loaded.
        Returns:
            None.
        """

        if sensor_type == SensorType.DIGIT:
            file_path = os.path.join(root, "digit_pretrained_weights.pth")

            # Check if DIGIT pretrained weights exist locally; if not, download them
            if not os.path.exists(file_path):

                print("Downloading DIGIT pretrained weights ...")
                response = requests.get('https://zenodo.org/records/17517028/files/digit_pretrained_weights.pth?download=1', stream=True)
                response.raise_for_status()

                total_size = int(response.headers.get('content-length', 0))
                block_size = 1024

                # Save file in chunks to handle large files
                with open(file_path, 'wb') as f, tqdm(
                    total=total_size,
                    unit='B',
                    unit_scale=True,
                    desc="Downloading",
                    ncols=80
                ) as progress_bar:
                    for chunk in response.iter_content(chunk_size=block_size):
                        if chunk:
                            f.write(chunk)
                            progress_bar.update(len(chunk))

                print("Download complete!")
            else:
                print(f"DIGIT pretrained weights already exist at: {file_path}")

        elif sensor_type == SensorType.GELSIGHTMINI:
            file_path = os.path.join(root, "gsmini_pretrained_weights.pth")

            # Check if GelSight Mini pretrained weights exist locally; if not, download them
            if not os.path.exists(file_path):

                print("Downloading GelSight Mini pretrained weights ...")
                response = requests.get('https://zenodo.org/records/17517028/files/gsmini_pretrained_weights.pth?download=1', stream=True)
                response.raise_for_status()

                total_size = int(response.headers.get('content-length', 0))
                block_size = 1024

                # Save file in chunks to handle large files
                with open(file_path, 'wb') as f, tqdm(
                    total=total_size,
                    unit='B',
                    unit_scale=True,
                    desc="Downloading",
                    ncols=80
                ) as progress_bar:
                    for chunk in response.iter_content(chunk_size=block_size):
                        if chunk:
                            f.write(chunk)
                            progress_bar.update(len(chunk))

                print("Download complete!")
            else:
                print(f"GelSight Mini pretrained weights already exist at: {file_path}")

        state_dict = torch.load(file_path, map_location="cpu")

        self.load_state_dict(state_dict)

    def load_weights(self, weights_path: Union[str, Path]):
        """
        Loads model weights from a specified .pth file.

        Args:
            weights_path (str or pathlib.Path): The file path to the .pth file containing the model weights.
        Returns:
            None.
        Raises:
            ValueError: If the weights_path is not specified or invalid.
        """
        if weights_path is None:
            raise ValueError("weights_path cannot be None.")

        if not isinstance(weights_path, (str, Path)):
            raise ValueError("weights_path must be a valid file system path as a string or pathlib.Path object.")

        if not os.path.exists(weights_path):
            raise ValueError(f"The specified weights_path does not exist: {weights_path}")

        state_dict = torch.load(weights_path, map_location="cpu")
        self.load_state_dict(state_dict)
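A short inference sketch for TouchNet. The 240x320 spatial size is a placeholder; every convolution pads to preserve spatial size, so any input size works. The meaning of the 5 input channels is an inference (presumably 3 color channels plus 2 coordinate embeddings, given add_coordinate_embeddings.py in the file list) that this diff does not confirm.

import torch
from py3DCal.model_training.models import TouchNet, SensorType

# Downloads the DIGIT weights to ./digit_pretrained_weights.pth on first use.
model = TouchNet(load_pretrained=True, sensor_type=SensorType.DIGIT, root=".")
model.eval()

x = torch.randn(1, 5, 240, 320)  # (batch, channels, height, width)
with torch.no_grad():
    out = model(x)
print(out.shape)  # torch.Size([1, 2, 240, 320]); conv9 produces 2 output channels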
py3DCal/utils/utils.py
ADDED
@@ -0,0 +1,32 @@
import serial
import sys
import glob

def list_com_ports():
    """ Prints serial port names

    :raises EnvironmentError:
        On unsupported or unknown platforms
    """
    if sys.platform.startswith('win'):
        ports = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # this excludes your current terminal "/dev/tty"
        ports = glob.glob('/dev/tty[A-Za-z]*')
    elif sys.platform.startswith('darwin'):
        ports = glob.glob('/dev/tty.*')
    else:
        raise EnvironmentError('Unsupported platform')

    result = []
    for port in ports:
        try:
            s = serial.Serial(port)
            s.close()
            result.append(port)
        except (OSError, serial.SerialException):
            pass

    print('Available COM Ports: ', end='')
    print(result)
    print('\n')
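A usage sketch for the port-listing helper above; presumably used to find the serial port of a connected 3D printer before data collection.

from py3DCal.utils.utils import list_com_ports

# Probes each candidate port by opening and closing it, then prints the ones that respond.
list_com_ports()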
py3dcal-1.0.5.dist-info/LICENSE
ADDED

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Rohan Kota

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
py3dcal-1.0.5.dist-info/METADATA
ADDED

@@ -0,0 +1,29 @@
Metadata-Version: 2.1
Name: py3dcal
Version: 1.0.5
Summary: UNKNOWN
Home-page: https://github.com/rohankotanu/py3DCal
Author: Rohan Kota
Author-email: rohankota2026@u.northwestern.edu
License: MIT
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: numpy>=1.0.0
Requires-Dist: matplotlib>=3.0.0
Requires-Dist: pandas>=2.0.0
Requires-Dist: scipy>=1.0.0
Requires-Dist: torch>=2.0.0
Requires-Dist: torchvision>=0.23.0
Requires-Dist: pyserial>=3.0
Requires-Dist: opencv-python>=4.0.0
Requires-Dist: pillow>=11.0.0
Requires-Dist: tqdm>=4.0.0
Requires-Dist: requests>=2.0.0
Requires-Dist: scikit-learn>=1.0.0
Requires-Dist: digit-interface>=0.2.1

# For instructions on how to use this library, please visit https://rohankotanu.github.io/3DCal/
py3dcal-1.0.5.dist-info/RECORD
ADDED

@@ -0,0 +1,40 @@
py3DCal/__init__.py,sha256=dvqOqTSeNzPAVY37lJGxNjW1IfWK6M3nxt940hDYT-4,813
py3DCal/data_collection/Calibrator.py,sha256=X9LaLrIPv2maBZR3CwZuWrtKISjsP3GxNEx8UvwVbmc,11586
py3DCal/data_collection/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py3DCal/data_collection/printers/Printer.py,sha256=ouqgWuJWk8PPjhTRFwolnupXbE0SzO819LIgw1ug-7s,1628
py3DCal/data_collection/printers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py3DCal/data_collection/printers/Ender3/Ender3.py,sha256=pkE9Kt-mMH-fpZC5Gl6YyDPAxOdfZxp4Z7s68N7D_is,2239
py3DCal/data_collection/printers/Ender3/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py3DCal/data_collection/sensors/Sensor.py,sha256=o4tdkPMU2RF27OcOoxLABE4-K7R7Cf-d7kiT-KEDwAk,1063
py3DCal/data_collection/sensors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py3DCal/data_collection/sensors/DIGIT/DIGIT.py,sha256=1Jc1qXwinsOFjJ6-QUoUcXOBFjG4M6ylnUjbWMe3NSc,1268
py3DCal/data_collection/sensors/DIGIT/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py3DCal/data_collection/sensors/DIGIT/default.csv,sha256=OqjTE9b5uAEAxrHyjsxS7UyUWHgjO8yWPN0gVbBj7_Q,26728
py3DCal/data_collection/sensors/GelsightMini/GelsightMini.py,sha256=nQ8JbiQypLCdTLM4WBNmXB_FFQ_yyxJ9gaV-OoE08Gg,1193
py3DCal/data_collection/sensors/GelsightMini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py3DCal/data_collection/sensors/GelsightMini/default.csv,sha256=lavPHcJ6o4VkvMvOk7lcdRCp9dOJxg_VrPNayf9zVvM,26449
py3DCal/model_training/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py3DCal/model_training/datasets/DIGIT_dataset.py,sha256=IYRqWDawbTTe4IOVjZuKVr0yVNBe1XLGC6PoDxsTMfo,3017
py3DCal/model_training/datasets/GelSightMini_dataset.py,sha256=H8Fr_4f3HDHLLl6KshRfqt0FP8-3d4n9XRK0xfPcH0k,3070
py3DCal/model_training/datasets/__init__.py,sha256=vqrB177ZXrBmqDnL472EWleJS6Y-BxYEy2Ao9hWWDHc,137
py3DCal/model_training/datasets/split_dataset.py,sha256=AzNJlTgcXGa9AdHJnVJYNEyv__OuNHZAMB76Haqc-io,1351
py3DCal/model_training/datasets/tactile_sensor_dataset.py,sha256=LuSQYNHY5vkOihlJpG1PIs9EtxYtVW5x3ac6wgRTXLQ,3314
py3DCal/model_training/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py3DCal/model_training/lib/add_coordinate_embeddings.py,sha256=8wit43RIx28IKEB82SnH_Of09FAmiWM3jgOeXpamM1I,1198
py3DCal/model_training/lib/annotate_dataset.py,sha256=mPGLROo7h98w9AOdMZJzqAPa4ZGMJnciJ4ovLlrM7j0,16016
py3DCal/model_training/lib/depthmaps.py,sha256=eW9rs4Ch64SwgwoTc1riauolX-IDxrYXmOJ0qQqQAgE,3183
py3DCal/model_training/lib/fast_poisson.py,sha256=wJ5MTkSCxkFU3wUx-zomvIYPcAyEpPZj-LX7JQOx8JE,2252
py3DCal/model_training/lib/get_gradient_map.py,sha256=IbCigrK_-6ZkeOSaHZAIhMu2pFmkSpWAaz1EjUtenCM,1438
py3DCal/model_training/lib/precompute_gradients.py,sha256=zc1uvishZP7PjBWYF2VSrIMCtEkLrTPtLktOTpCh9P8,1860
py3DCal/model_training/lib/train_model.py,sha256=fxFIfKWp3WA1Aa2IEczKBJCivVyVovj7IW2HqNw5IlE,4016
py3DCal/model_training/lib/validate_parameters.py,sha256=My8cbGPSWsVpMB54jydbLYgfefWPMLGvr4oiPyLImb0,3052
py3DCal/model_training/models/__init__.py,sha256=FTMTEyUglpRIFJ4hi3Z7y1KdScGDVVg_OxYXip4b8wg,42
py3DCal/model_training/models/touchnet.py,sha256=JIfhwXX0uIk6UcN2tmCLoiH9il9U2IByjbjYIjQEIqc,8079
py3DCal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py3DCal/utils/utils.py,sha256=hgTyWZuBXfo9lxLnOLd0445Aw2-uARtKGXuBhZmz-Z0,995
py3dcal-1.0.5.dist-info/LICENSE,sha256=D95ljbgz6PW9niwHP26EWFN77QBvepSCsMKGp0mRVFM,1066
py3dcal-1.0.5.dist-info/METADATA,sha256=kOwKuH8L9Lm7BrI04dQaNdO2amIs6YM1t-IasW59_Uw,882
py3dcal-1.0.5.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
py3dcal-1.0.5.dist-info/entry_points.txt,sha256=_N1ruxvLEyZmSAaPsCx8kEzbYSJ5bHG5S8bvpua_X5E,59
py3dcal-1.0.5.dist-info/top_level.txt,sha256=NbatjyXjN_E6UMifZpkx-ohahGQH_ZFvqovwmvU7FMA,8
py3dcal-1.0.5.dist-info/RECORD,,
py3dcal-1.0.5.dist-info/top_level.txt
ADDED

@@ -0,0 +1 @@
py3DCal