bplusplus 1.2.2__py3-none-any.whl → 1.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of bplusplus might be problematic. Click here for more details.
- bplusplus/__init__.py +3 -5
- bplusplus/inference.py +891 -0
- bplusplus/prepare.py +419 -652
- bplusplus/{hierarchical/test.py → test.py} +22 -9
- bplusplus/tracker.py +261 -0
- bplusplus/{hierarchical/train.py → train.py} +1 -1
- bplusplus-1.2.3.dist-info/METADATA +101 -0
- bplusplus-1.2.3.dist-info/RECORD +11 -0
- {bplusplus-1.2.2.dist-info → bplusplus-1.2.3.dist-info}/WHEEL +1 -1
- bplusplus/resnet/test.py +0 -473
- bplusplus/resnet/train.py +0 -329
- bplusplus/train_validate.py +0 -11
- bplusplus-1.2.2.dist-info/METADATA +0 -260
- bplusplus-1.2.2.dist-info/RECORD +0 -12
- {bplusplus-1.2.2.dist-info → bplusplus-1.2.3.dist-info}/LICENSE +0 -0
|
@@ -23,7 +23,7 @@ import sys
|
|
|
23
23
|
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
24
24
|
logger = logging.getLogger(__name__)
|
|
25
25
|
|
|
26
|
-
def
|
|
26
|
+
def test(species_list, test_set, yolo_weights, hierarchical_weights, output_dir="."):
|
|
27
27
|
"""
|
|
28
28
|
Run the two-stage classifier on a test set.
|
|
29
29
|
|
|
@@ -243,8 +243,15 @@ class TestTwoStage:
|
|
|
243
243
|
if "species_list" in checkpoint:
|
|
244
244
|
saved_species = checkpoint["species_list"]
|
|
245
245
|
print(f"Saved model was trained on: {', '.join(saved_species)}")
|
|
246
|
-
|
|
247
|
-
taxonomy
|
|
246
|
+
|
|
247
|
+
# Use saved taxonomy mappings if available
|
|
248
|
+
if "species_to_genus" in checkpoint and "genus_to_family" in checkpoint:
|
|
249
|
+
species_to_genus = checkpoint["species_to_genus"]
|
|
250
|
+
genus_to_family = checkpoint["genus_to_family"]
|
|
251
|
+
else:
|
|
252
|
+
# Fallback: fetch from GBIF but this may cause index mismatches
|
|
253
|
+
print("Warning: No taxonomy mappings in checkpoint, fetching from GBIF")
|
|
254
|
+
_, species_to_genus, genus_to_family = get_taxonomy(species_names)
|
|
248
255
|
else:
|
|
249
256
|
taxonomy, species_to_genus, genus_to_family = get_taxonomy(species_names)
|
|
250
257
|
else:
|
|
@@ -285,8 +292,6 @@ class TestTwoStage:
|
|
|
285
292
|
self.classification_model.eval()
|
|
286
293
|
|
|
287
294
|
self.classification_transform = transforms.Compose([
|
|
288
|
-
transforms.Resize((768, 768)), # Fixed size for all validation images
|
|
289
|
-
transforms.CenterCrop(640),
|
|
290
295
|
transforms.ToTensor(),
|
|
291
296
|
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
|
|
292
297
|
])
|
|
@@ -467,10 +472,18 @@ class TestTwoStage:
|
|
|
467
472
|
predicted_genus_frames, true_genus_frames,
|
|
468
473
|
predicted_family_frames, true_family_frames):
|
|
469
474
|
"""Calculate metrics at all taxonomic levels"""
|
|
470
|
-
# Get list of species, families and genera
|
|
475
|
+
# Get list of species, families and genera using the same order as model training
|
|
471
476
|
species_list = self.species_names
|
|
472
|
-
|
|
473
|
-
|
|
477
|
+
|
|
478
|
+
# Use the index mappings from the model to ensure consistency
|
|
479
|
+
if 1 in self.idx_to_level and 2 in self.idx_to_level:
|
|
480
|
+
family_list = [self.idx_to_level[1][i] for i in sorted(self.idx_to_level[1].keys())]
|
|
481
|
+
genus_list = [self.idx_to_level[2][i] for i in sorted(self.idx_to_level[2].keys())]
|
|
482
|
+
else:
|
|
483
|
+
# Fallback to sorted lists (may cause issues)
|
|
484
|
+
print("Warning: Using fallback sorted lists for taxonomy - this may cause index mismatches")
|
|
485
|
+
genus_list = sorted(list(set(self.species_to_genus.values())))
|
|
486
|
+
family_list = sorted(list(set(self.genus_to_family.values())))
|
|
474
487
|
|
|
475
488
|
# Print the index mappings we're using for evaluation
|
|
476
489
|
print("\nUsing the following index mappings for evaluation:")
|
|
@@ -665,4 +678,4 @@ if __name__ == "__main__":
|
|
|
665
678
|
hierarchical_model_path = "/mnt/nvme0n1p1/mit/two-stage-detection/hierarchical/hierarchical-weights.pth"
|
|
666
679
|
output_directory = "./output"
|
|
667
680
|
|
|
668
|
-
|
|
681
|
+
test(species_names, test_directory, yolo_model_path, hierarchical_model_path, output_directory)
|
bplusplus/tracker.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import uuid
|
|
3
|
+
from scipy.optimize import linear_sum_assignment
|
|
4
|
+
from collections import deque
|
|
5
|
+
|
|
6
|
+
class BoundingBox:
    """Axis-aligned detection box stored as (x, y, width, height).

    Records the frame the detection was observed in and, optionally, the
    id of the track it belongs to.  The box area is computed once at
    construction time.
    """

    def __init__(self, x, y, width, height, frame_id, track_id=None):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        # Cache the area; it is read repeatedly by the tracker's cost function.
        self.area = width * height
        self.frame_id = frame_id
        self.track_id = track_id

    def center(self):
        """Return the (cx, cy) midpoint of the box."""
        half_w = self.width / 2
        half_h = self.height / 2
        return (self.x + half_w, self.y + half_h)

    @classmethod
    def from_xyxy(cls, x1, y1, x2, y2, frame_id, track_id=None):
        """Create BoundingBox from x1,y1,x2,y2 coordinates"""
        return cls(x1, y1, x2 - x1, y2 - y1, frame_id, track_id)
|
|
25
|
+
|
|
26
|
+
class InsectTracker:
    """Multi-object tracker that links per-frame YOLO detections into tracks.

    Matching is done per frame with the Hungarian algorithm
    (``scipy.optimize.linear_sum_assignment``) over a cost that combines
    normalized center distance and box-area similarity.  Tracks that
    disappear are kept in ``lost_tracks`` for up to ``track_memory_frames``
    frames so they can be re-identified when the insect reappears.
    """

    def __init__(self, image_height, image_width, max_frames=30, w_dist=0.7, w_area=0.3, cost_threshold=0.8, track_memory_frames=None, debug=False):
        # image_height/image_width: frame dimensions, used only to compute
        # the image diagonal for distance normalization.
        # w_dist/w_area: weights of the two cost terms (see calculate_cost).
        # cost_threshold: matches with cost >= this value are rejected.
        self.image_height = image_height
        self.image_width = image_width
        # Image diagonal — the maximum possible center distance, used to
        # normalize distances into [0, 1].
        self.max_dist = np.sqrt(image_height**2 + image_width**2)
        self.max_frames = max_frames
        self.w_dist = w_dist
        self.w_area = w_area
        self.cost_threshold = cost_threshold
        self.debug = debug

        # If track_memory_frames not specified, use max_frames (full history window)
        self.track_memory_frames = track_memory_frames if track_memory_frames is not None else max_frames
        if self.debug:
            print(f"DEBUG: Tracker initialized with max_frames={max_frames}, track_memory_frames={self.track_memory_frames}")

        # Bounded per-frame history of assigned boxes (oldest frames drop off).
        self.tracking_history = deque(maxlen=max_frames)
        # Boxes matched in the most recent frame.
        self.current_tracks = []
        self.lost_tracks = {} # track_id -> {box: BoundingBox, frames_lost: int}

    def _generate_track_id(self):
        """Generate a unique UUID for a new track"""
        return str(uuid.uuid4())

    def calculate_cost(self, box1, box2):
        """Calculate cost between two bounding boxes as per equation (4)"""
        # Calculate center points
        cx1, cy1 = box1.center()
        cx2, cy2 = box2.center()

        # Euclidean distance (equation 1)
        dist = np.sqrt((cx2 - cx1)**2 + (cy2 - cy1)**2)

        # Normalized distance (equation 2 used for normalization)
        norm_dist = dist / self.max_dist

        # Area cost (equation 3)
        # Ratio of smaller to larger area: 1.0 for identical areas, -> 0 for
        # very different areas.  Degenerate zero-area boxes count as identical.
        min_area = min(box1.area, box2.area)
        max_area = max(box1.area, box2.area)
        area_cost = min_area / max_area if max_area > 0 else 1.0

        # Final cost (equation 4)
        # Weighted sum; lower is a better match.  (1 - area_cost) converts the
        # similarity ratio into a dissimilarity term.
        cost = (norm_dist * self.w_dist) + ((1 - area_cost) * self.w_area)

        return cost

    def build_cost_matrix(self, prev_boxes, curr_boxes):
        """Build cost matrix for Hungarian algorithm"""
        n_prev = len(prev_boxes)
        n_curr = len(curr_boxes)
        # Pad to a square matrix so linear_sum_assignment can handle unequal
        # numbers of previous tracks and current detections.
        n = max(n_prev, n_curr)

        # Initialize cost matrix with high values
        # 999.0 acts as the "dummy" cost for padded rows/columns; it is far
        # above any real cost, so dummy assignments never win a real match.
        cost_matrix = np.ones((n, n)) * 999.0

        # Fill in actual costs
        for i in range(n_prev):
            for j in range(n_curr):
                cost_matrix[i, j] = self.calculate_cost(prev_boxes[i], curr_boxes[j])

        return cost_matrix, n_prev, n_curr

    def update(self, new_detections, frame_id):
        """
        Update tracking with new detections from YOLO

        Args:
            new_detections: List of YOLO detection boxes (x1, y1, x2, y2 format)
            frame_id: Current frame number

        Returns:
            List of track IDs corresponding to each detection
        """
        # Handle empty detection list (no detections in this frame)
        if not new_detections:
            if self.debug:
                print(f"DEBUG: Frame {frame_id} has no detections")
            # Move all current tracks to lost tracks
            for track in self.current_tracks:
                if track.track_id not in self.lost_tracks:
                    self.lost_tracks[track.track_id] = {
                        'box': track,
                        'frames_lost': 1
                    }
                    if self.debug:
                        print(f"DEBUG: Moved track {track.track_id} to lost tracks")
                else:
                    # Defensive: a current track should not normally already be
                    # in lost_tracks; if it is, just age it further.
                    self.lost_tracks[track.track_id]['frames_lost'] += 1

            # Age lost tracks and remove old ones
            # NOTE(review): _age_lost_tracks() increments frames_lost for every
            # lost track, including those just inserted above with
            # frames_lost=1, so a track lost this frame ends the update at
            # frames_lost == 2 — confirm this double-count is intended.
            self._age_lost_tracks()

            self.current_tracks = []
            self.tracking_history.append([])
            return []

        # Convert YOLO detections to BoundingBox objects
        new_boxes = []
        for i, detection in enumerate(new_detections):
            # Only the first four values are used; any extra fields (e.g.
            # confidence/class appended by the detector) are ignored here.
            x1, y1, x2, y2 = detection[:4]
            bbox = BoundingBox.from_xyxy(x1, y1, x2, y2, frame_id)
            new_boxes.append(bbox)

        # If this is the first frame or no existing tracks, assign new track IDs to all boxes
        if not self.current_tracks and not self.lost_tracks:
            track_ids = []
            for box in new_boxes:
                box.track_id = self._generate_track_id()
                track_ids.append(box.track_id)
                if self.debug:
                    print(f"DEBUG: FIRST FRAME - Assigned track ID {box.track_id} to new detection")
            self.current_tracks = new_boxes
            self.tracking_history.append(new_boxes)
            return track_ids

        # Combine current tracks and lost tracks for matching
        # Lost tracks participate in matching so an insect that briefly left
        # the frame can recover its original id.
        all_previous_tracks = self.current_tracks.copy()
        lost_track_list = []

        for track_id, lost_info in self.lost_tracks.items():
            lost_track_list.append(lost_info['box'])
            lost_track_list[-1].track_id = track_id # Ensure track_id is preserved

        all_previous_tracks.extend(lost_track_list)

        if not all_previous_tracks:
            # No previous tracks at all, assign new IDs
            track_ids = []
            for box in new_boxes:
                box.track_id = self._generate_track_id()
                track_ids.append(box.track_id)
                if self.debug:
                    print(f"DEBUG: No previous tracks - Assigned track ID {box.track_id} to new detection")
            self.current_tracks = new_boxes
            self.tracking_history.append(new_boxes)
            return track_ids

        # Build cost matrix including lost tracks
        cost_matrix, n_prev, n_curr = self.build_cost_matrix(all_previous_tracks, new_boxes)

        # Apply Hungarian algorithm
        row_indices, col_indices = linear_sum_assignment(cost_matrix)

        # Assign track IDs based on the matching
        assigned_curr_indices = set()
        track_ids = [None] * len(new_boxes)
        recovered_tracks = set()  # Track IDs that were recovered from lost tracks

        if self.debug:
            print(f"DEBUG: Hungarian assignment - rows: {row_indices}, cols: {col_indices}")
            print(f"DEBUG: Cost threshold: {self.cost_threshold}")
            print(f"DEBUG: Current tracks: {len(self.current_tracks)}, Lost tracks: {len(self.lost_tracks)}")

        for i, j in zip(row_indices, col_indices):
            # Only consider valid assignments (not dummy rows/columns)
            if i < n_prev and j < n_curr:
                cost = cost_matrix[i, j]
                if self.debug:
                    print(f"DEBUG: Checking assignment {i}->{j}, cost: {cost:.3f}")
                # Check if cost is below threshold
                if cost < self.cost_threshold:
                    # Assign the track ID from previous box to current box
                    prev_track_id = all_previous_tracks[i].track_id
                    new_boxes[j].track_id = prev_track_id
                    track_ids[j] = prev_track_id
                    assigned_curr_indices.add(j)

                    # Check if this was a lost track being recovered
                    if prev_track_id in self.lost_tracks:
                        recovered_tracks.add(prev_track_id)
                        if self.debug:
                            print(f"DEBUG: RECOVERED lost track ID {prev_track_id} for detection {j} (was lost for {self.lost_tracks[prev_track_id]['frames_lost']} frames)")
                    else:
                        if self.debug:
                            print(f"DEBUG: Continued track ID {prev_track_id} for detection {j}")
                else:
                    if self.debug:
                        print(f"DEBUG: Cost {cost:.3f} above threshold {self.cost_threshold}, not assigning")

        # Remove recovered tracks from lost tracks
        for track_id in recovered_tracks:
            del self.lost_tracks[track_id]

        # Assign new track IDs to unassigned current boxes (new insects)
        for j in range(n_curr):
            if j not in assigned_curr_indices:
                new_boxes[j].track_id = self._generate_track_id()
                track_ids[j] = new_boxes[j].track_id
                if self.debug:
                    print(f"DEBUG: Assigned NEW track ID {new_boxes[j].track_id} to detection {j}")

        # Move unmatched current tracks to lost tracks (tracks that disappeared this frame)
        matched_track_ids = {track_ids[j] for j in assigned_curr_indices if track_ids[j] is not None}
        for track in self.current_tracks:
            if track.track_id not in matched_track_ids and track.track_id not in recovered_tracks:
                if track.track_id not in self.lost_tracks:
                    self.lost_tracks[track.track_id] = {
                        'box': track,
                        'frames_lost': 1
                    }
                    if self.debug:
                        print(f"DEBUG: Track {track.track_id} disappeared, moved to lost tracks")

        # Age lost tracks and remove old ones
        # NOTE(review): as in the empty-detection path above, tracks inserted
        # just now with frames_lost=1 are immediately incremented to 2 here —
        # verify the extra increment is intended.
        self._age_lost_tracks()

        # Update current tracks
        self.current_tracks = new_boxes

        # Add to tracking history
        self.tracking_history.append(new_boxes)

        return track_ids

    def _age_lost_tracks(self):
        """Age lost tracks and remove those that have been lost too long"""
        # Collect ids first, then delete — avoids mutating the dict while
        # iterating it.
        tracks_to_remove = []
        for track_id, lost_info in self.lost_tracks.items():
            lost_info['frames_lost'] += 1
            if lost_info['frames_lost'] > self.track_memory_frames:
                tracks_to_remove.append(track_id)
                if self.debug:
                    print(f"DEBUG: Permanently removing track {track_id} (lost for {lost_info['frames_lost']} frames)")

        for track_id in tracks_to_remove:
            del self.lost_tracks[track_id]

    def get_tracking_stats(self):
        """Get current tracking statistics for debugging/monitoring"""
        return {
            'active_tracks': len(self.current_tracks),
            'lost_tracks': len(self.lost_tracks),
            'active_track_ids': [track.track_id for track in self.current_tracks],
            'lost_track_ids': list(self.lost_tracks.keys()),
            'total_history_frames': len(self.tracking_history)
        }
|
|
@@ -14,7 +14,7 @@ import logging
|
|
|
14
14
|
from tqdm import tqdm
|
|
15
15
|
import sys
|
|
16
16
|
|
|
17
|
-
def
|
|
17
|
+
def train(batch_size=4, epochs=30, patience=3, img_size=640, data_dir='input', output_dir='./output', species_list=None):
|
|
18
18
|
"""
|
|
19
19
|
Main function to run the entire training pipeline.
|
|
20
20
|
Sets up datasets, model, training process and handles errors.
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: bplusplus
|
|
3
|
+
Version: 1.2.3
|
|
4
|
+
Summary: A simple method to create AI models for biodiversity, with collect and prepare pipeline
|
|
5
|
+
License: MIT
|
|
6
|
+
Author: Titus Venverloo
|
|
7
|
+
Author-email: tvenver@mit.edu
|
|
8
|
+
Requires-Python: >=3.9.0,<4.0.0
|
|
9
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
16
|
+
Requires-Dist: numpy
|
|
17
|
+
Requires-Dist: pandas (==2.1.4)
|
|
18
|
+
Requires-Dist: pillow
|
|
19
|
+
Requires-Dist: prettytable (==3.7.0)
|
|
20
|
+
Requires-Dist: pygbif (>=0.6.4,<0.7.0)
|
|
21
|
+
Requires-Dist: pyyaml (==6.0.1)
|
|
22
|
+
Requires-Dist: requests (==2.25.1)
|
|
23
|
+
Requires-Dist: scikit-learn
|
|
24
|
+
Requires-Dist: tabulate (>=0.9.0,<0.10.0)
|
|
25
|
+
Requires-Dist: torch (>=2.5.0,<3.0.0)
|
|
26
|
+
Requires-Dist: torchvision
|
|
27
|
+
Requires-Dist: tqdm (==4.66.4)
|
|
28
|
+
Requires-Dist: ultralytics (>=8.3.0)
|
|
29
|
+
Requires-Dist: validators (>=0.33.0,<0.34.0)
|
|
30
|
+
Description-Content-Type: text/markdown
|
|
31
|
+
|
|
32
|
+
# Domain-Agnostic Insect Classification Pipeline
|
|
33
|
+
|
|
34
|
+
This project provides a complete, end-to-end pipeline for building a custom insect classification system. The framework is designed to be **domain-agnostic**, allowing you to train a powerful detection and classification model for **any insect species** by simply providing a list of names.
|
|
35
|
+
|
|
36
|
+
Using the `Bplusplus` library, this pipeline automates the entire machine learning workflow, from data collection to video inference.
|
|
37
|
+
|
|
38
|
+
## Key Features
|
|
39
|
+
|
|
40
|
+
- **Automated Data Collection**: Downloads hundreds of images for any species from the GBIF database.
|
|
41
|
+
- **Intelligent Data Preparation**: Uses a pre-trained model to automatically find, crop, and resize insects from raw images, ensuring high-quality training data.
|
|
42
|
+
- **Hierarchical Classification**: Trains a model to identify insects at three taxonomic levels: **family, genus, and species**.
|
|
43
|
+
- **Video Inference & Tracking**: Processes video files to detect, classify, and track individual insects over time, providing aggregated predictions.
|
|
44
|
+
## Pipeline Overview
|
|
45
|
+
|
|
46
|
+
The process is broken down into six main steps, all detailed in the `full_pipeline.ipynb` notebook:
|
|
47
|
+
|
|
48
|
+
1. **Collect Data**: Select your target species and fetch raw insect images from the web.
|
|
49
|
+
2. **Prepare Data**: Filter, clean, and prepare images for training.
|
|
50
|
+
3. **Train Model**: Train the hierarchical classification model.
|
|
51
|
+
4. **Download Weights**: Fetch pre-trained weights for the detection model.
|
|
52
|
+
5. **Test Model**: Evaluate the performance of the trained model.
|
|
53
|
+
6. **Run Inference**: Run the full pipeline on a video file for real-world application.
|
|
54
|
+
|
|
55
|
+
## How to Use
|
|
56
|
+
|
|
57
|
+
### Prerequisites
|
|
58
|
+
|
|
59
|
+
- Python 3.9+
|
|
60
|
+
- `venv` for creating a virtual environment (recommended)
|
|
61
|
+
|
|
62
|
+
### Setup
|
|
63
|
+
|
|
64
|
+
1. **Create and activate a virtual environment:**
|
|
65
|
+
```bash
|
|
66
|
+
python3 -m venv venv
|
|
67
|
+
source venv/bin/activate
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
2. **Install the required packages:**
|
|
71
|
+
```bash
|
|
72
|
+
pip install bplusplus
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
### Running the Pipeline
|
|
76
|
+
|
|
77
|
+
The entire workflow is contained within **`full_pipeline.ipynb`**. Open it with a Jupyter Notebook or JupyterLab environment and run the cells sequentially to execute the full pipeline.
|
|
78
|
+
|
|
79
|
+
### Customization
|
|
80
|
+
|
|
81
|
+
To train the model on different insect species, simply modify the `names` list in **Step 1** of the notebook:
|
|
82
|
+
|
|
83
|
+
```python
|
|
84
|
+
# a/full_pipeline.ipynb
|
|
85
|
+
|
|
86
|
+
# To use your own species, change the names in this list
|
|
87
|
+
names = [
|
|
88
|
+
"Vespa crabro", "Vespula vulgaris", "Dolichovespula media"
|
|
89
|
+
]
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
The pipeline will automatically handle the rest, from data collection to training, for your new set of species.
|
|
93
|
+
|
|
94
|
+
## Directory Structure
|
|
95
|
+
|
|
96
|
+
The pipeline will create the following directories to store artifacts:
|
|
97
|
+
|
|
98
|
+
- `GBIF_data/`: Stores the raw images downloaded from GBIF.
|
|
99
|
+
- `prepared_data/`: Contains the cleaned, cropped, and resized images ready for training.
|
|
100
|
+
- `trained_model/`: Saves the trained model weights (`best_multitask.pt`) and pre-trained detection weights.
|
|
101
|
+
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
bplusplus/__init__.py,sha256=GLqIx6Ln3Jt_Q95zkqXglKaSF3dbw-awax4dYry3tw0,145
|
|
2
|
+
bplusplus/collect.py,sha256=lEJHXPpOo4DALBw6zemdmFuqAXZ12-BKwgesvq5ACYs,7135
|
|
3
|
+
bplusplus/inference.py,sha256=3XmwzEfVTw5OFiMbMVgiuEa-r22HvMUWHVXESZsTIzo,37708
|
|
4
|
+
bplusplus/prepare.py,sha256=pdXUVAzY030tM6f0Gf_zml8I26lS38wuvH13R2F00Do,25829
|
|
5
|
+
bplusplus/test.py,sha256=kKjrsb3iCfljtRjot_kiVB5hopMkApoW9yvMcuI2O_U,30545
|
|
6
|
+
bplusplus/tracker.py,sha256=JixV1ICGywGhVMTvkq3hrk4MLUUWDh3XJW4VLm4JdO0,11250
|
|
7
|
+
bplusplus/train.py,sha256=wkHnKbTdZAFn2voJS7gSYXU7B9UVYVYmbTJCR0tFzs4,28058
|
|
8
|
+
bplusplus-1.2.3.dist-info/LICENSE,sha256=rRkeHptDnlmviR0_WWgNT9t696eys_cjfVUU8FEO4k4,1071
|
|
9
|
+
bplusplus-1.2.3.dist-info/METADATA,sha256=IDnokwF2CEyM_3xLmlRL30k2P9NglDjdjbxC7-UZoc4,4046
|
|
10
|
+
bplusplus-1.2.3.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
|
|
11
|
+
bplusplus-1.2.3.dist-info/RECORD,,
|