lazylabel-gui 1.1.1__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. lazylabel/__init__.py +8 -8
  2. lazylabel/config/__init__.py +6 -6
  3. lazylabel/config/hotkeys.py +168 -168
  4. lazylabel/config/paths.py +40 -40
  5. lazylabel/config/settings.py +65 -65
  6. lazylabel/core/__init__.py +6 -6
  7. lazylabel/core/file_manager.py +105 -105
  8. lazylabel/core/model_manager.py +97 -97
  9. lazylabel/core/segment_manager.py +171 -171
  10. lazylabel/main.py +36 -36
  11. lazylabel/models/__init__.py +4 -4
  12. lazylabel/models/sam_model.py +195 -195
  13. lazylabel/ui/__init__.py +7 -7
  14. lazylabel/ui/control_panel.py +241 -237
  15. lazylabel/ui/editable_vertex.py +64 -51
  16. lazylabel/ui/hotkey_dialog.py +383 -383
  17. lazylabel/ui/hoverable_pixelmap_item.py +22 -22
  18. lazylabel/ui/hoverable_polygon_item.py +39 -39
  19. lazylabel/ui/main_window.py +1659 -1546
  20. lazylabel/ui/numeric_table_widget_item.py +9 -9
  21. lazylabel/ui/photo_viewer.py +54 -54
  22. lazylabel/ui/reorderable_class_table.py +61 -61
  23. lazylabel/ui/right_panel.py +315 -315
  24. lazylabel/ui/widgets/__init__.py +8 -8
  25. lazylabel/ui/widgets/adjustments_widget.py +108 -107
  26. lazylabel/ui/widgets/model_selection_widget.py +93 -93
  27. lazylabel/ui/widgets/settings_widget.py +105 -105
  28. lazylabel/ui/widgets/status_bar.py +109 -109
  29. lazylabel/utils/__init__.py +5 -5
  30. lazylabel/utils/custom_file_system_model.py +132 -132
  31. lazylabel/utils/utils.py +12 -12
  32. {lazylabel_gui-1.1.1.dist-info → lazylabel_gui-1.1.2.dist-info}/METADATA +197 -197
  33. lazylabel_gui-1.1.2.dist-info/RECORD +37 -0
  34. {lazylabel_gui-1.1.1.dist-info → lazylabel_gui-1.1.2.dist-info}/licenses/LICENSE +21 -21
  35. lazylabel_gui-1.1.1.dist-info/RECORD +0 -37
  36. {lazylabel_gui-1.1.1.dist-info → lazylabel_gui-1.1.2.dist-info}/WHEEL +0 -0
  37. {lazylabel_gui-1.1.1.dist-info → lazylabel_gui-1.1.2.dist-info}/entry_points.txt +0 -0
  38. {lazylabel_gui-1.1.1.dist-info → lazylabel_gui-1.1.2.dist-info}/top_level.txt +0 -0
@@ -1,195 +1,195 @@
1
- import os
2
- import cv2
3
- import numpy as np
4
- import torch
5
- import requests
6
- from tqdm import tqdm
7
- from segment_anything import sam_model_registry, SamPredictor
8
-
9
-
10
- def download_model(url, download_path):
11
- """Downloads file with a progress bar."""
12
- print(f"[10/20] SAM model not found. Downloading from Meta's repository...")
13
- print(f" Downloading to: {download_path}")
14
- try:
15
- print(f"[10/20] Connecting to download server...")
16
- response = requests.get(url, stream=True, timeout=30)
17
- response.raise_for_status()
18
- total_size_in_bytes = int(response.headers.get("content-length", 0))
19
- block_size = 1024 # 1 Kibibyte
20
-
21
- print(
22
- f"[10/20] Starting download ({total_size_in_bytes / (1024*1024*1024):.1f} GB)..."
23
- )
24
- progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
25
- with open(download_path, "wb") as file:
26
- for data in response.iter_content(block_size):
27
- progress_bar.update(len(data))
28
- file.write(data)
29
- progress_bar.close()
30
-
31
- if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
32
- raise RuntimeError("Download incomplete - file size mismatch")
33
-
34
- print("[10/20] Model download completed successfully.")
35
-
36
- except requests.exceptions.ConnectionError as e:
37
- raise RuntimeError(
38
- f"[10/20] Network connection failed: Check your internet connection"
39
- )
40
- except requests.exceptions.Timeout as e:
41
- raise RuntimeError(f"[10/20] Download timeout: Server took too long to respond")
42
- except requests.exceptions.HTTPError as e:
43
- raise RuntimeError(
44
- f"[10/20] HTTP error {e.response.status_code}: Server rejected request"
45
- )
46
- except requests.exceptions.RequestException as e:
47
- raise RuntimeError(f"[10/20] Network error during download: {e}")
48
- except PermissionError as e:
49
- raise RuntimeError(
50
- f"[10/20] Permission denied: Cannot write to {download_path}"
51
- )
52
- except OSError as e:
53
- raise RuntimeError(f"[10/20] Disk error: {e} (check available disk space)")
54
- except Exception as e:
55
- # Clean up partial download
56
- if os.path.exists(download_path):
57
- try:
58
- os.remove(download_path)
59
- except:
60
- pass
61
- raise RuntimeError(f"[10/20] Download failed: {e}")
62
-
63
-
64
- class SamModel:
65
- def __init__(
66
- self,
67
- model_type="vit_h",
68
- model_filename="sam_vit_h_4b8939.pth",
69
- custom_model_path=None,
70
- ):
71
- self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
72
- print(f"[9/20] Detected device: {str(self.device).upper()}")
73
-
74
- self.current_model_type = model_type
75
- self.current_model_path = custom_model_path
76
- self.model = None
77
- self.predictor = None
78
- self.image = None
79
- self.is_loaded = False
80
-
81
- try:
82
- if custom_model_path and os.path.exists(custom_model_path):
83
- # Use custom model path
84
- model_path = custom_model_path
85
- print(f"[10/20] Loading custom SAM model from {model_path}...")
86
- else:
87
- # Use default model with download if needed - store in models folder
88
- model_url = (
89
- f"https://dl.fbaipublicfiles.com/segment_anything/{model_filename}"
90
- )
91
-
92
- # Use models folder instead of cache folder
93
- models_dir = os.path.dirname(__file__) # Already in models directory
94
- os.makedirs(models_dir, exist_ok=True)
95
- model_path = os.path.join(models_dir, model_filename)
96
-
97
- # Also check the old cache location and move it if it exists
98
- old_cache_dir = os.path.join(
99
- os.path.expanduser("~"), ".cache", "lazylabel"
100
- )
101
- old_model_path = os.path.join(old_cache_dir, model_filename)
102
-
103
- if os.path.exists(old_model_path) and not os.path.exists(model_path):
104
- print(
105
- f"[10/20] Moving existing model from cache to models folder..."
106
- )
107
- import shutil
108
-
109
- shutil.move(old_model_path, model_path)
110
- elif not os.path.exists(model_path):
111
- # Download the model if it doesn't exist
112
- download_model(model_url, model_path)
113
-
114
- print(f"[10/20] Loading default SAM model from {model_path}...")
115
-
116
- print(f"[11/20] Initializing {model_type.upper()} model architecture...")
117
- self.model = sam_model_registry[model_type](checkpoint=model_path).to(
118
- self.device
119
- )
120
-
121
- print(f"[12/20] Setting up predictor...")
122
- self.predictor = SamPredictor(self.model)
123
- self.is_loaded = True
124
- print("[13/20] SAM model loaded successfully.")
125
-
126
- except Exception as e:
127
- print(f"[8/20] Failed to load SAM model: {e}")
128
- print(f"[8/20] SAM point functionality will be disabled.")
129
- self.is_loaded = False
130
-
131
- def load_custom_model(self, model_path, model_type="vit_h"):
132
- """Load a custom model from the specified path."""
133
- if not os.path.exists(model_path):
134
- print(f"Model file not found: {model_path}")
135
- return False
136
-
137
- print(f"Loading custom SAM model from {model_path}...")
138
- try:
139
- # Clear existing model from memory
140
- if hasattr(self, "model") and self.model is not None:
141
- del self.model
142
- del self.predictor
143
- torch.cuda.empty_cache() if torch.cuda.is_available() else None
144
-
145
- # Load new model
146
- self.model = sam_model_registry[model_type](checkpoint=model_path).to(
147
- self.device
148
- )
149
- self.predictor = SamPredictor(self.model)
150
- self.current_model_type = model_type
151
- self.current_model_path = model_path
152
- self.is_loaded = True
153
-
154
- # Re-set image if one was previously loaded
155
- if self.image is not None:
156
- self.predictor.set_image(self.image)
157
-
158
- print("Custom SAM model loaded successfully.")
159
- return True
160
- except Exception as e:
161
- print(f"Error loading custom model: {e}")
162
- self.is_loaded = False
163
- self.model = None
164
- self.predictor = None
165
- return False
166
-
167
- def set_image(self, image_path):
168
- if not self.is_loaded:
169
- return False
170
- try:
171
- self.image = cv2.imread(image_path)
172
- self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
173
- self.predictor.set_image(self.image)
174
- return True
175
- except Exception as e:
176
- print(f"Error setting image: {e}")
177
- return False
178
-
179
- def predict(self, positive_points, negative_points):
180
- if not self.is_loaded or not positive_points:
181
- return None
182
-
183
- try:
184
- points = np.array(positive_points + negative_points)
185
- labels = np.array([1] * len(positive_points) + [0] * len(negative_points))
186
-
187
- masks, _, _ = self.predictor.predict(
188
- point_coords=points,
189
- point_labels=labels,
190
- multimask_output=False,
191
- )
192
- return masks[0]
193
- except Exception as e:
194
- print(f"Error during prediction: {e}")
195
- return None
1
+ import os
2
+ import cv2
3
+ import numpy as np
4
+ import torch
5
+ import requests
6
+ from tqdm import tqdm
7
+ from segment_anything import sam_model_registry, SamPredictor
8
+
9
+
10
+ def download_model(url, download_path):
11
+ """Downloads file with a progress bar."""
12
+ print(f"[10/20] SAM model not found. Downloading from Meta's repository...")
13
+ print(f" Downloading to: {download_path}")
14
+ try:
15
+ print(f"[10/20] Connecting to download server...")
16
+ response = requests.get(url, stream=True, timeout=30)
17
+ response.raise_for_status()
18
+ total_size_in_bytes = int(response.headers.get("content-length", 0))
19
+ block_size = 1024 # 1 Kibibyte
20
+
21
+ print(
22
+ f"[10/20] Starting download ({total_size_in_bytes / (1024*1024*1024):.1f} GB)..."
23
+ )
24
+ progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
25
+ with open(download_path, "wb") as file:
26
+ for data in response.iter_content(block_size):
27
+ progress_bar.update(len(data))
28
+ file.write(data)
29
+ progress_bar.close()
30
+
31
+ if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
32
+ raise RuntimeError("Download incomplete - file size mismatch")
33
+
34
+ print("[10/20] Model download completed successfully.")
35
+
36
+ except requests.exceptions.ConnectionError as e:
37
+ raise RuntimeError(
38
+ f"[10/20] Network connection failed: Check your internet connection"
39
+ )
40
+ except requests.exceptions.Timeout as e:
41
+ raise RuntimeError(f"[10/20] Download timeout: Server took too long to respond")
42
+ except requests.exceptions.HTTPError as e:
43
+ raise RuntimeError(
44
+ f"[10/20] HTTP error {e.response.status_code}: Server rejected request"
45
+ )
46
+ except requests.exceptions.RequestException as e:
47
+ raise RuntimeError(f"[10/20] Network error during download: {e}")
48
+ except PermissionError as e:
49
+ raise RuntimeError(
50
+ f"[10/20] Permission denied: Cannot write to {download_path}"
51
+ )
52
+ except OSError as e:
53
+ raise RuntimeError(f"[10/20] Disk error: {e} (check available disk space)")
54
+ except Exception as e:
55
+ # Clean up partial download
56
+ if os.path.exists(download_path):
57
+ try:
58
+ os.remove(download_path)
59
+ except:
60
+ pass
61
+ raise RuntimeError(f"[10/20] Download failed: {e}")
62
+
63
+
64
+ class SamModel:
65
+ def __init__(
66
+ self,
67
+ model_type="vit_h",
68
+ model_filename="sam_vit_h_4b8939.pth",
69
+ custom_model_path=None,
70
+ ):
71
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
72
+ print(f"[9/20] Detected device: {str(self.device).upper()}")
73
+
74
+ self.current_model_type = model_type
75
+ self.current_model_path = custom_model_path
76
+ self.model = None
77
+ self.predictor = None
78
+ self.image = None
79
+ self.is_loaded = False
80
+
81
+ try:
82
+ if custom_model_path and os.path.exists(custom_model_path):
83
+ # Use custom model path
84
+ model_path = custom_model_path
85
+ print(f"[10/20] Loading custom SAM model from {model_path}...")
86
+ else:
87
+ # Use default model with download if needed - store in models folder
88
+ model_url = (
89
+ f"https://dl.fbaipublicfiles.com/segment_anything/{model_filename}"
90
+ )
91
+
92
+ # Use models folder instead of cache folder
93
+ models_dir = os.path.dirname(__file__) # Already in models directory
94
+ os.makedirs(models_dir, exist_ok=True)
95
+ model_path = os.path.join(models_dir, model_filename)
96
+
97
+ # Also check the old cache location and move it if it exists
98
+ old_cache_dir = os.path.join(
99
+ os.path.expanduser("~"), ".cache", "lazylabel"
100
+ )
101
+ old_model_path = os.path.join(old_cache_dir, model_filename)
102
+
103
+ if os.path.exists(old_model_path) and not os.path.exists(model_path):
104
+ print(
105
+ f"[10/20] Moving existing model from cache to models folder..."
106
+ )
107
+ import shutil
108
+
109
+ shutil.move(old_model_path, model_path)
110
+ elif not os.path.exists(model_path):
111
+ # Download the model if it doesn't exist
112
+ download_model(model_url, model_path)
113
+
114
+ print(f"[10/20] Loading default SAM model from {model_path}...")
115
+
116
+ print(f"[11/20] Initializing {model_type.upper()} model architecture...")
117
+ self.model = sam_model_registry[model_type](checkpoint=model_path).to(
118
+ self.device
119
+ )
120
+
121
+ print(f"[12/20] Setting up predictor...")
122
+ self.predictor = SamPredictor(self.model)
123
+ self.is_loaded = True
124
+ print("[13/20] SAM model loaded successfully.")
125
+
126
+ except Exception as e:
127
+ print(f"[8/20] Failed to load SAM model: {e}")
128
+ print(f"[8/20] SAM point functionality will be disabled.")
129
+ self.is_loaded = False
130
+
131
+ def load_custom_model(self, model_path, model_type="vit_h"):
132
+ """Load a custom model from the specified path."""
133
+ if not os.path.exists(model_path):
134
+ print(f"Model file not found: {model_path}")
135
+ return False
136
+
137
+ print(f"Loading custom SAM model from {model_path}...")
138
+ try:
139
+ # Clear existing model from memory
140
+ if hasattr(self, "model") and self.model is not None:
141
+ del self.model
142
+ del self.predictor
143
+ torch.cuda.empty_cache() if torch.cuda.is_available() else None
144
+
145
+ # Load new model
146
+ self.model = sam_model_registry[model_type](checkpoint=model_path).to(
147
+ self.device
148
+ )
149
+ self.predictor = SamPredictor(self.model)
150
+ self.current_model_type = model_type
151
+ self.current_model_path = model_path
152
+ self.is_loaded = True
153
+
154
+ # Re-set image if one was previously loaded
155
+ if self.image is not None:
156
+ self.predictor.set_image(self.image)
157
+
158
+ print("Custom SAM model loaded successfully.")
159
+ return True
160
+ except Exception as e:
161
+ print(f"Error loading custom model: {e}")
162
+ self.is_loaded = False
163
+ self.model = None
164
+ self.predictor = None
165
+ return False
166
+
167
+ def set_image(self, image_path):
168
+ if not self.is_loaded:
169
+ return False
170
+ try:
171
+ self.image = cv2.imread(image_path)
172
+ self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
173
+ self.predictor.set_image(self.image)
174
+ return True
175
+ except Exception as e:
176
+ print(f"Error setting image: {e}")
177
+ return False
178
+
179
+ def predict(self, positive_points, negative_points):
180
+ if not self.is_loaded or not positive_points:
181
+ return None
182
+
183
+ try:
184
+ points = np.array(positive_points + negative_points)
185
+ labels = np.array([1] * len(positive_points) + [0] * len(negative_points))
186
+
187
+ masks, _, _ = self.predictor.predict(
188
+ point_coords=points,
189
+ point_labels=labels,
190
+ multimask_output=False,
191
+ )
192
+ return masks[0]
193
+ except Exception as e:
194
+ print(f"Error during prediction: {e}")
195
+ return None
lazylabel/ui/__init__.py CHANGED
@@ -1,8 +1,8 @@
1
- """UI components for LazyLabel."""
2
-
3
- from .main_window import MainWindow
4
- from .control_panel import ControlPanel
5
- from .right_panel import RightPanel
6
- from .hotkey_dialog import HotkeyDialog
7
-
1
+ """UI components for LazyLabel."""
2
+
3
+ from .main_window import MainWindow
4
+ from .control_panel import ControlPanel
5
+ from .right_panel import RightPanel
6
+ from .hotkey_dialog import HotkeyDialog
7
+
8
8
  __all__ = ['MainWindow', 'ControlPanel', 'RightPanel', 'HotkeyDialog']