endoreg-db 0.8.4.1__py3-none-any.whl → 0.8.4.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of endoreg-db might be problematic.
- endoreg_db/management/commands/setup_endoreg_db.py +7 -91
- endoreg_db/models/metadata/model_meta_logic.py +51 -110
- endoreg_db/services/video_import.py +10 -27
- {endoreg_db-0.8.4.1.dist-info → endoreg_db-0.8.4.2.dist-info}/METADATA +1 -1
- {endoreg_db-0.8.4.1.dist-info → endoreg_db-0.8.4.2.dist-info}/RECORD +7 -8
- endoreg_db/management/commands/validate_ai_models.py +0 -124
- {endoreg_db-0.8.4.1.dist-info → endoreg_db-0.8.4.2.dist-info}/WHEEL +0 -0
- {endoreg_db-0.8.4.1.dist-info → endoreg_db-0.8.4.2.dist-info}/licenses/LICENSE +0 -0
endoreg_db/management/commands/setup_endoreg_db.py

@@ -8,10 +8,8 @@ from pathlib import Path
 
 from django.core.management import call_command
 from django.core.management.base import BaseCommand
-
 from endoreg_db.models import ModelMeta
 
-
 class Command(BaseCommand):
     help = """
     Complete setup for EndoReg DB when used as an embedded app.
@@ -121,15 +119,6 @@ class Command(BaseCommand):
             self.stdout.write(self.style.ERROR(f"❌ Failed to create AI model metadata: {e}"))
             return
 
-        # Step 5.5: Validate and fix AI model active metadata
-        self.stdout.write("\n🔧 Step 5.5: Validating AI model active metadata...")
-        try:
-            self._validate_and_fix_ai_model_metadata()
-            self.stdout.write(self.style.SUCCESS("✅ AI model metadata validation completed"))
-        except Exception as e:
-            self.stdout.write(self.style.ERROR(f"❌ Failed to validate AI model metadata: {e}"))
-            return
-
         # Step 6: Verification
         self.stdout.write("\n🔍 Step 6: Verifying setup...")
         try:
@@ -148,11 +137,12 @@ class Command(BaseCommand):
     def _find_model_weights_file(self):
         """Find the model weights file in various possible locations."""
         # Check common locations for model weights
-
+
         if not ModelMeta.objects.exists():
             print("📦 No model metadata found — creating from Hugging Face...")
             ModelMeta.setup_default_from_huggingface(
-                "wg-lux/colo_segmentation_RegNetX800MF_base",
+                "wg-lux/colo_segmentation_RegNetX800MF_base",
+                labelset_name="multilabel_classification_colonoscopy_default"
             )
             print("✅ Default ModelMeta created.")
         possible_paths = [
@@ -172,8 +162,10 @@ class Command(BaseCommand):
                 return path
 
         self.stdout.write("Model weights file not found in standard locations")
-
+
         return None
+
+
 
     def _verify_setup(self):
         """Verify that the setup was successful."""
@@ -213,81 +205,5 @@ class Command(BaseCommand):
         self.stdout.write(f"Found {meta_count} model metadata record(s)")
 
         self.stdout.write("Setup verification passed")
+
 
-    def _validate_and_fix_ai_model_metadata(self):
-        """
-        Validate that all AI models have proper active metadata and fix if necessary.
-        This addresses the "No model metadata found for this model" error.
-        """
-        from endoreg_db.models import AiModel, LabelSet, ModelMeta
-
-        all_models = AiModel.objects.all()
-        fixed_count = 0
-
-        for model in all_models:
-            self.stdout.write(f"Checking model: {model.name}")
-
-            # Check if model has metadata versions
-            metadata_count = model.metadata_versions.count()
-            self.stdout.write(f" Metadata versions: {metadata_count}")
-
-            if metadata_count == 0:
-                # Create metadata for models that don't have any
-                self.stdout.write(f" Creating metadata for {model.name}...")
-
-                # Use existing labelset or create default
-                labelset = LabelSet.objects.first()
-                if not labelset:
-                    labelset = LabelSet.objects.create(name="default_colonoscopy_labels", description="Default colonoscopy classification labels")
-
-                # Create basic metadata
-                meta = ModelMeta.objects.create(
-                    name=model.name,
-                    version="1.0",
-                    model=model,
-                    labelset=labelset,
-                    activation="sigmoid" if "classification" in model.name else "sigmoid",
-                    mean="0.485,0.456,0.406",  # ImageNet defaults
-                    std="0.229,0.224,0.225",  # ImageNet defaults
-                    size_x=224,
-                    size_y=224,
-                    axes="CHW",
-                    batchsize=32,
-                    num_workers=4,
-                    description=f"Auto-generated metadata for {model.name}",
-                )
-
-                model.active_meta = meta
-                model.save()
-                fixed_count += 1
-                self.stdout.write(f" ✅ Created and set metadata for {model.name}")
-
-            elif not model.active_meta:
-                # Model has metadata but no active meta set
-                first_meta = model.metadata_versions.first()
-                if first_meta:
-                    self.stdout.write(f" Setting active metadata for {model.name}...")
-                    model.active_meta = first_meta
-                    model.save()
-                    fixed_count += 1
-                    self.stdout.write(f" ✅ Set active metadata: {first_meta.name} v{first_meta.version}")
-                else:
-                    self.stdout.write(f" ⚠️ No metadata versions available for {model.name}")
-
-            else:
-                self.stdout.write(f" ✅ Model {model.name} has active metadata: {model.active_meta}")
-
-        # Verify all models can get latest version
-        self.stdout.write("\nTesting model metadata access...")
-        for model in all_models:
-            try:
-                latest = model.get_latest_version()
-                self.stdout.write(f" ✅ {model.name}: {latest}")
-            except Exception as e:
-                self.stdout.write(f" ❌ {model.name}: {e}")
-                raise Exception(f"Model {model.name} still has metadata issues: {e}")
-
-        if fixed_count > 0:
-            self.stdout.write(f"Fixed metadata for {fixed_count} model(s)")
-        else:
-            self.stdout.write("All models already had proper metadata")
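Taken together, the setup_endoreg_db.py changes drop the command's own Step 5.5 repair pass and lean on ModelMeta.setup_default_from_huggingface to bootstrap a default metadata record, now pinned to an explicit label set. Below is a minimal sketch of how that bootstrap path would be exercised from a Django shell, assuming migrations and label set data are already loaded; the call and its arguments come from the diff, while the surrounding lines are illustrative only.

```python
# Hedged sketch: bootstrap a default ModelMeta when none exists yet.
# The setup_default_from_huggingface call and its arguments mirror the diff;
# everything around it is illustrative, not part of the package.
from endoreg_db.models import ModelMeta

if not ModelMeta.objects.exists():
    ModelMeta.setup_default_from_huggingface(
        "wg-lux/colo_segmentation_RegNetX800MF_base",
        labelset_name="multilabel_classification_colonoscopy_default",
    )

print(f"ModelMeta records: {ModelMeta.objects.count()}")
```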
endoreg_db/models/metadata/model_meta_logic.py

@@ -2,7 +2,7 @@ import shutil
 from logging import getLogger
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Optional, Type
-
+from django.core.files import File
 from django.db import transaction
 from huggingface_hub import hf_hub_download
 
@@ -18,14 +18,18 @@ if TYPE_CHECKING:
     from .model_meta import ModelMeta  # Import ModelMeta for type hinting
 
 
-def get_latest_version_number_logic(cls: Type["ModelMeta"], meta_name: str, model_name: str) -> int:
+def get_latest_version_number_logic(
+    cls: Type["ModelMeta"], meta_name: str, model_name: str
+) -> int:
     """
     Finds the highest numerical version for a given meta_name and model_name.
     Iterates through all versions, attempts to parse them as integers,
     and returns the maximum integer found. If no numeric versions are found,
     returns 0.
     """
-    versions_qs = cls.objects.filter(name=meta_name, model__name=model_name).values_list("version", flat=True)
+    versions_qs = cls.objects.filter(
+        name=meta_name, model__name=model_name
+    ).values_list("version", flat=True)
 
     max_v = 0
     found_numeric_version = False
@@ -80,17 +84,24 @@ def create_from_file_logic(
 
     if requested_version:
         target_version = str(requested_version)
-        existing = cls.objects.filter(name=meta_name, model=ai_model, version=target_version).first()
+        existing = cls.objects.filter(
+            name=meta_name, model=ai_model, version=target_version
+        ).first()
         if existing and not bump_if_exists:
             raise ValueError(
-                f"ModelMeta '{meta_name}' version '{target_version}' for model '{model_name}' already exists. Use bump_if_exists=True to increment."
+                f"ModelMeta '{meta_name}' version '{target_version}' for model '{model_name}' "
+                f"already exists. Use bump_if_exists=True to increment."
             )
         elif existing and bump_if_exists:
             target_version = str(latest_version_num + 1)
-            logger.info(f"Bumping version for {meta_name}/{model_name} to {target_version}")
+            logger.info(
+                f"Bumping version for {meta_name}/{model_name} to {target_version}"
+            )
     else:
         target_version = str(latest_version_num + 1)
-        logger.info(f"Setting next version for {meta_name}/{model_name} to {target_version}")
+        logger.info(
+            f"Setting next version for {meta_name}/{model_name} to {target_version}"
+        )
 
     # --- Prepare Weights File ---
     source_weights_path = Path(weights_file).resolve()
@@ -100,7 +111,10 @@ def create_from_file_logic(
     # Construct destination path within MEDIA_ROOT/WEIGHTS_DIR
     weights_filename = source_weights_path.name
     # Relative path for the FileField upload_to
-    relative_dest_path = Path(WEIGHTS_DIR.relative_to(STORAGE_DIR)) / f"{meta_name}_v{target_version}_{weights_filename}"
+    relative_dest_path = (
+        Path(WEIGHTS_DIR.relative_to(STORAGE_DIR))
+        / f"{meta_name}_v{target_version}_{weights_filename}"
+    )
     # Full path for shutil.copy
     full_dest_path = STORAGE_DIR / relative_dest_path
 
@@ -113,6 +127,8 @@ def create_from_file_logic(
         logger.info(f"Copied weights from {source_weights_path} to {full_dest_path}")
     except Exception as e:
         raise IOError(f"Failed to copy weights file: {e}") from e
+
+
 
     # --- Create/Update ModelMeta Instance ---
     defaults = {
@@ -130,6 +146,11 @@ def create_from_file_logic(
         version=target_version,
         defaults=defaults,
     )
+
+    with open(full_dest_path, "rb") as f:
+        model_meta.weights.save(relative_dest_path.name, File(f), save=False)
+    model_meta.save()
+
 
     if created:
         logger.info(f"Created new ModelMeta: {model_meta}")
@@ -219,14 +240,22 @@ def get_model_meta_by_name_version_logic(
         try:
             return cls.objects.get(name=meta_name, model=ai_model, version=version)
         except Exception as exc:
-            raise cls.DoesNotExist(f"ModelMeta '{meta_name}' version '{version}' for model '{model_name}' not found.") from exc
+            raise cls.DoesNotExist(
+                f"ModelMeta '{meta_name}' version '{version}' for model '{model_name}' not found."
+            ) from exc
    else:
         # Get latest version
-        latest = cls.objects.filter(name=meta_name, model=ai_model).order_by("-date_created").first()
+        latest = (
+            cls.objects.filter(name=meta_name, model=ai_model)
+            .order_by("-date_created")
+            .first()
+        )
         if latest:
             return latest
         else:
-            raise cls.DoesNotExist(f"No ModelMeta found for '{meta_name}' and model '{model_name}'.")
+            raise cls.DoesNotExist(
+                f"No ModelMeta found for '{meta_name}' and model '{model_name}'."
+            )
 
 
 import re
@@ -244,7 +273,9 @@ def infer_default_model_meta_from_hf(model_id: str) -> dict[str, Any]:
     """
 
     if not (info := model_info(model_id)):
-        logger.info(f"Could not retrieve model info for {model_id}, using ColoReg segmentation defaults.")
+        logger.info(
+            f"Could not retrieve model info for {model_id}, using ColoReg segmentation defaults."
+        )
         return {
             "name": "wg-lux/colo_segmentation_RegNetX800MF_base",
             "activation": "sigmoid",
@@ -293,7 +324,9 @@ def infer_default_model_meta_from_hf(model_id: str) -> dict[str, Any]:
     }
 
 
-def setup_default_from_huggingface_logic(cls, model_id: str, labelset_name: str | None = None):
+def setup_default_from_huggingface_logic(
+    cls, model_id: str, labelset_name: str | None = None
+):
     """
     Downloads model weights from Hugging Face and auto-fills ModelMeta fields.
     """
@@ -307,7 +340,11 @@ def setup_default_from_huggingface_logic(cls, model_id: str, labelset_name: str
     )
 
     ai_model, _ = AiModel.objects.get_or_create(name=meta["name"])
-    labelset = LabelSet.objects.first() if not labelset_name else LabelSet.objects.get(name=labelset_name)
+    labelset = (
+        LabelSet.objects.first()
+        if not labelset_name
+        else LabelSet.objects.get(name=labelset_name)
+    )
 
     return create_from_file_logic(
         cls,
@@ -322,99 +359,3 @@ def setup_default_from_huggingface_logic(cls, model_id: str, labelset_name: str
         size_y=meta["size_y"],
         description=meta["description"],
     )
-
-
-def validate_and_fix_ai_model_metadata_logic():
-    """
-    Validates that all AI models have proper active metadata and fixes any issues.
-    This prevents the "No model metadata found for this model" error.
-
-    Returns:
-        dict: Summary of fixes applied
-    """
-    from ..administration.ai.ai_model import AiModel
-    from ..label.label_set import LabelSet
-
-    summary = {"models_checked": 0, "models_fixed": 0, "metadata_created": 0, "active_meta_set": 0, "errors": []}
-
-    try:
-        all_models = AiModel.objects.all()
-        summary["models_checked"] = all_models.count()
-
-        for model in all_models:
-            logger.info(f"Validating model: {model.name}")
-
-            # Check if model has metadata versions
-            metadata_count = model.metadata_versions.count()
-
-            if metadata_count == 0:
-                # Create metadata for models that don't have any
-                logger.info(f"Creating metadata for {model.name}")
-
-                # Use existing labelset or create default
-                labelset = LabelSet.objects.first()
-                if not labelset:
-                    labelset = LabelSet.objects.create(name="default_colonoscopy_labels", description="Default colonoscopy classification labels")
-
-                # Import here to avoid circular imports
-                from .model_meta import ModelMeta
-
-                # Create basic metadata
-                meta = ModelMeta.objects.create(
-                    name=model.name,
-                    version="1.0",
-                    model=model,
-                    labelset=labelset,
-                    activation="sigmoid" if "classification" in model.name else "sigmoid",
-                    mean="0.485,0.456,0.406",  # ImageNet defaults
-                    std="0.229,0.224,0.225",  # ImageNet defaults
-                    size_x=224,
-                    size_y=224,
-                    axes="CHW",
-                    batchsize=32,
-                    num_workers=4,
-                    description=f"Auto-generated metadata for {model.name}",
-                )
-
-                model.active_meta = meta
-                model.save()
-                summary["models_fixed"] += 1
-                summary["metadata_created"] += 1
-                logger.info(f"Created and set metadata for {model.name}")
-
-            elif not model.active_meta:
-                # Model has metadata but no active meta set
-                first_meta = model.metadata_versions.first()
-                if first_meta:
-                    logger.info(f"Setting active metadata for {model.name}")
-                    model.active_meta = first_meta
-                    model.save()
-                    summary["models_fixed"] += 1
-                    summary["active_meta_set"] += 1
-                    logger.info(f"Set active metadata: {first_meta.name} v{first_meta.version}")
-                else:
-                    error_msg = f"No metadata versions available for {model.name}"
-                    logger.warning(error_msg)
-                    summary["errors"].append(error_msg)
-
-            else:
-                logger.info(f"Model {model.name} has valid active metadata: {model.active_meta}")
-
-        # Verify all models can get latest version
-        logger.info("Testing model metadata access...")
-        for model in all_models:
-            try:
-                latest = model.get_latest_version()
-                logger.info(f"✅ {model.name}: {latest}")
-            except Exception as e:
-                error_msg = f"Model {model.name} metadata test failed: {e}"
-                logger.error(error_msg)
-                summary["errors"].append(error_msg)
-
-        return summary
-
-    except Exception as e:
-        error_msg = f"Validation failed: {e}"
-        logger.error(error_msg)
-        summary["errors"].append(error_msg)
-        return summary
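The main behavioural change in model_meta_logic.py is that the copied weights file is now also attached to the ModelMeta.weights FileField through Django's File wrapper before a single explicit save. A self-contained sketch of that pattern follows; the model and field names are hypothetical placeholders, and only the FieldFile.save(..., save=False) call followed by an explicit model save is taken from the diff.

```python
# Sketch of attaching an on-disk file to a Django FileField, mirroring the
# pattern added in create_from_file_logic. "instance" and "weights" are
# hypothetical placeholders for any model with a FileField.
from pathlib import Path

from django.core.files import File


def attach_weights(instance, weights_path: Path) -> None:
    """Attach a local weights file to instance.weights and persist the row once."""
    with open(weights_path, "rb") as fh:
        # save=False defers the database write so the row is written exactly once below.
        instance.weights.save(weights_path.name, File(fh), save=False)
    instance.save()
```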
endoreg_db/services/video_import.py

@@ -18,6 +18,7 @@ from contextlib import contextmanager
 from pathlib import Path
 from typing import Union, Dict, Any, Optional, List, Tuple
 from django.db import transaction
+from lx_anonymizer import FrameCleaner
 from moviepy import video
 from endoreg_db.models import VideoFile, SensitiveMeta
 from endoreg_db.utils.paths import STORAGE_DIR, VIDEO_DIR, ANONYM_VIDEO_DIR
@@ -55,18 +56,7 @@ class VideoImportService():
         self.project_root = Path(__file__).parent.parent.parent.parent
 
         # Track processed files to prevent duplicates
-
-            # Ensure anonym_video directory exists before listing files
-            anonym_video_dir = Path(ANONYM_VIDEO_DIR)
-            if anonym_video_dir.exists():
-                self.processed_files = set(str(anonym_video_dir / file) for file in os.listdir(ANONYM_VIDEO_DIR))
-            else:
-                logger.info(f"Creating anonym_videos directory: {anonym_video_dir}")
-                anonym_video_dir.mkdir(parents=True, exist_ok=True)
-                self.processed_files = set()
-        except Exception as e:
-            logger.warning(f"Failed to initialize processed files tracking: {e}")
-            self.processed_files = set()
+        self.processed_files = set(str(Path(ANONYM_VIDEO_DIR) / file) for file in os.listdir(ANONYM_VIDEO_DIR))
 
         # Central video instance and processing context
         self.current_video: Optional[VideoFile] = None
@@ -75,6 +65,8 @@ class VideoImportService():
         self.delete_source = True
 
         self.logger = logging.getLogger(__name__)
+
+        self.cleaner = None  # This gets instantiated in the perform_frame_cleaning method
 
     def _require_current_video(self) -> VideoFile:
         """Return the current VideoFile or raise if it has not been initialized."""
@@ -157,9 +149,6 @@ class VideoImportService():
         High-level helper that orchestrates the complete video import and anonymization process.
         Uses the central video instance pattern for improved state management.
         """
-        # DEFENSIVE: Initialize processing_context immediately to prevent KeyError crashes
-        self.processing_context = {'file_path': Path(file_path)}
-
         try:
             # Initialize processing context
             self._initialize_processing_context(file_path, center_name, processor_name,
@@ -196,12 +185,7 @@ class VideoImportService():
             return self.current_video
 
         except Exception as e:
-
-            safe_file_path = getattr(self, 'processing_context', {}).get('file_path', file_path)
-            # Debug: Log context state for troubleshooting
-            context_keys = list(getattr(self, 'processing_context', {}).keys())
-            self.logger.debug(f"Context keys during error: {context_keys}")
-            self.logger.error(f"Video import and anonymization failed for {safe_file_path}: {e}")
+            self.logger.error(f"Video import and anonymization failed for {file_path}: {e}")
             self._cleanup_on_error()
             raise
         finally:
@@ -840,7 +824,7 @@ class VideoImportService():
             from lx_anonymizer import FrameCleaner  # type: ignore[import]
 
             if FrameCleaner:
-                return True, FrameCleaner
+                return True, FrameCleaner()
 
         except Exception as e:
             self.logger.warning(f"Frame cleaning not available: {e} Please install or update lx_anonymizer.")
@@ -869,10 +853,13 @@ class VideoImportService():
 
 
         # Create temporary output path for cleaned video
-        video_filename = self.processing_context.get('video_filename', Path(raw_video_path).name)
+        video_filename = self.processing_context.get('video_filename', Path(raw_video_path).name if raw_video_path else "video.mp4")
         cleaned_filename = f"cleaned_{video_filename}"
+        if not raw_video_path:
+            raise RuntimeError("raw_video_path is None after fallback, cannot construct cleaned_video_path")
         cleaned_video_path = Path(raw_video_path).parent / cleaned_filename
 
+
 
 
         # Clean video with ROI masking (heavy I/O operation)
@@ -988,10 +975,6 @@ class VideoImportService():
         This method is always called in the finally block of import_and_anonymize()
         to ensure the file lock is released even if processing fails.
         """
-        # DEFENSIVE: Ensure processing_context exists before accessing it
-        if not hasattr(self, 'processing_context'):
-            self.processing_context = {}
-
         try:
             # Release file lock if it was acquired
             lock_context = self.processing_context.get('_lock_context')
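In video_import.py the availability probe now returns an instantiated FrameCleaner instead of the class, and the service keeps self.cleaner as None until frame cleaning is actually performed. Below is a rough sketch of that optional-dependency check; only the lx_anonymizer import and the no-argument FrameCleaner() constructor are taken from the diff, and the wrapper function itself is illustrative rather than the service's exact API.

```python
# Rough sketch of an optional-dependency probe that returns a ready-to-use
# cleaner instance, as the diff now does. Only the lx_anonymizer import and
# the no-argument FrameCleaner() constructor are taken from the diff.
import logging
from typing import Optional, Tuple

logger = logging.getLogger(__name__)


def frame_cleaning_available() -> Tuple[bool, Optional[object]]:
    """Return (True, FrameCleaner instance) if lx_anonymizer is usable, else (False, None)."""
    try:
        from lx_anonymizer import FrameCleaner  # type: ignore[import]
        return True, FrameCleaner()
    except Exception as exc:
        logger.warning("Frame cleaning not available: %s. Please install or update lx_anonymizer.", exc)
        return False, None
```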
{endoreg_db-0.8.4.1.dist-info → endoreg_db-0.8.4.2.dist-info}/RECORD

@@ -290,11 +290,10 @@ endoreg_db/management/commands/load_unit_data.py,sha256=tcux-iL-ByT2ApgmHEkLllZS
 endoreg_db/management/commands/load_user_groups.py,sha256=D7SK2FvZEHoE4TIXNGCjDw5_12MH9bpGZvoS7eEv0Os,1031
 endoreg_db/management/commands/register_ai_model.py,sha256=KixTfuQR6TUfRmzB5GOos16BFOz7NL4TzLzBkgtPPgE,2510
 endoreg_db/management/commands/reset_celery_schedule.py,sha256=U-m_FNRTw6LAwJoT9RUE4qrhmQXm7AyFToPcHYyJpIE,386
-endoreg_db/management/commands/setup_endoreg_db.py,sha256=
+endoreg_db/management/commands/setup_endoreg_db.py,sha256=efOXE6IQs4ey84tIncf6zXI2VVVRd7CYXbeuACFgkgI,9095
 endoreg_db/management/commands/start_filewatcher.py,sha256=3jESBqRiYPa9f35--zd70qQaYnyT0tzRO_b_HJuyteQ,4093
 endoreg_db/management/commands/storage_management.py,sha256=NpToX59ndwTFNmnSoeppmiPdMvpjSHH7mAdIe4SvUoI,22396
 endoreg_db/management/commands/summarize_db_content.py,sha256=pOIz3qbY4Ktmh0zV_DKFx971VD0pPx027gCD7a47EL0,10766
-endoreg_db/management/commands/validate_ai_models.py,sha256=Z7Ga-PndTFVG8GnkYbS58h8ofiyhnxZDcyP5Qqpl1c8,4684
 endoreg_db/management/commands/validate_video.py,sha256=cns_kNgztyp6XTeXuDeLEet8vAATkpxZwJuSWuQ5Olk,11302
 endoreg_db/management/commands/validate_video_files.py,sha256=0lvA0Z8BKiibjyqc4ueI646IIc5bKI3sIOxiiF5_bTk,6509
 endoreg_db/management/commands/video_validation.py,sha256=xnAoCPB44dmnRbn6FqUjqRXQ-ZhDPNX1T5kCpAU8sgc,771
@@ -465,7 +464,7 @@ endoreg_db/models/medical/risk/risk_type.py,sha256=kEugcaWSTEWH_Vxq4dcF80Iv1L4_K
 endoreg_db/models/metadata/__init__.py,sha256=8I6oLj3YTmeaPGJpL0AWG5gLwp38QzrEggxSkTisv7c,474
 endoreg_db/models/metadata/frame_ocr_result.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 endoreg_db/models/metadata/model_meta.py,sha256=F_r-PTLeNi4J-4EaGCQkGIguhdl7Bwba7_i56ZAjc-4,7589
-endoreg_db/models/metadata/model_meta_logic.py,sha256=
+endoreg_db/models/metadata/model_meta_logic.py,sha256=pcpKf9J5DUiEG-D-VhOPcnFjBznkx5jK7EGv0zDMQm8,12440
 endoreg_db/models/metadata/pdf_meta.py,sha256=BTmpSgqxmPKi0apcNjyrZAS4AFKCPXVdBd6VBeyyv6E,3174
 endoreg_db/models/metadata/sensitive_meta.py,sha256=ekLHrW-b5uYcjfkRd0EW5ncx5ef8Bu-K6msDkpWCAbk,13034
 endoreg_db/models/metadata/sensitive_meta_logic.py,sha256=by3eCW8CgglK1SHiDOepHhTOGaugswxJhkH0BZp7-gs,33909
@@ -603,7 +602,7 @@ endoreg_db/services/pseudonym_service.py,sha256=CJhbtRa6K6SPbphgCZgEMi8AFQtB18CU
 endoreg_db/services/requirements_object.py,sha256=290zf8AEbVtCoHhW4Jr7_ud-RvrqYmb1Nz9UBHtTnc0,6164
 endoreg_db/services/segment_sync.py,sha256=YgHvIHkbW4mqCu0ACf3zjRSZnNfxWwt4gh5syUVXuE0,6400
 endoreg_db/services/storage_aware_video_processor.py,sha256=kKFK64vXLeBSVkp1YJonU3gFDTeXZ8C4qb9QZZB99SE,13420
-endoreg_db/services/video_import.py,sha256=
+endoreg_db/services/video_import.py,sha256=0eeY5etJ4rg6uxC-uUis_yoa6cvGgwY2VaiD3mVFosg,46369
 endoreg_db/tasks/upload_tasks.py,sha256=OJq7DhNwcbWdXzHY8jz5c51BCVkPN5gSWOz-6Fx6W5M,7799
 endoreg_db/tasks/video_ingest.py,sha256=kxFuYkHijINV0VabQKCFVpJRv6eCAw07tviONurDgg8,5265
 endoreg_db/tasks/video_processing_tasks.py,sha256=rZ7Kr49bAR4Q-vALO2SURebrhcJ5hSFGwjF4aULrOao,14089
@@ -787,7 +786,7 @@ endoreg_db/views/video/video_meta.py,sha256=C1wBMTtQb_yzEUrhFGAy2UHEWMk_CbU75WXX
 endoreg_db/views/video/video_processing_history.py,sha256=mhFuS8RG5GV8E-lTtuD0qrq-bIpnUFp8vy9aERfC-J8,770
 endoreg_db/views/video/video_remove_frames.py,sha256=2FmvNrSPM0fUXiBxINN6vBUUDCqDlBkNcGR3WsLDgKo,1696
 endoreg_db/views/video/video_stream.py,sha256=kLyuf0ORTmsLeYUQkTQ6iRYqlIQozWhMMR3Lhfe_trk,12148
-endoreg_db-0.8.4.
-endoreg_db-0.8.4.
-endoreg_db-0.8.4.
-endoreg_db-0.8.4.
+endoreg_db-0.8.4.2.dist-info/METADATA,sha256=xSaPMvzzc7HovOmkl2Gn0lYVoRNTPk4yqIBFcz0kc70,14758
+endoreg_db-0.8.4.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+endoreg_db-0.8.4.2.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+endoreg_db-0.8.4.2.dist-info/RECORD,,
endoreg_db/management/commands/validate_ai_models.py (removed)

@@ -1,124 +0,0 @@
-"""
-Django management command to validate and fix AI model metadata issues.
-This command addresses the "No model metadata found for this model" error.
-"""
-
-from django.core.management.base import BaseCommand
-
-from endoreg_db.models.metadata.model_meta_logic import validate_and_fix_ai_model_metadata_logic
-
-
-class Command(BaseCommand):
-    help = """
-    Validate and fix AI model metadata to prevent processing errors.
-
-    This command:
-    - Checks all AI models for proper metadata configuration
-    - Creates missing metadata with sensible defaults
-    - Sets active metadata for models that don't have it
-    - Validates that all models can access their latest versions
-
-    Use this command to fix "No model metadata found for this model" errors.
-    """
-
-    def add_arguments(self, parser):
-        parser.add_argument(
-            "--dry-run",
-            action="store_true",
-            help="Show what would be fixed without making changes",
-        )
-        parser.add_argument(
-            "--force",
-            action="store_true",
-            help="Force recreation of existing metadata",
-        )
-
-    def handle(self, *args, **options):
-        dry_run = options.get("dry_run", False)
-        force = options.get("force", False)
-
-        self.stdout.write(self.style.SUCCESS("🔍 Validating AI model metadata..."))
-
-        if dry_run:
-            self.stdout.write(self.style.WARNING("🧪 DRY RUN MODE - No changes will be made"))
-
-        try:
-            if dry_run:
-                # In dry run, we just check and report issues
-                summary = self._check_ai_models_dry_run()
-            else:
-                # Actually fix the issues
-                summary = validate_and_fix_ai_model_metadata_logic()
-
-            # Report results
-            self._report_summary(summary)
-
-            if summary["errors"]:
-                self.stdout.write(self.style.ERROR("❌ Validation completed with errors"))
-                for error in summary["errors"]:
-                    self.stdout.write(self.style.ERROR(f" - {error}"))
-                return
-
-            self.stdout.write(self.style.SUCCESS("✅ AI model metadata validation completed successfully"))
-
-        except Exception as e:
-            self.stdout.write(self.style.ERROR(f"❌ Validation failed: {e}"))
-            raise
-
-    def _check_ai_models_dry_run(self):
-        """Check AI models without making changes."""
-        from endoreg_db.models import AiModel
-
-        summary = {"models_checked": 0, "models_fixed": 0, "metadata_created": 0, "active_meta_set": 0, "errors": []}
-
-        all_models = AiModel.objects.all()
-        summary["models_checked"] = all_models.count()
-
-        for model in all_models:
-            self.stdout.write(f"Checking model: {model.name}")
-
-            metadata_count = model.metadata_versions.count()
-            self.stdout.write(f" Metadata versions: {metadata_count}")
-
-            if metadata_count == 0:
-                self.stdout.write(f" 🔧 Would create metadata for {model.name}")
-                summary["models_fixed"] += 1
-                summary["metadata_created"] += 1
-
-            elif not model.active_meta:
-                self.stdout.write(f" 🔧 Would set active metadata for {model.name}")
-                summary["models_fixed"] += 1
-                summary["active_meta_set"] += 1
-
-            else:
-                self.stdout.write(f" ✅ Model {model.name} has valid active metadata")
-
-            # Test metadata access
-            try:
-                latest = model.get_latest_version()
-                self.stdout.write(f" ✅ Can access latest version: {latest}")
-            except Exception as e:
-                error_msg = f"Model {model.name} metadata test failed: {e}"
-                self.stdout.write(self.style.ERROR(f" ❌ {error_msg}"))
-                summary["errors"].append(error_msg)
-
-        return summary
-
-    def _report_summary(self, summary):
-        """Report the validation summary."""
-        self.stdout.write("\n📊 Validation Summary:")
-        self.stdout.write(f" Models checked: {summary['models_checked']}")
-
-        if summary["models_fixed"] > 0:
-            self.stdout.write(f" Models fixed: {summary['models_fixed']}")
-
-        if summary["metadata_created"] > 0:
-            self.stdout.write(f" Metadata created: {summary['metadata_created']}")
-
-        if summary["active_meta_set"] > 0:
-            self.stdout.write(f" Active metadata set: {summary['active_meta_set']}")
-
-        if summary["errors"]:
-            self.stdout.write(f" Errors encountered: {len(summary['errors'])}")
-        else:
-            self.stdout.write(" No errors found")

File without changes: {endoreg_db-0.8.4.1.dist-info → endoreg_db-0.8.4.2.dist-info}/WHEEL
File without changes: {endoreg_db-0.8.4.1.dist-info → endoreg_db-0.8.4.2.dist-info}/licenses/LICENSE