endoreg-db 0.8.4.4__py3-none-any.whl → 0.8.4.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of endoreg-db might be problematic.
- endoreg_db/management/commands/load_ai_model_data.py +2 -1
- endoreg_db/models/media/video/pipe_1.py +30 -33
- endoreg_db/models/metadata/model_meta_logic.py +15 -1
- {endoreg_db-0.8.4.4.dist-info → endoreg_db-0.8.4.6.dist-info}/METADATA +1 -1
- {endoreg_db-0.8.4.4.dist-info → endoreg_db-0.8.4.6.dist-info}/RECORD +7 -7
- {endoreg_db-0.8.4.4.dist-info → endoreg_db-0.8.4.6.dist-info}/WHEEL +0 -0
- {endoreg_db-0.8.4.4.dist-info → endoreg_db-0.8.4.6.dist-info}/licenses/LICENSE +0 -0
endoreg_db/management/commands/load_ai_model_data.py

@@ -9,6 +9,7 @@ from ...data import (
 )
 from ...models import (
     AiModel,
+    LabelSet,  # Add LabelSet import
     ModelMeta,  # Add ModelMeta back to imports
     ModelType,
     VideoSegmentationLabel,
@@ -35,7 +36,7 @@ IMPORT_METADATA = {
         "dir": AI_MODEL_META_DATA_DIR,  # e.g. "ai_model_meta"
         "model": ModelMeta,  # e.g. ModelMeta
         "foreign_keys": ["labelset", "model"],  # Foreign key relationships
-        "foreign_key_models": [
+        "foreign_key_models": [LabelSet, AiModel],  # Actual model classes
     },
     VideoSegmentationLabel.__name__: {
         "dir": VIDEO_SEGMENTATION_LABEL_DATA_DIR,  # e.g. "interventions"
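Note: the "foreign_keys" / "foreign_key_models" pairing above implies that the data loader resolves name references from the YAML fixtures into actual model instances. A minimal sketch of that idea, assuming the related models are looked up by a "name" natural key (the real command in load_ai_model_data.py may differ):

def resolve_foreign_keys(row: dict, foreign_keys: list, foreign_key_models: list) -> dict:
    """Hypothetical helper: swap name references in a fixture row for model instances."""
    resolved = dict(row)
    for field, model_cls in zip(foreign_keys, foreign_key_models):
        if row.get(field) is not None:
            # assumes related models expose "name" as their natural key
            resolved[field] = model_cls.objects.get(name=row[field])
    return resolved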
endoreg_db/models/media/video/pipe_1.py

@@ -1,20 +1,23 @@
 import logging
-from typing import TYPE_CHECKING,
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+
 from django.db import transaction
+
 from endoreg_db.helpers.download_segmentation_model import download_segmentation_model

 # Added imports

 # Configure logging
-logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)  # Changed from "video_file"

 if TYPE_CHECKING:
     from endoreg_db.models import VideoFile

-
 # --- Pipeline 1 ---
+
+
 def _pipe_1(
-    video_file:"VideoFile",
+    video_file: "VideoFile",
     model_name: str,
     model_meta_version: Optional[int] = None,
     delete_frames_after: bool = False,
@@ -28,11 +31,12 @@ def _pipe_1(
     """
     Pipeline 1: Extract frames, text, predict, create segments, optionally delete frames.
     """
-    success = False
-    from .video_file_segments import _convert_sequences_to_db_segments  # Added import
-    from ...metadata import ModelMeta, VideoPredictionMeta
+    success = False  # Initialize success flag
     from endoreg_db.models import AiModel, LabelVideoSegment

+    from ...metadata import ModelMeta, VideoPredictionMeta
+    from .video_file_segments import _convert_sequences_to_db_segments  # Added import
+
     video_file.refresh_from_db()
     video_file.update_video_meta()

@@ -43,16 +47,12 @@ def _pipe_1(
     video_file.extract_frames(overwrite=False)  # Avoid overwriting if already extracted

     logger.info("Pipe 1: Extracting text metadata...")
-    video_file.update_text_metadata(
-        ocr_frame_fraction=ocr_frame_fraction, cap=ocr_cap, overwrite=False
-    )
+    video_file.update_text_metadata(ocr_frame_fraction=ocr_frame_fraction, cap=ocr_cap, overwrite=False)
     with transaction.atomic():
         state = video_file.get_or_create_state()
         if not state.frames_extracted:
             logger.error("Pipe 1 failed: Frame extraction did not complete successfully.")
             return False
-
-

     # 3. Perform Initial Prediction
     logger.info(f"Pipe 1: Performing prediction with model '{model_name}'...")
@@ -74,7 +74,7 @@ def _pipe_1(
     except AiModel.DoesNotExist:
         logger.error(f"Pipe 1 failed: Model '{model_name}' not found.")
         return False
-
+
     except ModelMeta.DoesNotExist:
         try:
             model_name = download_segmentation_model()
@@ -82,11 +82,9 @@ def _pipe_1(
         if model_meta_version is not None:
             model_meta = ai_model_obj.metadata_versions.get(version=model_meta_version)
         else:
-            model_meta =
+            model_meta = ai_model_obj.get_latest_version()
     except ModelMeta.DoesNotExist:
-        logger.error(
-            f"Pipe 1 failed: ModelMeta version {model_meta_version} for model '{model_name}' not found."
-        )
+        logger.error(f"Pipe 1 failed: ModelMeta version {model_meta_version} for model '{model_name}' not found.")
         return False
     try:
         sequences: Optional[Dict[str, List[Tuple[int, int]]]] = video_file.predict_video(
@@ -107,7 +105,7 @@ def _pipe_1(

         # --- Set and Save State ---
         state.initial_prediction_completed = True
-        state.save(update_fields=[
+        state.save(update_fields=["initial_prediction_completed"])
         logger.info("Pipe 1: Set initial_prediction_completed state to True.")

         logger.info(f"Pipe 1: Sequences returned from prediction: {sequences}")
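Note: passing update_fields to save(), as in the hunk above, is standard Django; the UPDATE statement touches only the listed columns instead of rewriting the whole row. A minimal sketch, with VideoState as a hypothetical stand-in for the real state model:

from django.db import models

class VideoState(models.Model):  # hypothetical stand-in for the pipeline's state model
    initial_prediction_completed = models.BooleanField(default=False)
    lvs_created = models.BooleanField(default=False)

def mark_prediction_done(state: VideoState) -> None:
    state.initial_prediction_completed = True
    # writes only this column; fields changed elsewhere are left untouched
    state.save(update_fields=["initial_prediction_completed"])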
@@ -117,9 +115,7 @@ def _pipe_1(
         # 4. Create LabelVideoSegments
         logger.info("Pipe 1: Creating LabelVideoSegments from predictions...")
         try:
-            video_prediction_meta = VideoPredictionMeta.objects.get(
-                video_file=video_file, model_meta=model_meta
-            )
+            video_prediction_meta = VideoPredictionMeta.objects.get(video_file=video_file, model_meta=model_meta)
             logger.info(f"Pipe 1: Calling _convert_sequences_to_db_segments for video {video_file.uuid} with prediction meta {video_prediction_meta.pk}")
             _convert_sequences_to_db_segments(
                 video=video_file,
@@ -127,9 +123,9 @@ def _pipe_1(
                 video_prediction_meta=video_prediction_meta,
             )
             video_file.sequences = sequences
-            video_file.save(update_fields=[
+            video_file.save(update_fields=["sequences"])
             state.lvs_created = True
-            state.save(update_fields=[
+            state.save(update_fields=["lvs_created"])
             logger.info("Pipe 1: Set lvs_created state to True.")
             logger.info("Pipe 1: LabelVideoSegment creation complete.")
             lvs_count_after = LabelVideoSegment.objects.filter(video_file=video_file).count()
@@ -139,7 +135,7 @@ def _pipe_1(
             raise

         logger.info(f"Pipe 1 completed successfully for video {video_file.uuid}")
-        success = True
+        success = True  # Set success flag
         return True

     except Exception as e:
@@ -147,7 +143,7 @@ def _pipe_1(
         return False
     finally:
         # 5. Optionally delete frames
-        if delete_frames_after and success:
+        if delete_frames_after and success:  # Check success flag
            logger.info("Pipe 1: Deleting frames after processing...")
            try:
                video_file.delete_frames()
@@ -156,15 +152,16 @@ def _pipe_1(
                logger.error(f"Pipe 1 failed during frame deletion: {e}", exc_info=True)
         else:
             logger.info("Pipe 1: Frame deletion skipped.")
-
+
+
 # --- Test after Pipe 1 ---
-def _test_after_pipe_1(video_file:"VideoFile", start_frame: int = 0, end_frame: int = 100) -> bool:
+def _test_after_pipe_1(video_file: "VideoFile", start_frame: int = 0, end_frame: int = 100) -> bool:
     """
     Simulates human annotation validation after Pipe 1.
     Creates 'outside' segments and marks sensitive meta as verified.
     """
-    from ...label import
-
+    from ...label import Label, LabelVideoSegment
+
     logger.info(f"Starting _test_after_pipe_1 for video {video_file.uuid}")
     try:
         # 1. Create 'outside' LabelVideoSegments
@@ -172,15 +169,15 @@ def _test_after_pipe_1(video_file:"VideoFile", start_frame: int = 0, end_frame:
         outside_label = Label.objects.get(name__iexact="outside")
         logger.info(f"Creating 'outside' annotation segment [{start_frame}-{end_frame}]")
         # Create a segment - assuming custom_create handles saving
-        outside_segment = LabelVideoSegment.objects.create(
+        outside_segment = LabelVideoSegment.objects.create(  # Assign to variable
             video_file=video_file,
             label=outside_label,
             start_frame_number=start_frame,
             end_frame_number=end_frame,
-            prediction_meta=None,
+            prediction_meta=None,
         )
         # Ensure the segment has a state and mark it as validated
-        segment_state, created = outside_segment.get_or_create_state()
+        segment_state, created = outside_segment.get_or_create_state()  # Unpack the tuple
         segment_state.is_validated = True
         segment_state.save()
         logger.info(f"Marked 'outside' segment {outside_segment.pk} as validated. Created: {created}")
@@ -202,7 +199,7 @@ def _test_after_pipe_1(video_file:"VideoFile", start_frame: int = 0, end_frame:
             # Example: using a boolean field
             video_file.sensitive_meta.state.dob_verified = True
             video_file.sensitive_meta.state.names_verified = True
-            video_file.sensitive_meta.state.save()
+            video_file.sensitive_meta.state.save()  # Save the SensitiveMeta instance
             logger.info("Sensitive meta state updated.")

         else:
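Note: the "# Unpack the tuple" comment above follows the usual Django get_or_create convention of returning an (instance, created) pair; forgetting to unpack leaves you holding a tuple instead of the object. A short illustration, assuming Label is importable from endoreg_db.models like the other models in this diff:

from endoreg_db.models import Label  # assumption: exported like AiModel above

outside_label, created = Label.objects.get_or_create(name="outside")
if created:
    print("Created a new 'outside' label")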
endoreg_db/models/metadata/model_meta_logic.py

@@ -19,6 +19,13 @@ if TYPE_CHECKING:
     from .model_meta import ModelMeta  # Import ModelMeta for type hinting


+def _get_model_meta_class():
+    """Lazy import to avoid circular imports"""
+    from .model_meta import ModelMeta
+
+    return ModelMeta
+
+
 def get_latest_version_number_logic(cls: Type["ModelMeta"], meta_name: str, model_name: str) -> int:
     """
     Finds the highest numerical version for a given meta_name and model_name.
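Note: _get_model_meta_class defers the ModelMeta import to call time, so model_meta_logic.py can be imported even while model_meta.py is still being initialized, breaking the circular import. Callers resolve the class lazily and then query as usual; a sketch in which the latest_meta_for helper and the "-version" ordering are assumptions, not package code:

def latest_meta_for(ai_model):
    """Hypothetical caller of the lazy accessor: resolve the class, then query."""
    ModelMeta = _get_model_meta_class()  # import happens here, not at module load time
    return ModelMeta.objects.filter(model=ai_model).order_by("-version").first()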
@@ -308,7 +315,14 @@ def setup_default_from_huggingface_logic(cls, model_id: str, labelset_name: str
     )

     ai_model, _ = AiModel.objects.get_or_create(name=meta["name"])
-
+    if not labelset_name:
+        labelset = LabelSet.objects.first()
+        if not labelset:
+            raise ValueError("No labelset found and no labelset_name provided")
+    else:
+        labelset = LabelSet.objects.get(name=labelset_name)
+
+    ModelMeta = _get_model_meta_class()
     model_meta = ModelMeta.objects.filter(name=meta["name"], model=ai_model).first()
     if model_meta:
         logger.info(f"ModelMeta {meta['name']} for model {ai_model.name} already exists. Skipping creation.")
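Note on the fallback above: when labelset_name is empty, LabelSet.objects.first() is used; with no ordering defined Django orders by primary key, so this effectively picks the lowest-pk (typically oldest) labelset rather than any specific named one. If a deterministic choice by name is preferred, an explicit ordering is the usual fix (illustrative only):

labelset = LabelSet.objects.order_by("name").first()  # deterministic fallback by name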
{endoreg_db-0.8.4.4.dist-info → endoreg_db-0.8.4.6.dist-info}/RECORD

@@ -258,7 +258,7 @@ endoreg_db/management/commands/import_report.py,sha256=vFst-NeQdL-w62yoH4kDamq-2
 endoreg_db/management/commands/import_video.py,sha256=AMvgi1eN0F_hjhgnNNYIFkJtHfjalBfh2lfDxw6VTzE,17980
 endoreg_db/management/commands/import_video_with_classification.py,sha256=ulZH5jvAWu_pJ1kI9B3hbIO1-p_BReY0zbIQDS_d9OI,14726
 endoreg_db/management/commands/init_default_ai_model.py,sha256=98yBigGZ5gkA-b1LPcvzS5x2jAms3pX58fU-TEAcjKw,4669
-endoreg_db/management/commands/load_ai_model_data.py,sha256=
+endoreg_db/management/commands/load_ai_model_data.py,sha256=ba2z-0qWtweUC6iYyiusH3xGDkpkYNNlIjei3QK7YAA,2887
 endoreg_db/management/commands/load_ai_model_label_data.py,sha256=jnm2720TsnRTBKF6guwnjLo7sropW_YoRjgyjo1TUr8,2143
 endoreg_db/management/commands/load_base_db_data.py,sha256=0Go2cYbqfx6MBSeQaHPAq22yeJxOyX25xpmcBcE9Auw,9374
 endoreg_db/management/commands/load_center_data.py,sha256=GQpbe7dxgbTgd66oBqrBXax-os3ibnPmCeSEFDVauPU,2570
@@ -387,7 +387,7 @@ endoreg_db/models/media/pdf/report_reader/report_reader_config.py,sha256=wYVDmPS
 endoreg_db/models/media/pdf/report_reader/report_reader_flag.py,sha256=j9tjbLRenxpWfeaseALl8rV2Dqem9YaM_duS1iJkARU,536
 endoreg_db/models/media/video/__init__.py,sha256=ifW4SXXN2q6wAuFwSP7XlYskpX7UX6uy0py5mpCCOCM,211
 endoreg_db/models/media/video/create_from_file.py,sha256=3n4bbzFteEOFDUuEikP0x-StCKI5R5IhyKC7o3kLZ6Y,15128
-endoreg_db/models/media/video/pipe_1.py,sha256=
+endoreg_db/models/media/video/pipe_1.py,sha256=ljO3vO2mqqTXLZsKjzMTC6-sW4JRWMVRfJcK0n5CjKg,9740
 endoreg_db/models/media/video/pipe_2.py,sha256=DnMxW0uOqSsf7-0n9Rlvn7u89U4Jpkv7n6hFpQfUjkQ,4964
 endoreg_db/models/media/video/refactor_plan.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 endoreg_db/models/media/video/video_file.py,sha256=txlxR8d1OBgt3UEkWvLcGSyLarh0jXLw-z0SAV5KOok,26789
@@ -465,7 +465,7 @@ endoreg_db/models/medical/risk/risk_type.py,sha256=kEugcaWSTEWH_Vxq4dcF80Iv1L4_K
 endoreg_db/models/metadata/__init__.py,sha256=8I6oLj3YTmeaPGJpL0AWG5gLwp38QzrEggxSkTisv7c,474
 endoreg_db/models/metadata/frame_ocr_result.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 endoreg_db/models/metadata/model_meta.py,sha256=F_r-PTLeNi4J-4EaGCQkGIguhdl7Bwba7_i56ZAjc-4,7589
-endoreg_db/models/metadata/model_meta_logic.py,sha256=
+endoreg_db/models/metadata/model_meta_logic.py,sha256=vAbNDaoZygH8xOCulWlXoHoR1T0BSvr9kIloxjzhfjo,12533
 endoreg_db/models/metadata/pdf_meta.py,sha256=BTmpSgqxmPKi0apcNjyrZAS4AFKCPXVdBd6VBeyyv6E,3174
 endoreg_db/models/metadata/sensitive_meta.py,sha256=ekLHrW-b5uYcjfkRd0EW5ncx5ef8Bu-K6msDkpWCAbk,13034
 endoreg_db/models/metadata/sensitive_meta_logic.py,sha256=XN3x3p0cqLlzPSZl7e35JBUXr_QKYSq48vwF1N60N4U,32134
@@ -788,7 +788,7 @@ endoreg_db/views/video/video_meta.py,sha256=C1wBMTtQb_yzEUrhFGAy2UHEWMk_CbU75WXX
 endoreg_db/views/video/video_processing_history.py,sha256=mhFuS8RG5GV8E-lTtuD0qrq-bIpnUFp8vy9aERfC-J8,770
 endoreg_db/views/video/video_remove_frames.py,sha256=2FmvNrSPM0fUXiBxINN6vBUUDCqDlBkNcGR3WsLDgKo,1696
 endoreg_db/views/video/video_stream.py,sha256=kLyuf0ORTmsLeYUQkTQ6iRYqlIQozWhMMR3Lhfe_trk,12148
-endoreg_db-0.8.4.
-endoreg_db-0.8.4.
-endoreg_db-0.8.4.
-endoreg_db-0.8.4.
+endoreg_db-0.8.4.6.dist-info/METADATA,sha256=yg3dFsDv-45ABqQIpubittlF5GZbQ3luaiZkeV2JoYc,14719
+endoreg_db-0.8.4.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+endoreg_db-0.8.4.6.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+endoreg_db-0.8.4.6.dist-info/RECORD,,
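Note: RECORD lines follow the wheel format path,sha256=<digest>,<size>, where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing "=" padding stripped and the size is in bytes. A small sketch for recomputing an entry to verify a file:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Rebuild a RECORD-style line (path,sha256=<urlsafe b64 digest>,size) for a file."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{path},sha256={digest},{len(data)}"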
File without changes
|
|
File without changes
|