endoreg-db 0.8.4.4-py3-none-any.whl → 0.8.6.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of endoreg-db might be problematic.
Files changed (36)
  1. endoreg_db/management/commands/load_ai_model_data.py +2 -1
  2. endoreg_db/management/commands/setup_endoreg_db.py +11 -7
  3. endoreg_db/models/media/pdf/raw_pdf.py +241 -97
  4. endoreg_db/models/media/video/pipe_1.py +30 -33
  5. endoreg_db/models/media/video/video_file.py +300 -187
  6. endoreg_db/models/metadata/model_meta_logic.py +15 -1
  7. endoreg_db/models/metadata/sensitive_meta_logic.py +391 -70
  8. endoreg_db/serializers/__init__.py +26 -55
  9. endoreg_db/serializers/misc/__init__.py +1 -1
  10. endoreg_db/serializers/misc/file_overview.py +65 -35
  11. endoreg_db/serializers/misc/{vop_patient_data.py → sensitive_patient_data.py} +1 -1
  12. endoreg_db/serializers/video_examination.py +198 -0
  13. endoreg_db/services/lookup_service.py +228 -58
  14. endoreg_db/services/lookup_store.py +174 -30
  15. endoreg_db/services/pdf_import.py +585 -282
  16. endoreg_db/services/video_import.py +340 -101
  17. endoreg_db/urls/__init__.py +36 -23
  18. endoreg_db/urls/label_video_segments.py +2 -0
  19. endoreg_db/urls/media.py +3 -2
  20. endoreg_db/views/__init__.py +6 -3
  21. endoreg_db/views/media/pdf_media.py +3 -1
  22. endoreg_db/views/media/video_media.py +1 -1
  23. endoreg_db/views/media/video_segments.py +187 -259
  24. endoreg_db/views/pdf/__init__.py +5 -8
  25. endoreg_db/views/pdf/pdf_stream.py +187 -0
  26. endoreg_db/views/pdf/reimport.py +110 -94
  27. endoreg_db/views/requirement/lookup.py +171 -287
  28. endoreg_db/views/video/__init__.py +0 -2
  29. endoreg_db/views/video/video_examination_viewset.py +202 -289
  30. {endoreg_db-0.8.4.4.dist-info → endoreg_db-0.8.6.1.dist-info}/METADATA +1 -1
  31. {endoreg_db-0.8.4.4.dist-info → endoreg_db-0.8.6.1.dist-info}/RECORD +33 -34
  32. endoreg_db/views/pdf/pdf_media.py +0 -239
  33. endoreg_db/views/pdf/pdf_stream_views.py +0 -127
  34. endoreg_db/views/video/video_media.py +0 -158
  35. {endoreg_db-0.8.4.4.dist-info → endoreg_db-0.8.6.1.dist-info}/WHEEL +0 -0
  36. {endoreg_db-0.8.4.4.dist-info → endoreg_db-0.8.6.1.dist-info}/licenses/LICENSE +0 -0
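For reference, a quick way to confirm which of the two releases is actually installed in an environment (a minimal sketch; the distribution name is taken from the .dist-info entries listed above):

    from importlib.metadata import version

    # Distribution name as it appears in the .dist-info directories above.
    print(version("endoreg_db"))  # e.g. "0.8.6.1" after upgrading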
@@ -1,20 +1,23 @@
 import logging
-from typing import TYPE_CHECKING, Optional, Dict, List, Tuple
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+
 from django.db import transaction
+
 from endoreg_db.helpers.download_segmentation_model import download_segmentation_model

 # Added imports

 # Configure logging
-logger = logging.getLogger(__name__) # Changed from "video_file"
+logger = logging.getLogger(__name__)  # Changed from "video_file"

 if TYPE_CHECKING:
     from endoreg_db.models import VideoFile

-
 # --- Pipeline 1 ---
+
+
 def _pipe_1(
-    video_file:"VideoFile",
+    video_file: "VideoFile",
     model_name: str,
     model_meta_version: Optional[int] = None,
     delete_frames_after: bool = False,
@@ -28,11 +31,12 @@ def _pipe_1(
     """
     Pipeline 1: Extract frames, text, predict, create segments, optionally delete frames.
     """
-    success = False # Initialize success flag
-    from .video_file_segments import _convert_sequences_to_db_segments # Added import
-    from ...metadata import ModelMeta, VideoPredictionMeta
+    success = False  # Initialize success flag
     from endoreg_db.models import AiModel, LabelVideoSegment

+    from ...metadata import ModelMeta, VideoPredictionMeta
+    from .video_file_segments import _convert_sequences_to_db_segments  # Added import
+
     video_file.refresh_from_db()
     video_file.update_video_meta()

@@ -43,16 +47,12 @@ def _pipe_1(
         video_file.extract_frames(overwrite=False) # Avoid overwriting if already extracted

         logger.info("Pipe 1: Extracting text metadata...")
-        video_file.update_text_metadata(
-            ocr_frame_fraction=ocr_frame_fraction, cap=ocr_cap, overwrite=False
-        )
+        video_file.update_text_metadata(ocr_frame_fraction=ocr_frame_fraction, cap=ocr_cap, overwrite=False)
         with transaction.atomic():
             state = video_file.get_or_create_state()
             if not state.frames_extracted:
                 logger.error("Pipe 1 failed: Frame extraction did not complete successfully.")
                 return False
-
-

         # 3. Perform Initial Prediction
         logger.info(f"Pipe 1: Performing prediction with model '{model_name}'...")
@@ -74,7 +74,7 @@ def _pipe_1(
         except AiModel.DoesNotExist:
             logger.error(f"Pipe 1 failed: Model '{model_name}' not found.")
             return False
-
+
         except ModelMeta.DoesNotExist:
             try:
                 model_name = download_segmentation_model()
@@ -82,11 +82,9 @@ def _pipe_1(
                 if model_meta_version is not None:
                     model_meta = ai_model_obj.metadata_versions.get(version=model_meta_version)
                 else:
-                    model_meta = ModelMeta.setup_default_from_huggingface()
+                    model_meta = ai_model_obj.get_latest_version()
             except ModelMeta.DoesNotExist:
-                logger.error(
-                    f"Pipe 1 failed: ModelMeta version {model_meta_version} for model '{model_name}' not found."
-                )
+                logger.error(f"Pipe 1 failed: ModelMeta version {model_meta_version} for model '{model_name}' not found.")
                 return False
         try:
             sequences: Optional[Dict[str, List[Tuple[int, int]]]] = video_file.predict_video(
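For context on the change just above: the fallback no longer calls ModelMeta.setup_default_from_huggingface() but asks the AiModel instance for its latest metadata version. The actual implementation of get_latest_version() is not part of this diff; the following is a minimal sketch of what such a helper might look like, reusing the metadata_versions relation, the version field, and the ModelMeta.DoesNotExist handling seen in the hunk above. The class body and the ordering strategy are assumptions, not the package's real code.

    from django.db import models

    class AiModel(models.Model):  # hypothetical sketch only
        name = models.CharField(max_length=255)

        def get_latest_version(self) -> "ModelMeta":
            # metadata_versions is the reverse relation used in the diff above;
            # resolving "latest" as the highest version number is an assumption.
            latest = self.metadata_versions.order_by("-version").first()
            if latest is None:
                # Raising ModelMeta.DoesNotExist keeps the caller's except branch working.
                raise ModelMeta.DoesNotExist(f"No ModelMeta for AiModel '{self.name}'")
            return latest

Whatever the real implementation looks like, the surrounding except ModelMeta.DoesNotExist branch still covers the case where no version can be resolved.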
@@ -107,7 +105,7 @@ def _pipe_1(

         # --- Set and Save State ---
         state.initial_prediction_completed = True
-        state.save(update_fields=['initial_prediction_completed'])
+        state.save(update_fields=["initial_prediction_completed"])
         logger.info("Pipe 1: Set initial_prediction_completed state to True.")

         logger.info(f"Pipe 1: Sequences returned from prediction: {sequences}")
@@ -117,9 +115,7 @@ def _pipe_1(
         # 4. Create LabelVideoSegments
         logger.info("Pipe 1: Creating LabelVideoSegments from predictions...")
         try:
-            video_prediction_meta = VideoPredictionMeta.objects.get(
-                video_file=video_file, model_meta=model_meta
-            )
+            video_prediction_meta = VideoPredictionMeta.objects.get(video_file=video_file, model_meta=model_meta)
             logger.info(f"Pipe 1: Calling _convert_sequences_to_db_segments for video {video_file.uuid} with prediction meta {video_prediction_meta.pk}")
             _convert_sequences_to_db_segments(
                 video=video_file,
@@ -127,9 +123,9 @@ def _pipe_1(
                 video_prediction_meta=video_prediction_meta,
             )
             video_file.sequences = sequences
-            video_file.save(update_fields=['sequences'])
+            video_file.save(update_fields=["sequences"])
             state.lvs_created = True
-            state.save(update_fields=['lvs_created'])
+            state.save(update_fields=["lvs_created"])
             logger.info("Pipe 1: Set lvs_created state to True.")
             logger.info("Pipe 1: LabelVideoSegment creation complete.")
             lvs_count_after = LabelVideoSegment.objects.filter(video_file=video_file).count()
@@ -139,7 +135,7 @@ def _pipe_1(
             raise

         logger.info(f"Pipe 1 completed successfully for video {video_file.uuid}")
-        success = True # Set success flag
+        success = True  # Set success flag
         return True

     except Exception as e:
@@ -147,7 +143,7 @@ def _pipe_1(
         return False
     finally:
         # 5. Optionally delete frames
-        if delete_frames_after and success: # Check success flag
+        if delete_frames_after and success:  # Check success flag
             logger.info("Pipe 1: Deleting frames after processing...")
             try:
                 video_file.delete_frames()
@@ -156,15 +152,16 @@ def _pipe_1(
                 logger.error(f"Pipe 1 failed during frame deletion: {e}", exc_info=True)
         else:
             logger.info("Pipe 1: Frame deletion skipped.")
-
+
+
 # --- Test after Pipe 1 ---
-def _test_after_pipe_1(video_file:"VideoFile", start_frame: int = 0, end_frame: int = 100) -> bool:
+def _test_after_pipe_1(video_file: "VideoFile", start_frame: int = 0, end_frame: int = 100) -> bool:
     """
     Simulates human annotation validation after Pipe 1.
     Creates 'outside' segments and marks sensitive meta as verified.
     """
-    from ...label import LabelVideoSegment, Label
-
+    from ...label import Label, LabelVideoSegment
+
     logger.info(f"Starting _test_after_pipe_1 for video {video_file.uuid}")
     try:
         # 1. Create 'outside' LabelVideoSegments
@@ -172,15 +169,15 @@ def _test_after_pipe_1(video_file:"VideoFile", start_frame: int = 0, end_frame:
         outside_label = Label.objects.get(name__iexact="outside")
         logger.info(f"Creating 'outside' annotation segment [{start_frame}-{end_frame}]")
         # Create a segment - assuming custom_create handles saving
-        outside_segment = LabelVideoSegment.objects.create( # Assign to variable
+        outside_segment = LabelVideoSegment.objects.create(  # Assign to variable
             video_file=video_file,
             label=outside_label,
             start_frame_number=start_frame,
             end_frame_number=end_frame,
-            prediction_meta=None,
+            prediction_meta=None,
         )
         # Ensure the segment has a state and mark it as validated
-        segment_state, created = outside_segment.get_or_create_state() # Unpack the tuple
+        segment_state, created = outside_segment.get_or_create_state()  # Unpack the tuple
         segment_state.is_validated = True
         segment_state.save()
         logger.info(f"Marked 'outside' segment {outside_segment.pk} as validated. Created: {created}")
@@ -202,7 +199,7 @@ def _test_after_pipe_1(video_file:"VideoFile", start_frame: int = 0, end_frame:
             # Example: using a boolean field
             video_file.sensitive_meta.state.dob_verified = True
             video_file.sensitive_meta.state.names_verified = True
-            video_file.sensitive_meta.state.save() # Save the SensitiveMeta instance
+            video_file.sensitive_meta.state.save()  # Save the SensitiveMeta instance
             logger.info("Sensitive meta state updated.")

         else:
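Taken together, the two functions in this file form a run-then-validate flow: _pipe_1() extracts frames and text metadata, runs the prediction, and creates LabelVideoSegments, while _test_after_pipe_1() simulates the human validation step. Below is a minimal usage sketch, calling the helpers directly for illustration (in the package they are presumably invoked via VideoFile methods). The uuid and model name are placeholders, and it assumes the remaining OCR parameters of _pipe_1 have defaults, which this diff does not show.

    from endoreg_db.models import VideoFile
    from endoreg_db.models.media.video.pipe_1 import _pipe_1, _test_after_pipe_1

    # Placeholder lookup -- any way of obtaining a VideoFile instance works here.
    video_file = VideoFile.objects.get(uuid="00000000-0000-0000-0000-000000000000")

    # Run pipeline 1: frame extraction, OCR text metadata, prediction, segment creation.
    if _pipe_1(video_file, model_name="my-segmentation-model", delete_frames_after=True):
        # Simulate the human annotation/validation step that normally follows pipeline 1.
        _test_after_pipe_1(video_file, start_frame=0, end_frame=100)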