endoreg-db 0.8.3.1__py3-none-any.whl → 0.8.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of endoreg-db might be problematic.

@@ -0,0 +1,116 @@
+ """
+ Django management command to create ModelMeta from Hugging Face model.
+ """
+
+ from pathlib import Path
+
+ from django.core.files.base import ContentFile
+ from django.core.management.base import BaseCommand
+ from huggingface_hub import hf_hub_download
+
+ from endoreg_db.models import AiModel, LabelSet, ModelMeta
+
+
+ class Command(BaseCommand):
+     help = "Create ModelMeta by downloading model from Hugging Face"
+
+     def add_arguments(self, parser):
+         parser.add_argument(
+             "--model_id",
+             type=str,
+             default="wg-lux/colo_segmentation_RegNetX800MF_base",
+             help="Hugging Face model ID",
+         )
+         parser.add_argument(
+             "--model_name",
+             type=str,
+             default="image_multilabel_classification_colonoscopy_default",
+             help="Name for the AI model",
+         )
+         parser.add_argument(
+             "--labelset_name",
+             type=str,
+             default="multilabel_classification_colonoscopy_default",
+             help="Name of the labelset",
+         )
+         parser.add_argument(
+             "--meta_version",
+             type=str,
+             default="1",
+             help="Version for the model meta",
+         )
+
+     def handle(self, *args, **options):
+         model_id = options["model_id"]
+         model_name = options["model_name"]
+         labelset_name = options["labelset_name"]
+         version = options["meta_version"]
+
+         self.stdout.write(f"Downloading model {model_id} from Hugging Face...")
+
+         try:
+             # Download the model weights
+             weights_path = hf_hub_download(
+                 repo_id=model_id,
+                 filename="colo_segmentation_RegNetX800MF_base.ckpt",
+                 local_dir="/tmp",
+             )
+             self.stdout.write(f"Downloaded weights to: {weights_path}")
+
+             # Get or create AI model
+             ai_model, created = AiModel.objects.get_or_create(
+                 name=model_name, defaults={"description": f"Model from {model_id}"}
+             )
+             if created:
+                 self.stdout.write(f"Created AI model: {ai_model.name}")
+
+             # Get labelset
+             try:
+                 labelset = LabelSet.objects.get(name=labelset_name)
+             except LabelSet.DoesNotExist:
+                 self.stdout.write(
+                     self.style.ERROR(f"LabelSet '{labelset_name}' not found")
+                 )
+                 return
+
+             # Create ModelMeta
+             model_meta, created = ModelMeta.objects.get_or_create(
+                 name=model_name,
+                 model=ai_model,
+                 version=version,
+                 defaults={
+                     "labelset": labelset,
+                     "activation": "sigmoid",
+                     "mean": "0.45211223,0.27139644,0.19264949",
+                     "std": "0.31418097,0.21088019,0.16059452",
+                     "size_x": 716,
+                     "size_y": 716,
+                     "axes": "2,0,1",
+                     "batchsize": 16,
+                     "num_workers": 0,
+                     "description": f"Downloaded from {model_id}",
+                 },
+             )
+
+             # Save the weights file to the model
+             with open(weights_path, "rb") as f:
+                 model_meta.weights.save(
+                     f"{model_name}_v{version}_colo_segmentation_RegNetX800MF_base.ckpt",
+                     ContentFile(f.read()),
+                 )
+
+             # Set as active meta
+             ai_model.active_meta = model_meta
+             ai_model.save()
+
+             self.stdout.write(
+                 self.style.SUCCESS(
+                     f"Successfully {'created' if created else 'updated'} ModelMeta: {model_meta}"
+                 )
+             )
+
+         except Exception as e:
+             self.stdout.write(self.style.ERROR(f"Error creating ModelMeta: {e}"))
+             import traceback
+
+             traceback.print_exc()
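
For orientation, a minimal sketch of invoking the new command programmatically; the command name comes from the module file listed in RECORD (create_model_meta_from_huggingface.py) and the keyword arguments mirror the add_arguments defaults above:

    from django.core.management import call_command

    # Defaults shown explicitly; all four options are optional.
    call_command(
        "create_model_meta_from_huggingface",
        model_id="wg-lux/colo_segmentation_RegNetX800MF_base",
        model_name="image_multilabel_classification_colonoscopy_default",
        labelset_name="multilabel_classification_colonoscopy_default",
        meta_version="1",
    )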
@@ -0,0 +1,196 @@
+ """
+ Django management command to perform complete setup for EndoReg DB when used as an embedded app.
+ This command ensures all necessary data and configurations are initialized.
+ """
+
+ import os
+ from pathlib import Path
+
+ from django.core.management import call_command
+ from django.core.management.base import BaseCommand
+
+
+ class Command(BaseCommand):
+     help = """
+     Complete setup for EndoReg DB when used as an embedded app.
+     This command performs all necessary initialization steps:
+     1. Loads base database data
+     2. Sets up AI models and labels
+     3. Creates cache table
+     4. Initializes model metadata
+     """
+
+     def add_arguments(self, parser):
+         parser.add_argument(
+             "--skip-ai-setup",
+             action="store_true",
+             help="Skip AI model setup (for cases where AI features are not needed)",
+         )
+         parser.add_argument(
+             "--force-recreate",
+             action="store_true",
+             help="Force recreation of AI model metadata even if it exists",
+         )
+
+     def handle(self, *args, **options):
+         skip_ai = options.get("skip_ai_setup", False)
+         force_recreate = options.get("force_recreate", False)
+
+         self.stdout.write(self.style.SUCCESS("🚀 Starting EndoReg DB embedded app setup..."))
+
+         # Step 1: Load base database data
+         self.stdout.write("\n📊 Step 1: Loading base database data...")
+         try:
+             call_command("load_base_db_data")
+             self.stdout.write(self.style.SUCCESS("✅ Base database data loaded successfully"))
+         except Exception as e:
+             self.stdout.write(self.style.ERROR(f"❌ Failed to load base data: {e}"))
+             return
+
+         # Step 2: Create cache table (only if using database caching)
+         self.stdout.write("\n💾 Step 2: Setting up caching...")
+         from django.conf import settings
+
+         cache_backend = settings.CACHES.get("default", {}).get("BACKEND", "")
+         if "db" in cache_backend or "database" in cache_backend:
+             self.stdout.write("Using database caching - creating cache table...")
+             try:
+                 call_command("createcachetable")
+                 self.stdout.write(self.style.SUCCESS("✅ Cache table created successfully"))
+             except Exception as e:
+                 self.stdout.write(self.style.ERROR(f"❌ Failed to create cache table: {e}"))
+                 return
+         else:
+             self.stdout.write("Using in-memory caching - skipping cache table creation")
+
+         if skip_ai:
+             self.stdout.write(self.style.WARNING("\n⚠️ Skipping AI setup as requested"))
+         else:
+             # Step 3: Load AI model data
+             self.stdout.write("\n🤖 Step 3: Loading AI model data...")
+             try:
+                 call_command("load_ai_model_data")
+                 self.stdout.write(self.style.SUCCESS("✅ AI model data loaded successfully"))
+             except Exception as e:
+                 self.stdout.write(self.style.ERROR(f"❌ Failed to load AI model data: {e}"))
+                 return
+
+             # Step 4: Load AI model label data
+             self.stdout.write("\n🏷️ Step 4: Loading AI model label data...")
+             try:
+                 call_command("load_ai_model_label_data")
+                 self.stdout.write(self.style.SUCCESS("✅ AI model label data loaded successfully"))
+             except Exception as e:
+                 self.stdout.write(self.style.ERROR(f"❌ Failed to load AI model label data: {e}"))
+                 return
+
+             # Step 5: Create model metadata
+             self.stdout.write("\n📋 Step 5: Creating AI model metadata...")
+             try:
+                 # Check if model metadata already exists
+                 from endoreg_db.models import AiModel
+
+                 default_model_name = "image_multilabel_classification_colonoscopy_default"
+                 ai_model = AiModel.objects.filter(name=default_model_name).first()
+
+                 if not ai_model:
+                     self.stdout.write(self.style.ERROR(f"❌ AI model '{default_model_name}' not found"))
+                     return
+
+                 existing_meta = ai_model.metadata_versions.first()
+                 if existing_meta and not force_recreate:
+                     self.stdout.write(self.style.SUCCESS("✅ Model metadata already exists (use --force-recreate to recreate)"))
+                 else:
+                     # Try to create model metadata
+                     model_path = self._find_model_weights_file()
+                     if model_path:
+                         call_command(
+                             "create_multilabel_model_meta",
+                             model_name=default_model_name,
+                             model_meta_version=1,
+                             image_classification_labelset_name="multilabel_classification_colonoscopy_default",
+                             model_path=str(model_path),
+                         )
+                         self.stdout.write(self.style.SUCCESS("✅ AI model metadata created successfully"))
+                     else:
+                         self.stdout.write(self.style.WARNING("⚠️ Model weights file not found. AI features may not work properly."))
+
+             except Exception as e:
+                 self.stdout.write(self.style.ERROR(f"❌ Failed to create AI model metadata: {e}"))
+                 return
+
+         # Step 6: Verification
+         self.stdout.write("\n🔍 Step 6: Verifying setup...")
+         try:
+             self._verify_setup()
+             self.stdout.write(self.style.SUCCESS("✅ Setup verification completed successfully"))
+         except Exception as e:
+             self.stdout.write(self.style.ERROR(f"❌ Setup verification failed: {e}"))
+             return
+
+         self.stdout.write(self.style.SUCCESS("\n🎉 EndoReg DB embedded app setup completed successfully!"))
+         self.stdout.write("\nNext steps:")
+         self.stdout.write("1. Run migrations: python manage.py migrate")
+         self.stdout.write("2. Create superuser: python manage.py createsuperuser")
+         self.stdout.write("3. Start development server: python manage.py runserver")
+
+     def _find_model_weights_file(self):
+         """Find the model weights file in various possible locations."""
+         # Check common locations for model weights
+         possible_paths = [
+             # Test assets (for development)
+             Path("tests/assets/colo_segmentation_RegNetX800MF_6.ckpt"),
+             # Project root assets
+             Path("assets/colo_segmentation_RegNetX800MF_6.ckpt"),
+             # Storage directory
+             Path("data/storage/model_weights/colo_segmentation_RegNetX800MF_6.ckpt"),
+             # Absolute paths based on environment
+             Path(os.getenv("STORAGE_DIR", "storage")) / "model_weights" / "colo_segmentation_RegNetX800MF_6.ckpt",
+         ]
+
+         for path in possible_paths:
+             if path.exists():
+                 self.stdout.write(f"Found model weights at: {path}")
+                 return path
+
+         self.stdout.write("Model weights file not found in standard locations")
+         return None
+
+     def _verify_setup(self):
+         """Verify that the setup was successful."""
+         from django.conf import settings
+         from django.db import connection
+
+         # Check that required tables exist
+         required_tables = [
+             "endoreg_db_aimodel",
+             "endoreg_db_modelmeta",
+         ]
+
+         # Only check for cache table if using database caching
+         cache_backend = settings.CACHES.get("default", {}).get("BACKEND", "")
+         if "db" in cache_backend or "database" in cache_backend:
+             required_tables.append("django_cache_table")
+
+         cursor = connection.cursor()
+         cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
+         existing_tables = [row[0] for row in cursor.fetchall()]
+
+         missing_tables = [table for table in required_tables if table not in existing_tables]
+         if missing_tables:
+             raise Exception(f"Missing required tables: {missing_tables}")
+
+         # Check that AI models exist (if AI setup was performed)
+         from endoreg_db.models import AiModel
+
+         if AiModel.objects.exists():
+             ai_model_count = AiModel.objects.count()
+             self.stdout.write(f"Found {ai_model_count} AI model(s)")
+
+         # Check for model metadata
+         from endoreg_db.models import ModelMeta
+
+         meta_count = ModelMeta.objects.count()
+         self.stdout.write(f"Found {meta_count} model metadata record(s)")
+
+         self.stdout.write("Setup verification passed")
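
A sketch of running the setup command the same way ("setup_endoreg_db" is the module name recorded in RECORD, and the flags map to the add_arguments definitions above):

    from django.core.management import call_command

    call_command("setup_endoreg_db")                       # full setup
    call_command("setup_endoreg_db", skip_ai_setup=True)   # data/cache only
    call_command("setup_endoreg_db", force_recreate=True)  # rebuild ModelMeta

Note that _verify_setup queries sqlite_master directly, so the table-existence check in step 6 only works on SQLite backends.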
@@ -1,21 +1,21 @@
  import shutil
+ from logging import getLogger
  from pathlib import Path
- from typing import Optional, TYPE_CHECKING, Any, Type
- from huggingface_hub import hf_hub_download
+ from typing import TYPE_CHECKING, Any, Optional, Type
+
  from django.db import transaction
+ from huggingface_hub import hf_hub_download
 
  # Assuming ModelMeta, AiModel, LabelSet are importable from the correct locations
  # Adjust imports based on your project structure if necessary
  from ..administration.ai.ai_model import AiModel
  from ..label.label_set import LabelSet
- from ..utils import WEIGHTS_DIR, STORAGE_DIR
-
- from logging import getLogger
+ from ..utils import STORAGE_DIR, WEIGHTS_DIR
 
  logger = getLogger("ai_model")
 
  if TYPE_CHECKING:
-     from .model_meta import ModelMeta  # Import ModelMeta for type hinting
+     from .model_meta import ModelMeta  # Import ModelMeta for type hinting
 
 
  def get_latest_version_number_logic(
@@ -29,13 +29,13 @@ def get_latest_version_number_logic(
      """
      versions_qs = cls.objects.filter(
          name=meta_name, model__name=model_name
-     ).values_list('version', flat=True)
+     ).values_list("version", flat=True)
 
      max_v = 0
      found_numeric_version = False
 
      for v_str in versions_qs:
-         if v_str is None: # Skip None versions
+         if v_str is None:  # Skip None versions
              continue
          try:
              v_int = int(v_str)
@@ -47,13 +47,13 @@ def get_latest_version_number_logic(
              f"Warning: Could not parse version string '{v_str}' as an integer for "
              f"meta_name='{meta_name}', model_name='{model_name}' while determining the max version."
          )
-
+
      return max_v if found_numeric_version else 0
 
 
  @transaction.atomic
  def create_from_file_logic(
-     cls: Type["ModelMeta"], # cls is ModelMeta
+     cls: Type["ModelMeta"],  # cls is ModelMeta
      meta_name: str,
      model_name: str,
      labelset_name: str,
@@ -94,11 +94,14 @@ def create_from_file_logic(
          )
      elif existing and bump_if_exists:
          target_version = str(latest_version_num + 1)
-         logger.info(f"Bumping version for {meta_name}/{model_name} to {target_version}")
+         logger.info(
+             f"Bumping version for {meta_name}/{model_name} to {target_version}"
+         )
      else:
          target_version = str(latest_version_num + 1)
-         logger.info(f"Setting next version for {meta_name}/{model_name} to {target_version}")
-
+         logger.info(
+             f"Setting next version for {meta_name}/{model_name} to {target_version}"
+         )
 
      # --- Prepare Weights File ---
      source_weights_path = Path(weights_file).resolve()
@@ -108,7 +111,10 @@ def create_from_file_logic(
      # Construct destination path within MEDIA_ROOT/WEIGHTS_DIR
      weights_filename = source_weights_path.name
      # Relative path for the FileField upload_to
-     relative_dest_path = Path(WEIGHTS_DIR.relative_to(STORAGE_DIR)) / f"{meta_name}_v{target_version}_{weights_filename}"
+     relative_dest_path = (
+         Path(WEIGHTS_DIR.relative_to(STORAGE_DIR))
+         / f"{meta_name}_v{target_version}_{weights_filename}"
+     )
      # Full path for shutil.copy
      full_dest_path = STORAGE_DIR / relative_dest_path
 
@@ -125,8 +131,8 @@ def create_from_file_logic(
      # --- Create/Update ModelMeta Instance ---
      defaults = {
          "labelset": label_set,
-         "weights": relative_dest_path.as_posix(), # Store relative path for FileField
-         **kwargs, # Pass through other fields like activation, mean, std, etc.
+         "weights": relative_dest_path.as_posix(),  # Store relative path for FileField
+         **kwargs,  # Pass through other fields like activation, mean, std, etc.
      }
 
      # Remove None values from defaults to avoid overriding model defaults unnecessarily
@@ -152,35 +158,39 @@ def create_from_file_logic(
 
      return model_meta
 
+
  # --- Add other logic functions referenced by ModelMeta here ---
  # (get_latest_version_number_logic, get_activation_function_logic, etc.)
  # Placeholder for get_activation_function_logic
  def get_activation_function_logic(activation_name: str):
-     import torch.nn as nn # Import locally as it's specific to this function
+     import torch.nn as nn  # Import locally as it's specific to this function
+
      if activation_name.lower() == "sigmoid":
          return nn.Sigmoid()
      elif activation_name.lower() == "softmax":
          # Note: Softmax usually requires specifying the dimension
-         return nn.Softmax(dim=1) # Assuming dim=1 (channels) is common
+         return nn.Softmax(dim=1)  # Assuming dim=1 (channels) is common
      elif activation_name.lower() == "none":
          return nn.Identity()
      else:
          # Consider adding more activations or raising an error
          raise ValueError(f"Unsupported activation function: {activation_name}")
 
+
  # Placeholder for get_inference_dataset_config_logic
  def get_inference_dataset_config_logic(model_meta: "ModelMeta") -> dict:
      # This would typically extract relevant fields from model_meta
      # for configuring a dataset during inference
      return {
-         "mean": [float(x) for x in model_meta.mean.split(',')],
-         "std": [float(x) for x in model_meta.std.split(',')],
-         "size_y": model_meta.size_y, # Add size_y key
-         "size_x": model_meta.size_x, # Add size_x key
-         "axes": [int(x) for x in model_meta.axes.split(',')],
+         "mean": [float(x) for x in model_meta.mean.split(",")],
+         "std": [float(x) for x in model_meta.std.split(",")],
+         "size_y": model_meta.size_y,  # Add size_y key
+         "size_x": model_meta.size_x,  # Add size_x key
+         "axes": [int(x) for x in model_meta.axes.split(",")],
          # Add other relevant config like normalization type, etc.
      }
 
+
  # Placeholder for get_config_dict_logic
  def get_config_dict_logic(model_meta: "ModelMeta") -> dict:
      # Returns a dictionary representation of the model's configuration
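
The mean, std, and axes fields are stored as comma-separated strings on ModelMeta and parsed back into lists here; a worked example using the defaults from the management command above:

    mean_str = "0.45211223,0.27139644,0.19264949"   # ModelMeta.mean
    mean = [float(x) for x in mean_str.split(",")]  # [0.45211223, 0.27139644, 0.19264949]
    axes = [int(x) for x in "2,0,1".split(",")]     # [2, 0, 1]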
@@ -202,6 +212,7 @@ def get_config_dict_logic(model_meta: "ModelMeta") -> dict:
          # Add any other relevant fields
      }
 
+
  # Placeholder for get_model_meta_by_name_version_logic
  def get_model_meta_by_name_version_logic(
      cls: Type["ModelMeta"],
@@ -227,17 +238,24 @@ def get_model_meta_by_name_version_logic(
          ) from exc
      else:
          # Get latest version
-         latest = cls.objects.filter(name=meta_name, model=ai_model).order_by("-date_created").first()
+         latest = (
+             cls.objects.filter(name=meta_name, model=ai_model)
+             .order_by("-date_created")
+             .first()
+         )
          if latest:
              return latest
          else:
              raise cls.DoesNotExist(
                  f"No ModelMeta found for '{meta_name}' and model '{model_name}'."
              )
-
- from huggingface_hub import model_info
+
+
  import re
 
+ from huggingface_hub import model_info
+
+
  def infer_default_model_meta_from_hf(model_id: str) -> dict[str, Any]:
      """
      Infers default model metadata (activation, normalization, input size)
@@ -248,7 +266,9 @@ def infer_default_model_meta_from_hf(model_id: str) -> dict[str, Any]:
      """
 
      if not (info := model_info(model_id)):
-         logger.info(f"Could not retrieve model info for {model_id}, using ColoReg segmentation defaults.")
+         logger.info(
+             f"Could not retrieve model info for {model_id}, using ColoReg segmentation defaults."
+         )
          return {
              "name": "wg-lux/colo_segmentation_RegNetX800MF_base",
              "activation": "sigmoid",
@@ -295,18 +315,29 @@ def infer_default_model_meta_from_hf(model_id: str) -> dict[str, Any]:
          "size_y": size_y,
          "description": f"Inferred defaults for {model_id}",
      }
-
- def setup_default_from_huggingface_logic(cls, model_id: str, labelset_name: str | None = None):
+
+
+ def setup_default_from_huggingface_logic(
+     cls, model_id: str, labelset_name: str | None = None
+ ):
      """
      Downloads model weights from Hugging Face and auto-fills ModelMeta fields.
      """
      meta = infer_default_model_meta_from_hf(model_id)
 
      # Download weights
-     weights_path = hf_hub_download(repo_id=model_id, filename="pytorch_model.bin", local_dir=WEIGHTS_DIR)
+     weights_path = hf_hub_download(
+         repo_id=model_id,
+         filename="colo_segmentation_RegNetX800MF_base.ckpt",
+         local_dir=WEIGHTS_DIR,
+     )
 
      ai_model, _ = AiModel.objects.get_or_create(name=meta["name"])
-     labelset = LabelSet.objects.first() if not labelset_name else LabelSet.objects.get(name=labelset_name)
+     labelset = (
+         LabelSet.objects.first()
+         if not labelset_name
+         else LabelSet.objects.get(name=labelset_name)
+     )
 
      return create_from_file_logic(
          cls,
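
A sketch of calling the logic function directly; cls is the ModelMeta class (how the model wires this up as a classmethod is not shown in this diff, so the direct call is illustrative):

    from endoreg_db.models import ModelMeta

    meta = setup_default_from_huggingface_logic(
        ModelMeta,
        model_id="wg-lux/colo_segmentation_RegNetX800MF_base",
        labelset_name="multilabel_classification_colonoscopy_default",
    )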
@@ -19,16 +19,12 @@ from pathlib import Path
  from typing import Union, Dict, Any, Optional, List, Tuple
  from django.db import transaction
  from endoreg_db.models import VideoFile, SensitiveMeta
- from endoreg_db.utils.paths import STORAGE_DIR, RAW_FRAME_DIR, VIDEO_DIR, ANONYM_VIDEO_DIR
+ from endoreg_db.utils.paths import STORAGE_DIR, VIDEO_DIR, ANONYM_VIDEO_DIR
  import random
- from lx_anonymizer.ocr import trocr_full_image_ocr
  from endoreg_db.utils.hashs import get_video_hash
- from endoreg_db.models.media.video.video_file_anonymize import _cleanup_raw_assets, _anonymize
- from typing import TYPE_CHECKING
+ from endoreg_db.models.media.video.video_file_anonymize import _cleanup_raw_assets
  from django.db.models.fields.files import FieldFile
-
- if TYPE_CHECKING:
-     from endoreg_db.models import EndoscopyProcessor
+ from endoreg_db.models import EndoscopyProcessor
 
  # File lock configuration (matches PDF import)
  STALE_LOCK_SECONDS = 6000  # 100 minutes - reclaim locks older than this
@@ -58,15 +54,13 @@ class VideoImportService():
          self.project_root = Path(__file__).parent.parent.parent.parent
 
          # Track processed files to prevent duplicates
-         self.processed_files = set(str(file) for file in os.listdir(ANONYM_VIDEO_DIR))
-
-         self.STORAGE_DIR = STORAGE_DIR
-
+         self.processed_files = set(str(Path(ANONYM_VIDEO_DIR) / file) for file in os.listdir(ANONYM_VIDEO_DIR))
+
          # Central video instance and processing context
          self.current_video: Optional[VideoFile] = None
         self.processing_context: Dict[str, Any] = {}
 
-         self.delete_source = False
+         self.delete_source = True
 
          self.logger = logging.getLogger(__name__)
 
@@ -225,8 +219,12 @@
 
          # Acquire file lock to prevent concurrent processing
          # Lock will be held until finally block in import_and_anonymize()
-         self.processing_context['_lock_context'] = self._file_lock(file_path)
-         self.processing_context['_lock_context'].__enter__()
+         try:
+             self.processing_context['_lock_context'] = self._file_lock(file_path)
+             self.processing_context['_lock_context'].__enter__()
+         except Exception:
+             self._cleanup_processing_context()
+             raise
 
          self.logger.info("Acquired file lock for: %s", file_path)
 
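A minimal, self-contained sketch of the pattern guarded here: a context manager entered by hand so the lock can outlive the method, with release deferred to the caller's finally block (file_lock below is a stand-in for the service's _file_lock helper):

    from contextlib import contextmanager

    @contextmanager
    def file_lock(path):
        print(f"lock {path}")        # acquire, e.g. create a .lock file
        try:
            yield
        finally:
            print(f"unlock {path}")  # always release

    ctx = file_lock("video.mp4")
    ctx.__enter__()                  # acquire now; held across later steps
    try:
        ...                          # frame extraction, anonymization, etc.
    finally:
        ctx.__exit__(None, None, None)  # release in the caller's finally
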
@@ -274,96 +272,78 @@ class VideoImportService():
      def _move_to_final_storage(self):
          """
          Move video from raw_videos to final storage locations.
-         - Raw video → /data/videos (raw_file_path)
+         - Raw video → /data/videos (raw_file_path)
          - Processed video will later → /data/anonym_videos (file_path)
          """
          from endoreg_db.utils import data_paths
-
-         source_path = self.processing_context['file_path']
 
-         videos_dir = data_paths["video"]
-         videos_dir.mkdir(parents=True, exist_ok=True)
+         source_path = Path(self.processing_context["file_path"])
+         _current_video = self._require_current_video()
+         videos_dir = Path(data_paths["video"])
+         storage_root = Path(data_paths["storage"])
 
-         _current_video = self.current_video
-         assert _current_video is not None, "Current video instance is None during storage move"
+         videos_dir.mkdir(parents=True, exist_ok=True)
 
+         # --- Derive stored_raw_path safely ---
          stored_raw_path = None
-         if hasattr(_current_video, "get_raw_file_path"):
-             possible_path = _current_video.get_raw_file_path()
-             if possible_path:
-                 try:
-                     stored_raw_path = Path(possible_path)
-                 except (TypeError, ValueError):
-                     stored_raw_path = None
-
-         if stored_raw_path:
-             try:
-                 storage_root = data_paths["storage"]
-                 if stored_raw_path.is_absolute():
-                     if not stored_raw_path.is_relative_to(storage_root):
+         try:
+             if hasattr(_current_video, "get_raw_file_path"):
+                 candidate = _current_video.get_raw_file_path()
+                 if candidate:
+                     candidate_path = Path(candidate)
+                     # Accept only if under storage_root
+                     try:
+                         candidate_path.relative_to(storage_root)
+                         stored_raw_path = candidate_path
+                     except ValueError:
+                         # outside storage_root, reset
                          stored_raw_path = None
-                 else:
-                     if stored_raw_path.parts and stored_raw_path.parts[0] == videos_dir.name:
-                         stored_raw_path = storage_root / stored_raw_path
-                     else:
-                         stored_raw_path = videos_dir / stored_raw_path.name
-             except Exception:
-                 stored_raw_path = None
-
-         if stored_raw_path and not stored_raw_path.suffix:
+         except Exception:
              stored_raw_path = None
 
+         # Fallback: derive from UUID + suffix
          if not stored_raw_path:
+             suffix = source_path.suffix or ".mp4"
              uuid_str = getattr(_current_video, "uuid", None)
-             source_suffix = Path(source_path).suffix or ".mp4"
-             filename = f"{uuid_str}{source_suffix}" if uuid_str else Path(source_path).name
+             filename = f"{uuid_str}{suffix}" if uuid_str else source_path.name
              stored_raw_path = videos_dir / filename
 
-         delete_source = bool(self.processing_context.get('delete_source'))
+         delete_source = bool(self.processing_context.get("delete_source", True))
          stored_raw_path.parent.mkdir(parents=True, exist_ok=True)
 
-         if not stored_raw_path.exists():
-             try:
-                 if source_path.exists():
-                     if delete_source:
-                         shutil.move(str(source_path), str(stored_raw_path))
-                         self.logger.info("Moved raw video to: %s", stored_raw_path)
-                     else:
-                         shutil.copy2(str(source_path), str(stored_raw_path))
-                         self.logger.info("Copied raw video to: %s", stored_raw_path)
-                 else:
-                     raise FileNotFoundError(f"Neither stored raw path nor source path exists for {self.processing_context['file_path']}")
-             except Exception as e:
-                 self.logger.error("Failed to place video in final storage: %s", e)
-                 raise
-         else:
-             # If we already have the stored copy, respect delete_source flag without touching assets unnecessarily
-             if delete_source and source_path.exists():
+         # --- Move or copy raw video ---
+         try:
+             if delete_source:
+                 # Try atomic move first, fallback to copy+unlink
                  try:
+                     os.replace(source_path, stored_raw_path)
+                     self.logger.info("Moved raw video to: %s", stored_raw_path)
+                 except Exception:
+                     shutil.copy2(source_path, stored_raw_path)
                      os.remove(source_path)
-                     self.logger.info("Removed original source file after storing copy: %s", source_path)
-                 except OSError as e:
-                     self.logger.warning("Failed to remove source file %s: %s", source_path, e)
+                     self.logger.info("Copied & removed raw video to: %s", stored_raw_path)
+             else:
+                 shutil.copy2(source_path, stored_raw_path)
+                 self.logger.info("Copied raw video to: %s", stored_raw_path)
+         except Exception as e:
+             self.logger.error("Failed to move/copy video to final storage: %s", e)
+             raise
 
-         # Ensure database path points to stored location (relative to storage root)
+         # --- Ensure DB raw_file is relative to storage root ---
          try:
-             storage_root = data_paths["storage"]
-             relative_path = Path(stored_raw_path).relative_to(storage_root)
-             if _current_video.raw_file.name != str(relative_path):
-                 _current_video.raw_file.name = str(relative_path)
-                 _current_video.save(update_fields=['raw_file'])
-                 self.logger.info("Updated raw_file path to: %s", relative_path)
-         except Exception as e:
-             self.logger.error("Failed to ensure raw_file path is relative: %s", e)
-             fallback_relative = Path("videos") / Path(stored_raw_path).name
-             if _current_video.raw_file.name != fallback_relative.as_posix():
-                 _current_video.raw_file.name = fallback_relative.as_posix()
-                 _current_video.save(update_fields=['raw_file'])
-                 self.logger.info("Updated raw_file path using fallback: %s", fallback_relative.as_posix())
+             rel_path = stored_raw_path.relative_to(storage_root)
+         except Exception:
+             rel_path = Path("videos") / stored_raw_path.name
+
+         if _current_video.raw_file.name != rel_path.as_posix():
+             _current_video.raw_file.name = rel_path.as_posix()
+             _current_video.save(update_fields=["raw_file"])
+             self.logger.info("Updated raw_file path to: %s", rel_path.as_posix())
+
+         # --- Store for later stages ---
+         self.processing_context["raw_video_path"] = stored_raw_path
+         self.processing_context["video_filename"] = stored_raw_path.name
 
-         # Store paths for later processing
-         self.processing_context['raw_video_path'] = Path(stored_raw_path)
-         self.processing_context['video_filename'] = Path(stored_raw_path).name
 
      def _setup_processing_environment(self):
          """Setup the processing environment without file movement."""
@@ -405,7 +385,7 @@ class VideoImportService():
      def _process_frames_and_metadata(self):
          """Process frames and extract metadata with anonymization."""
          # Check frame cleaning availability
-         frame_cleaning_available, FrameCleaner, ReportReader = self._ensure_frame_cleaning_available()
+         frame_cleaning_available, frame_cleaner = self._ensure_frame_cleaning_available()
          video = self._require_current_video()
 
          raw_file_field = video.raw_file
@@ -426,7 +406,7 @@ class VideoImportService():
          from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeoutError
 
          with ThreadPoolExecutor(max_workers=1) as executor:
-             future = executor.submit(self._perform_frame_cleaning, FrameCleaner, endoscope_data_roi_nested, endoscope_image_roi)
+             future = executor.submit(self._perform_frame_cleaning, endoscope_data_roi_nested, endoscope_image_roi)
              try:
                  # Increased timeout to better accommodate ffmpeg + OCR
                  future.result(timeout=300)
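
The timeout pattern used here, reduced to a runnable sketch: submit the heavy work to a single worker thread and bound the wait with future.result(timeout=...):

    from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeoutError
    import time

    def heavy_task():
        time.sleep(1)  # stand-in for ffmpeg + OCR work
        return "done"

    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(heavy_task)
        try:
            print(future.result(timeout=300))  # raises FutureTimeoutError when exceeded
        except FutureTimeoutError:
            print("frame cleaning timed out")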
@@ -472,6 +452,9 @@ class VideoImportService():
              self.processing_context['error_reason'] = f"Frame cleaning failed: {e}, Fallback failed: {fallback_error}"
 
      def _save_anonymized_video(self):
+
+         original_raw_file_path_to_delete = None
+         original_raw_frame_dir_to_delete = None
          video = self._require_current_video()
          anonymized_video_path = video.get_target_anonymized_video_path()
 
@@ -759,6 +742,17 @@
          except Exception as exc:
              self.logger.error("Failed to retrieve processor ROI information: %s", exc)
 
+         # Convert dict to nested list if necessary to match return type
+         if isinstance(endoscope_data_roi_nested, dict):
+             # Convert dict[str, dict[str, int | None] | None] to List[List[Dict[str, Any]]]
+             converted_roi = []
+             for key, value in endoscope_data_roi_nested.items():
+                 if isinstance(value, dict):
+                     converted_roi.append([value])
+                 elif value is None:
+                     converted_roi.append([])
+             endoscope_data_roi_nested = converted_roi
+
          return endoscope_data_roi_nested, endoscope_image_roi
 
      def _ensure_default_patient_data(self, video_instance: VideoFile | None = None) -> None:
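
A worked example of the ROI normalization added above, assuming a processor returns dict-shaped ROIs (field names are illustrative):

    rois = {
        "patient_name": {"x": 10, "y": 20, "width": 200, "height": 30},
        "unused_field": None,
    }
    converted = []
    for key, value in rois.items():
        if isinstance(value, dict):
            converted.append([value])
        elif value is None:
            converted.append([])
    # converted == [[{"x": 10, "y": 20, "width": 200, "height": 30}], []]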
@@ -780,8 +774,6 @@ class VideoImportService():
              sensitive_meta = SensitiveMeta.create_from_dict(default_data)
              video.sensitive_meta = sensitive_meta
              video.save(update_fields=["sensitive_meta"])
-             state = video.get_or_create_state()
-             state.mark_sensitive_meta_processed(save=True)
              self.logger.info("Created default SensitiveMeta for video %s", video.uuid)
          except Exception as exc:
              self.logger.error("Failed to create default SensitiveMeta for video %s: %s", video.uuid, exc)
@@ -820,67 +812,43 @@ class VideoImportService():
          Tuple of (availability_flag, FrameCleaner_class, ReportReader_class)
          """
          try:
-             # Check if we can find the lx-anonymizer directory
-             from importlib import resources
-             lx_anonymizer_path = resources.files("lx_anonymizer")
+             # Check if we can find lx-anonymizer
+             from lx_anonymizer import FrameCleaner  # type: ignore[import]
 
-             # make sure lx_anonymizer_path is a Path object
-             lx_anonymizer_path = Path(str(lx_anonymizer_path))
-
-             if lx_anonymizer_path.exists():
-                 # Add to Python path temporarily
-                 if str(lx_anonymizer_path) not in sys.path:
-                     sys.path.insert(0, str(lx_anonymizer_path))
-
-                 # Try simple import
-                 from lx_anonymizer import FrameCleaner, ReportReader
-
-                 self.logger.info("Successfully imported lx_anonymizer modules")
-
-                 # Remove from path to avoid conflicts
-                 if str(lx_anonymizer_path) in sys.path:
-                     sys.path.remove(str(lx_anonymizer_path))
-
-                 return True, FrameCleaner, ReportReader
-
-             else:
-                 self.logger.warning(f"lx-anonymizer path not found: {lx_anonymizer_path}")
-
+             if FrameCleaner:
+                 return True, FrameCleaner
+
          except Exception as e:
-             self.logger.warning(f"Frame cleaning not available: {e}")
+             self.logger.warning(f"Frame cleaning not available: {e} Please install or update lx_anonymizer.")
 
-         return False, None, None
+         return False, None
 
 
 
-     def _perform_frame_cleaning(self, FrameCleaner, endoscope_data_roi_nested, endoscope_image_roi):
+     def _perform_frame_cleaning(self, endoscope_data_roi_nested, endoscope_image_roi):
          """Perform frame cleaning and anonymization."""
          # Instantiate frame cleaner
-         frame_cleaner = FrameCleaner()
-
+         is_available, frame_cleaner = self._ensure_frame_cleaning_available()
+
+         if not is_available:
+             raise RuntimeError("Frame cleaning not available")
+
          # Prepare parameters for frame cleaning
          raw_video_path = self.processing_context.get('raw_video_path')
 
          if not raw_video_path or not Path(raw_video_path).exists():
              raise RuntimeError(f"Raw video path not found: {raw_video_path}")
-
-         # Get processor name safely
-         video = self._require_current_video()
-         video_meta = getattr(video, "video_meta", None)
-         processor = getattr(video_meta, "processor", None) if video_meta else None
-         device_name = processor.name if processor else self.processing_context['processor_name']
+
 
          # Create temporary output path for cleaned video
          video_filename = self.processing_context.get('video_filename', Path(raw_video_path).name)
          cleaned_filename = f"cleaned_{video_filename}"
          cleaned_video_path = Path(raw_video_path).parent / cleaned_filename
 
-         # Processor roi is used later to OCR preknown regions.
 
          # Clean video with ROI masking (heavy I/O operation)
          actual_cleaned_path, extracted_metadata = frame_cleaner.clean_video(
              video_path=Path(raw_video_path),
-             video_file_obj=video,
              endoscope_image_roi=endoscope_image_roi,
              endoscope_data_roi_nested=endoscope_data_roi_nested,
              output_path=cleaned_video_path,
@@ -1023,7 +991,7 @@ def import_and_anonymize(
      center_name: str,
      processor_name: str,
      save_video: bool = True,
-     delete_source: bool = False,
+     delete_source: bool = True,
  ) -> VideoFile | None:
      """Module-level helper that instantiates VideoImportService and runs import_and_anonymize.
      Kept for backward compatibility with callers that import this function directly.
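
Since the default flipped from False to True, callers that relied on sources being preserved now need to opt out explicitly; a hedged sketch (the leading source-path argument is not shown in this hunk, so its name here is assumed):

    video = import_and_anonymize(
        "incoming/raw_videos/example.mp4",   # assumed positional source path
        center_name="example_center",        # illustrative values
        processor_name="example_processor",
        save_video=True,
        delete_source=False,                 # pre-0.8.3.3 behavior
    )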
@@ -53,7 +53,7 @@ def apply_video_mask_task(self, video_id: int, mask_type: str = 'device_default'
          self.update_state(state='PROGRESS', meta={'progress': 10, 'message': 'Setting up FrameCleaner...'})
 
          # Initialize FrameCleaner
-         cleaner = FrameCleaner(use_minicpm=True)
+         cleaner = FrameCleaner()
 
          # Determine mask configuration
          if mask_type == 'custom' and custom_mask:
@@ -110,14 +110,14 @@ def _setup_frame_removal(video_id: int, detection_engine: str):
      from lx_anonymizer.frame_cleaner import FrameCleaner
      from django.shortcuts import get_object_or_404
      video = get_object_or_404(VideoFile, pk=video_id)
-     video_path = Path(video.file.path)
+     video_path = Path(video.raw_file.path)
      if not video_path.exists():
          raise FileNotFoundError(f"Video file not found: {video_path}")
      output_dir = video_path.parent / "processed"
      output_dir.mkdir(exist_ok=True)
      output_path = output_dir / f"{video_path.stem}_cleaned{video_path.suffix}"
      use_minicpm = detection_engine == 'minicpm'
-     cleaner = FrameCleaner(use_minicpm=use_minicpm)
+     cleaner = FrameCleaner()
      return video, video_path, output_path, cleaner
 
  def _detect_sensitive_frames(self, cleaner, video_path, selection_method, manual_frames, total_frames):
@@ -257,7 +257,7 @@ def reprocess_video_task(self, video_id: int):
          self.update_state(state='PROGRESS', meta={'progress': 20, 'message': 'Initializing FrameCleaner...'})
 
          # Initialize FrameCleaner with optimal settings
-         cleaner = FrameCleaner(use_minicpm=True)
+         cleaner = FrameCleaner()
 
          # Create output path
          output_dir = video_path.parent / "processed"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: endoreg-db
- Version: 0.8.3.1
+ Version: 0.8.3.3
  Summary: EndoReg Db Django App
  Project-URL: Homepage, https://info.coloreg.de
  Project-URL: Repository, https://github.com/wg-lux/endoreg-db
@@ -33,7 +33,7 @@ Requires-Dist: huggingface-hub>=0.35.3
  Requires-Dist: icecream>=2.1.4
  Requires-Dist: librosa==0.11.0
  Requires-Dist: llvmlite>=0.44.0
- Requires-Dist: lx-anonymizer[llm,ocr]>=0.8.7
+ Requires-Dist: lx-anonymizer[llm,ocr]>=0.8.8
  Requires-Dist: moviepy==2.2.1
  Requires-Dist: mypy>=1.16.0
  Requires-Dist: numpy>=2.2.3
@@ -248,6 +248,7 @@ endoreg_db/management/__init__.py,sha256=3dsK9Mizq1veuWTcvSOyWMFT9VI8wtyk-P2K9Ri
  endoreg_db/management/commands/__init__.py,sha256=Ch0jwQfNpOSr4O5KKMfYJ93dsesk1Afb-JtbRVyFXZs,21
  endoreg_db/management/commands/anonymize_video.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  endoreg_db/management/commands/check_auth.py,sha256=TPiYeCZ5QcqIvR33xhbqXunO2nrcNAmHb_izoMTqgpg,5390
+ endoreg_db/management/commands/create_model_meta_from_huggingface.py,sha256=RUuoBjTzdchuMY6qcwBENN7FTyTygPTZQBZYWwhugDc,3925
  endoreg_db/management/commands/create_multilabel_model_meta.py,sha256=qeoyqcF2CWcnhniVRrlYbmJmwNwyZb-VQ0pjkr6arJU,7566
  endoreg_db/management/commands/fix_missing_patient_data.py,sha256=5TPUTOQwI2fVh3Zd88o4ne0R8N_V98k0GZsI1gW0kGM,7766
  endoreg_db/management/commands/fix_video_paths.py,sha256=7LLwc38oX3B_tYWbLJA43Li_KBO3m5Lyw0CF6YqN5rU,7145
@@ -289,6 +290,7 @@ endoreg_db/management/commands/load_unit_data.py,sha256=tcux-iL-ByT2ApgmHEkLllZS
  endoreg_db/management/commands/load_user_groups.py,sha256=D7SK2FvZEHoE4TIXNGCjDw5_12MH9bpGZvoS7eEv0Os,1031
  endoreg_db/management/commands/register_ai_model.py,sha256=KixTfuQR6TUfRmzB5GOos16BFOz7NL4TzLzBkgtPPgE,2510
  endoreg_db/management/commands/reset_celery_schedule.py,sha256=U-m_FNRTw6LAwJoT9RUE4qrhmQXm7AyFToPcHYyJpIE,386
+ endoreg_db/management/commands/setup_endoreg_db.py,sha256=_mJkNB2IZNcgDQkOExUTkmmjp9qMwEiZH2KEJcyCi_Y,8635
  endoreg_db/management/commands/start_filewatcher.py,sha256=3jESBqRiYPa9f35--zd70qQaYnyT0tzRO_b_HJuyteQ,4093
  endoreg_db/management/commands/storage_management.py,sha256=NpToX59ndwTFNmnSoeppmiPdMvpjSHH7mAdIe4SvUoI,22396
  endoreg_db/management/commands/summarize_db_content.py,sha256=pOIz3qbY4Ktmh0zV_DKFx971VD0pPx027gCD7a47EL0,10766
@@ -462,7 +464,7 @@ endoreg_db/models/medical/risk/risk_type.py,sha256=kEugcaWSTEWH_Vxq4dcF80Iv1L4_K
  endoreg_db/models/metadata/__init__.py,sha256=8I6oLj3YTmeaPGJpL0AWG5gLwp38QzrEggxSkTisv7c,474
  endoreg_db/models/metadata/frame_ocr_result.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  endoreg_db/models/metadata/model_meta.py,sha256=F_r-PTLeNi4J-4EaGCQkGIguhdl7Bwba7_i56ZAjc-4,7589
- endoreg_db/models/metadata/model_meta_logic.py,sha256=27mqScxUTJXNUVc6CqAs5dXjspEsh0TWPmlxdJVulGc,12015
+ endoreg_db/models/metadata/model_meta_logic.py,sha256=6w1YX8hVq40UXbVN1fvDO9OljwekBZaDVHEjVZecoV8,12252
  endoreg_db/models/metadata/pdf_meta.py,sha256=BTmpSgqxmPKi0apcNjyrZAS4AFKCPXVdBd6VBeyyv6E,3174
  endoreg_db/models/metadata/sensitive_meta.py,sha256=ekLHrW-b5uYcjfkRd0EW5ncx5ef8Bu-K6msDkpWCAbk,13034
  endoreg_db/models/metadata/sensitive_meta_logic.py,sha256=Oh7ssZQEPfKGfRMF5nXKJpOIxXx-Xibd3rpOu-bQilk,29988
@@ -600,10 +602,10 @@ endoreg_db/services/pseudonym_service.py,sha256=CJhbtRa6K6SPbphgCZgEMi8AFQtB18CU
  endoreg_db/services/requirements_object.py,sha256=290zf8AEbVtCoHhW4Jr7_ud-RvrqYmb1Nz9UBHtTnc0,6164
  endoreg_db/services/segment_sync.py,sha256=YgHvIHkbW4mqCu0ACf3zjRSZnNfxWwt4gh5syUVXuE0,6400
  endoreg_db/services/storage_aware_video_processor.py,sha256=kKFK64vXLeBSVkp1YJonU3gFDTeXZ8C4qb9QZZB99SE,13420
- endoreg_db/services/video_import.py,sha256=Ifl-x1WSlHEcA-Lzf75l_b84g8LqXXUA_OmENZhjv3A,47747
+ endoreg_db/services/video_import.py,sha256=gDuVTW5WUYGSc0m5ly67cc10YpnTpBkxO7uOEcRa3Ok,45663
  endoreg_db/tasks/upload_tasks.py,sha256=OJq7DhNwcbWdXzHY8jz5c51BCVkPN5gSWOz-6Fx6W5M,7799
  endoreg_db/tasks/video_ingest.py,sha256=kxFuYkHijINV0VabQKCFVpJRv6eCAw07tviONurDgg8,5265
- endoreg_db/tasks/video_processing_tasks.py,sha256=KjcERRJ1TZzmavBpvr6OsvSTUViU0PR1ECWnEdzu2Js,14140
+ endoreg_db/tasks/video_processing_tasks.py,sha256=rZ7Kr49bAR4Q-vALO2SURebrhcJ5hSFGwjF4aULrOao,14089
  endoreg_db/templates/timeline.html,sha256=H9VXKOecCzqcWWkpNIZXFI29ztg-oxV5uvxMglgoClk,6167
  endoreg_db/templates/admin/patient_finding_intervention.html,sha256=F3JUKm3HhWIf_xoZZ-SET5d5ZDlm2jMM8g909w1dnYc,10164
  endoreg_db/templates/admin/start_examination.html,sha256=3K4wirul9KNyB5mN9cpfCSCAyAD6ro19GwxFOY5sZ3A,267
@@ -784,7 +786,7 @@ endoreg_db/views/video/video_meta.py,sha256=C1wBMTtQb_yzEUrhFGAy2UHEWMk_CbU75WXX
  endoreg_db/views/video/video_processing_history.py,sha256=mhFuS8RG5GV8E-lTtuD0qrq-bIpnUFp8vy9aERfC-J8,770
  endoreg_db/views/video/video_remove_frames.py,sha256=2FmvNrSPM0fUXiBxINN6vBUUDCqDlBkNcGR3WsLDgKo,1696
  endoreg_db/views/video/video_stream.py,sha256=kLyuf0ORTmsLeYUQkTQ6iRYqlIQozWhMMR3Lhfe_trk,12148
- endoreg_db-0.8.3.1.dist-info/METADATA,sha256=M6P6tLtoK5aa7AEUO9ZjJxAIe96STW10oY4grrihNYU,14758
- endoreg_db-0.8.3.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- endoreg_db-0.8.3.1.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- endoreg_db-0.8.3.1.dist-info/RECORD,,
+ endoreg_db-0.8.3.3.dist-info/METADATA,sha256=anKqQ1fidx7S7ca0cWHU1UHEDNI67ujUV-RO4IGgr1g,14758
+ endoreg_db-0.8.3.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ endoreg_db-0.8.3.3.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ endoreg_db-0.8.3.3.dist-info/RECORD,,