endoreg-db 0.8.6.1__py3-none-any.whl → 0.8.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of endoreg-db might be problematic. Click here for more details.
- endoreg_db/authz/auth.py +74 -0
- endoreg_db/authz/backends.py +168 -0
- endoreg_db/authz/management/commands/list_routes.py +18 -0
- endoreg_db/authz/middleware.py +83 -0
- endoreg_db/authz/permissions.py +127 -0
- endoreg_db/authz/policy.py +218 -0
- endoreg_db/authz/views_auth.py +66 -0
- endoreg_db/config/env.py +13 -8
- endoreg_db/data/__init__.py +8 -31
- endoreg_db/data/_examples/disease.yaml +55 -0
- endoreg_db/data/_examples/disease_classification.yaml +13 -0
- endoreg_db/data/_examples/disease_classification_choice.yaml +62 -0
- endoreg_db/data/_examples/event.yaml +64 -0
- endoreg_db/data/_examples/examination.yaml +72 -0
- endoreg_db/data/_examples/finding/anatomy_colon.yaml +128 -0
- endoreg_db/data/_examples/finding/colonoscopy.yaml +40 -0
- endoreg_db/data/_examples/finding/colonoscopy_bowel_prep.yaml +56 -0
- endoreg_db/data/_examples/finding/complication.yaml +16 -0
- endoreg_db/data/_examples/finding/data.yaml +105 -0
- endoreg_db/data/_examples/finding/examination_setting.yaml +16 -0
- endoreg_db/data/_examples/finding/medication_related.yaml +18 -0
- endoreg_db/data/_examples/finding/outcome.yaml +12 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_bowel_preparation.yaml +68 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_jnet.yaml +22 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_kudo.yaml +25 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_lesion_circularity.yaml +20 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_lesion_planarity.yaml +24 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_lesion_size.yaml +68 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_lesion_surface.yaml +20 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_location.yaml +80 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_lst.yaml +21 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_nice.yaml +20 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_paris.yaml +26 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_sano.yaml +22 -0
- endoreg_db/data/_examples/finding_classification/colonoscopy_summary.yaml +53 -0
- endoreg_db/data/_examples/finding_classification/complication_generic.yaml +25 -0
- endoreg_db/data/_examples/finding_classification/examination_setting_generic.yaml +40 -0
- endoreg_db/data/_examples/finding_classification/histology_colo.yaml +51 -0
- endoreg_db/data/_examples/finding_classification/intervention_required.yaml +26 -0
- endoreg_db/data/_examples/finding_classification/medication_related.yaml +23 -0
- endoreg_db/data/_examples/finding_classification/visualized.yaml +33 -0
- endoreg_db/data/_examples/finding_classification_choice/bowel_preparation.yaml +78 -0
- endoreg_db/data/_examples/finding_classification_choice/colon_lesion_circularity_default.yaml +32 -0
- endoreg_db/data/_examples/finding_classification_choice/colon_lesion_jnet.yaml +15 -0
- endoreg_db/data/_examples/finding_classification_choice/colon_lesion_kudo.yaml +23 -0
- endoreg_db/data/_examples/finding_classification_choice/colon_lesion_lst.yaml +15 -0
- endoreg_db/data/_examples/finding_classification_choice/colon_lesion_nice.yaml +17 -0
- endoreg_db/data/_examples/finding_classification_choice/colon_lesion_paris.yaml +57 -0
- endoreg_db/data/_examples/finding_classification_choice/colon_lesion_planarity_default.yaml +49 -0
- endoreg_db/data/_examples/finding_classification_choice/colon_lesion_sano.yaml +14 -0
- endoreg_db/data/_examples/finding_classification_choice/colon_lesion_surface_intact_default.yaml +36 -0
- endoreg_db/data/_examples/finding_classification_choice/colonoscopy_location.yaml +229 -0
- endoreg_db/data/_examples/finding_classification_choice/colonoscopy_not_complete_reason.yaml +19 -0
- endoreg_db/data/_examples/finding_classification_choice/colonoscopy_size.yaml +82 -0
- endoreg_db/data/_examples/finding_classification_choice/colonoscopy_summary_worst_finding.yaml +15 -0
- endoreg_db/data/_examples/finding_classification_choice/complication_generic_types.yaml +15 -0
- endoreg_db/data/_examples/finding_classification_choice/examination_setting_generic_types.yaml +15 -0
- endoreg_db/data/_examples/finding_classification_choice/histology.yaml +24 -0
- endoreg_db/data/_examples/finding_classification_choice/histology_polyp.yaml +20 -0
- endoreg_db/data/_examples/finding_classification_choice/outcome.yaml +19 -0
- endoreg_db/data/_examples/finding_classification_choice/yes_no_na.yaml +11 -0
- endoreg_db/data/_examples/finding_classification_type/colonoscopy_basic.yaml +48 -0
- endoreg_db/data/_examples/finding_intervention/endoscopy.yaml +43 -0
- endoreg_db/data/_examples/finding_intervention/endoscopy_colonoscopy.yaml +168 -0
- endoreg_db/data/_examples/finding_intervention/endoscopy_egd.yaml +128 -0
- endoreg_db/data/_examples/finding_intervention/endoscopy_ercp.yaml +32 -0
- endoreg_db/data/_examples/finding_intervention/endoscopy_eus_lower.yaml +9 -0
- endoreg_db/data/_examples/finding_intervention/endoscopy_eus_upper.yaml +36 -0
- endoreg_db/data/_examples/finding_intervention_type/endoscopy.yaml +15 -0
- endoreg_db/data/_examples/finding_type/data.yaml +43 -0
- endoreg_db/data/_examples/requirement/age.yaml +26 -0
- endoreg_db/data/_examples/requirement/colonoscopy_baseline_austria.yaml +45 -0
- endoreg_db/data/_examples/requirement/disease_cardiovascular.yaml +79 -0
- endoreg_db/data/_examples/requirement/disease_classification_choice_cardiovascular.yaml +41 -0
- endoreg_db/data/_examples/requirement/disease_hepatology.yaml +12 -0
- endoreg_db/data/_examples/requirement/disease_misc.yaml +12 -0
- endoreg_db/data/_examples/requirement/disease_renal.yaml +96 -0
- endoreg_db/data/_examples/requirement/endoscopy_bleeding_risk.yaml +59 -0
- endoreg_db/data/_examples/requirement/event_cardiology.yaml +251 -0
- endoreg_db/data/_examples/requirement/event_requirements.yaml +145 -0
- endoreg_db/data/_examples/requirement/finding_colon_polyp.yaml +50 -0
- endoreg_db/data/_examples/requirement/gender.yaml +25 -0
- endoreg_db/data/_examples/requirement/lab_value.yaml +441 -0
- endoreg_db/data/_examples/requirement/medication.yaml +93 -0
- endoreg_db/data/_examples/requirement_operator/age.yaml +13 -0
- endoreg_db/data/_examples/requirement_operator/lab_operators.yaml +129 -0
- endoreg_db/data/_examples/requirement_operator/model_operators.yaml +96 -0
- endoreg_db/data/_examples/requirement_set/01_endoscopy_generic.yaml +48 -0
- endoreg_db/data/_examples/requirement_set/colonoscopy_austria_screening.yaml +57 -0
- endoreg_db/data/_examples/yaml_examples.xlsx +0 -0
- endoreg_db/data/ai_model_meta/default_multilabel_classification.yaml +4 -3
- endoreg_db/data/event_classification/data.yaml +4 -0
- endoreg_db/data/event_classification_choice/data.yaml +9 -0
- endoreg_db/data/finding_classification/colonoscopy_bowel_preparation.yaml +43 -70
- endoreg_db/data/finding_classification/colonoscopy_lesion_size.yaml +22 -52
- endoreg_db/data/finding_classification/colonoscopy_location.yaml +31 -62
- endoreg_db/data/finding_classification/histology_colo.yaml +28 -36
- endoreg_db/data/requirement/colon_polyp_intervention.yaml +49 -0
- endoreg_db/data/requirement/coloreg_colon_polyp.yaml +49 -0
- endoreg_db/data/requirement_set/01_endoscopy_generic.yaml +31 -12
- endoreg_db/data/requirement_set/01_laboratory.yaml +13 -0
- endoreg_db/data/requirement_set/02_endoscopy_bleeding_risk.yaml +46 -0
- endoreg_db/data/requirement_set/90_coloreg.yaml +178 -0
- endoreg_db/data/requirement_set/_old_ +109 -0
- endoreg_db/data/requirement_set_type/data.yaml +21 -0
- endoreg_db/data/setup_config.yaml +4 -4
- endoreg_db/data/tag/requirement_set_tags.yaml +21 -0
- endoreg_db/exceptions.py +5 -2
- endoreg_db/helpers/data_loader.py +1 -1
- endoreg_db/management/commands/create_model_meta_from_huggingface.py +21 -10
- endoreg_db/management/commands/create_multilabel_model_meta.py +299 -129
- endoreg_db/management/commands/import_video.py +9 -10
- endoreg_db/management/commands/import_video_with_classification.py +1 -1
- endoreg_db/management/commands/init_default_ai_model.py +1 -1
- endoreg_db/management/commands/list_routes.py +18 -0
- endoreg_db/management/commands/load_center_data.py +12 -12
- endoreg_db/management/commands/load_requirement_data.py +60 -31
- endoreg_db/management/commands/load_requirement_set_tags.py +95 -0
- endoreg_db/management/commands/setup_endoreg_db.py +3 -3
- endoreg_db/management/commands/storage_management.py +271 -203
- endoreg_db/migrations/0001_initial.py +1799 -1300
- endoreg_db/migrations/0002_requirementset_depends_on.py +18 -0
- endoreg_db/migrations/_old/0001_initial.py +1857 -0
- endoreg_db/migrations/_old/0004_employee_city_employee_post_code_employee_street_and_more.py +68 -0
- endoreg_db/migrations/_old/0004_remove_casetemplate_rules_and_more.py +77 -0
- endoreg_db/migrations/_old/0005_merge_20251111_1003.py +14 -0
- endoreg_db/migrations/_old/0006_sensitivemeta_anonymized_text_and_more.py +68 -0
- endoreg_db/migrations/_old/0007_remove_rule_attribute_dtype_remove_rule_rule_type_and_more.py +89 -0
- endoreg_db/migrations/_old/0008_remove_event_event_classification_and_more.py +27 -0
- endoreg_db/migrations/_old/0009_alter_modelmeta_options_and_more.py +21 -0
- endoreg_db/models/__init__.py +78 -123
- endoreg_db/models/administration/__init__.py +21 -42
- endoreg_db/models/administration/ai/active_model.py +2 -2
- endoreg_db/models/administration/ai/ai_model.py +7 -6
- endoreg_db/models/administration/case/__init__.py +1 -15
- endoreg_db/models/administration/case/case.py +3 -3
- endoreg_db/models/administration/case/case_template/__init__.py +2 -14
- endoreg_db/models/administration/case/case_template/case_template.py +2 -124
- endoreg_db/models/administration/case/case_template/case_template_rule.py +2 -268
- endoreg_db/models/administration/case/case_template/case_template_rule_value.py +2 -85
- endoreg_db/models/administration/case/case_template/case_template_type.py +2 -25
- endoreg_db/models/administration/center/center.py +33 -19
- endoreg_db/models/administration/center/center_product.py +12 -9
- endoreg_db/models/administration/center/center_resource.py +25 -19
- endoreg_db/models/administration/center/center_shift.py +21 -17
- endoreg_db/models/administration/center/center_waste.py +16 -8
- endoreg_db/models/administration/person/__init__.py +2 -0
- endoreg_db/models/administration/person/employee/employee.py +10 -5
- endoreg_db/models/administration/person/employee/employee_qualification.py +9 -4
- endoreg_db/models/administration/person/employee/employee_type.py +12 -6
- endoreg_db/models/administration/person/examiner/examiner.py +13 -11
- endoreg_db/models/administration/person/patient/__init__.py +2 -0
- endoreg_db/models/administration/person/patient/patient.py +103 -100
- endoreg_db/models/administration/person/patient/patient_external_id.py +37 -0
- endoreg_db/models/administration/person/person.py +4 -0
- endoreg_db/models/administration/person/profession/__init__.py +8 -4
- endoreg_db/models/administration/person/user/portal_user_information.py +11 -7
- endoreg_db/models/administration/product/product.py +20 -15
- endoreg_db/models/administration/product/product_material.py +17 -18
- endoreg_db/models/administration/product/product_weight.py +12 -8
- endoreg_db/models/administration/product/reference_product.py +23 -55
- endoreg_db/models/administration/qualification/qualification.py +7 -3
- endoreg_db/models/administration/qualification/qualification_type.py +7 -3
- endoreg_db/models/administration/shift/scheduled_days.py +8 -5
- endoreg_db/models/administration/shift/shift.py +16 -12
- endoreg_db/models/administration/shift/shift_type.py +23 -31
- endoreg_db/models/label/__init__.py +7 -8
- endoreg_db/models/label/annotation/image_classification.py +10 -9
- endoreg_db/models/label/annotation/video_segmentation_annotation.py +8 -5
- endoreg_db/models/label/label.py +15 -15
- endoreg_db/models/label/label_set.py +19 -6
- endoreg_db/models/label/label_type.py +1 -1
- endoreg_db/models/label/label_video_segment/_create_from_video.py +5 -8
- endoreg_db/models/label/label_video_segment/label_video_segment.py +76 -102
- endoreg_db/models/label/video_segmentation_label.py +4 -0
- endoreg_db/models/label/video_segmentation_labelset.py +4 -3
- endoreg_db/models/media/frame/frame.py +22 -22
- endoreg_db/models/media/pdf/raw_pdf.py +110 -182
- endoreg_db/models/media/pdf/report_file.py +25 -29
- endoreg_db/models/media/pdf/report_reader/report_reader_config.py +30 -46
- endoreg_db/models/media/pdf/report_reader/report_reader_flag.py +23 -7
- endoreg_db/models/media/video/__init__.py +1 -0
- endoreg_db/models/media/video/create_from_file.py +48 -56
- endoreg_db/models/media/video/pipe_2.py +8 -9
- endoreg_db/models/media/video/video_file.py +150 -108
- endoreg_db/models/media/video/video_file_ai.py +288 -74
- endoreg_db/models/media/video/video_file_anonymize.py +38 -38
- endoreg_db/models/media/video/video_file_frames/__init__.py +3 -1
- endoreg_db/models/media/video/video_file_frames/_bulk_create_frames.py +6 -8
- endoreg_db/models/media/video/video_file_frames/_create_frame_object.py +7 -9
- endoreg_db/models/media/video/video_file_frames/_delete_frames.py +9 -8
- endoreg_db/models/media/video/video_file_frames/_extract_frames.py +38 -45
- endoreg_db/models/media/video/video_file_frames/_get_frame.py +6 -8
- endoreg_db/models/media/video/video_file_frames/_get_frame_number.py +4 -18
- endoreg_db/models/media/video/video_file_frames/_get_frame_path.py +4 -3
- endoreg_db/models/media/video/video_file_frames/_get_frame_paths.py +7 -6
- endoreg_db/models/media/video/video_file_frames/_get_frame_range.py +6 -8
- endoreg_db/models/media/video/video_file_frames/_get_frames.py +6 -8
- endoreg_db/models/media/video/video_file_frames/_initialize_frames.py +15 -25
- endoreg_db/models/media/video/video_file_frames/_manage_frame_range.py +26 -23
- endoreg_db/models/media/video/video_file_frames/_mark_frames_extracted_status.py +23 -14
- endoreg_db/models/media/video/video_file_io.py +109 -62
- endoreg_db/models/media/video/video_file_meta/get_crop_template.py +3 -3
- endoreg_db/models/media/video/video_file_meta/get_endo_roi.py +5 -3
- endoreg_db/models/media/video/video_file_meta/get_fps.py +37 -34
- endoreg_db/models/media/video/video_file_meta/initialize_video_specs.py +19 -25
- endoreg_db/models/media/video/video_file_meta/text_meta.py +41 -38
- endoreg_db/models/media/video/video_file_meta/video_meta.py +14 -7
- endoreg_db/models/media/video/video_file_segments.py +24 -17
- endoreg_db/models/media/video/video_metadata.py +19 -35
- endoreg_db/models/media/video/video_processing.py +96 -95
- endoreg_db/models/medical/contraindication/__init__.py +13 -3
- endoreg_db/models/medical/disease.py +22 -16
- endoreg_db/models/medical/event.py +31 -18
- endoreg_db/models/medical/examination/__init__.py +13 -6
- endoreg_db/models/medical/examination/examination.py +17 -18
- endoreg_db/models/medical/examination/examination_indication.py +26 -25
- endoreg_db/models/medical/examination/examination_time.py +16 -6
- endoreg_db/models/medical/examination/examination_time_type.py +9 -6
- endoreg_db/models/medical/examination/examination_type.py +3 -4
- endoreg_db/models/medical/finding/finding.py +38 -39
- endoreg_db/models/medical/finding/finding_classification.py +37 -48
- endoreg_db/models/medical/finding/finding_intervention.py +27 -22
- endoreg_db/models/medical/finding/finding_type.py +13 -12
- endoreg_db/models/medical/hardware/endoscope.py +20 -26
- endoreg_db/models/medical/hardware/endoscopy_processor.py +2 -2
- endoreg_db/models/medical/laboratory/lab_value.py +62 -91
- endoreg_db/models/medical/medication/medication.py +22 -10
- endoreg_db/models/medical/medication/medication_indication.py +29 -3
- endoreg_db/models/medical/medication/medication_indication_type.py +25 -14
- endoreg_db/models/medical/medication/medication_intake_time.py +31 -19
- endoreg_db/models/medical/medication/medication_schedule.py +27 -16
- endoreg_db/models/medical/organ/__init__.py +15 -12
- endoreg_db/models/medical/patient/medication_examples.py +1 -5
- endoreg_db/models/medical/patient/patient_disease.py +20 -23
- endoreg_db/models/medical/patient/patient_event.py +19 -22
- endoreg_db/models/medical/patient/patient_examination.py +48 -54
- endoreg_db/models/medical/patient/patient_examination_indication.py +16 -14
- endoreg_db/models/medical/patient/patient_finding.py +122 -139
- endoreg_db/models/medical/patient/patient_finding_classification.py +44 -49
- endoreg_db/models/medical/patient/patient_finding_intervention.py +8 -19
- endoreg_db/models/medical/patient/patient_lab_sample.py +28 -23
- endoreg_db/models/medical/patient/patient_lab_value.py +82 -89
- endoreg_db/models/medical/patient/patient_medication.py +27 -38
- endoreg_db/models/medical/patient/patient_medication_schedule.py +28 -36
- endoreg_db/models/medical/risk/risk.py +7 -6
- endoreg_db/models/medical/risk/risk_type.py +8 -5
- endoreg_db/models/metadata/model_meta.py +60 -29
- endoreg_db/models/metadata/model_meta_logic.py +125 -18
- endoreg_db/models/metadata/pdf_meta.py +19 -24
- endoreg_db/models/metadata/sensitive_meta.py +102 -85
- endoreg_db/models/metadata/sensitive_meta_logic.py +192 -173
- endoreg_db/models/metadata/video_meta.py +51 -31
- endoreg_db/models/metadata/video_prediction_logic.py +16 -23
- endoreg_db/models/metadata/video_prediction_meta.py +29 -33
- endoreg_db/models/other/distribution/date_value_distribution.py +89 -29
- endoreg_db/models/other/distribution/multiple_categorical_value_distribution.py +21 -5
- endoreg_db/models/other/distribution/numeric_value_distribution.py +114 -53
- endoreg_db/models/other/distribution/single_categorical_value_distribution.py +4 -3
- endoreg_db/models/other/emission/emission_factor.py +18 -8
- endoreg_db/models/other/gender.py +10 -5
- endoreg_db/models/other/information_source.py +25 -25
- endoreg_db/models/other/material.py +9 -5
- endoreg_db/models/other/resource.py +6 -4
- endoreg_db/models/other/tag.py +10 -5
- endoreg_db/models/other/transport_route.py +13 -8
- endoreg_db/models/other/unit.py +10 -6
- endoreg_db/models/other/waste.py +6 -5
- endoreg_db/models/requirement/requirement.py +580 -272
- endoreg_db/models/requirement/requirement_error.py +85 -0
- endoreg_db/models/requirement/requirement_evaluation/evaluate_with_dependencies.py +268 -0
- endoreg_db/models/requirement/requirement_evaluation/operator_evaluation_models.py +3 -6
- endoreg_db/models/requirement/requirement_evaluation/requirement_type_parser.py +90 -64
- endoreg_db/models/requirement/requirement_operator.py +36 -33
- endoreg_db/models/requirement/requirement_set.py +74 -57
- endoreg_db/models/state/__init__.py +4 -4
- endoreg_db/models/state/abstract.py +2 -2
- endoreg_db/models/state/anonymization.py +12 -0
- endoreg_db/models/state/audit_ledger.py +46 -47
- endoreg_db/models/state/label_video_segment.py +9 -0
- endoreg_db/models/state/raw_pdf.py +40 -46
- endoreg_db/models/state/sensitive_meta.py +6 -2
- endoreg_db/models/state/video.py +58 -53
- endoreg_db/models/upload_job.py +32 -55
- endoreg_db/models/utils.py +1 -2
- endoreg_db/root_urls.py +21 -2
- endoreg_db/serializers/__init__.py +0 -2
- endoreg_db/serializers/anonymization.py +18 -10
- endoreg_db/serializers/meta/report_meta.py +1 -1
- endoreg_db/serializers/meta/sensitive_meta_detail.py +63 -118
- endoreg_db/serializers/misc/file_overview.py +11 -99
- endoreg_db/serializers/requirements/requirement_sets.py +92 -22
- endoreg_db/serializers/video/segmentation.py +2 -1
- endoreg_db/serializers/video/video_processing_history.py +20 -5
- endoreg_db/services/anonymization.py +75 -73
- endoreg_db/services/lookup_service.py +37 -24
- endoreg_db/services/pdf_import.py +166 -68
- endoreg_db/services/storage_aware_video_processor.py +140 -114
- endoreg_db/services/video_import.py +193 -283
- endoreg_db/urls/__init__.py +7 -20
- endoreg_db/urls/media.py +108 -67
- endoreg_db/urls/root_urls.py +29 -0
- endoreg_db/utils/__init__.py +15 -5
- endoreg_db/utils/ai/multilabel_classification_net.py +116 -20
- endoreg_db/utils/case_generator/__init__.py +3 -0
- endoreg_db/utils/dataloader.py +88 -16
- endoreg_db/utils/defaults/set_default_center.py +32 -0
- endoreg_db/utils/names.py +22 -16
- endoreg_db/utils/permissions.py +2 -1
- endoreg_db/utils/pipelines/process_video_dir.py +1 -1
- endoreg_db/utils/requirement_operator_logic/model_evaluators.py +414 -127
- endoreg_db/utils/setup_config.py +8 -5
- endoreg_db/utils/storage.py +115 -0
- endoreg_db/utils/validate_endo_roi.py +8 -2
- endoreg_db/utils/video/ffmpeg_wrapper.py +184 -188
- endoreg_db/views/__init__.py +0 -10
- endoreg_db/views/anonymization/media_management.py +198 -163
- endoreg_db/views/anonymization/overview.py +4 -1
- endoreg_db/views/anonymization/validate.py +174 -40
- endoreg_db/views/media/__init__.py +2 -0
- endoreg_db/views/media/pdf_media.py +131 -152
- endoreg_db/views/media/sensitive_metadata.py +46 -6
- endoreg_db/views/media/video_media.py +89 -82
- endoreg_db/views/media/video_segments.py +2 -3
- endoreg_db/views/meta/sensitive_meta_detail.py +0 -63
- endoreg_db/views/patient/patient.py +5 -4
- endoreg_db/views/pdf/pdf_stream.py +20 -21
- endoreg_db/views/pdf/reimport.py +11 -32
- endoreg_db/views/requirement/evaluate.py +188 -187
- endoreg_db/views/requirement/lookup.py +17 -3
- endoreg_db/views/requirement/requirement_utils.py +89 -0
- endoreg_db/views/video/__init__.py +0 -2
- endoreg_db/views/video/correction.py +2 -2
- {endoreg_db-0.8.6.1.dist-info → endoreg_db-0.8.8.0.dist-info}/METADATA +7 -3
- {endoreg_db-0.8.6.1.dist-info → endoreg_db-0.8.8.0.dist-info}/RECORD +341 -245
- endoreg_db/models/administration/permissions/__init__.py +0 -44
- endoreg_db/models/media/video/video_file_frames.py +0 -0
- endoreg_db/models/metadata/frame_ocr_result.py +0 -0
- endoreg_db/models/rule/__init__.py +0 -13
- endoreg_db/models/rule/rule.py +0 -27
- endoreg_db/models/rule/rule_applicator.py +0 -224
- endoreg_db/models/rule/rule_attribute_dtype.py +0 -17
- endoreg_db/models/rule/rule_type.py +0 -20
- endoreg_db/models/rule/ruleset.py +0 -17
- endoreg_db/serializers/video/video_metadata.py +0 -105
- endoreg_db/urls/report.py +0 -48
- endoreg_db/urls/video.py +0 -61
- endoreg_db/utils/case_generator/case_generator.py +0 -159
- endoreg_db/utils/case_generator/utils.py +0 -30
- endoreg_db/views/report/__init__.py +0 -9
- endoreg_db/views/report/report_list.py +0 -112
- endoreg_db/views/report/report_with_secure_url.py +0 -28
- endoreg_db/views/report/start_examination.py +0 -7
- endoreg_db/views.py +0 -0
- /endoreg_db/data/{requirement_set → _examples/requirement_set}/endoscopy_bleeding_risk.yaml +0 -0
- /endoreg_db/migrations/{0002_add_video_correction_models.py → _old/0002_add_video_correction_models.py} +0 -0
- /endoreg_db/migrations/{0003_add_center_display_name.py → _old/0003_add_center_display_name.py} +0 -0
- /endoreg_db/{models/media/video/refactor_plan.md → views/pdf/pdf_stream_views.py} +0 -0
- {endoreg_db-0.8.6.1.dist-info → endoreg_db-0.8.8.0.dist-info}/WHEEL +0 -0
- {endoreg_db-0.8.6.1.dist-info → endoreg_db-0.8.8.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,21 +1,173 @@
|
|
|
1
1
|
import logging
|
|
2
|
+
from collections import Counter, defaultdict
|
|
2
3
|
from pathlib import Path
|
|
3
|
-
from typing import TYPE_CHECKING,
|
|
4
|
-
|
|
4
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
|
|
5
|
+
|
|
6
|
+
import numpy as np
|
|
7
|
+
from safetensors import safe_open
|
|
5
8
|
|
|
6
|
-
from ...utils import TEST_RUN as GLOBAL_TEST_RUN, TEST_RUN_FRAME_NUMBER as GLOBAL_N_TEST_FRAMES
|
|
7
9
|
from ...metadata import ModelMeta, VideoPredictionMeta
|
|
10
|
+
from ...utils import TEST_RUN as GLOBAL_TEST_RUN
|
|
11
|
+
from ...utils import TEST_RUN_FRAME_NUMBER as GLOBAL_N_TEST_FRAMES
|
|
8
12
|
|
|
9
13
|
if TYPE_CHECKING:
|
|
10
|
-
from .video_file import VideoFile
|
|
11
14
|
from ...medical.hardware import EndoscopyProcessor
|
|
15
|
+
from .video_file import VideoFile
|
|
12
16
|
|
|
13
17
|
logger = logging.getLogger(__name__)
|
|
14
18
|
|
|
15
19
|
|
|
16
|
-
def
|
|
17
|
-
|
|
18
|
-
|
|
20
|
+
def _is_stub_weights_file(weights_path: Path) -> bool:
    """Return True if the provided weights file is a known test stub.

    A file counts as a stub when its name contains ``stub``, or when it is
    tiny (under 4 KiB) and is either empty or begins with the
    ``stub-weights`` marker. Files that cannot be stat'ed or read are
    treated as real weights (returns False).
    """
    if "stub" in weights_path.name.lower():
        return True

    try:
        is_small = weights_path.stat().st_size < 4096
    except OSError:
        # Unreadable metadata: assume a real weights file.
        return False

    if not is_small:
        return False

    try:
        with weights_path.open("rb") as handle:
            prefix = handle.read(32)
    except OSError:
        return False

    # An empty file is also considered a stub.
    return (not prefix) or prefix.startswith(b"stub-weights")
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def _resolve_label_names(model_meta: "ModelMeta") -> List[str]:
    """Return deterministic label ordering for the associated label set.

    Prefers the label set's explicit ``get_labels_in_order`` helper; legacy
    label sets that only expose the raw related manager fall back to an
    alphabetical ordering by label name. Returns an empty list when no
    label set is attached.
    """
    labelset = model_meta.labelset
    if not labelset:
        return []

    try:
        return [entry.name for entry in labelset.get_labels_in_order()]
    except AttributeError:
        # Legacy labelsets only provide the raw manager interface.
        return [entry.name for entry in labelset.labels.all().order_by("name")]
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _infer_model_type(model_meta: "ModelMeta", weights_path: Path) -> str:
    """Best-effort detection of the backbone expected by the safetensors weights.

    Scans a handful of metadata strings — model subtype and name, the meta's
    own name and description, and finally the weights file stem — for known
    backbone hints. Falls back to ``EfficientNetB4`` with a warning when no
    hint matches.
    """
    meta_model = model_meta.model
    hints = [
        getattr(meta_model, "model_subtype", None) if meta_model else None,
        getattr(meta_model, "name", None) if meta_model else None,
        model_meta.name,
        model_meta.description,
        weights_path.stem,
    ]

    for hint in hints:
        if not hint:
            continue
        lowered = str(hint).lower()
        if "regnet" in lowered:
            return "RegNetX800MF"
        if "efficientnet" in lowered:
            # Covers both the plain EfficientNet hint and the explicit B4 variant.
            return "EfficientNetB4"

    logger.warning(
        "Unable to infer model backbone for %s; defaulting to EfficientNetB4.",
        weights_path,
    )
    return "EfficientNetB4"
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
# Label vocabulary of the legacy multilabel classifier checkpoints.
# NOTE(review): the list order is presumably the legacy model's output-neuron
# order — confirm against the checkpoint before reordering.
LEGACY_CLASS_LABELS = [
    "appendix",
    "blood",
    "diverticule",
    "grasper",
    "ileocaecalvalve",
    "ileum",
    "low_quality",
    "nbi",
    "needle",
    "outside",
    "polyp",
    "snare",
    "water_jet",
    "wound",
]

# Maps legacy label names onto the label names used by current label sets
# (several legacy instrument labels collapse into a single "instrument" label).
LEGACY_LABEL_ALIASES = {
    "nbi": "digital_chromo_endoscopy",
    "grasper": "instrument",
    "needle": "instrument",
    "snare": "instrument",
}

# Legacy labels deliberately dropped (without a debug log) when remapping
# onto current label sets.
LEGACY_IGNORED_LABELS = {"diverticule"}
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def _infer_output_classes(weights_path: Path) -> Optional[int]:
    """Read the classifier-head size from a safetensors checkpoint.

    Returns the row count of the ``model.fc.weight`` tensor (i.e. the number
    of output classes), or ``None`` when the file is not a ``.safetensors``
    archive or cannot be inspected.
    """
    if weights_path.suffix.lower() != ".safetensors":
        return None

    try:
        with safe_open(weights_path, framework="pt", device="cpu") as handle:
            return int(handle.get_tensor("model.fc.weight").shape[0])
    except Exception as exc:  # pragma: no cover - defensive logging only
        logger.debug("Unable to infer output classes from %s: %s", weights_path, exc)
        return None
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def _build_label_mapping(source_labels: List[str], target_labels: List[str]) -> Dict[str, List[str]]:
    """Map each target label to the source labels that should feed it.

    Identical label lists map one-to-one. Otherwise, each source label is
    routed through ``LEGACY_LABEL_ALIASES``; sources without a matching
    target are dropped (silently for ``LEGACY_IGNORED_LABELS``, with a debug
    log otherwise), and any target left without sources falls back to its
    own name.
    """
    if source_labels == target_labels:
        return {name: [name] for name in target_labels}

    routing: Dict[str, List[str]] = {name: [] for name in target_labels}

    for name in source_labels:
        resolved = LEGACY_LABEL_ALIASES.get(name, name)
        if resolved in routing:
            routing[resolved].append(name)
        elif name not in LEGACY_IGNORED_LABELS:
            logger.debug("Label '%s' from source set has no mapping; dropping.", name)

    # Targets nothing maps onto pass through under their own name.
    for name in target_labels:
        routing[name] = routing[name] or [name]

    return routing
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def _remap_prediction_dict(predictions: Dict[str, Any], mapping: Dict[str, List[str]]) -> Dict[str, Any]:
    """Collapse per-source predictions onto target labels via a max-merge.

    For each target label, the predictions of its mapped source labels are
    combined by taking the maximum — element-wise for array-like values,
    scalar ``max`` for numbers. Targets with no available source prediction
    default to ``0.0``.
    """
    combined: Dict[str, Any] = {}
    for target, sources in mapping.items():
        available = [predictions[s] for s in sources if predictions.get(s) is not None]
        if not available:
            combined[target] = 0.0
            continue

        sample = available[0]
        if isinstance(sample, np.ndarray):
            combined[target] = np.stack(available, axis=0).max(axis=0)
        elif hasattr(sample, "__iter__") and not isinstance(sample, (float, int)):
            # Generic sequences (e.g. lists of floats) go through numpy too.
            combined[target] = np.stack([np.asarray(v) for v in available], axis=0).max(axis=0)
        else:
            combined[target] = max(float(v) for v in available)

    return combined
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def _extract_text_from_video_frames(video: "VideoFile", frame_fraction: float = 0.001, cap: int = 15) -> Optional[Dict[str, str]]:
|
|
19
171
|
"""
|
|
20
172
|
Extracts text from a sample of video frames using OCR based on processor ROIs.
|
|
21
173
|
Requires frames to be extracted. Raises ValueError on pre-condition failure.
|
|
@@ -29,7 +181,7 @@ def _extract_text_from_video_frames(
|
|
|
29
181
|
extract_text_from_rois,
|
|
30
182
|
) # Local import for dependency isolation
|
|
31
183
|
|
|
32
|
-
state = video.get_or_create_state()
|
|
184
|
+
state = video.get_or_create_state() # Use State helper
|
|
33
185
|
# --- Pre-condition Check ---
|
|
34
186
|
if not state.frames_extracted:
|
|
35
187
|
# Raise exception
|
|
@@ -42,7 +194,7 @@ def _extract_text_from_video_frames(
|
|
|
42
194
|
raise ValueError(f"Processor not set for video {video.uuid}. Cannot extract text.")
|
|
43
195
|
|
|
44
196
|
try:
|
|
45
|
-
frame_paths = video.get_frame_paths()
|
|
197
|
+
frame_paths = video.get_frame_paths() # Use Frame helper
|
|
46
198
|
except Exception as e:
|
|
47
199
|
logger.error("Error getting frame paths for video %s: %s", video.uuid, e, exc_info=True)
|
|
48
200
|
raise RuntimeError(f"Could not get frame paths for video {video.uuid}") from e
|
|
@@ -50,7 +202,7 @@ def _extract_text_from_video_frames(
|
|
|
50
202
|
n_frames = len(frame_paths)
|
|
51
203
|
if n_frames == 0:
|
|
52
204
|
logger.warning("No frame paths found for video %s during text extraction.", video.uuid)
|
|
53
|
-
return None
|
|
205
|
+
return None # Return None if no frames, not an error condition for this function
|
|
54
206
|
|
|
55
207
|
# Determine number of frames to process
|
|
56
208
|
n_frames_to_process = max(1, int(frame_fraction * n_frames))
|
|
@@ -78,10 +230,8 @@ def _extract_text_from_video_frames(
|
|
|
78
230
|
rois_texts[roi].append(text)
|
|
79
231
|
except Exception as e:
|
|
80
232
|
# Log error but continue processing other frames
|
|
81
|
-
logger.error(
|
|
82
|
-
|
|
83
|
-
)
|
|
84
|
-
errors_encountered = True # Flag that an error occurred
|
|
233
|
+
logger.error("Error extracting text from frame %s for video %s: %s", frame_path, video.uuid, e, exc_info=True)
|
|
234
|
+
errors_encountered = True # Flag that an error occurred
|
|
85
235
|
|
|
86
236
|
# Determine the most frequent text for each ROI
|
|
87
237
|
most_frequent_texts = {}
|
|
@@ -97,17 +247,15 @@ def _extract_text_from_video_frames(
|
|
|
97
247
|
else:
|
|
98
248
|
most_frequent_texts[roi] = None
|
|
99
249
|
except Exception as e:
|
|
100
|
-
logger.error(
|
|
101
|
-
"Error finding most common text for ROI %s: %s", roi, e, exc_info=True
|
|
102
|
-
)
|
|
250
|
+
logger.error("Error finding most common text for ROI %s: %s", roi, e, exc_info=True)
|
|
103
251
|
most_frequent_texts[roi] = None
|
|
104
252
|
|
|
105
253
|
if errors_encountered:
|
|
106
254
|
logger.warning("Errors occurred during text extraction for some frames of video %s. Results may be incomplete.", video.uuid)
|
|
107
255
|
|
|
108
256
|
if not most_frequent_texts:
|
|
109
|
-
|
|
110
|
-
|
|
257
|
+
logger.info("No text extracted for any ROI for video %s.", video.uuid)
|
|
258
|
+
return None # Return None if no text found
|
|
111
259
|
|
|
112
260
|
logger.info("Extracted text for video %s: %s", video.uuid, most_frequent_texts)
|
|
113
261
|
return most_frequent_texts
|
|
@@ -121,7 +269,7 @@ def _predict_video_pipeline(
|
|
|
121
269
|
binarize_threshold: float = 0.5,
|
|
122
270
|
test_run: bool = False,
|
|
123
271
|
n_test_frames: int = 10,
|
|
124
|
-
) -> Dict[str, List[Tuple[int, int]]]:
|
|
272
|
+
) -> Dict[str, List[Tuple[int, int]]]: # Changed return type to non-optional
|
|
125
273
|
"""
|
|
126
274
|
Executes the video prediction pipeline using an AI model.
|
|
127
275
|
Requires frames to be extracted. Raises exceptions on failure.
|
|
@@ -132,13 +280,13 @@ def _predict_video_pipeline(
|
|
|
132
280
|
"""
|
|
133
281
|
# Import heavy dependencies locally
|
|
134
282
|
from ...administration.ai import AiModel
|
|
135
|
-
try:
|
|
136
|
-
from ....utils.ai import InferenceDataset, MultiLabelClassificationNet, Classifier
|
|
137
283
|
|
|
284
|
+
try:
|
|
285
|
+
from ....utils.ai import Classifier, InferenceDataset, MultiLabelClassificationNet
|
|
138
286
|
from ....utils.ai.postprocess import (
|
|
139
287
|
concat_pred_dicts,
|
|
140
|
-
make_smooth_preds,
|
|
141
288
|
find_true_pred_sequences,
|
|
289
|
+
make_smooth_preds,
|
|
142
290
|
)
|
|
143
291
|
except ImportError as e:
|
|
144
292
|
logger.error("Failed to import endo_ai components: %s. Prediction unavailable.", e, exc_info=True)
|
|
@@ -150,7 +298,7 @@ def _predict_video_pipeline(
|
|
|
150
298
|
n_test_frames = GLOBAL_N_TEST_FRAMES
|
|
151
299
|
logger.info("Using global TEST_RUN settings for prediction pipeline.")
|
|
152
300
|
|
|
153
|
-
state = video.get_or_create_state()
|
|
301
|
+
state = video.get_or_create_state() # Use State helper
|
|
154
302
|
# --- Pre-condition Check ---
|
|
155
303
|
if not state.frames_extracted:
|
|
156
304
|
# Raise exception
|
|
@@ -158,7 +306,7 @@ def _predict_video_pipeline(
|
|
|
158
306
|
# --- End Pre-condition Check ---
|
|
159
307
|
|
|
160
308
|
# Frame directory check
|
|
161
|
-
frame_dir = video.get_frame_dir_path()
|
|
309
|
+
frame_dir = video.get_frame_dir_path() # Use IO helper
|
|
162
310
|
if not frame_dir or not frame_dir.exists() or not any(frame_dir.iterdir()):
|
|
163
311
|
# Raise exception
|
|
164
312
|
raise FileNotFoundError(f"Frame directory {frame_dir} is empty or does not exist for video {video.uuid}. Prediction aborted.")
|
|
@@ -180,9 +328,7 @@ def _predict_video_pipeline(
|
|
|
180
328
|
|
|
181
329
|
# Get or create VideoPredictionMeta
|
|
182
330
|
try:
|
|
183
|
-
_video_prediction_meta, created = VideoPredictionMeta.objects.get_or_create(
|
|
184
|
-
video_file=video, model_meta=model_meta
|
|
185
|
-
)
|
|
331
|
+
_video_prediction_meta, created = VideoPredictionMeta.objects.get_or_create(video_file=video, model_meta=model_meta)
|
|
186
332
|
if created:
|
|
187
333
|
logger.info(
|
|
188
334
|
"Created new VideoPredictionMeta for video %s, model %s.",
|
|
@@ -197,12 +343,18 @@ def _predict_video_pipeline(
|
|
|
197
343
|
)
|
|
198
344
|
# video_prediction_meta.save() # Save is handled by get_or_create
|
|
199
345
|
except Exception as e:
|
|
200
|
-
logger.error(
|
|
201
|
-
"Failed to get or create VideoPredictionMeta for video %s, model %s: %s", video.uuid, model_meta.name, e, exc_info=True
|
|
202
|
-
)
|
|
346
|
+
logger.error("Failed to get or create VideoPredictionMeta for video %s, model %s: %s", video.uuid, model_meta.name, e, exc_info=True)
|
|
203
347
|
# Raise exception
|
|
204
348
|
raise RuntimeError("Failed to get or create VideoPredictionMeta") from e
|
|
205
349
|
|
|
350
|
+
if _is_stub_weights_file(weights_path):
|
|
351
|
+
logger.info(
|
|
352
|
+
"Detected stub weights at %s for video %s; skipping model inference and returning empty predictions.",
|
|
353
|
+
weights_path,
|
|
354
|
+
video.uuid,
|
|
355
|
+
)
|
|
356
|
+
return {}
|
|
357
|
+
|
|
206
358
|
# --- Dataset Preparation ---
|
|
207
359
|
datasets = {
|
|
208
360
|
"inference_dataset": InferenceDataset,
|
|
@@ -214,40 +366,92 @@ def _predict_video_pipeline(
|
|
|
214
366
|
raise ValueError(f"Dataset class '{dataset_name}' not found for video {video.uuid}. Prediction aborted.")
|
|
215
367
|
|
|
216
368
|
try:
|
|
217
|
-
paths = video.get_frame_paths()
|
|
369
|
+
paths = video.get_frame_paths() # Use Frame helper
|
|
218
370
|
if not paths:
|
|
219
371
|
raise FileNotFoundError(f"No frame paths returned by get_frame_paths for {frame_dir} (Video: {video.uuid})")
|
|
220
372
|
except Exception as e:
|
|
221
|
-
logger.error(
|
|
222
|
-
"Error listing or getting frame files from %s for video %s: %s", frame_dir, video.uuid, e, exc_info=True
|
|
223
|
-
)
|
|
373
|
+
logger.error("Error listing or getting frame files from %s for video %s: %s", frame_dir, video.uuid, e, exc_info=True)
|
|
224
374
|
raise RuntimeError(f"Error getting frame paths from {frame_dir}") from e
|
|
225
375
|
|
|
226
376
|
logger.info("Found %d frame files in %s for video %s.", len(paths), frame_dir, video.uuid)
|
|
227
377
|
|
|
228
|
-
crop_template = video.get_crop_template()
|
|
378
|
+
crop_template = video.get_crop_template() # Use Meta helper
|
|
229
379
|
string_paths = [p.as_posix() for p in paths]
|
|
230
|
-
crops = [crop_template] * len(paths)
|
|
380
|
+
crops = [crop_template] * len(paths) # Assuming same crop for all frames
|
|
231
381
|
|
|
232
382
|
if test_run:
|
|
233
383
|
logger.info("TEST RUN: Using first %d frames for video %s.", n_test_frames, video.uuid)
|
|
234
384
|
string_paths = string_paths[:n_test_frames]
|
|
235
385
|
crops = crops[:n_test_frames]
|
|
236
386
|
if not string_paths:
|
|
237
|
-
|
|
387
|
+
# Raise exception
|
|
238
388
|
raise ValueError(f"Not enough frames ({len(paths)}) for test run (required {n_test_frames}) for video {video.uuid}.")
|
|
239
389
|
|
|
390
|
+
label_names = _resolve_label_names(model_meta)
|
|
391
|
+
if not label_names:
|
|
392
|
+
raise ValueError(f"Label set '{getattr(model_meta.labelset, 'name', 'unknown')}' has no labels configured.")
|
|
393
|
+
|
|
394
|
+
outputs_hint = _infer_output_classes(weights_path)
|
|
395
|
+
|
|
396
|
+
network_labels = label_names
|
|
397
|
+
if outputs_hint and outputs_hint != len(label_names):
|
|
398
|
+
if outputs_hint == len(LEGACY_CLASS_LABELS):
|
|
399
|
+
network_labels = LEGACY_CLASS_LABELS
|
|
400
|
+
logger.info(
|
|
401
|
+
"Detected legacy multilabel checkpoint with %d classes; using legacy label ordering.",
|
|
402
|
+
outputs_hint,
|
|
403
|
+
)
|
|
404
|
+
else:
|
|
405
|
+
logger.warning(
|
|
406
|
+
"Weights %s expect %d outputs while label set '%s' defines %d labels.",
|
|
407
|
+
weights_path.name,
|
|
408
|
+
outputs_hint,
|
|
409
|
+
getattr(model_meta.labelset, "name", "unknown"),
|
|
410
|
+
len(label_names),
|
|
411
|
+
)
|
|
412
|
+
|
|
413
|
+
label_mapping = _build_label_mapping(network_labels, label_names)
|
|
414
|
+
|
|
415
|
+
load_kwargs: Dict[str, Any] = {}
|
|
416
|
+
if weights_path.suffix.lower() == ".safetensors":
|
|
417
|
+
load_kwargs.update(
|
|
418
|
+
{
|
|
419
|
+
"labels": network_labels,
|
|
420
|
+
"model_type": _infer_model_type(model_meta, weights_path),
|
|
421
|
+
"load_imagenet_weights": False,
|
|
422
|
+
"strict": False,
|
|
423
|
+
}
|
|
424
|
+
)
|
|
425
|
+
|
|
426
|
+
classifier_config: Optional[Dict[str, Any]] = None
|
|
427
|
+
|
|
240
428
|
try:
|
|
241
429
|
ds_config = model_meta.get_inference_dataset_config()
|
|
242
430
|
ds = dataset_model_class(string_paths, crops, config=ds_config)
|
|
243
431
|
logger.info("Created dataset '%s' with %d items for video %s.", dataset_name, len(ds), video.uuid)
|
|
244
432
|
if len(ds) > 0:
|
|
245
|
-
sample = ds[0]
|
|
246
|
-
logger.debug("Sample shape: %s", sample
|
|
433
|
+
sample = ds[0] # Get a sample for debugging shape
|
|
434
|
+
logger.debug("Sample shape: %s", getattr(sample, "shape", None))
|
|
435
|
+
|
|
436
|
+
try:
|
|
437
|
+
activation = ModelMeta.get_activation_function(model_meta.activation)
|
|
438
|
+
except ValueError:
|
|
439
|
+
logger.warning(
|
|
440
|
+
"Unsupported activation '%s' for model %s; falling back to sigmoid.",
|
|
441
|
+
model_meta.activation,
|
|
442
|
+
model_meta.name,
|
|
443
|
+
)
|
|
444
|
+
activation = ModelMeta.get_activation_function("sigmoid")
|
|
445
|
+
|
|
446
|
+
classifier_config = {
|
|
447
|
+
**ds_config,
|
|
448
|
+
"batchsize": model_meta.batchsize or 16,
|
|
449
|
+
"num_workers": model_meta.num_workers or 0,
|
|
450
|
+
"activation": activation,
|
|
451
|
+
"labels": network_labels,
|
|
452
|
+
}
|
|
247
453
|
except Exception as e:
|
|
248
|
-
logger.error(
|
|
249
|
-
"Failed to create dataset '%s' for video %s: %s", dataset_name, video.uuid, e, exc_info=True
|
|
250
|
-
)
|
|
454
|
+
logger.error("Failed to create dataset '%s' for video %s: %s", dataset_name, video.uuid, e, exc_info=True)
|
|
251
455
|
# Raise exception
|
|
252
456
|
raise RuntimeError(f"Failed to create dataset '{dataset_name}'") from e
|
|
253
457
|
|
|
@@ -255,38 +459,47 @@ def _predict_video_pipeline(
|
|
|
255
459
|
try:
|
|
256
460
|
# Check if CUDA is available
|
|
257
461
|
import torch
|
|
462
|
+
|
|
258
463
|
if torch.cuda.is_available():
|
|
259
464
|
try:
|
|
260
|
-
|
|
465
|
+
device = torch.device("cuda")
|
|
261
466
|
ai_model_instance = MultiLabelClassificationNet.load_from_checkpoint(
|
|
262
|
-
checkpoint_path=weights_path.as_posix(),
|
|
467
|
+
checkpoint_path=weights_path.as_posix(),
|
|
468
|
+
map_location=device,
|
|
469
|
+
**load_kwargs,
|
|
263
470
|
)
|
|
264
|
-
|
|
265
|
-
_ = ai_model_instance.cuda()
|
|
471
|
+
ai_model_instance = ai_model_instance.to(device)
|
|
266
472
|
logger.info("Loaded model on GPU for video %s.", video.uuid)
|
|
267
473
|
except RuntimeError as cuda_err:
|
|
268
|
-
|
|
269
|
-
|
|
474
|
+
logger.warning(
|
|
475
|
+
"GPU loading failed for video %s: %s. Falling back to CPU.",
|
|
476
|
+
video.uuid,
|
|
477
|
+
cuda_err,
|
|
478
|
+
)
|
|
479
|
+
device = torch.device("cpu")
|
|
270
480
|
ai_model_instance = MultiLabelClassificationNet.load_from_checkpoint(
|
|
271
481
|
checkpoint_path=weights_path.as_posix(),
|
|
272
|
-
map_location=
|
|
482
|
+
map_location=device,
|
|
483
|
+
**load_kwargs,
|
|
273
484
|
)
|
|
485
|
+
ai_model_instance = ai_model_instance.to(device)
|
|
274
486
|
logger.info("Loaded model on CPU for video %s.", video.uuid)
|
|
275
487
|
else:
|
|
276
488
|
# No CUDA available, load directly on CPU
|
|
277
489
|
logger.info("CUDA not available. Loading model on CPU for video %s.", video.uuid)
|
|
490
|
+
device = torch.device("cpu")
|
|
278
491
|
ai_model_instance = MultiLabelClassificationNet.load_from_checkpoint(
|
|
279
492
|
checkpoint_path=weights_path.as_posix(),
|
|
280
|
-
map_location=
|
|
493
|
+
map_location=device,
|
|
494
|
+
**load_kwargs,
|
|
281
495
|
)
|
|
496
|
+
ai_model_instance = ai_model_instance.to(device)
|
|
282
497
|
|
|
283
|
-
_ = ai_model_instance.eval()
|
|
284
|
-
classifier = Classifier(ai_model_instance, verbose=True)
|
|
498
|
+
_ = ai_model_instance.eval() # Set to evaluation mode
|
|
499
|
+
classifier = Classifier(ai_model_instance, config=classifier_config or {}, verbose=True)
|
|
285
500
|
logger.info("AI model loaded successfully for video %s from %s.", video.uuid, weights_path)
|
|
286
501
|
except Exception as e:
|
|
287
|
-
logger.error(
|
|
288
|
-
"Failed to load AI model for video %s from %s: %s", video.uuid, weights_path, e, exc_info=True
|
|
289
|
-
)
|
|
502
|
+
logger.error("Failed to load AI model for video %s from %s: %s", video.uuid, weights_path, e, exc_info=True)
|
|
290
503
|
# Raise exception
|
|
291
504
|
raise RuntimeError(f"Failed to load AI model from {weights_path}") from e
|
|
292
505
|
|
|
@@ -299,15 +512,19 @@ def _predict_video_pipeline(
|
|
|
299
512
|
logger.error("Inference failed for video %s: %s", video.uuid, e, exc_info=True)
|
|
300
513
|
# CUDA-OOM Fallback: Speicher freigeben und CPU versuchen
|
|
301
514
|
try:
|
|
302
|
-
import
|
|
303
|
-
|
|
304
|
-
|
|
515
|
+
import gc
|
|
516
|
+
|
|
517
|
+
import torch
|
|
518
|
+
|
|
519
|
+
is_oom = isinstance(e, (getattr(torch.cuda, "OutOfMemoryError", RuntimeError), RuntimeError)) and (
|
|
520
|
+
"out of memory" in str(e).lower() or "cuda out of memory" in str(e).lower()
|
|
305
521
|
)
|
|
306
522
|
except Exception:
|
|
307
523
|
is_oom = False
|
|
308
|
-
if
|
|
524
|
+
if "torch" in globals() or "torch" in locals():
|
|
309
525
|
try:
|
|
310
526
|
import torch # ensure available in this scope
|
|
527
|
+
|
|
311
528
|
if torch.cuda.is_available() and is_oom:
|
|
312
529
|
logger.warning("CUDA OOM detected. Freeing CUDA cache and retrying on CPU…")
|
|
313
530
|
try:
|
|
@@ -339,17 +556,20 @@ def _predict_video_pipeline(
|
|
|
339
556
|
try:
|
|
340
557
|
logger.info("Post-processing predictions for video %s...", video.uuid)
|
|
341
558
|
readable_predictions = [classifier.readable(p) for p in predictions]
|
|
559
|
+
if label_mapping:
|
|
560
|
+
readable_predictions = [_remap_prediction_dict(prediction, label_mapping) for prediction in readable_predictions]
|
|
342
561
|
|
|
343
562
|
merged_predictions = concat_pred_dicts(readable_predictions)
|
|
344
563
|
|
|
345
|
-
fps = video.get_fps()
|
|
564
|
+
fps = video.get_fps() # Use Meta helper
|
|
346
565
|
if not fps:
|
|
347
566
|
logger.warning(
|
|
348
567
|
"Video FPS is unknown for %s. Smoothing/sequence calculations might be inaccurate. Using default 30 FPS.",
|
|
349
568
|
video.uuid,
|
|
350
569
|
)
|
|
351
|
-
fps = 30
|
|
570
|
+
fps = 30 # Default FPS if unknown
|
|
352
571
|
|
|
572
|
+
fps = int(fps)
|
|
353
573
|
smooth_merged_predictions = {}
|
|
354
574
|
for key in merged_predictions.keys():
|
|
355
575
|
smooth_merged_predictions[key] = make_smooth_preds(
|
|
@@ -360,9 +580,7 @@ def _predict_video_pipeline(
|
|
|
360
580
|
|
|
361
581
|
binary_smooth_merged_predictions = {}
|
|
362
582
|
for key in smooth_merged_predictions.keys():
|
|
363
|
-
binary_smooth_merged_predictions[key] =
|
|
364
|
-
smooth_merged_predictions[key] > binarize_threshold
|
|
365
|
-
)
|
|
583
|
+
binary_smooth_merged_predictions[key] = smooth_merged_predictions[key] > binarize_threshold
|
|
366
584
|
|
|
367
585
|
sequences = {}
|
|
368
586
|
for label, prediction_array in binary_smooth_merged_predictions.items():
|
|
@@ -390,10 +608,10 @@ def _predict_video_entry(
|
|
|
390
608
|
binarize_threshold: float = 0.5,
|
|
391
609
|
test_run: bool = GLOBAL_TEST_RUN,
|
|
392
610
|
n_test_frames: int = GLOBAL_N_TEST_FRAMES,
|
|
393
|
-
save_results: bool = True,
|
|
611
|
+
save_results: bool = True, # Note: save_results is handled in video_file.py now
|
|
394
612
|
):
|
|
395
613
|
"""Entry point called from VideoFile.predict_video. Imports and calls the main prediction logic."""
|
|
396
|
-
from endoreg_db.models import AiModel, ModelMeta
|
|
614
|
+
from endoreg_db.models import AiModel, ModelMeta # Local import
|
|
397
615
|
|
|
398
616
|
try:
|
|
399
617
|
ai_model = AiModel.objects.get(name=model_name)
|
|
@@ -412,7 +630,7 @@ def _predict_video_entry(
|
|
|
412
630
|
# --- Explicitly pass only the arguments expected by _predict_video_pipeline ---
|
|
413
631
|
predicted_sequences = _predict_video_pipeline(
|
|
414
632
|
video=video,
|
|
415
|
-
model_meta=model_meta,
|
|
633
|
+
model_meta=model_meta, # Pass the fetched ModelMeta object
|
|
416
634
|
dataset_name=dataset_name,
|
|
417
635
|
smooth_window_size_s=smooth_window_size_s,
|
|
418
636
|
binarize_threshold=binarize_threshold,
|
|
@@ -425,15 +643,11 @@ def _predict_video_entry(
|
|
|
425
643
|
return predicted_sequences, model_meta
|
|
426
644
|
|
|
427
645
|
|
|
428
|
-
def _extract_text_information(
|
|
429
|
-
video: "VideoFile", frame_fraction: float = 0.001, cap: int = 15
|
|
430
|
-
) -> Optional[Dict[str, str]]:
|
|
646
|
+
def _extract_text_information(video: "VideoFile", frame_fraction: float = 0.001, cap: int = 15) -> Optional[Dict[str, str]]:
|
|
431
647
|
"""Facade function to call the text extraction logic."""
|
|
432
648
|
logger.info("Attempting text extraction for video %s.", video.uuid)
|
|
433
649
|
|
|
434
|
-
extracted_data = _extract_text_from_video_frames(
|
|
435
|
-
video=video, frame_fraction=frame_fraction, cap=cap
|
|
436
|
-
)
|
|
650
|
+
extracted_data = _extract_text_from_video_frames(video=video, frame_fraction=frame_fraction, cap=cap)
|
|
437
651
|
|
|
438
652
|
if extracted_data is not None:
|
|
439
653
|
logger.info("Text extraction successful for video %s.", video.uuid)
|