endoreg-db 0.6.0__py3-none-any.whl → 0.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of endoreg-db might be problematic. Click here for more details.
- endoreg_db/case_generator/__init__.py +0 -0
- endoreg_db/case_generator/case_generator.py +159 -0
- endoreg_db/case_generator/lab_sample_factory.py +33 -0
- endoreg_db/case_generator/utils.py +30 -0
- endoreg_db/data/__init__.py +118 -0
- endoreg_db/data/agl_service/data.yaml +19 -0
- endoreg_db/data/ai_model/data.yaml +7 -0
- endoreg_db/data/ai_model_label/label/data.yaml +88 -0
- endoreg_db/data/ai_model_label/label-set/data.yaml +21 -0
- endoreg_db/data/ai_model_label/label-type/data.yaml +7 -0
- endoreg_db/data/ai_model_meta/default_multilabel_classification.yaml +5 -0
- endoreg_db/data/ai_model_type/data.yaml +7 -0
- endoreg_db/data/ai_model_video_segmentation_label/base_segmentation.yaml +176 -0
- endoreg_db/data/ai_model_video_segmentation_labelset/data.yaml +20 -0
- endoreg_db/data/case_template/rule/00_patient_lab_sample_add_default_value.yaml +167 -0
- endoreg_db/data/case_template/rule/01_patient-set-age.yaml +8 -0
- endoreg_db/data/case_template/rule/01_patient-set-gender.yaml +9 -0
- endoreg_db/data/case_template/rule/11_create_patient_lab_sample.yaml +23 -0
- endoreg_db/data/case_template/rule/12_create-patient_medication-anticoagulation.yaml +19 -0
- endoreg_db/data/case_template/rule/13_create-patient_medication_schedule-anticoagulation.yaml +19 -0
- endoreg_db/data/case_template/rule/19_create_patient.yaml +17 -0
- endoreg_db/data/case_template/rule_type/base_types.yaml +35 -0
- endoreg_db/data/case_template/rule_value/.init +0 -0
- endoreg_db/data/case_template/rule_value_type/base_types.yaml +59 -0
- endoreg_db/data/case_template/template/base.yaml +8 -0
- endoreg_db/data/case_template/template_type/pre_endoscopy.yaml +3 -0
- endoreg_db/data/case_template/tmp/_rule_value +13 -0
- endoreg_db/data/case_template/tmp/rule/01_atrial_fibrillation.yaml +21 -0
- endoreg_db/data/case_template/tmp/rule/02_create_object.yaml +10 -0
- endoreg_db/data/case_template/tmp/template/atrial_fibrillation_low_risk.yaml +7 -0
- endoreg_db/data/center/data.yaml +90 -0
- endoreg_db/data/center_resource/green_endoscopy_dashboard_CenterResource.yaml +144 -0
- endoreg_db/data/center_waste/green_endoscopy_dashboard_CenterWaste.yaml +48 -0
- endoreg_db/data/contraindication/bleeding.yaml +11 -0
- endoreg_db/data/disease/cardiovascular.yaml +37 -0
- endoreg_db/data/disease/hepatology.yaml +5 -0
- endoreg_db/data/disease/misc.yaml +6 -0
- endoreg_db/data/disease/renal.yaml +5 -0
- endoreg_db/data/disease_classification/chronic_kidney_disease.yaml +6 -0
- endoreg_db/data/disease_classification/coronary_vessel_disease.yaml +6 -0
- endoreg_db/data/disease_classification_choice/chronic_kidney_disease.yaml +41 -0
- endoreg_db/data/disease_classification_choice/coronary_vessel_disease.yaml +20 -0
- endoreg_db/data/distribution/date/patient.yaml +7 -0
- endoreg_db/data/distribution/multiple_categorical/.init +0 -0
- endoreg_db/data/distribution/numeric/data.yaml +14 -0
- endoreg_db/data/distribution/single_categorical/patient.yaml +7 -0
- endoreg_db/data/emission_factor/green_endoscopy_dashboard_EmissionFactor.yaml +132 -0
- endoreg_db/data/endoscope/data.yaml +93 -0
- endoreg_db/data/endoscope_type/data.yaml +11 -0
- endoreg_db/data/endoscopy_processor/data.yaml +47 -0
- endoreg_db/data/event/cardiology.yaml +28 -0
- endoreg_db/data/event/neurology.yaml +14 -0
- endoreg_db/data/event/surgery.yaml +13 -0
- endoreg_db/data/event/thrombembolism.yaml +20 -0
- endoreg_db/data/examination/examinations/data.yaml +66 -0
- endoreg_db/data/examination/time/data.yaml +48 -0
- endoreg_db/data/examination/time-type/data.yaml +8 -0
- endoreg_db/data/examination/type/data.yaml +5 -0
- endoreg_db/data/examination_indication/endoscopy.yaml +8 -0
- endoreg_db/data/examination_indication_classification/endoscopy.yaml +8 -0
- endoreg_db/data/examination_indication_classification_choice/endoscopy.yaml +101 -0
- endoreg_db/data/finding/data.yaml +141 -0
- endoreg_db/data/finding_intervention/endoscopy.yaml +138 -0
- endoreg_db/data/finding_intervention_type/endoscopy.yaml +15 -0
- endoreg_db/data/finding_location_classification/colonoscopy.yaml +46 -0
- endoreg_db/data/finding_location_classification_choice/colonoscopy.yaml +240 -0
- endoreg_db/data/finding_morphology_classification/colonoscopy.yaml +48 -0
- endoreg_db/data/finding_morphology_classification_choice/colon_lesion_circularity_default.yaml +34 -0
- endoreg_db/data/finding_morphology_classification_choice/colon_lesion_nice.yaml +20 -0
- endoreg_db/data/finding_morphology_classification_choice/colon_lesion_paris.yaml +65 -0
- endoreg_db/data/finding_morphology_classification_choice/colon_lesion_planarity_default.yaml +56 -0
- endoreg_db/data/finding_morphology_classification_choice/colon_lesion_surface_intact_default.yaml +39 -0
- endoreg_db/data/finding_morphology_classification_choice/colonoscopy_size.yaml +57 -0
- endoreg_db/data/finding_morphology_classification_type/colonoscopy.yaml +79 -0
- endoreg_db/data/finding_type/data.yaml +30 -0
- endoreg_db/data/gender/data.yaml +35 -0
- endoreg_db/data/information_source/data.yaml +30 -0
- endoreg_db/data/information_source/medication.yaml +6 -0
- endoreg_db/data/lab_value/cardiac_enzymes.yaml +37 -0
- endoreg_db/data/lab_value/coagulation.yaml +54 -0
- endoreg_db/data/lab_value/electrolytes.yaml +228 -0
- endoreg_db/data/lab_value/gastrointestinal_function.yaml +133 -0
- endoreg_db/data/lab_value/hematology.yaml +184 -0
- endoreg_db/data/lab_value/hormones.yaml +59 -0
- endoreg_db/data/lab_value/lipids.yaml +53 -0
- endoreg_db/data/lab_value/misc.yaml +33 -0
- endoreg_db/data/lab_value/renal_function.yaml +12 -0
- endoreg_db/data/log_type/data.yaml +57 -0
- endoreg_db/data/lx_client_tag/base.yaml +54 -0
- endoreg_db/data/lx_client_type/base.yaml +30 -0
- endoreg_db/data/lx_permission/base.yaml +24 -0
- endoreg_db/data/lx_permission/endoreg.yaml +52 -0
- endoreg_db/data/material/material.yaml +91 -0
- endoreg_db/data/medication/anticoagulation.yaml +65 -0
- endoreg_db/data/medication/tah.yaml +70 -0
- endoreg_db/data/medication_indication/anticoagulation.yaml +115 -0
- endoreg_db/data/medication_indication_type/data.yaml +11 -0
- endoreg_db/data/medication_indication_type/thrombembolism.yaml +41 -0
- endoreg_db/data/medication_intake_time/base.yaml +31 -0
- endoreg_db/data/medication_schedule/apixaban.yaml +95 -0
- endoreg_db/data/medication_schedule/ass.yaml +12 -0
- endoreg_db/data/medication_schedule/enoxaparin.yaml +26 -0
- endoreg_db/data/names_first/first_names.yaml +51 -0
- endoreg_db/data/names_last/last_names.yaml +51 -0
- endoreg_db/data/network_device/data.yaml +59 -0
- endoreg_db/data/network_device_type/data.yaml +12 -0
- endoreg_db/data/organ/data.yaml +29 -0
- endoreg_db/data/patient_lab_sample_type/generic.yaml +6 -0
- endoreg_db/data/pdf_type/data.yaml +29 -0
- endoreg_db/data/product/green_endoscopy_dashboard_Product.yaml +66 -0
- endoreg_db/data/product_group/green_endoscopy_dashboard_ProductGroup.yaml +33 -0
- endoreg_db/data/product_material/green_endoscopy_dashboard_ProductMaterial.yaml +308 -0
- endoreg_db/data/product_weight/green_endoscopy_dashboard_ProductWeight.yaml +88 -0
- endoreg_db/data/profession/data.yaml +70 -0
- endoreg_db/data/reference_product/green_endoscopy_dashboard_ReferenceProduct.yaml +55 -0
- endoreg_db/data/report_reader_flag/ukw-examination-generic.yaml +30 -0
- endoreg_db/data/report_reader_flag/ukw-histology-generic.yaml +19 -0
- endoreg_db/data/resource/green_endoscopy_dashboard_Resource.yaml +15 -0
- endoreg_db/data/tmp/chronic_kidney_disease.yaml +0 -0
- endoreg_db/data/tmp/congestive_heart_failure.yaml +0 -0
- endoreg_db/data/transport_route/green_endoscopy_dashboard_TransportRoute.yaml +12 -0
- endoreg_db/data/unit/concentration.yaml +92 -0
- endoreg_db/data/unit/data.yaml +17 -0
- endoreg_db/data/unit/length.yaml +31 -0
- endoreg_db/data/unit/misc.yaml +20 -0
- endoreg_db/data/unit/rate.yaml +6 -0
- endoreg_db/data/unit/time.yaml +13 -0
- endoreg_db/data/unit/volume.yaml +35 -0
- endoreg_db/data/unit/weight.yaml +38 -0
- endoreg_db/data/waste/data.yaml +12 -0
- endoreg_db/forms/__init__.py +5 -0
- endoreg_db/forms/examination_form.py +11 -0
- endoreg_db/forms/patient_finding_intervention_form.py +19 -0
- endoreg_db/forms/patient_form.py +26 -0
- endoreg_db/forms/questionnaires/__init__.py +1 -0
- endoreg_db/forms/questionnaires/tto_questionnaire.py +23 -0
- endoreg_db/forms/settings/__init__.py +8 -0
- endoreg_db/forms/unit.py +6 -0
- endoreg_db/management/__init__.py +0 -0
- endoreg_db/management/commands/__init__.py +0 -0
- endoreg_db/management/commands/_load_model_template.py +41 -0
- endoreg_db/management/commands/delete_all.py +18 -0
- endoreg_db/management/commands/fetch_legacy_image_dataset.py +32 -0
- endoreg_db/management/commands/fix_auth_permission.py +20 -0
- endoreg_db/management/commands/load_active_model_data.py +45 -0
- endoreg_db/management/commands/load_ai_model_data.py +79 -0
- endoreg_db/management/commands/load_ai_model_label_data.py +59 -0
- endoreg_db/management/commands/load_base_db_data.py +178 -0
- endoreg_db/management/commands/load_center_data.py +43 -0
- endoreg_db/management/commands/load_contraindication_data.py +41 -0
- endoreg_db/management/commands/load_disease_classification_choices_data.py +41 -0
- endoreg_db/management/commands/load_disease_classification_data.py +41 -0
- endoreg_db/management/commands/load_disease_data.py +62 -0
- endoreg_db/management/commands/load_distribution_data.py +66 -0
- endoreg_db/management/commands/load_endoscope_data.py +68 -0
- endoreg_db/management/commands/load_event_data.py +41 -0
- endoreg_db/management/commands/load_examination_data.py +75 -0
- endoreg_db/management/commands/load_examination_indication_data.py +65 -0
- endoreg_db/management/commands/load_finding_data.py +171 -0
- endoreg_db/management/commands/load_g_play_data.py +113 -0
- endoreg_db/management/commands/load_gender_data.py +44 -0
- endoreg_db/management/commands/load_green_endoscopy_wuerzburg_data.py +133 -0
- endoreg_db/management/commands/load_information_source.py +45 -0
- endoreg_db/management/commands/load_lab_value_data.py +50 -0
- endoreg_db/management/commands/load_logging_data.py +39 -0
- endoreg_db/management/commands/load_lx_data.py +64 -0
- endoreg_db/management/commands/load_medication_data.py +103 -0
- endoreg_db/management/commands/load_medication_indication_data.py +63 -0
- endoreg_db/management/commands/load_medication_indication_type_data.py +41 -0
- endoreg_db/management/commands/load_medication_intake_time_data.py +41 -0
- endoreg_db/management/commands/load_medication_schedule_data.py +55 -0
- endoreg_db/management/commands/load_name_data.py +37 -0
- endoreg_db/management/commands/load_network_data.py +57 -0
- endoreg_db/management/commands/load_organ_data.py +43 -0
- endoreg_db/management/commands/load_pdf_type_data.py +61 -0
- endoreg_db/management/commands/load_profession_data.py +44 -0
- endoreg_db/management/commands/load_report_reader_flag_data.py +46 -0
- endoreg_db/management/commands/load_unit_data.py +46 -0
- endoreg_db/management/commands/load_user_groups.py +28 -0
- endoreg_db/management/commands/register_ai_model.py +64 -0
- endoreg_db/management/commands/reset_celery_schedule.py +9 -0
- endoreg_db/migrations/0001_initial.py +2045 -0
- endoreg_db/migrations/0002_alter_frame_image_alter_rawframe_image.py +23 -0
- endoreg_db/migrations/0003_alter_frame_image_alter_rawframe_image.py +23 -0
- endoreg_db/migrations/0004_alter_rawvideofile_file_alter_video_file.py +25 -0
- endoreg_db/migrations/0005_rawvideofile_frame_count_and_more.py +33 -0
- endoreg_db/migrations/0006_frame_extracted_rawframe_extracted.py +23 -0
- endoreg_db/migrations/0007_rename_pseudo_patient_video_patient_and_more.py +24 -0
- endoreg_db/migrations/0008_remove_reportfile_patient_examination_and_more.py +48 -0
- endoreg_db/migrations/__init__.py +0 -0
- endoreg_db/models/__init__.py +376 -0
- endoreg_db/models/ai_model/__init__.py +4 -0
- endoreg_db/models/ai_model/active_model.py +9 -0
- endoreg_db/models/ai_model/ai_model.py +103 -0
- endoreg_db/models/ai_model/lightning/__init__.py +3 -0
- endoreg_db/models/ai_model/lightning/inference_dataset.py +53 -0
- endoreg_db/models/ai_model/lightning/multilabel_classification_net.py +155 -0
- endoreg_db/models/ai_model/lightning/postprocess.py +53 -0
- endoreg_db/models/ai_model/lightning/predict.py +172 -0
- endoreg_db/models/ai_model/lightning/prediction_visualizer.py +55 -0
- endoreg_db/models/ai_model/lightning/preprocess.py +68 -0
- endoreg_db/models/ai_model/lightning/run_visualizer.py +21 -0
- endoreg_db/models/ai_model/model_meta.py +250 -0
- endoreg_db/models/ai_model/model_type.py +36 -0
- endoreg_db/models/ai_model/utils.py +8 -0
- endoreg_db/models/annotation/__init__.py +32 -0
- endoreg_db/models/annotation/anonymized_image_annotation.py +115 -0
- endoreg_db/models/annotation/binary_classification_annotation_task.py +117 -0
- endoreg_db/models/annotation/image_classification.py +86 -0
- endoreg_db/models/annotation/video_segmentation_annotation.py +52 -0
- endoreg_db/models/annotation/video_segmentation_labelset.py +20 -0
- endoreg_db/models/case/__init__.py +1 -0
- endoreg_db/models/case/case.py +34 -0
- endoreg_db/models/case_template/__init__.py +15 -0
- endoreg_db/models/case_template/case_template.py +125 -0
- endoreg_db/models/case_template/case_template_rule.py +276 -0
- endoreg_db/models/case_template/case_template_rule_value.py +88 -0
- endoreg_db/models/case_template/case_template_type.py +28 -0
- endoreg_db/models/center/__init__.py +11 -0
- endoreg_db/models/center/center.py +51 -0
- endoreg_db/models/center/center_product.py +33 -0
- endoreg_db/models/center/center_resource.py +33 -0
- endoreg_db/models/center/center_waste.py +16 -0
- endoreg_db/models/contraindication/__init__.py +21 -0
- endoreg_db/models/data_file/__init__.py +39 -0
- endoreg_db/models/data_file/base_classes/__init__.py +7 -0
- endoreg_db/models/data_file/base_classes/abstract_frame.py +100 -0
- endoreg_db/models/data_file/base_classes/abstract_pdf.py +136 -0
- endoreg_db/models/data_file/base_classes/abstract_video.py +807 -0
- endoreg_db/models/data_file/base_classes/frame_helpers.py +17 -0
- endoreg_db/models/data_file/base_classes/prepare_bulk_frames.py +19 -0
- endoreg_db/models/data_file/base_classes/utils.py +80 -0
- endoreg_db/models/data_file/frame.py +29 -0
- endoreg_db/models/data_file/import_classes/__init__.py +18 -0
- endoreg_db/models/data_file/import_classes/processing_functions/__init__.py +35 -0
- endoreg_db/models/data_file/import_classes/processing_functions/pdf.py +28 -0
- endoreg_db/models/data_file/import_classes/processing_functions/video.py +260 -0
- endoreg_db/models/data_file/import_classes/raw_pdf.py +260 -0
- endoreg_db/models/data_file/import_classes/raw_video.py +288 -0
- endoreg_db/models/data_file/metadata/__init__.py +13 -0
- endoreg_db/models/data_file/metadata/pdf_meta.py +74 -0
- endoreg_db/models/data_file/metadata/sensitive_meta.py +290 -0
- endoreg_db/models/data_file/metadata/video_meta.py +199 -0
- endoreg_db/models/data_file/report_file.py +56 -0
- endoreg_db/models/data_file/video/__init__.py +11 -0
- endoreg_db/models/data_file/video/import_meta.py +25 -0
- endoreg_db/models/data_file/video/video.py +196 -0
- endoreg_db/models/data_file/video_segment.py +214 -0
- endoreg_db/models/disease.py +79 -0
- endoreg_db/models/emission/__init__.py +5 -0
- endoreg_db/models/emission/emission_factor.py +85 -0
- endoreg_db/models/event.py +73 -0
- endoreg_db/models/examination/__init__.py +9 -0
- endoreg_db/models/examination/examination.py +67 -0
- endoreg_db/models/examination/examination_indication.py +170 -0
- endoreg_db/models/examination/examination_time.py +53 -0
- endoreg_db/models/examination/examination_time_type.py +48 -0
- endoreg_db/models/examination/examination_type.py +40 -0
- endoreg_db/models/finding/__init__.py +11 -0
- endoreg_db/models/finding/finding.py +75 -0
- endoreg_db/models/finding/finding_intervention.py +60 -0
- endoreg_db/models/finding/finding_location_classification.py +94 -0
- endoreg_db/models/finding/finding_morphology_classification.py +89 -0
- endoreg_db/models/finding/finding_type.py +22 -0
- endoreg_db/models/hardware/__init__.py +2 -0
- endoreg_db/models/hardware/endoscope.py +60 -0
- endoreg_db/models/hardware/endoscopy_processor.py +155 -0
- endoreg_db/models/information_source.py +29 -0
- endoreg_db/models/label/__init__.py +1 -0
- endoreg_db/models/label/label.py +112 -0
- endoreg_db/models/laboratory/__init__.py +1 -0
- endoreg_db/models/laboratory/lab_value.py +111 -0
- endoreg_db/models/logging/__init__.py +11 -0
- endoreg_db/models/logging/agl_service.py +19 -0
- endoreg_db/models/logging/base.py +22 -0
- endoreg_db/models/logging/log_type.py +23 -0
- endoreg_db/models/logging/network_device.py +27 -0
- endoreg_db/models/lx/__init__.py +4 -0
- endoreg_db/models/lx/client.py +57 -0
- endoreg_db/models/lx/identity.py +34 -0
- endoreg_db/models/lx/permission.py +18 -0
- endoreg_db/models/lx/user.py +16 -0
- endoreg_db/models/medication/__init__.py +19 -0
- endoreg_db/models/medication/medication.py +33 -0
- endoreg_db/models/medication/medication_indication.py +50 -0
- endoreg_db/models/medication/medication_indication_type.py +34 -0
- endoreg_db/models/medication/medication_intake_time.py +26 -0
- endoreg_db/models/medication/medication_schedule.py +37 -0
- endoreg_db/models/network/__init__.py +9 -0
- endoreg_db/models/network/agl_service.py +38 -0
- endoreg_db/models/network/network_device.py +58 -0
- endoreg_db/models/network/network_device_type.py +23 -0
- endoreg_db/models/organ/__init__.py +38 -0
- endoreg_db/models/other/__init__.py +23 -0
- endoreg_db/models/other/distribution/__init__.py +44 -0
- endoreg_db/models/other/distribution/base_value_distribution.py +20 -0
- endoreg_db/models/other/distribution/date_value_distribution.py +91 -0
- endoreg_db/models/other/distribution/multiple_categorical_value_distribution.py +32 -0
- endoreg_db/models/other/distribution/numeric_value_distribution.py +97 -0
- endoreg_db/models/other/distribution/single_categorical_value_distribution.py +22 -0
- endoreg_db/models/other/distribution.py +5 -0
- endoreg_db/models/other/material.py +20 -0
- endoreg_db/models/other/resource.py +18 -0
- endoreg_db/models/other/transport_route.py +22 -0
- endoreg_db/models/other/waste.py +20 -0
- endoreg_db/models/patient/__init__.py +24 -0
- endoreg_db/models/patient/patient_examination.py +182 -0
- endoreg_db/models/patient/patient_finding.py +143 -0
- endoreg_db/models/patient/patient_finding_intervention.py +26 -0
- endoreg_db/models/patient/patient_finding_location.py +120 -0
- endoreg_db/models/patient/patient_finding_morphology.py +166 -0
- endoreg_db/models/permissions/__init__.py +44 -0
- endoreg_db/models/persons/__init__.py +34 -0
- endoreg_db/models/persons/examiner/__init__.py +2 -0
- endoreg_db/models/persons/examiner/examiner.py +60 -0
- endoreg_db/models/persons/examiner/examiner_type.py +2 -0
- endoreg_db/models/persons/first_name.py +18 -0
- endoreg_db/models/persons/gender.py +22 -0
- endoreg_db/models/persons/last_name.py +20 -0
- endoreg_db/models/persons/patient/__init__.py +8 -0
- endoreg_db/models/persons/patient/patient.py +389 -0
- endoreg_db/models/persons/patient/patient_disease.py +22 -0
- endoreg_db/models/persons/patient/patient_event.py +52 -0
- endoreg_db/models/persons/patient/patient_examination_indication.py +32 -0
- endoreg_db/models/persons/patient/patient_lab_sample.py +108 -0
- endoreg_db/models/persons/patient/patient_lab_value.py +197 -0
- endoreg_db/models/persons/patient/patient_medication.py +59 -0
- endoreg_db/models/persons/patient/patient_medication_schedule.py +88 -0
- endoreg_db/models/persons/person.py +31 -0
- endoreg_db/models/persons/portal_user_information.py +27 -0
- endoreg_db/models/prediction/__init__.py +8 -0
- endoreg_db/models/prediction/image_classification.py +51 -0
- endoreg_db/models/prediction/video_prediction_meta.py +306 -0
- endoreg_db/models/product/__init__.py +14 -0
- endoreg_db/models/product/product.py +110 -0
- endoreg_db/models/product/product_group.py +27 -0
- endoreg_db/models/product/product_material.py +28 -0
- endoreg_db/models/product/product_weight.py +38 -0
- endoreg_db/models/product/reference_product.py +115 -0
- endoreg_db/models/questionnaires/__init__.py +114 -0
- endoreg_db/models/quiz/__init__.py +9 -0
- endoreg_db/models/quiz/quiz_answer.py +41 -0
- endoreg_db/models/quiz/quiz_question.py +54 -0
- endoreg_db/models/report_reader/__init__.py +7 -0
- endoreg_db/models/report_reader/report_reader_config.py +53 -0
- endoreg_db/models/report_reader/report_reader_flag.py +20 -0
- endoreg_db/models/rules/__init__.py +5 -0
- endoreg_db/models/rules/rule.py +24 -0
- endoreg_db/models/rules/rule_applicator.py +224 -0
- endoreg_db/models/rules/rule_attribute_dtype.py +19 -0
- endoreg_db/models/rules/rule_type.py +22 -0
- endoreg_db/models/rules/ruleset.py +19 -0
- endoreg_db/models/unit.py +22 -0
- endoreg_db/queries/__init__.py +5 -0
- endoreg_db/queries/annotations/__init__.py +3 -0
- endoreg_db/queries/annotations/legacy.py +158 -0
- endoreg_db/queries/get/__init__.py +6 -0
- endoreg_db/queries/get/annotation.py +0 -0
- endoreg_db/queries/get/center.py +42 -0
- endoreg_db/queries/get/model.py +13 -0
- endoreg_db/queries/get/patient.py +14 -0
- endoreg_db/queries/get/patient_examination.py +20 -0
- endoreg_db/queries/get/prediction.py +0 -0
- endoreg_db/queries/get/report_file.py +33 -0
- endoreg_db/queries/get/video.py +31 -0
- endoreg_db/queries/get/video_import_meta.py +0 -0
- endoreg_db/queries/get/video_prediction_meta.py +0 -0
- endoreg_db/queries/sanity/__init_.py +0 -0
- endoreg_db/serializers/__init__.py +10 -0
- endoreg_db/serializers/ai_model.py +19 -0
- endoreg_db/serializers/annotation.py +14 -0
- endoreg_db/serializers/center.py +11 -0
- endoreg_db/serializers/examination.py +33 -0
- endoreg_db/serializers/frame.py +9 -0
- endoreg_db/serializers/hardware.py +21 -0
- endoreg_db/serializers/label.py +22 -0
- endoreg_db/serializers/patient.py +33 -0
- endoreg_db/serializers/prediction.py +10 -0
- endoreg_db/serializers/raw_video_meta_validation.py +13 -0
- endoreg_db/serializers/report_file.py +7 -0
- endoreg_db/serializers/video.py +20 -0
- endoreg_db/serializers/video_segmentation.py +492 -0
- endoreg_db/templates/admin/patient_finding_intervention.html +253 -0
- endoreg_db/templates/admin/start_examination.html +12 -0
- endoreg_db/templates/timeline.html +176 -0
- endoreg_db/utils/__init__.py +36 -0
- endoreg_db/utils/cropping.py +29 -0
- endoreg_db/utils/dataloader.py +118 -0
- endoreg_db/utils/dates.py +39 -0
- endoreg_db/utils/file_operations.py +30 -0
- endoreg_db/utils/hashs.py +152 -0
- endoreg_db/utils/legacy_ocr.py +201 -0
- endoreg_db/utils/names.py +74 -0
- endoreg_db/utils/ocr.py +190 -0
- endoreg_db/utils/parse_and_generate_yaml.py +46 -0
- endoreg_db/utils/pydantic_models/__init__.py +6 -0
- endoreg_db/utils/pydantic_models/db_config.py +57 -0
- endoreg_db/utils/uuid.py +4 -0
- endoreg_db/utils/validate_endo_roi.py +19 -0
- endoreg_db/utils/validate_subcategory_dict.py +91 -0
- endoreg_db/utils/video/__init__.py +13 -0
- endoreg_db/utils/video/extract_frames.py +121 -0
- endoreg_db/utils/video/transcode_videofile.py +111 -0
- endoreg_db/views/__init__.py +2 -0
- endoreg_db/views/csrf.py +7 -0
- endoreg_db/views/patient_views.py +90 -0
- endoreg_db/views/raw_video_meta_validation_views.py +38 -0
- endoreg_db/views/report_views.py +96 -0
- endoreg_db/views/video_segmentation_views.py +149 -0
- endoreg_db/views/views_for_timeline.py +46 -0
- {endoreg_db-0.6.0.dist-info → endoreg_db-0.6.2.dist-info}/METADATA +14 -4
- endoreg_db-0.6.2.dist-info/RECORD +420 -0
- {endoreg_db-0.6.0.dist-info → endoreg_db-0.6.2.dist-info}/WHEEL +1 -2
- endoreg_db-0.6.0.dist-info/RECORD +0 -11
- endoreg_db-0.6.0.dist-info/top_level.txt +0 -1
- {endoreg_db-0.6.0.dist-info → endoreg_db-0.6.2.dist-info/licenses}/LICENSE +0 -0
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
from torch.utils.data import Dataset
|
|
2
|
+
import torch
|
|
3
|
+
import numpy as np
|
|
4
|
+
from PIL import Image
|
|
5
|
+
from torchvision import transforms
|
|
6
|
+
from .preprocess import Cropper
|
|
7
|
+
|
|
8
|
+
class InferenceDataset(Dataset):
    """Dataset that loads images from disk, crops them, and normalizes them for inference."""

    def __init__(self, paths, crops, config):
        """
        Args:
            paths: Sequence of image file paths.
            crops: Sequence of per-image crop parameters consumed by Cropper.
            config: Dict with keys "mean", "std", "size_x", "size_y".
        """
        self.paths = paths
        self.crops = crops
        self.cropper = Cropper()  # NOTE(review): assumed to accept NumPy arrays — confirm
        self.config = config

        # ToTensor followed by channel-wise normalization with the configured statistics.
        self.transforms = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(mean=config["mean"], std=config["std"]),
            ]
        )

    def __len__(self):
        """Total number of samples."""
        return len(self.paths)

    def __getitem__(self, idx):
        """Load, crop, and transform the image at position *idx*; returns a tensor."""
        with Image.open(self.paths[idx]) as source:
            # Force 3 channels; convert() also loads pixel data, so the
            # result is independent of the (soon closed) file handle.
            rgb_image = source.convert('RGB')

        # Crop (and rescale to size_x/size_y) with the per-image crop parameters;
        # Cropper operates on NumPy arrays, hence the round-trip from PIL.
        cropped_array = self.cropper(
            np.array(rgb_image),
            self.crops[idx],
            scale=[self.config["size_x"], self.config["size_y"]],
        )

        # Back to PIL so the torchvision transforms can consume it.
        cropped_image = Image.fromarray(cropped_array.astype('uint8'), 'RGB')
        return self.transforms(cropped_image)
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from torchvision import models
|
|
3
|
+
import torch.nn as nn
|
|
4
|
+
from pytorch_lightning import LightningModule
|
|
5
|
+
import numpy as np
|
|
6
|
+
from sklearn.metrics import precision_score, recall_score, f1_score
|
|
7
|
+
|
|
8
|
+
METRICS_ON_STEP = False
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def calculate_metrics(pred, target, threshold=0.5):
    """Binarize *pred* at *threshold* and compute precision/recall/F1 scores.

    Returns a dict with scalar "micro/*" and "macro/*" scores, plus
    "samples/*" entries.

    NOTE(review): the "samples/*" keys use sklearn's ``average=None`` and thus
    hold per-label arrays, not per-sample averages; the key names are kept
    for compatibility with existing logging.
    """
    binarized = np.array(pred > threshold, dtype=float)

    # (key prefix, sklearn `average` argument) pairs, in the original emission order.
    averaging_modes = [("micro", "micro"), ("macro", "macro"), ("samples", None)]
    scorers = [
        ("precision", precision_score),
        ("recall", recall_score),
        ("f1", f1_score),
    ]

    metrics = {}
    for prefix, average in averaging_modes:
        for metric_name, scorer in scorers:
            metrics[f"{prefix}/{metric_name}"] = scorer(
                y_true=target, y_pred=binarized, average=average, zero_division=0
            )
    return metrics
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class MultiLabelClassificationNet(LightningModule):
|
|
45
|
+
def __init__(
|
|
46
|
+
self,
|
|
47
|
+
labels=["ASD"],
|
|
48
|
+
lr=6e-3,
|
|
49
|
+
weight_decay=0.001,
|
|
50
|
+
pos_weight=2,
|
|
51
|
+
model_type="EfficientNetB4",
|
|
52
|
+
):
|
|
53
|
+
super().__init__()
|
|
54
|
+
self.save_hyperparameters()
|
|
55
|
+
self.model_type = "RegNetX800MF" # model_type
|
|
56
|
+
self.labels = labels
|
|
57
|
+
self.n_classes = len(labels)
|
|
58
|
+
self.val_preds = []
|
|
59
|
+
self.val_targets = []
|
|
60
|
+
self.pos_weight = pos_weight
|
|
61
|
+
self.weight_decay = weight_decay
|
|
62
|
+
self.lr = lr
|
|
63
|
+
self.sigm = nn.Sigmoid()
|
|
64
|
+
|
|
65
|
+
if model_type == "EfficientNetB4":
|
|
66
|
+
self.model = models.efficientnet_b4(pretrained=True)
|
|
67
|
+
num_ftrs = self.model.classifier[1].in_features
|
|
68
|
+
self.model.classifier[1] = nn.Linear(num_ftrs, len(labels))
|
|
69
|
+
|
|
70
|
+
elif model_type == "RegNetX800MF":
|
|
71
|
+
self.model = models.regnet_x_800mf(pretrained=True)
|
|
72
|
+
num_ftrs = self.model.fc.in_features
|
|
73
|
+
self.model.fc = nn.Linear(num_ftrs, len(labels))
|
|
74
|
+
|
|
75
|
+
self.criterion = nn.BCEWithLogitsLoss(
|
|
76
|
+
pos_weight=torch.Tensor([self.pos_weight] * len(self.labels))
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
def forward(self, x):
|
|
80
|
+
"""Forward pass"""
|
|
81
|
+
x = self.model(x)
|
|
82
|
+
return x
|
|
83
|
+
|
|
84
|
+
def training_step(self, batch, batch_idx):
|
|
85
|
+
"""Training step"""
|
|
86
|
+
x, y = batch
|
|
87
|
+
y_pred = self(x)
|
|
88
|
+
loss = self.criterion(y_pred, y)
|
|
89
|
+
self.log(
|
|
90
|
+
"train/loss", loss, on_step=METRICS_ON_STEP, on_epoch=True, prog_bar=True
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
preds = np.array(self.sigm(y_pred).cpu() > 0.5, dtype=float)
|
|
94
|
+
|
|
95
|
+
return {"loss": loss, "preds": preds, "targets": y}
|
|
96
|
+
|
|
97
|
+
def validation_step(self, batch, batch_idx):
|
|
98
|
+
"""Validation step"""
|
|
99
|
+
x, y = batch
|
|
100
|
+
y_pred = self(x)
|
|
101
|
+
loss = self.criterion(y_pred, y)
|
|
102
|
+
self.log("val/loss", loss, on_epoch=True, prog_bar=True)
|
|
103
|
+
|
|
104
|
+
preds = np.array(self.sigm(y_pred).cpu() > 0.5, dtype=float)
|
|
105
|
+
self.val_preds.append(preds)
|
|
106
|
+
self.val_targets.append(y.cpu().numpy())
|
|
107
|
+
|
|
108
|
+
return {"loss": loss, "preds": preds, "targets": y}
|
|
109
|
+
|
|
110
|
+
def validation_epoch_end(self, outputs):
|
|
111
|
+
"""Validation epoch end"""
|
|
112
|
+
self.val_preds = np.concatenate([_ for _ in self.val_preds])
|
|
113
|
+
self.val_targets = np.concatenate([_ for _ in self.val_targets])
|
|
114
|
+
|
|
115
|
+
metrics = calculate_metrics(self.val_preds, self.val_targets, threshold=0.5)
|
|
116
|
+
for key, value in metrics.items():
|
|
117
|
+
value = value.tolist()
|
|
118
|
+
if isinstance(value, list):
|
|
119
|
+
for i, _value in enumerate(value):
|
|
120
|
+
name = "val/" + f"{key}/{self.labels[i]}"
|
|
121
|
+
self.log(
|
|
122
|
+
name,
|
|
123
|
+
_value,
|
|
124
|
+
on_epoch=True,
|
|
125
|
+
on_step=METRICS_ON_STEP,
|
|
126
|
+
prog_bar=False,
|
|
127
|
+
)
|
|
128
|
+
else:
|
|
129
|
+
name = "val/" + f"{key}"
|
|
130
|
+
self.log(
|
|
131
|
+
name, value, on_epoch=True, on_step=METRICS_ON_STEP, prog_bar=True
|
|
132
|
+
)
|
|
133
|
+
|
|
134
|
+
self.val_preds = []
|
|
135
|
+
self.val_targets = []
|
|
136
|
+
|
|
137
|
+
def configure_optimizers(self):
    """Choose what optimizers and learning-rate schedulers to use in your optimization.
    Normally you'd need one. But in the case of GANs or similar you might have multiple.

    See examples here:
    https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers
    """
    # SGD with momentum; lr and weight decay come from the module's hparams.
    sgd = torch.optim.SGD(
        self.parameters(),
        self.lr,
        momentum=0.5,
        weight_decay=self.weight_decay,
    )
    # Cosine annealing with warm restarts every 20 epochs.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        sgd, T_0=20, verbose=True
    )

    return {
        "optimizer": sgd,
        "lr_scheduler": scheduler,
        "monitor": "val/loss",
    }
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
|
|
3
|
+
def concat_pred_dicts(pred_dicts):
    """Merge a list of prediction dicts (all sharing the same keys) into one dict.

    Args:
        pred_dicts: non-empty list of dicts; every dict should have the same
            keys as the first one. A dict missing one of those keys raises
            KeyError, as before.

    Returns:
        dict mapping each key to a numpy array stacking that key's values
        across all input dicts, in input order.

    Raises:
        ValueError: if pred_dicts is empty (was a bare `assert`, which is
            stripped under `python -O`).
    """
    if not pred_dicts:
        raise ValueError("pred_dicts must not be empty")

    keys = pred_dicts[0].keys()
    # Stack each key's values across all dicts in one pass per key.
    return {key: np.array([p[key] for p in pred_dicts]) for key in keys}
|
|
17
|
+
|
|
18
|
+
def make_smooth_preds(prediction_array, window_size_s=1, fps=50):
    """Smooth a 1-D prediction signal with a moving-average filter.

    Args:
        prediction_array: 1-D array of per-frame prediction values.
        window_size_s: averaging-window length in seconds.
        fps: frames per second (converts seconds to frames).

    Returns:
        The 'valid'-mode moving average; output is shorter than the input
        by window_size - 1 samples.
    """
    n_frames = int(window_size_s * fps)
    kernel = np.full(n_frames, 1.0 / n_frames)
    return np.convolve(prediction_array, kernel, mode='valid')
|
|
22
|
+
|
|
23
|
+
def find_true_pred_sequences(predictions):
    """
    Efficiently finds sequences of 'outside' predictions in the binary predictions array using NumPy.

    Args:
        predictions (np.array): An array of boolean values, where True represents an 'outside' image
                                and False represents an 'inside' image. Any array-like of booleans
                                is accepted and converted with np.asarray.

    Returns:
        list of tuples: A list where each tuple represents a sequence of 'outside' predictions,
                        with the first element as the start index and the second element as the
                        stop index (inclusive). Empty input yields an empty list.
    """
    predictions = np.asarray(predictions)
    # Guard: the original indexed predictions[0] / predictions[-1] and
    # crashed with IndexError on an empty array.
    if predictions.size == 0:
        return []

    # Identify where the value changes (False->True or True->False).
    change_indices = np.where(np.diff(predictions.astype(int)) != 0)[0]

    # diff shortens the array by 1; shift indices to point at the first
    # element AFTER each transition in the original array.
    change_indices += 1

    # If the array starts 'outside', prepend 0 as the first sequence start.
    if predictions[0]:
        change_indices = np.insert(change_indices, 0, 0)

    # If the array ends 'outside', append the length as the final boundary.
    if predictions[-1]:
        change_indices = np.append(change_indices, predictions.size)

    # Boundaries now alternate start, stop, start, stop, ...; pair them up.
    return [
        (change_indices[i], change_indices[i + 1] - 1)
        for i in range(0, len(change_indices), 2)
    ]
|
|
53
|
+
|
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
from .inference_dataset import InferenceDataset
|
|
2
|
+
from torch.utils.data import DataLoader
|
|
3
|
+
import torch
|
|
4
|
+
from torch import nn
|
|
5
|
+
import json
|
|
6
|
+
from .postprocess import (
|
|
7
|
+
concat_pred_dicts,
|
|
8
|
+
make_smooth_preds,
|
|
9
|
+
find_true_pred_sequences
|
|
10
|
+
)
|
|
11
|
+
import numpy as np
|
|
12
|
+
|
|
13
|
+
# Default inference configuration used by Classifier below when no explicit
# config is supplied.
sample_config = {
    # mean and std for normalization
    "mean": (0.45211223, 0.27139644, 0.19264949),
    "std": (0.31418097, 0.21088019, 0.16059452),
    # Image Size
    "size_x": 716,
    "size_y": 716,
    # how to wrangle axes of the image before putting them in the network
    "axes": [2,0,1], # 2,1,0 for opencv
    # DataLoader batch size for inference
    "batchsize": 16,
    # always 1 for Windows systems # FIXME: fix celery crash if multiprocessing
    "num_workers": 0,
    # activation applied to the raw model output (maybe add sigmoid after prediction?)
    "activation": nn.Sigmoid(),
    # output labels, presumably in the order of the model's prediction vector
    # — TODO confirm against the trained model
    "labels": [
        'appendix', 'blood', 'diverticule', 'grasper', 'ileocaecalvalve', 'ileum', 'low_quality', 'nbi', 'needle', 'outside', 'polyp', 'snare', 'water_jet', 'wound'
    ]
}
|
|
30
|
+
|
|
31
|
+
class Classifier():
    """Runs a trained model over image frames and post-processes the predictions.

    Wraps dataset/dataloader construction, batched GPU inference, JSON export,
    and smoothing/sequence extraction of per-frame multilabel predictions.
    """

    def __init__(self, model=None, config=sample_config, verbose = False):
        """Store the model, the inference config dict, and the verbosity flag.

        NOTE(review): the default `config` is the shared module-level
        `sample_config` dict; mutating it on one instance affects all
        instances that use the default.
        """
        self.config = config
        self.model = model
        self.verbose = verbose

    def pipe(self, paths, crops, verbose = None):
        """Run batched inference over `paths` (with per-image `crops`).

        Returns a list of per-frame prediction vectors (Python lists of
        floats, after the configured activation).
        """
        if verbose is None:
            verbose = self.verbose

        dataset = InferenceDataset(paths, crops, self.config)
        if verbose:
            print("Dataset created")

        dl = DataLoader(
            dataset=dataset,
            batch_size=self.config["batchsize"],
            num_workers=self.config["num_workers"],
            # order must match `paths` so predictions align with frames
            shuffle = False,
            pin_memory=True
        )
        if verbose:
            print("Dataloader created")

        predictions = []

        with torch.inference_mode():
            if self.verbose:
                print("Starting inference")
            for i,batch in enumerate(dl):
                # NOTE(review): hard-coded .cuda() — assumes a CUDA device
                # is available; fails on CPU-only hosts.
                prediction = self.model(batch.cuda())
                prediction = self.config["activation"](prediction).cpu().tolist()#.numpy().tolist()
                predictions += prediction
                if self.verbose and i==0:
                    print("First batch done")

        return predictions

    def __call__(self, image, crop=None):
        """Convenience wrapper: run `pipe` on a single image/crop pair."""
        return self.pipe([image], [crop])

    def readable(self, predictions):
        """Map one prediction vector to a {label: value} dict using config labels."""
        return {label: prediction for label, prediction in zip(self.config["labels"], predictions)}

    def get_prediction_dict(self, predictions, paths):
        """Bundle labels, frame paths, and predictions into a JSON-ready dict."""
        json_dict = {
            "labels": self.config["labels"],
            "paths": paths,
            "predictions": predictions
        }

        return json_dict

    def get_prediction_json(self, predictions, paths, json_target_path: str = None):
        """Serialize predictions to JSON at `json_target_path`.

        Defaults to "predictions.json" in the working directory when no
        target path is given.
        """
        if not json_target_path:
            json_target_path = "predictions.json"

        json_dict = self.get_prediction_dict(predictions, paths)

        with open(json_target_path, 'w') as f:
            json.dump(json_dict, f)

        if self.verbose:
            print(f"Saved predictions to {json_target_path}")

    def post_process_predictions(self, pred_dicts, window_size_s=1, fps=50, min_seq_len_s = 0.5):
        '''
        pred_dicts: list of dictionaries with the same keys
        window_size_s: size of the window in seconds for smoothing
        fps: frames per second
        min_seq_len_s: minimum length of a sequence in seconds

        Returns:
            predictions: concatenated predictions
            smooth_predictions: smoothed predictions
            binary_predictions: binary predictions
            raw_sequences: raw sequences
            filtered_sequences: filtered sequences
        '''
        # Concatenate the predictions
        predictions = concat_pred_dicts(pred_dicts)

        # Moving-average smoothing per key (see make_smooth_preds).
        smooth_predictions = {key:[] for key in predictions.keys()}
        for key in predictions.keys():
            smooth_predictions[key] = make_smooth_preds(
                predictions[key],
                window_size_s=window_size_s,
                fps=fps
            )

        # Threshold smoothed values at 0.5 into boolean arrays.
        binary_predictions = {}
        for key in smooth_predictions.keys():
            binary_predictions[key] = np.array([p > 0.5 for p in smooth_predictions[key]])

        # Extract contiguous True runs as (start, stop) index tuples.
        raw_sequences = {}
        for key in binary_predictions.keys():
            raw_sequences[key] = find_true_pred_sequences(binary_predictions[key])

        # Drop sequences shorter than min_seq_len_s (converted to frames).
        filtered_sequences = {}
        min_seq_len = int(min_seq_len_s * fps)
        for key in raw_sequences.keys():
            filtered_sequences[key] = [s for s in raw_sequences[key] if s[1] - s[0] > min_seq_len]

        return predictions, smooth_predictions, binary_predictions, raw_sequences, filtered_sequences

    def post_process_predictions_serializable(
        self, pred_dicts,
        window_size_s = 1, fps = 50,
        min_seq_len_s = 0.5
    ):
        """Like post_process_predictions, but converts the result into
        plain JSON-serializable Python types.

        numpy arrays become lists; lists of (start, stop) tuples are split
        into parallel `<key>_start` / `<key>_stop` integer lists.
        """
        result = self.post_process_predictions(
            pred_dicts,
            window_size_s,
            fps, min_seq_len_s
        )

        for i, _dict in enumerate(result):
            # Snapshot the keys: the loop below adds/deletes keys while iterating.
            _keys = list(_dict.keys())
            for key in _keys:
                # if numpy array
                if hasattr(_dict[key], "tolist"):
                    result[i][key] = _dict[key].tolist()

                # check if list of tuples
                # if so, make sure each tuple has 2 elements and split to two lists (start, stop)
                # NOTE(review): all(...) over an EMPTY list is True, so an empty
                # sequence list is also split into empty _start/_stop lists —
                # confirm downstream consumers expect that.
                if all(isinstance(x, tuple) for x in _dict[key]):
                    if all(len(x) == 2 for x in _dict[key]):
                        result[i][f"{key}_start"] = [int(x[0]) for x in _dict[key]]
                        result[i][f"{key}_stop"] = [int(x[1]) for x in _dict[key]]
                        del result[i][key]

        # make dict of dicts
        result_dict = {
            "predictions": result[0],
            "smooth_predictions": result[1],
            "binary_predictions": result[2],
            "raw_sequences": result[3],
            "filtered_sequences": result[4]
        }

        return result_dict
|
|
172
|
+
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import streamlit as st
|
|
2
|
+
import matplotlib.pyplot as plt
|
|
3
|
+
import json
|
|
4
|
+
from PIL import Image
|
|
5
|
+
import argparse
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
# Load the JSON file (cached by Streamlit across reruns)
@st.cache_data
def load_data(file_path):
    """Read and cache the predictions JSON stored at `file_path`."""
    with open(file_path, 'r') as fh:
        return json.load(fh)
|
|
14
|
+
|
|
15
|
+
# Streamlit App
def main(file_path):
    """Render the prediction-visualization UI for the JSON file at `file_path`."""
    st.title("Visualize Predictions")

    pred_data = load_data(file_path)

    # Let the user pick which label's prediction curve to inspect.
    selected_label = st.selectbox('Select Label', options=pred_data['labels'])
    label_pos = pred_data['labels'].index(selected_label)
    series = [frame_pred[label_pos] for frame_pred in pred_data['predictions']]

    # Line plot of the chosen label's per-frame prediction values.
    st.subheader(f"Line plot for {selected_label}")

    fig, ax = plt.subplots(figsize=(10, 6))  # explicit figure/axis for st.pyplot
    ax.plot(series)
    ax.set_xlabel('Frame')
    ax.set_ylabel('Prediction Value')
    ax.set_title(f'Predictions for {selected_label}')

    st.pyplot(fig)  # Pass the figure object explicitly

    # Frame browser: slider picks an index into the stored frame paths.
    frame_idx = st.slider('Select a frame', 0, len(pred_data['paths']) - 1)
    frame = Image.open(pred_data['paths'][frame_idx])
    st.image(frame, caption=f"Frame {frame_idx}", use_column_width=True)
|
|
50
|
+
|
|
51
|
+
if __name__ == '__main__':
    # CLI entry point: require a --file path to a predictions JSON and hand
    # it to the Streamlit app's main().
    parser = argparse.ArgumentParser(description="Visualize Predictions")
    parser.add_argument("--file", type=str, help="Path to JSON file containing predictions", required=True)
    args = parser.parse_args()
    main(args.file)
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from PIL import Image, ImageOps # Import the required modules from Pillow
|
|
3
|
+
|
|
4
|
+
def crop_img(img, crop):
    """
    Crops the image based on the specified dimensions and adds padding to maintain aspect ratio.

    Parameters:
    img: PIL Image object.
    crop: Tuple of (ymin, ymax, xmin, xmax) specifying the crop area.

    Returns:
    PIL Image object that has been cropped and padded as necessary.
    """
    # Pillow's crop takes (left, upper, right, lower).
    ymin, ymax, xmin, xmax = crop
    cropped = img.crop((xmin, ymin, xmax, ymax))

    # Pad the shorter dimension on both sides so the result is square.
    width, height = cropped.size
    diff = abs(width - height)
    if width > height:
        # wider than tall: pad top and bottom
        border = (0, diff // 2, 0, diff - diff // 2)  # (left, top, right, bottom)
    elif height > width:
        # taller than wide: pad left and right
        border = (diff // 2, 0, diff - diff // 2, 0)
    else:
        border = (0, 0, 0, 0)

    return ImageOps.expand(cropped, border)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class Cropper:
    """Callable that crops, pads to square, and optionally rescales an image."""

    def __init__(self):
        pass

    def __call__(self, img, crop=None, scale=None, scale_method=Image.Resampling.LANCZOS):
        """
        Applies cropping and scaling transformations to the input image.

        Parameters:
        img: PIL Image object or numpy array of the image.
        crop: Tuple specifying the cropping area (y_min, y_max, x_min, x_max).
              Required in practice: passing None raises (see below).
        scale: Optional tuple specifying the new size (width, height).
        scale_method: Resampling method used for scaling (default is Image.Resampling.LANCZOS).

        Returns:
        Numpy array of the processed image.

        Raises:
        NotImplementedError: if crop is None (automatic crop detection is
            not implemented).
        """
        # Convert numpy array to PIL Image if necessary
        if isinstance(img, np.ndarray):
            img = Image.fromarray(img.astype('uint8'), 'RGB')

        if crop is not None:
            img = crop_img(img, crop)
        else:
            # NotImplementedError (instead of the original bare Exception) is
            # the idiomatic type here and is still caught by `except Exception`.
            raise NotImplementedError("Automatic crop detection not implemented yet")

        if scale is not None:
            img = img.resize(scale, resample=scale_method)

        # Convert PIL Image back to numpy array
        img = np.array(img)

        return img
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# agl_visualization/run_visualizer.py
|
|
2
|
+
|
|
3
|
+
import subprocess
|
|
4
|
+
import argparse
|
|
5
|
+
|
|
6
|
+
def main():
    """Parse the --file argument and launch the Streamlit prediction visualizer."""
    parser = argparse.ArgumentParser(description="Visualize Predictions")
    parser.add_argument("--file", type=str, help="Path to JSON file containing predictions", required=True)
    args = parser.parse_args()

    # Replace this with the path where prediction_visualizer.py resides
    streamlit_script_path = "agl_predict_endo_frame/prediction_visualizer.py"

    # Run streamlit with an argv list and shell=False: the original built a
    # shell string containing the user-supplied --file value, which allowed
    # shell injection (e.g. --file '$(rm -rf ~)').
    subprocess.run(
        ["streamlit", "run", streamlit_script_path, "--", "--file", args.file]
    )
|
|
16
|
+
|
|
17
|
+
# if __name__ == "__main__":
|
|
18
|
+
# parser = argparse.ArgumentParser(description="Visualize Predictions")
|
|
19
|
+
# parser.add_argument("--file", type=str, help="Path to JSON file containing predictions", required=True)
|
|
20
|
+
# args = parser.parse_args()
|
|
21
|
+
# main(args.file)
|