endoreg-db 0.1.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. endoreg_db/data/__init__.py +14 -0
  2. endoreg_db/data/active_model/data.yaml +3 -0
  3. endoreg_db/data/center/data.yaml +7 -0
  4. endoreg_db/data/endoscope_type/data.yaml +11 -0
  5. endoreg_db/data/endoscopy_processor/data.yaml +45 -0
  6. endoreg_db/data/examination/examinations/data.yaml +17 -0
  7. endoreg_db/data/examination/time/data.yaml +48 -0
  8. endoreg_db/data/examination/time-type/data.yaml +8 -0
  9. endoreg_db/data/examination/type/data.yaml +5 -0
  10. endoreg_db/data/information_source/data.yaml +30 -0
  11. endoreg_db/data/label/label/data.yaml +62 -0
  12. endoreg_db/data/label/label-set/data.yaml +18 -0
  13. endoreg_db/data/label/label-type/data.yaml +7 -0
  14. endoreg_db/data/model_type/data.yaml +7 -0
  15. endoreg_db/data/profession/data.yaml +70 -0
  16. endoreg_db/data/unit/data.yaml +17 -0
  17. endoreg_db/data/unit/length.yaml +31 -0
  18. endoreg_db/data/unit/volume.yaml +26 -0
  19. endoreg_db/data/unit/weight.yaml +31 -0
  20. endoreg_db/forms/__init__.py +2 -0
  21. endoreg_db/forms/settings/__init__.py +8 -0
  22. endoreg_db/forms/unit.py +6 -0
  23. endoreg_db/management/commands/_load_model_template.py +41 -0
  24. endoreg_db/management/commands/delete_legacy_images.py +19 -0
  25. endoreg_db/management/commands/delete_legacy_videos.py +17 -0
  26. endoreg_db/management/commands/extract_legacy_video_frames.py +18 -0
  27. endoreg_db/management/commands/fetch_legacy_image_dataset.py +32 -0
  28. endoreg_db/management/commands/import_legacy_images.py +94 -0
  29. endoreg_db/management/commands/import_legacy_videos.py +76 -0
  30. endoreg_db/management/commands/load_active_model_data.py +45 -0
  31. endoreg_db/management/commands/load_ai_model_data.py +45 -0
  32. endoreg_db/management/commands/load_base_db_data.py +62 -0
  33. endoreg_db/management/commands/load_center_data.py +43 -0
  34. endoreg_db/management/commands/load_endoscope_type_data.py +45 -0
  35. endoreg_db/management/commands/load_endoscopy_processor_data.py +45 -0
  36. endoreg_db/management/commands/load_examination_data.py +75 -0
  37. endoreg_db/management/commands/load_information_source.py +45 -0
  38. endoreg_db/management/commands/load_label_data.py +67 -0
  39. endoreg_db/management/commands/load_profession_data.py +44 -0
  40. endoreg_db/management/commands/load_unit_data.py +46 -0
  41. endoreg_db/management/commands/load_user_groups.py +67 -0
  42. endoreg_db/management/commands/register_ai_model.py +65 -0
  43. endoreg_db/migrations/0001_initial.py +582 -0
  44. endoreg_db/models/__init__.py +53 -0
  45. endoreg_db/models/ai_model/__init__.py +3 -0
  46. endoreg_db/models/ai_model/active_model.py +9 -0
  47. endoreg_db/models/ai_model/model_meta.py +24 -0
  48. endoreg_db/models/ai_model/model_type.py +26 -0
  49. endoreg_db/models/ai_model/utils.py +8 -0
  50. endoreg_db/models/annotation/__init__.py +2 -0
  51. endoreg_db/models/annotation/binary_classification_annotation_task.py +80 -0
  52. endoreg_db/models/annotation/image_classification.py +27 -0
  53. endoreg_db/models/center.py +19 -0
  54. endoreg_db/models/data_file/__init__.py +4 -0
  55. endoreg_db/models/data_file/base_classes/__init__.py +3 -0
  56. endoreg_db/models/data_file/base_classes/abstract_frame.py +51 -0
  57. endoreg_db/models/data_file/base_classes/abstract_video.py +200 -0
  58. endoreg_db/models/data_file/frame.py +45 -0
  59. endoreg_db/models/data_file/report_file.py +88 -0
  60. endoreg_db/models/data_file/video/__init__.py +7 -0
  61. endoreg_db/models/data_file/video/import_meta.py +25 -0
  62. endoreg_db/models/data_file/video/video.py +25 -0
  63. endoreg_db/models/data_file/video_segment.py +107 -0
  64. endoreg_db/models/examination/__init__.py +4 -0
  65. endoreg_db/models/examination/examination.py +26 -0
  66. endoreg_db/models/examination/examination_time.py +27 -0
  67. endoreg_db/models/examination/examination_time_type.py +24 -0
  68. endoreg_db/models/examination/examination_type.py +18 -0
  69. endoreg_db/models/hardware/__init__.py +2 -0
  70. endoreg_db/models/hardware/endoscope.py +44 -0
  71. endoreg_db/models/hardware/endoscopy_processor.py +143 -0
  72. endoreg_db/models/information_source.py +22 -0
  73. endoreg_db/models/label/__init__.py +1 -0
  74. endoreg_db/models/label/label.py +84 -0
  75. endoreg_db/models/legacy_data/__init__.py +3 -0
  76. endoreg_db/models/legacy_data/image.py +34 -0
  77. endoreg_db/models/patient_examination/__init__.py +35 -0
  78. endoreg_db/models/persons/__init__.py +4 -0
  79. endoreg_db/models/persons/examiner/__init__.py +2 -0
  80. endoreg_db/models/persons/examiner/examiner.py +16 -0
  81. endoreg_db/models/persons/examiner/examiner_type.py +2 -0
  82. endoreg_db/models/persons/patient.py +58 -0
  83. endoreg_db/models/persons/person.py +34 -0
  84. endoreg_db/models/persons/portal_user_information.py +29 -0
  85. endoreg_db/models/prediction/__init__.py +2 -0
  86. endoreg_db/models/prediction/image_classification.py +37 -0
  87. endoreg_db/models/prediction/video_prediction_meta.py +244 -0
  88. endoreg_db/models/unit.py +20 -0
  89. endoreg_db/queries/__init__.py +5 -0
  90. endoreg_db/queries/annotations/__init__.py +3 -0
  91. endoreg_db/queries/annotations/legacy.py +159 -0
  92. endoreg_db/queries/get/__init__.py +6 -0
  93. endoreg_db/queries/get/annotation.py +0 -0
  94. endoreg_db/queries/get/center.py +42 -0
  95. endoreg_db/queries/get/model.py +13 -0
  96. endoreg_db/queries/get/patient.py +14 -0
  97. endoreg_db/queries/get/patient_examination.py +20 -0
  98. endoreg_db/queries/get/prediction.py +0 -0
  99. endoreg_db/queries/get/report_file.py +33 -0
  100. endoreg_db/queries/get/video.py +31 -0
  101. endoreg_db/queries/get/video_import_meta.py +0 -0
  102. endoreg_db/queries/get/video_prediction_meta.py +0 -0
  103. endoreg_db/queries/sanity/__init_.py +0 -0
  104. endoreg_db/serializers/__init__.py +10 -0
  105. endoreg_db/serializers/ai_model.py +19 -0
  106. endoreg_db/serializers/annotation.py +17 -0
  107. endoreg_db/serializers/center.py +11 -0
  108. endoreg_db/serializers/examination.py +33 -0
  109. endoreg_db/serializers/frame.py +13 -0
  110. endoreg_db/serializers/hardware.py +21 -0
  111. endoreg_db/serializers/label.py +22 -0
  112. endoreg_db/serializers/patient.py +10 -0
  113. endoreg_db/serializers/prediction.py +15 -0
  114. endoreg_db/serializers/report_file.py +7 -0
  115. endoreg_db/serializers/video.py +27 -0
  116. endoreg_db-0.2.1.dist-info/LICENSE +674 -0
  117. endoreg_db-0.2.1.dist-info/METADATA +27 -0
  118. endoreg_db-0.2.1.dist-info/RECORD +126 -0
  119. endoreg_db-0.1.0.dist-info/METADATA +0 -19
  120. endoreg_db-0.1.0.dist-info/RECORD +0 -10
  121. {endoreg_db-0.1.0.dist-info → endoreg_db-0.2.1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,58 @@
1
+ from .person import Person
2
+ from django import forms
3
+ from django.forms import DateInput
4
+ from rest_framework import serializers
5
+ from ..patient_examination import PatientExamination
6
+ from ..data_file import ReportFile
7
+ from django.db import models
8
+
9
class Patient(Person):
    """
    A patient record.

    Attributes inherited from Person:
        first_name (str): The first name of the patient.
        last_name (str): The last name of the patient.
        dob (datetime.date): The date of birth of the patient.
        gender (str): The gender of the patient.
        email (str): The email address of the patient.
        phone (str): The phone number of the patient.
    """
    center = models.ForeignKey("Center", on_delete=models.CASCADE, blank=True, null=True)

    def __str__(self):
        return f"{self.first_name} {self.last_name} ({self.dob})"

    def get_unmatched_report_files(self):
        """Report files of this patient not yet matched to a patient examination."""
        return self.reportfile_set.filter(patient_examination=None)

    def get_unmatched_video_files(self):
        """Video files of this patient not yet matched to a patient examination."""
        return self.videos.filter(patient_examination=None)

    def get_patient_examinations(self):
        """All patient examinations for this patient, most recent first."""
        return self.patient_examinations.order_by('-date')

    def create_examination_by_report_file(self, report_file: ReportFile):
        """Create and persist a PatientExamination linked to the given report file."""
        new_examination = PatientExamination(patient=self, report_file=report_file)
        new_examination.save()
        return new_examination
45
+
46
+
47
class PatientForm(forms.ModelForm):
    """ModelForm for Patient with an HTML date picker for dob and Bootstrap styling."""

    class Meta:
        model = Patient
        fields = '__all__'
        widgets = {
            'dob': DateInput(attrs={'type': 'date'}),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tag every widget with Bootstrap's form-control class.
        for _field_name, form_field in self.fields.items():
            form_field.widget.attrs['class'] = 'form-control'
@@ -0,0 +1,34 @@
1
+ from abc import abstractmethod
2
+ from django.db import models
3
+
4
class Person(models.Model):
    """
    Abstract base class for a person.

    Attributes:
        first_name (str): The first name of the person.
        last_name (str): The last name of the person.
        dob (date): The date of birth of the person.
        gender (str): The gender of the person.
        email (str): The email address of the person.
        phone (str): The phone number of the person.
    """

    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    dob = models.DateField("Date of Birth", blank=True, null=True)
    gender = models.CharField(
        max_length=10, choices=[('Male', 'Male'), ('Female', 'Female'), ('Other', 'Other')],
        blank=True, null=True
    )
    email = models.EmailField(max_length=255, blank=True, null=True)
    phone = models.CharField(max_length=255, blank=True, null=True)
    # presumably flags synthetic/test records vs. real people — confirm with callers
    is_real_person = models.BooleanField(default=True)

    # NOTE(review): @abstractmethod has no enforcement effect here because
    # Django's model metaclass is not ABCMeta — subclasses are NOT forced to
    # override __str__. It only documents intent.
    @abstractmethod
    def __str__(self):
        pass

    class Meta:
        abstract = True
34
+
@@ -0,0 +1,29 @@
1
+ from django.db import models
2
+
3
+ # models.py in your main app
4
+
5
+ from django.db import models
6
+ from django.contrib.auth.models import User
7
+
8
class ProfessionManager(models.Manager):
    """Manager enabling natural-key (de)serialization of Profession by name."""
    def get_by_natural_key(self, name):
        # Used by Django's serialization framework to resolve fixtures by name.
        return self.get(name=name)
11
+
12
class Profession(models.Model):
    """A profession (e.g. loaded from profession/data.yaml) with optional localized names."""
    objects = ProfessionManager()
    name = models.CharField(max_length=100)
    name_de = models.CharField(max_length=100, blank=True, null=True)
    name_en = models.CharField(max_length=100, blank=True, null=True)
    description = models.TextField(blank=True, null=True)

    def natural_key(self):
        # Counterpart to ProfessionManager.get_by_natural_key (consistent with Unit).
        return (self.name,)

    def __str__(self):
        # BUGFIX: name_de is nullable; returning None from __str__ raises
        # TypeError. Fall back to the canonical name.
        return self.name_de or self.name
21
+
22
class PortalUserInfo(models.Model):
    """Per-user profile data linked one-to-one to Django's auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    profession = models.ForeignKey('Profession', on_delete=models.CASCADE, blank=True, null=True)
    works_in_endoscopy = models.BooleanField(blank=True, null=True)
    # Add other fields as needed

    def __str__(self):
        return self.user.username
@@ -0,0 +1,2 @@
1
+ from .image_classification import ImageClassificationPrediction
2
+ from .video_prediction_meta import LegacyVideoPredictionMeta, VideoPredictionMeta
@@ -0,0 +1,37 @@
1
+ from django.db import models
2
+
3
class ImageClassificationPrediction(models.Model):
    """
    A single image classification prediction.

    Attributes:
        label (Label): The predicted label.
        frame / legacy_frame / legacy_image: Exactly one image-like source
            (the others stay null).
        value (bool): The binary prediction.
        confidence (float): The model's confidence.
        model_meta (ModelMeta): The model that produced the prediction.
        date_created (datetime): Creation timestamp.
    """
    label = models.ForeignKey("Label", on_delete=models.CASCADE, related_name="image_classification_predictions")
    frame = models.ForeignKey("Frame", on_delete=models.CASCADE, blank=True, null=True, related_name="image_classification_predictions")
    legacy_frame = models.ForeignKey("LegacyFrame", on_delete=models.CASCADE, blank=True, null=True, related_name="image_classification_predictions")
    legacy_image = models.ForeignKey("LegacyImage", on_delete=models.CASCADE, blank=True, null=True, related_name="image_classification_predictions")
    value = models.BooleanField()
    confidence = models.FloatField()
    model_meta = models.ForeignKey("ModelMeta", on_delete=models.CASCADE, related_name="image_classification_predictions")
    date_created = models.DateTimeField(auto_now_add=True)

    class Meta:
        unique_together = ('label', 'frame', 'model_meta')

    def get_image_object(self):
        """Return the first non-null image reference (frame, legacy frame, legacy image), or None."""
        for image_obj in (self.frame, self.legacy_frame, self.legacy_image):
            if image_obj is not None:
                return image_obj
        return None
@@ -0,0 +1,244 @@
1
+ from django.db import models
2
+
3
+ from endoreg_db.models.label.label import LabelSet
4
+ from ..data_file.video import LegacyVideo, Video
5
+ from ..data_file.frame import LegacyFrame, Frame
6
+ from .image_classification import ImageClassificationPrediction
7
+ from ..data_file.video_segment import LegacyLabelVideoSegment, LabelVideoSegment, find_segments_in_prediction_array
8
+ from ..information_source import get_prediction_information_source
9
+ import numpy as np
10
+ import pickle
11
+
12
+ DEFAULT_WINDOW_SIZE_IN_SECONDS_FOR_RUNNING_MEAN = 1.5
13
+ DEFAULT_VIDEO_SEGMENT_LENGTH_THRESHOLD_IN_S = 1.0
14
+
15
class AbstractVideoPredictionMeta(models.Model):
    """
    Abstract base for per-video AI prediction metadata.

    Stores a pickled (num_frames, num_labels) binary prediction array and
    derives labeled video segments from it. Concrete subclasses bind `video`
    and provide the matching video/frame/segment models.
    """
    model_meta = models.ForeignKey("ModelMeta", on_delete=models.CASCADE)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    video = None  # Placeholder for the video field, to be defined in derived classes
    prediction_array = models.BinaryField(blank=True, null=True)

    class Meta:
        abstract = True
        unique_together = ('model_meta', 'video')

    def __str__(self):
        return f"Video {self.video.id} - {self.model_meta.name}"

    def get_labelset(self):
        """Return the LabelSet associated with this prediction's model meta."""
        return self.model_meta.labelset

    def get_video_model(self):
        # Was `assert 1 == 2, ...`; assertions are stripped under `python -O`,
        # so raise explicitly instead.
        raise NotImplementedError("This method should be overridden in derived classes")

    def get_frame_model(self):
        raise NotImplementedError("This method should be overridden in derived classes")

    def get_label_list(self):
        """Return the labelset's labels in their canonical order."""
        labelset: LabelSet = self.get_labelset()
        return labelset.get_labels_in_order()

    def get_video_segment_model(self):
        raise NotImplementedError("This method should be overridden in derived classes")

    def save_prediction_array(self, prediction_array: np.array):
        """Pickle and persist the prediction array."""
        # NOTE(review): pickle is only safe because this payload is produced
        # by this app itself; never unpickle untrusted data.
        self.prediction_array = pickle.dumps(prediction_array)
        self.save()

    def get_prediction_array(self):
        """Unpickle and return the stored prediction array, or None if unset."""
        if self.prediction_array is None:
            return None
        return pickle.loads(self.prediction_array)

    def calculate_prediction_array(self):
        raise NotImplementedError("This method should be overridden in derived classes")

    def apply_running_mean(self, confidence_array, window_size_in_seconds: int = None):
        """
        Apply a running mean filter to the confidence array for smoothing,
        padding the edges with the neutral confidence 0.5.

        Args:
            confidence_array: 1D numpy array of per-frame confidence scores.
            window_size_in_seconds: Smoothing window in seconds; defaults to
                DEFAULT_WINDOW_SIZE_IN_SECONDS_FOR_RUNNING_MEAN.

        Returns:
            1D numpy array of smoothed confidences, same shape as the input.
        """
        video = self.video
        fps = video.fps

        if not window_size_in_seconds:
            window_size_in_seconds = DEFAULT_WINDOW_SIZE_IN_SECONDS_FOR_RUNNING_MEAN

        # Window size in frames, at least one frame.
        window_size_in_frames = max(int(window_size_in_seconds * fps), 1)

        # Uniform averaging kernel.
        window = np.ones(window_size_in_frames) / window_size_in_frames

        pad_size = window_size_in_frames // 2

        # Pad with neutral confidence (0.5) on both sides.
        padded_confidences = np.pad(confidence_array, (pad_size, pad_size), 'constant', constant_values=(0.5, 0.5))

        running_mean = np.convolve(padded_confidences, window, mode='same')

        # BUGFIX: the original sliced running_mean[pad_size:-pad_size], which
        # yields an EMPTY array when pad_size == 0 (one-frame window). Slice
        # to the explicit end index instead.
        return running_mean[pad_size:pad_size + len(confidence_array)]

    def create_video_segments_for_label(self, segments, label):
        """
        Create one video segment row per (start_frame_number, end_frame_number)
        tuple in `segments`, tagged with `label`.
        """
        video = self.video
        video_segment_model = self.get_video_segment_model()
        information_source = get_prediction_information_source()

        for start_frame_number, end_frame_number in segments:
            video_segment = video_segment_model(
                video=video,
                prediction_meta=self,
                start_frame_number=start_frame_number,
                end_frame_number=end_frame_number,
                source=information_source,
                label=label,
            )
            video_segment.save()

    def create_video_segments(self, segment_length_threshold_in_s: float = None):
        """
        Derive labeled segments from the prediction array, keeping only runs
        at least `segment_length_threshold_in_s` seconds long (default
        DEFAULT_VIDEO_SEGMENT_LENGTH_THRESHOLD_IN_S).
        """
        if not segment_length_threshold_in_s:
            segment_length_threshold_in_s = DEFAULT_VIDEO_SEGMENT_LENGTH_THRESHOLD_IN_S

        video = self.video
        fps = video.fps
        min_frame_length = int(segment_length_threshold_in_s * fps)

        label_list = self.get_label_list()

        # Lazily compute the prediction array on first use.
        if self.prediction_array is None:
            self.calculate_prediction_array()

        prediction_array = self.get_prediction_array()

        for i, label in enumerate(label_list):
            predictions = prediction_array[:, i]
            # segments: list of (start_frame_number, end_frame_number) tuples
            # of runs longer than the threshold
            segments = find_segments_in_prediction_array(predictions, min_frame_length)
            self.create_video_segments_for_label(segments, label)
165
+
166
# (redundant `import numpy as np` removed: numpy is already imported at the
# top of this module)
class VideoPredictionMeta(AbstractVideoPredictionMeta):
    """Prediction metadata bound to the current Video/Frame/LabelVideoSegment models."""
    video = models.OneToOneField("Video", on_delete=models.CASCADE, related_name="video_prediction_meta")

    def get_video_model(self):
        return Video

    def get_frame_model(self):
        return Frame

    def get_video_segment_model(self):
        return LabelVideoSegment

    def calculate_prediction_array(self, window_size_in_seconds: int = None):
        """
        Fetch all predictions for this video, labelset, and model meta, build
        a (num_frames, num_labels) binary array and persist it.
        """
        video: Video = self.video

        model_meta = self.model_meta
        label_list = self.get_label_list()

        # NOTE(review): `get_frame_number` is accessed without calling it —
        # presumably a property on Video; confirm, otherwise np.zeros receives
        # a bound method.
        prediction_array = np.zeros((video.get_frame_number, len(label_list)))
        for i, label in enumerate(label_list):
            # All predictions for this label/video/model, ordered by frame number.
            predictions = ImageClassificationPrediction.objects.filter(
                label=label, frame__video=video, model_meta=model_meta
            ).order_by('frame__frame_number')
            confidences = np.array([prediction.confidence for prediction in predictions])
            smooth_confidences = self.apply_running_mean(confidences, window_size_in_seconds)
            # Threshold the smoothed confidences into binary per-frame predictions.
            binary_predictions = smooth_confidences > 0.5
            # assumes exactly one prediction per frame exists — a count
            # mismatch raises a broadcast error here
            prediction_array[:, i] = binary_predictions

        self.save_prediction_array(prediction_array)
201
+
202
+
203
class LegacyVideoPredictionMeta(AbstractVideoPredictionMeta):
    # Same role as VideoPredictionMeta, bound to the legacy video/frame models.
    video = models.OneToOneField("LegacyVideo", on_delete=models.CASCADE, related_name="video_prediction_meta")

    def get_video_model(self):
        return LegacyVideo

    def get_frame_model(self):
        return LegacyFrame

    def get_video_segment_model(self):
        return LegacyLabelVideoSegment

    def calculate_prediction_array(self, window_size_in_seconds:int=None):
        """
        Fetches all predictions for this video, labelset, and model meta.

        Builds a (num_frames, num_labels) binary array and persists it via
        save_prediction_array().
        """
        video:LegacyVideo = self.video

        model_meta = self.model_meta
        label_list = self.get_label_list()

        # NOTE(review): `get_frame_number` is accessed without calling it —
        # presumably a property on LegacyVideo; confirm, otherwise np.zeros
        # receives a bound method.
        prediction_array = np.zeros((video.get_frame_number, len(label_list)))
        for i, label in enumerate(label_list):
            # fetch all predictions for this label, video, and model meta ordered by ImageClassificationPrediction.frame.frame_number
            predictions = ImageClassificationPrediction.objects.filter(label=label, legacy_frame__video=video, model_meta=model_meta).order_by('legacy_frame__frame_number')
            confidences = np.array([prediction.confidence for prediction in predictions])
            smooth_confidences = self.apply_running_mean(confidences, window_size_in_seconds)
            # calculate binary predictions
            binary_predictions = smooth_confidences > 0.5
            # add to prediction array (assumes one prediction per frame;
            # a count mismatch raises a broadcast error here)
            prediction_array[:, i] = binary_predictions

        # save prediction array
        self.save_prediction_array(prediction_array)
237
+
238
+
239
+
240
+
241
+
242
+
243
+
244
+
@@ -0,0 +1,20 @@
1
+ from django.db import models
2
+
3
class UnitManager(models.Manager):
    """Manager enabling natural-key (de)serialization of Unit by name."""
    def get_by_natural_key(self, name):
        # Used by Django's serialization framework to resolve fixtures by name.
        return self.get(name=name)
6
+
7
class Unit(models.Model):
    """A measurement unit (e.g. centimeter) with localized names and an abbreviation."""
    objects = UnitManager()

    name = models.CharField(max_length=100)  # e.g. "Centimeter"
    name_de = models.CharField(max_length=100, blank=True, null=True)  # e.g. "Zentimeter"
    name_en = models.CharField(max_length=100, blank=True, null=True)  # e.g. "Centimeter"
    description = models.CharField(max_length=100, blank=True, null=True)  # e.g. "centimeters", "millimeters", "inches"
    abbreviation = models.CharField(max_length=10, blank=True, null=True)  # e.g. "cm", "mm", "in"

    def __str__(self):
        # BUGFIX: abbreviation is nullable; returning None from __str__ raises
        # TypeError. Fall back to the canonical name.
        return self.abbreviation or self.name

    def natural_key(self):
        # Counterpart to UnitManager.get_by_natural_key.
        return (self.name,)
@@ -0,0 +1,5 @@
1
+ from .annotations import (
2
+ generate_legacy_dataset_output
3
+ )
4
+
5
+ from .get import *
@@ -0,0 +1,3 @@
1
+ from .legacy import (
2
+ get_legacy_annotations_for_labelset, generate_legacy_dataset_output
3
+ )
@@ -0,0 +1,159 @@
1
+ from ...models import LabelSet, ImageClassificationAnnotation
2
+ from django.db.models import Q, F
3
+ from django.db import models
4
+ from icecream import ic
5
+ from tqdm import tqdm
6
+ from collections import defaultdict
7
+
8
+ # def get_legacy_annotations_for_labelset(labelset_name, version=None):
9
+ # """
10
+ # Retrieve annotations for a given label set for training.
11
+
12
+ # Args:
13
+ # - labelset_name (str): The name of the label set.
14
+ # - version (int, optional): The version of the label set. If not specified, the latest version is fetched.
15
+
16
+ # Returns:
17
+ # - list[dict]: A list of dictionaries. Each dictionary represents an image and its annotations.
18
+ # Format: [{"frame": <frame_object>, "annotations": [{"label": <label_name>, "value": <value>}, ...]}, ...]
19
+
20
+ # Example:
21
+ # annotations_for_training = get_annotations_for_labelset("YourLabelSetName", version=2)
22
+
23
+ # """
24
+
25
+ # # Fetch the label set based on the name and optionally the version
26
+ # if version:
27
+ # labelset = LabelSet.objects.get(name=labelset_name, version=version)
28
+ # else:
29
+ # labelset = LabelSet.objects.filter(name=labelset_name).order_by('-version').first()
30
+ # if not labelset:
31
+ # raise ValueError(f"No label set found with the name: {labelset_name}")
32
+
33
+ # # Retrieve all labels in the label set
34
+ # labels_in_set = labelset.labels.all()
35
+
36
+ # # Get the most recent annotations for each frame/label combination
37
+ # annotations = ImageClassificationAnnotation.objects.filter(label__in=labels_in_set)
38
+ # annotations = annotations.annotate(
39
+ # latest_annotation=models.Window(
40
+ # expression=models.functions.RowNumber(),
41
+ # partition_by=[F('legacy_image'), F('label')],
42
+ # order_by=F('date_modified').desc()
43
+ # )
44
+ # ).filter(latest_annotation=1)
45
+
46
+ # # Organize the annotations by image/frame
47
+ # organized_annotations = []
48
+
49
+ # for annotation in tqdm(annotations):
50
+ # # ic(annotation)
51
+ # # Check if the frame is already in the organized list
52
+ # existing_entry = next((entry for entry in organized_annotations if entry['legacy_image'] == annotation.legacy_frame), None)
53
+
54
+ # if existing_entry:
55
+ # # Add this annotation to the existing frame's annotations
56
+ # existing_entry['annotations'].append({
57
+ # "label": annotation.label.name,
58
+ # "value": annotation.value
59
+ # })
60
+ # else:
61
+ # # Create a new entry for this frame
62
+ # organized_annotations.append({
63
+ # "legacy_image": annotation.legacy_image,
64
+ # "annotations": [{
65
+ # "label": annotation.label.name,
66
+ # "value": annotation.value
67
+ # }]
68
+ # })
69
+
70
+ # return organized_annotations
71
+
72
+
73
+
74
+ def get_legacy_annotations_for_labelset(labelset_name, version=None):
75
+ """
76
+ ... [rest of your docstring]
77
+ """
78
+
79
+ # Fetch the label set based on the name and optionally the version
80
+ if version:
81
+ labelset = LabelSet.objects.get(name=labelset_name, version=version)
82
+ else:
83
+ labelset = LabelSet.objects.filter(name=labelset_name).order_by('-version').first()
84
+ if not labelset:
85
+ raise ValueError(f"No label set found with the name: {labelset_name}")
86
+
87
+ # Retrieve all labels in the label set
88
+ labels_in_set = labelset.labels.all()
89
+
90
+ # Get the most recent annotations for each frame/label combination
91
+ annotations = (ImageClassificationAnnotation.objects
92
+ .filter(label__in=labels_in_set)
93
+ .select_related('legacy_image', 'label') # Reduce number of queries
94
+ .annotate(
95
+ latest_annotation=models.Window(
96
+ expression=models.functions.RowNumber(),
97
+ partition_by=[F('legacy_image'), F('label')],
98
+ order_by=F('date_modified').desc()
99
+ )
100
+ ).filter(latest_annotation=1))
101
+
102
+ # Organize the annotations by image/frame using a defaultdict
103
+ organized_annotations_dict = defaultdict(lambda: {
104
+ "legacy_image": None,
105
+ "annotations": []
106
+ })
107
+
108
+ for annotation in tqdm(annotations):
109
+ organized_entry = organized_annotations_dict[annotation.legacy_image.id]
110
+ organized_entry["legacy_image"] = annotation.legacy_image
111
+ organized_entry["annotations"].append({
112
+ "label": annotation.label.name,
113
+ "value": annotation.value
114
+ })
115
+
116
+ # Convert organized_annotations_dict to a list
117
+ organized_annotations = list(organized_annotations_dict.values())
118
+
119
+ return organized_annotations
120
+
121
def generate_legacy_dataset_output(labelset_name, version=None):
    """
    Generate an output suitable for creating PyTorch datasets.

    Args:
        labelset_name (str): The name of the label set.
        version (int, optional): The version of the label set. If not
            specified, the latest version is used.

    Returns:
        tuple[list[dict], LabelSet]:
            - [{"path": <file_path>, "labels": [<label_1_value>, ...]}, ...]
              where unannotated labels stay -1.
            - the LabelSet that was used to generate the output.
    """
    # Retrieve the organized annotations (handles latest-version resolution).
    organized_annotations = get_legacy_annotations_for_labelset(labelset_name, version)

    # BUGFIX: the original always did .get(name=..., version=version); with
    # version=None that does not resolve to the latest version the way the
    # call above does. Mirror its semantics.
    if version is not None:
        labelset = LabelSet.objects.get(name=labelset_name, version=version)
    else:
        labelset = LabelSet.objects.filter(name=labelset_name).order_by('-version').first()

    all_labels = labelset.get_labels_in_order()
    # Map label name -> position once instead of scanning all_labels per annotation.
    label_index = {label.name: i for i, label in enumerate(all_labels)}

    dataset_output = []

    for entry in organized_annotations:
        # Prepare a dictionary for each frame
        frame_data = {
            "path": entry['legacy_image'].image.path,  # Assuming 'image' field stores the file path
            "labels": [-1] * len(all_labels)  # Initialize with -1 for all labels
        }

        # Update the labels based on the annotations
        for annotation in entry['annotations']:
            index = label_index.get(annotation['label'])
            if index is not None:
                frame_data['labels'][index] = int(annotation['value'])

        dataset_output.append(frame_data)

    return dataset_output, labelset
@@ -0,0 +1,6 @@
1
+ from .annotation import *
2
+ from .center import *
3
+ from .model import *
4
+ from .patient import *
5
+ from .patient_examination import *
6
+ from .video import *
File without changes
@@ -0,0 +1,42 @@
1
+ from endoreg_db.models import Center
2
+ from typing import Optional
3
+
4
def get_centers() -> Center:
    """
    Returns all Center objects from the database.

    NOTE(review): despite the `-> Center` annotation this returns a QuerySet
    of Center rows, not a single instance — the annotation is inaccurate.
    """
    return Center.objects.all()
9
+
10
def get_center_by_name(name) -> Optional[Center]:
    """Retrieve a Center object by its name.

    Args:
        name (str): The name of the center to retrieve.

    Returns:
        Optional[Center]: The Center object with the given name, or None if it does not exist.
    """
    # BUGFIX: .get() raises Center.DoesNotExist, but the docstring and the
    # Optional return type promise None for a missing center.
    try:
        return Center.objects.get(name=name)
    except Center.DoesNotExist:
        return None
20
+
21
def get_center_by_id(id) -> Optional[Center]:
    """Retrieve a Center object by its id.

    Args:
        id (int): The id of the center to retrieve.

    Returns:
        Optional[Center]: The Center object with the given id, or None if it does not exist.
    """
    # BUGFIX: .get() raises Center.DoesNotExist, but the documented contract
    # promises None for a missing center.
    try:
        return Center.objects.get(id=id)
    except Center.DoesNotExist:
        return None
31
+
32
def get_center_by_natural_key(name: str) -> Optional[Center]:
    """
    Retrieve a Center object by its natural key.

    Args:
        name: The name of the center to retrieve.

    Returns:
        The Center object with the given name, or None if it does not exist.
    """
    # BUGFIX: get_by_natural_key ultimately calls .get(), which raises
    # Center.DoesNotExist; the documented contract promises None instead.
    try:
        return Center.objects.get_by_natural_key(name=name)
    except Center.DoesNotExist:
        return None
@@ -0,0 +1,13 @@
1
+ from endoreg_db.models import (
2
+ ModelMeta,
3
+ )
4
+
5
def get_latest_model_by_name(model_name):
    """
    Return the highest-version ModelMeta with the given name, or None when no
    model of that name exists.
    """
    return (
        ModelMeta.objects
        .filter(name=model_name)
        .order_by('-version')
        .first()  # first() yields None on an empty queryset
    )
@@ -0,0 +1,14 @@
1
+ from endoreg_db.models import Patient
2
+
3
def get_patients() -> Patient:
    """
    Returns all Patient objects from the database.

    NOTE(review): actually returns a QuerySet of Patient rows, not a single
    Patient — the annotation is inaccurate.
    """
    return Patient.objects.all()
8
+
9
def get_patients_without_dob() -> Patient:
    """
    Returns all Patient objects from the database without a date of birth.

    NOTE(review): actually returns a QuerySet of Patient rows, not a single
    Patient — the annotation is inaccurate.
    """
    return Patient.objects.filter(dob__isnull=True)
14
+