megadetector-5.0.29-py3-none-any.whl → megadetector-10.0.1-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.

This release of megadetector is flagged as potentially problematic.
Files changed (95)
  1. megadetector/classification/efficientnet/model.py +8 -8
  2. megadetector/classification/efficientnet/utils.py +6 -5
  3. megadetector/classification/prepare_classification_script_mc.py +3 -3
  4. megadetector/data_management/annotations/annotation_constants.py +0 -1
  5. megadetector/data_management/camtrap_dp_to_coco.py +34 -1
  6. megadetector/data_management/cct_json_utils.py +2 -2
  7. megadetector/data_management/coco_to_yolo.py +22 -5
  8. megadetector/data_management/databases/add_width_and_height_to_db.py +85 -12
  9. megadetector/data_management/databases/combine_coco_camera_traps_files.py +2 -2
  10. megadetector/data_management/databases/integrity_check_json_db.py +29 -15
  11. megadetector/data_management/generate_crops_from_cct.py +50 -1
  12. megadetector/data_management/labelme_to_coco.py +4 -2
  13. megadetector/data_management/labelme_to_yolo.py +82 -2
  14. megadetector/data_management/lila/generate_lila_per_image_labels.py +276 -18
  15. megadetector/data_management/lila/get_lila_annotation_counts.py +5 -3
  16. megadetector/data_management/lila/lila_common.py +3 -0
  17. megadetector/data_management/lila/test_lila_metadata_urls.py +15 -5
  18. megadetector/data_management/mewc_to_md.py +5 -0
  19. megadetector/data_management/ocr_tools.py +4 -3
  20. megadetector/data_management/read_exif.py +20 -5
  21. megadetector/data_management/remap_coco_categories.py +66 -4
  22. megadetector/data_management/remove_exif.py +50 -1
  23. megadetector/data_management/rename_images.py +3 -3
  24. megadetector/data_management/resize_coco_dataset.py +563 -95
  25. megadetector/data_management/yolo_output_to_md_output.py +131 -2
  26. megadetector/data_management/yolo_to_coco.py +140 -5
  27. megadetector/detection/change_detection.py +4 -3
  28. megadetector/detection/pytorch_detector.py +60 -22
  29. megadetector/detection/run_detector.py +225 -25
  30. megadetector/detection/run_detector_batch.py +42 -16
  31. megadetector/detection/run_inference_with_yolov5_val.py +12 -2
  32. megadetector/detection/run_tiled_inference.py +1 -0
  33. megadetector/detection/video_utils.py +53 -24
  34. megadetector/postprocessing/add_max_conf.py +4 -0
  35. megadetector/postprocessing/categorize_detections_by_size.py +1 -1
  36. megadetector/postprocessing/classification_postprocessing.py +55 -20
  37. megadetector/postprocessing/combine_batch_outputs.py +3 -2
  38. megadetector/postprocessing/compare_batch_results.py +64 -10
  39. megadetector/postprocessing/convert_output_format.py +12 -8
  40. megadetector/postprocessing/create_crop_folder.py +137 -10
  41. megadetector/postprocessing/load_api_results.py +26 -8
  42. megadetector/postprocessing/md_to_coco.py +4 -4
  43. megadetector/postprocessing/md_to_labelme.py +18 -7
  44. megadetector/postprocessing/merge_detections.py +5 -0
  45. megadetector/postprocessing/postprocess_batch_results.py +6 -3
  46. megadetector/postprocessing/remap_detection_categories.py +55 -2
  47. megadetector/postprocessing/render_detection_confusion_matrix.py +9 -6
  48. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +2 -2
  49. megadetector/taxonomy_mapping/map_new_lila_datasets.py +3 -4
  50. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +40 -19
  51. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +1 -1
  52. megadetector/taxonomy_mapping/species_lookup.py +123 -41
  53. megadetector/utils/ct_utils.py +133 -113
  54. megadetector/utils/md_tests.py +93 -13
  55. megadetector/utils/path_utils.py +137 -107
  56. megadetector/utils/split_locations_into_train_val.py +2 -2
  57. megadetector/utils/string_utils.py +7 -7
  58. megadetector/utils/url_utils.py +81 -58
  59. megadetector/utils/wi_utils.py +46 -17
  60. megadetector/visualization/plot_utils.py +13 -9
  61. megadetector/visualization/render_images_with_thumbnails.py +2 -1
  62. megadetector/visualization/visualization_utils.py +94 -46
  63. megadetector/visualization/visualize_db.py +36 -9
  64. megadetector/visualization/visualize_detector_output.py +4 -4
  65. {megadetector-5.0.29.dist-info → megadetector-10.0.1.dist-info}/METADATA +135 -135
  66. megadetector-10.0.1.dist-info/RECORD +139 -0
  67. {megadetector-5.0.29.dist-info → megadetector-10.0.1.dist-info}/licenses/LICENSE +0 -0
  68. {megadetector-5.0.29.dist-info → megadetector-10.0.1.dist-info}/top_level.txt +0 -0
  69. megadetector/api/batch_processing/api_core/__init__.py +0 -0
  70. megadetector/api/batch_processing/api_core/batch_service/__init__.py +0 -0
  71. megadetector/api/batch_processing/api_core/batch_service/score.py +0 -438
  72. megadetector/api/batch_processing/api_core/server.py +0 -294
  73. megadetector/api/batch_processing/api_core/server_api_config.py +0 -97
  74. megadetector/api/batch_processing/api_core/server_app_config.py +0 -55
  75. megadetector/api/batch_processing/api_core/server_batch_job_manager.py +0 -220
  76. megadetector/api/batch_processing/api_core/server_job_status_table.py +0 -149
  77. megadetector/api/batch_processing/api_core/server_orchestration.py +0 -360
  78. megadetector/api/batch_processing/api_core/server_utils.py +0 -88
  79. megadetector/api/batch_processing/api_core_support/__init__.py +0 -0
  80. megadetector/api/batch_processing/api_core_support/aggregate_results_manually.py +0 -46
  81. megadetector/api/batch_processing/api_support/__init__.py +0 -0
  82. megadetector/api/batch_processing/api_support/summarize_daily_activity.py +0 -152
  83. megadetector/api/batch_processing/data_preparation/__init__.py +0 -0
  84. megadetector/api/synchronous/__init__.py +0 -0
  85. megadetector/api/synchronous/api_core/animal_detection_api/__init__.py +0 -0
  86. megadetector/api/synchronous/api_core/animal_detection_api/api_backend.py +0 -151
  87. megadetector/api/synchronous/api_core/animal_detection_api/api_frontend.py +0 -263
  88. megadetector/api/synchronous/api_core/animal_detection_api/config.py +0 -35
  89. megadetector/api/synchronous/api_core/tests/__init__.py +0 -0
  90. megadetector/api/synchronous/api_core/tests/load_test.py +0 -109
  91. megadetector/utils/azure_utils.py +0 -178
  92. megadetector/utils/sas_blob_utils.py +0 -513
  93. megadetector-5.0.29.dist-info/RECORD +0 -163
  94. /megadetector/{api/batch_processing/__init__.py → __init__.py} +0 -0
  95. {megadetector-5.0.29.dist-info → megadetector-10.0.1.dist-info}/WHEEL +0 -0
megadetector/api/batch_processing/api_core/batch_service/score.py (removed)
@@ -1,438 +0,0 @@
- import io
- import json
- import math
- from megadetector.utils import ct_utils
- import os
- import sys
- from datetime import datetime
- from io import BytesIO
- from typing import Union
-
- from PIL import Image
- import numpy as np
- import requests
- import tensorflow as tf
- from azure.storage.blob import ContainerClient
-
- print('score.py, tensorflow version:', tf.__version__)
- print('score.py, tf.test.is_gpu_available:', tf.test.is_gpu_available())
-
- PRINT_EVERY = 500
-
-
- #%% Helper functions *copied* from ct_utils.py and visualization/visualization_utils.py
-
- IMAGE_ROTATIONS = {
-     3: 180,
-     6: 270,
-     8: 90
- }
-
- def truncate_float(x, precision=3):
-     """
-     Function for truncating a float scalar to the defined precision.
-     For example: truncate_float(0.0003214884) --> 0.000321
-     This function is primarily used to achieve a certain float representation
-     before exporting to JSON
-     Args:
-         x (float) Scalar to truncate
-         precision (int) The number of significant digits to preserve, should be
-             greater or equal 1
-     """
-
-     assert precision > 0
-
-     if np.isclose(x, 0):
-         return 0
-     else:
-         # Determine the factor, which shifts the decimal point of x
-         # just behind the last significant digit
-         factor = math.pow(10, precision - 1 - math.floor(math.log10(abs(x))))
-         # Shift decimal point by multiplicatipon with factor, flooring, and
-         # division by factor
-         return math.floor(x * factor)/factor
-
-
- def open_image(input_file: Union[str, BytesIO]) -> Image:
-     """Opens an image in binary format using PIL.Image and converts to RGB mode.
-
-     This operation is lazy; image will not be actually loaded until the first
-     operation that needs to load it (for example, resizing), so file opening
-     errors can show up later.
-
-     Args:
-         input_file: str or BytesIO, either a path to an image file (anything
-             that PIL can open), or an image as a stream of bytes
-
-     Returns:
-         an PIL image object in RGB mode
-     """
-     if (isinstance(input_file, str)
-             and input_file.startswith(('http://', 'https://'))):
-         response = requests.get(input_file)
-         image = Image.open(BytesIO(response.content))
-         try:
-             response = requests.get(input_file)
-             image = Image.open(BytesIO(response.content))
-         except Exception as e:
-             print(f'Error opening image {input_file}: {e}')
-             raise
-     else:
-         image = Image.open(input_file)
-     if image.mode not in ('RGBA', 'RGB', 'L'):
-         raise AttributeError(f'Image {input_file} uses unsupported mode {image.mode}')
-     if image.mode == 'RGBA' or image.mode == 'L':
-         # PIL.Image.convert() returns a converted copy of this image
-         image = image.convert(mode='RGB')
-
-     # alter orientation as needed according to EXIF tag 0x112 (274) for Orientation
-     # https://gist.github.com/dangtrinhnt/a577ece4cbe5364aad28
-     # https://www.media.mit.edu/pia/Research/deepview/exif.html
-     try:
-         exif = image._getexif()
-         orientation: int = exif.get(274, None)  # 274 is the key for the Orientation field
-         if orientation is not None and orientation in IMAGE_ROTATIONS:
-             image = image.rotate(IMAGE_ROTATIONS[orientation], expand=True)  # returns a rotated copy
-     except Exception:
-         pass
-
-     return image
-
-
- def load_image(input_file: Union[str, BytesIO]) -> Image.Image:
-     """Loads the image at input_file as a PIL Image into memory.
-     Image.open() used in open_image() is lazy and errors will occur downstream
-     if not explicitly loaded.
-     Args:
-         input_file: str or BytesIO, either a path to an image file (anything
-             that PIL can open), or an image as a stream of bytes
-     Returns: PIL.Image.Image, in RGB mode
-     """
-     image = open_image(input_file)
-     image.load()
-     return image
-
-
- #%% TFDetector class, an unmodified *copy* of the class in detection/tf_detector.py,
- # so we do not have to import the packages required by run_detector.py
-
- class TFDetector:
-     """
-     A detector model loaded at the time of initialization. It is intended to be used with
-     MegaDetector (TF). The inference batch size is set to 1; code needs to be modified
-     to support larger batch sizes, including resizing appropriately.
-     """
-
-     # Number of decimal places to round to for confidence and bbox coordinates
-     CONF_DIGITS = 3
-     COORD_DIGITS = 4
-
-     # MegaDetector was trained with batch size of 1, and the resizing function is a part
-     # of the inference graph
-     BATCH_SIZE = 1
-
-     # An enumeration of failure reasons
-     FAILURE_TF_INFER = 'Failure TF inference'
-     FAILURE_IMAGE_OPEN = 'Failure image access'
-
-     DEFAULT_RENDERING_CONFIDENCE_THRESHOLD = 0.85  # to render bounding boxes
-     DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD = 0.1  # to include in the output json file
-
-     DEFAULT_DETECTOR_LABEL_MAP = {
-         '1': 'animal',
-         '2': 'person',
-         '3': 'vehicle'  # available in megadetector v4+
-     }
-
-     NUM_DETECTOR_CATEGORIES = 4  # animal, person, group, vehicle - for color assignment
-
-     def __init__(self, model_path):
-         """Loads model from model_path and starts a tf.Session with this graph. Obtains
-         input and output tensor handles."""
-         detection_graph = TFDetector.__load_model(model_path)
-         self.tf_session = tf.Session(graph=detection_graph)
-
-         self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
-         self.box_tensor = detection_graph.get_tensor_by_name('detection_boxes:0')
-         self.score_tensor = detection_graph.get_tensor_by_name('detection_scores:0')
-         self.class_tensor = detection_graph.get_tensor_by_name('detection_classes:0')
-
-     @staticmethod
-     def round_and_make_float(d, precision=4):
-         return truncate_float(float(d), precision=precision)
-
-     @staticmethod
-     def __convert_coords(tf_coords):
-         """Converts coordinates from the model's output format [y1, x1, y2, x2] to the
-         format used by our API and MegaDB: [x1, y1, width, height]. All coordinates
-         (including model outputs) are normalized in the range [0, 1].
-         Args:
-             tf_coords: np.array of predicted bounding box coordinates from the TF detector,
-                 has format [y1, x1, y2, x2]
-         Returns: list of Python float, predicted bounding box coordinates [x1, y1, width, height]
-         """
-         # change from [y1, x1, y2, x2] to [x1, y1, width, height]
-         width = tf_coords[3] - tf_coords[1]
-         height = tf_coords[2] - tf_coords[0]
-
-         new = [tf_coords[1], tf_coords[0], width, height]  # must be a list instead of np.array
-
-         # convert numpy floats to Python floats
-         for i, d in enumerate(new):
-             new[i] = TFDetector.round_and_make_float(d, precision=TFDetector.COORD_DIGITS)
-         return new
-
-     @staticmethod
-     def convert_to_tf_coords(array):
-         """From [x1, y1, width, height] to [y1, x1, y2, x2], where x1 is x_min, x2 is x_max
-         This is an extraneous step as the model outputs [y1, x1, y2, x2] but were converted to the API
-         output format - only to keep the interface of the sync API.
-         """
-         x1 = array[0]
-         y1 = array[1]
-         width = array[2]
-         height = array[3]
-         x2 = x1 + width
-         y2 = y1 + height
-         return [y1, x1, y2, x2]
-
-     @staticmethod
-     def __load_model(model_path):
-         """Loads a detection model (i.e., create a graph) from a .pb file.
-         Args:
-             model_path: .pb file of the model.
-         Returns: the loaded graph.
-         """
-         print('TFDetector: Loading graph...')
-         detection_graph = tf.Graph()
-         with detection_graph.as_default():
-             od_graph_def = tf.GraphDef()
-             with tf.gfile.GFile(model_path, 'rb') as fid:
-                 serialized_graph = fid.read()
-                 od_graph_def.ParseFromString(serialized_graph)
-                 tf.import_graph_def(od_graph_def, name='')
-         print('TFDetector: Detection graph loaded.')
-
-         return detection_graph
-
-     def _generate_detections_one_image(self, image):
-         np_im = np.asarray(image, np.uint8)
-         im_w_batch_dim = np.expand_dims(np_im, axis=0)
-
-         # need to change the above line to the following if supporting a batch size > 1 and resizing to the same size
-         # np_images = [np.asarray(image, np.uint8) for image in images]
-         # images_stacked = np.stack(np_images, axis=0) if len(images) > 1 else np.expand_dims(np_images[0], axis=0)
-
-         # performs inference
-         (box_tensor_out, score_tensor_out, class_tensor_out) = self.tf_session.run(
-             [self.box_tensor, self.score_tensor, self.class_tensor],
-             feed_dict={self.image_tensor: im_w_batch_dim})
-
-         return box_tensor_out, score_tensor_out, class_tensor_out
-
-     def generate_detections_one_image(self, image, image_id,
-                                       detection_threshold=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD):
-         """Apply the detector to an image.
-         Args:
-             image: the PIL Image object
-             image_id: a path to identify the image; will be in the "file" field of the output object
-             detection_threshold: confidence above which to include the detection proposal
-         Returns:
-             A dict with the following fields, see the 'images' key in https://github.com/agentmorris/MegaDetector/tree/main/megadetector/api/batch_processing#batch-processing-api-output-format
-             - 'file' (always present)
-             - 'max_detection_conf'
-             - 'detections', which is a list of detection objects containing keys 'category', 'conf' and 'bbox'
-             - 'failure'
-         """
-         result = {
-             'file': image_id
-         }
-         try:
-             b_box, b_score, b_class = self._generate_detections_one_image(image)
-
-             # our batch size is 1; need to loop the batch dim if supporting batch size > 1
-             boxes, scores, classes = b_box[0], b_score[0], b_class[0]
-
-             detections_cur_image = []  # will be empty for an image with no confident detections
-             max_detection_conf = 0.0
-             for b, s, c in zip(boxes, scores, classes):
-                 if s > detection_threshold:
-                     detection_entry = {
-                         'category': str(int(c)),  # use string type for the numerical class label, not int
-                         'conf': truncate_float(float(s),  # cast to float for json serialization
-                                                precision=TFDetector.CONF_DIGITS),
-                         'bbox': TFDetector.__convert_coords(b)
-                     }
-                     detections_cur_image.append(detection_entry)
-                     if s > max_detection_conf:
-                         max_detection_conf = s
-
-             result['max_detection_conf'] = truncate_float(float(max_detection_conf),
-                                                           precision=TFDetector.CONF_DIGITS)
-             result['detections'] = detections_cur_image
-
-         except Exception as e:
-             result['failure'] = TFDetector.FAILURE_TF_INFER
-             print('TFDetector: image {} failed during inference: {}'.format(image_id, str(e)))
-
-         return result
-
-
- #%% Scoring script
-
- class BatchScorer:
-     """
-     Coordinates scoring images in this Task.
-
-     1. have a synchronized queue that download tasks enqueue and scoring function dequeues - but need to be able to
-     limit the size of the queue. We do not want to write the image to disk and then load it in the scoring func.
-     """
-     def __init__(self, **kwargs):
-         print('score.py BatchScorer, __init__()')
-
-         detector_path = kwargs.get('detector_path')
-         self.detector = TFDetector(detector_path)
-
-         self.use_url = kwargs.get('use_url')
-         if not self.use_url:
-             input_container_sas = kwargs.get('input_container_sas')
-             self.input_container_client = ContainerClient.from_container_url(input_container_sas)
-
-         self.detection_threshold = kwargs.get('detection_threshold')
-
-         self.image_ids_to_score = kwargs.get('image_ids_to_score')
-
-         # determine if there is metadata attached to each image_id
-         self.metadata_available = True if isinstance(self.image_ids_to_score[0], list) else False
-
-     def _download_image(self, image_file) -> Image:
-         """
-         Args:
-             image_file: Public URL if use_url, else the full path from container root
-
-         Returns:
-             PIL image loaded
-         """
-         if not self.use_url:
-             downloader = self.input_container_client.download_blob(image_file)
-             image_file = io.BytesIO()
-             blob_props = downloader.download_to_stream(image_file)
-
-         image = open_image(image_file)
-         return image
-
-     def score_images(self) -> list:
-         detections = []
-
-         for i in self.image_ids_to_score:
-
-             if self.metadata_available:
-                 image_id = i[0]
-                 image_metadata = i[1]
-             else:
-                 image_id = i
-
-             try:
-                 image = self._download_image(image_id)
-             except Exception as e:
-                 print(f'score.py BatchScorer, score_images, download_image exception: {e}')
-                 result = {
-                     'file': image_id,
-                     'failure': TFDetector.FAILURE_IMAGE_OPEN
-                 }
-             else:
-                 result = self.detector.generate_detections_one_image(
-                     image, image_id, detection_threshold=self.detection_threshold)
-
-             if self.metadata_available:
-                 result['meta'] = image_metadata
-
-             detections.append(result)
-             if len(detections) % PRINT_EVERY == 0:
-                 print(f'scored {len(detections)} images')
-
-         return detections
-
-
- def main():  # noqa
-     print('score.py, main()')
-
-     # information to determine input and output locations
-     api_instance_name = os.environ['API_INSTANCE_NAME']
-     job_id = os.environ['AZ_BATCH_JOB_ID']
-     task_id = os.environ['AZ_BATCH_TASK_ID']
-     mount_point = os.environ['AZ_BATCH_NODE_MOUNTS_DIR']
-
-     # other parameters for the task
-     begin_index = int(os.environ['TASK_BEGIN_INDEX'])
-     end_index = int(os.environ['TASK_END_INDEX'])
-
-     input_container_sas = os.environ.get('JOB_CONTAINER_SAS', None)  # could be None if use_url
-     use_url = os.environ.get('JOB_USE_URL', None)
-
-     if use_url and use_url.lower() == 'true':  # bool of any non-empty string is True
-         use_url = True
-     else:
-         use_url = False
-
-     detection_threshold = float(os.environ['DETECTION_CONF_THRESHOLD'])
-
-     print(f'score.py, main(), api_instance_name: {api_instance_name}, job_id: {job_id}, task_id: {task_id}, '
-           f'mount_point: {mount_point}, begin_index: {begin_index}, end_index: {end_index}, '
-           f'input_container_sas: {input_container_sas}, use_url (parsed): {use_url}'
-           f'detection_threshold: {detection_threshold}')
-
-     job_folder_mounted = os.path.join(mount_point, 'batch-api', f'api_{api_instance_name}', f'job_{job_id}')
-     task_out_dir = os.path.join(job_folder_mounted, 'task_outputs')
-     os.makedirs(task_out_dir, exist_ok=True)
-     task_output_path = os.path.join(task_out_dir, f'job_{job_id}_task_{task_id}.json')
-
-     # test that we can write to output path; also in case there is no image to process
-     ct_utils.write_json(task_output_path, [])
-
-     # list images to process
-     list_images_path = os.path.join(job_folder_mounted, f'{job_id}_images.json')
-     with open(list_images_path) as f:
-         list_images = json.load(f)
-     print(f'score.py, main(), length of list_images: {len(list_images)}')
-
-     if (not isinstance(list_images, list)) or len(list_images) == 0:
-         print('score.py, main(), zero images in specified overall list, exiting...')
-         sys.exit(0)
-
-     # items in this list can be strings or [image_id, metadata]
-     list_images = list_images[begin_index: end_index]
-     if len(list_images) == 0:
-         print('score.py, main(), zero images in the shard, exiting')
-         sys.exit(0)
-
-     print(f'score.py, main(), processing {len(list_images)} images in this Task')
-
-     # model path
-     # Path to .pb TensorFlow detector model file, relative to the
-     # models/megadetector_copies folder in mounted container
-     detector_model_rel_path = os.environ['DETECTOR_REL_PATH']
-     detector_path = os.path.join(mount_point, 'models', 'megadetector_copies', detector_model_rel_path)
-     assert os.path.exists(detector_path), f'detector is not found at the specified path: {detector_path}'
-
-     # score the images
-     scorer = BatchScorer(
-         detector_path=detector_path,
-         use_url=use_url,
-         input_container_sas=input_container_sas,
-         detection_threshold=detection_threshold,
-         image_ids_to_score=list_images
-     )
-
-     try:
-         tick = datetime.now()
-         detections = scorer.score_images()
-         duration = datetime.now() - tick
-         print(f'score.py, main(), score_images() duration: {duration}')
-     except Exception as e:
-         raise RuntimeError(f'score.py, main(), exception in score_images(): {e}')
-
-     ct_utils.write_json(task_output_path, detections, ensure_ascii=False)
-
- if __name__ == '__main__':
-     main()
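
Editor's note on the removed file: the one detail in this deleted score.py that still describes current MegaDetector result files is the box conversion documented in TFDetector.__convert_coords above. The TF graph emits normalized [y1, x1, y2, x2]; the JSON output stores [x_min, y_min, width, height], also normalized. Below is a minimal standalone sketch of that conversion for reference only; the function name is illustrative and not part of the package, and unlike the removed code it omits the truncation of each value to 4 significant digits via truncate_float:

    def tf_to_md_bbox(tf_coords):
        """Convert a normalized [y1, x1, y2, x2] box (TF detector output) to the
        [x_min, y_min, width, height] layout used in MegaDetector result files."""
        y1, x1, y2, x2 = (float(v) for v in tf_coords)
        return [x1, y1, x2 - x1, y2 - y1]

    # Exactly representable fractions keep the float comparison exact
    assert tf_to_md_bbox([0.125, 0.25, 0.5, 0.75]) == [0.25, 0.125, 0.5, 0.375]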