megadetector 10.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (147)
  1. megadetector/__init__.py +0 -0
  2. megadetector/api/__init__.py +0 -0
  3. megadetector/api/batch_processing/integration/digiKam/setup.py +6 -0
  4. megadetector/api/batch_processing/integration/digiKam/xmp_integration.py +465 -0
  5. megadetector/api/batch_processing/integration/eMammal/test_scripts/config_template.py +5 -0
  6. megadetector/api/batch_processing/integration/eMammal/test_scripts/push_annotations_to_emammal.py +125 -0
  7. megadetector/api/batch_processing/integration/eMammal/test_scripts/select_images_for_testing.py +55 -0
  8. megadetector/classification/__init__.py +0 -0
  9. megadetector/classification/aggregate_classifier_probs.py +108 -0
  10. megadetector/classification/analyze_failed_images.py +227 -0
  11. megadetector/classification/cache_batchapi_outputs.py +198 -0
  12. megadetector/classification/create_classification_dataset.py +626 -0
  13. megadetector/classification/crop_detections.py +516 -0
  14. megadetector/classification/csv_to_json.py +226 -0
  15. megadetector/classification/detect_and_crop.py +853 -0
  16. megadetector/classification/efficientnet/__init__.py +9 -0
  17. megadetector/classification/efficientnet/model.py +415 -0
  18. megadetector/classification/efficientnet/utils.py +608 -0
  19. megadetector/classification/evaluate_model.py +520 -0
  20. megadetector/classification/identify_mislabeled_candidates.py +152 -0
  21. megadetector/classification/json_to_azcopy_list.py +63 -0
  22. megadetector/classification/json_validator.py +696 -0
  23. megadetector/classification/map_classification_categories.py +276 -0
  24. megadetector/classification/merge_classification_detection_output.py +509 -0
  25. megadetector/classification/prepare_classification_script.py +194 -0
  26. megadetector/classification/prepare_classification_script_mc.py +228 -0
  27. megadetector/classification/run_classifier.py +287 -0
  28. megadetector/classification/save_mislabeled.py +110 -0
  29. megadetector/classification/train_classifier.py +827 -0
  30. megadetector/classification/train_classifier_tf.py +725 -0
  31. megadetector/classification/train_utils.py +323 -0
  32. megadetector/data_management/__init__.py +0 -0
  33. megadetector/data_management/animl_to_md.py +161 -0
  34. megadetector/data_management/annotations/__init__.py +0 -0
  35. megadetector/data_management/annotations/annotation_constants.py +33 -0
  36. megadetector/data_management/camtrap_dp_to_coco.py +270 -0
  37. megadetector/data_management/cct_json_utils.py +566 -0
  38. megadetector/data_management/cct_to_md.py +184 -0
  39. megadetector/data_management/cct_to_wi.py +293 -0
  40. megadetector/data_management/coco_to_labelme.py +284 -0
  41. megadetector/data_management/coco_to_yolo.py +702 -0
  42. megadetector/data_management/databases/__init__.py +0 -0
  43. megadetector/data_management/databases/add_width_and_height_to_db.py +107 -0
  44. megadetector/data_management/databases/combine_coco_camera_traps_files.py +210 -0
  45. megadetector/data_management/databases/integrity_check_json_db.py +528 -0
  46. megadetector/data_management/databases/subset_json_db.py +195 -0
  47. megadetector/data_management/generate_crops_from_cct.py +200 -0
  48. megadetector/data_management/get_image_sizes.py +164 -0
  49. megadetector/data_management/labelme_to_coco.py +559 -0
  50. megadetector/data_management/labelme_to_yolo.py +349 -0
  51. megadetector/data_management/lila/__init__.py +0 -0
  52. megadetector/data_management/lila/create_lila_blank_set.py +556 -0
  53. megadetector/data_management/lila/create_lila_test_set.py +187 -0
  54. megadetector/data_management/lila/create_links_to_md_results_files.py +106 -0
  55. megadetector/data_management/lila/download_lila_subset.py +182 -0
  56. megadetector/data_management/lila/generate_lila_per_image_labels.py +777 -0
  57. megadetector/data_management/lila/get_lila_annotation_counts.py +174 -0
  58. megadetector/data_management/lila/get_lila_image_counts.py +112 -0
  59. megadetector/data_management/lila/lila_common.py +319 -0
  60. megadetector/data_management/lila/test_lila_metadata_urls.py +164 -0
  61. megadetector/data_management/mewc_to_md.py +344 -0
  62. megadetector/data_management/ocr_tools.py +873 -0
  63. megadetector/data_management/read_exif.py +964 -0
  64. megadetector/data_management/remap_coco_categories.py +195 -0
  65. megadetector/data_management/remove_exif.py +156 -0
  66. megadetector/data_management/rename_images.py +194 -0
  67. megadetector/data_management/resize_coco_dataset.py +663 -0
  68. megadetector/data_management/speciesnet_to_md.py +41 -0
  69. megadetector/data_management/wi_download_csv_to_coco.py +247 -0
  70. megadetector/data_management/yolo_output_to_md_output.py +594 -0
  71. megadetector/data_management/yolo_to_coco.py +876 -0
  72. megadetector/data_management/zamba_to_md.py +188 -0
  73. megadetector/detection/__init__.py +0 -0
  74. megadetector/detection/change_detection.py +840 -0
  75. megadetector/detection/process_video.py +479 -0
  76. megadetector/detection/pytorch_detector.py +1451 -0
  77. megadetector/detection/run_detector.py +1267 -0
  78. megadetector/detection/run_detector_batch.py +2159 -0
  79. megadetector/detection/run_inference_with_yolov5_val.py +1314 -0
  80. megadetector/detection/run_md_and_speciesnet.py +1494 -0
  81. megadetector/detection/run_tiled_inference.py +1038 -0
  82. megadetector/detection/tf_detector.py +209 -0
  83. megadetector/detection/video_utils.py +1379 -0
  84. megadetector/postprocessing/__init__.py +0 -0
  85. megadetector/postprocessing/add_max_conf.py +72 -0
  86. megadetector/postprocessing/categorize_detections_by_size.py +166 -0
  87. megadetector/postprocessing/classification_postprocessing.py +1752 -0
  88. megadetector/postprocessing/combine_batch_outputs.py +249 -0
  89. megadetector/postprocessing/compare_batch_results.py +2110 -0
  90. megadetector/postprocessing/convert_output_format.py +403 -0
  91. megadetector/postprocessing/create_crop_folder.py +629 -0
  92. megadetector/postprocessing/detector_calibration.py +570 -0
  93. megadetector/postprocessing/generate_csv_report.py +522 -0
  94. megadetector/postprocessing/load_api_results.py +223 -0
  95. megadetector/postprocessing/md_to_coco.py +428 -0
  96. megadetector/postprocessing/md_to_labelme.py +351 -0
  97. megadetector/postprocessing/md_to_wi.py +41 -0
  98. megadetector/postprocessing/merge_detections.py +392 -0
  99. megadetector/postprocessing/postprocess_batch_results.py +2077 -0
  100. megadetector/postprocessing/remap_detection_categories.py +226 -0
  101. megadetector/postprocessing/render_detection_confusion_matrix.py +677 -0
  102. megadetector/postprocessing/repeat_detection_elimination/find_repeat_detections.py +206 -0
  103. megadetector/postprocessing/repeat_detection_elimination/remove_repeat_detections.py +82 -0
  104. megadetector/postprocessing/repeat_detection_elimination/repeat_detections_core.py +1665 -0
  105. megadetector/postprocessing/separate_detections_into_folders.py +795 -0
  106. megadetector/postprocessing/subset_json_detector_output.py +964 -0
  107. megadetector/postprocessing/top_folders_to_bottom.py +238 -0
  108. megadetector/postprocessing/validate_batch_results.py +332 -0
  109. megadetector/taxonomy_mapping/__init__.py +0 -0
  110. megadetector/taxonomy_mapping/map_lila_taxonomy_to_wi_taxonomy.py +491 -0
  111. megadetector/taxonomy_mapping/map_new_lila_datasets.py +213 -0
  112. megadetector/taxonomy_mapping/prepare_lila_taxonomy_release.py +165 -0
  113. megadetector/taxonomy_mapping/preview_lila_taxonomy.py +543 -0
  114. megadetector/taxonomy_mapping/retrieve_sample_image.py +71 -0
  115. megadetector/taxonomy_mapping/simple_image_download.py +224 -0
  116. megadetector/taxonomy_mapping/species_lookup.py +1008 -0
  117. megadetector/taxonomy_mapping/taxonomy_csv_checker.py +159 -0
  118. megadetector/taxonomy_mapping/taxonomy_graph.py +346 -0
  119. megadetector/taxonomy_mapping/validate_lila_category_mappings.py +83 -0
  120. megadetector/tests/__init__.py +0 -0
  121. megadetector/tests/test_nms_synthetic.py +335 -0
  122. megadetector/utils/__init__.py +0 -0
  123. megadetector/utils/ct_utils.py +1857 -0
  124. megadetector/utils/directory_listing.py +199 -0
  125. megadetector/utils/extract_frames_from_video.py +307 -0
  126. megadetector/utils/gpu_test.py +125 -0
  127. megadetector/utils/md_tests.py +2072 -0
  128. megadetector/utils/path_utils.py +2832 -0
  129. megadetector/utils/process_utils.py +172 -0
  130. megadetector/utils/split_locations_into_train_val.py +237 -0
  131. megadetector/utils/string_utils.py +234 -0
  132. megadetector/utils/url_utils.py +825 -0
  133. megadetector/utils/wi_platform_utils.py +968 -0
  134. megadetector/utils/wi_taxonomy_utils.py +1759 -0
  135. megadetector/utils/write_html_image_list.py +239 -0
  136. megadetector/visualization/__init__.py +0 -0
  137. megadetector/visualization/plot_utils.py +309 -0
  138. megadetector/visualization/render_images_with_thumbnails.py +243 -0
  139. megadetector/visualization/visualization_utils.py +1940 -0
  140. megadetector/visualization/visualize_db.py +630 -0
  141. megadetector/visualization/visualize_detector_output.py +479 -0
  142. megadetector/visualization/visualize_video_output.py +705 -0
  143. megadetector-10.0.13.dist-info/METADATA +134 -0
  144. megadetector-10.0.13.dist-info/RECORD +147 -0
  145. megadetector-10.0.13.dist-info/WHEEL +5 -0
  146. megadetector-10.0.13.dist-info/licenses/LICENSE +19 -0
  147. megadetector-10.0.13.dist-info/top_level.txt +1 -0
@@ -0,0 +1,873 @@
+ """
+
+ ocr_tools.py
+
+ Use OCR (via the Tesseract package) to pull metadata (particularly times and
+ dates) from camera trap images.
+
+ The general approach is:
+
+ * Crop a fixed percentage from the top and bottom of an image, slightly larger
+   than the largest examples we've seen of how much space is used for metadata.
+
+ * Define the background color as the most common pixel value, and find rows that are
+   mostly that color to refine the crop.
+
+ * Crop to the refined crop, then run pytesseract to extract text.
+
+ * Use regular expressions to find times and dates.
+
+ Prior to using this module:
+
+ * Install Tesseract from https://tesseract-ocr.github.io/tessdoc/Installation.html
+
+ * pip install pytesseract
+
+ Known limitations:
+
+ * Semi-transparent overlays (which I've only seen on consumer cameras) usually fail.
+
+ """
+
+ #%% Notes to self
+
+ """
+
+ * To use the legacy engine (--oem 0), I had to download an updated eng.traineddata file from:
+
+   https://github.com/tesseract-ocr/tessdata
+
+ """
+
+
+ #%% Constants and imports
+
+ import os
+ import json
+ import numpy as np
+ import datetime
+ import re
+
+ from functools import partial
+ from dateutil.parser import parse as dateparse
+
+ import cv2
+ from PIL import Image, ImageFilter
+ from tqdm import tqdm
+
+ from megadetector.utils.path_utils import find_images
+ from megadetector.utils.path_utils import open_file
+ from megadetector.utils import write_html_image_list
+ from megadetector.utils.ct_utils import is_iterable
+ from megadetector.visualization import visualization_utils as vis_utils
+
+ # pip install pytesseract
+ #
+ # Also install tesseract from: https://github.com/UB-Mannheim/tesseract/wiki, and add
+ # the installation dir to your path (on Windows, typically C:\Program Files (x86)\Tesseract-OCR)
+ import pytesseract # type: ignore
+
+
+ #%% Extraction options
+
+ class DatetimeExtractionOptions:
+     """
+     Options used to parameterize datetime extraction in most functions in this module.
+     """
+
+     def __init__(self):
+
+         #: Using a semi-arbitrary metric of how much it feels like we found the
+         #: text-containing region, discard regions that appear to be extraction failures
+         self.p_crop_success_threshold = 0.5
+
+         #: Pad each crop with a few pixels to make tesseract happy
+         self.crop_padding = 10
+
+         #: Discard short text, typically text from the top of the image
+         self.min_text_length = 4
+
+         #: When we're looking for pixels that match the background color, allow some
+         #: tolerance around the dominant color
+         self.background_tolerance = 2
+
+         #: We need to see a consistent color in at least this fraction of pixels in our rough
+         #: crop to believe that we actually found a candidate metadata region.
+         self.min_background_fraction = 0.3
+
+         #: What fraction of the [top,bottom] of the image should we use for our rough crop?
+         self.image_crop_fraction = [0.045 , 0.045]
+         # self.image_crop_fraction = [0.08 , 0.08]
+
+         #: Within that rough crop, how much should we use for determining the background color?
+         self.background_crop_fraction_of_rough_crop = 0.5
+
+         #: A row is considered a probable metadata row if it contains at least this fraction
+         #: of the background color. This is used only to find the top and bottom of the crop area,
+         #: so it's not that *every* row needs to hit this criterion, only the rows that are generally
+         #: above and below the text.
+         self.min_background_fraction_for_background_row = 0.5
+
+         #: psm 6: "assume a single uniform block of text"
+         #: psm 13: raw line
+         #: oem: 0 == legacy, 1 == lstm
+         #: tesseract_config_string = '--oem 0 --psm 6'
+         #:
+         #: Try these configuration strings in order until we find a valid datetime
+         self.tesseract_config_strings = ['--oem 1 --psm 13','--oem 0 --psm 13',
+                                          '--oem 1 --psm 6','--oem 0 --psm 6']
+
+         #: If this is False, and one set of options appears to succeed for an image, we'll
+         #: stop there. If this is True, we always run all option sets on every image.
+         self.force_all_ocr_options = False
+
+         #: Whether to apply PIL's ImageFilter.SHARPEN prior to OCR
+         self.apply_sharpening_filter = True
+
+         #: Tesseract should be on your system path, but you can also specify the
+         #: path explicitly, e.g. you can do either of these:
+         #:
+         #: * os.environ['PATH'] += r';C:\Program Files\Tesseract-OCR'
+         #: * self.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
+         self.tesseract_cmd = 'tesseract.exe'
+
+
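A sketch of how these options might be tuned; the values below are illustrative, not recommendations:

    options = DatetimeExtractionOptions()

    # Use taller rough crops, for cameras that draw a larger metadata banner
    options.image_crop_fraction = [0.08, 0.08]

    # Only try the LSTM engine in raw-line mode
    options.tesseract_config_strings = ['--oem 1 --psm 13']

    # Point at a specific Tesseract binary instead of relying on PATH
    options.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'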
+ #%% Support functions
+
+ def make_rough_crops(image,options=None):
+     """
+     Crops the top and bottom regions out of an image.
+
+     Args:
+         image (Image or str): a PIL Image or file name
+         options (DatetimeExtractionOptions, optional): OCR parameters
+
+     Returns:
+         dict: a dict with fields 'top' and 'bottom', each pointing to a new PIL Image
+     """
+
+     if options is None:
+         options = DatetimeExtractionOptions()
+
+     if isinstance(image,str):
+         image = vis_utils.open_image(image)
+
+     w = image.width
+     h = image.height
+
+     crop_height_top = round(options.image_crop_fraction[0] * h)
+     crop_height_bottom = round(options.image_crop_fraction[1] * h)
+
+     # l,t,r,b
+     #
+     # 0,0 is upper-left
+     top_crop = image.crop([0,0,w,crop_height_top])
+     bottom_crop = image.crop([0,h-crop_height_bottom,w,h])
+     return {'top':top_crop,'bottom':bottom_crop}
+
+ # ...def make_rough_crops(...)
+
+
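To make the crop geometry concrete: with the default image_crop_fraction of [0.045, 0.045] and a hypothetical 1920x1080 image, the boxes passed to PIL's crop() work out as follows:

    w, h = 1920, 1080
    crop_height = round(0.045 * h)             # 49 pixels
    top_box = (0, 0, w, crop_height)           # (0, 0, 1920, 49)
    bottom_box = (0, h - crop_height, w, h)    # (0, 1031, 1920, 1080)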
+ def crop_to_solid_region(rough_crop,crop_location,options=None):
+     """
+     Given a rough crop from the top or bottom of an image, finds the background color
+     and crops to the metadata region.
+
+     Within a region of an image (typically a crop from the top-ish or bottom-ish part of
+     an image), tightly crop to the solid portion (typically a region with a black background).
+
+     The success metric is just a binary indicator right now: 1.0 if we found a region we believe
+     contains a solid background, 0.0 otherwise.
+
+     Args:
+         rough_crop (Image): the PIL Image to crop
+         crop_location (str): 'top' or 'bottom'
+         options (DatetimeExtractionOptions, optional): OCR parameters
+
+     Returns:
+         dict: a dict with keys 'crop_pil' (Image), 'padded_crop_pil' (Image), and 'p_success' (float)
+     """
+
+     if options is None:
+         options = DatetimeExtractionOptions()
+
+     crop_to_solid_region_result = {}
+     crop_to_solid_region_result['crop_pil'] = None
+     crop_to_solid_region_result['padded_crop_pil'] = None
+     crop_to_solid_region_result['p_success'] = 0.0
+
+     # pil --> cv2
+     rough_crop_np = np.array(rough_crop)
+     rough_crop_np = rough_crop_np[:, :, ::-1].copy()
+
+     # Search *part* of the crop for the background value (the part closest to the top or bottom
+     # of the image)
+     rows_to_use_for_background_search = int(rough_crop_np.shape[0] * \
+         options.background_crop_fraction_of_rough_crop)
+
+     if crop_location == 'top':
+         background_search_image = rough_crop_np[0:rows_to_use_for_background_search,:,:]
+     elif crop_location == 'bottom':
+         background_search_image = rough_crop_np[-rows_to_use_for_background_search:,:,:]
+     else:
+         raise ValueError('Unrecognized crop location: {}'.format(crop_location))
+
+     background_search_image = cv2.cvtColor(background_search_image, cv2.COLOR_BGR2GRAY)
+     background_search_image = background_search_image.astype('uint8')
+     background_search_image = cv2.medianBlur(background_search_image,3)
+     pixel_values = background_search_image.flatten()
+     counts = np.bincount(pixel_values)
+     background_value = int(np.argmax(counts))
+
+     # Did we find a sensible mode that looks like a background value?
+     background_value_count = int(np.max(counts))
+     p_background_value = background_value_count / np.sum(counts)
+
+     if (p_background_value < options.min_background_fraction):
+         return crop_to_solid_region_result
+     else:
+         p_success = 1.0
+
+     analysis_image = cv2.cvtColor(rough_crop_np, cv2.COLOR_BGR2GRAY)
+     analysis_image = analysis_image.astype('uint8')
+     analysis_image = cv2.medianBlur(analysis_image,3)
+
+     # This will now be a binary image indicating which pixels are background
+     analysis_image = cv2.inRange(analysis_image,
+         background_value-options.background_tolerance,
+         background_value+options.background_tolerance)
+
+     # Use row heuristics to refine the crop
+     h = analysis_image.shape[0]
+     w = analysis_image.shape[1]
+
+     min_x = 0
+     min_y = -1
+     max_x = w
+     max_y = -1
+
+     # Find the first and last row that are mostly the background color
+     for y in range(h):
+         row_count = 0
+         for x in range(w):
+             if analysis_image[y][x] > 0:
+                 row_count += 1
+         row_fraction = row_count / w
+         if row_fraction > options.min_background_fraction_for_background_row:
+             if min_y == -1:
+                 min_y = y
+             max_y = y
+
+     assert (min_y == -1 and max_y == -1) or (min_y != -1 and max_y != -1)
+
+     if min_y == -1:
+         return crop_to_solid_region_result
+
+     if max_y == min_y:
+         return crop_to_solid_region_result
+
+     x = min_x
+     y = min_y
+     w = max_x-min_x
+     h = max_y-min_y
+
+     # Crop the image
+     crop_np = rough_crop_np[y:y+h,x:x+w]
+
+     # Tesseract doesn't like characters really close to the edge, so pad a little.
+     crop_padding = options.crop_padding
+     padded_crop_np = cv2.copyMakeBorder(crop_np,crop_padding,crop_padding,crop_padding,crop_padding,
+                                         cv2.BORDER_CONSTANT,
+                                         value=[background_value,background_value,background_value])
+
+     crop_pil = Image.fromarray(crop_np)
+     padded_crop_pil = Image.fromarray(padded_crop_np)
+
+     crop_to_solid_region_result['crop_pil'] = crop_pil
+     crop_to_solid_region_result['padded_crop_pil'] = padded_crop_pil
+     crop_to_solid_region_result['p_success'] = p_success
+
+     return crop_to_solid_region_result
+
+ # ...crop_to_solid_region(...)
+
+
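The background/row heuristic above, reduced to a few lines on a synthetic grayscale strip (illustrative only; the real function also blurs, pads, and handles failure cases):

    import numpy as np

    strip = np.zeros((20, 100), dtype=np.uint8)   # black metadata bar
    strip[5:8, 10:90] = 255                       # fake white "text"

    # The most common pixel value is treated as the background color
    background_value = int(np.argmax(np.bincount(strip.flatten())))   # 0
    is_background = np.abs(strip.astype(int) - background_value) <= 2

    # Rows that are mostly background bracket the text region
    row_fraction = is_background.mean(axis=1)
    background_rows = np.where(row_fraction > 0.5)[0]
    min_y, max_y = background_rows[0], background_rows[-1]            # 0 and 19 here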
+ def find_text_in_crops(rough_crops,options=None,tesseract_config_string=None):
+     """
+     Finds all text in each Image in the dict [rough_crops]; those images should be pretty small
+     regions by the time they get to this function, roughly the top or bottom 20% of an image.
+
+     Args:
+         rough_crops (dict): dict with keys 'top' and 'bottom', each pointing to a PIL Image
+             that has been cropped close to text
+         options (DatetimeExtractionOptions, optional): OCR parameters
+         tesseract_config_string (str, optional): optional CLI argument to pass to tesseract.exe
+
+     Returns:
+         dict: a dict with keys "top" and "bottom", where each value is a dict with keys
+         'text' (text found, if any) and 'crop_to_solid_region_results' (metadata about the OCR pass)
+     """
+
+     if options is None:
+         options = DatetimeExtractionOptions()
+
+     if tesseract_config_string is None:
+         tesseract_config_string = options.tesseract_config_strings[0]
+
+     find_text_in_crops_results = {}
+
+     # crop_location = 'top'
+     # crop_location = 'bottom'
+     for crop_location in ('top','bottom'):
+
+         find_text_in_crops_results[crop_location] = {}
+         find_text_in_crops_results[crop_location]['text'] = ''
+         find_text_in_crops_results[crop_location]['crop_to_solid_region_results'] = None
+
+         rough_crop = rough_crops[crop_location]
+
+         # Crop to the portion of the rough crop with a solid background color
+         crop_to_solid_region_results = crop_to_solid_region(rough_crop,crop_location,options)
+
+         find_text_in_crops_results[crop_location]['crop_to_solid_region_results'] = \
+             crop_to_solid_region_results
+
+         # Try cropping to a solid region; if that doesn't work, try running OCR on the whole
+         # rough crop.
+         if crop_to_solid_region_results['p_success'] >= options.p_crop_success_threshold:
+             padded_crop_pil = crop_to_solid_region_results['padded_crop_pil']
+         else:
+             # continue
+             padded_crop_pil = rough_crop
+
+         if options.apply_sharpening_filter:
+             padded_crop_pil = padded_crop_pil.filter(ImageFilter.SHARPEN)
+
+         # Find text in the padded crop
+         pytesseract.pytesseract.tesseract_cmd = options.tesseract_cmd
+         text = pytesseract.image_to_string(padded_crop_pil, lang='eng',
+                                            config=tesseract_config_string)
+
+         text = text.replace('\n', ' ').replace('\r', '').strip()
+
+         find_text_in_crops_results[crop_location]['text'] = text
+
+     # ...for each cropped region
+
+     return find_text_in_crops_results
+
+ # ...def find_text_in_crops(...)
+
+
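The OCR step itself is just pytesseract with an explicit engine / page-segmentation configuration; a standalone sketch (assumes Tesseract is installed; the crop filename is hypothetical):

    import pytesseract
    from PIL import Image, ImageFilter

    crop = Image.open('metadata_strip.png')   # a small crop containing the timestamp
    crop = crop.filter(ImageFilter.SHARPEN)

    # oem 1 = LSTM engine, psm 13 = treat the image as a single raw text line
    text = pytesseract.image_to_string(crop, lang='eng', config='--oem 1 --psm 13')
    text = text.replace('\n', ' ').replace('\r', '').strip()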
+ def _datetime_string_to_datetime(matched_string):
+     """
+     Takes an OCR-matched datetime string, does a little cleanup, and parses a date
+     from it.
+
+     By the time a string gets to this function, it should be a proper date string, with
+     no extraneous characters other than spaces around colons or hyphens.
+     """
+
+     matched_string = matched_string.replace(' -','-')
+     matched_string = matched_string.replace('- ','-')
+     matched_string = matched_string.replace(' :',':')
+     matched_string = matched_string.replace(': ',':')
+     try:
+         extracted_datetime = dateparse(matched_string)
+     except Exception:
+         extracted_datetime = None
+     return extracted_datetime
+
+
+ def _get_datetime_from_strings(strings,options=None):
+     """
+     Given a string or list of strings, search for exactly one datetime in those strings,
+     using a series of regular expressions.
+
+     Strings are currently just concatenated before searching for a datetime.
+     """
+
+     if options is None:
+         options = DatetimeExtractionOptions()
+
+     if isinstance(strings,str):
+         s = strings
+     else:
+         s = ' '.join(strings).lower()
+     s = s.replace('—','-')
+     s = ''.join(e for e in s if e.isalnum() or e in ':-/' or e.isspace())
+
+     ### AM/PM
+
+     # 2013-10-02 11:40:50 AM
+     m = re.search(r'(\d\d\d\d)\s?-\s?(\d\d)\s?-\s?(\d\d)\s+(\d+)\s?:?\s?(\d\d)\s?:\s?(\d\d)\s*([a|p]m)',s)
+     if m is not None:
+         return _datetime_string_to_datetime(m.group(0))
+
+     # 04/01/2017 08:54:00AM
+     m = re.search(r'(\d\d)\s?/\s?(\d\d)\s?/\s?(\d\d\d\d)\s+(\d+)\s?:\s?(\d\d)\s?:\s?(\d\d)\s*([a|p]m)',s)
+     if m is not None:
+         return _datetime_string_to_datetime(m.group(0))
+
+     # 2017/04/01 08:54:00AM
+     m = re.search(r'(\d\d\d\d)\s?/\s?(\d\d)\s?/\s?(\d\d)\s+(\d+)\s?:\s?(\d\d)\s?:\s?(\d\d)\s*([a|p]m)',s)
+     if m is not None:
+         return _datetime_string_to_datetime(m.group(0))
+
+     # 04/01/2017 08:54AM
+     m = re.search(r'(\d\d)\s?/\s?(\d\d)\s?/\s?(\d\d\d\d)\s+(\d+)\s?:\s?(\d\d)\s*([a|p]m)',s)
+     if m is not None:
+         return _datetime_string_to_datetime(m.group(0))
+
+     # 2017/04/01 08:54AM
+     m = re.search(r'(\d\d\d\d)\s?/\s?(\d\d)\s?/\s?(\d\d)\s+(\d+)\s?:\s?(\d\d)\s*([a|p]m)',s)
+     if m is not None:
+         return _datetime_string_to_datetime(m.group(0))
+
+     ### No AM/PM
+
+     # 2013-07-27 04:56:35
+     m = re.search(r'(\d\d\d\d)\s?-\s?(\d\d)\s?-\s?(\d\d)\s*(\d\d)\s?:\s?(\d\d)\s?:\s?(\d\d)',s)
+     if m is not None:
+         return _datetime_string_to_datetime(m.group(0))
+
+     # 07-27-2013 04:56:35
+     m = re.search(r'(\d\d)\s?-\s?(\d\d)\s?-\s?(\d\d\d\d)\s*(\d\d)\s?:\s?(\d\d)\s?:\s?(\d\d)',s)
+     if m is not None:
+         return _datetime_string_to_datetime(m.group(0))
+
+     # 2013/07/27 04:56:35
+     m = re.search(r'(\d\d\d\d)\s?/\s?(\d\d)\s?/\s?(\d\d)\s*(\d\d)\s?:\s?(\d\d)\s?:\s?(\d\d)',s)
+     if m is not None:
+         return _datetime_string_to_datetime(m.group(0))
+
+     # 07/27/2013 04:56:35
+     m = re.search(r'(\d\d)\s?/\s?(\d\d)\s?/\s?(\d\d\d\d)\s*(\d\d)\s?:\s?(\d\d)\s?:\s?(\d\d)',s)
+     if m is not None:
+         return _datetime_string_to_datetime(m.group(0))
+
+     return None
+
+ # ...def _get_datetime_from_strings(...)
+
+
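As an example of the regex-then-parse flow, here is a typical (made-up) OCR string and the first pattern above that matches it:

    import re
    from dateutil.parser import parse as dateparse

    s = 'trailcam4 2013-10-02 11:40:50 am 22c'
    m = re.search(r'(\d\d\d\d)\s?-\s?(\d\d)\s?-\s?(\d\d)\s+(\d+)\s?:?\s?(\d\d)\s?:\s?(\d\d)\s*([a|p]m)', s)
    if m is not None:
        extracted_datetime = dateparse(m.group(0))
        # datetime.datetime(2013, 10, 2, 11, 40, 50)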
+ def get_datetime_from_image(image,include_crops=True,options=None):
+     """
+     Tries to find the datetime string (if present) in an image.
+
+     Args:
+         image (Image or str): the PIL Image object or image filename in which we should look for
+             datetime information.
+         include_crops (bool, optional): whether to include cropped images in the return dict (set
+             this to False if you're worried about size and you're processing a zillion images)
+         options (DatetimeExtractionOptions, optional): OCR parameters
+
+     Returns:
+         dict: a dict with fields:
+
+         - datetime: Python datetime object, or None
+         - text_results: list of length-2 lists of strings (one per Tesseract configuration tried)
+         - all_extracted_datetimes: the datetime extracted (or None) for each Tesseract
+           configuration tried
+         - ocr_results: detailed results from the OCR process, including crops as PIL images;
+           only included if include_crops is True
+     """
+
+     if options is None:
+         options = DatetimeExtractionOptions()
+
+     if isinstance(image,str):
+         image = vis_utils.open_image(image)
+
+     # Crop the top and bottom from the image
+     rough_crops = make_rough_crops(image,options)
+     assert len(rough_crops) == 2
+
+     all_extracted_datetimes = {}
+     all_text_results = []
+     all_ocr_results = []
+
+     extracted_datetime = None
+
+     # Find text, possibly trying all config strings
+     #
+     # tesseract_config_string = options.tesseract_config_strings[0]
+     for tesseract_config_string in options.tesseract_config_strings:
+
+         ocr_results = find_text_in_crops(rough_crops,options,tesseract_config_string)
+         all_ocr_results.append(ocr_results)
+
+         text_results = [v['text'] for v in ocr_results.values()]
+         assert len(text_results) == 2
+         all_text_results.append(text_results)
+
+         # Find datetime
+         extracted_datetime_this_option_set = _get_datetime_from_strings(text_results,options)
+         assert isinstance(extracted_datetime_this_option_set,datetime.datetime) or \
+             (extracted_datetime_this_option_set is None)
+
+         all_extracted_datetimes[tesseract_config_string] = \
+             extracted_datetime_this_option_set
+
+         if extracted_datetime_this_option_set is not None:
+             if extracted_datetime is None:
+                 extracted_datetime = extracted_datetime_this_option_set
+             if not options.force_all_ocr_options:
+                 break
+
+     # ...for each set of OCR options
+
+     if extracted_datetime is not None:
+         assert extracted_datetime.year <= 2023 and extracted_datetime.year >= 1990
+
+     to_return = {}
+     to_return['datetime'] = extracted_datetime
+
+     to_return['text_results'] = all_text_results
+     to_return['all_extracted_datetimes'] = all_extracted_datetimes
+
+     if include_crops:
+         to_return['ocr_results'] = all_ocr_results
+     else:
+         to_return['ocr_results'] = None
+
+     return to_return
+
+ # ...def get_datetime_from_image(...)
+
+
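A sketch of how the returned dict might be inspected, e.g. to see whether different Tesseract configurations agreed (hypothetical filename):

    result = get_datetime_from_image('some_image.jpg', include_crops=False)

    print(result['datetime'])
    for config_string, dt in result['all_extracted_datetimes'].items():
        print('{}: {}'.format(config_string, dt))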
+ def try_get_datetime_from_image(filename,include_crops=False,options=None):
+     """
+     Try/catch wrapper for get_datetime_from_image, optionally trying multiple option sets
+     until we find a datetime.
+
+     Args:
+         filename (Image or str): the PIL Image object or image filename in which we should look
+             for datetime information.
+         include_crops (bool, optional): whether to include cropped images in the return dict (set
+             this to False if you're worried about size and you're processing a zillion images)
+         options (DatetimeExtractionOptions or list, optional): OCR parameters, either one
+             DatetimeExtractionOptions object or a list of options to try
+
+     Returns:
+         dict: A dict with fields:
+         - datetime: Python datetime object, or None
+         - text_results: list of length-2 lists of strings (one per Tesseract configuration tried)
+         - all_extracted_datetimes: the datetime extracted (or None) for each Tesseract
+           configuration tried
+         - ocr_results: detailed results from the OCR process, including crops as PIL images;
+           only included if include_crops is True
+     """
+
+     if options is None:
+         options = DatetimeExtractionOptions()
+
+     if not is_iterable(options):
+         options = [options]
+
+     result = {}
+     result['error'] = None
+
+     for i_option_set,current_options in enumerate(options):
+         try:
+             result = get_datetime_from_image(filename,include_crops=include_crops,options=current_options)
+             result['options_index'] = i_option_set
+             if 'datetime' in result and result['datetime'] is not None:
+                 break
+         except Exception as e:
+             result['error'] = str(e)
+
+     return result
+
+
+ def get_datetimes_for_folder(folder_name,output_file=None,n_to_sample=-1,options=None,
+                              n_workers=16,use_threads=False):
+     """
+     The main entry point for this module. Tries to retrieve metadata from pixels for every
+     image in [folder_name], optionally writing the results to the .json file [output_file].
+
+     Args:
+         folder_name (str): the folder of images to process recursively
+         output_file (str, optional): the .json file to which we should write results; if None,
+             just returns the results
+         n_to_sample (int, optional): for debugging only, used to limit the number of images
+             we process
+         options (DatetimeExtractionOptions or list, optional): OCR parameters, either one
+             DatetimeExtractionOptions object or a list of options to try for each image
+         n_workers (int, optional): the number of parallel workers to use; set to <= 1 to disable
+             parallelization
+         use_threads (bool, optional): whether to use threads (True) or processes (False) for
+             parallelization; not relevant if n_workers <= 1
+
+     Returns:
+         dict: a dict mapping filenames to datetime extraction results, see try_get_datetime_from_image
+         for the format of each value in the dict.
+     """
+
+     if options is None:
+         options = DatetimeExtractionOptions()
+
+     image_file_names = \
+         find_images(folder_name,convert_slashes=True,
+                     return_relative_paths=False,recursive=True)
+
+     if n_to_sample > 0:
+         import random
+         random.seed(0)
+         image_file_names = random.sample(image_file_names,n_to_sample)
+
+     if n_workers <= 1:
+
+         all_results = []
+         for fn_abs in tqdm(image_file_names):
+             all_results.append(try_get_datetime_from_image(fn_abs,options=options))
+
+     else:
+
+         # Don't spawn more than one worker per image
+         if n_workers > len(image_file_names):
+             n_workers = len(image_file_names)
+
+         pool = None
+         try:
+             if use_threads:
+                 from multiprocessing.pool import ThreadPool
+                 pool = ThreadPool(n_workers)
+                 worker_string = 'threads'
+             else:
+                 from multiprocessing.pool import Pool
+                 pool = Pool(n_workers)
+                 worker_string = 'processes'
+
+             print('Starting a pool of {} {}'.format(n_workers,worker_string))
+
+             all_results = list(tqdm(pool.imap(
+                 partial(try_get_datetime_from_image,options=options),image_file_names),
+                 total=len(image_file_names)))
+         finally:
+             if pool is not None:
+                 pool.close()
+                 pool.join()
+                 print('Pool closed and joined for datetime extraction')
+
+     filename_to_results = {}
+
+     # fn_relative = image_file_names[0]
+     for i_file,fn_abs in enumerate(image_file_names):
+         filename_to_results[fn_abs] = all_results[i_file]
+
+     if output_file is not None:
+         with open(output_file,'w') as f:
+             json.dump(filename_to_results,f,indent=1,default=str)
+
+     return filename_to_results
+
+
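A typical invocation of this entry point (hypothetical paths; worker counts are illustrative):

    filename_to_results = get_datetimes_for_folder('/data/camera_traps',
                                                   output_file='/data/ocr_results.json',
                                                   n_workers=8,
                                                   use_threads=True)

    n_failures = sum(1 for r in filename_to_results.values() if r.get('datetime') is None)
    print('No datetime extracted for {} of {} images'.format(
        n_failures, len(filename_to_results)))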
+ #%% Interactive driver
+
+ if False:
+
+     #%% Process images
+
+     folder_name = r'g:\temp\island_conservation_camera_traps'
+     output_file = r'g:\temp\ocr_results.json'
+     from megadetector.utils.path_utils import insert_before_extension
+     output_file = insert_before_extension(output_file)
+     n_to_sample = -1
+     assert os.path.isdir(folder_name)
+     options_a = DatetimeExtractionOptions()
+     options_b = DatetimeExtractionOptions()
+     options_b.image_crop_fraction = [0.08 , 0.08]
+     options_a.force_all_ocr_options = False
+     options_b.force_all_ocr_options = False
+     # all_options = [options_a,options_b]
+     all_options = [options_a]
+     filename_to_results = get_datetimes_for_folder(folder_name,output_file,
+                                                    n_to_sample=n_to_sample,options=all_options)
+
+
+     #%% Load results
+
+     # output_file = r"G:\temp\ocr_results.2023.10.31.07.37.54.json"
+     with open(output_file,'r') as f:
+         filename_to_results = json.load(f)
+     filenames = sorted(list(filename_to_results.keys()))
+     print('Loaded results for {} files'.format(len(filename_to_results)))
+
+
+     #%% Scrap cell
+
+     fn = 'g:/camera_traps/camera_trap_images/2018.07.02/newcam/people/DSCF0273.JPG'
+     include_crops = False
+     options_a = DatetimeExtractionOptions()
+     options_b = DatetimeExtractionOptions()
+     options_b.image_crop_fraction = [0.08 , 0.08]
+     image = vis_utils.open_image(fn) # noqa
+     result = try_get_datetime_from_image(fn,options=[options_a,options_b]) # noqa
+     print(result)
+
+     # open_file(fn)
+     # rough_crops = make_rough_crops(image,options=options)
+
+
+     #%% Look for OCR or parsing failures
+
+     bad_tokens = ()
+
+     files_with_disagreements = set()
+
+     # i_fn = 0; fn = filenames[i_fn]
+     for i_fn,fn in enumerate(filenames):
+
+         image = fn
+         results = filename_to_results[fn]
+
+         if 'text_results' not in results:
+             raise Exception('no results available for {} ({})'.format(i_fn,fn))
+             # print('Skipping {}, no results'.format(i_fn))
+             # continue
+
+         s = ' '.join([x[0] for x in results['text_results']])
+
+         known_bad = False
+         for bad_token in bad_tokens:
+             if bad_token in s:
+                 known_bad = True
+         if known_bad:
+             continue
+
+         extracted_datetime = results['datetime']
+
+         # If we have a datetime, make sure all successful OCR results agree
+         if extracted_datetime is not None:
+             for config_string in results['all_extracted_datetimes']:
+                 if results['all_extracted_datetimes'][config_string] is not None:
+                     if results['all_extracted_datetimes'][config_string] != extracted_datetime:
+                         files_with_disagreements.add(fn)
+         else:
+             print('Falling back for {} ({})'.format(i_fn,fn))
+             ocr_results = get_datetime_from_image(fn)
+             extracted_datetime = ocr_results['datetime']
+
+         if extracted_datetime is None:
+             print('Failure at {}: {}'.format(i_fn,s))
+
+         # open_file(fn)
+         # get_datetime_from_image(fn)
+
+
+     #%% Write results to an HTML file for testing
+
+     n_to_sample = 5000
+     if (n_to_sample >= 0) and (len(filename_to_results) > n_to_sample):
+         filenames = sorted(list(filename_to_results.keys()))
+         import random
+         random.seed(0)
+         keys = random.sample(filenames,n_to_sample)
+         filename_to_results = {k: filename_to_results[k] for k in keys}
+
+     preview_dir = r'g:\temp\ocr-preview'
+     os.makedirs(preview_dir,exist_ok=True)
+
+     def resize_image_for_preview(fn_abs):
+         fn_relative = os.path.relpath(fn_abs,folder_name)
+         resized_image = vis_utils.resize_image(fn_abs,target_width=600)
+         resized_fn = os.path.join(preview_dir,fn_relative)
+         os.makedirs(os.path.dirname(resized_fn),exist_ok=True)
+         resized_image.save(resized_fn)
+         return resized_fn
+
+     # Resize images in parallel
+     n_rendering_workers = 16
+
+     if n_rendering_workers <= 1:
+         for fn_abs in tqdm(filename_to_results.keys()):
+             resize_image_for_preview(fn_abs)
+     else:
+         # from multiprocessing.pool import Pool as RenderingPool; worker_string = 'processes'
+         from multiprocessing.pool import ThreadPool as RenderingPool; worker_string = 'threads'
+         pool = RenderingPool(n_rendering_workers)
+
+         print('Starting rendering pool with {} {}'.format(n_rendering_workers,worker_string))
+
+         _ = list(tqdm(pool.imap(resize_image_for_preview,filename_to_results.keys()),
+                       total=len(filename_to_results)))
+
+
+     def make_datetime_preview_page(filenames,html_file):
+
+         html_image_list = []
+         html_options = write_html_image_list.write_html_image_list()
+         html_options['maxFiguresPerHtmlFile'] = 2500
+         html_options['defaultImageStyle'] = 'margin:0px;margin-top:5px;margin-bottom:30px;'
+
+         # fn_abs = filenames[0]
+         for fn_abs in filenames:
+
+             fn_relative = os.path.relpath(fn_abs,folder_name)
+             # resized_fn = os.path.join(preview_dir,fn_relative)
+             results_this_image = filename_to_results[fn_abs]
+
+             extracted_datetime = results_this_image['datetime']
+             title = 'Image: {}<br/>Extracted datetime: {}'.format(fn_relative,extracted_datetime)
+             html_image_list.append({'filename':fn_relative,'title':title})
+
+             # ...for each crop
+
+         # ...for each image
+
+         html_options['makeRelative'] = True
+         write_html_image_list.write_html_image_list(html_file,
+                                                     html_image_list,
+                                                     html_options)
+         open_file(html_file)
+         return html_image_list
+
+     failed_files = []
+     for fn_abs in filename_to_results:
+         results_this_image = filename_to_results[fn_abs]
+         if results_this_image['datetime'] is None:
+             failed_files.append(fn_abs)
+
+     print('Found {} failures'.format(len(failed_files)))
+
+     output_summary_file = os.path.join(preview_dir,'summary.html')
+     html_image_list = make_datetime_preview_page(sorted(list(filename_to_results.keys())),output_summary_file)
+
+     failure_summary_file = os.path.join(preview_dir,'failures.html')
+     html_image_list_failures = make_datetime_preview_page(failed_files,failure_summary_file)
+
+     filenames = failed_files
+     html_file = failure_summary_file
+
+
+     #%% Other approaches to getting dates from strings
+
+     # ...that didn't really work out.
+
+     # pip install dateparser
+     import dateparser
+
+     # pip install datefinder
+     import datefinder
+
+     from dateparser.search import search_dates # noqa
+
+     dateparser_settings = {'PREFER_DATES_FROM':'past','STRICT_PARSING':True}
+
+     dateparser_result = dateparser.search.search_dates(s, settings=dateparser_settings)
+
+     if dateparser_result is not None:
+         assert len(dateparser_result) == 1
+         extracted_datetime = dateparser_result[0][1]
+     else:
+         matches = datefinder.find_dates(s,strict=False)
+         matches_list = [m for m in matches]
+         if len(matches_list) == 1:
+             extracted_datetime = matches_list[0]
+         else:
+             extracted_datetime = None
+
+     if extracted_datetime is not None:
+         assert extracted_datetime.year <= 2023 and extracted_datetime.year >= 1990
+
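The fragment above relies on an OCR string s left over from the scrap cell; a self-contained version of the same comparison might look like this (illustrative string; dateparser and datefinder are optional extras, not module dependencies):

    import dateparser.search
    import datefinder

    s = '2013-07-27 04:56:35 38f'   # hypothetical OCR output

    dateparser_settings = {'PREFER_DATES_FROM': 'past', 'STRICT_PARSING': True}
    dateparser_result = dateparser.search.search_dates(s, settings=dateparser_settings)

    if dateparser_result is not None:
        extracted_datetime = dateparser_result[0][1]
    else:
        matches_list = list(datefinder.find_dates(s, strict=False))
        extracted_datetime = matches_list[0] if len(matches_list) == 1 else None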