learning-loop-node 0.13.7.tar.gz → 0.15.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of learning-loop-node might be problematic.

Files changed (104)
  1. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/PKG-INFO +35 -38
  2. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/README.md +34 -37
  3. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/data_classes/__init__.py +2 -2
  4. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/data_classes/image_metadata.py +5 -0
  5. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/data_classes/training.py +3 -2
  6. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/data_exchanger.py +3 -3
  7. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/detector_logic.py +8 -5
  8. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/detector_node.py +105 -44
  9. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/inbox_filter/relevance_filter.py +11 -9
  10. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/outbox.py +134 -44
  11. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/rest/detect.py +3 -3
  12. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/rest/upload.py +4 -3
  13. learning_loop_node-0.15.0/learning_loop_node/helpers/background_tasks.py +78 -0
  14. learning_loop_node-0.15.0/learning_loop_node/helpers/run.py +21 -0
  15. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/node.py +11 -4
  16. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/annotator/conftest.py +9 -4
  17. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/annotator/test_annotator_node.py +10 -2
  18. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py +4 -3
  19. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/test_client_communication.py +1 -23
  20. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/test_outbox.py +7 -16
  21. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/test_relevance_filter.py +3 -3
  22. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/general/conftest.py +8 -2
  23. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/conftest.py +2 -2
  24. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/trainer_logic_generic.py +16 -4
  25. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/pyproject.toml +1 -1
  26. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/__init__.py +0 -0
  27. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/annotation/__init__.py +0 -0
  28. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/annotation/annotator_logic.py +0 -0
  29. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/annotation/annotator_node.py +0 -0
  30. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/data_classes/annotations.py +0 -0
  31. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/data_classes/detections.py +0 -0
  32. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/data_classes/general.py +0 -0
  33. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/data_classes/socket_response.py +0 -0
  34. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/__init__.py +0 -0
  35. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/exceptions.py +0 -0
  36. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/inbox_filter/__init__.py +0 -0
  37. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/inbox_filter/cam_observation_history.py +0 -0
  38. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/rest/__init__.py +0 -0
  39. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/rest/about.py +0 -0
  40. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/rest/backdoor_controls.py +0 -0
  41. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/rest/model_version_control.py +0 -0
  42. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/rest/operation_mode.py +0 -0
  43. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/detector/rest/outbox_mode.py +0 -0
  44. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/enums/__init__.py +0 -0
  45. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/enums/annotator.py +0 -0
  46. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/enums/detector.py +0 -0
  47. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/enums/general.py +0 -0
  48. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/enums/trainer.py +0 -0
  49. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/examples/novelty_score_updater.py +0 -0
  50. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/globals.py +0 -0
  51. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/helpers/__init__.py +0 -0
  52. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/helpers/environment_reader.py +0 -0
  53. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/helpers/gdrive_downloader.py +0 -0
  54. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/helpers/log_conf.py +0 -0
  55. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/helpers/misc.py +0 -0
  56. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/loop_communication.py +0 -0
  57. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/py.typed +0 -0
  58. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/rest.py +0 -0
  59. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/__init__.py +0 -0
  60. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/annotator/__init__.py +0 -0
  61. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/annotator/pytest.ini +0 -0
  62. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/__init__.py +0 -0
  63. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/conftest.py +0 -0
  64. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/inbox_filter/__init__.py +0 -0
  65. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/inbox_filter/test_observation.py +0 -0
  66. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py +0 -0
  67. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/pytest.ini +0 -0
  68. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/test.jpg +0 -0
  69. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/test_detector_node.py +0 -0
  70. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/detector/testing_detector.py +0 -0
  71. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/general/__init__.py +0 -0
  72. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/general/pytest.ini +0 -0
  73. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/general/test_data/file_1.txt +0 -0
  74. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/general/test_data/file_2.txt +0 -0
  75. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/general/test_data/model.json +0 -0
  76. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/general/test_data_classes.py +0 -0
  77. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/general/test_downloader.py +0 -0
  78. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/general/test_learning_loop_node.py +0 -0
  79. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/test_helper.py +0 -0
  80. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/__init__.py +0 -0
  81. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/pytest.ini +0 -0
  82. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/state_helper.py +0 -0
  83. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/states/__init__.py +0 -0
  84. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/states/test_state_cleanup.py +0 -0
  85. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/states/test_state_detecting.py +0 -0
  86. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/states/test_state_download_train_model.py +0 -0
  87. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/states/test_state_prepare.py +0 -0
  88. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/states/test_state_sync_confusion_matrix.py +0 -0
  89. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/states/test_state_train.py +0 -0
  90. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/states/test_state_upload_detections.py +0 -0
  91. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/states/test_state_upload_model.py +0 -0
  92. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/test_errors.py +0 -0
  93. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/test_trainer_states.py +0 -0
  94. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/tests/trainer/testing_trainer_logic.py +0 -0
  95. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/__init__.py +0 -0
  96. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/downloader.py +0 -0
  97. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/exceptions.py +0 -0
  98. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/executor.py +0 -0
  99. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/io_helpers.py +0 -0
  100. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/rest/__init__.py +0 -0
  101. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/rest/backdoor_controls.py +0 -0
  102. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/test_executor.py +0 -0
  103. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/trainer_logic.py +0 -0
  104. {learning_loop_node-0.13.7 → learning_loop_node-0.15.0}/learning_loop_node/trainer/trainer_node.py +0 -0
PKG-INFO:

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: learning-loop-node
- Version: 0.13.7
+ Version: 0.15.0
  Summary: Python Library for Nodes which connect to the Zauberzeug Learning Loop
  Home-page: https://github.com/zauberzeug/learning_loop_node
  License: MIT
@@ -100,11 +100,16 @@ You can additionally provide the following camera parameters:
  - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled` (example curl parameter `-H 'autoupload: all'`)
  - `camera-id`: a string which groups images for submission together (example curl parameter `-H 'camera-id: front_cam'`)

+ To use the socketio interface, the caller needs to connect to the detector node's socketio server and emit the `detect` or `batch_detect` event with the image data and image metadata. Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).
+
  The detector also has a sio **upload endpoint** that can be used to upload images and detections to the learning loop. The function receives a json dictionary, with the following entries:

  - `image`: the image data in jpg format
  - `tags`: a list of strings. If not provided the tag is `picked_by_system`
  - `detections`: a dictionary representing the detections. UUIDs for the classes are automatically determined based on the category names. This field is optional. If not provided, no detections are uploaded.
+ - `source`: optional source identifier for the image
+ - `creation_date`: optional creation date for the image
+ - `upload_priority`: boolean flag to prioritize the upload (defaults to False)

  The endpoint returns None if the upload was successful and an error message otherwise.

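The socketio workflow documented in the hunk above can be exercised with any Socket.IO client. The following is a minimal, hypothetical sketch (not part of the package): the address, port and image path are assumptions, and the payload keys mirror the `detect` handler visible in the detector_node.py hunks further down.

```python
import asyncio

import socketio


async def main() -> None:
    sio = socketio.AsyncClient()
    await sio.connect('http://localhost:8004')  # assumed address of the detector node
    with open('test.jpg', 'rb') as f:           # hypothetical image file
        image = f.read()
    result = await sio.call('detect', {
        'image': image,             # raw jpg bytes
        'camera-id': 'front_cam',   # optional grouping key
        'tags': ['doc-example'],    # optional tags
        'autoupload': 'filtered',   # 'filtered' (default), 'all' or 'disabled'
    })
    print(result)                   # detections as a dict, or {'error': ...}
    await sio.disconnect()


asyncio.run(main())
```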
@@ -187,58 +192,52 @@ Upload a model with
  The model should now be available for the format 'format_a'
  `curl "https://learning-loop.ai/api/zauberzeug/projects/demo/models?format=format_a"`

- ````
-
+ ```json
  {
- "models": [
- {
- "id": "3c20d807-f71c-40dc-a996-8a8968aa5431",
- "version": "4.0",
- "formats": [
- "format_a"
- ],
- "created": "2021-06-01T06:28:21.289092",
- "comment": "uploaded at 2021-06-01 06:28:21.288442",
- ...
+ "models": [
+ {
+ "id": "3c20d807-f71c-40dc-a996-8a8968aa5431",
+ "version": "4.0",
+ "formats": [
+ "format_a"
+ ],
+ "created": "2021-06-01T06:28:21.289092",
+ "comment": "uploaded at 2021-06-01 06:28:21.288442",
+ ...
+ }
+ ]
  }
- ]
- }
-
  ```

  but not in the format_b
  `curl "https://learning-loop.ai/api/zauberzeug/projects/demo/models?format=format_b"`

- ```
-
+ ```json
  {
- "models": []
+ "models": []
  }
-
  ```

  Connect the Node to the Learning Loop by simply starting the container.
  After a short time the converted model should be available as well.
  `curl https://learning-loop.ai/api/zauberzeug/projects/demo/models?format=format_b`

- ```
-
- {
- "models": [
+ ```json
  {
- "id": "3c20d807-f71c-40dc-a996-8a8968aa5431",
- "version": "4.0",
- "formats": [
- "format_a",
- "format_b",
- ],
- "created": "2021-06-01T06:28:21.289092",
- "comment": "uploaded at 2021-06-01 06:28:21.288442",
- ...
- }
- ]
+ "models": [
+ {
+ "id": "3c20d807-f71c-40dc-a996-8a8968aa5431",
+ "version": "4.0",
+ "formats": [
+ "format_a",
+ "format_b",
+ ],
+ "created": "2021-06-01T06:28:21.289092",
+ "comment": "uploaded at 2021-06-01 06:28:21.288442",
+ ...
+ }
+ ]
  }
-
  ```

  ## About Models (the currency between Nodes)
@@ -257,6 +256,4 @@ After a short time the converted model should be available as well.
  - Nodes add properties to `model.json`, which contains all the information which are needed by subsequent nodes. These are typically the properties:
  - `resolution`: resolution in which the model expects images (as `int`, since the resolution is mostly square - later, ` resolution_x`` resolution_y ` would also be conceivable or `resolutions` to give a list of possible resolutions)
  - `categories`: list of categories with name, id, (later also type), in the order in which they are used by the model -- this is neccessary to be robust about renamings
- ```
- ````

README.md:

@@ -60,11 +60,16 @@ You can additionally provide the following camera parameters:
  - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled` (example curl parameter `-H 'autoupload: all'`)
  - `camera-id`: a string which groups images for submission together (example curl parameter `-H 'camera-id: front_cam'`)

+ To use the socketio interface, the caller needs to connect to the detector node's socketio server and emit the `detect` or `batch_detect` event with the image data and image metadata. Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).
+
  The detector also has a sio **upload endpoint** that can be used to upload images and detections to the learning loop. The function receives a json dictionary, with the following entries:

  - `image`: the image data in jpg format
  - `tags`: a list of strings. If not provided the tag is `picked_by_system`
  - `detections`: a dictionary representing the detections. UUIDs for the classes are automatically determined based on the category names. This field is optional. If not provided, no detections are uploaded.
+ - `source`: optional source identifier for the image
+ - `creation_date`: optional creation date for the image
+ - `upload_priority`: boolean flag to prioritize the upload (defaults to False)

  The endpoint returns None if the upload was successful and an error message otherwise.

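Analogously, the new upload fields documented above (`source`, `creation_date`, `upload_priority`) can be sent through the sio `upload` event. This is again a hedged sketch with an assumed address and file name; note that, per the detector_node.py diff below, the handler now answers `{'status': 'OK'}` on success instead of `None`.

```python
import asyncio

import socketio


async def upload_example() -> None:
    sio = socketio.AsyncClient()
    await sio.connect('http://localhost:8004')  # assumed detector address
    with open('test.jpg', 'rb') as f:           # hypothetical image file
        image = f.read()
    response = await sio.call('upload', {
        'image': image,                          # jpg bytes
        'tags': ['manual_upload'],               # 'picked_by_system' is appended automatically
        'source': 'doc-example',                 # optional source identifier
        'creation_date': '2024-01-01T12:00:00',  # optional creation date
        'upload_priority': True,                 # prioritize this upload
    })
    print(response)                              # {'status': 'OK'} or {'error': ...}
    await sio.disconnect()


asyncio.run(upload_example())
```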
@@ -147,58 +152,52 @@ Upload a model with
  The model should now be available for the format 'format_a'
  `curl "https://learning-loop.ai/api/zauberzeug/projects/demo/models?format=format_a"`

- ````
-
+ ```json
  {
- "models": [
- {
- "id": "3c20d807-f71c-40dc-a996-8a8968aa5431",
- "version": "4.0",
- "formats": [
- "format_a"
- ],
- "created": "2021-06-01T06:28:21.289092",
- "comment": "uploaded at 2021-06-01 06:28:21.288442",
- ...
+ "models": [
+ {
+ "id": "3c20d807-f71c-40dc-a996-8a8968aa5431",
+ "version": "4.0",
+ "formats": [
+ "format_a"
+ ],
+ "created": "2021-06-01T06:28:21.289092",
+ "comment": "uploaded at 2021-06-01 06:28:21.288442",
+ ...
+ }
+ ]
  }
- ]
- }
-
  ```

  but not in the format_b
  `curl "https://learning-loop.ai/api/zauberzeug/projects/demo/models?format=format_b"`

- ```
-
+ ```json
  {
- "models": []
+ "models": []
  }
-
  ```

  Connect the Node to the Learning Loop by simply starting the container.
  After a short time the converted model should be available as well.
  `curl https://learning-loop.ai/api/zauberzeug/projects/demo/models?format=format_b`

- ```
-
- {
- "models": [
+ ```json
  {
- "id": "3c20d807-f71c-40dc-a996-8a8968aa5431",
- "version": "4.0",
- "formats": [
- "format_a",
- "format_b",
- ],
- "created": "2021-06-01T06:28:21.289092",
- "comment": "uploaded at 2021-06-01 06:28:21.288442",
- ...
- }
- ]
+ "models": [
+ {
+ "id": "3c20d807-f71c-40dc-a996-8a8968aa5431",
+ "version": "4.0",
+ "formats": [
+ "format_a",
+ "format_b",
+ ],
+ "created": "2021-06-01T06:28:21.289092",
+ "comment": "uploaded at 2021-06-01 06:28:21.288442",
+ ...
+ }
+ ]
  }
-
  ```

  ## About Models (the currency between Nodes)
@@ -217,5 +216,3 @@ After a short time the converted model should be available as well.
  - Nodes add properties to `model.json`, which contains all the information which are needed by subsequent nodes. These are typically the properties:
  - `resolution`: resolution in which the model expects images (as `int`, since the resolution is mostly square - later, ` resolution_x`` resolution_y ` would also be conceivable or `resolutions` to give a list of possible resolutions)
  - `categories`: list of categories with name, id, (later also type), in the order in which they are used by the model -- this is neccessary to be robust about renamings
- ```
- ````
learning_loop_node/data_classes/__init__.py:

@@ -3,7 +3,7 @@ from .detections import (BoxDetection, ClassificationDetection, Detections, Obse
  SegmentationDetection, Shape)
  from .general import (AboutResponse, AnnotationNodeStatus, Category, Context, DetectionStatus, ErrorConfiguration,
  ModelInformation, ModelVersionResponse, NodeState, NodeStatus)
- from .image_metadata import ImageMetadata
+ from .image_metadata import ImageMetadata, ImagesMetadata
  from .socket_response import SocketResponse
  from .training import Errors, PretrainedModel, Training, TrainingError, TrainingOut, TrainingStateData, TrainingStatus

@@ -12,7 +12,7 @@ __all__ = [
  'BoxDetection', 'ClassificationDetection', 'ImageMetadata', 'Observation', 'Point', 'PointDetection',
  'SegmentationDetection', 'Shape', 'Detections',
  'AnnotationNodeStatus', 'Category', 'Context', 'DetectionStatus', 'ErrorConfiguration',
- 'ModelInformation', 'NodeState', 'NodeStatus', 'ModelVersionResponse',
+ 'ModelInformation', 'NodeState', 'NodeStatus', 'ModelVersionResponse', 'ImagesMetadata',
  'SocketResponse',
  'Errors', 'PretrainedModel', 'Training',
  'TrainingError', 'TrainingOut', 'TrainingStateData', 'TrainingStatus',
learning_loop_node/data_classes/image_metadata.py:

@@ -35,3 +35,8 @@ class ImageMetadata():

  def __len__(self):
  return len(self.box_detections) + len(self.point_detections) + len(self.segmentation_detections) + len(self.classification_detections)
+
+
+ @dataclass(**KWONLY_SLOTS)
+ class ImagesMetadata():
+ items: List[ImageMetadata] = field(default_factory=list, metadata={'description': 'List of image metadata'})
learning_loop_node/data_classes/training.py:

@@ -8,6 +8,7 @@ from uuid import uuid4

  from ..enums import TrainerState
  from ..helpers.misc import create_image_folder, create_training_folder
+
  # pylint: disable=no-name-in-module
  from .general import Category, Context

@@ -52,7 +53,7 @@ class Training():
  training_folder: str # f'{project_folder}/trainings/{trainings_id}'

  categories: List[Category]
- hyperparameters: dict
+ hyperparameters: Dict[str, Any]

  training_number: int
  training_state: str
@@ -63,7 +64,7 @@ class Training():
  base_model_uuid: Optional[str] = None # model uuid to continue training (is loaded from loop)

  # NOTE: these are set later after the model has been uploaded
- image_data: Optional[List[dict]] = None
+ image_data: Optional[List[Dict]] = None
  skipped_image_count: Optional[int] = None
  model_uuid_for_detecting: Optional[str] = None # Model uuid to load from the loop after training and upload

learning_loop_node/data_exchanger.py:

@@ -7,7 +7,7 @@ from glob import glob
  from http import HTTPStatus
  from io import BytesIO
  from time import time
- from typing import Dict, List, Optional
+ from typing import Any, Dict, List, Optional

  import aiofiles # type: ignore

@@ -68,7 +68,7 @@ class DataExchanger():
  assert response.status_code == 200, response
  return (response.json())['image_ids']

- async def download_images_data(self, image_uuids: List[str], chunk_size: int = 100) -> List[Dict]:
+ async def download_images_data(self, image_uuids: List[str], chunk_size: int = 100) -> List[Dict[str, Any]]:
  """Download image annotations, tags, set and other information for the given image uuids."""
  logging.info('Fetching annotations, tags, sets, etc. for %s images..', len(image_uuids))

@@ -78,7 +78,7 @@ class DataExchanger():
  return []

  progress_factor = 0.5 / num_image_ids # first 50% of progress is for downloading data
- images_data: List[Dict] = []
+ images_data: List[Dict[str, Any]] = []
  for i in range(0, num_image_ids, chunk_size):
  self.progress = i * progress_factor
  chunk_ids = image_uuids[i:i+chunk_size]
learning_loop_node/detector/detector_logic.py:

@@ -2,9 +2,7 @@ import logging
  from abc import abstractmethod
  from typing import List, Optional

- import numpy as np
-
- from ..data_classes import ImageMetadata, ModelInformation
+ from ..data_classes import ImageMetadata, ImagesMetadata, ModelInformation
  from ..globals import GLOBALS
  from .exceptions import NodeNeedsRestartError

@@ -44,13 +42,18 @@ class DetectorLogic():
  def init(self):
  """Called when a (new) model was loaded. Initialize the model. Model information available via `self.model_info`"""

- def evaluate_with_all_info(self, image: np.ndarray, tags: List[str], source: Optional[str] = None, creation_date: Optional[str] = None) -> ImageMetadata: # pylint: disable=unused-argument
+ def evaluate_with_all_info(self, image: bytes, tags: List[str], source: Optional[str] = None, creation_date: Optional[str] = None) -> ImageMetadata: # pylint: disable=unused-argument
  """Called by the detector node when an image should be evaluated (REST or SocketIO).
  Tags, source come from the caller and may be used in this function.
  By default, this function simply calls `evaluate`"""
  return self.evaluate(image)

  @abstractmethod
- def evaluate(self, image: np.ndarray) -> ImageMetadata:
+ def evaluate(self, image: bytes) -> ImageMetadata:
  """Evaluate the image and return the detections.
  The object should return empty detections if it is not initialized"""
+
+ @abstractmethod
+ def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
+ """Evaluate a batch of images and return the detections.
+ The object should return empty detections if it is not initialized"""
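With `evaluate` now taking raw JPEG bytes and the new abstract `batch_evaluate` returning the `ImagesMetadata` container added in image_metadata.py above, a subclass might look roughly like the following sketch. The decoding step, the import style and the empty-result fallback are assumptions; constructor arguments and real inference are omitted.

```python
from typing import List

import cv2
import numpy as np

from learning_loop_node.data_classes import ImageMetadata, ImagesMetadata
from learning_loop_node.detector.detector_logic import DetectorLogic


class MyDetector(DetectorLogic):
    """Hypothetical detector; weights, categories etc. would come from self.model_info."""

    def init(self) -> None:
        # called after a (new) model was loaded; set up the inference backend here
        pass

    def evaluate(self, image: bytes) -> ImageMetadata:
        array = cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)
        if array is None:
            return ImageMetadata()  # empty detections if the image cannot be decoded
        # ... run the model on `array` and fill box/point/segmentation detections here ...
        return ImageMetadata()

    def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
        # naive fallback: evaluate image by image; a real implementation would batch on the GPU
        return ImagesMetadata(items=[self.evaluate(image) for image in images])
```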
learning_loop_node/detector/detector_node.py:

@@ -1,36 +1,25 @@
  import asyncio
  import contextlib
- import math
  import os
  import shutil
  import subprocess
  import sys
  from dataclasses import asdict
  from datetime import datetime
- from threading import Thread
  from typing import Dict, List, Optional

- import numpy as np
  import socketio
  from dacite import from_dict
  from fastapi.encoders import jsonable_encoder
  from socketio import AsyncClient

- from ..data_classes import (
- AboutResponse,
- Category,
- Context,
- DetectionStatus,
- ImageMetadata,
- ModelInformation,
- ModelVersionResponse,
- Shape,
- )
+ from ..data_classes import (AboutResponse, Category, Context, DetectionStatus, ImageMetadata, ImagesMetadata,
+ ModelInformation, ModelVersionResponse, Shape)
  from ..data_classes.socket_response import SocketResponse
  from ..data_exchanger import DataExchanger, DownloadError
  from ..enums import OperationMode, VersionMode
  from ..globals import GLOBALS
- from ..helpers import environment_reader
+ from ..helpers import background_tasks, environment_reader, run
  from ..node import Node
  from .detector_logic import DetectorLogic
  from .exceptions import NodeNeedsRestartError
@@ -227,7 +216,7 @@ class DetectorNode(Node):
  async def detect(sid, data: Dict) -> Dict:
  try:
  det = await self.get_detections(
- raw_image=np.frombuffer(data['image'], np.uint8),
+ raw_image=data['image'],
  camera_id=data.get('camera-id', None) or data.get('mac', None),
  tags=data.get('tags', []),
  source=data.get('source', None),
@@ -240,8 +229,29 @@ class DetectorNode(Node):
  return detection_dict
  except Exception as e:
  self.log.exception('could not detect via socketio')
- with open('/tmp/bad_img_from_socket_io.jpg', 'wb') as f:
- f.write(data['image'])
+ # with open('/tmp/bad_img_from_socket_io.jpg', 'wb') as f:
+ # f.write(data['image'])
+ return {'error': str(e)}
+
+ @self.sio.event
+ async def batch_detect(sid, data: Dict) -> Dict:
+ try:
+ det = await self.get_batch_detections(
+ raw_images=data['images'],
+ tags=data.get('tags', []),
+ camera_id=data.get('camera-id', None) or data.get('mac', None),
+ source=data.get('source', None),
+ autoupload=data.get('autoupload', None),
+ creation_date=data.get('creation_date', None)
+ )
+ if det is None:
+ return {'error': 'no model loaded'}
+ detection_dict = jsonable_encoder(asdict(det))
+ return detection_dict
+ except Exception as e:
+ self.log.exception('could not detect via socketio')
+ # with open('/tmp/bad_img_from_socket_io.jpg', 'wb') as f:
+ # f.write(data['image'])
  return {'error': str(e)}

  @self.sio.event
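A client triggers the new handler by emitting `batch_detect` with a list of images. A hedged sketch (assumed address, hypothetical file names):

```python
import asyncio
from pathlib import Path
from typing import List

import socketio


async def batch_example(paths: List[str]) -> None:
    sio = socketio.AsyncClient()
    await sio.connect('http://localhost:8004')  # assumed detector address
    images = [Path(p).read_bytes() for p in paths]
    result = await sio.call('batch_detect', {
        'images': images,          # list of jpg byte strings
        'tags': ['batch'],
        'autoupload': 'disabled',  # skip auto-submission for this call
    })
    # the reply mirrors ImagesMetadata: {'items': [<ImageMetadata dict>, ...]} or {'error': ...}
    print(len(result.get('items', [])))
    await sio.disconnect()


asyncio.run(batch_example(['a.jpg', 'b.jpg']))
```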
@@ -279,9 +289,10 @@ class DetectorNode(Node):
  return {'error': str(e)}

  @self.sio.event
- async def upload(sid, data: Dict) -> Optional[Dict]:
- '''upload an image with detections'''
+ async def upload(sid, data: Dict) -> Dict:
+ """Upload an image with detections"""

+ self.log.debug('Processing upload via socketio.')
  detection_data = data.get('detections', {})
  if detection_data and self.detector_logic.model_info is not None:
  try:
@@ -293,22 +304,19 @@ class DetectorNode(Node):
  else:
  image_metadata = ImageMetadata()

- tags = data.get('tags', [])
- tags.append('picked_by_system')
-
- source = data.get('source', None)
- creation_date = data.get('creation_date', None)
-
- self.log.debug('running upload via socketio. tags: %s, source: %s, creation_date: %s',
- tags, source, creation_date)
-
- loop = asyncio.get_event_loop()
  try:
- await loop.run_in_executor(None, self.outbox.save, data['image'], image_metadata, tags, source, creation_date)
+ await self.upload_images(
+ images=[data['image']],
+ image_metadata=image_metadata,
+ tags=data.get('tags', []),
+ source=data.get('source', None),
+ creation_date=data.get('creation_date', None),
+ upload_priority=data.get('upload_priority', False)
+ )
  except Exception as e:
  self.log.exception('could not upload via socketio')
  return {'error': str(e)}
- return None
+ return {'status': 'OK'}

  @self.sio.event
  def connect(sid, environ, auth) -> None:
@@ -469,7 +477,7 @@ class DetectorNode(Node):
  self.log.warning('Operation mode set to %s, but sync failed: %s', mode, e)

  def reload(self, reason: str):
- '''provide a cause for the reload'''
+ """provide a cause for the reload"""

  self.log.info('########## reloading app because %s', reason)
  if os.path.isfile('/app/app_code/restart/restart.py'):
@@ -482,20 +490,20 @@ class DetectorNode(Node):
  self.log.error('could not reload app')

  async def get_detections(self,
- raw_image: np.ndarray,
- camera_id: Optional[str],
+ raw_image: bytes,
  tags: List[str],
+ *,
+ camera_id: Optional[str] = None,
  source: Optional[str] = None,
  autoupload: Optional[str] = None,
  creation_date: Optional[str] = None) -> ImageMetadata:
  """ Main processing function for the detector node when an image is received via REST or SocketIO.
- This function infers the detections from the image, cares about uploading to the loop and returns the detections as a dictionary.
+ This function infers the detections from the image, cares about uploading to the loop and returns the detections as ImageMetadata object.
  Note: raw_image is a numpy array of type uint8, but not in the correct shape!
  It can be converted e.g. using cv2.imdecode(raw_image, cv2.IMREAD_COLOR)"""

  await self.detection_lock.acquire()
- loop = asyncio.get_event_loop()
- detections = await loop.run_in_executor(None, self.detector_logic.evaluate_with_all_info, raw_image, tags, source, creation_date)
+ detections = await run.io_bound(self.detector_logic.evaluate_with_all_info, raw_image, tags, source, creation_date)
  self.detection_lock.release()

  fix_shape_detections(detections)
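`run.io_bound` comes from the new `learning_loop_node/helpers/run.py` (+21 lines), whose body is not shown in this diff. Judging from the inline `loop.run_in_executor` call it replaces, it presumably pushes a blocking callable onto the default executor; the following is only a plausible sketch, not the shipped implementation.

```python
import asyncio
from functools import partial
from typing import Any, Callable, TypeVar

T = TypeVar('T')


async def io_bound(callback: Callable[..., T], *args: Any, **kwargs: Any) -> T:
    """Run a blocking function in the default thread pool executor (assumed behavior)."""
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, partial(callback, *args, **kwargs))
```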
@@ -503,21 +511,74 @@
  n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
  self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)

- if autoupload is None or autoupload == 'filtered': # NOTE default is filtered
- Thread(target=self.relevance_filter.may_upload_detections,
- args=(detections, camera_id, raw_image, tags, source, creation_date)).start()
+ autoupload = autoupload or 'filtered'
+ if autoupload == 'filtered' and camera_id is not None:
+ background_tasks.create(self.relevance_filter.may_upload_detections(
+ detections, camera_id, raw_image, tags, source, creation_date
+ ))
  elif autoupload == 'all':
- Thread(target=self.outbox.save, args=(raw_image, detections, tags, source, creation_date)).start()
+ background_tasks.create(self.outbox.save(raw_image, detections, tags, source, creation_date))
  elif autoupload == 'disabled':
  pass
  else:
  self.log.error('unknown autoupload value %s', autoupload)
  return detections

- async def upload_images(self, images: List[bytes], source: Optional[str], creation_date: Optional[str]):
- loop = asyncio.get_event_loop()
+ async def get_batch_detections(self,
+ raw_images: List[bytes],
+ tags: List[str],
+ *,
+ camera_id: Optional[str] = None,
+ source: Optional[str] = None,
+ autoupload: Optional[str] = None,
+ creation_date: Optional[str] = None) -> ImagesMetadata:
+ """ Processing function for the detector node when a a batch inference is requested via SocketIO.
+ This function infers the detections from all images, cares about uploading to the loop and returns the detections as a list of ImageMetadata."""
+
+ await self.detection_lock.acquire()
+ all_detections = await run.io_bound(self.detector_logic.batch_evaluate, raw_images)
+ self.detection_lock.release()
+
+ for detections, raw_image in zip(all_detections.items, raw_images):
+ fix_shape_detections(detections)
+ n_bo, n_cl = len(detections.box_detections), len(detections.classification_detections)
+ n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
+ self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)
+
+ autoupload = autoupload or 'filtered'
+ if autoupload == 'filtered' and camera_id is not None:
+ background_tasks.create(self.relevance_filter.may_upload_detections(
+ detections, camera_id, raw_image, tags, source, creation_date
+ ))
+ elif autoupload == 'all':
+ background_tasks.create(self.outbox.save(raw_image, detections, tags, source, creation_date))
+ elif autoupload == 'disabled':
+ pass
+ else:
+ self.log.error('unknown autoupload value %s', autoupload)
+ return all_detections
+
+ async def upload_images(
+ self, *,
+ images: List[bytes],
+ image_metadata: Optional[ImageMetadata] = None,
+ tags: Optional[List[str]] = None,
+ source: Optional[str],
+ creation_date: Optional[str],
+ upload_priority: bool = False
+ ) -> None:
+ """Save images to the outbox using an asyncio executor.
+ Used by SIO and REST upload endpoints."""
+
+ if image_metadata is None:
+ image_metadata = ImageMetadata()
+ if tags is None:
+ tags = []
+
+ tags.append('picked_by_system')
+
  for image in images:
- await loop.run_in_executor(None, self.outbox.save, image, ImageMetadata(), ['picked_by_system'], source, creation_date)
+ await self.outbox.save(image, image_metadata, tags, source, creation_date, upload_priority)

  def add_category_id_to_detections(self, model_info: ModelInformation, image_metadata: ImageMetadata):
  def find_category_id_by_name(categories: List[Category], category_name: str):
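`background_tasks.create` likewise comes from the new `learning_loop_node/helpers/background_tasks.py` (+78 lines), which this diff does not show. The call sites above pass it a coroutine, so a minimal sketch of the pattern could look like the following; this is an assumption about its shape, not the shipped code.

```python
import asyncio
import logging
from typing import Coroutine, Set

# keep strong references so pending tasks are not garbage collected
running_tasks: Set[asyncio.Task] = set()


def create(coroutine: Coroutine) -> asyncio.Task:
    """Schedule a fire-and-forget coroutine on the running event loop."""
    task = asyncio.create_task(coroutine)
    running_tasks.add(task)
    task.add_done_callback(_on_done)
    return task


def _on_done(task: asyncio.Task) -> None:
    running_tasks.discard(task)
    if not task.cancelled() and task.exception() is not None:
        logging.error('background task failed', exc_info=task.exception())
```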
learning_loop_node/detector/inbox_filter/relevance_filter.py:

@@ -11,14 +11,16 @@ class RelevanceFilter():
  self.cam_histories: Dict[str, CamObservationHistory] = {}
  self.outbox: Outbox = outbox

- def may_upload_detections(self,
- image_metadata: ImageMetadata,
- cam_id: str,
- raw_image: bytes,
- tags: List[str],
- source: Optional[str] = None,
- creation_date: Optional[str] = None
- ) -> List[str]:
+ async def may_upload_detections(self,
+ image_metadata: ImageMetadata,
+ cam_id: str,
+ raw_image: bytes,
+ tags: List[str],
+ source: Optional[str] = None,
+ creation_date: Optional[str] = None) -> List[str]:
+ """Check if the detection should be uploaded to the outbox.
+ If so, upload it and return the list of causes for the upload.
+ """
  for group in self.cam_histories.values():
  group.forget_old_detections()

@@ -30,5 +32,5 @@ class RelevanceFilter():
  if len(causes) > 0:
  tags = tags if tags is not None else []
  tags.extend(causes)
- self.outbox.save(raw_image, image_metadata, tags, source, creation_date)
+ await self.outbox.save(raw_image, image_metadata, tags, source, creation_date)
  return causes