learning-loop-node 0.16.1__tar.gz → 0.17.1__tar.gz
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of learning-loop-node might be problematic.
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/PKG-INFO +29 -21
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/README.md +28 -20
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_classes/__init__.py +6 -1
- learning_loop_node-0.17.1/learning_loop_node/data_classes/annotations.py +44 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_classes/detections.py +3 -7
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_classes/image_metadata.py +15 -1
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/detector_logic.py +9 -9
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/detector_node.py +66 -50
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/inbox_filter/relevance_filter.py +13 -11
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/outbox.py +3 -17
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/rest/detect.py +10 -6
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/rest/upload.py +11 -1
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/helpers/background_tasks.py +0 -1
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/conftest.py +2 -3
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py +3 -3
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/test_detector_node.py +18 -8
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/test_relevance_filter.py +1 -1
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/testing_detector.py +8 -4
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/test_helper.py +19 -3
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/states/test_state_detecting.py +2 -1
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/pyproject.toml +1 -1
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/annotation/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/annotation/annotator_logic.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/annotation/annotator_node.py +0 -0
- /learning_loop_node-0.16.1/learning_loop_node/data_classes/annotations.py → /learning_loop_node-0.17.1/learning_loop_node/data_classes/annotation_data.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_classes/general.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_classes/socket_response.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_classes/training.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_exchanger.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/exceptions.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/inbox_filter/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/inbox_filter/cam_observation_history.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/rest/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/rest/about.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/rest/backdoor_controls.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/rest/model_version_control.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/rest/operation_mode.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/rest/outbox_mode.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/enums/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/enums/annotator.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/enums/detector.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/enums/general.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/enums/trainer.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/examples/novelty_score_updater.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/globals.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/helpers/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/helpers/environment_reader.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/helpers/gdrive_downloader.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/helpers/log_conf.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/helpers/misc.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/helpers/run.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/loop_communication.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/node.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/py.typed +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/rest.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/annotator/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/annotator/conftest.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/annotator/pytest.ini +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/annotator/test_annotator_node.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/inbox_filter/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/inbox_filter/test_observation.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/pytest.ini +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/test.jpg +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/test_client_communication.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/detector/test_outbox.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/general/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/general/conftest.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/general/pytest.ini +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/general/test_data/file_1.txt +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/general/test_data/file_2.txt +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/general/test_data/model.json +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/general/test_data_classes.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/general/test_downloader.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/general/test_learning_loop_node.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/conftest.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/pytest.ini +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/state_helper.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/states/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/states/test_state_cleanup.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/states/test_state_download_train_model.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/states/test_state_prepare.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/states/test_state_sync_confusion_matrix.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/states/test_state_train.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/states/test_state_upload_detections.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/states/test_state_upload_model.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/test_errors.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/test_trainer_states.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/tests/trainer/testing_trainer_logic.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/downloader.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/exceptions.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/executor.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/io_helpers.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/rest/__init__.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/rest/backdoor_controls.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/test_executor.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/trainer_logic.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/trainer_logic_generic.py +0 -0
- {learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/trainer/trainer_node.py +0 -0
{learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: learning-loop-node
-Version: 0.16.1
+Version: 0.17.1
 Summary: Python Library for Nodes which connect to the Zauberzeug Learning Loop
 Home-page: https://github.com/zauberzeug/learning_loop_node
 License: MIT
@@ -85,34 +85,48 @@ from learning_loop_node/learning_loop_node
 
 Detector Nodes are normally deployed on edge devices like robots or machinery but can also run in the cloud to provide backend services for an app or similar. These nodes register themself at the Learning Loop. They provide REST and Socket.io APIs to run inference on images. The processed images can automatically be used for active learning: e.g. uncertain predictions will be send to the Learning Loop.
 
-###
+### Inference API
 
 Images can be send to the detector node via socketio or rest.
-
+Via **REST** you may provide the following parameters:
+
+- `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
+- `camera_id`: a camera identifier (string) used to improve the autoupload filtering
+- `tags`: comma separated list of tags to add to the image in the learning loop
+- `source`: optional source identifier (str) for the image (e.g. a robot id)
+- `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
+- `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
 
 Example usage:
 
-`curl --request POST -F 'file=@test.jpg' localhost:8004/detect`
+`curl --request POST -F 'file=@test.jpg' -H 'autoupload: all' -H 'camera_id: front_cam' localhost:8004/detect`
+
+To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata.
+Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).
+
+### Upload API
 
-
-You can additionally provide the following camera parameters:
+The detector has a **REST** endpoint to upload images (and detections) to the Learning Loop. The endpoint takes a POST request with one or multiple images. The images are expected to be in jpg format. The following optional parameters may be set via headers:
 
-- `
-- `
+- `source`: optional source identifier (str) for the image (e.g. a robot id)
+- `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
+- `upload_priority`: A boolean flag to prioritize the upload (defaults to False)
 
-
+Example:
 
-
+`curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`
+
+The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a json dictionary, with the following entries:
 
 - `image`: the image data in jpg format
-
-- `
-- `source`: optional source identifier for the image
-- `creation_date`: optional creation date for the image
+
+- `metadata`: a dictionary representing the image metadata. If metadata contains detections and/or annotations, UUIDs for the classes are automatically determined based on the category names. Metadata should follow the schema of the `ImageMetadata` data class.
 - `upload_priority`: boolean flag to prioritize the upload (defaults to False)
 
 The endpoint returns None if the upload was successful and an error message otherwise.
 
+For both ways to upload an image, the tag `picked_by_system` is automatically added to the image metadata.
+
 ### Changing the model versioning mode
 
 The detector can be configured to one of the following behaviors:
@@ -164,12 +178,6 @@ The outbox mode can also be queried via:
 
 - HTTP: `curl http://localhost/outbox_mode`
 - SocketIO: `sio.emit('get_outbox_mode')`
 
-### Explicit upload
-
-The detector has a REST endpoint to upload images (and detections) to the Learning Loop. The endpoint takes a POST request with the image and optionally the detections. The image is expected to be in jpg format. The detections are expected to be a json dictionary. Example:
-
-`curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`
-
 ## Trainer Node
 
 Trainers fetch the images and anntoations from the Learning Loop to train new models.
@@ -184,7 +192,7 @@ A Conveter Node converts models from one format into another.
 
 ...
 
-
+### Test operability
 
 Assumend there is a Converter Node which converts models of format 'format_a' into 'format_b'.
 Upload a model with
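The new header-based parameters of the `/detect` endpoint can also be exercised from Python. The following is a hypothetical client sketch, not part of the package: it assumes a detector node on `localhost:8004` as in the curl example, uses the header names from the parameter list above, and assumes the JSON response mirrors the `ImageMetadata` schema; all sample values are made up.

```python
import requests

# Hypothetical client sketch for the REST inference endpoint described above.
# Header names come from the README's parameter list.
with open('test.jpg', 'rb') as f:
    response = requests.post(
        'http://localhost:8004/detect',
        files={'file': f},  # jpg image as multipart form field, as in the curl example
        headers={
            'autoupload': 'filtered',                # 'filtered' (default), 'all' or 'disabled'
            'camera_id': 'front_cam',                # groups observations for the relevance filter
            'tags': 'demo,loading_dock',             # comma separated tags
            'source': 'robot_7',                     # optional source identifier
            'creation_date': '2023-01-30T12:34:56',  # optional isoformat date
        },
        timeout=30,
    )
response.raise_for_status()
print(response.json())  # assumed to mirror the ImageMetadata schema
```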
{learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/README.md
RENAMED
The README.md hunks (`@@ -45,34 +45,48 @@`, `@@ -124,12 +138,6 @@`, `@@ -144,7 +152,7 @@`) are verbatim the same changes as the README body of the PKG-INFO diff above, shifted up by 40 lines because PKG-INFO prepends the package metadata header.
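For the SocketIO inference path, the payload keys match those read by the `detect` handler in the detector_node.py diff further below (`image`, plus optional `camera_id`, `tags`, `source`, `autoupload`, `creation_date`). A hedged sketch using `python-socketio`; the server address and sample values are assumptions:

```python
import socketio

# Hypothetical SocketIO inference sketch; assumes the detector node's
# SocketIO server is reachable on localhost:8004.
sio = socketio.Client()
sio.connect('http://localhost:8004')

with open('test.jpg', 'rb') as f:
    image_bytes = f.read()

result = sio.call('detect', {
    'image': image_bytes,            # required: jpg bytes
    'camera_id': 'front_cam',        # optional, defaults to None
    'tags': ['demo'],                # optional, defaults to []
    'autoupload': 'filtered',        # optional, defaults to 'filtered' in 0.17.1
})
print(result)  # detections, serialized from ImageMetadata
sio.disconnect()
```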
{learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_classes/__init__.py
RENAMED
@@ -1,4 +1,9 @@
-from .annotations import AnnotationData, SegmentationAnnotation, ToolOutput, UserInput
+from .annotation_data import (
+    AnnotationData,
+    SegmentationAnnotation,
+    ToolOutput,
+    UserInput,
+)
 from .detections import (
     BoxDetection,
     ClassificationDetection,
learning_loop_node-0.17.1/learning_loop_node/data_classes/annotations.py
ADDED
@@ -0,0 +1,44 @@
+
+import sys
+from dataclasses import dataclass, field
+from typing import Optional
+
+KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}
+
+
+@dataclass(**KWONLY_SLOTS)
+class BoxAnnotation():
+    """Coordinates according to COCO format. x,y is the top left corner of the box.
+    x increases to the right, y increases downwards.
+    """
+    category_name: str = field(metadata={'description': 'Category name'})
+    x: int = field(metadata={'description': 'X coordinate (left to right)'})
+    y: int = field(metadata={'description': 'Y coordinate (top to bottom)'})
+    width: int = field(metadata={'description': 'Width'})
+    height: int = field(metadata={'description': 'Height'})
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
+
+    def __str__(self):
+        return f'x:{int(self.x)} y: {int(self.y)}, w: {int(self.width)} h: {int(self.height)} -> {self.category_name}'
+
+
+@dataclass(**KWONLY_SLOTS)
+class PointAnnotation():
+    """Coordinates according to COCO format. x,y is the center of the point.
+    x increases to the right, y increases downwards."""
+    category_name: str = field(metadata={'description': 'Category name'})
+    x: float = field(metadata={'description': 'X coordinate (right)'})
+    y: float = field(metadata={'description': 'Y coordinate (down)'})
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
+
+    def __str__(self):
+        return f'x:{int(self.x)} y: {int(self.y)}, -> {self.category_name}'
+
+
+@dataclass(**KWONLY_SLOTS)
+class ClassificationAnnotation():
+    category_name: str = field(metadata={'description': 'Category name'})
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
+
+    def __str__(self):
+        return f'-> {self.category_name}'
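On Python 3.10+ `KWONLY_SLOTS` makes these dataclasses keyword-only, and `category_id` can be left unset because the detector node resolves it from the category name (see `add_category_id_to_detections` in the detector_node.py diff below). A small illustrative sketch with made-up values; the import path follows the file listing above:

```python
from learning_loop_node.data_classes.annotations import (
    BoxAnnotation,
    ClassificationAnnotation,
    PointAnnotation,
)

# Fields are passed by keyword (kw_only on Python >= 3.10); category_id
# is optional and resolved later from the category name.
box = BoxAnnotation(category_name='person', x=10, y=20, width=100, height=200)
point = PointAnnotation(category_name='marker', x=42.5, y=17.0)
label = ClassificationAnnotation(category_name='daylight')

print(box)    # x:10 y: 20, w: 100 h: 200 -> person
print(point)  # x:42 y: 17, -> marker
print(label)  # -> daylight
```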
{learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_classes/detections.py
RENAMED
@@ -9,10 +9,6 @@ import numpy as np
 KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}
 
 
-def current_datetime():
-    return datetime.now().isoformat(sep='_', timespec='milliseconds')
-
-
 @dataclass(**KWONLY_SLOTS)
 class BoxDetection():
     """Coordinates according to COCO format. x,y is the top left corner of the box.
@@ -25,7 +21,7 @@ class BoxDetection():
     height: int = field(metadata={'description': 'Height'})
     model_name: str = field(metadata={'description': 'Model name'})
     confidence: float = field(metadata={'description': 'Confidence'})
-    category_id: Optional[str] = field(default=None, metadata={'description': 'Category
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
 
     def intersection_over_union(self, other_detection: 'BoxDetection') -> float:
         # https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
@@ -59,7 +55,7 @@ class PointDetection():
     y: float = field(metadata={'description': 'Y coordinate (down)'})
     model_name: str = field(metadata={'description': 'Model name'})
     confidence: float = field(metadata={'description': 'Confidence'})
-    category_id: Optional[str] = field(default=None, metadata={'description': 'Category
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
 
     def distance(self, other: 'PointDetection') -> float:
         return np.sqrt((other.x - self.x)**2 + (other.y - self.y)**2)
@@ -73,7 +69,7 @@ class ClassificationDetection():
     category_name: str = field(metadata={'description': 'Category name'})
     model_name: str = field(metadata={'description': 'Model name'})
     confidence: float = field(metadata={'description': 'Confidence'})
-    category_id: Optional[str] = field(default=None, metadata={'description': 'Category
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
 
     def __str__(self):
         return f'c: {self.confidence:.2f} -> {self.category_name}'
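For context, the box geometry drives `intersection_over_union`. A hedged usage sketch; the full constructor field list (coordinates as in `BoxAnnotation`, plus `model_name` and `confidence`) is inferred from the hunks above, and the values are illustrative:

```python
from learning_loop_node.data_classes import BoxDetection

# Two overlapping COCO-style boxes (x, y = top left corner).
a = BoxDetection(category_name='person', x=0, y=0, width=100, height=100,
                 model_name='demo-model', confidence=0.9)
b = BoxDetection(category_name='person', x=50, y=0, width=100, height=100,
                 model_name='demo-model', confidence=0.8)

# intersection 50*100 = 5000, union 2*10000 - 5000 = 15000 -> IoU = 1/3
print(a.intersection_over_union(b))
```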
{learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/data_classes/image_metadata.py
RENAMED
@@ -4,7 +4,13 @@ from dataclasses import dataclass, field
 from datetime import datetime
 from typing import List, Optional
 
-from .detections import BoxDetection, ClassificationDetection, PointDetection, SegmentationDetection
+from .annotations import BoxAnnotation, ClassificationAnnotation, PointAnnotation
+from .detections import (
+    BoxDetection,
+    ClassificationDetection,
+    PointDetection,
+    SegmentationDetection,
+)
 
 # pylint: disable=too-many-instance-attributes
@@ -25,6 +31,14 @@ class ImageMetadata():
         'description': 'List of segmentation detections'})
     classification_detections: List[ClassificationDetection] = field(default_factory=list, metadata={
         'description': 'List of classification detections'})
+
+    box_annotations: List[BoxAnnotation] = field(default_factory=list, metadata={
+        'description': 'List of box annotations'})
+    point_annotations: List[PointAnnotation] = field(default_factory=list, metadata={
+        'description': 'List of point annotations'})
+    classification_annotation: Optional[ClassificationAnnotation] = field(default=None, metadata={
+        'description': 'Classification annotation'})
+
     tags: List[str] = field(default_factory=list, metadata={
         'description': 'List of tags'})
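Because the SocketIO upload endpoint parses metadata with `dacite.from_dict` (see the detector_node.py diff below), a payload can be expressed as a plain dict following this schema. A hedged sketch; it assumes `ImageMetadata` is re-exported from `learning_loop_node.data_classes`, as the detector node's imports suggest:

```python
from dacite import from_dict

from learning_loop_node.data_classes import ImageMetadata

# A plain dict following the ImageMetadata schema above; category_id is
# omitted on purpose, since the node resolves it from category_name when
# a model is loaded.
payload = {
    'box_annotations': [
        {'category_name': 'person', 'x': 10, 'y': 20, 'width': 100, 'height': 200},
    ],
    'tags': ['manually_checked'],
}

metadata = from_dict(data_class=ImageMetadata, data=payload)
print(metadata.box_annotations[0])  # x:10 y: 20, w: 100 h: 200 -> person
```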
{learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/detector_logic.py
RENAMED
@@ -42,18 +42,18 @@ class DetectorLogic():
     def init(self):
         """Called when a (new) model was loaded. Initialize the model. Model information available via `self.model_info`"""
 
-    def evaluate_with_all_info(self, image: bytes, tags: List[str], source: Optional[str] = None, creation_date: Optional[str] = None) -> ImageMetadata:  # pylint: disable=unused-argument
-        """Called by the detector node when an image should be evaluated (REST or SocketIO).
-        Tags, source come from the caller and may be used in this function.
-        By default, this function simply calls `evaluate`"""
-        return self.evaluate(image)
-
     @abstractmethod
-    def evaluate(self, image: bytes) -> ImageMetadata:
+    def evaluate(self, image: bytes) -> ImageMetadata:  # pylint: disable=unused-argument
         """Evaluate the image and return the detections.
-
+
+        Called by the detector node when an image should be evaluated (REST or SocketIO).
+        The resulting detections should be stored in the ImageMetadata.
+        Tags stored in the ImageMetadata will be uploaded to the learning loop.
+        The function should return empty metadata if the detector is not initialized."""
 
     @abstractmethod
     def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
         """Evaluate a batch of images and return the detections.
-        The
+        The resulting detections per image should be stored in the ImagesMetadata.
+        Tags stored in the ImagesMetadata will be uploaded to the learning loop.
+        The function should return empty metadata if the detector is not initialized."""
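Under the 0.17.1 contract a custom detector overrides only `evaluate` and `batch_evaluate` and communicates tags and detections entirely through the returned metadata; `evaluate_with_all_info` is gone. The following dummy detector is a hypothetical sketch, not from the package; the constructor argument and the `model_info.version` attribute are assumptions:

```python
from typing import List

from learning_loop_node.data_classes import BoxDetection, ImageMetadata, ImagesMetadata
from learning_loop_node.detector.detector_logic import DetectorLogic


class DummyDetector(DetectorLogic):
    """Illustrative detector following the new contract."""

    def __init__(self) -> None:
        super().__init__('demo_format')  # assumed: the model format handled by this detector

    def init(self) -> None:
        pass  # load weights here; model information is available via self.model_info

    def evaluate(self, image: bytes) -> ImageMetadata:
        if self.model_info is None:
            return ImageMetadata()  # not initialized -> empty metadata, per the docstring
        metadata = ImageMetadata()
        metadata.box_detections.append(BoxDetection(
            category_name='person', x=0, y=0, width=10, height=10,
            model_name=str(self.model_info.version), confidence=0.42))
        metadata.tags.append('dummy_detector')  # tags in the metadata get uploaded
        return metadata

    def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
        return ImagesMetadata(items=[self.evaluate(image) for image in images])
```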
{learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/detector_node.py
RENAMED
@@ -8,6 +8,11 @@ from dataclasses import asdict
 from datetime import datetime
 from typing import Dict, List, Optional
 
+try:
+    from typing import Literal
+except ImportError:  # Python <= 3.8
+    from typing_extensions import Literal  # type: ignore
+
 import socketio
 from dacite import from_dict
 from fastapi.encoders import jsonable_encoder
@@ -223,10 +228,10 @@ class DetectorNode(Node):
             try:
                 det = await self.get_detections(
                     raw_image=data['image'],
-                    camera_id=data.get('
+                    camera_id=data.get('camera_id', None),
                     tags=data.get('tags', []),
                     source=data.get('source', None),
-                    autoupload=data.get('autoupload',
+                    autoupload=data.get('autoupload', 'filtered'),
                     creation_date=data.get('creation_date', None)
                 )
                 if det is None:
@@ -245,9 +250,9 @@ class DetectorNode(Node):
                 det = await self.get_batch_detections(
                     raw_images=data['images'],
                     tags=data.get('tags', []),
-                    camera_id=data.get('
+                    camera_id=data.get('camera_id', None),
                     source=data.get('source', None),
-                    autoupload=data.get('autoupload',
+                    autoupload=data.get('autoupload', 'filtered'),
                     creation_date=data.get('creation_date', None)
                 )
                 if det is None:
@@ -296,27 +301,30 @@ class DetectorNode(Node):
 
         @self.sio.event
         async def upload(sid, data: Dict) -> Dict:
-            """Upload
+            """Upload a single image with metadata to the learning loop.
 
+            The data dict must contain:
+            - image: The image bytes to upload
+            - metadata: The metadata for the image (optional)
+            """
             self.log.debug('Processing upload via socketio.')
-
-
+
+            metadata = data.get('metadata', None)
+            if metadata:
                 try:
-                    image_metadata = from_dict(data_class=ImageMetadata, data=
+                    image_metadata = from_dict(data_class=ImageMetadata, data=metadata)
                 except Exception as e:
                     self.log.exception('could not parse detections')
                     return {'error': str(e)}
-
+                if self.detector_logic.model_info is not None:
+                    image_metadata = self.add_category_id_to_detections(self.detector_logic.model_info, image_metadata)
             else:
                 image_metadata = ImageMetadata()
 
             try:
                 await self.upload_images(
                     images=[data['image']],
-
-                    tags=data.get('tags', []),
-                    source=data.get('source', None),
-                    creation_date=data.get('creation_date', None),
+                    images_metadata=ImagesMetadata(items=[image_metadata]) if metadata else None,
                     upload_priority=data.get('upload_priority', False)
                 )
             except Exception as e:
@@ -506,34 +514,37 @@ class DetectorNode(Node):
                              *,
                              camera_id: Optional[str] = None,
                              source: Optional[str] = None,
-                             autoupload:
+                             autoupload: Literal['filtered', 'all', 'disabled'],
                              creation_date: Optional[str] = None) -> ImageMetadata:
         """ Main processing function for the detector node when an image is received via REST or SocketIO.
         This function infers the detections from the image, cares about uploading to the loop and returns the detections as ImageMetadata object.
         Note: raw_image is a numpy array of type uint8, but not in the correct shape!
-        It can be converted e.g. using cv2.imdecode(
+        It can be converted e.g. using cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)"""
 
         await self.detection_lock.acquire()
-
-
+        try:
+            metadata = await run.io_bound(self.detector_logic.evaluate, raw_image)
+        finally:
+            self.detection_lock.release()
+
+        metadata.tags.extend(tags)
+        metadata.source = source
+        metadata.created = creation_date
 
-        fix_shape_detections(
-        n_bo, n_cl = len(
-        n_po, n_se = len(
+        fix_shape_detections(metadata)
+        n_bo, n_cl = len(metadata.box_detections), len(metadata.classification_detections)
+        n_po, n_se = len(metadata.point_detections), len(metadata.segmentation_detections)
         self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)
 
-
-
-        background_tasks.create(self.relevance_filter.may_upload_detections(
-            detections, camera_id, raw_image, tags, source, creation_date
-        ))
+        if autoupload == 'filtered':
+            background_tasks.create(self.relevance_filter.may_upload_detections(metadata, camera_id, raw_image))
         elif autoupload == 'all':
-            background_tasks.create(self.outbox.save(raw_image,
+            background_tasks.create(self.outbox.save(raw_image, metadata))
         elif autoupload == 'disabled':
            pass
        else:
            self.log.error('unknown autoupload value %s', autoupload)
-        return
+        return metadata
 
     async def get_batch_detections(self,
                                    raw_images: List[bytes],
@@ -541,14 +552,21 @@ class DetectorNode(Node):
                                    *,
                                    camera_id: Optional[str] = None,
                                    source: Optional[str] = None,
-                                   autoupload:
+                                   autoupload: str = 'filtered',
                                    creation_date: Optional[str] = None) -> ImagesMetadata:
         """ Processing function for the detector node when a a batch inference is requested via SocketIO.
         This function infers the detections from all images, cares about uploading to the loop and returns the detections as a list of ImageMetadata."""
 
         await self.detection_lock.acquire()
-
-
+        try:
+            all_detections = await run.io_bound(self.detector_logic.batch_evaluate, raw_images)
+        finally:
+            self.detection_lock.release()
+
+        for metadata in all_detections.items:
+            metadata.tags.extend(tags)
+            metadata.source = source
+            metadata.created = creation_date
 
         for detections, raw_image in zip(all_detections.items, raw_images):
             fix_shape_detections(detections)
@@ -556,13 +574,10 @@ class DetectorNode(Node):
             n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
             self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)
 
-
-
-            background_tasks.create(self.relevance_filter.may_upload_detections(
-                detections, camera_id, raw_image, tags, source, creation_date
-            ))
+            if autoupload == 'filtered':
+                background_tasks.create(self.relevance_filter.may_upload_detections(detections, camera_id, raw_image))
             elif autoupload == 'all':
-                background_tasks.create(self.outbox.save(raw_image, detections
+                background_tasks.create(self.outbox.save(raw_image, detections))
             elif autoupload == 'disabled':
                 pass
             else:
@@ -572,24 +587,25 @@ class DetectorNode(Node):
     async def upload_images(
             self, *,
             images: List[bytes],
-
-            tags: Optional[List[str]] = None,
-            source: Optional[str],
-            creation_date: Optional[str],
+            images_metadata: Optional[ImagesMetadata] = None,
             upload_priority: bool = False
     ) -> None:
         """Save images to the outbox using an asyncio executor.
-        Used by SIO and REST upload endpoints.
+        Used by SIO and REST upload endpoints.
 
-
-
-
-
+        :param images: List of images to upload
+        :param images_metadata: Optional metadata for all images
+        :param upload_priority: Whether to upload the images with priority
+        :raises ValueError: If the number of images and number of metadata items do not match
+        """
 
-
+        if images_metadata and len(images_metadata.items) != len(images):
+            raise ValueError('Number of images and number of metadata items do not match')
 
-        for image in images:
-
+        for i, image in enumerate(images):
+            image_metadata = images_metadata.items[i] if images_metadata else ImageMetadata()
+            image_metadata.tags.append('picked_by_system')
+            await self.outbox.save(image, image_metadata, upload_priority)
 
     def add_category_id_to_detections(self, model_info: ModelInformation, image_metadata: ImageMetadata):
         def find_category_id_by_name(categories: List[Category], category_name: str):
@@ -628,9 +644,9 @@ def step_into(new_dir):
     os.chdir(previous_dir)
 
 
-def fix_shape_detections(
+def fix_shape_detections(metadata: ImageMetadata):
     # TODO This is a quick fix.. check how loop upload detections deals with this
-    for seg_detection in
+    for seg_detection in metadata.segmentation_detections:
         if isinstance(seg_detection.shape, Shape):
             points = ','.join([str(value) for p in seg_detection.shape.points for _,
                                value in asdict(p).items()])
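Client-side, the reworked `upload` event then takes `image` plus an optional `metadata` dict and `upload_priority` flag, as read by the handler above. A hypothetical sketch; server address and values are assumptions:

```python
import socketio

sio = socketio.Client()
sio.connect('http://localhost:8004')  # assumed detector node address

with open('test.jpg', 'rb') as f:
    image_bytes = f.read()

# Keys as read by the `upload` handler above; the metadata dict follows
# the ImageMetadata schema, and category UUIDs are resolved server-side.
error = sio.call('upload', {
    'image': image_bytes,
    'metadata': {
        'box_annotations': [
            {'category_name': 'person', 'x': 10, 'y': 20, 'width': 100, 'height': 200},
        ],
        'tags': ['manually_checked'],
    },
    'upload_priority': True,
})
print(error)  # None on success, an error message otherwise (per the README)
sio.disconnect()
```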
{learning_loop_node-0.16.1 → learning_loop_node-0.17.1}/learning_loop_node/detector/inbox_filter/relevance_filter.py
RENAMED
@@ -9,28 +9,30 @@ class RelevanceFilter():
 
     def __init__(self, outbox: Outbox) -> None:
         self.cam_histories: Dict[str, CamObservationHistory] = {}
+        self.unknown_cam_history: CamObservationHistory = CamObservationHistory()
         self.outbox: Outbox = outbox
 
     async def may_upload_detections(self,
                                     image_metadata: ImageMetadata,
-                                    cam_id: str,
-                                    raw_image: bytes
-                                    tags: List[str],
-                                    source: Optional[str] = None,
-                                    creation_date: Optional[str] = None) -> List[str]:
+                                    cam_id: Optional[str],
+                                    raw_image: bytes) -> List[str]:
         """Check if the detection should be uploaded to the outbox.
         If so, upload it and return the list of causes for the upload.
         """
         for group in self.cam_histories.values():
             group.forget_old_detections()
 
-        if cam_id
-
-
+        if cam_id is None:
+            history = self.unknown_cam_history
+        else:
+            if cam_id not in self.cam_histories:
+                self.cam_histories[cam_id] = CamObservationHistory()
+            history = self.cam_histories[cam_id]
+
+        causes = history.get_causes_to_upload(image_metadata)
         if len(image_metadata) >= 80:
             causes.append('unexpected_observations_count')
         if len(causes) > 0:
-            tags
-
-            await self.outbox.save(raw_image, image_metadata, tags, source, creation_date)
+            image_metadata.tags.extend(causes)
+            await self.outbox.save(raw_image, image_metadata)
         return causes