learning-loop-node 0.17.2__tar.gz → 0.18.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of learning-loop-node might be problematic.
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/PKG-INFO +25 -9
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/README.md +23 -7
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/detector_logic.py +5 -2
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/detector_node.py +74 -22
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/inbox_filter/relevance_filter.py +4 -2
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/outbox.py +12 -7
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/detect.py +10 -9
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/upload.py +8 -4
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/helpers/background_tasks.py +4 -4
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/helpers/misc.py +34 -1
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/annotator/conftest.py +1 -1
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/conftest.py +2 -2
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py +2 -1
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_client_communication.py +6 -6
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_detector_node.py +3 -3
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_outbox.py +8 -30
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_relevance_filter.py +4 -1
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/testing_detector.py +4 -2
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/pyproject.toml +2 -2
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/annotation/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/annotation/annotator_logic.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/annotation/annotator_node.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/annotation_data.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/annotations.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/detections.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/general.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/image_metadata.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/socket_response.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/training.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/data_exchanger.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/exceptions.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/inbox_filter/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/inbox_filter/cam_observation_history.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/about.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/backdoor_controls.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/model_version_control.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/operation_mode.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/outbox_mode.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/enums/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/enums/annotator.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/enums/detector.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/enums/general.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/enums/trainer.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/examples/novelty_score_updater.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/globals.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/helpers/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/helpers/environment_reader.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/helpers/gdrive_downloader.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/helpers/log_conf.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/helpers/run.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/loop_communication.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/node.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/py.typed +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/rest.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/annotator/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/annotator/pytest.ini +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/annotator/test_annotator_node.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/inbox_filter/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/inbox_filter/test_observation.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/pytest.ini +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test.jpg +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/conftest.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/pytest.ini +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_data/file_1.txt +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_data/file_2.txt +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_data/model.json +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_data_classes.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_downloader.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_learning_loop_node.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/test_helper.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/conftest.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/pytest.ini +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/state_helper.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_cleanup.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_detecting.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_download_train_model.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_prepare.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_sync_confusion_matrix.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_train.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_upload_detections.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_upload_model.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/test_errors.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/test_trainer_states.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/testing_trainer_logic.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/downloader.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/exceptions.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/executor.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/io_helpers.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/rest/__init__.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/rest/backdoor_controls.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/test_executor.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/trainer_logic.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/trainer_logic_generic.py +0 -0
- {learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/trainer/trainer_node.py +0 -0
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: learning-loop-node
-Version: 0.17.2
+Version: 0.18.0
 Summary: Python Library for Nodes which connect to the Zauberzeug Learning Loop
 Home-page: https://github.com/zauberzeug/learning_loop_node
 License: MIT

@@ -21,7 +21,7 @@ Requires-Dist: dacite (>=1.8.1,<2.0.0)
 Requires-Dist: fastapi (>=0.93,<0.109)
 Requires-Dist: fastapi-socketio (>=0.0.10,<0.0.11)
 Requires-Dist: fastapi-utils (>=0.2.1,<0.3.0)
-Requires-Dist: httpx (>=0.
+Requires-Dist: httpx (>=0.28.1,<0.29.0)
 Requires-Dist: icecream (>=2.1.0,<3.0.0)
 Requires-Dist: numpy (>=1.13.3,<2.0.0)
 Requires-Dist: psutil (>=5.8.0,<6.0.0)

@@ -90,9 +90,8 @@ Detector Nodes are normally deployed on edge devices like robots or machinery bu
 Images can be send to the detector node via socketio or rest.
 Via **REST** you may provide the following parameters:

-- `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
 - `camera_id`: a camera identifier (string) used to improve the autoupload filtering
-- `tags`: comma separated list of tags to add to the image in the learning loop
+- `tags`: comma separated list of tags to add to the image in the learning loop to add to the image in the learning loop
 - `source`: optional source identifier (str) for the image (e.g. a robot id)
 - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
 - `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)

@@ -101,7 +100,22 @@ Example usage:

 `curl --request POST -F 'file=@test.jpg' -H 'autoupload: all' -H 'camera_id: front_cam' localhost:8004/detect`

-To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata.
+To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata. The `detect` endpoint receives a dictionary, with the following entries:
+
+- `image`: The image data as dictionary with the following keys:
+  - `bytes`: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+  - `dtype`: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+  - `shape`: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
+- `camera_id`: optional camera identifier (string) used to improve the autoupload filtering
+- `tags`: optional list of tags to add to the image in the learning loop
+- `source`: optional source string
+- `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
+- `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
+
+The `batch_detect` endpoint receives a dictionary, with the same entries as the `detect` endpoint, except that the `image` entry is replaced by:
+
+- `images`: List of image data dictionaries, each with the same structure as the `image` entry in the `detect` endpoint
+
 Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).

 ### Upload API

@@ -116,12 +130,14 @@ Example:

 `curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`

-The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a
-
-- `image`: the image data in jpg format
+The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a dictionary, with the following entries:

+- `image`: the image data as dictionary with the following keys:
+  - `bytes`: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+  - `dtype`: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+  - `shape`: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
 - `metadata`: a dictionary representing the image metadata. If metadata contains detections and/or annotations, UUIDs for the classes are automatically determined based on the category names. Metadata should follow the schema of the `ImageMetadata` data class.
-- `upload_priority`: boolean flag to prioritize the upload (defaults to False)
+- `upload_priority`: Optional boolean flag to prioritize the upload (defaults to False)

 The endpoint returns None if the upload was successful and an error message otherwise.
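For orientation, here is a minimal client-side sketch of the new `detect` payload format documented above. It is not part of the diff: the host, port and connection details are placeholders (a running detector node reachable on localhost:8004 and the default python-socketio client settings are assumed); only the payload keys come from the documentation.

    import asyncio

    import numpy as np
    import socketio  # python-socketio client


    async def main() -> None:
        sio = socketio.AsyncClient()
        await sio.connect('http://localhost:8004')  # host/port are placeholders
        frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a real camera image
        payload = {
            'image': {
                'bytes': frame.tobytes(order='C'),
                'dtype': str(frame.dtype),
                'shape': frame.shape,
            },
            'autoupload': 'filtered',
            'tags': ['example'],
        }
        result = await sio.call('detect', payload)  # detections come back as a dict
        print(result)
        await sio.disconnect()


    asyncio.run(main())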
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/README.md
RENAMED

@@ -50,9 +50,8 @@ Detector Nodes are normally deployed on edge devices like robots or machinery bu
 Images can be send to the detector node via socketio or rest.
 Via **REST** you may provide the following parameters:

-- `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
 - `camera_id`: a camera identifier (string) used to improve the autoupload filtering
-- `tags`: comma separated list of tags to add to the image in the learning loop
+- `tags`: comma separated list of tags to add to the image in the learning loop to add to the image in the learning loop
 - `source`: optional source identifier (str) for the image (e.g. a robot id)
 - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
 - `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)

@@ -61,7 +60,22 @@ Example usage:

 `curl --request POST -F 'file=@test.jpg' -H 'autoupload: all' -H 'camera_id: front_cam' localhost:8004/detect`

-To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata.
+To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata. The `detect` endpoint receives a dictionary, with the following entries:
+
+- `image`: The image data as dictionary with the following keys:
+  - `bytes`: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+  - `dtype`: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+  - `shape`: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
+- `camera_id`: optional camera identifier (string) used to improve the autoupload filtering
+- `tags`: optional list of tags to add to the image in the learning loop
+- `source`: optional source string
+- `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
+- `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
+
+The `batch_detect` endpoint receives a dictionary, with the same entries as the `detect` endpoint, except that the `image` entry is replaced by:
+
+- `images`: List of image data dictionaries, each with the same structure as the `image` entry in the `detect` endpoint
+
 Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).

 ### Upload API

@@ -76,12 +90,14 @@ Example:

 `curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`

-The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a
-
-- `image`: the image data in jpg format
+The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a dictionary, with the following entries:

+- `image`: the image data as dictionary with the following keys:
+  - `bytes`: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+  - `dtype`: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+  - `shape`: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
 - `metadata`: a dictionary representing the image metadata. If metadata contains detections and/or annotations, UUIDs for the classes are automatically determined based on the category names. Metadata should follow the schema of the `ImageMetadata` data class.
-- `upload_priority`: boolean flag to prioritize the upload (defaults to False)
+- `upload_priority`: Optional boolean flag to prioritize the upload (defaults to False)

 The endpoint returns None if the upload was successful and an error message otherwise.
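As with the detect example above, a short sketch of the `upload` payload described in this README hunk. The metadata values are placeholders and the SocketIO client setup is omitted (see the detect sketch); only the keys come from the documentation.

    import numpy as np

    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    payload = {
        'image': {
            'bytes': frame.tobytes(order='C'),
            'dtype': str(frame.dtype),
            'shape': frame.shape,
        },
        'metadata': {'tags': ['example']},  # optional; must follow the ImageMetadata schema
        'upload_priority': False,           # optional
    }
    # response = await sio_client.call('upload', payload)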
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/detector_logic.py
RENAMED

@@ -2,6 +2,8 @@ import logging
 from abc import abstractmethod
 from typing import List, Optional

+import numpy as np
+
 from ..data_classes import ImageMetadata, ImagesMetadata, ModelInformation
 from ..globals import GLOBALS
 from .exceptions import NodeNeedsRestartError

@@ -43,7 +45,7 @@ class DetectorLogic():
         """Called when a (new) model was loaded. Initialize the model. Model information available via `self.model_info`"""

     @abstractmethod
-    def evaluate(self, image:
+    def evaluate(self, image: np.ndarray) -> ImageMetadata:
         """Evaluate the image and return the detections.

         Called by the detector node when an image should be evaluated (REST or SocketIO).

@@ -52,8 +54,9 @@ class DetectorLogic():
         The function should return empty metadata if the detector is not initialized."""

     @abstractmethod
-    def batch_evaluate(self, images: List[
+    def batch_evaluate(self, images: List[np.ndarray]) -> ImagesMetadata:
         """Evaluate a batch of images and return the detections.
+
         The resulting detections per image should be stored in the ImagesMetadata.
         Tags stored in the ImagesMetadata will be uploaded to the learning loop.
         The function should return empty metadata if the detector is not initialized."""
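To illustrate the changed abstract interface, here is a minimal `DetectorLogic` subclass against the new numpy-based signatures. The import paths are inferred from the package layout, and the returned metadata objects are empty placeholders rather than a real model; it is a sketch, not the library's reference implementation.

    from typing import List

    import numpy as np

    from learning_loop_node.data_classes import ImageMetadata, ImagesMetadata
    from learning_loop_node.detector.detector_logic import DetectorLogic


    class DummyDetector(DetectorLogic):

        def init(self) -> None:
            # a real detector would load weights based on self.model_info here
            pass

        def evaluate(self, image: np.ndarray) -> ImageMetadata:
            # images now arrive as decoded numpy arrays instead of jpg bytes
            return ImageMetadata()

        def batch_evaluate(self, images: List[np.ndarray]) -> ImagesMetadata:
            return ImagesMetadata(items=[ImageMetadata() for _ in images])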
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/detector_node.py
RENAMED
@@ -8,6 +8,8 @@ from dataclasses import asdict
 from datetime import datetime
 from typing import Dict, List, Optional

+import numpy as np
+
 try:
     from typing import Literal
 except ImportError:  # Python <= 3.8

@@ -33,6 +35,7 @@ from ..data_exchanger import DataExchanger, DownloadError
 from ..enums import OperationMode, VersionMode
 from ..globals import GLOBALS
 from ..helpers import background_tasks, environment_reader, run
+from ..helpers.misc import numpy_image_from_dict
 from ..node import Node
 from .detector_logic import DetectorLogic
 from .exceptions import NodeNeedsRestartError

@@ -225,9 +228,28 @@ class DetectorNode(Node):

         @self.sio.event
         async def detect(sid, data: Dict) -> Dict:
+            """Detect objects in a single image sent via SocketIO.
+
+            The data dict has the following schema:
+            - image: The image data as dictionary:
+                - bytes: bytes of the ndarray
+                - dtype: data type of the ndarray
+                - shape: shape of the ndarray
+            - camera_id: Optional camera ID
+            - tags: Optional list of tags
+            - source: Optional source string
+            - autoupload: Optional 'filtered', 'all' or 'disabled' (default: 'filtered')
+            - creation_date: Optional creation date in isoformat string
+            """
+            try:
+                image = numpy_image_from_dict(data['image'])
+            except Exception:
+                self.log.exception('could not parse image from socketio')
+                return {'error': 'could not parse image from data'}
+
             try:
                 det = await self.get_detections(
-
+                    image=image,
                     camera_id=data.get('camera_id', None),
                     tags=data.get('tags', []),
                     source=data.get('source', None),

@@ -246,9 +268,22 @@ class DetectorNode(Node):

         @self.sio.event
         async def batch_detect(sid, data: Dict) -> Dict:
+            """
+            Detect objects in a batch of images sent via SocketIO.
+
+            Data dict follows the schema of the detect endpoint,
+            but 'images' is a list of image dicts.
+            """
+            try:
+                images_data = data['images']
+                images = [numpy_image_from_dict(image) for image in images_data]
+            except Exception:
+                self.log.exception('could not parse images from socketio')
+                return {'error': 'could not parse images from data'}
+
             try:
                 det = await self.get_batch_detections(
-
+                    images=images,
                     tags=data.get('tags', []),
                     camera_id=data.get('camera_id', None),
                     source=data.get('source', None),

@@ -304,8 +339,12 @@ class DetectorNode(Node):
             """Upload a single image with metadata to the learning loop.

             The data dict must contain:
-            - image: The image
+            - image: The image data as dictionary with the following keys:
+                - bytes: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+                - dtype: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+                - shape: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
             - metadata: The metadata for the image (optional)
+            - upload_priority: Whether to upload with priority (optional)
             """
             self.log.debug('Processing upload via socketio.')

@@ -321,9 +360,15 @@ class DetectorNode(Node):
             else:
                 image_metadata = ImageMetadata()

+            try:
+                image = numpy_image_from_dict(data['image'])
+            except Exception:
+                self.log.exception('could not parse image from socketio')
+                return {'error': 'could not parse image from data'}
+
             try:
                 await self.upload_images(
-                    images=[
+                    images=[image],
                     images_metadata=ImagesMetadata(items=[image_metadata]) if metadata else None,
                     upload_priority=data.get('upload_priority', False)
                 )

@@ -373,6 +418,7 @@ class DetectorNode(Node):
         )

         self.log_status_on_change(status.state, status)
+        response = None

         try:
             response = await self.loop_communicator.post(

@@ -380,8 +426,8 @@ class DetectorNode(Node):
         except Exception:
             self.log.warning('Exception while trying to sync status with loop')

-        if response
-            self.log.warning('Status update failed: %s',
+        if not response or not response.is_success:
+            self.log.warning('Status update failed. Response: "%s"', response)

     async def _update_model_if_required(self) -> None:
         """Check if a new model is available and update if necessary.

@@ -509,21 +555,24 @@ class DetectorNode(Node):
             self.log.error('could not reload app')

     async def get_detections(self,
-
+                             image: np.ndarray,
                              tags: List[str],
                              *,
                              camera_id: Optional[str] = None,
                              source: Optional[str] = None,
                              autoupload: Literal['filtered', 'all', 'disabled'],
                              creation_date: Optional[str] = None) -> ImageMetadata:
-        """
-
-
-
+        """
+        Main processing function for the detector node.
+
+        Used when an image is received via REST or SocketIO.
+        This function infers the detections from the image,
+        cares about uploading to the loop and returns the detections as ImageMetadata object.
+        """

         await self.detection_lock.acquire()
         try:
-            metadata = await run.io_bound(self.detector_logic.evaluate,
+            metadata = await run.io_bound(self.detector_logic.evaluate, image)
         finally:
             self.detection_lock.release()

@@ -537,9 +586,9 @@ class DetectorNode(Node):
         self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)

         if autoupload == 'filtered':
-            background_tasks.create(self.relevance_filter.may_upload_detections(metadata, camera_id,
+            background_tasks.create(self.relevance_filter.may_upload_detections(metadata, camera_id, image))
         elif autoupload == 'all':
-            background_tasks.create(self.outbox.save(
+            background_tasks.create(self.outbox.save(image, metadata))
         elif autoupload == 'disabled':
             pass
         else:

@@ -547,19 +596,22 @@ class DetectorNode(Node):
         return metadata

     async def get_batch_detections(self,
-
+                                   images: List[np.ndarray],
                                    tags: List[str],
                                    *,
                                    camera_id: Optional[str] = None,
                                    source: Optional[str] = None,
                                    autoupload: str = 'filtered',
                                    creation_date: Optional[str] = None) -> ImagesMetadata:
-        """
-
+        """
+        Processing function for the detector node when a a batch inference is requested via SocketIO.
+
+        This function infers the detections from all images, cares about uploading to the loop and returns the detections as a list of ImageMetadata.
+        """

         await self.detection_lock.acquire()
         try:
-            all_detections = await run.io_bound(self.detector_logic.batch_evaluate,
+            all_detections = await run.io_bound(self.detector_logic.batch_evaluate, images)
         finally:
             self.detection_lock.release()

@@ -568,16 +620,16 @@ class DetectorNode(Node):
             metadata.source = source
             metadata.created = creation_date

-        for detections,
+        for detections, image in zip(all_detections.items, images):
             fix_shape_detections(detections)
             n_bo, n_cl = len(detections.box_detections), len(detections.classification_detections)
             n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
             self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)

             if autoupload == 'filtered':
-                background_tasks.create(self.relevance_filter.may_upload_detections(detections, camera_id,
+                background_tasks.create(self.relevance_filter.may_upload_detections(detections, camera_id, image))
             elif autoupload == 'all':
-                background_tasks.create(self.outbox.save(
+                background_tasks.create(self.outbox.save(image, detections))
             elif autoupload == 'disabled':
                 pass
             else:

@@ -586,7 +638,7 @@ class DetectorNode(Node):

     async def upload_images(
             self, *,
-            images: List[
+            images: List[np.ndarray],
             images_metadata: Optional[ImagesMetadata] = None,
             upload_priority: bool = False
     ) -> None:
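The `batch_detect` handler above expects the same dictionary as `detect`, with `images` holding a list of image dicts. A short sketch of such a payload (the frames and tags are placeholders; the SocketIO transport is omitted):

    import numpy as np

    frames = [np.zeros((480, 640, 3), dtype=np.uint8) for _ in range(4)]
    payload = {
        'images': [
            {'bytes': f.tobytes(order='C'), 'dtype': str(f.dtype), 'shape': f.shape}
            for f in frames
        ],
        'tags': ['batch-example'],
        'autoupload': 'disabled',
    }
    # result = await sio_client.call('batch_detect', payload)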
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/inbox_filter/relevance_filter.py
RENAMED

@@ -1,5 +1,7 @@
 from typing import Dict, List, Optional

+import numpy as np
+
 from ...data_classes.image_metadata import ImageMetadata
 from ..outbox import Outbox
 from .cam_observation_history import CamObservationHistory

@@ -15,7 +17,7 @@ class RelevanceFilter():
     async def may_upload_detections(self,
                                     image_metadata: ImageMetadata,
                                     cam_id: Optional[str],
-
+                                    image: np.ndarray) -> List[str]:
         """Check if the detection should be uploaded to the outbox.
         If so, upload it and return the list of causes for the upload.
         """

@@ -34,5 +36,5 @@ class RelevanceFilter():
             causes.append('unexpected_observations_count')
         if len(causes) > 0:
             image_metadata.tags.extend(causes)
-            await self.outbox.save(
+            await self.outbox.save(image, image_metadata)
         return causes
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/outbox.py
RENAMED
@@ -16,6 +16,7 @@ from threading import Lock
 from typing import List, Optional, Tuple, TypeVar, Union

 import aiohttp
+import numpy as np
 import PIL
 import PIL.Image  # type: ignore
 from fastapi.encoders import jsonable_encoder

@@ -24,6 +25,7 @@ from ..data_classes import ImageMetadata
 from ..enums import OutboxMode
 from ..globals import GLOBALS
 from ..helpers import environment_reader, run
+from ..helpers.misc import numpy_array_to_jpg_bytes

 T = TypeVar('T')

@@ -76,13 +78,16 @@ class Outbox():
             self.upload_folders.append(file)

     async def save(self,
-                   image:
+                   image: np.ndarray,
                    image_metadata: Optional[ImageMetadata] = None,
                    upload_priority: bool = False) -> None:
+        """
+        Save an image and its metadata to disk.

-
-
-
+        The data will be picked up by the continuous upload process.
+        """
+
+        jpg_bytes = numpy_array_to_jpg_bytes(image)

         if image_metadata is None:
             image_metadata = ImageMetadata()

@@ -90,7 +95,7 @@ class Outbox():
         identifier = datetime.now().isoformat(sep='_', timespec='microseconds')

         try:
-            await run.io_bound(self._save_files_to_disk, identifier,
+            await run.io_bound(self._save_files_to_disk, identifier, jpg_bytes, image_metadata, upload_priority)
         except Exception as e:
             self.log.error('Failed to save files for image %s: %s', identifier, e)
             return

@@ -104,7 +109,7 @@ class Outbox():

     def _save_files_to_disk(self,
                             identifier: str,
-
+                            jpeg_image: bytes,
                             image_metadata: ImageMetadata,
                             upload_priority: bool) -> None:
         subpath = 'priority' if upload_priority else 'normal'

@@ -119,7 +124,7 @@ class Outbox():
             json.dump(jsonable_encoder(asdict(image_metadata)), f)

         with open(tmp + f'/image_{identifier}.jpg', 'wb') as f:
-            f.write(
+            f.write(jpeg_image)

         if not os.path.exists(tmp):
             self.log.error('Could not rename %s to %s', tmp, full_path)
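A small usage sketch of the new `Outbox.save()` signature (it now takes a numpy image and encodes it to JPEG internally before writing to the outbox folder). This is not part of the diff; it assumes the usual node environment is configured and the import path is inferred from the package layout.

    import asyncio

    import numpy as np

    from learning_loop_node.data_classes import ImageMetadata
    from learning_loop_node.detector.outbox import Outbox


    async def demo() -> None:
        outbox = Outbox()  # assumes the node's environment variables are set
        frame = np.zeros((300, 600, 3), dtype=np.uint8)
        # JPEG-encoded and written to disk; the continuous upload process picks it up
        await outbox.save(frame, ImageMetadata(), upload_priority=False)


    asyncio.run(demo())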
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/detect.py
RENAMED
@@ -9,6 +9,7 @@ except ImportError:  # Python <= 3.8
 from fastapi import APIRouter, File, Header, Request, UploadFile

 from ...data_classes.image_metadata import ImageMetadata
+from ...helpers.misc import jpg_bytes_to_numpy_array

 if TYPE_CHECKING:
     from ..detector_node import DetectorNode

@@ -35,23 +36,23 @@ async def http_detect(
     Multiple images example:

     for i in `seq 1 10`; do time curl --request POST -F 'file=@test.jpg' localhost:8004/detect; done
-
     """
+    node: 'DetectorNode' = request.app
+
     try:
         # Read file directly to bytes instead of using numpy
-        file_bytes = file.
+        file_bytes = await file.read()
     except Exception as exc:
         logging.exception('Error during reading of image %s.', file.filename)
         raise Exception(f'Uploaded file {file.filename} is no image file.') from exc

     try:
-
-
-
-
-
-        creation_date=creation_date)
+        detections = await node.get_detections(image=jpg_bytes_to_numpy_array(file_bytes),
+                                               camera_id=camera_id or None,
+                                               tags=tags.split(',') if tags else [],
+                                               source=source,
+                                               autoupload=autoupload or 'filtered',
+                                               creation_date=creation_date)
     except Exception as exc:
         logging.exception('Error during detection of image %s.', file.filename)
         raise Exception(f'Error during detection of image {file.filename}.') from exc
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/upload.py
RENAMED
@@ -3,6 +3,7 @@ from typing import TYPE_CHECKING, List, Optional
 from fastapi import APIRouter, File, Query, Request, UploadFile

 from ...data_classes.image_metadata import ImageMetadata, ImagesMetadata
+from ...helpers.misc import jpg_bytes_to_numpy_array

 if TYPE_CHECKING:
     from ..detector_node import DetectorNode

@@ -26,15 +27,18 @@ async def upload_image(request: Request,

     curl -X POST -F 'files=@test.jpg' "http://localhost:/upload?source=test&creation_date=2024-01-01T00:00:00&upload_priority=true"
     """
-
+    node: 'DetectorNode' = request.app
+
+    files_bytes = [await file.read() for file in files]
     image_metadatas = []
-
+    images = []
+    for file_bytes in files_bytes:
+        images.append(jpg_bytes_to_numpy_array(file_bytes))
         image_metadatas.append(ImageMetadata(source=source, created=creation_date))

     images_metadata = ImagesMetadata(items=image_metadatas)

-    node
-    await node.upload_images(images=raw_files,
+    await node.upload_images(images=images,
                              images_metadata=images_metadata,
                              upload_priority=upload_priority)
     return 200, "OK"
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/helpers/background_tasks.py
RENAMED

@@ -25,14 +25,14 @@ from __future__ import annotations

 import asyncio
 import logging
-from typing import
+from typing import Coroutine, Dict, Set

 running_tasks: Set[asyncio.Task] = set()
 lazy_tasks_running: Dict[str, asyncio.Task] = {}
-lazy_tasks_waiting: Dict[str,
+lazy_tasks_waiting: Dict[str, Coroutine] = {}


-def create(coroutine:
+def create(coroutine: Coroutine, *, name: str = 'unnamed task') -> asyncio.Task:
     """Wraps a loop.create_task call and ensures there is an exception handler added to the task.

     If the task raises an exception, it is logged and handled by the global exception handlers.

@@ -48,7 +48,7 @@ def create(coroutine: Awaitable, *, name: str = 'unnamed task') -> asyncio.Task:
     return task


-def create_lazy(coroutine:
+def create_lazy(coroutine: Coroutine, *, name: str) -> None:
    """Wraps a create call and ensures a second task with the same name is delayed until the first one is done.

    If a third task with the same name is created while the first one is still running, the second one is discarded.
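A brief usage sketch for the retyped helpers: `create` now takes a `Coroutine` and returns an `asyncio.Task`, while `create_lazy` queues at most one follow-up coroutine per name. The import path is inferred from the package layout; this is a sketch, not diff content.

    import asyncio

    from learning_loop_node.helpers import background_tasks


    async def upload_once() -> None:
        await asyncio.sleep(1)


    async def main() -> None:
        background_tasks.create(upload_once(), name='demo upload')       # fire-and-forget with error logging
        background_tasks.create_lazy(upload_once(), name='demo upload')  # delayed until the first task is done
        await asyncio.sleep(2)


    asyncio.run(main())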
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/helpers/misc.py
RENAMED

@@ -1,6 +1,7 @@
 """original copied from https://quantlane.com/blog/ensure-asyncio-task-exceptions-get-logged/"""
 import asyncio
 import functools
+import io
 import json
 import logging
 import os

@@ -9,10 +10,12 @@ import sys
 from dataclasses import asdict
 from glob import glob
 from time import perf_counter
-from typing import Any, Coroutine, List, Optional, Tuple, TypeVar
+from typing import Any, Coroutine, Dict, List, Optional, Tuple, TypeVar
 from uuid import UUID, uuid4

+import numpy as np
 import pynvml
+from PIL import Image

 from ..data_classes.general import Context
 from ..data_classes.socket_response import SocketResponse

@@ -204,3 +207,33 @@ def create_training_folder(project_folder: str, trainings_id: str) -> str:
     training_folder = f'{project_folder}/trainings/{trainings_id}'
     os.makedirs(training_folder, exist_ok=True)
     return training_folder
+
+
+def jpg_bytes_to_numpy_array(jpg_bytes: bytes) -> np.ndarray:
+    """Convert jpg bytes to numpy array."""
+    image = Image.open(io.BytesIO(jpg_bytes))
+    return np.array(image)
+
+
+def numpy_array_to_jpg_bytes(image_array: np.ndarray) -> bytes:
+    """Convert jpg bytes to numpy array."""
+    buffer = io.BytesIO()
+    Image.fromarray(image_array).save(buffer, format="JPEG")
+    jpg_bytes = buffer.getvalue()
+    return jpg_bytes
+
+
+def numpy_image_from_dict(image_data: Dict) -> np.ndarray:
+    """
+    Convert image dict to numpy array.
+
+    The image_data dict should have the following keys:
+    - bytes: bytes of the image in C order (row-major)
+    - dtype: data type of the pixel values
+    - shape: shape of the image (height, width, channels)
+    """
+
+    image_bytes = image_data['bytes']
+    image_dtype = image_data['dtype']
+    image_shape = image_data['shape']
+    return np.frombuffer(image_bytes, dtype=image_dtype).reshape(image_shape, order='C')
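The three helpers added above are the glue between the SocketIO payloads, the detector logic and the outbox. A small round-trip sketch (the import path is inferred from the package layout; the array values are placeholders):

    import numpy as np

    from learning_loop_node.helpers.misc import (jpg_bytes_to_numpy_array,
                                                 numpy_array_to_jpg_bytes,
                                                 numpy_image_from_dict)

    image = np.zeros((120, 160, 3), dtype=np.uint8)

    # serialize the way the SocketIO endpoints expect it ...
    payload = {'bytes': image.tobytes(order='C'), 'dtype': str(image.dtype), 'shape': image.shape}
    # ... and restore it on the receiving side
    restored = numpy_image_from_dict(payload)
    assert restored.shape == image.shape and restored.dtype == image.dtype

    # JPEG round trip, as the Outbox does before writing to disk
    jpg = numpy_array_to_jpg_bytes(image)
    decoded = jpg_bytes_to_numpy_array(jpg)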
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/annotator/conftest.py
RENAMED

@@ -18,7 +18,7 @@ async def setup_test_project():  # pylint: disable=redefined-outer-name
     try:
         await loop_communicator.delete("/zauberzeug/projects/pytest_nodelib_annotator?keep_images=true", timeout=10)
     except Exception:
-        logging.
+        logging.exception("Failed to delete project pytest_nodelib_annotator")
         sys.exit(1)
     await asyncio.sleep(1)
     project_conf = {
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/conftest.py
RENAMED

@@ -6,7 +6,7 @@ import shutil
 import socket
 from glob import glob
 from multiprocessing import Process, log_to_stderr
-from typing import AsyncGenerator
+from typing import AsyncGenerator

 import numpy as np
 import pytest

@@ -137,7 +137,7 @@ class MockDetectorLogic(DetectorLogic):  # pylint: disable=abstract-method
                              x=0, y=0, width=10, height=10,
                              model_name="mock", )])

-    def evaluate(self, image:
+    def evaluate(self, image: np.ndarray) -> ImageMetadata:
         return self.image_metadata
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py
RENAMED

@@ -1,6 +1,7 @@
 import os
 from typing import List

+import numpy as np
 import pytest

 from ....data_classes.image_metadata import BoxDetection, ImageMetadata, PointDetection

@@ -31,4 +32,4 @@ async def test_unexpected_observations_count(metadata: ImageMetadata, reason: Li
     outbox = Outbox()

     relevance_filter = RelevanceFilter(outbox)
-    assert await relevance_filter.may_upload_detections(metadata,
+    assert await relevance_filter.may_upload_detections(metadata, image=np.zeros((300, 600, 3), dtype=np.uint8), cam_id='0:0:0:0') == reason
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_client_communication.py
RENAMED

@@ -2,8 +2,10 @@ import asyncio
 import json
 import os

+import numpy as np
 import pytest
 import requests  # type: ignore
+from PIL import Image

 from ...data_classes import ModelInformation
 from ...detector.detector_node import DetectorNode

@@ -23,11 +25,10 @@ async def test_detector_path(test_detector_node: DetectorNode):


 async def test_sio_detect(test_detector_node, sio_client):
-
-        image_bytes = f.read()
+    image = np.array(Image.open(test_image_path))

     await asyncio.sleep(5)
-    result = await sio_client.call('detect', {'image':
+    result = await sio_client.call('detect', {'image': {'bytes': image.tobytes(order='C'), 'shape': image.shape, 'dtype': str(image.dtype)}})
     assert len(result['box_detections']) == 1
     assert result['box_detections'][0]['category_name'] == 'some_category_name'
     assert result['box_detections'][0]['category_id'] == 'some_id'

@@ -81,9 +82,8 @@ def test_rest_upload(test_detector_node: DetectorNode):
 async def test_sio_upload(test_detector_node: DetectorNode, sio_client):
     assert len(get_outbox_files(test_detector_node.outbox)) == 0

-
-
-    result = await sio_client.call('upload', {'image': image_bytes})
+    image = np.array(Image.open(test_image_path))
+    result = await sio_client.call('upload', {'image': {'bytes': image.tobytes(), 'shape': image.shape, 'dtype': str(image.dtype)}})
     assert result.get('status') == 'OK'
     assert len(get_outbox_files(test_detector_node.outbox)) == 2, 'There should be one image and one .json file.'
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_detector_node.py
RENAMED

@@ -10,7 +10,7 @@ from learning_loop_node.detector.detector_node import DetectorNode
 @pytest.mark.asyncio
 async def test_get_detections(detector_node: DetectorNode, monkeypatch):
     # Mock raw image data
-
+    np_image = np.zeros((100, 100, 3), dtype=np.uint8)

     # Mock relevance_filter and outbox
     filtered_upload_called = False

@@ -46,7 +46,7 @@ async def test_get_detections(detector_node: DetectorNode, monkeypatch):
     ]

     expected_save_args = {
-        'image':
+        'image': np_image,
         'detections': detector_node.detector_logic.image_metadata,  # type: ignore
     }

@@ -56,7 +56,7 @@ async def test_get_detections(detector_node: DetectorNode, monkeypatch):
     created_tasks.clear()

     result = await detector_node.get_detections(
-
+        image=np_image,
         camera_id="test_camera",
         tags=["test_tag"],
         source="test_source",
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_outbox.py
RENAMED

@@ -1,8 +1,8 @@
 import asyncio
-import io
 import os
 import shutil

+import numpy as np
 import pytest
 from PIL import Image

@@ -34,12 +34,12 @@ async def fix_upload_bug():
     test_outbox = Outbox()

     await test_outbox.set_mode('continuous_upload')
-    await test_outbox.save(
+    await test_outbox.save(get_test_image())
     await asyncio.sleep(6)
     assert await wait_for_outbox_count(test_outbox, 0, timeout=15), 'File was not cleared even though outbox should be in continuous_upload'
     assert test_outbox.upload_counter == 1

-    await test_outbox.save(
+    await test_outbox.save(get_test_image())
     await asyncio.sleep(6)
     # assert await wait_for_outbox_count(test_outbox, 0, timeout=90), 'File was not cleared even though outbox should be in continuous_upload'
     # assert test_outbox.upload_counter == 2

@@ -51,7 +51,7 @@ async def fix_upload_bug():
 @pytest.mark.asyncio
 async def test_set_outbox_mode(test_outbox: Outbox):
     await test_outbox.set_mode('stopped')
-    await test_outbox.save(
+    await test_outbox.save(get_test_image())
     assert await wait_for_outbox_count(test_outbox, 1)
     await asyncio.sleep(6)
     assert await wait_for_outbox_count(test_outbox, 1), 'File was cleared even though outbox should be stopped'

@@ -60,35 +60,13 @@ async def test_set_outbox_mode(test_outbox: Outbox):
     assert await wait_for_outbox_count(test_outbox, 0, timeout=15), 'File was not cleared even though outbox should be in continuous_upload'
     assert test_outbox.upload_counter == 1

-
-@pytest.mark.asyncio
-async def test_invalid_jpg_is_not_saved(test_outbox: Outbox):
-    invalid_bytes = b'invalid jpg'
-    await test_outbox.save(invalid_bytes)
-    assert len(test_outbox.get_upload_folders()) == 0
-
-
 # ------------------------------ Helper functions --------------------------------------


-def
-    img = Image.new('RGB', (600, 300), color=(73, 109, 137))
-    # convert img to
-
-    img_byte_arr = io.BytesIO()
-    img.save(img_byte_arr, format='JPEG')
-    return img_byte_arr.getvalue()
-
-    # return img.tobytes() # NOT WORKING
-
-    # img.save('/tmp/image.jpg')
-    # with open('/tmp/image.jpg', 'rb') as f:
-    #     data = f.read()
-
-    #     return data
-
-    # img = np.ones((300, 300, 3), np.uint8)*255  # NOT WORKING
-    # return img.tobytes()
+def get_test_image() -> np.ndarray:
+    img = Image.new('RGB', (600, 300), color=(73, 109, 137))  # type: ignore
+    # convert img to np array
+    return np.array(img)


 async def wait_for_outbox_count(outbox: Outbox, count: int, timeout: int = 10) -> bool:
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_relevance_filter.py
RENAMED

@@ -3,6 +3,7 @@ import os

 import numpy as np
 import pytest
+from PIL import Image

 from ...data_classes import BoxDetection, ImageMetadata, PointDetection
 from ...detector.detector_node import DetectorNode

@@ -30,7 +31,9 @@ async def test_filter_is_used_by_node(test_detector_node: DetectorNode, autouplo
     assert test_detector_node.outbox.path.startswith('/tmp')
     assert len(get_outbox_files(test_detector_node.outbox)) == 0

-
+    pil_image = Image.open(test_image_path)
+    image = np.array(pil_image)
+
     _ = await test_detector_node.get_detections(image, tags=[], camera_id='00:.....', autoupload=autoupload)
     # NOTE adding second images with identical detections
     _ = await test_detector_node.get_detections(image, tags=[], camera_id='00:.....', autoupload=autoupload)
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/testing_detector.py
RENAMED

@@ -1,6 +1,8 @@
 import logging
 from typing import List

+import numpy as np
+
 from learning_loop_node.data_classes import ImagesMetadata

 from ...data_classes import ImageMetadata

@@ -18,9 +20,9 @@ class TestingDetectorLogic(DetectorLogic):
     def init(self) -> None:
         pass

-    def evaluate(self, image:
+    def evaluate(self, image: np.ndarray) -> ImageMetadata:
         logging.info('evaluating')
         return self.det_to_return

-    def batch_evaluate(self, images: List[
+    def batch_evaluate(self, images: List[np.ndarray]) -> ImagesMetadata:
         raise NotImplementedError()
{learning_loop_node-0.17.2 → learning_loop_node-0.18.0}/pyproject.toml
RENAMED

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "learning_loop_node"
-version = "v0.17.2"
+version = "v0.18.0"
 description = "Python Library for Nodes which connect to the Zauberzeug Learning Loop"
 authors = ["Zauberzeug GmbH <info@zauberzeug.com>"]
 license = "MIT"

@@ -29,7 +29,7 @@ tqdm = "^4.63.0"
 pytest-mock = "3.6.1"
 pynvml = "^11.4.1"
 aiohttp = "^3.8.4"
-httpx = "^0.
+httpx = "^0.28.1"
 dacite = "^1.8.1"

 [tool.poetry.group.dev.dependencies]