learning-loop-node 0.16.1.tar.gz → 0.17.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release was flagged as potentially problematic.
Files changed (105)
  1. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/PKG-INFO +29 -21
  2. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/README.md +28 -20
  3. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/data_classes/__init__.py +6 -1
  4. learning_loop_node-0.17.0/learning_loop_node/data_classes/annotations.py +44 -0
  5. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/data_classes/detections.py +3 -7
  6. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/data_classes/image_metadata.py +15 -1
  7. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/detector_logic.py +9 -9
  8. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/detector_node.py +53 -47
  9. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/inbox_filter/relevance_filter.py +13 -11
  10. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/outbox.py +3 -17
  11. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/rest/detect.py +10 -6
  12. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/rest/upload.py +11 -1
  13. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/helpers/background_tasks.py +0 -1
  14. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/conftest.py +2 -3
  15. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py +3 -3
  16. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/test_detector_node.py +18 -8
  17. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/test_relevance_filter.py +1 -1
  18. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/testing_detector.py +8 -4
  19. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/test_helper.py +19 -3
  20. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/states/test_state_detecting.py +2 -1
  21. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/pyproject.toml +1 -1
  22. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/__init__.py +0 -0
  23. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/annotation/__init__.py +0 -0
  24. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/annotation/annotator_logic.py +0 -0
  25. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/annotation/annotator_node.py +0 -0
  26. /learning_loop_node-0.16.1/learning_loop_node/data_classes/annotations.py → /learning_loop_node-0.17.0/learning_loop_node/data_classes/annotation_data.py +0 -0
  27. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/data_classes/general.py +0 -0
  28. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/data_classes/socket_response.py +0 -0
  29. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/data_classes/training.py +0 -0
  30. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/data_exchanger.py +0 -0
  31. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/__init__.py +0 -0
  32. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/exceptions.py +0 -0
  33. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/inbox_filter/__init__.py +0 -0
  34. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/inbox_filter/cam_observation_history.py +0 -0
  35. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/rest/__init__.py +0 -0
  36. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/rest/about.py +0 -0
  37. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/rest/backdoor_controls.py +0 -0
  38. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/rest/model_version_control.py +0 -0
  39. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/rest/operation_mode.py +0 -0
  40. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/detector/rest/outbox_mode.py +0 -0
  41. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/enums/__init__.py +0 -0
  42. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/enums/annotator.py +0 -0
  43. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/enums/detector.py +0 -0
  44. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/enums/general.py +0 -0
  45. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/enums/trainer.py +0 -0
  46. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/examples/novelty_score_updater.py +0 -0
  47. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/globals.py +0 -0
  48. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/helpers/__init__.py +0 -0
  49. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/helpers/environment_reader.py +0 -0
  50. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/helpers/gdrive_downloader.py +0 -0
  51. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/helpers/log_conf.py +0 -0
  52. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/helpers/misc.py +0 -0
  53. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/helpers/run.py +0 -0
  54. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/loop_communication.py +0 -0
  55. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/node.py +0 -0
  56. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/py.typed +0 -0
  57. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/rest.py +0 -0
  58. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/__init__.py +0 -0
  59. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/annotator/__init__.py +0 -0
  60. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/annotator/conftest.py +0 -0
  61. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/annotator/pytest.ini +0 -0
  62. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/annotator/test_annotator_node.py +0 -0
  63. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/__init__.py +0 -0
  64. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/inbox_filter/__init__.py +0 -0
  65. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/inbox_filter/test_observation.py +0 -0
  66. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py +0 -0
  67. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/pytest.ini +0 -0
  68. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/test.jpg +0 -0
  69. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/test_client_communication.py +0 -0
  70. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/detector/test_outbox.py +0 -0
  71. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/general/__init__.py +0 -0
  72. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/general/conftest.py +0 -0
  73. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/general/pytest.ini +0 -0
  74. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/general/test_data/file_1.txt +0 -0
  75. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/general/test_data/file_2.txt +0 -0
  76. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/general/test_data/model.json +0 -0
  77. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/general/test_data_classes.py +0 -0
  78. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/general/test_downloader.py +0 -0
  79. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/general/test_learning_loop_node.py +0 -0
  80. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/__init__.py +0 -0
  81. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/conftest.py +0 -0
  82. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/pytest.ini +0 -0
  83. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/state_helper.py +0 -0
  84. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/states/__init__.py +0 -0
  85. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/states/test_state_cleanup.py +0 -0
  86. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/states/test_state_download_train_model.py +0 -0
  87. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/states/test_state_prepare.py +0 -0
  88. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/states/test_state_sync_confusion_matrix.py +0 -0
  89. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/states/test_state_train.py +0 -0
  90. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/states/test_state_upload_detections.py +0 -0
  91. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/states/test_state_upload_model.py +0 -0
  92. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/test_errors.py +0 -0
  93. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/test_trainer_states.py +0 -0
  94. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/tests/trainer/testing_trainer_logic.py +0 -0
  95. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/__init__.py +0 -0
  96. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/downloader.py +0 -0
  97. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/exceptions.py +0 -0
  98. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/executor.py +0 -0
  99. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/io_helpers.py +0 -0
  100. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/rest/__init__.py +0 -0
  101. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/rest/backdoor_controls.py +0 -0
  102. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/test_executor.py +0 -0
  103. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/trainer_logic.py +0 -0
  104. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/trainer_logic_generic.py +0 -0
  105. {learning_loop_node-0.16.1 → learning_loop_node-0.17.0}/learning_loop_node/trainer/trainer_node.py +0 -0
`PKG-INFO`

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: learning-loop-node
-Version: 0.16.1
+Version: 0.17.0
 Summary: Python Library for Nodes which connect to the Zauberzeug Learning Loop
 Home-page: https://github.com/zauberzeug/learning_loop_node
 License: MIT
@@ -85,34 +85,48 @@ from learning_loop_node/learning_loop_node
 
 Detector Nodes are normally deployed on edge devices like robots or machinery but can also run in the cloud to provide backend services for an app or similar. These nodes register themself at the Learning Loop. They provide REST and Socket.io APIs to run inference on images. The processed images can automatically be used for active learning: e.g. uncertain predictions will be send to the Learning Loop.
 
-### Running Inference
+### Inference API
 
 Images can be send to the detector node via socketio or rest.
-The later approach can be used via curl,
+Via **REST** you may provide the following parameters:
+
+- `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
+- `camera_id`: a camera identifier (string) used to improve the autoupload filtering
+- `tags`: comma separated list of tags to add to the image in the learning loop
+- `source`: optional source identifier (str) for the image (e.g. a robot id)
+- `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
+- `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
 
 Example usage:
 
-`curl --request POST -F 'file=@test.jpg' localhost:8004/detect`
+`curl --request POST -F 'file=@test.jpg' -H 'autoupload: all' -H 'camera_id: front_cam' localhost:8004/detect`
+
+To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata.
+Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).
+
+### Upload API
 
-Where 8804 is the specified port in this example.
-You can additionally provide the following camera parameters:
+The detector has a **REST** endpoint to upload images (and detections) to the Learning Loop. The endpoint takes a POST request with one or multiple images. The images are expected to be in jpg format. The following optional parameters may be set via headers:
 
-- `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled` (example curl parameter `-H 'autoupload: all'`)
-- `camera-id`: a string which groups images for submission together (example curl parameter `-H 'camera-id: front_cam'`)
+- `source`: optional source identifier (str) for the image (e.g. a robot id)
+- `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
+- `upload_priority`: A boolean flag to prioritize the upload (defaults to False)
 
-To use the socketio interface, the caller needs to connect to the detector node's socketio server and emit the `detect` or `batch_detect` event with the image data and image metadata. Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).
+Example:
 
-The detector also has a sio **upload endpoint** that can be used to upload images and detections to the learning loop. The function receives a json dictionary, with the following entries:
+`curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`
+
+The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a json dictionary, with the following entries:
 
 - `image`: the image data in jpg format
-- `tags`: a list of strings. If not provided the tag is `picked_by_system`
-- `detections`: a dictionary representing the detections. UUIDs for the classes are automatically determined based on the category names. This field is optional. If not provided, no detections are uploaded.
-- `source`: optional source identifier for the image
-- `creation_date`: optional creation date for the image
+
+- `metadata`: a dictionary representing the image metadata. If metadata contains detections and/or annotations, UUIDs for the classes are automatically determined based on the category names. Metadata should follow the schema of the `ImageMetadata` data class.
 - `upload_priority`: boolean flag to prioritize the upload (defaults to False)
 
 The endpoint returns None if the upload was successful and an error message otherwise.
 
+For both ways to upload an image, the tag `picked_by_system` is automatically added to the image metadata.
+
 ### Changing the model versioning mode
 
 The detector can be configured to one of the following behaviors:
@@ -164,12 +178,6 @@ The outbox mode can also be queried via:
 - HTTP: `curl http://localhost/outbox_mode`
 - SocketIO: `sio.emit('get_outbox_mode')`
 
-### Explicit upload
-
-The detector has a REST endpoint to upload images (and detections) to the Learning Loop. The endpoint takes a POST request with the image and optionally the detections. The image is expected to be in jpg format. The detections are expected to be a json dictionary. Example:
-
-`curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`
-
 ## Trainer Node
 
 Trainers fetch the images and anntoations from the Learning Loop to train new models.
@@ -184,7 +192,7 @@ A Conveter Node converts models from one format into another.
 
 ...
 
-#### Test operability
+### Test operability
 
 Assumend there is a Converter Node which converts models of format 'format_a' into 'format_b'.
 Upload a model with
```
`README.md`: the same content changes as in the PKG-INFO long description above, applied at `@@ -45,34 +45,48 @@`, `@@ -124,12 +138,6 @@` and `@@ -144,7 +152,7 @@` (README line numbers are offset by 40 relative to PKG-INFO).
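The reworked Inference and Upload API sections above describe the new parameters of the REST `/detect` endpoint. As a rough client-side sketch, assuming, as in the curl example, that all parameters travel as HTTP headers and that a node is listening on localhost:8004:

```python
# Hypothetical client sketch for the 0.17.0 /detect endpoint; the header
# names follow the README above, the port and file name are assumptions.
import requests

with open('test.jpg', 'rb') as f:
    response = requests.post(
        'http://localhost:8004/detect',
        files={'file': f},
        headers={
            'autoupload': 'all',         # 'filtered' (default), 'all' or 'disabled'
            'camera_id': 'front_cam',    # groups images for the relevance filter
            'tags': 'demo,night_shift',  # comma separated tags
            'source': 'robot_42',        # optional source identifier
            'creation_date': '2023-01-30T12:34:56',  # optional isoformat date
        },
        timeout=10,
    )
response.raise_for_status()
print(response.json())  # detections as ImageMetadata-shaped JSON
```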
`learning_loop_node/data_classes/__init__.py`

```diff
@@ -1,4 +1,9 @@
-from .annotations import AnnotationData, SegmentationAnnotation, ToolOutput, UserInput
+from .annotation_data import (
+    AnnotationData,
+    SegmentationAnnotation,
+    ToolOutput,
+    UserInput,
+)
 from .detections import (
     BoxDetection,
     ClassificationDetection,
```
`learning_loop_node-0.17.0/learning_loop_node/data_classes/annotations.py` (new file)

```diff
@@ -0,0 +1,44 @@
+
+import sys
+from dataclasses import dataclass, field
+from typing import Optional
+
+KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}
+
+
+@dataclass(**KWONLY_SLOTS)
+class BoxAnnotation():
+    """Coordinates according to COCO format. x,y is the top left corner of the box.
+    x increases to the right, y increases downwards.
+    """
+    category_name: str = field(metadata={'description': 'Category name'})
+    x: int = field(metadata={'description': 'X coordinate (left to right)'})
+    y: int = field(metadata={'description': 'Y coordinate (top to bottom)'})
+    width: int = field(metadata={'description': 'Width'})
+    height: int = field(metadata={'description': 'Height'})
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
+
+    def __str__(self):
+        return f'x:{int(self.x)} y: {int(self.y)}, w: {int(self.width)} h: {int(self.height)} -> {self.category_name}'
+
+
+@dataclass(**KWONLY_SLOTS)
+class PointAnnotation():
+    """Coordinates according to COCO format. x,y is the center of the point.
+    x increases to the right, y increases downwards."""
+    category_name: str = field(metadata={'description': 'Category name'})
+    x: float = field(metadata={'description': 'X coordinate (right)'})
+    y: float = field(metadata={'description': 'Y coordinate (down)'})
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
+
+    def __str__(self):
+        return f'x:{int(self.x)} y: {int(self.y)}, -> {self.category_name}'
+
+
+@dataclass(**KWONLY_SLOTS)
+class ClassificationAnnotation():
+    category_name: str = field(metadata={'description': 'Category name'})
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
+
+    def __str__(self):
+        return f'-> {self.category_name}'
```
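A minimal usage sketch for the new annotation classes. Note that on Python 3.10+ the `KWONLY_SLOTS` dataclasses are keyword-only, so fields are best passed by name:

```python
# Sketch: constructing the 0.17.0 annotation classes; values are illustrative.
from learning_loop_node.data_classes.annotations import BoxAnnotation, PointAnnotation

box = BoxAnnotation(category_name='person', x=10, y=20, width=100, height=200)
point = PointAnnotation(category_name='nose', x=60.0, y=45.5)
print(box)    # x:10 y: 20, w: 100 h: 200 -> person
print(point)  # x:60 y: 45, -> nose
```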
`learning_loop_node/data_classes/detections.py`

```diff
@@ -9,10 +9,6 @@ import numpy as np
 KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}
 
 
-def current_datetime():
-    return datetime.now().isoformat(sep='_', timespec='milliseconds')
-
-
 @dataclass(**KWONLY_SLOTS)
 class BoxDetection():
     """Coordinates according to COCO format. x,y is the top left corner of the box.
@@ -25,7 +21,7 @@ class BoxDetection():
     height: int = field(metadata={'description': 'Height'})
     model_name: str = field(metadata={'description': 'Model name'})
     confidence: float = field(metadata={'description': 'Confidence'})
-    category_id: Optional[str] = field(default=None, metadata={'description': 'Category ID'})
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
 
     def intersection_over_union(self, other_detection: 'BoxDetection') -> float:
         # https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
@@ -59,7 +55,7 @@ class PointDetection():
     y: float = field(metadata={'description': 'Y coordinate (down)'})
     model_name: str = field(metadata={'description': 'Model name'})
     confidence: float = field(metadata={'description': 'Confidence'})
-    category_id: Optional[str] = field(default=None, metadata={'description': 'Category ID'})
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
 
     def distance(self, other: 'PointDetection') -> float:
         return np.sqrt((other.x - self.x)**2 + (other.y - self.y)**2)
@@ -73,7 +69,7 @@ class ClassificationDetection():
     category_name: str = field(metadata={'description': 'Category name'})
     model_name: str = field(metadata={'description': 'Model name'})
     confidence: float = field(metadata={'description': 'Confidence'})
-    category_id: Optional[str] = field(default=None, metadata={'description': 'Category ID'})
+    category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
 
     def __str__(self):
         return f'c: {self.confidence:.2f} -> {self.category_name}'
```
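The detection classes themselves are unchanged apart from the `category_id` description; the context lines above show that `BoxDetection.intersection_over_union()` is still available. A small sketch, with illustrative field values:

```python
# Sketch: BoxDetection usage stays the same in 0.17.0; fields passed by name.
from learning_loop_node.data_classes import BoxDetection

a = BoxDetection(category_name='person', x=0, y=0, width=100, height=100,
                 model_name='demo', confidence=0.9)
b = BoxDetection(category_name='person', x=50, y=0, width=100, height=100,
                 model_name='demo', confidence=0.8)
print(a.intersection_over_union(b))  # roughly one third: overlap is half of each box
```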
`learning_loop_node/data_classes/image_metadata.py`

```diff
@@ -4,7 +4,13 @@ from dataclasses import dataclass, field
 from datetime import datetime
 from typing import List, Optional
 
-from .detections import BoxDetection, ClassificationDetection, PointDetection, SegmentationDetection
+from .annotations import BoxAnnotation, ClassificationAnnotation, PointAnnotation
+from .detections import (
+    BoxDetection,
+    ClassificationDetection,
+    PointDetection,
+    SegmentationDetection,
+)
 
 # pylint: disable=too-many-instance-attributes
 
@@ -25,6 +31,14 @@ class ImageMetadata():
         'description': 'List of segmentation detections'})
     classification_detections: List[ClassificationDetection] = field(default_factory=list, metadata={
         'description': 'List of classification detections'})
+
+    box_annotations: List[BoxAnnotation] = field(default_factory=list, metadata={
+        'description': 'List of box annotations'})
+    point_annotations: List[PointAnnotation] = field(default_factory=list, metadata={
+        'description': 'List of point annotations'})
+    classification_annotation: Optional[ClassificationAnnotation] = field(default=None, metadata={
+        'description': 'Classification annotation'})
+
     tags: List[str] = field(default_factory=list, metadata={
         'description': 'List of tags'})
```
`learning_loop_node/detector/detector_logic.py`

```diff
@@ -42,18 +42,18 @@ class DetectorLogic():
     def init(self):
         """Called when a (new) model was loaded. Initialize the model. Model information available via `self.model_info`"""
 
-    def evaluate_with_all_info(self, image: bytes, tags: List[str], source: Optional[str] = None, creation_date: Optional[str] = None) -> ImageMetadata:  # pylint: disable=unused-argument
-        """Called by the detector node when an image should be evaluated (REST or SocketIO).
-        Tags, source come from the caller and may be used in this function.
-        By default, this function simply calls `evaluate`"""
-        return self.evaluate(image)
-
     @abstractmethod
-    def evaluate(self, image: bytes) -> ImageMetadata:
+    def evaluate(self, image: bytes) -> ImageMetadata:  # pylint: disable=unused-argument
         """Evaluate the image and return the detections.
-        The object should return empty detections if it is not initialized"""
+
+        Called by the detector node when an image should be evaluated (REST or SocketIO).
+        The resulting detections should be stored in the ImageMetadata.
+        Tags stored in the ImageMetadata will be uploaded to the learning loop.
+        The function should return empty metadata if the detector is not initialized."""
 
     @abstractmethod
     def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
         """Evaluate a batch of images and return the detections.
-        The object should return empty detections if it is not initialized"""
+        The resulting detections per image should be stored in the ImagesMetadata.
+        Tags stored in the ImagesMetadata will be uploaded to the learning loop.
+        The function should return empty metadata if the detector is not initialized."""
```
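Under the 0.17.0 contract, `evaluate` is the single entry point (`evaluate_with_all_info` is gone) and any tags placed in the returned metadata are uploaded with the image. A hedged sketch of a subclass follows; `MyModel` and its `predict()` are hypothetical stand-ins for a real inference backend, and the constructor argument assumes `DetectorLogic`'s established model-format parameter:

```python
# Sketch of a custom detector under the 0.17.0 DetectorLogic contract.
from typing import List

from learning_loop_node.data_classes import BoxDetection, ImageMetadata, ImagesMetadata
from learning_loop_node.detector.detector_logic import DetectorLogic


class MyDetector(DetectorLogic):

    def __init__(self) -> None:
        super().__init__('my_format')  # model format; assumed constructor argument
        self.model = None

    def init(self) -> None:
        self.model = MyModel.load(self.model_info)  # hypothetical loader

    def evaluate(self, image: bytes) -> ImageMetadata:
        metadata = ImageMetadata()
        if self.model is None:  # not initialized -> return empty metadata
            return metadata
        for box in self.model.predict(image):  # hypothetical inference call
            metadata.box_detections.append(BoxDetection(
                category_name=box.label, x=box.x, y=box.y,
                width=box.w, height=box.h,
                model_name='my_model', confidence=box.conf))
        return metadata

    def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
        # Simple per-image loop; a real detector would batch the inference.
        return ImagesMetadata(items=[self.evaluate(image) for image in images])
```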
`learning_loop_node/detector/detector_node.py`

```diff
@@ -8,6 +8,11 @@ from dataclasses import asdict
 from datetime import datetime
 from typing import Dict, List, Optional
 
+try:
+    from typing import Literal
+except ImportError:  # Python <= 3.8
+    from typing_extensions import Literal  # type: ignore
+
 import socketio
 from dacite import from_dict
 from fastapi.encoders import jsonable_encoder
@@ -223,10 +228,10 @@ class DetectorNode(Node):
             try:
                 det = await self.get_detections(
                     raw_image=data['image'],
-                    camera_id=data.get('camera-id', None) or data.get('mac', None),
+                    camera_id=data.get('camera_id', None),
                     tags=data.get('tags', []),
                     source=data.get('source', None),
-                    autoupload=data.get('autoupload', None),
+                    autoupload=data.get('autoupload', 'filtered'),
                     creation_date=data.get('creation_date', None)
                 )
                 if det is None:
@@ -245,9 +250,9 @@ class DetectorNode(Node):
                 det = await self.get_batch_detections(
                     raw_images=data['images'],
                     tags=data.get('tags', []),
-                    camera_id=data.get('camera-id', None) or data.get('mac', None),
+                    camera_id=data.get('camera_id', None),
                     source=data.get('source', None),
-                    autoupload=data.get('autoupload', None),
+                    autoupload=data.get('autoupload', 'filtered'),
                     creation_date=data.get('creation_date', None)
                 )
                 if det is None:
@@ -296,27 +301,30 @@ class DetectorNode(Node):
 
         @self.sio.event
         async def upload(sid, data: Dict) -> Dict:
-            """Upload an image with detections"""
+            """Upload a single image with metadata to the learning loop.
 
+            The data dict must contain:
+            - image: The image bytes to upload
+            - metadata: The metadata for the image (optional)
+            """
             self.log.debug('Processing upload via socketio.')
-            detection_data = data.get('detections', {})
-            if detection_data and self.detector_logic.model_info is not None:
+
+            metadata = data.get('metadata', None)
+            if metadata:
                 try:
-                    image_metadata = from_dict(data_class=ImageMetadata, data=detection_data)
+                    image_metadata = from_dict(data_class=ImageMetadata, data=metadata)
                 except Exception as e:
                     self.log.exception('could not parse detections')
                     return {'error': str(e)}
-                image_metadata = self.add_category_id_to_detections(self.detector_logic.model_info, image_metadata)
+                if self.detector_logic.model_info is not None:
+                    image_metadata = self.add_category_id_to_detections(self.detector_logic.model_info, image_metadata)
             else:
                 image_metadata = ImageMetadata()
 
             try:
                 await self.upload_images(
                     images=[data['image']],
-                    image_metadata=image_metadata,
-                    tags=data.get('tags', []),
-                    source=data.get('source', None),
-                    creation_date=data.get('creation_date', None),
+                    images_metadata=ImagesMetadata(items=[image_metadata]) if metadata else None,
                     upload_priority=data.get('upload_priority', False)
                 )
             except Exception as e:
@@ -506,34 +514,34 @@ class DetectorNode(Node):
                              *,
                              camera_id: Optional[str] = None,
                              source: Optional[str] = None,
-                             autoupload: Optional[str] = None,
+                             autoupload: Literal['filtered', 'all', 'disabled'],
                              creation_date: Optional[str] = None) -> ImageMetadata:
         """ Main processing function for the detector node when an image is received via REST or SocketIO.
         This function infers the detections from the image, cares about uploading to the loop and returns the detections as ImageMetadata object.
         Note: raw_image is a numpy array of type uint8, but not in the correct shape!
-        It can be converted e.g. using cv2.imdecode(raw_image, cv2.IMREAD_COLOR)"""
+        It can be converted e.g. using cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)"""
 
         await self.detection_lock.acquire()
-        detections = await run.io_bound(self.detector_logic.evaluate_with_all_info, raw_image, tags, source, creation_date)
+        metadata = await run.io_bound(self.detector_logic.evaluate, raw_image)
+        metadata.tags.extend(tags)
+        metadata.source = source
+        metadata.created = creation_date
         self.detection_lock.release()
 
-        fix_shape_detections(detections)
-        n_bo, n_cl = len(detections.box_detections), len(detections.classification_detections)
-        n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
+        fix_shape_detections(metadata)
+        n_bo, n_cl = len(metadata.box_detections), len(metadata.classification_detections)
+        n_po, n_se = len(metadata.point_detections), len(metadata.segmentation_detections)
         self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)
 
-        autoupload = autoupload or 'filtered'
-        if autoupload == 'filtered' and camera_id is not None:
-            background_tasks.create(self.relevance_filter.may_upload_detections(
-                detections, camera_id, raw_image, tags, source, creation_date
-            ))
+        if autoupload == 'filtered':
+            background_tasks.create(self.relevance_filter.may_upload_detections(metadata, camera_id, raw_image))
         elif autoupload == 'all':
-            background_tasks.create(self.outbox.save(raw_image, detections, tags, source, creation_date))
+            background_tasks.create(self.outbox.save(raw_image, metadata))
         elif autoupload == 'disabled':
             pass
         else:
            self.log.error('unknown autoupload value %s', autoupload)
-        return detections
+        return metadata
 
     async def get_batch_detections(self,
                                    raw_images: List[bytes],
@@ -541,7 +549,7 @@ class DetectorNode(Node):
                                    *,
                                    camera_id: Optional[str] = None,
                                    source: Optional[str] = None,
-                                   autoupload: Optional[str] = None,
+                                   autoupload: str = 'filtered',
                                    creation_date: Optional[str] = None) -> ImagesMetadata:
         """ Processing function for the detector node when a a batch inference is requested via SocketIO.
         This function infers the detections from all images, cares about uploading to the loop and returns the detections as a list of ImageMetadata."""
@@ -556,13 +564,10 @@ class DetectorNode(Node):
         n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
         self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)
 
-        autoupload = autoupload or 'filtered'
-        if autoupload == 'filtered' and camera_id is not None:
-            background_tasks.create(self.relevance_filter.may_upload_detections(
-                detections, camera_id, raw_image, tags, source, creation_date
-            ))
+        if autoupload == 'filtered':
+            background_tasks.create(self.relevance_filter.may_upload_detections(detections, camera_id, raw_image))
         elif autoupload == 'all':
-            background_tasks.create(self.outbox.save(raw_image, detections, tags, source, creation_date))
+            background_tasks.create(self.outbox.save(raw_image, detections))
         elif autoupload == 'disabled':
             pass
         else:
@@ -572,24 +577,25 @@ class DetectorNode(Node):
     async def upload_images(
             self, *,
             images: List[bytes],
-            image_metadata: Optional[ImageMetadata] = None,
-            tags: Optional[List[str]] = None,
-            source: Optional[str],
-            creation_date: Optional[str],
+            images_metadata: Optional[ImagesMetadata] = None,
            upload_priority: bool = False
     ) -> None:
         """Save images to the outbox using an asyncio executor.
-        Used by SIO and REST upload endpoints."""
+        Used by SIO and REST upload endpoints.
 
-        if image_metadata is None:
-            image_metadata = ImageMetadata()
-        if tags is None:
-            tags = []
+        :param images: List of images to upload
+        :param images_metadata: Optional metadata for all images
+        :param upload_priority: Whether to upload the images with priority
+        :raises ValueError: If the number of images and number of metadata items do not match
+        """
 
-        tags.append('picked_by_system')
+        if images_metadata and len(images_metadata.items) != len(images):
+            raise ValueError('Number of images and number of metadata items do not match')
 
-        for image in images:
-            await self.outbox.save(image, image_metadata, tags, source, creation_date, upload_priority)
+        for i, image in enumerate(images):
+            image_metadata = images_metadata.items[i] if images_metadata else ImageMetadata()
+            image_metadata.tags.append('picked_by_system')
+            await self.outbox.save(image, image_metadata, upload_priority)
 
     def add_category_id_to_detections(self, model_info: ModelInformation, image_metadata: ImageMetadata):
         def find_category_id_by_name(categories: List[Category], category_name: str):
@@ -628,9 +634,9 @@ def step_into(new_dir):
     os.chdir(previous_dir)
 
 
-def fix_shape_detections(detections: ImageMetadata):
+def fix_shape_detections(metadata: ImageMetadata):
     # TODO This is a quick fix.. check how loop upload detections deals with this
-    for seg_detection in detections.segmentation_detections:
+    for seg_detection in metadata.segmentation_detections:
         if isinstance(seg_detection.shape, Shape):
             points = ','.join([str(value) for p in seg_detection.shape.points for _,
                                value in asdict(p).items()])
```
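On the client side, the reworked SocketIO `upload` event now expects a `metadata` dict following the `ImageMetadata` schema instead of the old `detections`/`tags`/`source`/`creation_date` entries. A sketch using python-socketio; the port and file name are assumptions:

```python
# Sketch: uploading an image with metadata via the 0.17.0 SocketIO endpoint.
from dataclasses import asdict

import socketio

from learning_loop_node.data_classes import ImageMetadata

sio = socketio.Client()
sio.connect('http://localhost:8004')  # port is an assumption

with open('test.jpg', 'rb') as f:
    image = f.read()

metadata = ImageMetadata(tags=['manual_upload'], source='robot_42')
result = sio.call('upload', {
    'image': image,
    'metadata': asdict(metadata),
    'upload_priority': False,
})
print(result)  # None on success, {'error': ...} otherwise
sio.disconnect()
```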
`learning_loop_node/detector/inbox_filter/relevance_filter.py`

```diff
@@ -9,28 +9,30 @@ class RelevanceFilter():
 
     def __init__(self, outbox: Outbox) -> None:
         self.cam_histories: Dict[str, CamObservationHistory] = {}
+        self.unknown_cam_history: CamObservationHistory = CamObservationHistory()
         self.outbox: Outbox = outbox
 
     async def may_upload_detections(self,
                                     image_metadata: ImageMetadata,
-                                    cam_id: str,
-                                    raw_image: bytes,
-                                    tags: List[str],
-                                    source: Optional[str] = None,
-                                    creation_date: Optional[str] = None) -> List[str]:
+                                    cam_id: Optional[str],
+                                    raw_image: bytes) -> List[str]:
         """Check if the detection should be uploaded to the outbox.
         If so, upload it and return the list of causes for the upload.
         """
         for group in self.cam_histories.values():
             group.forget_old_detections()
 
-        if cam_id not in self.cam_histories:
-            self.cam_histories[cam_id] = CamObservationHistory()
-        causes = self.cam_histories[cam_id].get_causes_to_upload(image_metadata)
+        if cam_id is None:
+            history = self.unknown_cam_history
+        else:
+            if cam_id not in self.cam_histories:
+                self.cam_histories[cam_id] = CamObservationHistory()
+            history = self.cam_histories[cam_id]
+
+        causes = history.get_causes_to_upload(image_metadata)
         if len(image_metadata) >= 80:
             causes.append('unexpected_observations_count')
         if len(causes) > 0:
-            tags = tags if tags is not None else []
-            tags.extend(causes)
-            await self.outbox.save(raw_image, image_metadata, tags, source, creation_date)
+            image_metadata.tags.extend(causes)
+            await self.outbox.save(raw_image, image_metadata)
         return causes
```
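The filter now tolerates a missing camera id: such images share a single `unknown_cam_history`. The new call shape, as a sketch inside an async context; `relevance_filter`, `image_metadata` and `raw_image` are assumed to exist:

```python
# Sketch: cam_id may now be None; tags travel inside the ImageMetadata.
causes = await relevance_filter.may_upload_detections(image_metadata, None, raw_image)
print(causes)  # list of upload causes; empty if nothing was uploaded
```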
`learning_loop_node/detector/outbox.py`

```diff
@@ -78,9 +78,6 @@ class Outbox():
     async def save(self,
                    image: bytes,
                    image_metadata: Optional[ImageMetadata] = None,
-                   tags: Optional[List[str]] = None,
-                   source: Optional[str] = None,
-                   creation_date: Optional[str] = None,
                    upload_priority: bool = False) -> None:
 
         if not await run.io_bound(self._is_valid_jpg, image):
@@ -89,12 +86,11 @@ class Outbox():
 
         if image_metadata is None:
             image_metadata = ImageMetadata()
-        if not tags:
-            tags = []
+
         identifier = datetime.now().isoformat(sep='_', timespec='microseconds')
 
         try:
-            await run.io_bound(self._save_files_to_disk, identifier, image, image_metadata, tags, source, creation_date, upload_priority)
+            await run.io_bound(self._save_files_to_disk, identifier, image, image_metadata, upload_priority)
         except Exception as e:
             self.log.error('Failed to save files for image %s: %s', identifier, e)
             return
@@ -110,9 +106,6 @@ class Outbox():
                             identifier: str,
                             image: bytes,
                             image_metadata: ImageMetadata,
-                            tags: List[str],
-                            source: Optional[str],
-                            creation_date: Optional[str],
                             upload_priority: bool) -> None:
         subpath = 'priority' if upload_priority else 'normal'
         full_path = f'{self.path}/{subpath}/{identifier}'
@@ -120,14 +113,6 @@ class Outbox():
             raise FileExistsError(f'Directory with identifier {identifier} already exists')
 
         tmp = f'{GLOBALS.data_folder}/tmp/{identifier}'
-        image_metadata.tags = tags
-        if self._is_valid_isoformat(creation_date):
-            image_metadata.created = creation_date
-        else:
-            image_metadata.created = identifier
-
-        image_metadata.source = source or 'unknown'
-
         os.makedirs(tmp, exist_ok=True)
 
         with open(tmp + f'/image_{identifier}.json', 'w') as f:
@@ -139,6 +124,7 @@ class Outbox():
         if not os.path.exists(tmp):
             self.log.error('Could not rename %s to %s', tmp, full_path)
             raise FileNotFoundError(f'Could not rename {tmp} to {full_path}')
+
         os.rename(tmp, full_path)
```