learning-loop-node 0.17.1__tar.gz → 0.18.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of learning-loop-node might be problematic.

Files changed (105):
  1. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/PKG-INFO +25 -9
  2. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/README.md +23 -7
  3. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/detector_logic.py +5 -2
  4. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/detector_node.py +74 -22
  5. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/inbox_filter/cam_observation_history.py +13 -4
  6. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/inbox_filter/relevance_filter.py +4 -2
  7. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/outbox.py +12 -7
  8. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/detect.py +10 -9
  9. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/upload.py +8 -4
  10. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/helpers/background_tasks.py +4 -4
  11. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/helpers/misc.py +34 -1
  12. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/annotator/conftest.py +1 -1
  13. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/conftest.py +2 -2
  14. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py +2 -1
  15. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_client_communication.py +6 -6
  16. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_detector_node.py +3 -3
  17. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_outbox.py +8 -30
  18. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test_relevance_filter.py +4 -1
  19. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/testing_detector.py +4 -2
  20. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/pyproject.toml +2 -2
  21. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/__init__.py +0 -0
  22. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/annotation/__init__.py +0 -0
  23. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/annotation/annotator_logic.py +0 -0
  24. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/annotation/annotator_node.py +0 -0
  25. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/__init__.py +0 -0
  26. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/annotation_data.py +0 -0
  27. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/annotations.py +0 -0
  28. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/detections.py +0 -0
  29. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/general.py +0 -0
  30. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/image_metadata.py +0 -0
  31. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/socket_response.py +0 -0
  32. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/data_classes/training.py +0 -0
  33. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/data_exchanger.py +0 -0
  34. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/__init__.py +0 -0
  35. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/exceptions.py +0 -0
  36. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/inbox_filter/__init__.py +0 -0
  37. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/__init__.py +0 -0
  38. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/about.py +0 -0
  39. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/backdoor_controls.py +0 -0
  40. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/model_version_control.py +0 -0
  41. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/operation_mode.py +0 -0
  42. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/detector/rest/outbox_mode.py +0 -0
  43. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/enums/__init__.py +0 -0
  44. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/enums/annotator.py +0 -0
  45. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/enums/detector.py +0 -0
  46. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/enums/general.py +0 -0
  47. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/enums/trainer.py +0 -0
  48. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/examples/novelty_score_updater.py +0 -0
  49. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/globals.py +0 -0
  50. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/helpers/__init__.py +0 -0
  51. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/helpers/environment_reader.py +0 -0
  52. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/helpers/gdrive_downloader.py +0 -0
  53. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/helpers/log_conf.py +0 -0
  54. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/helpers/run.py +0 -0
  55. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/loop_communication.py +0 -0
  56. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/node.py +0 -0
  57. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/py.typed +0 -0
  58. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/rest.py +0 -0
  59. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/__init__.py +0 -0
  60. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/annotator/__init__.py +0 -0
  61. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/annotator/pytest.ini +0 -0
  62. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/annotator/test_annotator_node.py +0 -0
  63. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/__init__.py +0 -0
  64. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/inbox_filter/__init__.py +0 -0
  65. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/inbox_filter/test_observation.py +0 -0
  66. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py +0 -0
  67. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/pytest.ini +0 -0
  68. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/detector/test.jpg +0 -0
  69. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/__init__.py +0 -0
  70. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/conftest.py +0 -0
  71. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/pytest.ini +0 -0
  72. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_data/file_1.txt +0 -0
  73. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_data/file_2.txt +0 -0
  74. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_data/model.json +0 -0
  75. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_data_classes.py +0 -0
  76. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_downloader.py +0 -0
  77. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/general/test_learning_loop_node.py +0 -0
  78. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/test_helper.py +0 -0
  79. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/__init__.py +0 -0
  80. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/conftest.py +0 -0
  81. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/pytest.ini +0 -0
  82. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/state_helper.py +0 -0
  83. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/__init__.py +0 -0
  84. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_cleanup.py +0 -0
  85. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_detecting.py +0 -0
  86. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_download_train_model.py +0 -0
  87. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_prepare.py +0 -0
  88. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_sync_confusion_matrix.py +0 -0
  89. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_train.py +0 -0
  90. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_upload_detections.py +0 -0
  91. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/states/test_state_upload_model.py +0 -0
  92. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/test_errors.py +0 -0
  93. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/test_trainer_states.py +0 -0
  94. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/tests/trainer/testing_trainer_logic.py +0 -0
  95. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/__init__.py +0 -0
  96. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/downloader.py +0 -0
  97. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/exceptions.py +0 -0
  98. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/executor.py +0 -0
  99. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/io_helpers.py +0 -0
  100. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/rest/__init__.py +0 -0
  101. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/rest/backdoor_controls.py +0 -0
  102. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/test_executor.py +0 -0
  103. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/trainer_logic.py +0 -0
  104. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/trainer_logic_generic.py +0 -0
  105. {learning_loop_node-0.17.1 → learning_loop_node-0.18.0}/learning_loop_node/trainer/trainer_node.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: learning-loop-node
- Version: 0.17.1
+ Version: 0.18.0
  Summary: Python Library for Nodes which connect to the Zauberzeug Learning Loop
  Home-page: https://github.com/zauberzeug/learning_loop_node
  License: MIT
@@ -21,7 +21,7 @@ Requires-Dist: dacite (>=1.8.1,<2.0.0)
  Requires-Dist: fastapi (>=0.93,<0.109)
  Requires-Dist: fastapi-socketio (>=0.0.10,<0.0.11)
  Requires-Dist: fastapi-utils (>=0.2.1,<0.3.0)
- Requires-Dist: httpx (>=0.24.1,<0.25.0)
+ Requires-Dist: httpx (>=0.28.1,<0.29.0)
  Requires-Dist: icecream (>=2.1.0,<3.0.0)
  Requires-Dist: numpy (>=1.13.3,<2.0.0)
  Requires-Dist: psutil (>=5.8.0,<6.0.0)
@@ -90,9 +90,8 @@ Detector Nodes are normally deployed on edge devices like robots or machinery bu
  Images can be send to the detector node via socketio or rest.
  Via **REST** you may provide the following parameters:

- - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
  - `camera_id`: a camera identifier (string) used to improve the autoupload filtering
- - `tags`: comma separated list of tags to add to the image in the learning loop
+ - `tags`: comma separated list of tags to add to the image in the learning loop to add to the image in the learning loop
  - `source`: optional source identifier (str) for the image (e.g. a robot id)
  - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
  - `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
@@ -101,7 +100,22 @@ Example usage:

  `curl --request POST -F 'file=@test.jpg' -H 'autoupload: all' -H 'camera_id: front_cam' localhost:8004/detect`

- To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata.
+ To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata. The `detect` endpoint receives a dictionary, with the following entries:
+
+ - `image`: The image data as dictionary with the following keys:
+ - `bytes`: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+ - `dtype`: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+ - `shape`: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
+ - `camera_id`: optional camera identifier (string) used to improve the autoupload filtering
+ - `tags`: optional list of tags to add to the image in the learning loop
+ - `source`: optional source string
+ - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
+ - `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
+
+ The `batch_detect` endpoint receives a dictionary, with the same entries as the `detect` endpoint, except that the `image` entry is replaced by:
+
+ - `images`: List of image data dictionaries, each with the same structure as the `image` entry in the `detect` endpoint
+
  Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).

  ### Upload API
@@ -116,12 +130,14 @@ Example:

  `curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`

- The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a json dictionary, with the following entries:
-
- - `image`: the image data in jpg format
+ The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a dictionary, with the following entries:

+ - `image`: the image data as dictionary with the following keys:
+ - `bytes`: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+ - `dtype`: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+ - `shape`: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
  - `metadata`: a dictionary representing the image metadata. If metadata contains detections and/or annotations, UUIDs for the classes are automatically determined based on the category names. Metadata should follow the schema of the `ImageMetadata` data class.
- - `upload_priority`: boolean flag to prioritize the upload (defaults to False)
+ - `upload_priority`: Optional boolean flag to prioritize the upload (defaults to False)

  The endpoint returns None if the upload was successful and an error message otherwise.

README.md

@@ -50,9 +50,8 @@ Detector Nodes are normally deployed on edge devices like robots or machinery bu
  Images can be send to the detector node via socketio or rest.
  Via **REST** you may provide the following parameters:

- - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
  - `camera_id`: a camera identifier (string) used to improve the autoupload filtering
- - `tags`: comma separated list of tags to add to the image in the learning loop
+ - `tags`: comma separated list of tags to add to the image in the learning loop to add to the image in the learning loop
  - `source`: optional source identifier (str) for the image (e.g. a robot id)
  - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
  - `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
@@ -61,7 +60,22 @@ Example usage:

  `curl --request POST -F 'file=@test.jpg' -H 'autoupload: all' -H 'camera_id: front_cam' localhost:8004/detect`

- To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata.
+ To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata. The `detect` endpoint receives a dictionary, with the following entries:
+
+ - `image`: The image data as dictionary with the following keys:
+ - `bytes`: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+ - `dtype`: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+ - `shape`: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
+ - `camera_id`: optional camera identifier (string) used to improve the autoupload filtering
+ - `tags`: optional list of tags to add to the image in the learning loop
+ - `source`: optional source string
+ - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
+ - `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
+
+ The `batch_detect` endpoint receives a dictionary, with the same entries as the `detect` endpoint, except that the `image` entry is replaced by:
+
+ - `images`: List of image data dictionaries, each with the same structure as the `image` entry in the `detect` endpoint
+
  Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).

  ### Upload API
@@ -76,12 +90,14 @@ Example:

  `curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`

- The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a json dictionary, with the following entries:
-
- - `image`: the image data in jpg format
+ The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a dictionary, with the following entries:

+ - `image`: the image data as dictionary with the following keys:
+ - `bytes`: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+ - `dtype`: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+ - `shape`: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
  - `metadata`: a dictionary representing the image metadata. If metadata contains detections and/or annotations, UUIDs for the classes are automatically determined based on the category names. Metadata should follow the schema of the `ImageMetadata` data class.
- - `upload_priority`: boolean flag to prioritize the upload (defaults to False)
+ - `upload_priority`: Optional boolean flag to prioritize the upload (defaults to False)

  The endpoint returns None if the upload was successful and an error message otherwise.
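For illustration, a client can build the `detect` payload described above from a decoded image and send it over SocketIO. This is a hedged sketch rather than code from the package: it assumes the `python-socketio` client library, a detector node listening on `localhost:8004`, and placeholder file name and camera id.

```python
import asyncio

import numpy as np
import socketio
from PIL import Image


async def main() -> None:
    image = np.array(Image.open('test.jpg'))  # HxWxC uint8 ndarray

    sio = socketio.AsyncClient()
    await sio.connect('http://localhost:8004')
    try:
        payload = {
            'image': {
                'bytes': image.tobytes(order='C'),
                'dtype': str(image.dtype),  # e.g. 'uint8'
                'shape': image.shape,       # e.g. (480, 640, 3)
            },
            'camera_id': 'front_cam',       # optional
            'tags': ['example'],            # optional
            'autoupload': 'filtered',       # 'filtered' (default), 'all', 'disabled'
        }
        # emit the event and wait for the detection result
        detections = await sio.call('detect', payload)
        print(detections)
    finally:
        await sio.disconnect()


asyncio.run(main())
```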
learning_loop_node/detector/detector_logic.py

@@ -2,6 +2,8 @@ import logging
  from abc import abstractmethod
  from typing import List, Optional

+ import numpy as np
+
  from ..data_classes import ImageMetadata, ImagesMetadata, ModelInformation
  from ..globals import GLOBALS
  from .exceptions import NodeNeedsRestartError
@@ -43,7 +45,7 @@ class DetectorLogic():
  """Called when a (new) model was loaded. Initialize the model. Model information available via `self.model_info`"""

  @abstractmethod
- def evaluate(self, image: bytes) -> ImageMetadata: # pylint: disable=unused-argument
+ def evaluate(self, image: np.ndarray) -> ImageMetadata:
  """Evaluate the image and return the detections.

  Called by the detector node when an image should be evaluated (REST or SocketIO).
@@ -52,8 +54,9 @@ class DetectorLogic():
  The function should return empty metadata if the detector is not initialized."""

  @abstractmethod
- def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
+ def batch_evaluate(self, images: List[np.ndarray]) -> ImagesMetadata:
  """Evaluate a batch of images and return the detections.
+
  The resulting detections per image should be stored in the ImagesMetadata.
  Tags stored in the ImagesMetadata will be uploaded to the learning loop.
  The function should return empty metadata if the detector is not initialized."""
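The practical impact for detector implementations is that `evaluate` and `batch_evaluate` now receive decoded numpy arrays instead of raw JPEG bytes. Below is a minimal sketch of a conforming subclass; the class name, placeholder inference logic and import paths are assumptions, while the signatures and the empty-metadata convention come from the diff above.

```python
from typing import List

import numpy as np

from learning_loop_node.data_classes import ImageMetadata, ImagesMetadata
from learning_loop_node.detector.detector_logic import DetectorLogic


class DummyDetector(DetectorLogic):
    """Illustrative subclass; a real detector would run a model in evaluate().
    Other abstract members of DetectorLogic (e.g. model initialization) are omitted here."""

    def evaluate(self, image: np.ndarray) -> ImageMetadata:
        # `image` is already a decoded HxWxC array; no cv2.imdecode step is needed anymore.
        # Return empty metadata when no model is loaded, as the docstring requires.
        return ImageMetadata()

    def batch_evaluate(self, images: List[np.ndarray]) -> ImagesMetadata:
        # One ImageMetadata per input image, collected into an ImagesMetadata container.
        return ImagesMetadata(items=[self.evaluate(image) for image in images])
```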
learning_loop_node/detector/detector_node.py

@@ -8,6 +8,8 @@ from dataclasses import asdict
  from datetime import datetime
  from typing import Dict, List, Optional

+ import numpy as np
+
  try:
  from typing import Literal
  except ImportError: # Python <= 3.8
@@ -33,6 +35,7 @@ from ..data_exchanger import DataExchanger, DownloadError
  from ..enums import OperationMode, VersionMode
  from ..globals import GLOBALS
  from ..helpers import background_tasks, environment_reader, run
+ from ..helpers.misc import numpy_image_from_dict
  from ..node import Node
  from .detector_logic import DetectorLogic
  from .exceptions import NodeNeedsRestartError
@@ -225,9 +228,28 @@ class DetectorNode(Node):

  @self.sio.event
  async def detect(sid, data: Dict) -> Dict:
+ """Detect objects in a single image sent via SocketIO.
+
+ The data dict has the following schema:
+ - image: The image data as dictionary:
+ - bytes: bytes of the ndarray
+ - dtype: data type of the ndarray
+ - shape: shape of the ndarray
+ - camera_id: Optional camera ID
+ - tags: Optional list of tags
+ - source: Optional source string
+ - autoupload: Optional 'filtered', 'all' or 'disabled' (default: 'filtered')
+ - creation_date: Optional creation date in isoformat string
+ """
+ try:
+ image = numpy_image_from_dict(data['image'])
+ except Exception:
+ self.log.exception('could not parse image from socketio')
+ return {'error': 'could not parse image from data'}
+
  try:
  det = await self.get_detections(
- raw_image=data['image'],
+ image=image,
  camera_id=data.get('camera_id', None),
  tags=data.get('tags', []),
  source=data.get('source', None),
@@ -246,9 +268,22 @@ class DetectorNode(Node):

  @self.sio.event
  async def batch_detect(sid, data: Dict) -> Dict:
+ """
+ Detect objects in a batch of images sent via SocketIO.
+
+ Data dict follows the schema of the detect endpoint,
+ but 'images' is a list of image dicts.
+ """
+ try:
+ images_data = data['images']
+ images = [numpy_image_from_dict(image) for image in images_data]
+ except Exception:
+ self.log.exception('could not parse images from socketio')
+ return {'error': 'could not parse images from data'}
+
  try:
  det = await self.get_batch_detections(
- raw_images=data['images'],
+ images=images,
  tags=data.get('tags', []),
  camera_id=data.get('camera_id', None),
  source=data.get('source', None),
@@ -304,8 +339,12 @@ class DetectorNode(Node):
  """Upload a single image with metadata to the learning loop.

  The data dict must contain:
- - image: The image bytes to upload
+ - image: The image data as dictionary with the following keys:
+ - bytes: bytes of the ndarray (retrieved via `ndarray.tobytes(order='C')`)
+ - dtype: data type of the ndarray as string (e.g. `uint8`, `float32`, etc.)
+ - shape: shape of the ndarray as tuple of ints (e.g. `(480, 640, 3)`)
  - metadata: The metadata for the image (optional)
+ - upload_priority: Whether to upload with priority (optional)
  """
  self.log.debug('Processing upload via socketio.')

@@ -321,9 +360,15 @@ class DetectorNode(Node):
  else:
  image_metadata = ImageMetadata()

+ try:
+ image = numpy_image_from_dict(data['image'])
+ except Exception:
+ self.log.exception('could not parse image from socketio')
+ return {'error': 'could not parse image from data'}
+
  try:
  await self.upload_images(
- images=[data['image']],
+ images=[image],
  images_metadata=ImagesMetadata(items=[image_metadata]) if metadata else None,
  upload_priority=data.get('upload_priority', False)
  )
@@ -373,6 +418,7 @@ class DetectorNode(Node):
  )

  self.log_status_on_change(status.state, status)
+ response = None

  try:
  response = await self.loop_communicator.post(
@@ -380,8 +426,8 @@
  except Exception:
  self.log.warning('Exception while trying to sync status with loop')

- if response.status_code != 200:
- self.log.warning('Status update failed: %s', str(response))
+ if not response or not response.is_success:
+ self.log.warning('Status update failed. Response: "%s"', response)

  async def _update_model_if_required(self) -> None:
  """Check if a new model is available and update if necessary.
@@ -509,21 +555,24 @@ class DetectorNode(Node):
  self.log.error('could not reload app')

  async def get_detections(self,
- raw_image: bytes,
+ image: np.ndarray,
  tags: List[str],
  *,
  camera_id: Optional[str] = None,
  source: Optional[str] = None,
  autoupload: Literal['filtered', 'all', 'disabled'],
  creation_date: Optional[str] = None) -> ImageMetadata:
- """ Main processing function for the detector node when an image is received via REST or SocketIO.
- This function infers the detections from the image, cares about uploading to the loop and returns the detections as ImageMetadata object.
- Note: raw_image is a numpy array of type uint8, but not in the correct shape!
- It can be converted e.g. using cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)"""
+ """
+ Main processing function for the detector node.
+
+ Used when an image is received via REST or SocketIO.
+ This function infers the detections from the image,
+ cares about uploading to the loop and returns the detections as ImageMetadata object.
+ """

  await self.detection_lock.acquire()
  try:
- metadata = await run.io_bound(self.detector_logic.evaluate, raw_image)
+ metadata = await run.io_bound(self.detector_logic.evaluate, image)
  finally:
  self.detection_lock.release()

@@ -537,9 +586,9 @@ class DetectorNode(Node):
  self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)

  if autoupload == 'filtered':
- background_tasks.create(self.relevance_filter.may_upload_detections(metadata, camera_id, raw_image))
+ background_tasks.create(self.relevance_filter.may_upload_detections(metadata, camera_id, image))
  elif autoupload == 'all':
- background_tasks.create(self.outbox.save(raw_image, metadata))
+ background_tasks.create(self.outbox.save(image, metadata))
  elif autoupload == 'disabled':
  pass
  else:
@@ -547,19 +596,22 @@ class DetectorNode(Node):
  return metadata

  async def get_batch_detections(self,
- raw_images: List[bytes],
+ images: List[np.ndarray],
  tags: List[str],
  *,
  camera_id: Optional[str] = None,
  source: Optional[str] = None,
  autoupload: str = 'filtered',
  creation_date: Optional[str] = None) -> ImagesMetadata:
- """ Processing function for the detector node when a a batch inference is requested via SocketIO.
- This function infers the detections from all images, cares about uploading to the loop and returns the detections as a list of ImageMetadata."""
+ """
+ Processing function for the detector node when a a batch inference is requested via SocketIO.
+
+ This function infers the detections from all images, cares about uploading to the loop and returns the detections as a list of ImageMetadata.
+ """

  await self.detection_lock.acquire()
  try:
- all_detections = await run.io_bound(self.detector_logic.batch_evaluate, raw_images)
+ all_detections = await run.io_bound(self.detector_logic.batch_evaluate, images)
  finally:
  self.detection_lock.release()

@@ -568,16 +620,16 @@ class DetectorNode(Node):
  metadata.source = source
  metadata.created = creation_date

- for detections, raw_image in zip(all_detections.items, raw_images):
+ for detections, image in zip(all_detections.items, images):
  fix_shape_detections(detections)
  n_bo, n_cl = len(detections.box_detections), len(detections.classification_detections)
  n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
  self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)

  if autoupload == 'filtered':
- background_tasks.create(self.relevance_filter.may_upload_detections(detections, camera_id, raw_image))
+ background_tasks.create(self.relevance_filter.may_upload_detections(detections, camera_id, image))
  elif autoupload == 'all':
- background_tasks.create(self.outbox.save(raw_image, detections))
+ background_tasks.create(self.outbox.save(image, detections))
  elif autoupload == 'disabled':
  pass
  else:
@@ -586,7 +638,7 @@ class DetectorNode(Node):

  async def upload_images(
  self, *,
- images: List[bytes],
+ images: List[np.ndarray],
  images_metadata: Optional[ImagesMetadata] = None,
  upload_priority: bool = False
  ) -> None:
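The new `batch_detect` handler expects the same payload as `detect`, but with an `images` list instead of a single `image` entry. The following is a hedged sketch of packing such a batch; the helper name and the dummy frames are illustrative, only the dictionary keys and the event name come from the diff.

```python
from typing import Dict, List

import numpy as np


def to_image_dict(image: np.ndarray) -> Dict:
    # Pack an ndarray into the dict format that numpy_image_from_dict() expects on the node.
    return {
        'bytes': image.tobytes(order='C'),
        'dtype': str(image.dtype),
        'shape': image.shape,
    }


frames: List[np.ndarray] = [np.zeros((480, 640, 3), dtype=np.uint8) for _ in range(4)]
batch_payload = {
    'images': [to_image_dict(frame) for frame in frames],
    'tags': ['batch-test'],     # optional
    'autoupload': 'disabled',   # optional: 'filtered' (default), 'all', 'disabled'
}
# batch_payload can then be sent with sio.call('batch_detect', batch_payload)
# on a connected python-socketio client, as in the earlier sketch.
```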
learning_loop_node/detector/inbox_filter/cam_observation_history.py

@@ -1,8 +1,14 @@
  import os
  from typing import List, Union

- from ...data_classes import (BoxDetection, ClassificationDetection, ImageMetadata, Observation, PointDetection,
- SegmentationDetection)
+ from ...data_classes import (
+ BoxDetection,
+ ClassificationDetection,
+ ImageMetadata,
+ Observation,
+ PointDetection,
+ SegmentationDetection,
+ )


  class CamObservationHistory:
@@ -10,6 +16,8 @@ class CamObservationHistory:
  self.reset_time = 3600
  self.recent_observations: List[Observation] = []
  self.iou_threshold = 0.5
+ self.min_uncertain_threshold = float(os.environ.get('MIN_UNCERTAIN_THRESHOLD', '0.3'))
+ self.max_uncertain_threshold = float(os.environ.get('MAX_UNCERTAIN_THRESHOLD', '0.6'))

  def forget_old_detections(self) -> None:
  self.recent_observations = [detection
@@ -25,7 +33,8 @@ class CamObservationHistory:
  continue
  if isinstance(detection, ClassificationDetection):
  # self.recent_observations.append(Observation(detection))
- causes.add('classification_detection')
+ if self.min_uncertain_threshold <= detection.confidence <= self.max_uncertain_threshold:
+ causes.add('uncertain')
  continue

  assert isinstance(detection, (BoxDetection, PointDetection)), f"Unknown detection type: {type(detection)}"
@@ -37,7 +46,7 @@ class CamObservationHistory:
  continue

  self.recent_observations.append(Observation(detection))
- if float(os.environ.get('MIN_UNCERTAIN_THRESHOLD', '0.3')) <= detection.confidence <= float(os.environ.get('MAX_UNCERTAIN_THRESHOLD', '0.6')):
+ if self.min_uncertain_threshold <= detection.confidence <= self.max_uncertain_threshold:
  causes.add('uncertain')

  return list(causes)
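Because the uncertainty band is now read from the environment once in the constructor, `MIN_UNCERTAIN_THRESHOLD` and `MAX_UNCERTAIN_THRESHOLD` must be set before the history object is created rather than at detection time. A hedged sketch, assuming the default no-argument constructor:

```python
import os

# Configure the band before the detector node (and with it the inbox filter) is constructed.
os.environ['MIN_UNCERTAIN_THRESHOLD'] = '0.25'  # lower bound of the "uncertain" confidence band
os.environ['MAX_UNCERTAIN_THRESHOLD'] = '0.75'  # upper bound

from learning_loop_node.detector.inbox_filter.cam_observation_history import CamObservationHistory

history = CamObservationHistory()
print(history.min_uncertain_threshold, history.max_uncertain_threshold)  # -> 0.25 0.75
```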
learning_loop_node/detector/inbox_filter/relevance_filter.py

@@ -1,5 +1,7 @@
  from typing import Dict, List, Optional

+ import numpy as np
+
  from ...data_classes.image_metadata import ImageMetadata
  from ..outbox import Outbox
  from .cam_observation_history import CamObservationHistory
@@ -15,7 +17,7 @@ class RelevanceFilter():
  async def may_upload_detections(self,
  image_metadata: ImageMetadata,
  cam_id: Optional[str],
- raw_image: bytes) -> List[str]:
+ image: np.ndarray) -> List[str]:
  """Check if the detection should be uploaded to the outbox.
  If so, upload it and return the list of causes for the upload.
  """
@@ -34,5 +36,5 @@ class RelevanceFilter():
  causes.append('unexpected_observations_count')
  if len(causes) > 0:
  image_metadata.tags.extend(causes)
- await self.outbox.save(raw_image, image_metadata)
+ await self.outbox.save(image, image_metadata)
  return causes
learning_loop_node/detector/outbox.py

@@ -16,6 +16,7 @@ from threading import Lock
  from typing import List, Optional, Tuple, TypeVar, Union

  import aiohttp
+ import numpy as np
  import PIL
  import PIL.Image # type: ignore
  from fastapi.encoders import jsonable_encoder
@@ -24,6 +25,7 @@ from ..data_classes import ImageMetadata
  from ..enums import OutboxMode
  from ..globals import GLOBALS
  from ..helpers import environment_reader, run
+ from ..helpers.misc import numpy_array_to_jpg_bytes

  T = TypeVar('T')

@@ -76,13 +78,16 @@ class Outbox():
  self.upload_folders.append(file)

  async def save(self,
- image: bytes,
+ image: np.ndarray,
  image_metadata: Optional[ImageMetadata] = None,
  upload_priority: bool = False) -> None:
+ """
+ Save an image and its metadata to disk.

- if not await run.io_bound(self._is_valid_jpg, image):
- self.log.error('Invalid jpg image')
- return
+ The data will be picked up by the continuous upload process.
+ """
+
+ jpg_bytes = numpy_array_to_jpg_bytes(image)

  if image_metadata is None:
  image_metadata = ImageMetadata()
@@ -90,7 +95,7 @@ class Outbox():
  identifier = datetime.now().isoformat(sep='_', timespec='microseconds')

  try:
- await run.io_bound(self._save_files_to_disk, identifier, image, image_metadata, upload_priority)
+ await run.io_bound(self._save_files_to_disk, identifier, jpg_bytes, image_metadata, upload_priority)
  except Exception as e:
  self.log.error('Failed to save files for image %s: %s', identifier, e)
  return
@@ -104,7 +109,7 @@

  def _save_files_to_disk(self,
  identifier: str,
- image: bytes,
+ jpeg_image: bytes,
  image_metadata: ImageMetadata,
  upload_priority: bool) -> None:
  subpath = 'priority' if upload_priority else 'normal'
@@ -119,7 +124,7 @@ class Outbox():
  json.dump(jsonable_encoder(asdict(image_metadata)), f)

  with open(tmp + f'/image_{identifier}.jpg', 'wb') as f:
- f.write(image)
+ f.write(jpeg_image)

  if not os.path.exists(tmp):
  self.log.error('Could not rename %s to %s', tmp, full_path)
learning_loop_node/detector/rest/detect.py

@@ -9,6 +9,7 @@ except ImportError: # Python <= 3.8
  from fastapi import APIRouter, File, Header, Request, UploadFile

  from ...data_classes.image_metadata import ImageMetadata
+ from ...helpers.misc import jpg_bytes_to_numpy_array

  if TYPE_CHECKING:
  from ..detector_node import DetectorNode
@@ -35,23 +36,23 @@ async def http_detect(
  Multiple images example:

  for i in `seq 1 10`; do time curl --request POST -F 'file=@test.jpg' localhost:8004/detect; done
-
  """
+ node: 'DetectorNode' = request.app
+
  try:
  # Read file directly to bytes instead of using numpy
- file_bytes = file.file.read()
+ file_bytes = await file.read()
  except Exception as exc:
  logging.exception('Error during reading of image %s.', file.filename)
  raise Exception(f'Uploaded file {file.filename} is no image file.') from exc

  try:
- app: 'DetectorNode' = request.app
- detections = await app.get_detections(raw_image=file_bytes,
- camera_id=camera_id or None,
- tags=tags.split(',') if tags else [],
- source=source,
- autoupload=autoupload or 'filtered',
- creation_date=creation_date)
+ detections = await node.get_detections(image=jpg_bytes_to_numpy_array(file_bytes),
+ camera_id=camera_id or None,
+ tags=tags.split(',') if tags else [],
+ source=source,
+ autoupload=autoupload or 'filtered',
+ creation_date=creation_date)
  except Exception as exc:
  logging.exception('Error during detection of image %s.', file.filename)
  raise Exception(f'Error during detection of image {file.filename}.') from exc
learning_loop_node/detector/rest/upload.py

@@ -3,6 +3,7 @@ from typing import TYPE_CHECKING, List, Optional
  from fastapi import APIRouter, File, Query, Request, UploadFile

  from ...data_classes.image_metadata import ImageMetadata, ImagesMetadata
+ from ...helpers.misc import jpg_bytes_to_numpy_array

  if TYPE_CHECKING:
  from ..detector_node import DetectorNode
@@ -26,15 +27,18 @@ async def upload_image(request: Request,

  curl -X POST -F 'files=@test.jpg' "http://localhost:/upload?source=test&creation_date=2024-01-01T00:00:00&upload_priority=true"
  """
- raw_files = [await file.read() for file in files]
+ node: 'DetectorNode' = request.app
+
+ files_bytes = [await file.read() for file in files]
  image_metadatas = []
- for _ in files:
+ images = []
+ for file_bytes in files_bytes:
+ images.append(jpg_bytes_to_numpy_array(file_bytes))
  image_metadatas.append(ImageMetadata(source=source, created=creation_date))

  images_metadata = ImagesMetadata(items=image_metadatas)

- node: DetectorNode = request.app
- await node.upload_images(images=raw_files,
+ await node.upload_images(images=images,
  images_metadata=images_metadata,
  upload_priority=upload_priority)
  return 200, "OK"
learning_loop_node/helpers/background_tasks.py

@@ -25,14 +25,14 @@ from __future__ import annotations

  import asyncio
  import logging
- from typing import Awaitable, Dict, Set
+ from typing import Coroutine, Dict, Set

  running_tasks: Set[asyncio.Task] = set()
  lazy_tasks_running: Dict[str, asyncio.Task] = {}
- lazy_tasks_waiting: Dict[str, Awaitable] = {}
+ lazy_tasks_waiting: Dict[str, Coroutine] = {}


- def create(coroutine: Awaitable, *, name: str = 'unnamed task') -> asyncio.Task:
+ def create(coroutine: Coroutine, *, name: str = 'unnamed task') -> asyncio.Task:
  """Wraps a loop.create_task call and ensures there is an exception handler added to the task.

  If the task raises an exception, it is logged and handled by the global exception handlers.
@@ -48,7 +48,7 @@ def create(coroutine: Awaitable, *, name: str = 'unnamed task') -> asyncio.Task:
  return task


- def create_lazy(coroutine: Awaitable, *, name: str) -> None:
+ def create_lazy(coroutine: Coroutine, *, name: str) -> None:
  """Wraps a create call and ensures a second task with the same name is delayed until the first one is done.

  If a third task with the same name is created while the first one is still running, the second one is discarded.
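The switch from `Awaitable` to `Coroutine` matches how these helpers are called: a coroutine object is passed in and wrapped in a task. A hedged usage sketch (the `heartbeat` coroutine is illustrative, only `create` and its `name` keyword come from the diff):

```python
import asyncio

from learning_loop_node.helpers import background_tasks


async def heartbeat() -> None:
    await asyncio.sleep(1)


async def main() -> None:
    # Pass the coroutine object (heartbeat()), not the function itself.
    task = background_tasks.create(heartbeat(), name='heartbeat')
    await task


asyncio.run(main())
```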
learning_loop_node/helpers/misc.py

@@ -1,6 +1,7 @@
  """original copied from https://quantlane.com/blog/ensure-asyncio-task-exceptions-get-logged/"""
  import asyncio
  import functools
+ import io
  import json
  import logging
  import os
@@ -9,10 +10,12 @@ import sys
  from dataclasses import asdict
  from glob import glob
  from time import perf_counter
- from typing import Any, Coroutine, List, Optional, Tuple, TypeVar
+ from typing import Any, Coroutine, Dict, List, Optional, Tuple, TypeVar
  from uuid import UUID, uuid4

+ import numpy as np
  import pynvml
+ from PIL import Image

  from ..data_classes.general import Context
  from ..data_classes.socket_response import SocketResponse
@@ -204,3 +207,33 @@ def create_training_folder(project_folder: str, trainings_id: str) -> str:
  training_folder = f'{project_folder}/trainings/{trainings_id}'
  os.makedirs(training_folder, exist_ok=True)
  return training_folder
+
+
+ def jpg_bytes_to_numpy_array(jpg_bytes: bytes) -> np.ndarray:
+ """Convert jpg bytes to numpy array."""
+ image = Image.open(io.BytesIO(jpg_bytes))
+ return np.array(image)
+
+
+ def numpy_array_to_jpg_bytes(image_array: np.ndarray) -> bytes:
+ """Convert jpg bytes to numpy array."""
+ buffer = io.BytesIO()
+ Image.fromarray(image_array).save(buffer, format="JPEG")
+ jpg_bytes = buffer.getvalue()
+ return jpg_bytes
+
+
+ def numpy_image_from_dict(image_data: Dict) -> np.ndarray:
+ """
+ Convert image dict to numpy array.
+
+ The image_data dict should have the following keys:
+ - bytes: bytes of the image in C order (row-major)
+ - dtype: data type of the pixel values
+ - shape: shape of the image (height, width, channels)
+ """
+
+ image_bytes = image_data['bytes']
+ image_dtype = image_data['dtype']
+ image_shape = image_data['shape']
+ return np.frombuffer(image_bytes, dtype=image_dtype).reshape(image_shape, order='C')
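A quick round trip through the new helpers shows the intended usage: pack an ndarray into the dictionary format the SocketIO endpoints expect, reconstruct it with `numpy_image_from_dict`, and convert between arrays and JPEG bytes as the outbox does. This is a standalone sketch; only the helper names and dictionary keys come from the diff above.

```python
import numpy as np

from learning_loop_node.helpers.misc import (jpg_bytes_to_numpy_array, numpy_array_to_jpg_bytes,
                                             numpy_image_from_dict)

original = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)

# The dict format used by the `detect`, `batch_detect` and upload SocketIO endpoints.
payload = {
    'bytes': original.tobytes(order='C'),
    'dtype': str(original.dtype),
    'shape': original.shape,
}
restored = numpy_image_from_dict(payload)
assert np.array_equal(original, restored)  # exact, lossless round trip

jpg = numpy_array_to_jpg_bytes(original)   # encoded by the outbox before upload
decoded = jpg_bytes_to_numpy_array(jpg)    # back to an HxWxC array
assert decoded.shape == original.shape     # same shape; pixel values may differ (JPEG is lossy)
```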