learning-loop-node 0.10.11__tar.gz → 0.10.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of learning-loop-node might be problematic.

Files changed (95)
  1. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/PKG-INFO +16 -15
  2. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/README.md +15 -14
  3. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/data_classes/detections.py +2 -1
  4. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/data_exchanger.py +6 -0
  5. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/detector_logic.py +7 -1
  6. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/detector_node.py +65 -45
  7. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/inbox_filter/relevance_filter.py +9 -3
  8. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/outbox.py +8 -1
  9. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/rest/about.py +1 -0
  10. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/rest/backdoor_controls.py +1 -3
  11. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/rest/detect.py +12 -5
  12. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/helpers/log_conf.py +5 -0
  13. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/node.py +4 -1
  14. learning_loop_node-0.10.13/learning_loop_node/rest.py +32 -0
  15. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/conftest.py +34 -0
  16. learning_loop_node-0.10.13/learning_loop_node/tests/detector/test_detector_node.py +86 -0
  17. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/io_helpers.py +3 -6
  18. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/trainer_logic.py +4 -4
  19. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/trainer_logic_generic.py +8 -8
  20. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/trainer_node.py +1 -1
  21. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/pyproject.toml +1 -1
  22. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/__init__.py +0 -0
  23. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/annotation/__init__.py +0 -0
  24. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/annotation/annotator_logic.py +0 -0
  25. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/annotation/annotator_node.py +0 -0
  26. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/data_classes/__init__.py +0 -0
  27. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/data_classes/annotations.py +0 -0
  28. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/data_classes/general.py +0 -0
  29. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/data_classes/socket_response.py +0 -0
  30. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/data_classes/training.py +0 -0
  31. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/__init__.py +0 -0
  32. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/inbox_filter/__init__.py +0 -0
  33. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/inbox_filter/cam_observation_history.py +0 -0
  34. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/rest/__init__.py +0 -0
  35. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/rest/model_version_control.py +0 -0
  36. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/rest/operation_mode.py +0 -0
  37. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/rest/outbox_mode.py +0 -0
  38. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/detector/rest/upload.py +0 -0
  39. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/examples/novelty_score_updater.py +0 -0
  40. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/globals.py +0 -0
  41. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/helpers/__init__.py +0 -0
  42. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/helpers/environment_reader.py +0 -0
  43. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/helpers/gdrive_downloader.py +0 -0
  44. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/helpers/misc.py +0 -0
  45. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/loop_communication.py +0 -0
  46. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/py.typed +0 -0
  47. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/__init__.py +0 -0
  48. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/annotator/__init__.py +0 -0
  49. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/annotator/conftest.py +0 -0
  50. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/annotator/pytest.ini +0 -0
  51. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/annotator/test_annotator_node.py +0 -0
  52. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/__init__.py +0 -0
  53. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/inbox_filter/__init__.py +0 -0
  54. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/inbox_filter/test_observation.py +0 -0
  55. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py +0 -0
  56. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py +0 -0
  57. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/pytest.ini +0 -0
  58. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/test.jpg +0 -0
  59. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/test_client_communication.py +0 -0
  60. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/test_outbox.py +0 -0
  61. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/test_relevance_filter.py +0 -0
  62. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/detector/testing_detector.py +0 -0
  63. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/general/__init__.py +0 -0
  64. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/general/conftest.py +0 -0
  65. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/general/pytest.ini +0 -0
  66. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/general/test_data/file_1.txt +0 -0
  67. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/general/test_data/file_2.txt +0 -0
  68. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/general/test_data/model.json +0 -0
  69. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/general/test_data_classes.py +0 -0
  70. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/general/test_downloader.py +0 -0
  71. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/general/test_learning_loop_node.py +0 -0
  72. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/test_helper.py +0 -0
  73. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/__init__.py +0 -0
  74. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/conftest.py +0 -0
  75. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/pytest.ini +0 -0
  76. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/state_helper.py +0 -0
  77. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/states/__init__.py +0 -0
  78. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/states/test_state_cleanup.py +0 -0
  79. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/states/test_state_detecting.py +0 -0
  80. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/states/test_state_download_train_model.py +0 -0
  81. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/states/test_state_prepare.py +0 -0
  82. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/states/test_state_sync_confusion_matrix.py +0 -0
  83. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/states/test_state_train.py +0 -0
  84. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/states/test_state_upload_detections.py +0 -0
  85. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/states/test_state_upload_model.py +0 -0
  86. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/test_errors.py +0 -0
  87. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/test_trainer_states.py +0 -0
  88. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/tests/trainer/testing_trainer_logic.py +0 -0
  89. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/__init__.py +0 -0
  90. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/downloader.py +0 -0
  91. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/exceptions.py +0 -0
  92. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/executor.py +0 -0
  93. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/rest/__init__.py +0 -0
  94. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/rest/backdoor_controls.py +0 -0
  95. {learning_loop_node-0.10.11 → learning_loop_node-0.10.13}/learning_loop_node/trainer/test_executor.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: learning-loop-node
- Version: 0.10.11
+ Version: 0.10.13
  Summary: Python Library for Nodes which connect to the Zauberzeug Learning Loop
  Home-page: https://github.com/zauberzeug/learning_loop_node
  License: MIT
@@ -57,20 +57,21 @@ To start a node you have to implement the logic by inheriting from the correspon

  You can configure connection to our Learning Loop by specifying the following environment variables before starting:

- | Name | Alias | Purpose | Required by |
- | ------------------------ | ------------ | ------------------------------------------------------------ | -------------------- |
- | LOOP_HOST | HOST | Learning Loop address (e.g. learning-loop.ai) | all |
- | LOOP_USERNAME | USERNAME | Learning Loop user name | all besides Detector |
- | LOOP_PASSWORD | PASSWORD | Learning Loop password | all besides Detector |
- | LOOP_SSL_CERT_PATH | - | Path to the SSL certificate | all (opt.) |
- | LOOP_ORGANIZATION | ORGANIZATION | Organization name | Detector |
- | LOOP_PROJECT | PROJECT | Project name | Detector |
- | MIN_UNCERTAIN_THRESHOLD | PROJECT | smallest confidence (float) at which auto-upload will happen | Detector |
- | MAX_UNCERTAIN_THRESHOLD | PROJECT | largest confidence (float) at which auto-upload will happen | Detector |
- | INFERENCE_BATCH_SIZE | - | Batch size of trainer when calculating detections | Trainer (opt.) |
- | RESTART_AFTER_TRAINING | - | Restart the trainer after training (set to 1) | Trainer (opt.) |
- | KEEP_OLD_TRAININGS | - | Do not delete old trainings (set to 1) | Trainer (opt.) |
- | TRAINER_IDLE_TIMEOUT_SEC | - | Automatically shutdown trainer after timeout (in seconds) | Trainer (opt.) |
+ | Name | Alias | Purpose | Required by |
+ | ------------------------ | ------------ | ------------------------------------------------------------ | ------------------------- |
+ | LOOP_HOST | HOST | Learning Loop address (e.g. learning-loop.ai) | all |
+ | LOOP_USERNAME | USERNAME | Learning Loop user name | all besides Detector |
+ | LOOP_PASSWORD | PASSWORD | Learning Loop password | all besides Detector |
+ | LOOP_SSL_CERT_PATH | - | Path to the SSL certificate | all (opt.) |
+ | LOOP_ORGANIZATION | ORGANIZATION | Organization name | Detector |
+ | LOOP_PROJECT | PROJECT | Project name | Detector (opt.) |
+ | MIN_UNCERTAIN_THRESHOLD | - | smallest confidence (float) at which auto-upload will happen | Detector (opt.) |
+ | MAX_UNCERTAIN_THRESHOLD | - | largest confidence (float) at which auto-upload will happen | Detector (opt.) |
+ | INFERENCE_BATCH_SIZE | - | Batch size of trainer when calculating detections | Trainer (opt.) |
+ | RESTART_AFTER_TRAINING | - | Restart the trainer after training (set to 1) | Trainer (opt.) |
+ | KEEP_OLD_TRAININGS | - | Do not delete old trainings (set to 1) | Trainer (opt.) |
+ | TRAINER_IDLE_TIMEOUT_SEC | - | Automatically shutdown trainer after timeout (in seconds) | Trainer (opt.) |
+ | USE_BACKDOOR_CONTROLS | - | Always enable backdoor controls (set to 1) | Trainer / Detector (opt.) |

  #### Testing

@@ -17,20 +17,21 @@ To start a node you have to implement the logic by inheriting from the correspon

  You can configure connection to our Learning Loop by specifying the following environment variables before starting:

- | Name | Alias | Purpose | Required by |
- | ------------------------ | ------------ | ------------------------------------------------------------ | -------------------- |
- | LOOP_HOST | HOST | Learning Loop address (e.g. learning-loop.ai) | all |
- | LOOP_USERNAME | USERNAME | Learning Loop user name | all besides Detector |
- | LOOP_PASSWORD | PASSWORD | Learning Loop password | all besides Detector |
- | LOOP_SSL_CERT_PATH | - | Path to the SSL certificate | all (opt.) |
- | LOOP_ORGANIZATION | ORGANIZATION | Organization name | Detector |
- | LOOP_PROJECT | PROJECT | Project name | Detector |
- | MIN_UNCERTAIN_THRESHOLD | PROJECT | smallest confidence (float) at which auto-upload will happen | Detector |
- | MAX_UNCERTAIN_THRESHOLD | PROJECT | largest confidence (float) at which auto-upload will happen | Detector |
- | INFERENCE_BATCH_SIZE | - | Batch size of trainer when calculating detections | Trainer (opt.) |
- | RESTART_AFTER_TRAINING | - | Restart the trainer after training (set to 1) | Trainer (opt.) |
- | KEEP_OLD_TRAININGS | - | Do not delete old trainings (set to 1) | Trainer (opt.) |
- | TRAINER_IDLE_TIMEOUT_SEC | - | Automatically shutdown trainer after timeout (in seconds) | Trainer (opt.) |
+ | Name | Alias | Purpose | Required by |
+ | ------------------------ | ------------ | ------------------------------------------------------------ | ------------------------- |
+ | LOOP_HOST | HOST | Learning Loop address (e.g. learning-loop.ai) | all |
+ | LOOP_USERNAME | USERNAME | Learning Loop user name | all besides Detector |
+ | LOOP_PASSWORD | PASSWORD | Learning Loop password | all besides Detector |
+ | LOOP_SSL_CERT_PATH | - | Path to the SSL certificate | all (opt.) |
+ | LOOP_ORGANIZATION | ORGANIZATION | Organization name | Detector |
+ | LOOP_PROJECT | PROJECT | Project name | Detector (opt.) |
+ | MIN_UNCERTAIN_THRESHOLD | - | smallest confidence (float) at which auto-upload will happen | Detector (opt.) |
+ | MAX_UNCERTAIN_THRESHOLD | - | largest confidence (float) at which auto-upload will happen | Detector (opt.) |
+ | INFERENCE_BATCH_SIZE | - | Batch size of trainer when calculating detections | Trainer (opt.) |
+ | RESTART_AFTER_TRAINING | - | Restart the trainer after training (set to 1) | Trainer (opt.) |
+ | KEEP_OLD_TRAININGS | - | Do not delete old trainings (set to 1) | Trainer (opt.) |
+ | TRAINER_IDLE_TIMEOUT_SEC | - | Automatically shutdown trainer after timeout (in seconds) | Trainer (opt.) |
+ | USE_BACKDOOR_CONTROLS | - | Always enable backdoor controls (set to 1) | Trainer / Detector (opt.) |

  #### Testing
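For context, a minimal start-up sketch that uses these variables could look like the following; the module `my_detector`, the class `MyDetectorLogic`, the port and the threshold values are placeholders, not part of the package:

```python
# Sketch only: set the connection variables before constructing the node.
import os

os.environ['LOOP_HOST'] = 'learning-loop.ai'
os.environ['LOOP_ORGANIZATION'] = 'my_organization'   # placeholder
os.environ['LOOP_PROJECT'] = 'my_project'             # placeholder
os.environ['MIN_UNCERTAIN_THRESHOLD'] = '0.3'         # lower bound of the auto-upload confidence window
os.environ['MAX_UNCERTAIN_THRESHOLD'] = '0.6'         # upper bound of the auto-upload confidence window
os.environ['USE_BACKDOOR_CONTROLS'] = '1'             # new in 0.10.13: always include the backdoor controls router

import uvicorn
from learning_loop_node.detector.detector_node import DetectorNode

from my_detector import MyDetectorLogic  # your own DetectorLogic subclass (placeholder)

node = DetectorNode(name='demo-detector', detector=MyDetectorLogic())
uvicorn.run(node, host='0.0.0.0', port=8004)          # port is a placeholder
```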
 
@@ -118,7 +118,8 @@ class Detections():
  classification_detections: List[ClassificationDetection] = field(default_factory=list)
  tags: List[str] = field(default_factory=list)
  date: Optional[str] = field(default_factory=current_datetime)
- image_id: Optional[str] = None # used for detection of trainers
+ image_id: Optional[str] = None # (actually UUID) used for detection of trainers
+ source: Optional[str] = None

  def __len__(self):
  return len(self.box_detections) + len(self.point_detections) + len(self.segmentation_detections) + len(self.classification_detections)
@@ -101,6 +101,12 @@ class DataExchanger():

  paths, _ = create_resource_paths(self.context.organization, self.context.project, new_image_uuids)
  num_new_image_ids = len(new_image_uuids)
+ if num_new_image_ids == 0:
+ logging.info('All images are already downloaded. Nothing to do.')
+ self.progress = 1.0
+ return
+
+ logging.info(f'Downloading {num_new_image_ids} new images to {image_folder}..')
  os.makedirs(image_folder, exist_ok=True)

  progress_factor = 0.5 / num_new_image_ids # second 50% of progress is for downloading images
@@ -1,6 +1,6 @@
  import logging
  from abc import abstractmethod
- from typing import Optional
+ from typing import List, Optional

  import numpy as np

@@ -46,6 +46,12 @@ class DetectorLogic():
  def init(self):
  """Called when a (new) model was loaded. Initialize the model. Model information available via `self.model_info`"""

+ def evaluate_with_all_info(self, image: np.ndarray, tags: List[str], source: Optional[str] = None) -> Detections: # pylint: disable=unused-argument
+ """Called by the detector node when an image should be evaluated (REST or SocketIO).
+ Tags, source come from the caller and may be used in this function.
+ By default, this function simply calls `evaluate`"""
+ return self.evaluate(image)
+
  @abstractmethod
  def evaluate(self, image: np.ndarray) -> Detections:
  """Evaluate the image and return the detections.
@@ -9,9 +9,9 @@ from threading import Thread
  from typing import Dict, List, Optional, Union

  import numpy as np
+ import socketio
  from dacite import from_dict
  from fastapi.encoders import jsonable_encoder
- from fastapi_socketio import SocketManager
  from socketio import AsyncClient

  from ..data_classes import Category, Context, Detections, DetectionStatus, ModelInformation, Shape
@@ -41,7 +41,7 @@ class DetectorNode(Node):
  self.organization = environment_reader.organization()
  self.project = environment_reader.project()
  assert self.organization and self.project, 'Detector node needs an organization and an project'
- self.log.info(f'Using {self.organization}/{self.project}')
+ self.log.info('Using %s/%s', self.organization, self.project)
  self.operation_mode: OperationMode = OperationMode.Startup
  self.connected_clients: List[str] = []

@@ -70,7 +70,7 @@ class DetectorNode(Node):
  self.include_router(rest_outbox_mode.router, tags=["outbox_mode"])
  self.include_router(rest_version_control.router, tags=["model_version"])

- if use_backdoor_controls:
+ if use_backdoor_controls or os.environ.get('USE_BACKDOOR_CONTROLS', '0').lower() in ('1', 'true'):
  self.include_router(backdoor_controls.router)

  self.setup_sio_server()
@@ -126,22 +126,32 @@ class DetectorNode(Node):

  def setup_sio_server(self) -> None:
  """The DetectorNode acts as a SocketIO server. This method sets up the server and defines the event handlers."""
-
  # pylint: disable=unused-argument

- async def _detect(sid, data: Dict) -> Dict:
- self.log.info('running detect via socketio')
+ # Initialize the Socket.IO server
+ self.sio = socketio.AsyncServer(async_mode='asgi')
+ # Initialize and mount the ASGI app
+ self.sio_app = socketio.ASGIApp(self.sio, socketio_path='/socket.io')
+ self.mount('/ws', self.sio_app)
+ # Register event handlers
+
+ self.log.info('>>>>>>>>>>>>>>>>>>>>>>> Setting up the SIO server')
+
+ @self.sio.event
+ async def detect(sid, data: Dict) -> Dict:
+ self.log.debug('running detect via socketio')
  try:
  np_image = np.frombuffer(data['image'], np.uint8)
  det = await self.get_detections(
  raw_image=np_image,
  camera_id=data.get('camera-id', None) or data.get('mac', None),
  tags=data.get('tags', []),
- autoupload=data.get('autoupload', None),
+ source=data.get('source', None),
+ autoupload=data.get('autoupload', None)
  )
  if det is None:
  return {'error': 'no model loaded'}
- self.log.info('detect via socketio finished')
+ self.log.debug('detect via socketio finished')
  return det
  except Exception as e:
  self.log.exception('could not detect via socketio')
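Since the node now runs its own `socketio.AsyncServer` mounted under `/ws`, a caller can use a plain python-socketio client and the events registered above (`detect`, `info`, `upload`, `connect`). A rough client sketch (host, port and image path are assumptions; the `socketio_path` follows from the `/ws` mount and the default `/socket.io` path):

```python
import asyncio

import socketio


async def main() -> None:
    sio = socketio.AsyncClient()
    # The ASGI app is mounted under '/ws', so the handshake path is '/ws/socket.io'.
    await sio.connect('http://localhost:8004', socketio_path='/ws/socket.io')  # host/port assumed

    with open('test.jpg', 'rb') as f:
        image_bytes = f.read()

    result = await sio.call('detect', {
        'image': image_bytes,
        'camera-id': 'cam-01',       # alternatively 'mac'
        'tags': ['demo'],
        'source': 'gate-camera',     # new in 0.10.13
        'autoupload': 'filtered',    # 'filtered' (default), 'all' or 'disabled'
    })
    print(result)  # detections as a dict, or {'error': ...}

    await sio.disconnect()


asyncio.run(main())
```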
@@ -149,12 +159,14 @@ class DetectorNode(Node):
  f.write(data['image'])
  return {'error': str(e)}

- async def _info(sid) -> Union[str, Dict]:
+ @self.sio.event
+ async def info(sid) -> Union[str, Dict]:
  if self.detector_logic.is_initialized:
  return asdict(self.detector_logic.model_info)
  return 'No model loaded'

- async def _upload(sid, data: Dict) -> Optional[Dict]:
+ @self.sio.event
+ async def upload(sid, data: Dict) -> Optional[Dict]:
  '''upload an image with detections'''

  detection_data = data.get('detections', {})
@@ -171,38 +183,33 @@ class DetectorNode(Node):
  tags = data.get('tags', [])
  tags.append('picked_by_system')

+ source = data.get('source', None)
+
  loop = asyncio.get_event_loop()
  try:
- await loop.run_in_executor(None, self.outbox.save, data['image'], detections, tags)
+ await loop.run_in_executor(None, self.outbox.save, data['image'], detections, tags, source)
  except Exception as e:
  self.log.exception('could not upload via socketio')
  return {'error': str(e)}
  return None

- def _connect(sid, environ, auth) -> None:
+ @self.sio.event
+ def connect(sid, environ, auth) -> None:
  self.connected_clients.append(sid)

- print('>>>>>>>>>>>>>>>>>>>>>>> setting up sio server', flush=True)
-
- self.sio_server = SocketManager(app=self)
- self.sio_server.on('detect', _detect)
- self.sio_server.on('info', _info)
- self.sio_server.on('upload', _upload)
- self.sio_server.on('connect', _connect)
-
  async def _check_for_update(self) -> None:
  if self.operation_mode == OperationMode.Startup:
  return
  try:
- self.log.info(f'Current operation mode is {self.operation_mode}')
+ self.log.info('Current operation mode is %s', self.operation_mode)
  try:
  await self.sync_status_with_learning_loop()
  except Exception as e:
- self.log.error(f'Could not check for updates: {e}')
+ self.log.error('Could not check for updates: %s', e)
  return

  if self.operation_mode != OperationMode.Idle:
- self.log.info(f'not checking for updates; operation mode is {self.operation_mode}')
+ self.log.info('not checking for updates; operation mode is %s', self.operation_mode)
  return

  self.status.reset_error('update_model')
@@ -210,11 +217,11 @@ class DetectorNode(Node):
  self.log.info('not checking for updates; no target model selected')
  return

- current_version = self.detector_logic._model_info.version if self.detector_logic._model_info is not None else None
+ current_version = self.detector_logic._model_info.version if self.detector_logic._model_info is not None else None # pylint: disable=protected-access

  if not self.detector_logic.is_initialized or self.target_model.version != current_version:
- self.log.info(
- f'Current model "{current_version or "-"}" needs to be updated to {self.target_model.version}')
+ self.log.info('Current model "%s" needs to be updated to %s',
+ current_version or "-", self.target_model.version)

  with step_into(GLOBALS.data_folder):
  model_symlink = 'model'
@@ -232,7 +239,7 @@ class DetectorNode(Node):
  except Exception:
  pass
  os.symlink(target_model_folder, model_symlink)
- self.log.info(f'Updated symlink for model to {os.readlink(model_symlink)}')
+ self.log.info('Updated symlink for model to %s', os.readlink(model_symlink))

  self.detector_logic.load_model()
  try:
@@ -283,13 +290,13 @@ class DetectorNode(Node):
  model_format=self.detector_logic.model_format,
  )

- self.log.info(f'sending status {status}')
+ self.log.info('sending status %s', status)
  response = await self.sio_client.call('update_detector', (self.organization, self.project, jsonable_encoder(asdict(status))))

  assert response is not None
  socket_response = from_dict(data_class=SocketResponse, data=response)
  if not socket_response.success:
- self.log.error(f'Statusupdate failed: {response}')
+ self.log.error('Statusupdate failed: %s', response)
  raise Exception(f'Statusupdate failed: {response}')

  assert socket_response.payload is not None
@@ -303,19 +310,19 @@ class DetectorNode(Node):

  if self.version_control == rest_version_control.VersionMode.FollowLoop:
  self.target_model = self.loop_deployment_target
- self.log.info(f'After sending status. Target_model is {self.target_model.version}')
+ self.log.info('After sending status. Target_model is %s', self.target_model.version)

  async def set_operation_mode(self, mode: OperationMode):
  self.operation_mode = mode
  try:
  await self.sync_status_with_learning_loop()
  except Exception as e:
- self.log.warning(f'Operation mode set to {mode}, but sync failed: {e}')
+ self.log.warning('Operation mode set to %s, but sync failed: %s', mode, e)

  def reload(self, reason: str):
  '''provide a cause for the reload'''

- self.log.info(f'########## reloading app because {reason}')
+ self.log.info('########## reloading app because %s', reason)
  if os.path.isfile('/app/app_code/restart/restart.py'):
  subprocess.call(['touch', '/app/app_code/restart/restart.py'])
  elif os.path.isfile('/app/main.py'):
@@ -325,32 +332,36 @@ class DetectorNode(Node):
  else:
  self.log.error('could not reload app')

- async def get_detections(self, raw_image: np.ndarray, camera_id: Optional[str], tags: List[str], autoupload: Optional[str] = None) -> Optional[Dict]:
- """Note: raw_image is a numpy array of type uint8, but not in the correrct shape!
+ async def get_detections(self,
+ raw_image: np.ndarray,
+ camera_id: Optional[str],
+ tags: List[str],
+ source: Optional[str] = None,
+ autoupload: Optional[str] = None) -> Optional[Dict]:
+ """ Main processing function for the detector node when an image is received via REST or SocketIO.
+ This function infers the detections from the image, cares about uploading to the loop and returns the detections as a dictionary.
+ Note: raw_image is a numpy array of type uint8, but not in the correct shape!
  It can be converted e.g. using cv2.imdecode(raw_image, cv2.IMREAD_COLOR)"""
- loop = asyncio.get_event_loop()
+
  await self.detection_lock.acquire()
- detections: Detections = await loop.run_in_executor(None, self.detector_logic.evaluate, raw_image)
+ loop = asyncio.get_event_loop()
+ detections = await loop.run_in_executor(None, self.detector_logic.evaluate_with_all_info, raw_image, tags, source)
  self.detection_lock.release()
- for seg_detection in detections.segmentation_detections:
- if isinstance(seg_detection.shape, Shape):
- shapes = ','.join([str(value) for p in seg_detection.shape.points for _,
- value in asdict(p).items()])
- seg_detection.shape = shapes # TODO This seems to be a quick fix.. check how loop upload detections deals with this

+ fix_shape_detections(detections)
  n_bo, n_cl = len(detections.box_detections), len(detections.classification_detections)
  n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
- self.log.info(f'detected:{n_bo} boxes, {n_po} points, {n_se} segs, {n_cl} classes')
+ self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)

  if autoupload is None or autoupload == 'filtered': # NOTE default is filtered
  Thread(target=self.relevance_filter.may_upload_detections,
- args=(detections, camera_id, raw_image, tags)).start()
+ args=(detections, camera_id, raw_image, tags, source)).start()
  elif autoupload == 'all':
- Thread(target=self.outbox.save, args=(raw_image, detections, tags)).start()
+ Thread(target=self.outbox.save, args=(raw_image, detections, tags, source)).start()
  elif autoupload == 'disabled':
  pass
  else:
- self.log.error(f'unknown autoupload value {autoupload}')
+ self.log.error('unknown autoupload value %s', autoupload)
  return jsonable_encoder(asdict(detections))

  async def upload_images(self, images: List[bytes]):
@@ -393,3 +404,12 @@ def step_into(new_dir):
  yield
  finally:
  os.chdir(previous_dir)
+
+
+ def fix_shape_detections(detections: Detections):
+ # TODO This is a quick fix.. check how loop upload detections deals with this
+ for seg_detection in detections.segmentation_detections:
+ if isinstance(seg_detection.shape, Shape):
+ points = ','.join([str(value) for p in seg_detection.shape.points for _,
+ value in asdict(p).items()])
+ seg_detection.shape = points
@@ -1,4 +1,4 @@
- from typing import Dict, List
+ from typing import Dict, List, Optional

  from ...data_classes.detections import Detections
  from ..outbox import Outbox
@@ -11,7 +11,13 @@ class RelevanceFilter():
  self.cam_histories: Dict[str, CamObservationHistory] = {}
  self.outbox: Outbox = outbox

- def may_upload_detections(self, dets: Detections, cam_id: str, raw_image: bytes, tags: List[str]) -> List[str]:
+ def may_upload_detections(self,
+ dets: Detections,
+ cam_id: str,
+ raw_image: bytes,
+ tags: List[str],
+ source: Optional[str] = None
+ ) -> List[str]:
  for group in self.cam_histories.values():
  group.forget_old_detections()

@@ -23,5 +29,5 @@ class RelevanceFilter():
  if len(causes) > 0:
  tags = tags if tags is not None else []
  tags.extend(causes)
- self.outbox.save(raw_image, dets, tags)
+ self.outbox.save(raw_image, dets, tags, source)
  return causes
@@ -55,7 +55,13 @@ class Outbox():

  self.upload_counter = 0

- def save(self, image: bytes, detections: Optional[Detections] = None, tags: Optional[List[str]] = None) -> None:
+ def save(self,
+ image: bytes,
+ detections: Optional[Detections] = None,
+ tags: Optional[List[str]] = None,
+ source: Optional[str] = None
+ ) -> None:
+
  if not self._is_valid_jpg(image):
  self.log.error('Invalid jpg image')
  return
@@ -71,6 +77,7 @@ class Outbox():
  tmp = f'{GLOBALS.data_folder}/tmp/{identifier}'
  detections.tags = tags
  detections.date = identifier
+ detections.source = source or 'unknown'
  os.makedirs(tmp, exist_ok=True)

  with open(tmp + '/image.json', 'w') as f:
@@ -22,4 +22,5 @@ async def get_about(request: Request):
  'state': app.status.state,
  'model_info': app.detector_logic._model_info, # pylint: disable=protected-access
  'target_model': app.target_model.version if app.target_model is not None else 'None',
+ 'version_control': app.version_control.value,
  }
@@ -22,10 +22,8 @@ async def _socketio(request: Request):
  curl -X PUT -d "on" http://localhost:8007/socketio
  '''
  state = str(await request.body(), 'utf-8')
- await _switch_socketio(state, request.app)
+ detector_node: 'DetectorNode' = request.app

-
- async def _switch_socketio(state: str, detector_node: 'DetectorNode'):
  if state == 'off':
  logging.info('BC: turning socketio off')
  await detector_node.sio_client.disconnect()
@@ -1,10 +1,14 @@
  import logging
- from typing import Optional
+ from typing import TYPE_CHECKING, Optional

  import numpy as np
  from fastapi import APIRouter, File, Header, Request, UploadFile
  from fastapi.responses import JSONResponse

+ if TYPE_CHECKING:
+ from ..detector_node import DetectorNode
+
+
  router = APIRouter()


@@ -15,6 +19,7 @@ async def http_detect(
  camera_id: Optional[str] = Header(None),
  mac: Optional[str] = Header(None),
  tags: Optional[str] = Header(None),
+ source: Optional[str] = Header(None),
  autoupload: Optional[str] = Header(None),
  ):
  """
@@ -35,10 +40,12 @@ async def http_detect(
  raise Exception(f'Uploaded file {file.filename} is no image file.') from exc

  try:
- detections = await request.app.get_detections(raw_image=np_image,
- camera_id=camera_id or mac or None,
- tags=tags.split(',') if tags else [],
- autoupload=autoupload,)
+ app: 'DetectorNode' = request.app
+ detections = await app.get_detections(raw_image=np_image,
+ camera_id=camera_id or mac or None,
+ tags=tags.split(',') if tags else [],
+ source=source,
+ autoupload=autoupload)
  except Exception as exc:
  logging.exception(f'Error during detection of image {file.filename}.')
  raise Exception(f'Error during detection of image {file.filename}.') from exc
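Over REST the same information travels as headers. A sketch using the `requests` library (the `/detect` route path, the `file` form field, host and port are assumptions not visible in this hunk; the header names follow the parameters above):

```python
import requests

with open('test.jpg', 'rb') as f:
    response = requests.post(
        'http://localhost:8004/detect',              # route path and port are assumptions
        files={'file': ('test.jpg', f, 'image/jpeg')},
        headers={
            'camera-id': 'cam-01',                   # alternatively 'mac'
            'tags': 'demo,doorway',                  # comma-separated, split server-side
            'source': 'gate-camera',                 # new in 0.10.13, forwarded to get_detections
            'autoupload': 'filtered',                # 'filtered' (default), 'all' or 'disabled'
        },
        timeout=30,
    )
print(response.json())  # detections serialized via jsonable_encoder(asdict(...))
```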
@@ -23,6 +23,11 @@ LOGGING_CONF = {
  'level': 'INFO',
  'propagate': False,
  },
+ 'Node': {
+ 'handlers': ['console'],
+ 'level': 'INFO',
+ 'propagate': False,
+ },
  },
  }

@@ -18,6 +18,7 @@ from .data_exchanger import DataExchanger
  from .helpers import log_conf
  from .helpers.misc import ensure_socket_response, read_or_create_uuid
  from .loop_communication import LoopCommunicator
+ from .rest import router


  class Node(FastAPI):
@@ -41,7 +42,7 @@ class Node(FastAPI):
  self.uuid = uuid or read_or_create_uuid(self.name)
  self.needs_login = needs_login

- self.log = logging.getLogger()
+ self.log = logging.getLogger('Node')
  self.loop_communicator = LoopCommunicator()
  self.websocket_url = self.loop_communicator.websocket_url()
  self.data_exchanger = DataExchanger(None, self.loop_communicator)
@@ -56,6 +57,8 @@ class Node(FastAPI):

  self.repeat_task: Any = None

+ self.include_router(router)
+
  @property
  def sio_client(self) -> AsyncClient:
  if self._sio_client is None:
@@ -0,0 +1,32 @@
+ import logging
+ from typing import TYPE_CHECKING
+
+ from fastapi import APIRouter, Request, HTTPException
+
+ if TYPE_CHECKING:
+ from .node import Node
+
+
+ router = APIRouter()
+ logger = logging.getLogger('Node.rest')
+
+
+ @router.put("/debug_logging")
+ async def _debug_logging(request: Request):
+ '''
+ Example Usage
+
+ curl -X PUT -d "on" http://localhost:8007/debug_logging
+ '''
+ state = str(await request.body(), 'utf-8')
+ node: 'Node' = request.app
+
+ if state == 'off':
+ logger.info('turning debug logging off')
+ node.log.setLevel('INFO')
+ return 'off'
+ if state == 'on':
+ logger.info('turning debug logging on')
+ node.log.setLevel('DEBUG')
+ return 'on'
+ raise HTTPException(status_code=400, detail='Invalid state')
@@ -12,6 +12,9 @@ import pytest
  import socketio
  import uvicorn

+ from learning_loop_node.data_classes import BoxDetection, Detections
+ from learning_loop_node.detector.detector_logic import DetectorLogic
+
  from ...detector.detector_node import DetectorNode
  from ...detector.outbox import Outbox
  from ...globals import GLOBALS
@@ -113,6 +116,37 @@ def get_outbox_files(outbox: Outbox):
  files = glob(f'{outbox.path}/**/*', recursive=True)
  return [file for file in files if os.path.isfile(file)]

+
+ @pytest.fixture
+ def mock_detector_logic():
+ class MockDetectorLogic(DetectorLogic): # pylint: disable=abstract-method
+ def __init__(self):
+ super().__init__('mock')
+ self.detections = Detections(
+ box_detections=[BoxDetection(category_name="test",
+ category_id="1",
+ confidence=0.9,
+ x=0, y=0, width=10, height=10,
+ model_name="mock",
+ )]
+ )
+
+ @property
+ def is_initialized(self):
+ return True
+
+ def evaluate_with_all_info(self, image, tags, source): # pylint: disable=signature-differs
+ return self.detections
+
+ return MockDetectorLogic()
+
+
+ @pytest.fixture
+ def detector_node(mock_detector_logic):
+ os.environ['ORGANIZATION'] = 'test_organization'
+ os.environ['PROJECT'] = 'test_project'
+ return DetectorNode(name="test_node", detector=mock_detector_logic)
+
  # ====================================== REDUNDANT FIXTURES IN ALL CONFTESTS ! ======================================

@@ -0,0 +1,86 @@
+ import numpy as np
+ import pytest
+
+ from learning_loop_node.detector.detector_node import DetectorNode
+
+
+ @pytest.mark.asyncio
+ async def test_get_detections(detector_node: DetectorNode, monkeypatch):
+ # Mock raw image data
+ raw_image = np.zeros((100, 100, 3), dtype=np.uint8)
+
+ # Mock relevance_filter and outbox
+ filtered_upload_called = False
+ save_called = False
+
+ save_args = []
+
+ def mock_filtered_upload(*args, **kwargs): # pylint: disable=unused-argument
+ nonlocal filtered_upload_called
+ filtered_upload_called = True
+
+ def mock_save(*args, **kwargs):
+ nonlocal save_called
+ nonlocal save_args
+ save_called = True
+ save_args = (args, kwargs)
+
+ monkeypatch.setattr(detector_node.relevance_filter, "may_upload_detections", mock_filtered_upload)
+ monkeypatch.setattr(detector_node.outbox, "save", mock_save)
+
+ # Test cases
+ test_cases = [
+ (None, True, False),
+ ("filtered", True, False),
+ ("all", False, True),
+ ("disabled", False, False),
+ ]
+
+ expected_save_args = {
+ 'image': raw_image,
+ 'detections': detector_node.detector_logic.detections, # type: ignore
+ 'tags': ['test_tag'],
+ 'source': 'test_source',
+ }
+
+ for autoupload, expect_filtered, expect_all in test_cases:
+ filtered_upload_called = False
+ save_called = False
+
+ result = await detector_node.get_detections(
+ raw_image=raw_image,
+ camera_id="test_camera",
+ tags=["test_tag"],
+ source="test_source",
+ autoupload=autoupload
+ )
+
+ # Check if detections were processed
+ assert result is not None
+ assert "box_detections" in result
+ assert len(result["box_detections"]) == 1
+ assert result["box_detections"][0]["category_name"] == "test"
+
+ # Check if the correct upload method was called
+ assert filtered_upload_called == expect_filtered
+ assert save_called == expect_all
+
+ if save_called:
+ save_pos_args, save_kwargs = save_args # pylint: disable=unbalanced-tuple-unpacking
+ expected_values = list(expected_save_args.values())
+ assert len(save_pos_args) + len(save_kwargs) == len(expected_values)
+
+ # Check positional arguments
+ for arg, expected in zip(save_pos_args, expected_values[:len(save_pos_args)]):
+ if isinstance(arg, (list, np.ndarray)):
+ assert np.array_equal(arg, expected)
+ else:
+ assert arg == expected
+
+ # Check keyword arguments
+ for key, value in save_kwargs.items():
+ expected = expected_save_args[key]
+ if isinstance(value, (list, np.ndarray)):
+ assert np.array_equal(value, expected)
+ else:
+ assert value == expected
@@ -16,12 +16,9 @@ from ..loop_communication import LoopCommunicator

  class EnvironmentVars:
  def __init__(self) -> None:
- self.restart_after_training = os.environ.get(
- 'RESTART_AFTER_TRAINING', 'FALSE').lower() in ['true', '1']
- self.keep_old_trainings = os.environ.get(
- 'KEEP_OLD_TRAININGS', 'FALSE').lower() in ['true', '1']
- self.inference_batch_size = int(
- os.environ.get('INFERENCE_BATCH_SIZE', '10'))
+ self.restart_after_training = os.environ.get('RESTART_AFTER_TRAINING', 'FALSE').lower() in ['true', '1']
+ self.keep_old_trainings = os.environ.get('KEEP_OLD_TRAININGS', 'FALSE').lower() in ['true', '1']
+ self.inference_batch_size = int(os.environ.get('INFERENCE_BATCH_SIZE', '10'))


  class LastTrainingIO:
@@ -25,7 +25,7 @@ class TrainerLogic(TrainerLogicGeneric):
  self._detection_progress: Optional[float] = None
  self._executor: Optional[Executor] = None
  self.start_training_task: Optional[Coroutine] = None
- self.inference_batch_size = 10
+ self.inference_batch_size = self._environment_vars.inference_batch_size

  # ---------------------------------------- IMPLEMENTED ABSTRACT PROPERTIES ----------------------------------------

@@ -92,7 +92,7 @@ class TrainerLogic(TrainerLogicGeneric):

  shutil.rmtree(tmp_folder, ignore_errors=True)
  os.makedirs(tmp_folder)
- logging.info(f'downloading detection model to {tmp_folder}')
+ logging.info('downloading detection model to %s', tmp_folder)

  await self.node.data_exchanger.download_model(tmp_folder, context, model_id, self.model_format)
  with open(f'{tmp_folder}/model.json', 'r') as f:
@@ -104,10 +104,10 @@ class TrainerLogic(TrainerLogicGeneric):
  image_ids = []
  for state, p in zip(['inbox', 'annotate', 'review', 'complete'], [0.1, 0.2, 0.3, 0.4]):
  self._detection_progress = p
- logging.info(f'fetching image ids of {state}')
+ logging.info('fetching image ids of state %s', state)
  new_ids = await self.node.data_exchanger.fetch_image_uuids(query_params=f'state={state}')
  image_ids += new_ids
- logging.info(f'downloading {len(new_ids)} images')
+ logging.info('downloading %d images', len(new_ids))
  await self.node.data_exchanger.download_images(new_ids, image_folder)
  self._detection_progress = 0.42
  # await delete_corrupt_images(image_folder)
@@ -210,7 +210,7 @@ class TrainerLogicGeneric(ABC):

  self._active_training_io = ActiveTrainingIO(
  self._training.training_folder, self.node.loop_communicator, context)
- logger.info(f'new training initialized: {self._training}')
+ logger.info('new training initialized: %s', self._training)

  async def _run(self) -> None:
  """Called on `begin_training` event from the Learning Loop.
@@ -229,8 +229,8 @@ class TrainerLogicGeneric(ABC):
  self._may_restart()
  else:
  logger.info('CancelledError in _run - shutting down')
- except Exception as e:
- logger.exception(f'Error in train: {e}')
+ except Exception:
+ logger.exception('(Ignored) exception in trainer_logic._run:')

  # ---------------------------------------- TRAINING STATES ----------------------------------------

@@ -271,7 +271,7 @@ class TrainerLogicGeneric(ABC):
  '''

  await asyncio.sleep(0.1)
- logger.info(f'Performing state: {state_during}')
+ logger.info('Performing state: %s', state_during)
  previous_state = self.training.training_state
  self.training.training_state = state_during
  await asyncio.sleep(0.1)
@@ -283,12 +283,12 @@ class TrainerLogicGeneric(ABC):

  except asyncio.CancelledError:
  if self.shutdown_event.is_set():
- logger.info(f'CancelledError in {state_during} - shutdown event set')
+ logger.info('CancelledError in %s - shutdown event set', state_during)
  raise
- logger.info(f'CancelledError in {state_during} - cleaning up')
+ logger.info('CancelledError in %s - cleaning up', state_during)
  self.training.training_state = TrainerState.ReadyForCleanup
  except CriticalError as e:
- logger.error(f'CriticalError in {state_during} - Exception: {e}')
+ logger.error('CriticalError in %s - Exception: %s', state_during, e)
  self.errors.set(error_key, str(e))
  self.training.training_state = TrainerState.ReadyForCleanup
  except Exception as e:
@@ -297,7 +297,7 @@ class TrainerLogicGeneric(ABC):
  self.training.training_state = previous_state
  return
  else:
- logger.info(f'Successfully finished state: {state_during}')
+ logger.info('Successfully finished state: %s', state_during)
  if not reset_early:
  self.errors.reset(error_key)
  self.training.training_state = state_after
@@ -32,7 +32,7 @@ class TrainerNode(Node):
  self.log.info(
  f'Trainer started with an idle_timeout of {self.idle_timeout} seconds. Note that shutdown does not work if docker container has the restart policy set to always')

- if use_backdoor_controls:
+ if use_backdoor_controls or os.environ.get('USE_BACKDOOR_CONTROLS', '0').lower() in ('1', 'true'):
  self.include_router(backdoor_controls.router, tags=["controls"])

  # ----------------------------------- NODE LIVECYCLE METHODS --------------------------
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "learning_loop_node"
- version = "v0.10.11"
+ version = "v0.10.13"
  description = "Python Library for Nodes which connect to the Zauberzeug Learning Loop"
  authors = ["Zauberzeug GmbH <info@zauberzeug.com>"]
  license = "MIT"