learning-loop-node 0.10.17__py3-none-any.whl → 0.11.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release is flagged as potentially problematic; see the package page for details.

Files changed (26)
  1. learning_loop_node/data_classes/__init__.py +3 -2
  2. learning_loop_node/data_classes/detections.py +5 -12
  3. learning_loop_node/data_classes/image_metadata.py +37 -0
  4. learning_loop_node/detector/detector_logic.py +3 -3
  5. learning_loop_node/detector/detector_node.py +23 -20
  6. learning_loop_node/detector/inbox_filter/cam_observation_history.py +3 -3
  7. learning_loop_node/detector/inbox_filter/relevance_filter.py +7 -6
  8. learning_loop_node/detector/outbox.py +24 -10
  9. learning_loop_node/detector/rest/detect.py +5 -4
  10. learning_loop_node/detector/rest/upload.py +13 -5
  11. learning_loop_node/loop_communication.py +3 -3
  12. learning_loop_node/tests/detector/conftest.py +9 -9
  13. learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py +7 -7
  14. learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py +6 -6
  15. learning_loop_node/tests/detector/test_client_communication.py +46 -46
  16. learning_loop_node/tests/detector/test_detector_node.py +3 -1
  17. learning_loop_node/tests/detector/test_outbox.py +2 -2
  18. learning_loop_node/tests/detector/test_relevance_filter.py +2 -2
  19. learning_loop_node/tests/detector/testing_detector.py +3 -3
  20. learning_loop_node/tests/general/test_downloader.py +4 -4
  21. learning_loop_node/tests/test_helper.py +1 -2
  22. learning_loop_node/trainer/io_helpers.py +4 -4
  23. learning_loop_node/trainer/trainer_logic_generic.py +8 -4
  24. {learning_loop_node-0.10.17.dist-info → learning_loop_node-0.11.1.dist-info}/METADATA +1 -1
  25. {learning_loop_node-0.10.17.dist-info → learning_loop_node-0.11.1.dist-info}/RECORD +26 -25
  26. {learning_loop_node-0.10.17.dist-info → learning_loop_node-0.11.1.dist-info}/WHEEL +0 -0

learning_loop_node/data_classes/__init__.py

@@ -3,14 +3,15 @@ from .detections import (BoxDetection, ClassificationDetection, Detections, Obse
  SegmentationDetection, Shape)
  from .general import (AnnotationNodeStatus, Category, CategoryType, Context, DetectionStatus, ErrorConfiguration,
  ModelInformation, NodeState, NodeStatus)
+ from .image_metadata import ImageMetadata
  from .socket_response import SocketResponse
  from .training import (Errors, Hyperparameter, Model, PretrainedModel, TrainerState, Training, TrainingData,
  TrainingError, TrainingOut, TrainingStateData, TrainingStatus)

  __all__ = [
  'AnnotationData', 'AnnotationEventType', 'SegmentationAnnotation', 'ToolOutput', 'UserInput',
- 'BoxDetection', 'ClassificationDetection', 'Detections', 'Observation', 'Point', 'PointDetection',
- 'SegmentationDetection', 'Shape',
+ 'BoxDetection', 'ClassificationDetection', 'ImageMetadata', 'Observation', 'Point', 'PointDetection',
+ 'SegmentationDetection', 'Shape', 'Detections',
  'AnnotationNodeStatus', 'Category', 'CategoryType', 'Context', 'DetectionStatus', 'ErrorConfiguration',
  'ModelInformation', 'NodeState', 'NodeStatus',
  'SocketResponse',

learning_loop_node/data_classes/detections.py

@@ -6,11 +6,13 @@ from typing import List, Optional, Union

  import numpy as np

- # pylint: disable=too-many-instance-attributes
-
  KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}


+ def current_datetime():
+ return datetime.now().isoformat(sep='_', timespec='milliseconds')
+
+
  @dataclass(**KWONLY_SLOTS)
  class BoxDetection():
  """Coordinates according to COCO format. x,y is the top left corner of the box.
@@ -106,10 +108,6 @@ class SegmentationDetection():
  return f'shape:{str(self.shape)}, c: {self.confidence:.2f} -> {self.category_name}'


- def current_datetime():
- return datetime.now().isoformat(sep='_', timespec='milliseconds')
-
-
  @dataclass(**KWONLY_SLOTS)
  class Detections():
  box_detections: List[BoxDetection] = field(default_factory=list, metadata={
@@ -120,14 +118,9 @@ class Detections():
  'description': 'List of segmentation detections'})
  classification_detections: List[ClassificationDetection] = field(default_factory=list, metadata={
  'description': 'List of classification detections'})
- tags: List[str] = field(default_factory=list, metadata={
- 'description': 'List of tags'})
- date: Optional[str] = field(default_factory=current_datetime, metadata={
- 'description': 'Date of the detections'})
+
  image_id: Optional[str] = field(default=None, metadata={
  'description': 'Image uuid'})
- source: Optional[str] = field(default=None, metadata={
- 'description': 'Source of the detections'})

  def __len__(self):
  return len(self.box_detections) + len(self.point_detections) + len(self.segmentation_detections) + len(self.classification_detections)

learning_loop_node/data_classes/image_metadata.py (new file)

@@ -0,0 +1,37 @@
+
+ import sys
+ from dataclasses import dataclass, field
+ from datetime import datetime
+ from typing import List, Optional
+
+ from .detections import BoxDetection, ClassificationDetection, PointDetection, SegmentationDetection
+
+ # pylint: disable=too-many-instance-attributes
+
+ KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}
+
+
+ def current_datetime():
+ return datetime.now().isoformat(sep='_', timespec='milliseconds')
+
+
+ @dataclass(**KWONLY_SLOTS)
+ class ImageMetadata():
+ box_detections: List[BoxDetection] = field(default_factory=list, metadata={
+ 'description': 'List of box detections'})
+ point_detections: List[PointDetection] = field(default_factory=list, metadata={
+ 'description': 'List of point detections'})
+ segmentation_detections: List[SegmentationDetection] = field(default_factory=list, metadata={
+ 'description': 'List of segmentation detections'})
+ classification_detections: List[ClassificationDetection] = field(default_factory=list, metadata={
+ 'description': 'List of classification detections'})
+ tags: List[str] = field(default_factory=list, metadata={
+ 'description': 'List of tags'})
+
+ created: Optional[str] = field(default_factory=current_datetime, metadata={
+ 'description': 'Creation date of the image'})
+ source: Optional[str] = field(default=None, metadata={
+ 'description': 'Source of the image'})
+
+ def __len__(self):
+ return len(self.box_detections) + len(self.point_detections) + len(self.segmentation_detections) + len(self.classification_detections)
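
The new ImageMetadata dataclass now carries the per-image fields (tags, created, source) that used to live on Detections, plus the four detection lists. A minimal, hypothetical sketch of constructing it (the category id, model name and coordinates are placeholders):

    from learning_loop_node.data_classes import BoxDetection, ImageMetadata

    metadata = ImageMetadata(
        box_detections=[BoxDetection(category_name='dirt', category_id='some_id',
                                     x=10, y=20, width=30, height=40,
                                     model_name='some_model', confidence=0.87)],
        tags=['demo'],
        source='test_camera')
    print(len(metadata))  # 1 -> total number of detections across all four lists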

learning_loop_node/detector/detector_logic.py

@@ -4,7 +4,7 @@ from typing import List, Optional

  import numpy as np

- from ..data_classes import Detections, ModelInformation
+ from ..data_classes import ImageMetadata, ModelInformation
  from ..globals import GLOBALS

@@ -46,13 +46,13 @@ class DetectorLogic():
  def init(self):
  """Called when a (new) model was loaded. Initialize the model. Model information available via `self.model_info`"""

- def evaluate_with_all_info(self, image: np.ndarray, tags: List[str], source: Optional[str] = None) -> Detections: # pylint: disable=unused-argument
+ def evaluate_with_all_info(self, image: np.ndarray, tags: List[str], source: Optional[str] = None, creation_date: Optional[str] = None) -> ImageMetadata: # pylint: disable=unused-argument
  """Called by the detector node when an image should be evaluated (REST or SocketIO).
  Tags, source come from the caller and may be used in this function.
  By default, this function simply calls `evaluate`"""
  return self.evaluate(image)

  @abstractmethod
- def evaluate(self, image: np.ndarray) -> Detections:
+ def evaluate(self, image: np.ndarray) -> ImageMetadata:
  """Evaluate the image and return the detections.
  The object should return empty detections if it is not initialized"""
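
A detector implementation mostly just has to adopt the new return type. A rough sketch of a custom subclass under the new signature (the model format string and detection values are placeholders, not part of this package):

    import numpy as np

    from learning_loop_node.data_classes import BoxDetection, ImageMetadata
    from learning_loop_node.detector.detector_logic import DetectorLogic


    class MyDetectorLogic(DetectorLogic):

        def __init__(self) -> None:
            super().__init__('my_format')  # placeholder model format name

        def init(self) -> None:
            pass  # load weights here, e.g. based on self.model_info

        def evaluate(self, image: np.ndarray) -> ImageMetadata:
            # Replace with a real inference call; the values below are made up.
            return ImageMetadata(box_detections=[
                BoxDetection(category_name='object', category_id='0',
                             x=0, y=0, width=10, height=10,
                             model_name='my_model', confidence=0.5)])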

learning_loop_node/detector/detector_node.py

@@ -14,7 +14,7 @@ from dacite import from_dict
  from fastapi.encoders import jsonable_encoder
  from socketio import AsyncClient

- from ..data_classes import Category, Context, Detections, DetectionStatus, ModelInformation, Shape
+ from ..data_classes import Category, Context, DetectionStatus, ImageMetadata, ModelInformation, Shape
  from ..data_classes.socket_response import SocketResponse
  from ..data_exchanger import DataExchanger, DownloadError
  from ..globals import GLOBALS
@@ -140,7 +140,6 @@ class DetectorNode(Node):

  @self.sio.event
  async def detect(sid, data: Dict) -> Dict:
- self.log.debug('running detect via socketio')
  try:
  np_image = np.frombuffer(data['image'], np.uint8)
  det = await self.get_detections(
@@ -153,7 +152,6 @@ class DetectorNode(Node):
  if det is None:
  return {'error': 'no model loaded'}
  detection_dict = jsonable_encoder(asdict(det))
- self.log.debug('detect via socketio finished')
  return detection_dict
  except Exception as e:
  self.log.exception('could not detect via socketio')
@@ -174,22 +172,26 @@ class DetectorNode(Node):
  detection_data = data.get('detections', {})
  if detection_data and self.detector_logic.is_initialized:
  try:
- detections = from_dict(data_class=Detections, data=detection_data)
+ image_metadata = from_dict(data_class=ImageMetadata, data=detection_data)
  except Exception as e:
  self.log.exception('could not parse detections')
  return {'error': str(e)}
- detections = self.add_category_id_to_detections(self.detector_logic.model_info, detections)
+ image_metadata = self.add_category_id_to_detections(self.detector_logic.model_info, image_metadata)
  else:
- detections = Detections()
+ image_metadata = ImageMetadata()

  tags = data.get('tags', [])
  tags.append('picked_by_system')

  source = data.get('source', None)
+ creation_date = data.get('creation_date', None)
+
+ self.log.debug('running upload via socketio. tags: %s, source: %s, creation_date: %s',
+ tags, source, creation_date)

  loop = asyncio.get_event_loop()
  try:
- await loop.run_in_executor(None, self.outbox.save, data['image'], detections, tags, source)
+ await loop.run_in_executor(None, self.outbox.save, data['image'], image_metadata, tags, source, creation_date)
  except Exception as e:
  self.log.exception('could not upload via socketio')
  return {'error': str(e)}
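
On the client side, the socketio 'upload' event can now carry the extra metadata read by the handler above. A hedged sketch of such a call (the port and file name are assumptions; an 'error' key in the returned dict signals failure):

    import asyncio

    import socketio


    async def upload_image() -> None:
        sio = socketio.AsyncClient()
        await sio.connect('ws://localhost:8004', socketio_path='/ws/socket.io')  # port is an assumption
        with open('test.jpg', 'rb') as f:
            image = f.read()
        # the keys mirror what the 'upload' handler reads from `data`
        result = await sio.call('upload', {'image': image,
                                           'tags': ['demo'],
                                           'source': 'test_camera',
                                           'creation_date': '2024-01-01T00:00:00'})
        print(result)
        await sio.disconnect()

    asyncio.run(upload_image())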
@@ -343,7 +345,8 @@ class DetectorNode(Node):
  camera_id: Optional[str],
  tags: List[str],
  source: Optional[str] = None,
- autoupload: Optional[str] = None) -> Detections:
+ autoupload: Optional[str] = None,
+ creation_date: Optional[str] = None) -> ImageMetadata:
  """ Main processing function for the detector node when an image is received via REST or SocketIO.
  This function infers the detections from the image, cares about uploading to the loop and returns the detections as a dictionary.
  Note: raw_image is a numpy array of type uint8, but not in the correct shape!
@@ -351,7 +354,7 @@ class DetectorNode(Node):

  await self.detection_lock.acquire()
  loop = asyncio.get_event_loop()
- detections = await loop.run_in_executor(None, self.detector_logic.evaluate_with_all_info, raw_image, tags, source)
+ detections = await loop.run_in_executor(None, self.detector_logic.evaluate_with_all_info, raw_image, tags, source, creation_date)
  self.detection_lock.release()

  fix_shape_detections(detections)
@@ -361,42 +364,42 @@ class DetectorNode(Node):

  if autoupload is None or autoupload == 'filtered': # NOTE default is filtered
  Thread(target=self.relevance_filter.may_upload_detections,
- args=(detections, camera_id, raw_image, tags, source)).start()
+ args=(detections, camera_id, raw_image, tags, source, creation_date)).start()
  elif autoupload == 'all':
- Thread(target=self.outbox.save, args=(raw_image, detections, tags, source)).start()
+ Thread(target=self.outbox.save, args=(raw_image, detections, tags, source, creation_date)).start()
  elif autoupload == 'disabled':
  pass
  else:
  self.log.error('unknown autoupload value %s', autoupload)
  return detections

- async def upload_images(self, images: List[bytes]):
+ async def upload_images(self, images: List[bytes], source: Optional[str], creation_date: Optional[str]):
  loop = asyncio.get_event_loop()
  for image in images:
- await loop.run_in_executor(None, self.outbox.save, image, Detections(), ['picked_by_system'])
+ await loop.run_in_executor(None, self.outbox.save, image, ImageMetadata(), ['picked_by_system'], source, creation_date)

- def add_category_id_to_detections(self, model_info: ModelInformation, detections: Detections):
+ def add_category_id_to_detections(self, model_info: ModelInformation, image_metadata: ImageMetadata):
  def find_category_id_by_name(categories: List[Category], category_name: str):
  category_id = [category.id for category in categories if category.name == category_name]
  return category_id[0] if category_id else ''

- for box_detection in detections.box_detections:
+ for box_detection in image_metadata.box_detections:
  category_name = box_detection.category_name
  category_id = find_category_id_by_name(model_info.categories, category_name)
  box_detection.category_id = category_id
- for point_detection in detections.point_detections:
+ for point_detection in image_metadata.point_detections:
  category_name = point_detection.category_name
  category_id = find_category_id_by_name(model_info.categories, category_name)
  point_detection.category_id = category_id
- for segmentation_detection in detections.segmentation_detections:
+ for segmentation_detection in image_metadata.segmentation_detections:
  category_name = segmentation_detection.category_name
  category_id = find_category_id_by_name(model_info.categories, category_name)
  segmentation_detection.category_id = category_id
- for classification_detection in detections.classification_detections:
+ for classification_detection in image_metadata.classification_detections:
  category_name = classification_detection.category_name
  category_id = find_category_id_by_name(model_info.categories, category_name)
  classification_detection.category_id = category_id
- return detections
+ return image_metadata

  def register_sio_events(self, sio_client: AsyncClient):
  pass
@@ -412,7 +415,7 @@ def step_into(new_dir):
  os.chdir(previous_dir)


- def fix_shape_detections(detections: Detections):
+ def fix_shape_detections(detections: ImageMetadata):
  # TODO This is a quick fix.. check how loop upload detections deals with this
  for seg_detection in detections.segmentation_detections:
  if isinstance(seg_detection.shape, Shape):

learning_loop_node/detector/inbox_filter/cam_observation_history.py

@@ -1,7 +1,7 @@
  import os
  from typing import List, Union

- from learning_loop_node.data_classes import (BoxDetection, ClassificationDetection, Detections, Observation,
+ from learning_loop_node.data_classes import (BoxDetection, ClassificationDetection, ImageMetadata, Observation,
  PointDetection, SegmentationDetection)


@@ -16,9 +16,9 @@ class CamObservationHistory:
  for detection in self.recent_observations
  if not detection.is_older_than(self.reset_time)]

- def get_causes_to_upload(self, detections: Detections) -> List[str]:
+ def get_causes_to_upload(self, image_metadata: ImageMetadata) -> List[str]:
  causes = set()
- for detection in detections.box_detections + detections.point_detections + detections.segmentation_detections + detections.classification_detections:
+ for detection in image_metadata.box_detections + image_metadata.point_detections + image_metadata.segmentation_detections + image_metadata.classification_detections:
  if isinstance(detection, SegmentationDetection):
  # self.recent_observations.append(Observation(detection))
  causes.add('segmentation_detection')

learning_loop_node/detector/inbox_filter/relevance_filter.py

@@ -1,6 +1,6 @@
  from typing import Dict, List, Optional

- from ...data_classes.detections import Detections
+ from ...data_classes.image_metadata import ImageMetadata
  from ..outbox import Outbox
  from .cam_observation_history import CamObservationHistory

@@ -12,22 +12,23 @@ class RelevanceFilter():
  self.outbox: Outbox = outbox

  def may_upload_detections(self,
- dets: Detections,
+ image_metadata: ImageMetadata,
  cam_id: str,
  raw_image: bytes,
  tags: List[str],
- source: Optional[str] = None
+ source: Optional[str] = None,
+ creation_date: Optional[str] = None
  ) -> List[str]:
  for group in self.cam_histories.values():
  group.forget_old_detections()

  if cam_id not in self.cam_histories:
  self.cam_histories[cam_id] = CamObservationHistory()
- causes = self.cam_histories[cam_id].get_causes_to_upload(dets)
- if len(dets) >= 80:
+ causes = self.cam_histories[cam_id].get_causes_to_upload(image_metadata)
+ if len(image_metadata) >= 80:
  causes.append('unexpected_observations_count')
  if len(causes) > 0:
  tags = tags if tags is not None else []
  tags.extend(causes)
- self.outbox.save(raw_image, dets, tags, source)
+ self.outbox.save(raw_image, image_metadata, tags, source, creation_date)
  return causes

learning_loop_node/detector/outbox.py

@@ -19,7 +19,7 @@ import PIL
  import PIL.Image # type: ignore
  from fastapi.encoders import jsonable_encoder

- from ..data_classes import Detections
+ from ..data_classes import ImageMetadata
  from ..globals import GLOBALS
  from ..helpers import environment_reader

@@ -56,17 +56,18 @@ class Outbox():

  def save(self,
  image: bytes,
- detections: Optional[Detections] = None,
+ image_metadata: Optional[ImageMetadata] = None,
  tags: Optional[List[str]] = None,
- source: Optional[str] = None
+ source: Optional[str] = None,
+ creation_date: Optional[str] = None
  ) -> None:

  if not self._is_valid_jpg(image):
  self.log.error('Invalid jpg image')
  return

- if detections is None:
- detections = Detections()
+ if image_metadata is None:
+ image_metadata = ImageMetadata()
  if not tags:
  tags = []
  identifier = datetime.now().isoformat(sep='_', timespec='microseconds')
@@ -74,13 +75,17 @@ class Outbox():
  self.log.error('Directory with identifier %s already exists', identifier)
  return
  tmp = f'{GLOBALS.data_folder}/tmp/{identifier}'
- detections.tags = tags
- detections.date = identifier
- detections.source = source or 'unknown'
+ image_metadata.tags = tags
+ if self._is_valid_isoformat(creation_date):
+ image_metadata.created = creation_date
+ else:
+ image_metadata.created = identifier
+
+ image_metadata.source = source or 'unknown'
  os.makedirs(tmp, exist_ok=True)

  with open(tmp + '/image.json', 'w') as f:
- json.dump(jsonable_encoder(asdict(detections)), f)
+ json.dump(jsonable_encoder(asdict(image_metadata)), f)

  with open(tmp + '/image.jpg', 'wb') as f:
  f.write(image)
@@ -90,6 +95,15 @@ class Outbox():
  else:
  self.log.error('Could not rename %s to %s', tmp, self.path + '/' + identifier)

+ def _is_valid_isoformat(self, date: Optional[str]) -> bool:
+ if date is None:
+ return False
+ try:
+ datetime.fromisoformat(date)
+ return True
+ except Exception:
+ return False
+
  def get_data_files(self):
  return glob(f'{self.path}/*')

@@ -142,7 +156,7 @@ class Outbox():
  self.log.exception('Could not upload images')
  return
  finally:
- self.log.info('Closing files')
+ self.log.debug('Closing files')
  for _, file in data:
  file.close()
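
A short sketch of the updated Outbox.save() call; when creation_date is not a valid ISO string, the outbox falls back to its own timestamp (the file name and values are placeholders, and the outbox expects the usual LOOP_ORGANIZATION / LOOP_PROJECT environment to be configured):

    from learning_loop_node.detector.outbox import Outbox

    outbox = Outbox()
    with open('test.jpg', 'rb') as f:
        image = f.read()
    outbox.save(image,
                tags=['demo'],
                source='test_camera',
                creation_date='2024-01-01T00:00:00')  # stored as 'created' in image.json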

learning_loop_node/detector/rest/detect.py

@@ -3,9 +3,8 @@ from typing import TYPE_CHECKING, Optional

  import numpy as np
  from fastapi import APIRouter, File, Header, Request, UploadFile
- from fastapi.responses import JSONResponse

- from ...data_classes.detections import Detections
+ from ...data_classes.image_metadata import ImageMetadata

  if TYPE_CHECKING:
  from ..detector_node import DetectorNode
@@ -13,7 +12,7 @@ if TYPE_CHECKING:
  router = APIRouter()


- @router.post("/detect", response_model=Detections)
+ @router.post("/detect", response_model=ImageMetadata)
  async def http_detect(
  request: Request,
  file: UploadFile = File(..., description='The image file to run detection on'),
@@ -23,6 +22,7 @@ async def http_detect(
  source: Optional[str] = Header(None, description='The source of the image (used by learning loop)'),
  autoupload: Optional[str] = Header(None, description='Mode to decide whether to upload the image to the learning loop',
  examples=['filtered', 'all', 'disabled']),
+ creation_date: Optional[str] = Header(None, description='The creation date of the image (used by learning loop)')
  ):
  """
  Single image example:
@@ -46,7 +46,8 @@ async def http_detect(
  camera_id=camera_id or mac or None,
  tags=tags.split(',') if tags else [],
  source=source,
- autoupload=autoupload)
+ autoupload=autoupload,
+ creation_date=creation_date)
  except Exception as exc:
  logging.exception('Error during detection of image %s.', file.filename)
  raise Exception(f'Error during detection of image {file.filename}.') from exc
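
A hedged sketch of calling the extended /detect endpoint from Python (the port is an assumption; FastAPI's default Header() handling maps the creation_date parameter to a 'creation-date' HTTP header):

    import requests  # type: ignore

    with open('test.jpg', 'rb') as f:
        response = requests.post('http://localhost:8004/detect',
                                 files={'file': f},
                                 headers={'tags': 'demo',
                                          'source': 'test_camera',
                                          'creation-date': '2024-01-01T00:00:00'},
                                 timeout=30)
    print(response.json())  # serialized ImageMetadata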

learning_loop_node/detector/rest/upload.py

@@ -1,6 +1,6 @@
- from typing import TYPE_CHECKING, List
+ from typing import TYPE_CHECKING, List, Optional

- from fastapi import APIRouter, File, Request, UploadFile
+ from fastapi import APIRouter, File, Query, Request, UploadFile

  if TYPE_CHECKING:
  from ..detector_node import DetectorNode
@@ -9,13 +9,21 @@ router = APIRouter()


  @router.post("/upload")
- async def upload_image(request: Request, files: List[UploadFile] = File(...)):
+ async def upload_image(request: Request,
+ files: List[UploadFile] = File(...),
+ source: Optional[str] = Query(None, description='Source of the image'),
+ creation_date: Optional[str] = Query(None, description='Creation date of the image')):
  """
+ Upload an image or multiple images to the learning loop.
+
+ The image source and the image creation date are optional query parameters.
+ Images are automatically tagged with 'picked_by_system'.
+
  Example Usage

- curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"
+ curl -X POST -F 'files=@test.jpg' "http://localhost:/upload?source=test&creation_date=2024-01-01T00:00:00"
  """
  raw_files = [await file.read() for file in files]
  node: DetectorNode = request.app
- await node.upload_images(raw_files)
+ await node.upload_images(raw_files, source, creation_date)
  return 200, "OK"
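
The same upload can be scripted from Python instead of curl; a sketch under the assumption that the node listens on port 8004 (the port in the curl example above is elided):

    import requests  # type: ignore

    with open('test.jpg', 'rb') as f:
        response = requests.post('http://localhost:8004/upload',
                                 params={'source': 'test', 'creation_date': '2024-01-01T00:00:00'},
                                 files=[('files', f)],
                                 timeout=30)
    print(response.status_code)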

learning_loop_node/loop_communication.py

@@ -35,7 +35,7 @@ class LoopCommunicator():
  else:
  self.async_client = httpx.AsyncClient(base_url=self.base_url, timeout=Timeout(60.0))

- logging.info(f'Loop interface initialized with base_url: {self.base_url} / user: {self.username}')
+ logging.info('Loop interface initialized with base_url: %s / user: %s', self.base_url, self.username)

  def websocket_url(self) -> str:
  return f'ws{"s" if "learning-loop.ai" in self.host else ""}://' + self.host
@@ -48,7 +48,7 @@ class LoopCommunicator():
  self.async_client.cookies.clear()
  response = await self.async_client.post('/api/login', data={'username': self.username, 'password': self.password})
  if response.status_code != 200:
- logging.info(f'Login failed with response: {response}')
+ logging.info('Login failed with response: %s', response)
  raise LoopCommunicationException('Login failed with response: ' + str(response))
  self.async_client.cookies.update(response.cookies)

@@ -57,7 +57,7 @@ class LoopCommunicator():

  response = await self.async_client.post('/api/logout')
  if response.status_code != 200:
- logging.info(f'Logout failed with response: {response}')
+ logging.info('Logout failed with response: %s', response)
  raise LoopCommunicationException('Logout failed with response: ' + str(response))
  self.async_client.cookies.clear()


learning_loop_node/tests/detector/conftest.py

@@ -6,13 +6,14 @@ import shutil
  import socket
  from glob import glob
  from multiprocessing import Process, log_to_stderr
- from typing import AsyncGenerator
+ from typing import AsyncGenerator, List, Optional

+ import numpy as np
  import pytest
  import socketio
  import uvicorn

- from learning_loop_node.data_classes import BoxDetection, Detections
+ from learning_loop_node.data_classes import BoxDetection, ImageMetadata
  from learning_loop_node.detector.detector_logic import DetectorLogic

  from ...detector.detector_node import DetectorNode
@@ -100,8 +101,8 @@ async def sio_client() -> AsyncGenerator[socketio.AsyncClient, None]:
  try:
  await sio.connect(f"ws://localhost:{detector_port}", socketio_path="/ws/socket.io")
  try_connect = False
- except Exception as e:
- logging.warning(f"Connection failed with error: {str(e)}")
+ except Exception:
+ logging.exception("Connection failed with error:")
  logging.warning('trying again')
  await asyncio.sleep(5)
  retry_count += 1
@@ -122,21 +123,20 @@ def mock_detector_logic():
  class MockDetectorLogic(DetectorLogic): # pylint: disable=abstract-method
  def __init__(self):
  super().__init__('mock')
- self.detections = Detections(
+ self.image_metadata = ImageMetadata(
  box_detections=[BoxDetection(category_name="test",
  category_id="1",
  confidence=0.9,
  x=0, y=0, width=10, height=10,
  model_name="mock",
- )]
- )
+ )])

  @property
  def is_initialized(self):
  return True

- def evaluate_with_all_info(self, image, tags, source): # pylint: disable=signature-differs
- return self.detections
+ def evaluate_with_all_info(self, image: np.ndarray, tags: List[str], source: Optional[str] = None, creation_date: Optional[str] = None):
+ return self.image_metadata

  return MockDetectorLogic()


learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py

@@ -5,7 +5,7 @@ from typing import List

  from dacite import from_dict

- from ....data_classes.detections import BoxDetection, Detections, Point, PointDetection, SegmentationDetection, Shape
+ from ....data_classes import BoxDetection, ImageMetadata, Point, PointDetection, SegmentationDetection, Shape
  from ....detector.inbox_filter.cam_observation_history import CamObservationHistory

  dirt_detection = BoxDetection(category_name='dirt', x=0, y=0, width=100, height=100,
@@ -18,16 +18,16 @@ conf_too_low_detection = BoxDetection(category_name='dirt', x=0, y=0, width=100,
  height=100, category_id='xyz', model_name='test_model', confidence=.29)


- def det_from_boxes(box_detections: List[BoxDetection]) -> Detections:
- return Detections(box_detections=box_detections)
+ def det_from_boxes(box_detections: List[BoxDetection]) -> ImageMetadata:
+ return ImageMetadata(box_detections=box_detections)


- def det_from_points(point_detections: List[PointDetection]) -> Detections:
- return Detections(point_detections=point_detections)
+ def det_from_points(point_detections: List[PointDetection]) -> ImageMetadata:
+ return ImageMetadata(point_detections=point_detections)


- def det_from_seg(seg_detections: List[SegmentationDetection]) -> Detections:
- return Detections(segmentation_detections=seg_detections)
+ def det_from_seg(seg_detections: List[SegmentationDetection]) -> ImageMetadata:
+ return ImageMetadata(segmentation_detections=seg_detections)


  def test_group_confidence():

learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py

@@ -3,7 +3,7 @@ from typing import List

  import pytest

- from ....data_classes.detections import BoxDetection, Detections, PointDetection
+ from ....data_classes.image_metadata import BoxDetection, ImageMetadata, PointDetection
  from ....detector.inbox_filter.relevance_filter import RelevanceFilter
  from ....detector.outbox import Outbox

@@ -17,14 +17,14 @@ l_conf_point_det = PointDetection(category_name='point', x=100, y=100,

  @pytest.mark.parametrize(
  "detections,reason",
- [(Detections(box_detections=[h_conf_box_det] * 40, point_detections=[h_conf_point_det] * 40),
+ [(ImageMetadata(box_detections=[h_conf_box_det] * 40, point_detections=[h_conf_point_det] * 40),
  ['unexpected_observations_count']),
- (Detections(box_detections=[h_conf_box_det], point_detections=[h_conf_point_det]), []),
- (Detections(box_detections=[h_conf_box_det] * 40, point_detections=[l_conf_point_det] * 40),
+ (ImageMetadata(box_detections=[h_conf_box_det], point_detections=[h_conf_point_det]), []),
+ (ImageMetadata(box_detections=[h_conf_box_det] * 40, point_detections=[l_conf_point_det] * 40),
  ['uncertain', 'unexpected_observations_count']),
- (Detections(box_detections=[h_conf_box_det], point_detections=[l_conf_point_det]),
+ (ImageMetadata(box_detections=[h_conf_box_det], point_detections=[l_conf_point_det]),
  ['uncertain'])])
- def test_unexpected_observations_count(detections: Detections, reason: List[str]):
+ def test_unexpected_observations_count(detections: ImageMetadata, reason: List[str]):
  os.environ['LOOP_ORGANIZATION'] = 'zauberzeug'
  os.environ['LOOP_PROJECT'] = 'demo'
  outbox = Outbox()

learning_loop_node/tests/detector/test_client_communication.py

@@ -3,7 +3,7 @@ import json
  import os

  import pytest
- import requests
+ import requests # type: ignore

  from ...data_classes import ModelInformation
  from ...detector.detector_node import DetectorNode
@@ -93,7 +93,7 @@ async def test_sio_upload(test_detector_node: DetectorNode, sio_client):

  # NOTE: This test seems to be flaky.
  async def test_about_endpoint(test_detector_node: DetectorNode):
- await asyncio.sleep(11)
+ await asyncio.sleep(16)
  response = requests.get(f'http://localhost:{GLOBALS.detector_port}/about', timeout=30)

  assert response.status_code == 200, response.content
@@ -108,60 +108,60 @@ async def test_about_endpoint(test_detector_node: DetectorNode):


  async def test_model_version_api(test_detector_node: DetectorNode):
- await asyncio.sleep(11)
-
- response = requests.get(f'http://localhost:{GLOBALS.detector_port}/model_version', timeout=30)
- assert response.status_code == 200, response.content
- response_dict = json.loads(response.content)
- assert response_dict['version_control'] == 'follow_loop'
- assert response_dict['current_version'] == '1.1'
- assert response_dict['target_version'] == '1.1'
- assert response_dict['loop_version'] == '1.1'
- assert response_dict['local_versions'] == ['1.1']
+ async def await_correct_response(target_values: dict) -> None:
+ response_dict = {}
+ for _ in range(20):
+ await asyncio.sleep(1)
+ response = requests.get(f'http://localhost:{GLOBALS.detector_port}/model_version', timeout=30)
+ if not response.status_code == 200:
+ continue
+ response_dict = json.loads(response.content)
+ for key, target_value in target_values.items():
+ if key == 'local_versions':
+ target_value = set(target_value)
+
+ response_value = response_dict.get(key, None)
+ if response_value != target_value:
+ break
+ return
+ raise Exception(f'Did not receive correct response: {response_dict} != {target_values}')
+
+ await await_correct_response({'version_control': 'follow_loop',
+ 'current_version': '1.1',
+ 'target_version': '1.1',
+ 'loop_version': '1.1',
+ 'local_versions': ['1.1']})

  response = requests.put(f'http://localhost:{GLOBALS.detector_port}/model_version', data='1.0', timeout=30)
  assert response.status_code == 200, response.content
- response = requests.get(f'http://localhost:{GLOBALS.detector_port}/model_version', timeout=30)
- assert response.status_code == 200, response.content
- response_dict = json.loads(response.content)
- assert response_dict['version_control'] == 'specific_version'
- assert response_dict['current_version'] == '1.1'
- assert response_dict['target_version'] == '1.0'
- assert response_dict['loop_version'] == '1.1'
- assert response_dict['local_versions'] == ['1.1']

- await asyncio.sleep(11)
- response = requests.get(f'http://localhost:{GLOBALS.detector_port}/model_version', timeout=30)
- assert response.status_code == 200, response.content
- response_dict = json.loads(response.content)
- assert response_dict['version_control'] == 'specific_version'
- assert response_dict['current_version'] == '1.0'
- assert response_dict['target_version'] == '1.0'
- assert response_dict['loop_version'] == '1.1'
- assert set(response_dict['local_versions']) == set(['1.1', '1.0'])
+ await await_correct_response({'version_control': 'specific_version',
+ 'current_version': '1.1',
+ 'target_version': '1.0',
+ 'loop_version': '1.1'})
+
+ await await_correct_response({'version_control': 'specific_version',
+ 'current_version': '1.0',
+ 'target_version': '1.0',
+ 'loop_version': '1.1',
+ 'local_versions': ['1.1', '1.0']})

  response = requests.put(f'http://localhost:{GLOBALS.detector_port}/model_version', data='pause', timeout=30)
  assert response.status_code == 200, response.content
- await asyncio.sleep(11)
- response = requests.get(f'http://localhost:{GLOBALS.detector_port}/model_version', timeout=30)
- assert response.status_code == 200, response.content
- response_dict = json.loads(response.content)
- assert response_dict['version_control'] == 'pause'
- assert response_dict['current_version'] == '1.0'
- assert response_dict['target_version'] == '1.0'
- assert response_dict['loop_version'] == '1.1'
- assert set(response_dict['local_versions']) == set(['1.1', '1.0'])
+
+ await await_correct_response({'version_control': 'pause',
+ 'current_version': '1.0',
+ 'target_version': '1.0',
+ 'loop_version': '1.1',
+ 'local_versions': ['1.1', '1.0']})

  response = requests.put(f'http://localhost:{GLOBALS.detector_port}/model_version', data='follow_loop', timeout=30)
  await asyncio.sleep(11)
- response = requests.get(f'http://localhost:{GLOBALS.detector_port}/model_version', timeout=30)
- assert response.status_code == 200, response.content
- response_dict = json.loads(response.content)
- assert response_dict['version_control'] == 'follow_loop'
- assert response_dict['current_version'] == '1.1'
- assert response_dict['target_version'] == '1.1'
- assert response_dict['loop_version'] == '1.1'
- assert set(response_dict['local_versions']) == set(['1.1', '1.0'])
+ await await_correct_response({'version_control': 'follow_loop',
+ 'current_version': '1.1',
+ 'target_version': '1.1',
+ 'loop_version': '1.1',
+ 'local_versions': ['1.1', '1.0']})


  async def test_rest_outbox_mode(test_detector_node: DetectorNode):

learning_loop_node/tests/detector/test_detector_node.py

@@ -38,9 +38,10 @@ async def test_get_detections(detector_node: DetectorNode, monkeypatch):

  expected_save_args = {
  'image': raw_image,
- 'detections': detector_node.detector_logic.detections, # type: ignore
+ 'detections': detector_node.detector_logic.image_metadata, # type: ignore
  'tags': ['test_tag'],
  'source': 'test_source',
+ 'creation_date': '2024-01-01T00:00:00',
  }

  for autoupload, expect_filtered, expect_all in test_cases:
@@ -52,6 +53,7 @@ async def test_get_detections(detector_node: DetectorNode, monkeypatch):
  camera_id="test_camera",
  tags=["test_tag"],
  source="test_source",
+ creation_date="2024-01-01T00:00:00",
  autoupload=autoupload
  )


learning_loop_node/tests/detector/test_outbox.py

@@ -6,7 +6,7 @@ import shutil
  import pytest
  from PIL import Image

- from ...data_classes import Detections
+ from ...data_classes import ImageMetadata
  from ...detector.detector_node import DetectorNode
  from ...detector.outbox import Outbox
  from ...globals import GLOBALS
@@ -28,7 +28,7 @@ async def test_outbox():

  @pytest.mark.asyncio
  async def test_files_are_automatically_uploaded_by_node(test_detector_node: DetectorNode):
- test_detector_node.outbox.save(get_test_image_binary(), Detections())
+ test_detector_node.outbox.save(get_test_image_binary(), ImageMetadata())
  assert await wait_for_outbox_count(test_detector_node.outbox, 1)
  assert await wait_for_outbox_count(test_detector_node.outbox, 0)


learning_loop_node/tests/detector/test_relevance_filter.py

@@ -4,7 +4,7 @@ import os
  import numpy as np
  import pytest

- from ...data_classes import BoxDetection, Detections, PointDetection
+ from ...data_classes import BoxDetection, ImageMetadata, PointDetection
  from ...detector.detector_node import DetectorNode
  from .conftest import get_outbox_files
  from .testing_detector import TestingDetectorLogic
@@ -19,7 +19,7 @@ async def test_filter_is_used_by_node(test_detector_node: DetectorNode, autouplo
  Note thatt we have to mock the dummy detections to only return a point and a box detection."""

  assert isinstance(test_detector_node.detector_logic, TestingDetectorLogic)
- test_detector_node.detector_logic.det_to_return = Detections(
+ test_detector_node.detector_logic.det_to_return = ImageMetadata(
  box_detections=[
  BoxDetection(category_name='some_category_name', x=1, y=2, height=3, width=4,
  model_name='some_model', confidence=.42, category_id='some_id')],

learning_loop_node/tests/detector/testing_detector.py

@@ -2,7 +2,7 @@ import logging

  import numpy as np

- from ...data_classes import Detections
+ from ...data_classes import ImageMetadata
  from ...detector.detector_logic import DetectorLogic
  from ..test_helper import get_dummy_detections

@@ -12,11 +12,11 @@ class TestingDetectorLogic(DetectorLogic):

  def __init__(self) -> None:
  super().__init__('mocked')
- self.det_to_return: Detections = get_dummy_detections()
+ self.det_to_return: ImageMetadata = get_dummy_detections()

  def init(self) -> None:
  pass

- def evaluate(self, image: np.ndarray) -> Detections:
+ def evaluate(self, image: np.ndarray) -> ImageMetadata:
  logging.info('evaluating')
  return self.det_to_return

learning_loop_node/tests/general/test_downloader.py

@@ -4,7 +4,7 @@ import shutil
  from ...data_classes import Context
  from ...data_exchanger import DataExchanger
  from ...globals import GLOBALS
- from ...helpers.misc import delete_corrupt_images
+ from ...helpers.misc import create_image_folder, create_project_folder, create_training_folder, delete_corrupt_images
  from .. import test_helper

  # Used by all Nodes
@@ -77,8 +77,8 @@ async def test_removal_of_corrupted_images(data_exchanger: DataExchanger):


  def create_needed_folders(training_uuid: str = 'some_uuid'): # pylint: disable=unused-argument
- project_folder = test_helper.create_project_folder(
+ project_folder = create_project_folder(
  Context(organization='zauberzeug', project='pytest_nodelib_general'))
- image_folder = test_helper.create_image_folder(project_folder)
- training_folder = test_helper.create_training_folder(project_folder, training_uuid)
+ image_folder = create_image_folder(project_folder)
+ training_folder = create_training_folder(project_folder, training_uuid)
  return project_folder, image_folder, training_folder

learning_loop_node/tests/test_helper.py

@@ -6,9 +6,8 @@ import zipfile
  from glob import glob
  from typing import Callable

- from ..data_classes import (BoxDetection, ClassificationDetection, Context, Detections, Point, PointDetection,
+ from ..data_classes import (BoxDetection, ClassificationDetection, Detections, Point, PointDetection,
  SegmentationDetection, Shape)
- from ..helpers.misc import create_image_folder, create_project_folder, create_training_folder
  from ..loop_communication import LoopCommunicator



learning_loop_node/trainer/io_helpers.py

@@ -142,14 +142,14 @@ class ActiveTrainingIO:

  async def upload_detetions(self):
  num_files = self.get_number_of_detection_files()
- print(f'num_files: {num_files}', flush=True)
+ logging.info('Going to upload %s detections', num_files)
  if not num_files:
  logging.error('no detection files found')
  return
  current_json_file_index = self.load_detections_upload_file_index()
  for i in range(current_json_file_index, num_files):
  detections = self.load_detections(i)
- logging.info(f'uploading detections in file {i}/{num_files}')
+ logging.debug('uploading detections in file %s/%s', i, num_files)
  await self._upload_detections_batched(self.context, detections)
  self.save_detections_upload_file_index(i+1)

@@ -166,10 +166,10 @@ class ActiveTrainingIO:

  async def _upload_detections_and_save_progress(self, context: Context, batch_detections: List[Detections], up_progress: int):
  if len(batch_detections) == 0:
- print('skipping empty batch', flush=True)
+ logging.debug('skipping empty batch')
  return
  detections_json = [jsonable_encoder(asdict(detections)) for detections in batch_detections]
- print(f'uploading {len(detections_json)} detections', flush=True)
+ logging.info('uploading %s detections', len(detections_json))
  response = await self.loop_communicator.post(
  f'/{context.organization}/projects/{context.project}/detections', json=detections_json)
  if response.status_code != 200:

learning_loop_node/trainer/trainer_logic_generic.py

@@ -328,11 +328,11 @@ class TrainerLogicGeneric(ABC):

  # TODO this checks if we continue a training -> make more explicit
  if not base_model_uuid or not is_valid_uuid4(base_model_uuid):
- logger.info(f'skipping model download. No base model provided (in form of uuid): {base_model_uuid}')
+ logger.info('skipping model download. No base model provided (in form of uuid): %s', base_model_uuid)
  return

  logger.info('loading model from Learning Loop')
- logger.info(f'downloading model {base_model_uuid} as {self.model_format}')
+ logger.info('downloading model %s as %s', base_model_uuid, self.model_format)
  await self.node.data_exchanger.download_model(self.training.training_folder, self.training.context, base_model_uuid, self.model_format)
  shutil.move(f'{self.training.training_folder}/model.json',
  f'{self.training.training_folder}/base_model.json')
@@ -355,7 +355,7 @@ class TrainerLogicGeneric(ABC):
  result = await self.node.sio_client.call('update_training', (
  self.training.context.organization, self.training.context.project, jsonable_encoder(new_training)))
  if isinstance(result, dict) and result['success']:
- logger.info(f'successfully updated training {asdict(new_training)}')
+ logger.info('successfully updated training %s', asdict(new_training))
  self._on_metrics_published(new_best_model)
  else:
  raise Exception(f'Error for update_training: Response from loop was : {result}')
@@ -369,7 +369,7 @@ class TrainerLogicGeneric(ABC):
  """Uploads the latest model to the Learning Loop.
  """
  new_model_uuid = await self._upload_model_return_new_model_uuid(self.training.context)
- logger.info(f'Successfully uploaded model and received new model id: {new_model_uuid}')
+ logger.info('Successfully uploaded model and received new model id: %s', new_model_uuid)
  self.training.model_uuid_for_detecting = new_model_uuid

  async def _upload_model_return_new_model_uuid(self, context: Context) -> str:
@@ -380,6 +380,7 @@ class TrainerLogicGeneric(ABC):

  :return: The new model UUID.
  :raise CriticalError: If the latest model files cannot be obtained.
+ :raise Exception: If the response for the model upload attempt is invalid.
  """

  files = await self._get_latest_model_files()
@@ -402,6 +403,9 @@ class TrainerLogicGeneric(ABC):
  already_uploaded_formats.append(file_format)
  self.active_training_io.save_model_upload_progress(already_uploaded_formats)

+ if not model_uuid:
+ raise Exception('Invalid response for model upload attempt')
+
  return model_uuid

  def _dump_categories_to_json(self) -> str:

{learning_loop_node-0.10.17.dist-info → learning_loop_node-0.11.1.dist-info}/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: learning-loop-node
- Version: 0.10.17
+ Version: 0.11.1
  Summary: Python Library for Nodes which connect to the Zauberzeug Learning Loop
  Home-page: https://github.com/zauberzeug/learning_loop_node
  License: MIT

{learning_loop_node-0.10.17.dist-info → learning_loop_node-0.11.1.dist-info}/RECORD

@@ -2,28 +2,29 @@ learning_loop_node/__init__.py,sha256=onN5s8-x_xBsCM6NLmJO0Ym1sJHeCFaGw8qb0oQZmz
  learning_loop_node/annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/annotation/annotator_logic.py,sha256=BTaopkJZkIf1CI5lfsVKsxbxoUIbDJrevavuQUT5e_c,1000
  learning_loop_node/annotation/annotator_node.py,sha256=UrJ8MpZ44UhsjmVuSHr2BhHyLC-kIMDi3IuBBMKzN1g,4117
- learning_loop_node/data_classes/__init__.py,sha256=wCX88lDgbb8V-gtVCVe9i-NvvZuMe5FX7eD_UJgYYXw,1305
+ learning_loop_node/data_classes/__init__.py,sha256=JaEwaBHuDOs0DUkeGT8zLtARD5mvkImY7ZKB9ZuNuRc,1364
  learning_loop_node/data_classes/annotations.py,sha256=iInU0Nuy_oYT_sj4k_n-W0UShCBI2cHQYrt8imymbtM,1211
- learning_loop_node/data_classes/detections.py,sha256=hifsGz2LbmeKLZdHxG7cnlOYNEqDmtJd2gxhyU-Xjjs,5811
+ learning_loop_node/data_classes/detections.py,sha256=7vqcS0EK8cmDjRDckHlpSZDZ9YO6qajRmYvx-oxatFc,5425
  learning_loop_node/data_classes/general.py,sha256=usXokcTOVqTuaKJtBf0ffFWfzZhMrQtF7puKfwi6A5k,6195
+ learning_loop_node/data_classes/image_metadata.py,sha256=56nNSf_7aMlvKsJOG8vKCzJHcqKGHVRoULp85pJ2imA,1598
  learning_loop_node/data_classes/socket_response.py,sha256=tIdt-oYf6ULoJIDYQCecNM9OtWR6_wJ9tL0Ksu83Vko,655
  learning_loop_node/data_classes/training.py,sha256=hnMHZMk-WNRERyo7U97qL09v1tIdhnzPfTH-JgifLwU,6164
  learning_loop_node/data_exchanger.py,sha256=6wK9hSGjpCxIx3VklEfPoAl3UyEZy5DfKP4sj97kf_w,9116
  learning_loop_node/detector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- learning_loop_node/detector/detector_logic.py,sha256=IG1s9RF_cCBcNQ8WW1rAS37QKdGzlVoVkuO_CrLGvYs,2084
- learning_loop_node/detector/detector_node.py,sha256=TeChzkpVVmEiZbnfWmtkjuUXNwCZvIWfingmcuAz2cs,19567
+ learning_loop_node/detector/detector_logic.py,sha256=fAaeLykvkuOeaQx-scuN1pkydK8cPdmNT75P8xqImY0,2130
+ learning_loop_node/detector/detector_node.py,sha256=ryzPcv5wfNjA_Sk5YDcUkZoKEUGPT1s29rvFLGGPIZ8,19929
  learning_loop_node/detector/inbox_filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- learning_loop_node/detector/inbox_filter/cam_observation_history.py,sha256=TD346I9ymtIP0_CJXCIKMRuiXbfVVanXNu_iHAwDd7Q,3318
- learning_loop_node/detector/inbox_filter/relevance_filter.py,sha256=7_-x8D8Zf6KJeJXmiC2VrRHU8Ig_R98uhdXVwwX0N4M,1240
- learning_loop_node/detector/outbox.py,sha256=u0pi2p_Fnm1f83FxiG5J8DT9YVa9TuT0Nxhbz171aco,8186
+ learning_loop_node/detector/inbox_filter/cam_observation_history.py,sha256=8gzxYPD3t1OS9wBHXfIvNV2xTTMo0B70O1b50iaH2D8,3344
+ learning_loop_node/detector/inbox_filter/relevance_filter.py,sha256=NPEmrAtuGjIWCtHS0B3zDmnYWkhVFCLbd_7RUp08_AM,1372
+ learning_loop_node/detector/outbox.py,sha256=AIoQFCX3CA4jcQWKcmCkL9su1SWMDci7p-Xip0kNbTE,8643
  learning_loop_node/detector/rest/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/detector/rest/about.py,sha256=COYgmYO1tXGSIwjF__P79mVZUfSDZoHsW0GUarQ2rv0,1686
  learning_loop_node/detector/rest/backdoor_controls.py,sha256=ZNaFOvC0OLWNtcLiG-NIqS_y1kkLP4csgk3CHhp8Gis,885
- learning_loop_node/detector/rest/detect.py,sha256=KSYUOuTxuc2q3RgV37cBBuCvCJf11BFL7QtnPAM4XbU,2343
+ learning_loop_node/detector/rest/detect.py,sha256=ofJ3ysTarbCpiH1YAD6gSJbrDOzAcsLRuGxhr57dtk0,2503
  learning_loop_node/detector/rest/model_version_control.py,sha256=jLp3rvCYq8T_QC3KK7uLDYpbDjydwazWkQCUXvkxl-c,4654
  learning_loop_node/detector/rest/operation_mode.py,sha256=RAzVLtGzy4n9-LSIq_XSwMfXDehU4XmorgWAWbQ6BW8,1804
  learning_loop_node/detector/rest/outbox_mode.py,sha256=H8coDNbgLGEfXmKQrhtXWeUHBAHpnrdZktuHXQz0xis,1148
- learning_loop_node/detector/rest/upload.py,sha256=IPzxJPayD7_Gx5uYC1lVJwWxdnQgM8MYGa5NugXVosY,544
+ learning_loop_node/detector/rest/upload.py,sha256=5YWY0Ku4duZqKd6tjyJzq-Ga83o2UYb1VmzuxBIgo0w,1061
  learning_loop_node/examples/novelty_score_updater.py,sha256=1DRgM9lxjFV-q2JvGDDsNLz_ic_rhEZ9wc6ZdjcxwPE,2038
  learning_loop_node/globals.py,sha256=tgw_8RYOipPV9aYlyUhYtXfUxvJKRvfUk6u-qVAtZmY,174
  learning_loop_node/helpers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -31,7 +32,7 @@ learning_loop_node/helpers/environment_reader.py,sha256=OtCTDc0KT9r-SMygkZB_Mw-Z
  learning_loop_node/helpers/gdrive_downloader.py,sha256=zeYJciTAJVRpu_eFjwgYLCpIa6hU1d71anqEBb564Rk,1145
  learning_loop_node/helpers/log_conf.py,sha256=z_0PHh7U7DkJbSbKoSPyUfS7NhBHtRxXHdNcj67Hpbc,951
  learning_loop_node/helpers/misc.py,sha256=j4is8Rv0ttnCqF-R-wP3xwEi67OI6IBJav5Woo5lyDk,7701
- learning_loop_node/loop_communication.py,sha256=xkoZtHRgxq1arusHQtC_lEBculFBLeCijyfVSYIEchY,6755
+ learning_loop_node/loop_communication.py,sha256=Pdc9jdYFmGh12CAHMYX1sF1ARAXEAhGO4-sbC4jnrIo,6760
  learning_loop_node/node.py,sha256=vbMR_6QsruB2IYYKUWx4--9Ywjf_vuBQb4jyzLRqpRQ,10300
  learning_loop_node/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/rest.py,sha256=o1dl4Mtznd5duyEQtCYSGlK04l1Y-p_YRjG40Q4l31c,1491
@@ -41,18 +42,18 @@ learning_loop_node/tests/annotator/conftest.py,sha256=G4ZvdZUdvPp9bYCzg3eEVkGCeX
  learning_loop_node/tests/annotator/pytest.ini,sha256=8QdjmawLy1zAzXrJ88or1kpFDhJw0W5UOnDfGGs_igU,262
  learning_loop_node/tests/annotator/test_annotator_node.py,sha256=TPNPPrQAxQ_zEecQcH7hlczgD3ABtTCNtUvWD1_oApk,1985
  learning_loop_node/tests/detector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- learning_loop_node/tests/detector/conftest.py,sha256=Q14KHTSuSCsASVIxY9CttdVJm5FC7_JH-W5Q4CdDqoM,5414
+ learning_loop_node/tests/detector/conftest.py,sha256=9wMPcj2QvKevyXAU853dGFjcoXdJHOGK8uxxgjSLX3U,5482
  learning_loop_node/tests/detector/inbox_filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/tests/detector/inbox_filter/test_observation.py,sha256=k4WYdvnuV7d_r7zI4M2aA8WuBjm0aycQ0vj1rGE2q4w,1370
- learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py,sha256=XjiMsS0LgvM0OkPf5-s2rjFbG7C42LTmz_rDVMGHKoY,7603
- learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py,sha256=MWC7PbaCy14jjRw0_oilkXj6gymAsUZXHJdzNW5m2D4,1639
+ learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py,sha256=r-wABFQVsTNTjv7vYGr8wbHfOWy43F_B14ZDWHfiZ-A,7613
+ learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py,sha256=3KKwf-J9oJRMIuuVju2vT9IM9vWhKvswPiXJI8KxmcU,1661
  learning_loop_node/tests/detector/pytest.ini,sha256=8QdjmawLy1zAzXrJ88or1kpFDhJw0W5UOnDfGGs_igU,262
  learning_loop_node/tests/detector/test.jpg,sha256=msA-vHPmvPiro_D102Qmn1fn4vNfooqYYEXPxZUmYpk,161390
- learning_loop_node/tests/detector/test_client_communication.py,sha256=NAOUrHWxoI4yG6oy3BGxWWXX794IOODEj9QBKF3CyrY,9375
- learning_loop_node/tests/detector/test_detector_node.py,sha256=KX2RcFpdIbpPEmcyYM0YMs-6wwTpbOOZONoiwIWryUI,2922
- learning_loop_node/tests/detector/test_outbox.py,sha256=5RMKQfuu1-rvpVCpEtt_D70bYgma-sIrTHWxHdTdU9Y,3001
- learning_loop_node/tests/detector/test_relevance_filter.py,sha256=3VLhHKaxPzLYmiNZagvgg9ZHkPhWk4_-qpmkJw36wBU,2046
- learning_loop_node/tests/detector/testing_detector.py,sha256=FeQroV85IvsT8dmalQBqf1FLNt_buCtZK3-lgtmbrBI,542
+ learning_loop_node/tests/detector/test_client_communication.py,sha256=RUsdmZRQE1YO587JPHB2c2-bTE_tmThQodWuhjzBPEk,9180
+ learning_loop_node/tests/detector/test_detector_node.py,sha256=0ZMV6coAvdq-nH8CwY9_LR2tUcH9VLcAB1CWuwHQMpo,3023
+ learning_loop_node/tests/detector/test_outbox.py,sha256=IfCz4iBmYA4bm3TK4q2NmWyzQCwZWhUbBrKQNHGxZM4,3007
+ learning_loop_node/tests/detector/test_relevance_filter.py,sha256=ZKcCstFWCDxJzKdVlAe8E6sZzv5NiH8mADhaZjokHoU,2052
+ learning_loop_node/tests/detector/testing_detector.py,sha256=MZajybyzISz2G1OENfLHgZhBcLCYzTR4iN9JkWpq5-s,551
  learning_loop_node/tests/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/tests/general/conftest.py,sha256=oVuE-XZfUPjOXE8KLJgDbIMKIF9Mmgfna2rlupC44TE,2298
  learning_loop_node/tests/general/pytest.ini,sha256=8QdjmawLy1zAzXrJ88or1kpFDhJw0W5UOnDfGGs_igU,262
@@ -60,9 +61,9 @@ learning_loop_node/tests/general/test_data/file_1.txt,sha256=Lis06nfvbFPVCBZyEgQ
  learning_loop_node/tests/general/test_data/file_2.txt,sha256=Xp8EETGhZBdVAgb4URowSSpOytwwwJdV0Renkdur7R8,19
  learning_loop_node/tests/general/test_data/model.json,sha256=_xNDucGOWila8gWnu8yFfrqmQ45Xq-_39eLKzjRtvpE,516
  learning_loop_node/tests/general/test_data_classes.py,sha256=u5GoXNk2yqCp1EVm9YoBnYreL2SCjgJ0a3x01JQDOuM,701
- learning_loop_node/tests/general/test_downloader.py,sha256=C6b_wG3TfQX53lmuanpH1yaQAdATFGXOmQ1nzXWqNss,3315
+ learning_loop_node/tests/general/test_downloader.py,sha256=y4GcUyR0OAfrwltd6eyQgopwTt3DwjzX0Sr8yrooLec,3347
  learning_loop_node/tests/general/test_learning_loop_node.py,sha256=SZd-VChpWnnsPN46pr4E_LL3ZevYx6psU-AWdVeOFpQ,770
- learning_loop_node/tests/test_helper.py,sha256=nTynYtuUaK2hKh87pk7t7AIJaOiD3wJ5d6nCPqnwRMk,3012
+ learning_loop_node/tests/test_helper.py,sha256=Xajn6BWJqeD36YAETwdcJd6awY2NPmaOis3gWgFc97k,2909
  learning_loop_node/tests/trainer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/tests/trainer/conftest.py,sha256=E3SQL_CGFJ_sNjEfVJbxbvH0g6hjI5753ndAFUbnkQk,3366
  learning_loop_node/tests/trainer/pytest.ini,sha256=8QdjmawLy1zAzXrJ88or1kpFDhJw0W5UOnDfGGs_igU,262
@@ -83,13 +84,13 @@ learning_loop_node/trainer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
  learning_loop_node/trainer/downloader.py,sha256=Qk-oBcrGCVuWTVs3hvAJzQSqCIHPGZ7NXLJ_fAqvCoY,1469
  learning_loop_node/trainer/exceptions.py,sha256=vbuoE6kssLQuA8zd3LiDHmZglP6E2IJJwEi5AZtWXxY,420
  learning_loop_node/trainer/executor.py,sha256=-0BxDqmAI1NCiISi7Rw8McJQfgxxVy1gSa1epYuL3U0,3942
- learning_loop_node/trainer/io_helpers.py,sha256=hGEtNAQBSBbVB56U1ndwfP8qK5K4YIwMQrjCDcaMy9I,7218
+ learning_loop_node/trainer/io_helpers.py,sha256=ZnAPVqhq8XCHe1NoiOQJ_w0B-estcc8CBQHnb423UDw,7226
  learning_loop_node/trainer/rest/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/trainer/rest/backdoor_controls.py,sha256=-pU4iHheBWf0SW2QzBVBsLiCMZBRz9CDdVZv6414Ts8,5134
  learning_loop_node/trainer/test_executor.py,sha256=6BVGDN_6f5GEMMEvDLSG1yzMybSvgXaP5uYpSfsVPP0,2224
  learning_loop_node/trainer/trainer_logic.py,sha256=PlYExIskU9pWJO0e9m_0KJnUdOI10GtW0oDOevYmg1o,8461
- learning_loop_node/trainer/trainer_logic_generic.py,sha256=ERfuGhHGNvIPRyd_QOGavylPDXTCC8qCOO1eJXAwEO8,25957
+ learning_loop_node/trainer/trainer_logic_generic.py,sha256=zXoi1wWkRy6SGp2sd9xkD2DGd7hiCHxa4NE0RiC71v4,26147
  learning_loop_node/trainer/trainer_node.py,sha256=8ANS9iy-swdTLvt9wEFixE6YlmqvqBl17A-R4tVYD-I,5384
- learning_loop_node-0.10.17.dist-info/METADATA,sha256=rLgBSvCfSm3TWkJGJL0iyPTqt3qNHvxcZWzaoyqon38,11907
- learning_loop_node-0.10.17.dist-info/WHEEL,sha256=WGfLGfLX43Ei_YORXSnT54hxFygu34kMpcQdmgmEwCQ,88
- learning_loop_node-0.10.17.dist-info/RECORD,,
+ learning_loop_node-0.11.1.dist-info/METADATA,sha256=AyF22d7GjFT2yktztap-HALd6f8jwDSqJYzy9wH5PHc,11906
+ learning_loop_node-0.11.1.dist-info/WHEEL,sha256=WGfLGfLX43Ei_YORXSnT54hxFygu34kMpcQdmgmEwCQ,88
+ learning_loop_node-0.11.1.dist-info/RECORD,,