learning-loop-node 0.16.1__py3-none-any.whl → 0.17.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
learning_loop_node/data_classes/__init__.py

@@ -1,4 +1,9 @@
- from .annotations import AnnotationData, SegmentationAnnotation, ToolOutput, UserInput
+ from .annotation_data import (
+     AnnotationData,
+     SegmentationAnnotation,
+     ToolOutput,
+     UserInput,
+ )
  from .detections import (
      BoxDetection,
      ClassificationDetection,
learning_loop_node/data_classes/annotation_data.py (new file)

@@ -0,0 +1,43 @@
+ import sys
+ from dataclasses import dataclass
+ from typing import Optional, Union
+
+ from ..enums import AnnotationEventType
+ from .detections import Point, Shape
+ from .general import Category, Context
+
+ KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}
+
+
+ @dataclass(**KWONLY_SLOTS)
+ class AnnotationData():
+     coordinate: Point
+     event_type: Union[AnnotationEventType, str]
+     context: Context
+     image_uuid: str
+     category: Category
+
+     key_up: Optional[str] = None
+     key_down: Optional[str] = None
+     epsilon: Optional[float] = None
+     is_shift_key_pressed: Optional[bool] = None
+
+
+ @dataclass(**KWONLY_SLOTS)
+ class SegmentationAnnotation():
+     id: str
+     shape: Shape
+     image_id: str
+     category_id: str
+
+
+ @dataclass(**KWONLY_SLOTS)
+ class UserInput():
+     frontend_id: str
+     data: AnnotationData
+
+
+ @dataclass(**KWONLY_SLOTS)
+ class ToolOutput():
+     svg: str
+     annotation: Optional[SegmentationAnnotation] = None
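The `KWONLY_SLOTS` dict used throughout these data classes enables `kw_only` and `slots` only where `dataclass` supports them (Python 3.10+). A minimal sketch of the effect, not part of the package:

```python
import sys
from dataclasses import dataclass

# Same pattern as in the file above: on Python >= 3.10 the dataclass gets
# keyword-only fields and __slots__; on older interpreters it falls back
# to a plain dataclass, so the code keeps working there too.
KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}


@dataclass(**KWONLY_SLOTS)
class Example:
    x: int
    y: int


print(Example(x=1, y=2))  # keyword arguments work on every supported version
```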
learning_loop_node/data_classes/annotations.py

@@ -1,43 +1,44 @@
- import sys
- from dataclasses import dataclass
- from typing import Optional, Union

- from ..enums import AnnotationEventType
- from .detections import Point, Shape
- from .general import Category, Context
+ import sys
+ from dataclasses import dataclass, field
+ from typing import Optional

  KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}


  @dataclass(**KWONLY_SLOTS)
- class AnnotationData():
-     coordinate: Point
-     event_type: Union[AnnotationEventType, str]
-     context: Context
-     image_uuid: str
-     category: Category
+ class BoxAnnotation():
+     """Coordinates according to COCO format. x,y is the top left corner of the box.
+     x increases to the right, y increases downwards.
+     """
+     category_name: str = field(metadata={'description': 'Category name'})
+     x: int = field(metadata={'description': 'X coordinate (left to right)'})
+     y: int = field(metadata={'description': 'Y coordinate (top to bottom)'})
+     width: int = field(metadata={'description': 'Width'})
+     height: int = field(metadata={'description': 'Height'})
+     category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})

-     key_up: Optional[str] = None
-     key_down: Optional[str] = None
-     epsilon: Optional[float] = None
-     is_shift_key_pressed: Optional[bool] = None
+     def __str__(self):
+         return f'x:{int(self.x)} y: {int(self.y)}, w: {int(self.width)} h: {int(self.height)} -> {self.category_name}'


  @dataclass(**KWONLY_SLOTS)
- class SegmentationAnnotation():
-     id: str
-     shape: Shape
-     image_id: str
-     category_id: str
+ class PointAnnotation():
+     """Coordinates according to COCO format. x,y is the center of the point.
+     x increases to the right, y increases downwards."""
+     category_name: str = field(metadata={'description': 'Category name'})
+     x: float = field(metadata={'description': 'X coordinate (right)'})
+     y: float = field(metadata={'description': 'Y coordinate (down)'})
+     category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})

-
- @dataclass(**KWONLY_SLOTS)
- class UserInput():
-     frontend_id: str
-     data: AnnotationData
+     def __str__(self):
+         return f'x:{int(self.x)} y: {int(self.y)}, -> {self.category_name}'


  @dataclass(**KWONLY_SLOTS)
- class ToolOutput():
-     svg: str
-     annotation: Optional[SegmentationAnnotation] = None
+ class ClassificationAnnotation():
+     category_name: str = field(metadata={'description': 'Category name'})
+     category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})
+
+     def __str__(self):
+         return f'-> {self.category_name}'
learning_loop_node/data_classes/detections.py

@@ -9,10 +9,6 @@ import numpy as np
  KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}


- def current_datetime():
-     return datetime.now().isoformat(sep='_', timespec='milliseconds')
-
-
  @dataclass(**KWONLY_SLOTS)
  class BoxDetection():
      """Coordinates according to COCO format. x,y is the top left corner of the box.
@@ -25,7 +21,7 @@ class BoxDetection():
      height: int = field(metadata={'description': 'Height'})
      model_name: str = field(metadata={'description': 'Model name'})
      confidence: float = field(metadata={'description': 'Confidence'})
-     category_id: Optional[str] = field(default=None, metadata={'description': 'Category ID'})
+     category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})

      def intersection_over_union(self, other_detection: 'BoxDetection') -> float:
          # https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
@@ -59,7 +55,7 @@ class PointDetection():
      y: float = field(metadata={'description': 'Y coordinate (down)'})
      model_name: str = field(metadata={'description': 'Model name'})
      confidence: float = field(metadata={'description': 'Confidence'})
-     category_id: Optional[str] = field(default=None, metadata={'description': 'Category ID'})
+     category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})

      def distance(self, other: 'PointDetection') -> float:
          return np.sqrt((other.x - self.x)**2 + (other.y - self.y)**2)
@@ -73,7 +69,7 @@ class ClassificationDetection():
      category_name: str = field(metadata={'description': 'Category name'})
      model_name: str = field(metadata={'description': 'Model name'})
      confidence: float = field(metadata={'description': 'Confidence'})
-     category_id: Optional[str] = field(default=None, metadata={'description': 'Category ID'})
+     category_id: Optional[str] = field(default=None, metadata={'description': 'Category UUID'})

      def __str__(self):
          return f'c: {self.confidence:.2f} -> {self.category_name}'
learning_loop_node/data_classes/image_metadata.py

@@ -4,7 +4,13 @@ from dataclasses import dataclass, field
  from datetime import datetime
  from typing import List, Optional

- from .detections import BoxDetection, ClassificationDetection, PointDetection, SegmentationDetection
+ from .annotations import BoxAnnotation, ClassificationAnnotation, PointAnnotation
+ from .detections import (
+     BoxDetection,
+     ClassificationDetection,
+     PointDetection,
+     SegmentationDetection,
+ )

  # pylint: disable=too-many-instance-attributes

@@ -25,6 +31,14 @@ class ImageMetadata():
          'description': 'List of segmentation detections'})
      classification_detections: List[ClassificationDetection] = field(default_factory=list, metadata={
          'description': 'List of classification detections'})
+
+     box_annotations: List[BoxAnnotation] = field(default_factory=list, metadata={
+         'description': 'List of box annotations'})
+     point_annotations: List[PointAnnotation] = field(default_factory=list, metadata={
+         'description': 'List of point annotations'})
+     classification_annotation: Optional[ClassificationAnnotation] = field(default=None, metadata={
+         'description': 'Classification annotation'})
+
      tags: List[str] = field(default_factory=list, metadata={
          'description': 'List of tags'})
learning_loop_node/detector/detector_logic.py

@@ -42,18 +42,18 @@ class DetectorLogic():
      def init(self):
          """Called when a (new) model was loaded. Initialize the model. Model information available via `self.model_info`"""

-     def evaluate_with_all_info(self, image: bytes, tags: List[str], source: Optional[str] = None, creation_date: Optional[str] = None) -> ImageMetadata:  # pylint: disable=unused-argument
-         """Called by the detector node when an image should be evaluated (REST or SocketIO).
-         Tags, source come from the caller and may be used in this function.
-         By default, this function simply calls `evaluate`"""
-         return self.evaluate(image)
-
      @abstractmethod
-     def evaluate(self, image: bytes) -> ImageMetadata:
+     def evaluate(self, image: bytes) -> ImageMetadata:  # pylint: disable=unused-argument
          """Evaluate the image and return the detections.
-         The object should return empty detections if it is not initialized"""
+
+         Called by the detector node when an image should be evaluated (REST or SocketIO).
+         The resulting detections should be stored in the ImageMetadata.
+         Tags stored in the ImageMetadata will be uploaded to the learning loop.
+         The function should return empty metadata if the detector is not initialized."""

      @abstractmethod
      def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
          """Evaluate a batch of images and return the detections.
-         The object should return empty detections if it is not initialized"""
+         The resulting detections per image should be stored in the ImagesMetadata.
+         Tags stored in the ImagesMetadata will be uploaded to the learning loop.
+         The function should return empty metadata if the detector is not initialized."""
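Under the revised interface, a custom detector overrides `evaluate` (and `batch_evaluate`) directly; tags, source and creation date are attached by the node afterwards. A minimal conforming sketch (the `cv2` decoding line follows the docstring of `get_detections` further below; that `model_info` is `None` before a model is loaded is an assumption here):

```python
from typing import List

import cv2
import numpy as np

from learning_loop_node.data_classes import ImageMetadata, ImagesMetadata
from learning_loop_node.detector.detector_logic import DetectorLogic


class MyDetector(DetectorLogic):

    def init(self) -> None:
        pass  # load weights here, e.g. based on self.model_info

    def evaluate(self, image: bytes) -> ImageMetadata:
        if self.model_info is None:  # assumed: no model loaded yet
            return ImageMetadata()   # empty metadata while uninitialized
        bgr = cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)
        # ... run inference on `bgr` and fill the metadata with detections ...
        return ImageMetadata()

    def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
        return ImagesMetadata(items=[self.evaluate(image) for image in images])
```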
learning_loop_node/detector/detector_node.py

@@ -8,6 +8,11 @@ from dataclasses import asdict
  from datetime import datetime
  from typing import Dict, List, Optional

+ try:
+     from typing import Literal
+ except ImportError:  # Python <= 3.8
+     from typing_extensions import Literal  # type: ignore
+
  import socketio
  from dacite import from_dict
  from fastapi.encoders import jsonable_encoder
@@ -223,10 +228,10 @@ class DetectorNode(Node):
              try:
                  det = await self.get_detections(
                      raw_image=data['image'],
-                     camera_id=data.get('camera-id', None) or data.get('mac', None),
+                     camera_id=data.get('camera_id', None),
                      tags=data.get('tags', []),
                      source=data.get('source', None),
-                     autoupload=data.get('autoupload', None),
+                     autoupload=data.get('autoupload', 'filtered'),
                      creation_date=data.get('creation_date', None)
                  )
                  if det is None:
@@ -245,9 +250,9 @@ class DetectorNode(Node):
                  det = await self.get_batch_detections(
                      raw_images=data['images'],
                      tags=data.get('tags', []),
-                     camera_id=data.get('camera-id', None) or data.get('mac', None),
+                     camera_id=data.get('camera_id', None),
                      source=data.get('source', None),
-                     autoupload=data.get('autoupload', None),
+                     autoupload=data.get('autoupload', 'filtered'),
                      creation_date=data.get('creation_date', None)
                  )
                  if det is None:
@@ -296,27 +301,30 @@ class DetectorNode(Node):

          @self.sio.event
          async def upload(sid, data: Dict) -> Dict:
-             """Upload an image with detections"""
+             """Upload a single image with metadata to the learning loop.

+             The data dict must contain:
+             - image: The image bytes to upload
+             - metadata: The metadata for the image (optional)
+             """
              self.log.debug('Processing upload via socketio.')
-             detection_data = data.get('detections', {})
-             if detection_data and self.detector_logic.model_info is not None:
+
+             metadata = data.get('metadata', None)
+             if metadata:
                  try:
-                     image_metadata = from_dict(data_class=ImageMetadata, data=detection_data)
+                     image_metadata = from_dict(data_class=ImageMetadata, data=metadata)
                  except Exception as e:
                      self.log.exception('could not parse detections')
                      return {'error': str(e)}
-                 image_metadata = self.add_category_id_to_detections(self.detector_logic.model_info, image_metadata)
+                 if self.detector_logic.model_info is not None:
+                     image_metadata = self.add_category_id_to_detections(self.detector_logic.model_info, image_metadata)
              else:
                  image_metadata = ImageMetadata()

              try:
                  await self.upload_images(
                      images=[data['image']],
-                     image_metadata=image_metadata,
-                     tags=data.get('tags', []),
-                     source=data.get('source', None),
-                     creation_date=data.get('creation_date', None),
+                     images_metadata=ImagesMetadata(items=[image_metadata]) if metadata else None,
                      upload_priority=data.get('upload_priority', False)
                  )
              except Exception as e:
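For reference, a client-side call matching this reworked `upload` event might look as follows (a sketch: the server address is a placeholder and the `python-socketio` client is an assumption; the payload keys mirror the handler above):

```python
from dataclasses import asdict

import socketio

from learning_loop_node.data_classes import ImageMetadata

sio = socketio.Client()
sio.connect('http://localhost:8004')  # placeholder address

with open('test.jpg', 'rb') as f:
    image_bytes = f.read()

# 'metadata' is optional and must follow the ImageMetadata schema.
result = sio.call('upload', {
    'image': image_bytes,
    'metadata': asdict(ImageMetadata(source='my_robot', tags=['example'])),
    'upload_priority': False,
})
print(result)  # None on success, {'error': ...} otherwise
```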
@@ -506,34 +514,37 @@ class DetectorNode(Node):
                               *,
                               camera_id: Optional[str] = None,
                               source: Optional[str] = None,
-                              autoupload: Optional[str] = None,
+                              autoupload: Literal['filtered', 'all', 'disabled'],
                               creation_date: Optional[str] = None) -> ImageMetadata:
          """ Main processing function for the detector node when an image is received via REST or SocketIO.
          This function infers the detections from the image, cares about uploading to the loop and returns the detections as ImageMetadata object.
          Note: raw_image is a numpy array of type uint8, but not in the correct shape!
-         It can be converted e.g. using cv2.imdecode(raw_image, cv2.IMREAD_COLOR)"""
+         It can be converted e.g. using cv2.imdecode(np.frombuffer(image, np.uint8), cv2.IMREAD_COLOR)"""

          await self.detection_lock.acquire()
-         detections = await run.io_bound(self.detector_logic.evaluate_with_all_info, raw_image, tags, source, creation_date)
-         self.detection_lock.release()
+         try:
+             metadata = await run.io_bound(self.detector_logic.evaluate, raw_image)
+         finally:
+             self.detection_lock.release()
+
+         metadata.tags.extend(tags)
+         metadata.source = source
+         metadata.created = creation_date

-         fix_shape_detections(detections)
-         n_bo, n_cl = len(detections.box_detections), len(detections.classification_detections)
-         n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
+         fix_shape_detections(metadata)
+         n_bo, n_cl = len(metadata.box_detections), len(metadata.classification_detections)
+         n_po, n_se = len(metadata.point_detections), len(metadata.segmentation_detections)
          self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)

-         autoupload = autoupload or 'filtered'
-         if autoupload == 'filtered' and camera_id is not None:
-             background_tasks.create(self.relevance_filter.may_upload_detections(
-                 detections, camera_id, raw_image, tags, source, creation_date
-             ))
+         if autoupload == 'filtered':
+             background_tasks.create(self.relevance_filter.may_upload_detections(metadata, camera_id, raw_image))
          elif autoupload == 'all':
-             background_tasks.create(self.outbox.save(raw_image, detections, tags, source, creation_date))
+             background_tasks.create(self.outbox.save(raw_image, metadata))
          elif autoupload == 'disabled':
              pass
          else:
              self.log.error('unknown autoupload value %s', autoupload)
-         return detections
+         return metadata

      async def get_batch_detections(self,
                                     raw_images: List[bytes],
@@ -541,14 +552,21 @@ class DetectorNode(Node):
                                     *,
                                     camera_id: Optional[str] = None,
                                     source: Optional[str] = None,
-                                    autoupload: Optional[str] = None,
+                                    autoupload: str = 'filtered',
                                     creation_date: Optional[str] = None) -> ImagesMetadata:
          """ Processing function for the detector node when a batch inference is requested via SocketIO.
          This function infers the detections from all images, cares about uploading to the loop and returns the detections as a list of ImageMetadata."""

          await self.detection_lock.acquire()
-         all_detections = await run.io_bound(self.detector_logic.batch_evaluate, raw_images)
-         self.detection_lock.release()
+         try:
+             all_detections = await run.io_bound(self.detector_logic.batch_evaluate, raw_images)
+         finally:
+             self.detection_lock.release()
+
+         for metadata in all_detections.items:
+             metadata.tags.extend(tags)
+             metadata.source = source
+             metadata.created = creation_date

          for detections, raw_image in zip(all_detections.items, raw_images):
              fix_shape_detections(detections)
@@ -556,13 +574,10 @@ class DetectorNode(Node):
              n_po, n_se = len(detections.point_detections), len(detections.segmentation_detections)
              self.log.debug('Detected: %d boxes, %d points, %d segs, %d classes', n_bo, n_po, n_se, n_cl)

-             autoupload = autoupload or 'filtered'
-             if autoupload == 'filtered' and camera_id is not None:
-                 background_tasks.create(self.relevance_filter.may_upload_detections(
-                     detections, camera_id, raw_image, tags, source, creation_date
-                 ))
+             if autoupload == 'filtered':
+                 background_tasks.create(self.relevance_filter.may_upload_detections(detections, camera_id, raw_image))
              elif autoupload == 'all':
-                 background_tasks.create(self.outbox.save(raw_image, detections, tags, source, creation_date))
+                 background_tasks.create(self.outbox.save(raw_image, detections))
              elif autoupload == 'disabled':
                  pass
              else:
@@ -572,24 +587,25 @@ class DetectorNode(Node):
      async def upload_images(
              self, *,
              images: List[bytes],
-             image_metadata: Optional[ImageMetadata] = None,
-             tags: Optional[List[str]] = None,
-             source: Optional[str],
-             creation_date: Optional[str],
+             images_metadata: Optional[ImagesMetadata] = None,
              upload_priority: bool = False
      ) -> None:
          """Save images to the outbox using an asyncio executor.
-         Used by SIO and REST upload endpoints."""
+         Used by SIO and REST upload endpoints.

-         if image_metadata is None:
-             image_metadata = ImageMetadata()
-         if tags is None:
-             tags = []
+         :param images: List of images to upload
+         :param images_metadata: Optional metadata for all images
+         :param upload_priority: Whether to upload the images with priority
+         :raises ValueError: If the number of images and number of metadata items do not match
+         """

-         tags.append('picked_by_system')
+         if images_metadata and len(images_metadata.items) != len(images):
+             raise ValueError('Number of images and number of metadata items do not match')

-         for image in images:
-             await self.outbox.save(image, image_metadata, tags, source, creation_date, upload_priority)
+         for i, image in enumerate(images):
+             image_metadata = images_metadata.items[i] if images_metadata else ImageMetadata()
+             image_metadata.tags.append('picked_by_system')
+             await self.outbox.save(image, image_metadata, upload_priority)

      def add_category_id_to_detections(self, model_info: ModelInformation, image_metadata: ImageMetadata):
          def find_category_id_by_name(categories: List[Category], category_name: str):
@@ -628,9 +644,9 @@ def step_into(new_dir):
      os.chdir(previous_dir)


- def fix_shape_detections(detections: ImageMetadata):
+ def fix_shape_detections(metadata: ImageMetadata):
      # TODO This is a quick fix.. check how loop upload detections deals with this
-     for seg_detection in detections.segmentation_detections:
+     for seg_detection in metadata.segmentation_detections:
          if isinstance(seg_detection.shape, Shape):
              points = ','.join([str(value) for p in seg_detection.shape.points for _,
                                 value in asdict(p).items()])
learning_loop_node/detector/inbox_filter/relevance_filter.py

@@ -9,28 +9,30 @@ class RelevanceFilter():

      def __init__(self, outbox: Outbox) -> None:
          self.cam_histories: Dict[str, CamObservationHistory] = {}
+         self.unknown_cam_history: CamObservationHistory = CamObservationHistory()
          self.outbox: Outbox = outbox

      async def may_upload_detections(self,
                                      image_metadata: ImageMetadata,
-                                     cam_id: str,
-                                     raw_image: bytes,
-                                     tags: List[str],
-                                     source: Optional[str] = None,
-                                     creation_date: Optional[str] = None) -> List[str]:
+                                     cam_id: Optional[str],
+                                     raw_image: bytes) -> List[str]:
          """Check if the detection should be uploaded to the outbox.
          If so, upload it and return the list of causes for the upload.
          """
          for group in self.cam_histories.values():
              group.forget_old_detections()

-         if cam_id not in self.cam_histories:
-             self.cam_histories[cam_id] = CamObservationHistory()
-         causes = self.cam_histories[cam_id].get_causes_to_upload(image_metadata)
+         if cam_id is None:
+             history = self.unknown_cam_history
+         else:
+             if cam_id not in self.cam_histories:
+                 self.cam_histories[cam_id] = CamObservationHistory()
+             history = self.cam_histories[cam_id]
+
+         causes = history.get_causes_to_upload(image_metadata)
          if len(image_metadata) >= 80:
              causes.append('unexpected_observations_count')
          if len(causes) > 0:
-             tags = tags if tags is not None else []
-             tags.extend(causes)
-             await self.outbox.save(raw_image, image_metadata, tags, source, creation_date)
+             image_metadata.tags.extend(causes)
+             await self.outbox.save(raw_image, image_metadata)
          return causes
learning_loop_node/detector/outbox.py

@@ -78,9 +78,6 @@ class Outbox():
      async def save(self,
                     image: bytes,
                     image_metadata: Optional[ImageMetadata] = None,
-                    tags: Optional[List[str]] = None,
-                    source: Optional[str] = None,
-                    creation_date: Optional[str] = None,
                     upload_priority: bool = False) -> None:

          if not await run.io_bound(self._is_valid_jpg, image):
@@ -89,12 +86,11 @@ class Outbox():

          if image_metadata is None:
              image_metadata = ImageMetadata()
-         if not tags:
-             tags = []
+
          identifier = datetime.now().isoformat(sep='_', timespec='microseconds')

          try:
-             await run.io_bound(self._save_files_to_disk, identifier, image, image_metadata, tags, source, creation_date, upload_priority)
+             await run.io_bound(self._save_files_to_disk, identifier, image, image_metadata, upload_priority)
          except Exception as e:
              self.log.error('Failed to save files for image %s: %s', identifier, e)
              return
@@ -110,9 +106,6 @@ class Outbox():
                                identifier: str,
                                image: bytes,
                                image_metadata: ImageMetadata,
-                               tags: List[str],
-                               source: Optional[str],
-                               creation_date: Optional[str],
                                upload_priority: bool) -> None:
          subpath = 'priority' if upload_priority else 'normal'
          full_path = f'{self.path}/{subpath}/{identifier}'
@@ -120,14 +113,6 @@ class Outbox():
              raise FileExistsError(f'Directory with identifier {identifier} already exists')

          tmp = f'{GLOBALS.data_folder}/tmp/{identifier}'
-         image_metadata.tags = tags
-         if self._is_valid_isoformat(creation_date):
-             image_metadata.created = creation_date
-         else:
-             image_metadata.created = identifier
-
-         image_metadata.source = source or 'unknown'
-
          os.makedirs(tmp, exist_ok=True)

          with open(tmp + f'/image_{identifier}.json', 'w') as f:
@@ -139,6 +124,7 @@ class Outbox():
          if not os.path.exists(tmp):
              self.log.error('Could not rename %s to %s', tmp, full_path)
              raise FileNotFoundError(f'Could not rename {tmp} to {full_path}')
+
          os.rename(tmp, full_path)

      async def _trim_upload_queue(self) -> None:
learning_loop_node/detector/rest/detect.py

@@ -1,6 +1,11 @@
  import logging
  from typing import TYPE_CHECKING, Optional

+ try:
+     from typing import Literal
+ except ImportError:  # Python <= 3.8
+     from typing_extensions import Literal  # type: ignore
+
  from fastapi import APIRouter, File, Header, Request, UploadFile

  from ...data_classes.image_metadata import ImageMetadata
@@ -16,17 +21,16 @@ async def http_detect(
          request: Request,
          file: UploadFile = File(..., description='The image file to run detection on'),
          camera_id: Optional[str] = Header(None, description='The camera id (used by learning loop)'),
-         mac: Optional[str] = Header(None, description='The camera mac address (used by learning loop)'),
          tags: Optional[str] = Header(None, description='Tags to add to the image (used by learning loop)'),
          source: Optional[str] = Header(None, description='The source of the image (used by learning loop)'),
-         autoupload: Optional[str] = Header(None, description='Mode to decide whether to upload the image to the learning loop',
-                                            examples=['filtered', 'all', 'disabled']),
+         autoupload: Optional[Literal['filtered', 'all', 'disabled']] = Header(None, description='Mode to decide whether to upload the image to the learning loop',
+                                                                              examples=['filtered', 'all', 'disabled']),
          creation_date: Optional[str] = Header(None, description='The creation date of the image (used by learning loop)')
  ):
      """
      Single image example:

-     curl --request POST -F 'file=@test.jpg' localhost:8004/detect -H 'autoupload: all' -H 'camera-id: front_cam' -H 'source: test' -H 'tags: test,test2'
+     curl --request POST -F 'file=@test.jpg' localhost:8004/detect -H 'autoupload: all' -H 'camera_id: front_cam' -H 'source: test' -H 'tags: test,test2'

      Multiple images example:
@@ -43,10 +47,10 @@ async def http_detect(
      try:
          app: 'DetectorNode' = request.app
          detections = await app.get_detections(raw_image=file_bytes,
-                                               camera_id=camera_id or mac or None,
+                                               camera_id=camera_id or None,
                                                tags=tags.split(',') if tags else [],
                                                source=source,
-                                               autoupload=autoupload,
+                                               autoupload=autoupload or 'filtered',
                                                creation_date=creation_date)
      except Exception as exc:
          logging.exception('Error during detection of image %s.', file.filename)
learning_loop_node/detector/rest/upload.py

@@ -2,6 +2,8 @@ from typing import TYPE_CHECKING, List, Optional

  from fastapi import APIRouter, File, Query, Request, UploadFile

+ from ...data_classes.image_metadata import ImageMetadata, ImagesMetadata
+
  if TYPE_CHECKING:
      from ..detector_node import DetectorNode

@@ -25,6 +27,14 @@ async def upload_image(request: Request,
      curl -X POST -F 'files=@test.jpg' "http://localhost:/upload?source=test&creation_date=2024-01-01T00:00:00&upload_priority=true"
      """
      raw_files = [await file.read() for file in files]
+     image_metadatas = []
+     for _ in files:
+         image_metadatas.append(ImageMetadata(source=source, created=creation_date))
+
+     images_metadata = ImagesMetadata(items=image_metadatas)
+
      node: DetectorNode = request.app
-     await node.upload_images(images=raw_files, source=source, creation_date=creation_date, upload_priority=upload_priority)
+     await node.upload_images(images=raw_files,
+                              images_metadata=images_metadata,
+                              upload_priority=upload_priority)
      return 200, "OK"
learning_loop_node/helpers/background_tasks.py

@@ -21,7 +21,6 @@
  # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  # SOFTWARE.

- """inspired from https://quantlane.com/blog/ensure-asyncio-task-exceptions-get-logged/"""
  from __future__ import annotations

  import asyncio
learning_loop_node/tests/detector/conftest.py

@@ -135,10 +135,9 @@ class MockDetectorLogic(DetectorLogic):  # pylint: disable=abstract-method
              category_id="1",
              confidence=0.9,
              x=0, y=0, width=10, height=10,
-             model_name="mock",
-         )])
+             model_name="mock", )])

-     def evaluate_with_all_info(self, image: np.ndarray, tags: List[str], source: Optional[str] = None, creation_date: Optional[str] = None):
+     def evaluate(self, image: bytes) -> ImageMetadata:
          return self.image_metadata

learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py

@@ -16,7 +16,7 @@ l_conf_point_det = PointDetection(category_name='point', x=100, y=100,


  @pytest.mark.parametrize(
-     "detections,reason",
+     "metadata,reason",
      [(ImageMetadata(box_detections=[h_conf_box_det] * 40, point_detections=[h_conf_point_det] * 40),
        ['unexpected_observations_count']),
       (ImageMetadata(box_detections=[h_conf_box_det], point_detections=[h_conf_point_det]), []),
@@ -25,10 +25,10 @@ l_conf_point_det = PointDetection(category_name='point', x=100, y=100,
      (ImageMetadata(box_detections=[h_conf_box_det], point_detections=[l_conf_point_det]),
       ['uncertain'])])
  @pytest.mark.asyncio
- async def test_unexpected_observations_count(detections: ImageMetadata, reason: List[str]):
+ async def test_unexpected_observations_count(metadata: ImageMetadata, reason: List[str]):
      os.environ['LOOP_ORGANIZATION'] = 'zauberzeug'
      os.environ['LOOP_PROJECT'] = 'demo'
      outbox = Outbox()

      relevance_filter = RelevanceFilter(outbox)
-     assert await relevance_filter.may_upload_detections(detections, raw_image=b'', cam_id='0:0:0:0', tags=[]) == reason
+     assert await relevance_filter.may_upload_detections(metadata, raw_image=b'', cam_id='0:0:0:0') == reason
learning_loop_node/tests/detector/test_detector_node.py

@@ -1,13 +1,16 @@
+ from typing import List, Literal, Tuple
+
  import numpy as np
  import pytest

+ from learning_loop_node.detector import detector_node as detector_node_module
  from learning_loop_node.detector.detector_node import DetectorNode


  @pytest.mark.asyncio
  async def test_get_detections(detector_node: DetectorNode, monkeypatch):
      # Mock raw image data
-     raw_image = np.zeros((100, 100, 3), dtype=np.uint8)
+     raw_image = np.zeros((100, 100, 3), dtype=np.uint8).tobytes()

      # Mock relevance_filter and outbox
      filtered_upload_called = False
@@ -15,11 +18,12 @@ async def test_get_detections(detector_node: DetectorNode, monkeypatch):

      save_args = []

-     def mock_filtered_upload(*args, **kwargs):  # pylint: disable=unused-argument
+     async def mock_filtered_upload(*args, **kwargs) -> List[str]:  # pylint: disable=unused-argument
          nonlocal filtered_upload_called
          filtered_upload_called = True
+         return []

-     def mock_save(*args, **kwargs):
+     async def mock_save(*args, **kwargs):
          nonlocal save_called
          nonlocal save_args
          save_called = True
@@ -28,9 +32,14 @@ async def test_get_detections(detector_node: DetectorNode, monkeypatch):
      monkeypatch.setattr(detector_node.relevance_filter, "may_upload_detections", mock_filtered_upload)
      monkeypatch.setattr(detector_node.outbox, "save", mock_save)

+     created_tasks = []
+
+     def mock_create_task(coroutine, *args, **kwargs):
+         created_tasks.append(coroutine)
+     monkeypatch.setattr(detector_node_module.background_tasks, "create", mock_create_task)
+
      # Test cases
-     test_cases = [
-         (None, True, False),
+     test_cases: List[Tuple[Literal['filtered', 'all', 'disabled'], bool, bool]] = [
          ("filtered", True, False),
          ("all", False, True),
          ("disabled", False, False),
@@ -39,14 +48,12 @@ async def test_get_detections(detector_node: DetectorNode, monkeypatch):
      expected_save_args = {
          'image': raw_image,
          'detections': detector_node.detector_logic.image_metadata,  # type: ignore
-         'tags': ['test_tag'],
-         'source': 'test_source',
-         'creation_date': '2024-01-01T00:00:00',
      }

      for autoupload, expect_filtered, expect_all in test_cases:
          filtered_upload_called = False
          save_called = False
+         created_tasks.clear()

          result = await detector_node.get_detections(
              raw_image=raw_image,
@@ -57,6 +64,9 @@ async def test_get_detections(detector_node: DetectorNode, monkeypatch):
              autoupload=autoupload
          )

+         for task in created_tasks:
+             await task
+
          # Check if detections were processed
          assert result is not None
          assert result.box_detections is not None
learning_loop_node/tests/detector/test_relevance_filter.py

@@ -13,7 +13,7 @@ file_path = os.path.abspath(__file__)
  test_image_path = os.path.join(os.path.dirname(file_path), 'test.jpg')


- @pytest.mark.parametrize('autoupload, expected_file_count', [(None, 2), ('all', 4)])
+ @pytest.mark.parametrize('autoupload, expected_file_count', [('filtered', 2), ('all', 4)])
  async def test_filter_is_used_by_node(test_detector_node: DetectorNode, autoupload, expected_file_count):
      """Test if filtering is used by the node. In particular, when upload is filtered, the identical detections should not be uploaded twice.
      Note that we have to mock the dummy detections to only return a point and a box detection."""
learning_loop_node/tests/detector/testing_detector.py

@@ -1,10 +1,11 @@
  import logging
+ from typing import List

- import numpy as np
+ from learning_loop_node.data_classes import ImagesMetadata

  from ...data_classes import ImageMetadata
  from ...detector.detector_logic import DetectorLogic
- from ..test_helper import get_dummy_detections
+ from ..test_helper import get_dummy_metadata


  class TestingDetectorLogic(DetectorLogic):
@@ -12,11 +13,14 @@ class TestingDetectorLogic(DetectorLogic):

      def __init__(self) -> None:
          super().__init__('mocked')
-         self.det_to_return: ImageMetadata = get_dummy_detections()
+         self.det_to_return: ImageMetadata = get_dummy_metadata()

      def init(self) -> None:
          pass

-     def evaluate(self, image: np.ndarray) -> ImageMetadata:
+     def evaluate(self, image: bytes) -> ImageMetadata:
          logging.info('evaluating')
          return self.det_to_return
+
+     def batch_evaluate(self, images: List[bytes]) -> ImagesMetadata:
+         raise NotImplementedError()
learning_loop_node/tests/test_helper.py

@@ -6,8 +6,16 @@ import zipfile
  from glob import glob
  from typing import Callable

- from ..data_classes import (BoxDetection, ClassificationDetection, Detections, Point, PointDetection,
-                             SegmentationDetection, Shape)
+ from ..data_classes import (
+     BoxDetection,
+     ClassificationDetection,
+     Detections,
+     Point,
+     PointDetection,
+     SegmentationDetection,
+     Shape,
+ )
+ from ..data_classes.image_metadata import ImageMetadata
  from ..loop_communication import LoopCommunicator


@@ -62,7 +70,7 @@ def _update_attribute_dict(obj: dict, **kwargs) -> None:
          obj[key] = value


- def get_dummy_detections():
+ def get_dummy_detections() -> Detections:
      return Detections(
          box_detections=[
              BoxDetection(category_name='some_category_name', x=1, y=2, height=3, width=4,
@@ -78,3 +86,11 @@ def get_dummy_detections():
          classification_detections=[
              ClassificationDetection(category_name='some_category_name_4', model_name='some_model',
                                      confidence=.42, category_id='some_id_4')])
+
+
+ def get_dummy_metadata() -> ImageMetadata:
+     detections = get_dummy_detections()
+     return ImageMetadata(box_detections=detections.box_detections,
+                          point_detections=detections.point_detections,
+                          segmentation_detections=detections.segmentation_detections,
+                          classification_detections=detections.classification_detections)
learning_loop_node/tests/trainer/states/test_state_detecting.py

@@ -16,7 +16,8 @@ def trainer_has_detecting_error(trainer: TrainerLogic):
  async def test_successful_detecting(test_initialized_trainer: TestingTrainerLogic):
      trainer = test_initialized_trainer
      create_active_training_file(trainer, training_state='train_model_uploaded',
-                                 model_uuid_for_detecting='00000000-0000-0000-0000-000000000011')  # NOTE: this is the hard coded model uuid for zauberzeug/demo (model version 1.1)
+                                 # NOTE: this is the hard coded model uuid for zauberzeug/demo (model version 1.1)
+                                 model_uuid_for_detecting='00000000-0000-0000-0000-000000000011')

      _ = asyncio.get_running_loop().create_task(
          trainer._perform_state('detecting', TrainerState.Detecting, TrainerState.Detected, trainer._do_detections))
learning_loop_node-0.17.1.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: learning-loop-node
- Version: 0.16.1
+ Version: 0.17.1
  Summary: Python Library for Nodes which connect to the Zauberzeug Learning Loop
  Home-page: https://github.com/zauberzeug/learning_loop_node
  License: MIT
@@ -85,34 +85,48 @@ from learning_loop_node/learning_loop_node

  Detector Nodes are normally deployed on edge devices like robots or machinery but can also run in the cloud to provide backend services for an app or similar. These nodes register themselves at the Learning Loop. They provide REST and Socket.io APIs to run inference on images. The processed images can automatically be used for active learning: e.g. uncertain predictions will be sent to the Learning Loop.

- ### Running Inference
+ ### Inference API

  Images can be sent to the detector node via socketio or rest.
- The later approach can be used via curl,
+ Via **REST** you may provide the following parameters:
+
+ - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled`
+ - `camera_id`: a camera identifier (string) used to improve the autoupload filtering
+ - `tags`: comma separated list of tags to add to the image in the learning loop
+ - `source`: optional source identifier (str) for the image (e.g. a robot id)
+ - `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)

  Example usage:

- `curl --request POST -F 'file=@test.jpg' localhost:8004/detect`
+ `curl --request POST -F 'file=@test.jpg' -H 'autoupload: all' -H 'camera_id: front_cam' localhost:8004/detect`
+
+ To use the **SocketIO** inference EPs, the caller needs to connect to the detector node's SocketIO server and emit the `detect` or `batch_detect` event with the image data and image metadata (see the client sketch after this section).
+ Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).
+
+ ### Upload API

- Where 8804 is the specified port in this example.
- You can additionally provide the following camera parameters:
+ The detector has a **REST** endpoint to upload images (and detections) to the Learning Loop. The endpoint takes a POST request with one or multiple images. The images are expected to be in jpg format. The following optional parameters may be set via headers:

- - `autoupload`: configures auto-submission to the learning loop; `filtered` (default), `all`, `disabled` (example curl parameter `-H 'autoupload: all'`)
- - `camera-id`: a string which groups images for submission together (example curl parameter `-H 'camera-id: front_cam'`)
+ - `source`: optional source identifier (str) for the image (e.g. a robot id)
+ - `creation_date`: optional creation date (str) for the image in isoformat (e.g. `2023-01-30T12:34:56`)
+ - `upload_priority`: A boolean flag to prioritize the upload (defaults to False)

- To use the socketio interface, the caller needs to connect to the detector node's socketio server and emit the `detect` or `batch_detect` event with the image data and image metadata. Example code can be found [in the rosys implementation](https://github.com/zauberzeug/rosys/blob/main/rosys/vision/detector_hardware.py).
+ Example:

- The detector also has a sio **upload endpoint** that can be used to upload images and detections to the learning loop. The function receives a json dictionary, with the following entries:
+ `curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`
+
+ The detector also has a **SocketIO** upload endpoint that can be used to upload images and detections to the learning loop. The function receives a json dictionary, with the following entries:

  - `image`: the image data in jpg format
- - `tags`: a list of strings. If not provided the tag is `picked_by_system`
- - `detections`: a dictionary representing the detections. UUIDs for the classes are automatically determined based on the category names. This field is optional. If not provided, no detections are uploaded.
- - `source`: optional source identifier for the image
- - `creation_date`: optional creation date for the image
+ - `metadata`: a dictionary representing the image metadata. If metadata contains detections and/or annotations, UUIDs for the classes are automatically determined based on the category names. Metadata should follow the schema of the `ImageMetadata` data class.
  - `upload_priority`: boolean flag to prioritize the upload (defaults to False)

  The endpoint returns None if the upload was successful and an error message otherwise.

+ For both ways to upload an image, the tag `picked_by_system` is automatically added to the image metadata.
+
  ### Changing the model versioning mode

  The detector can be configured to one of the following behaviors:
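Putting the inference API together, a minimal SocketIO client sketch (the event name and payload keys mirror the `detect` handler in `detector_node.py`; the port, image path and the `python-socketio` client are assumptions):

```python
import socketio

sio = socketio.Client()
sio.connect('http://localhost:8004')  # placeholder address/port

with open('test.jpg', 'rb') as f:
    image = f.read()

# Payload keys as read by the 'detect' handler; all but 'image' are optional.
metadata = sio.call('detect', {
    'image': image,
    'camera_id': 'front_cam',
    'tags': ['test'],
    'source': 'my_robot',
    'autoupload': 'filtered',  # 'filtered' (default), 'all' or 'disabled'
    'creation_date': '2023-01-30T12:34:56',
})
print(metadata)  # the resulting ImageMetadata as a JSON-serializable dict
```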
@@ -164,12 +178,6 @@ The outbox mode can also be queried via:
  - HTTP: `curl http://localhost/outbox_mode`
  - SocketIO: `sio.emit('get_outbox_mode')`

- ### Explicit upload
-
- The detector has a REST endpoint to upload images (and detections) to the Learning Loop. The endpoint takes a POST request with the image and optionally the detections. The image is expected to be in jpg format. The detections are expected to be a json dictionary. Example:
-
- `curl -X POST -F 'files=@test.jpg' "http://localhost:/upload"`
-
  ## Trainer Node

  Trainers fetch the images and annotations from the Learning Loop to train new models.
@@ -184,7 +192,7 @@ A Converter Node converts models from one format into another.

  ...

- #### Test operability
+ ### Test operability

  Assuming there is a Converter Node which converts models of format 'format_a' into 'format_b'.
  Upload a model with
learning_loop_node-0.17.1.dist-info/RECORD

@@ -2,30 +2,31 @@ learning_loop_node/__init__.py,sha256=onN5s8-x_xBsCM6NLmJO0Ym1sJHeCFaGw8qb0oQZmz
  learning_loop_node/annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/annotation/annotator_logic.py,sha256=BTaopkJZkIf1CI5lfsVKsxbxoUIbDJrevavuQUT5e_c,1000
  learning_loop_node/annotation/annotator_node.py,sha256=J5xwSnM5rwTWrTe-TI37J0JHKf_4PlDuABaHvgjYr_Q,4443
- learning_loop_node/data_classes/__init__.py,sha256=6-pLbokCAvTFW-lh1lLUu7u8V5ZyD-2IVmFg5HHI4Cc,1329
- learning_loop_node/data_classes/annotations.py,sha256=NfMlTv2_5AfVY_JDM4tbjETFjSN2S2I2LJJPMMcDT50,966
- learning_loop_node/data_classes/detections.py,sha256=7vqcS0EK8cmDjRDckHlpSZDZ9YO6qajRmYvx-oxatFc,5425
+ learning_loop_node/data_classes/__init__.py,sha256=_2dxfVkkI9kNM955Y7ZE8RLgfpegZpX5tXkkPP_n6Fo,1354
+ learning_loop_node/data_classes/annotation_data.py,sha256=NfMlTv2_5AfVY_JDM4tbjETFjSN2S2I2LJJPMMcDT50,966
+ learning_loop_node/data_classes/annotations.py,sha256=ha7uuWmZqW-LU2vYrkvfnaVpvf5C2xdm826IVVBu0w0,1848
+ learning_loop_node/data_classes/detections.py,sha256=hGSKc1elk4Drp4XUjQ3F9HGeRpyLvA0q064b-S9lIug,5335
  learning_loop_node/data_classes/general.py,sha256=GQ6vPEIm4qqBV4RZT_YS_dPeKMdbCKo6Pe5-e4Cg3_k,7295
- learning_loop_node/data_classes/image_metadata.py,sha256=YccDyHMbnOrRr4-9hHbCNBpuhlZem5M64c0ZbZXTASY,1764
+ learning_loop_node/data_classes/image_metadata.py,sha256=4FFs89iZAfr2Jh-40QqZ_GGmJ8hl_W61eHcCqHflJLE,2293
  learning_loop_node/data_classes/socket_response.py,sha256=tIdt-oYf6ULoJIDYQCecNM9OtWR6_wJ9tL0Ksu83Vko,655
  learning_loop_node/data_classes/training.py,sha256=TybwcCDf_NUaDUaOj30lPm-7Z3Qk9XFRibEX5qIv96Y,5737
  learning_loop_node/data_exchanger.py,sha256=nd9JNPLn9amIeTcSIyUPpbE97ORAcb5yNphvmpgWSUQ,9095
  learning_loop_node/detector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- learning_loop_node/detector/detector_logic.py,sha256=YmsEsqSr0CUUWKtSR7EFU92HA90NvdYiPZGDQKXJUxU,2462
- learning_loop_node/detector/detector_node.py,sha256=IW9vGbl8Xq7DdylYM-jSJtitkCTs4uGYRZyWGuWauYo,29498
+ learning_loop_node/detector/detector_logic.py,sha256=0RilHkb_IYFk-BXso1QJ8in01WodbN7XeAXsKzptovY,2470
+ learning_loop_node/detector/detector_node.py,sha256=3xWI6kauXJx4WAe6iaRsdBxk-c0zAKEMusxTKQqOCyY,29961
  learning_loop_node/detector/exceptions.py,sha256=C6KbNPlSbtfgDrZx2Hbhm7Suk9jVoR3fMRCO0CkrMsQ,196
  learning_loop_node/detector/inbox_filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/detector/inbox_filter/cam_observation_history.py,sha256=1PHgXRrhSQ34HSFw7mdX8ndRxHf_i1aP5nXXnrZxhAY,3312
- learning_loop_node/detector/inbox_filter/relevance_filter.py,sha256=rI46jL9ZuI0hiDVxWCfXllB8DlQyyewNs6oZ6MnglMc,1540
- learning_loop_node/detector/outbox.py,sha256=izWJtnHG0PNX3-YWtkybLch2slnmT2pmAYrqZpHOaTA,12768
+ learning_loop_node/detector/inbox_filter/relevance_filter.py,sha256=IpoJMBPAO5GSr2uGINNu5uFar_jxWQWbH0Lz6FQ3n1M,1501
+ learning_loop_node/detector/outbox.py,sha256=HaNps_XEbvOZ-jlpZTCsk4Dbk5zq-vNYdKMBu001ckU,12132
  learning_loop_node/detector/rest/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/detector/rest/about.py,sha256=evHJ2svUZY_DFz0FSef5u9c5KW4Uc3GL7EbPinG9-dg,583
  learning_loop_node/detector/rest/backdoor_controls.py,sha256=ZNaFOvC0OLWNtcLiG-NIqS_y1kkLP4csgk3CHhp8Gis,885
- learning_loop_node/detector/rest/detect.py,sha256=wYf9cCgtImMgnHbrcE6GMXE2aBopdZciKvGmc92ZCGw,2533
+ learning_loop_node/detector/rest/detect.py,sha256=_wPgmlH-vdnprM3fc4s9YlAb0jkSxqG9AVcjMkMhP9I,2641
  learning_loop_node/detector/rest/model_version_control.py,sha256=P4FOG0U9HT6QtCoNt-1s1pT6drtgdVjGZWEuCAyuNmA,1370
  learning_loop_node/detector/rest/operation_mode.py,sha256=1_xfutA_6nzdb4Q_jZiHQ5m_wA83bcG5jSIy-sfNIvk,1575
  learning_loop_node/detector/rest/outbox_mode.py,sha256=H8coDNbgLGEfXmKQrhtXWeUHBAHpnrdZktuHXQz0xis,1148
- learning_loop_node/detector/rest/upload.py,sha256=GMDKyN3UNfzsKq5GtBBlv828lht0bztgqRqT_PQHkZM,1250
+ learning_loop_node/detector/rest/upload.py,sha256=YrIa1kILvShOn-S7Bm74zRDod-oA-Q9NLQXw6BSgB0U,1562
  learning_loop_node/enums/__init__.py,sha256=tjSrhztIQ8W656_QuXfTbbVNtH_wDXP5hpYZgzfgRhc,285
  learning_loop_node/enums/annotator.py,sha256=mtTAw-8LJIrHcYkBjYHCZuhYEEHS6QzSK8k6BhLusvQ,285
  learning_loop_node/enums/detector.py,sha256=Qvm5LWWR9BfsDxHEQ8YzaPaUuSmp4BescYuV4X4ikwE,512
@@ -34,7 +35,7 @@ learning_loop_node/enums/trainer.py,sha256=VaD63guLO4aKgVfXT0EryPlXKQGegSET3Cp4R
  learning_loop_node/examples/novelty_score_updater.py,sha256=1DRgM9lxjFV-q2JvGDDsNLz_ic_rhEZ9wc6ZdjcxwPE,2038
  learning_loop_node/globals.py,sha256=tgw_8RYOipPV9aYlyUhYtXfUxvJKRvfUk6u-qVAtZmY,174
  learning_loop_node/helpers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- learning_loop_node/helpers/background_tasks.py,sha256=sNKyHyk9J5vNn-0GG1OzNJbB-F7GXGcbCWKE3MbRrno,3346
+ learning_loop_node/helpers/background_tasks.py,sha256=gAaEXurrW_pkYlDYD-NhWvFZpU7kwoo-SzTFCckURlo,3256
  learning_loop_node/helpers/environment_reader.py,sha256=6DxDJecLHxiGczByhyVa_JssAwwft7vuNCGaEzoSY2I,1662
  learning_loop_node/helpers/gdrive_downloader.py,sha256=zeYJciTAJVRpu_eFjwgYLCpIa6hU1d71anqEBb564Rk,1145
  learning_loop_node/helpers/log_conf.py,sha256=hqVAa_9NnYEU6N0dcOKmph82p7MpgKqeF_eomTLYzWY,961
@@ -50,18 +51,18 @@ learning_loop_node/tests/annotator/conftest.py,sha256=e83I8WNAUgCFmum1GCx_nSjP9u
  learning_loop_node/tests/annotator/pytest.ini,sha256=8QdjmawLy1zAzXrJ88or1kpFDhJw0W5UOnDfGGs_igU,262
  learning_loop_node/tests/annotator/test_annotator_node.py,sha256=OgdUj0PEWSe0KPTNVVi-1d7DoK7IC9Q3Q3G8TPiP9f4,2090
  learning_loop_node/tests/detector/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- learning_loop_node/tests/detector/conftest.py,sha256=Z1uPZGSL5jZyRQkHycQpHjsBjn-sL1QfuJrrJrGTNtM,5517
+ learning_loop_node/tests/detector/conftest.py,sha256=c0L8KRpuV4No9YUOXo5eWvsjyCZhrI2CvDDlTMwKlrI,5390
  learning_loop_node/tests/detector/inbox_filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/tests/detector/inbox_filter/test_observation.py,sha256=k4WYdvnuV7d_r7zI4M2aA8WuBjm0aycQ0vj1rGE2q4w,1370
  learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py,sha256=r-wABFQVsTNTjv7vYGr8wbHfOWy43F_B14ZDWHfiZ-A,7613
- learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py,sha256=JbUnPZVjzdtAlp6cTZVAdXUluQYNueGU9eITNJKY-tU,1710
+ learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py,sha256=KoK7lL9u0N6CeZcOCCNBGDmNnXzcmMSqYM16s-A7VO4,1695
  learning_loop_node/tests/detector/pytest.ini,sha256=8QdjmawLy1zAzXrJ88or1kpFDhJw0W5UOnDfGGs_igU,262
  learning_loop_node/tests/detector/test.jpg,sha256=msA-vHPmvPiro_D102Qmn1fn4vNfooqYYEXPxZUmYpk,161390
  learning_loop_node/tests/detector/test_client_communication.py,sha256=cVviUmAwbLY3LsJcY-D3ve-Jwxk9WVOrVupeh-PdKtA,8013
- learning_loop_node/tests/detector/test_detector_node.py,sha256=0ZMV6coAvdq-nH8CwY9_LR2tUcH9VLcAB1CWuwHQMpo,3023
+ learning_loop_node/tests/detector/test_detector_node.py,sha256=vw-QOSUav460spujfVoYC7qZwWA5bLpbTTGX31rYY9I,3419
  learning_loop_node/tests/detector/test_outbox.py,sha256=K7c0GeKujNlgjDFS3aY1lN7kDbfJ4dBQfB9lBp3o3_Q,3262
- learning_loop_node/tests/detector/test_relevance_filter.py,sha256=7oTXW4AuObk7NxMqGSwnjcspH3-QUbSdCYlz9hvzV78,2079
- learning_loop_node/tests/detector/testing_detector.py,sha256=MZajybyzISz2G1OENfLHgZhBcLCYzTR4iN9JkWpq5-s,551
+ learning_loop_node/tests/detector/test_relevance_filter.py,sha256=Lki8ElGdjm0yhxY-dy_46DJjOgj7HpTa7SbnBMKjjZ4,2085
+ learning_loop_node/tests/detector/testing_detector.py,sha256=UqrFHS0AhrB1bOfvOvU5U_0ukfm5_aYNcGfPc59mGg8,712
  learning_loop_node/tests/general/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/tests/general/conftest.py,sha256=kEtkuVA2wgny-YBkLDn7Ff5j6ShOPghQUU0cH9IIl_8,2430
  learning_loop_node/tests/general/pytest.ini,sha256=8QdjmawLy1zAzXrJ88or1kpFDhJw0W5UOnDfGGs_igU,262
@@ -71,14 +72,14 @@ learning_loop_node/tests/general/test_data/model.json,sha256=_xNDucGOWila8gWnu8y
  learning_loop_node/tests/general/test_data_classes.py,sha256=RnDzRtB-eRfWnaaA6qAzC1W8wurFzJ4xt1Q5pd7ZCS0,721
  learning_loop_node/tests/general/test_downloader.py,sha256=y4GcUyR0OAfrwltd6eyQgopwTt3DwjzX0Sr8yrooLec,3347
  learning_loop_node/tests/general/test_learning_loop_node.py,sha256=SZd-VChpWnnsPN46pr4E_LL3ZevYx6psU-AWdVeOFpQ,770
- learning_loop_node/tests/test_helper.py,sha256=Xajn6BWJqeD36YAETwdcJd6awY2NPmaOis3gWgFc97k,2909
+ learning_loop_node/tests/test_helper.py,sha256=Qj_jS0xVOnwZvSJjUkrDxRXFe93hSi0RdG0wAlDVguo,3379
  learning_loop_node/tests/trainer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/tests/trainer/conftest.py,sha256=eJUUBVRTmwcEooEN29hIa3eNuo0ogAPNn7Vqs9FSRDM,3660
  learning_loop_node/tests/trainer/pytest.ini,sha256=8QdjmawLy1zAzXrJ88or1kpFDhJw0W5UOnDfGGs_igU,262
  learning_loop_node/tests/trainer/state_helper.py,sha256=MDe9opeKruip74FoRFff8MSWGiQNFqDpPtIEIbgPnFc,919
  learning_loop_node/tests/trainer/states/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  learning_loop_node/tests/trainer/states/test_state_cleanup.py,sha256=gZNxSSwnj9f0esExNnQzqadM6-sE3IsF5sNbD0bZNu8,1250
- learning_loop_node/tests/trainer/states/test_state_detecting.py,sha256=-NLR5se7_OY_X8_Gf-BWw7X6dS_Pzsnkz84J5aTbqFU,3689
+ learning_loop_node/tests/trainer/states/test_state_detecting.py,sha256=y3WCOqe4LPH4Rf97x7MmybbHOBwEqS3oOxg0TxScSX8,3720
  learning_loop_node/tests/trainer/states/test_state_download_train_model.py,sha256=-T8iAutBliv0MV5bV5lPvn2aNjF3vMBCj8iAZTC-Q7g,2992
  learning_loop_node/tests/trainer/states/test_state_prepare.py,sha256=boCU93Bv2VWbW73MC_suTbwCcuR7RWn-6dgVvdiJ9tA,2291
  learning_loop_node/tests/trainer/states/test_state_sync_confusion_matrix.py,sha256=R3UqQJ2GQMapwRQ5WuZJb9M5IfroD2QqFI4h8etiH0Y,5223
@@ -99,6 +100,6 @@ learning_loop_node/trainer/test_executor.py,sha256=6BVGDN_6f5GEMMEvDLSG1yzMybSvg
  learning_loop_node/trainer/trainer_logic.py,sha256=eK-01qZzi10UjLMCQX8vy5eW2FoghPj3rzzDC-s3Si4,8792
  learning_loop_node/trainer/trainer_logic_generic.py,sha256=KcHmXr-Hp8_Wuejzj8odY6sRPqi6aw1SEXv3YlbjM98,27057
  learning_loop_node/trainer/trainer_node.py,sha256=tsAMzJewdS7Bi_1b9FwG0d2lGlv2lY37pgOLWr0bP_I,4582
- learning_loop_node-0.16.1.dist-info/METADATA,sha256=nHAEMpBL_tSXA00hNvuLoAuh0RcS1FACaCS_JsCP7rA,13509
- learning_loop_node-0.16.1.dist-info/WHEEL,sha256=WGfLGfLX43Ei_YORXSnT54hxFygu34kMpcQdmgmEwCQ,88
- learning_loop_node-0.16.1.dist-info/RECORD,,
+ learning_loop_node-0.17.1.dist-info/METADATA,sha256=cGlwVljdR8t8cF0_ZPqw6_4M44gvLZEQByusIZD7gSs,13979
+ learning_loop_node-0.17.1.dist-info/WHEEL,sha256=WGfLGfLX43Ei_YORXSnT54hxFygu34kMpcQdmgmEwCQ,88
+ learning_loop_node-0.17.1.dist-info/RECORD,,