learning-loop-node 0.9.3__py3-none-any.whl → 0.10.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of learning-loop-node might be problematic.
- learning_loop_node/__init__.py +2 -3
- learning_loop_node/annotation/annotator_logic.py +2 -2
- learning_loop_node/annotation/annotator_node.py +16 -15
- learning_loop_node/data_classes/__init__.py +17 -10
- learning_loop_node/data_classes/detections.py +7 -2
- learning_loop_node/data_classes/general.py +4 -5
- learning_loop_node/data_classes/training.py +49 -21
- learning_loop_node/data_exchanger.py +85 -139
- learning_loop_node/detector/__init__.py +0 -1
- learning_loop_node/detector/detector_node.py +10 -13
- learning_loop_node/detector/inbox_filter/cam_observation_history.py +4 -7
- learning_loop_node/detector/outbox.py +0 -1
- learning_loop_node/detector/rest/about.py +1 -0
- learning_loop_node/detector/tests/conftest.py +0 -1
- learning_loop_node/detector/tests/test_client_communication.py +5 -3
- learning_loop_node/detector/tests/test_outbox.py +2 -0
- learning_loop_node/detector/tests/testing_detector.py +1 -8
- learning_loop_node/globals.py +2 -2
- learning_loop_node/helpers/gdrive_downloader.py +1 -1
- learning_loop_node/helpers/misc.py +124 -17
- learning_loop_node/loop_communication.py +57 -25
- learning_loop_node/node.py +62 -135
- learning_loop_node/tests/test_downloader.py +8 -7
- learning_loop_node/tests/test_executor.py +14 -11
- learning_loop_node/tests/test_helper.py +3 -5
- learning_loop_node/trainer/downloader.py +1 -1
- learning_loop_node/trainer/executor.py +87 -83
- learning_loop_node/trainer/io_helpers.py +68 -9
- learning_loop_node/trainer/rest/backdoor_controls.py +10 -5
- learning_loop_node/trainer/rest/controls.py +3 -1
- learning_loop_node/trainer/tests/conftest.py +19 -28
- learning_loop_node/trainer/tests/states/test_state_cleanup.py +5 -3
- learning_loop_node/trainer/tests/states/test_state_detecting.py +23 -20
- learning_loop_node/trainer/tests/states/test_state_download_train_model.py +18 -12
- learning_loop_node/trainer/tests/states/test_state_prepare.py +13 -12
- learning_loop_node/trainer/tests/states/test_state_sync_confusion_matrix.py +21 -18
- learning_loop_node/trainer/tests/states/test_state_train.py +27 -28
- learning_loop_node/trainer/tests/states/test_state_upload_detections.py +34 -32
- learning_loop_node/trainer/tests/states/test_state_upload_model.py +22 -20
- learning_loop_node/trainer/tests/test_errors.py +20 -12
- learning_loop_node/trainer/tests/test_trainer_states.py +4 -5
- learning_loop_node/trainer/tests/testing_trainer_logic.py +25 -30
- learning_loop_node/trainer/trainer_logic.py +80 -590
- learning_loop_node/trainer/trainer_logic_generic.py +495 -0
- learning_loop_node/trainer/trainer_node.py +27 -106
- {learning_loop_node-0.9.3.dist-info → learning_loop_node-0.10.1.dist-info}/METADATA +1 -1
- learning_loop_node-0.10.1.dist-info/RECORD +85 -0
- learning_loop_node/converter/converter_logic.py +0 -68
- learning_loop_node/converter/converter_node.py +0 -125
- learning_loop_node/converter/tests/test_converter.py +0 -55
- learning_loop_node/trainer/training_syncronizer.py +0 -52
- learning_loop_node-0.9.3.dist-info/RECORD +0 -88
- /learning_loop_node/{converter/__init__.py → py.typed} +0 -0
- {learning_loop_node-0.9.3.dist-info → learning_loop_node-0.10.1.dist-info}/WHEEL +0 -0
learning_loop_node/data_exchanger.py
CHANGED

@@ -2,23 +2,19 @@ import asyncio
 import logging
 import os
 import shutil
-import time
 import zipfile
 from glob import glob
 from http import HTTPStatus
 from io import BytesIO
-from time import
+from time import time
 from typing import Dict, List, Optional

-import aiofiles
-from tqdm.asyncio import tqdm
+import aiofiles  # type: ignore

 from .data_classes import Context
-from .helpers.misc import create_resource_paths, create_task
+from .helpers.misc import create_resource_paths, create_task, is_valid_image
 from .loop_communication import LoopCommunicator

-check_jpeg = shutil.which('jpeginfo') is not None
-

 class DownloadError(Exception):

@@ -26,201 +22,151 @@ class DownloadError(Exception):
         super().__init__(*args)
         self.cause = cause

+    def __str__(self) -> str:
+        return f'DownloadError: {self.cause}'
+

 class DataExchanger():

     def __init__(self, context: Optional[Context], loop_communicator: LoopCommunicator):
-
+        """Exchanges data with the learning loop via the loop_communicator (rest api).
+
+        Args:
+            context (Optional[Context]): The context of the node. This is the organization and project name.
+            loop_communicator (LoopCommunicator): The loop_communicator to use for communication with the learning loop.
+
+        Note:
+            The context can be set later with the set_context method.
+        """
+        self.set_context(context)
+        self.progress = 0.0
         self.loop_communicator = loop_communicator
+
+        self.check_jpeg = shutil.which('jpeginfo') is not None
+        if self.check_jpeg:
+            logging.info('Detected command line tool "jpeginfo". Images will be checked for validity')
+        else:
+            logging.error('Missing command line tool "jpeginfo". We cannot check for validity of images.')
+
+    def set_context(self, context: Optional[Context]) -> None:
+        self._context = context
         self.progress = 0.0

-
-
+    @property
+    def context(self) -> Context:
+        assert self._context, 'DataExchanger: Context was not set yet.. call set_context() first.'
+        return self._context

-
-
-
-
+    # ---------------------------- END OF INIT ----------------------------
+
+    async def fetch_image_uuids(self, query_params: Optional[str] = '') -> List[str]:
+        """Fetch image uuids from the learning loop data endpoint."""
+        logging.info(f'Fetching image uuids for {self.context.organization}/{self.context.project}..')

         response = await self.loop_communicator.get(f'/{self.context.organization}/projects/{self.context.project}/data?{query_params}')
         assert response.status_code == 200, response
         return (response.json())['image_ids']

-    async def download_images_data(self,
-
-
-            logging.warning('context was not set yet')
-            return []
-
-        return await self._download_images_data(self.context.organization, self.context.project, ids)
-
-    async def download_images(self, image_ids: List[str], image_folder: str) -> None:
-        '''Download images. Will skip existing images'''
-        if self.context is None:
-            logging.warning('context was not set yet')
-            return
-
-        new_image_ids = await asyncio.get_event_loop().run_in_executor(None, DataExchanger.filter_existing_images, image_ids, image_folder)
-        paths, ids = create_resource_paths(self.context.organization, self.context.project, new_image_ids)
-        await self._download_images(paths, ids, image_folder)
-
-    @staticmethod
-    async def delete_corrupt_images(image_folder: str) -> None:
-        logging.info('deleting corrupt images')
-        n_deleted = 0
-        for image in glob(f'{image_folder}/*.jpg'):
-            if not await DataExchanger.is_valid_image(image):
-                logging.debug(f'  deleting image {image}')
-                os.remove(image)
-                n_deleted += 1
-
-        logging.info(f'deleted {n_deleted} images')
-
-    @staticmethod
-    def filter_existing_images(all_image_ids, image_folder) -> List[str]:
-        logging.info(f'### Going to filter {len(all_image_ids)} images ids')
-        start = perf_counter()
-        ids = [os.path.splitext(os.path.basename(image))[0]
-               for image in glob(f'{image_folder}/*.jpg')]
-        logging.info(f'found {len(ids)} images on disc')
-        result = [id for id in all_image_ids if id not in ids]
-        end = perf_counter()
-        logging.info(f'calculated {len(result)} new image ids, which took {end-start:0.2f} seconds')
-        return result
-
-    def jepeg_check_info(self):
-        if check_jpeg:
-            logging.info('Detected command line tool "jpeginfo". Images will be checked for validity')
-        else:
-            logging.error('Missing command line tool "jpeginfo". We can not check for validity of images.')
+    async def download_images_data(self, image_uuids: List[str], chunk_size: int = 100) -> List[Dict]:
+        """Download image annotations, tags, set and other information for the given image uuids."""
+        logging.info(f'Fetching annotations, tags, sets, etc. for {len(image_uuids)} images..')

-
-        logging.info('fetching annotations and other image data')
-        num_image_ids = len(image_ids)
-        self.jepeg_check_info()
-        images_data = []
+        num_image_ids = len(image_uuids)
         if num_image_ids == 0:
             logging.info('got empty list. No images were downloaded')
-            return
-
+            return []
+
         progress_factor = 0.5 / num_image_ids  # 50% of progress is for downloading data
-
+        images_data: List[Dict] = []
+        for i in range(0, num_image_ids, chunk_size):
             self.progress = i * progress_factor
-            chunk_ids =
-            response = await self.loop_communicator.get(f'/{organization}/projects/{project}/images?ids={",".join(chunk_ids)}')
+            chunk_ids = image_uuids[i:i+chunk_size]
+            response = await self.loop_communicator.get(f'/{self.context.organization}/projects/{self.context.project}/images?ids={",".join(chunk_ids)}')
             if response.status_code != 200:
-                logging.error(
-                    f'Error during downloading list of images. Statuscode is {response.status_code}')
+                logging.error(f'Error {response.status_code} during downloading image data. Continue with next batch..')
                 continue
             images_data += response.json()['images']
-
-        if images_data:
-            per100 = total_time / len(images_data) * 100
-            logging.debug(f'[+] Performance: {total_time} sec total. Per 100 : {per100:.1f} sec')
-        else:
-            logging.debug(f'[+] Performance: {total_time} sec total.')
+
         return images_data

-    async def
-
-
-
+    async def download_images(self, image_uuids: List[str], image_folder: str, chunk_size: int = 10) -> None:
+        """Downloads images (actual image data). Will skip existing images"""
+        logging.info(f'Downloading {len(image_uuids)} images (actual image data).. skipping existing images.')
+        if not image_uuids:
             return
-
-
+
+        existing_uuids = {os.path.splitext(os.path.basename(image))[0] for image in glob(f'{image_folder}/*.jpg')}
+        new_image_uuids = [id for id in image_uuids if id not in existing_uuids]
+
+        paths, _ = create_resource_paths(self.context.organization, self.context.project, new_image_uuids)
+        num_image_ids = len(image_uuids)
         os.makedirs(image_folder, exist_ok=True)

         progress_factor = 0.5 / num_image_ids  # second 50% of progress is for downloading images
-        for i in
+        for i in range(0, num_image_ids, chunk_size):
             self.progress = 0.5 + i * progress_factor
             chunk_paths = paths[i:i+chunk_size]
-            chunk_ids =
+            chunk_ids = image_uuids[i:i+chunk_size]
             tasks = []
             for j, chunk_j in enumerate(chunk_paths):
-
+                start = time()
+                tasks.append(create_task(self._download_one_image(chunk_j, chunk_ids[j], image_folder)))
+                await asyncio.sleep(max(0, 0.02 - (time() - start)))  # prevent too many requests at once
             await asyncio.gather(*tasks)
-            total_time = round(time.time() - starttime, 1)
-            per100 = total_time / (i + len(tasks)) * 100
-            logging.debug(f'[+] Performance (image files): {total_time} sec total. Per 100 : {per100:.1f}')

-    async def
+    async def _download_one_image(self, path: str, image_id: str, image_folder: str) -> None:
         response = await self.loop_communicator.get(path)
         if response.status_code != HTTPStatus.OK:
-            logging.error(f'bad status code {response.status_code} for {path}')
+            logging.error(f'bad status code {response.status_code} for {path}. Details: {response.text}')
             return
         filename = f'{image_folder}/{image_id}.jpg'
         async with aiofiles.open(filename, 'wb') as f:
             await f.write(response.content)
-        if not await
+        if not await is_valid_image(filename, self.check_jpeg):
             os.remove(filename)

-
-
-
-
-
-
-
-        info = await asyncio.create_subprocess_shell(
-            f'jpeginfo -c {filename}',
-            stdout=asyncio.subprocess.PIPE,
-            stderr=asyncio.subprocess.PIPE)
-        out, _ = await info.communicate()
-        return "OK" in out.decode()
-
-    async def download_model(self, target_folder: str, context: Context, model_id: str, model_format: str) -> List[str]:
-        path = f'/{context.organization}/projects/{context.project}/models/{model_id}/{model_format}/file'
+    async def download_model(self, target_folder: str, context: Context, model_uuid: str, model_format: str) -> List[str]:
+        """Downloads a model (and additional meta data like model.json) and returns the paths of the downloaded files.
+        Used before training a model (when continuing a finished training) or before detecting images.
+        """
+        logging.info(f'Downloading model data for uuid {model_uuid} from the loop to {target_folder}..')
+
+        path = f'/{context.organization}/projects/{context.project}/models/{model_uuid}/{model_format}/file'
         response = await self.loop_communicator.get(path, requires_login=False)
         if response.status_code != 200:
             content = response.json()
-            logging.error(
-                f'could not download {self.loop_communicator.base_url}/{path}: {response.status_code}, content: {content}')
+            logging.error(f'could not download loop/{path}: {response.status_code}, content: {content}')
             raise DownloadError(content['detail'])
         try:
             provided_filename = response.headers.get(
                 "Content-Disposition").split("filename=")[1].strip('"')
             content = response.content
         except:
-            logging.
-            try:
-                logging.exception(response.json())
-            except Exception:
-                pass
+            logging.exception(f'Error during downloading model {path}:')
             raise

-        # unzip and place downloaded model
         tmp_path = f'/tmp/{os.path.splitext(provided_filename)[0]}'
         shutil.rmtree(tmp_path, ignore_errors=True)
         with zipfile.ZipFile(BytesIO(content), 'r') as zip_:
             zip_.extractall(tmp_path)

-        logging.info(f'---- downloaded model {model_id} to {tmp_path}.')
-
         created_files = []
-
-        for file in files:
+        for file in glob(f'{tmp_path}/**/*', recursive=True):
             new_file = shutil.move(file, target_folder)
-            logging.info(f'moved model file {os.path.basename(file)} to {new_file}.')
             created_files.append(new_file)
-        return created_files

-
-
-
-            msg = f'---- could not upload model with id {model_id} and format {mformat}. Details: {response.text}'
-            raise Exception(msg)
-        logging.info(f'---- uploaded model with id {model_id} and format {mformat}.')
+        shutil.rmtree(tmp_path, ignore_errors=True)
+        logging.info(f'Downloaded model {model_uuid}({model_format}) to {target_folder}.')
+        return created_files

-    async def
-        """
+    async def upload_model_get_uuid(self, context: Context, files: List[str], training_number: Optional[int], mformat: str) -> Optional[str]:
+        """Used by the trainers. Function returns the new model uuid to use for detection."""
         response = await self.loop_communicator.put(f'/{context.organization}/projects/{context.project}/trainings/{training_number}/models/latest/{mformat}/file', files=files)
         if response.status_code != 200:
-
-            logging.error(msg)
+            logging.error(f'Could not upload model for training {training_number}, format {mformat}: {response.text}')
             response.raise_for_status()
             return None
-
-
-
-
-        return uploaded_model['id']
+
+        uploaded_model = response.json()
+        logging.info(f'Uploaded model for training {training_number}, format {mformat}. Response is: {uploaded_model}')
+        return uploaded_model['id']
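For orientation, the reworked DataExchanger API above (uuid-based helpers, an explicit set_context, chunked downloads) can be exercised roughly as follows. This is a minimal sketch based only on the signatures visible in this diff; how a LoopCommunicator is constructed and authenticated is not shown here, and the organization/project names are placeholders.

import asyncio

from learning_loop_node.data_classes import Context
from learning_loop_node.data_exchanger import DataExchanger
from learning_loop_node.loop_communication import LoopCommunicator


async def sync_project_images(image_folder: str) -> None:
    # Assumption: LoopCommunicator can be created without arguments and picks up
    # host/credentials from the environment; this is not shown in the diff.
    loop_communicator = LoopCommunicator()
    context = Context(organization='my_org', project='my_project')  # hypothetical names

    exchanger = DataExchanger(context, loop_communicator)  # the context could also be set later via set_context()
    uuids = await exchanger.fetch_image_uuids()            # all image uuids for the project
    data = await exchanger.download_images_data(uuids)     # annotations, tags, sets, ... fetched in chunks of 100
    await exchanger.download_images(uuids, image_folder)   # image files; images already on disk are skipped
    print(f'fetched metadata for {len(data)} images')

# asyncio.run(sync_project_images('/tmp/images'))

Compared to 0.9.3, corrupt-image checking now happens per downloaded file via is_valid_image and the instance-level check_jpeg flag instead of the old module-level check_jpeg constant and the removed delete_corrupt_images helper.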
learning_loop_node/detector/__init__.py
CHANGED

@@ -1 +0,0 @@
-
learning_loop_node/detector/detector_node.py
CHANGED

@@ -14,7 +14,7 @@ from fastapi.encoders import jsonable_encoder
 from fastapi_socketio import SocketManager
 from socketio import AsyncClient

-from ..data_classes import Category, Context, Detections, DetectionStatus, ModelInformation,
+from ..data_classes import Category, Context, Detections, DetectionStatus, ModelInformation, Shape
 from ..data_classes.socket_response import SocketResponse
 from ..data_exchanger import DataExchanger, DownloadError
 from ..globals import GLOBALS
@@ -34,9 +34,8 @@ from .rest.operation_mode import OperationMode
 class DetectorNode(Node):

     def __init__(self, name: str, detector: DetectorLogic, uuid: Optional[str] = None, use_backdoor_controls: bool = False) -> None:
-        super().__init__(name, uuid)
+        super().__init__(name, uuid, 'detector', False)
         self.detector_logic = detector
-        self.needs_login = False
         self.organization = environment_reader.organization()
         self.project = environment_reader.project()
         assert self.organization and self.project, 'Detector node needs an organization and an project'
@@ -170,6 +169,8 @@ class DetectorNode(Node):
         def _connect(sid, environ, auth) -> None:
             self.connected_clients.append(sid)

+        print('>>>>>>>>>>>>>>>>>>>>>>> setting up sio server', flush=True)
+
         self.sio_server = SocketManager(app=self)
         self.sio_server.on('detect', _detect)
         self.sio_server.on('info', _info)
@@ -185,7 +186,9 @@ class DetectorNode(Node):
         if not update_to_model_id:
             self.log.info('could not check for updates')
             return
-
+
+        # TODO: solve race condition (it should not be required to recheck if model_info is not None, but it is!)
+        if self.detector_logic.is_initialized:
             model_info = self.detector_logic._model_info  # pylint: disable=protected-access
             if model_info is not None:
                 self.log.info(f'Current model: {model_info.version} with id {model_info.id}')
@@ -220,8 +223,7 @@ class DetectorNode(Node):
         await self.data_exchanger.download_model(target_model_folder,
                                                  Context(organization=self.organization,
                                                          project=self.project),
-                                                 update_to_model_id,
-                                                 self.detector_logic.model_format)
+                                                 update_to_model_id, self.detector_logic.model_format)
         try:
             os.unlink(model_symlink)
             os.remove(model_symlink)
@@ -256,7 +258,7 @@ class DetectorNode(Node):
             name=self.name,
             state=self.status.state,
             errors=self.status.errors,
-            uptime=int((datetime.now() - self.
+            uptime=int((datetime.now() - self.startup_datetime).total_seconds()),
             operation_mode=self.operation_mode,
             current_model=current_model,
             target_model=self.target_model,
@@ -272,13 +274,11 @@ class DetectorNode(Node):
             return False

         assert socket_response.payload is not None
+        # TODO This is weird because target_model_version is stored in self and target_model_id is returned
         self.target_model = socket_response.payload['target_model_version']
         self.log.info(f'After sending status. Target_model is {self.target_model}')
         return socket_response.payload['target_model_id']

-    async def get_state(self):
-        return NodeState.Online  # NOTE At the moment only trainer-nodes use a meaningful state
-
     async def set_operation_mode(self, mode: OperationMode):
         self.operation_mode = mode
         await self.send_status()
@@ -353,9 +353,6 @@ class DetectorNode(Node):
                 classification_detection.category_id = category_id
         return detections

-    def get_node_type(self):
-        return 'detector'
-
     def register_sio_events(self, sio_client: AsyncClient):
         pass
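The constructor change above, where super().__init__(name, uuid, 'detector', False) replaces the plain super().__init__(name, uuid) plus a separate self.needs_login = False and a get_node_type() override, suggests that Node subclasses now hand their node type and login requirement to the base class. Below is a minimal sketch of what that migration might look like for a custom subclass; the keyword names of Node.__init__ are not visible in this diff, so the arguments are passed positionally, mirroring DetectorNode.

from typing import Optional

from learning_loop_node.node import Node  # module path taken from the file list above; not confirmed by this diff


class MyDetectorLikeNode(Node):  # hypothetical subclass for illustration only
    def __init__(self, name: str, uuid: Optional[str] = None) -> None:
        # 0.9.3 style: super().__init__(name, uuid); self.needs_login = False; plus a get_node_type() method.
        # 0.10.1 style: node type and login flag go to the base constructor, as DetectorNode does above.
        super().__init__(name, uuid, 'detector', False)  # positional arguments as seen in this diff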
learning_loop_node/detector/inbox_filter/cam_observation_history.py
CHANGED

@@ -1,20 +1,17 @@
 import os
 from typing import List, Union

-from learning_loop_node.data_classes import (BoxDetection,
-
-                                             Detections, Observation,
-                                             PointDetection,
-                                             SegmentationDetection)
+from learning_loop_node.data_classes import (BoxDetection, ClassificationDetection, Detections, Observation,
+                                             PointDetection, SegmentationDetection)


 class CamObservationHistory:
-    def __init__(self):
+    def __init__(self) -> None:
         self.reset_time = 3600
         self.recent_observations: List[Observation] = []
         self.iou_threshold = 0.5

-    def forget_old_detections(self):
+    def forget_old_detections(self) -> None:
         self.recent_observations = [detection
                                     for detection in self.recent_observations
                                     if not detection.is_older_than(self.reset_time)]
learning_loop_node/detector/outbox.py
CHANGED

@@ -53,7 +53,6 @@ class Outbox():
         with open(tmp + '/image.json', 'w') as f:
             json.dump(jsonable_encoder(asdict(detections)), f)

-        # TODO sometimes No such file or directory: '/tmp/learning_loop_lib_data/tmp/2023-09-07_13:27:38.399/image.jpg'
         with open(tmp + '/image.jpg', 'wb') as f:
             f.write(image)
learning_loop_node/detector/tests/conftest.py
CHANGED

@@ -12,7 +12,6 @@ import socketio
 import uvicorn

 from learning_loop_node import DetectorNode
-from learning_loop_node.data_classes.general import Category, ModelInformation
 from learning_loop_node.detector.outbox import Outbox
 from learning_loop_node.globals import GLOBALS
learning_loop_node/detector/tests/test_client_communication.py
CHANGED

@@ -5,7 +5,7 @@ import pytest
 import requests

 from learning_loop_node import DetectorNode
-from learning_loop_node.data_classes import
+from learning_loop_node.data_classes import ModelInformation
 from learning_loop_node.detector.tests.conftest import get_outbox_files
 from learning_loop_node.globals import GLOBALS

@@ -88,15 +88,17 @@ async def test_sio_upload(test_detector_node: DetectorNode, sio_client):
     assert len(get_outbox_files(test_detector_node.outbox)) == 2, 'There should be one image and one .json file.'


+# NOTE: This test seems to be flaky.
 async def test_about_endpoint(test_detector_node: DetectorNode):
-    await asyncio.sleep(
+    await asyncio.sleep(3)
     response = requests.get(f'http://localhost:{GLOBALS.detector_port}/about', timeout=30)

     assert response.status_code == 200
     response_dict = json.loads(response.content)
+    assert response_dict['model_info']
     model_information = ModelInformation.from_dict(response_dict['model_info'])

     assert response_dict['operation_mode'] == 'idle'
     assert response_dict['state'] == 'online'
     assert response_dict['target_model'] == '1.1'
-    assert any(
+    assert any(c.name == 'purple point' for c in model_information.categories)
learning_loop_node/detector/tests/test_outbox.py
CHANGED

@@ -9,6 +9,8 @@ from learning_loop_node.data_classes import Detections
 from learning_loop_node.detector.detector_node import DetectorNode
 from learning_loop_node.detector.outbox import Outbox

+# pylint: disable=redefined-outer-name
+

 @pytest.fixture()
 def test_outbox():
learning_loop_node/detector/tests/testing_detector.py
CHANGED

@@ -4,7 +4,7 @@ import numpy as np

 from learning_loop_node import DetectorLogic
 from learning_loop_node.conftest import get_dummy_detections
-from learning_loop_node.data_classes import
+from learning_loop_node.data_classes import Detections


 class TestingDetectorLogic(DetectorLogic):
@@ -20,10 +20,3 @@ class TestingDetectorLogic(DetectorLogic):
     def evaluate(self, image: np.ndarray) -> Detections:
         logging.info('evaluating')
         return self.det_to_return
-
-        # return Detections(
-        #     box_detections=[BoxDetection(category_name='some_category_name', x=1, y=2, height=3, width=4,
-        #                                  model_name='some_model', confidence=.42, category_id='some_id')],
-        #     point_detections=[PointDetection(category_name='some_category_name_2', x=10, y=12,
-        #                                      model_name='some_model', confidence=.42, category_id='some_id')]
-        # )
learning_loop_node/globals.py
CHANGED