learning-loop-node 0.11.1__tar.gz → 0.12.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of learning-loop-node might be problematic.
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/PKG-INFO +1 -1
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/__init__.py +3 -3
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/general.py +1 -1
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/training.py +62 -67
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_exchanger.py +11 -9
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/helpers/environment_reader.py +2 -2
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/helpers/log_conf.py +4 -1
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/helpers/misc.py +7 -17
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/loop_communication.py +4 -8
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/node.py +10 -4
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/rest.py +4 -2
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/conftest.py +17 -21
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/conftest.py +18 -12
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_download_train_model.py +7 -3
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_prepare.py +0 -1
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_sync_confusion_matrix.py +2 -1
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_train.py +0 -2
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/test_trainer_states.py +6 -1
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/testing_trainer_logic.py +3 -3
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/downloader.py +1 -1
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/executor.py +2 -2
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/rest/backdoor_controls.py +6 -6
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/trainer_logic.py +7 -3
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/trainer_logic_generic.py +59 -41
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/trainer_node.py +18 -35
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/pyproject.toml +1 -1
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/README.md +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/annotation/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/annotation/annotator_logic.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/annotation/annotator_node.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/annotations.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/detections.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/image_metadata.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/socket_response.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/detector_logic.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/detector_node.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/inbox_filter/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/inbox_filter/cam_observation_history.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/inbox_filter/relevance_filter.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/outbox.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/rest/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/rest/about.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/rest/backdoor_controls.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/rest/detect.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/rest/model_version_control.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/rest/operation_mode.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/rest/outbox_mode.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/detector/rest/upload.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/examples/novelty_score_updater.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/globals.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/helpers/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/helpers/gdrive_downloader.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/py.typed +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/annotator/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/annotator/conftest.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/annotator/pytest.ini +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/annotator/test_annotator_node.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/inbox_filter/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/inbox_filter/test_observation.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/inbox_filter/test_relevance_group.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/inbox_filter/test_unexpected_observations_count.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/pytest.ini +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/test.jpg +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/test_client_communication.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/test_detector_node.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/test_outbox.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/test_relevance_filter.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/testing_detector.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/general/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/general/conftest.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/general/pytest.ini +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/general/test_data/file_1.txt +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/general/test_data/file_2.txt +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/general/test_data/model.json +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/general/test_data_classes.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/general/test_downloader.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/general/test_learning_loop_node.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/test_helper.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/pytest.ini +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/state_helper.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_cleanup.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_detecting.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_upload_detections.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_upload_model.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/test_errors.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/exceptions.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/io_helpers.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/rest/__init__.py +0 -0
- {learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/trainer/test_executor.py +0 -0
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/__init__.py
RENAMED
@@ -5,8 +5,8 @@ from .general import (AnnotationNodeStatus, Category, CategoryType, Context, Det
                       ModelInformation, NodeState, NodeStatus)
 from .image_metadata import ImageMetadata
 from .socket_response import SocketResponse
-from .training import (Errors,
-
+from .training import (Errors, PretrainedModel, TrainerState, Training, TrainingError, TrainingOut, TrainingStateData,
+                       TrainingStatus)
 
 __all__ = [
     'AnnotationData', 'AnnotationEventType', 'SegmentationAnnotation', 'ToolOutput', 'UserInput',
@@ -15,6 +15,6 @@ __all__ = [
     'AnnotationNodeStatus', 'Category', 'CategoryType', 'Context', 'DetectionStatus', 'ErrorConfiguration',
     'ModelInformation', 'NodeState', 'NodeStatus',
     'SocketResponse',
-    'Errors', '
+    'Errors', 'PretrainedModel', 'TrainerState', 'Training',
     'TrainingError', 'TrainingOut', 'TrainingStateData', 'TrainingStatus',
 ]
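For orientation, the training-related names that 0.12.0 re-exports can be imported straight from the package; a minimal sketch based on the import list above (the removed `Hyperparameter`, `TrainingData` and `Model` classes are gone, see `training.py` below):

    # illustrative import of names re-exported by data_classes/__init__.py in 0.12.0
    from learning_loop_node.data_classes import (Context, PretrainedModel, TrainerState,
                                                 Training, TrainingOut, TrainingStateData,
                                                 TrainingStatus)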
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/general.py
RENAMED
@@ -75,7 +75,7 @@ class ModelInformation():
         """
         model_info_file_path = f'{model_root_path}/model.json'
         if not os.path.exists(model_info_file_path):
-            logging.warning(
+            logging.warning('could not find model information file %s', model_info_file_path)
             return None
         with open(model_info_file_path, 'r') as f:
             try:
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_classes/training.py
RENAMED
@@ -4,46 +4,16 @@ import time
 from dataclasses import dataclass, field
 from enum import Enum
 from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Any, Dict, List, Optional
+from uuid import uuid4
 
+from ..helpers.misc import create_image_folder, create_training_folder
 # pylint: disable=no-name-in-module
 from .general import Category, Context
 
 KWONLY_SLOTS = {'kw_only': True, 'slots': True} if sys.version_info >= (3, 10) else {}
 
 
-@dataclass(**KWONLY_SLOTS)
-class Hyperparameter():
-    resolution: int
-    flip_rl: bool
-    flip_ud: bool
-
-    @staticmethod
-    def from_data(data: Dict):
-        return Hyperparameter(
-            resolution=data['resolution'],
-            flip_rl=data.get('flip_rl', False),
-            flip_ud=data.get('flip_ud', False)
-        )
-
-
-@dataclass(**KWONLY_SLOTS)
-class TrainingData():
-    image_data: List[Dict] = field(default_factory=list)
-    skipped_image_count: Optional[int] = 0
-    categories: List[Category] = field(default_factory=list)
-    hyperparameter: Optional[Hyperparameter] = None
-
-    def image_ids(self):
-        return [image['id'] for image in self.image_data]
-
-    def train_image_count(self):
-        return len([image for image in self.image_data if image['set'] == 'train'])
-
-    def test_image_count(self):
-        return len([image for image in self.image_data if image['set'] == 'test'])
-
-
 @dataclass(**KWONLY_SLOTS)
 class PretrainedModel():
     name: str
@@ -75,26 +45,21 @@ class TrainerState(str, Enum):
 class TrainingStatus():
     id: str  # NOTE this must not be changed, but tests wont detect a change -> update tests!
     name: str
+
     state: Optional[str]
-    errors: Optional[Dict]
     uptime: Optional[float]
+    errors: Optional[Dict[str, Any]]
     progress: Optional[float]
 
-    train_image_count: Optional[int] = None
-    test_image_count: Optional[int] = None
-    skipped_image_count: Optional[int] = None
     pretrained_models: List[PretrainedModel] = field(default_factory=list)
-    hyperparameters: Optional[Dict] = None
     architecture: Optional[str] = None
    context: Optional[Context] = None
 
     def short_str(self) -> str:
         prgr = f'{self.progress * 100:.0f}%' if self.progress else ''
-        trtesk = f'{self.train_image_count}/{self.test_image_count}/{self.skipped_image_count}' if self.train_image_count else 'n.a.'
         cntxt = f'{self.context.organization}/{self.context.project}' if self.context else ''
-        hyps = f'({self.hyperparameters})' if self.hyperparameters else ''
         arch = f'.{self.architecture} - ' if self.architecture else ''
-        return f'[{str(self.state).rsplit(".", maxsplit=1)[-1]} {prgr}. {self.name}({self.id}).
+        return f'[{str(self.state).rsplit(".", maxsplit=1)[-1]} {prgr}. {self.name}({self.id}). {cntxt}{arch}]'
 
 
 @dataclass(**KWONLY_SLOTS)
@@ -105,53 +70,83 @@ class Training():
     project_folder: str  # f'{GLOBALS.data_folder}/{context.organization}/{context.project}'
     images_folder: str  # f'{project_folder}/images'
     training_folder: str  # f'{project_folder}/trainings/{trainings_id}'
+
+    categories: List[Category]
+    hyperparameters: dict
+
+    training_number: int
+    training_state: str
+    model_variant: str  # from `provided_pretrained_models->name`
+
     start_time: float = field(default_factory=time.time)
 
-    # model uuid to
-    base_model_uuid_or_name: Optional[str] = None
+    base_model_uuid: Optional[str] = None  # model uuid to continue training (is loaded from loop)
 
-
-
-
-    model_uuid_for_detecting: Optional[str] = None
-    hyperparameters: Optional[Dict] = None
+    # NOTE: these are set later after the model has been uploaded
+    image_data: Optional[List[dict]] = None
+    skipped_image_count: Optional[int] = None
+    model_uuid_for_detecting: Optional[str] = None  # Model uuid to load from the loop after training and upload
 
     @property
     def training_folder_path(self) -> Path:
         return Path(self.training_folder)
 
-
-
-
-
-
-
+    @classmethod
+    def generate_training(cls, project_folder: str, context: Context, data: Dict[str, Any]) -> 'Training':
+        if 'hyperparameters' not in data or not isinstance(data['hyperparameters'], dict):
+            raise ValueError('hyperparameters missing or not a dict')
+        if 'categories' not in data or not isinstance(data['categories'], list):
+            raise ValueError('categories missing or not a list')
+        if 'training_number' not in data or not isinstance(data['training_number'], int):
+            raise ValueError('training_number missing or not an int')
+        if 'model_variant' not in data or not isinstance(data['model_variant'], str):
+            raise ValueError('model_variant missing or not a str')
+
+        training_uuid = str(uuid4())
+
+        return Training(
+            id=training_uuid,
+            context=context,
+            project_folder=project_folder,
+            images_folder=create_image_folder(project_folder),
+            training_folder=create_training_folder(project_folder, training_uuid),
+            categories=Category.from_list(data['categories']),
+            hyperparameters=data['hyperparameters'],
+            training_number=data['training_number'],
+            base_model_uuid=data.get('base_model_uuid', None),
+            model_variant=data['model_variant'],
+            training_state=TrainerState.Initialized.value
+        )
+
+    def image_ids(self) -> List[str]:
+        assert self.image_data is not None, 'Image data not set'
+        return [image['id'] for image in self.image_data]
+
+    def train_image_count(self) -> int:
+        assert self.image_data is not None, 'Image data not set'
+        return len([image for image in self.image_data if image['set'] == 'train'])
+
+    def test_image_count(self) -> int:
+        assert self.image_data is not None, 'Image data not set'
+        return len([image for image in self.image_data if image['set'] == 'test'])
 
 
 @dataclass(**KWONLY_SLOTS)
 class TrainingOut():
+    trainer_id: str
+    trainer_name: Optional[str] = None
     confusion_matrix: Optional[Dict] = None  # This is actually just class-wise metrics
     train_image_count: Optional[int] = None
     test_image_count: Optional[int] = None
-
-
+    hyperparameters: Optional[Dict[str, Any]] = None
+    best_epoch: Optional[int] = None
 
 
 @dataclass(**KWONLY_SLOTS)
 class TrainingStateData():
     confusion_matrix: Dict = field(default_factory=dict)
     meta_information: Dict = field(default_factory=dict)
-
-
-@dataclass(**KWONLY_SLOTS)
-class Model():
-    uuid: str
-    confusion_matrix: Optional[Dict] = None
-    parent_id: Optional[str] = None
-    train_image_count: Optional[int] = None
-    test_image_count: Optional[int] = None
-    trainer_id: Optional[str] = None
-    hyperparameters: Optional[str] = None
+    epoch: Optional[int] = None
 
 
 class Errors():
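The dropped `Hyperparameter`, `TrainingData` and `Model` classes are folded into `Training` itself, and the old module-level `generate_training()` helper (removed from `helpers/misc.py` below) is superseded by the `Training.generate_training` classmethod. A hedged usage sketch; the folder path and dict values are illustrative, and the call creates the image and training folders on disk:

    from learning_loop_node.data_classes import Context, Training

    # 'categories', 'hyperparameters', 'training_number' and 'model_variant' are
    # required by the validation shown above; 'base_model_uuid' is optional.
    data = {
        'categories': [],
        'hyperparameters': {'resolution': 800, 'flip_rl': False, 'flip_ud': False},
        'training_number': 1,
        'model_variant': '',
    }

    training = Training.generate_training(project_folder='/tmp/zauberzeug/demo',
                                          context=Context(organization='zauberzeug', project='demo'),
                                          data=data)
    print(training.training_folder)  # /tmp/zauberzeug/demo/trainings/<generated uuid>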
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/data_exchanger.py
RENAMED
@@ -62,7 +62,7 @@ class DataExchanger():
 
     async def fetch_image_uuids(self, query_params: Optional[str] = '') -> List[str]:
         """Fetch image uuids from the learning loop data endpoint."""
-        logging.info(
+        logging.info('Fetching image uuids for %s/%s..', self.context.organization, self.context.project)
 
         response = await self.loop_communicator.get(f'/{self.context.organization}/projects/{self.context.project}/data?{query_params}')
         assert response.status_code == 200, response
@@ -70,7 +70,7 @@ class DataExchanger():
 
     async def download_images_data(self, image_uuids: List[str], chunk_size: int = 100) -> List[Dict]:
         """Download image annotations, tags, set and other information for the given image uuids."""
-        logging.info(
+        logging.info('Fetching annotations, tags, sets, etc. for %s images..', len(image_uuids))
 
         num_image_ids = len(image_uuids)
         if num_image_ids == 0:
@@ -84,7 +84,7 @@ class DataExchanger():
             chunk_ids = image_uuids[i:i+chunk_size]
             response = await self.loop_communicator.get(f'/{self.context.organization}/projects/{self.context.project}/images?ids={",".join(chunk_ids)}')
             if response.status_code != 200:
-                logging.error(
+                logging.error('Error %s during downloading image data. Continue with next batch..', response.status_code)
                 continue
             images_data += response.json()['images']
 
@@ -92,7 +92,7 @@ class DataExchanger():
 
     async def download_images(self, image_uuids: List[str], image_folder: str, chunk_size: int = 10) -> None:
         """Downloads images (actual image data). Will skip existing images"""
-        logging.info(
+        logging.info('Downloading %s images (actual image data).. skipping existing images.', len(image_uuids))
         if not image_uuids:
             return
 
@@ -106,7 +106,7 @@ class DataExchanger():
             self.progress = 1.0
             return
 
-        logging.info(
+        logging.info('Downloading %s new images to %s..', num_new_image_ids, image_folder)
         os.makedirs(image_folder, exist_ok=True)
 
         progress_factor = 0.5 / num_new_image_ids  # second 50% of progress is for downloading images
@@ -128,7 +128,7 @@ class DataExchanger():
             await asyncio.sleep(1)
             response = await self.loop_communicator.get(path)
             if response.status_code != HTTPStatus.OK:
-                logging.error(
+                logging.error('bad status code %s for %s. Details: %s', response.status_code, path, response.text)
                 return
             filename = f'{image_folder}/{image_id}.jpg'
             async with aiofiles.open(filename, 'wb') as f:
@@ -171,7 +171,7 @@ class DataExchanger():
             created_files.append(new_file)
 
         shutil.rmtree(tmp_path, ignore_errors=True)
-        logging.info(
+        logging.info('Downloaded model %s(%s) to %s.', model_uuid, model_format, target_folder)
         return created_files
 
     async def upload_model_get_uuid(self, context: Context, files: List[str], training_number: Optional[int], mformat: str) -> str:
@@ -182,10 +182,12 @@ class DataExchanger():
         """
         response = await self.loop_communicator.put(f'/{context.organization}/projects/{context.project}/trainings/{training_number}/models/latest/{mformat}/file', files=files)
         if response.status_code != 200:
-            logging.error(
+            logging.error('Could not upload model for training %s, format %s: %s',
+                          training_number, mformat, response.text)
             raise CriticalError(
                 f'Could not upload model for training {training_number}, format {mformat}: {response.text}')
 
         uploaded_model = response.json()
-        logging.info(
+        logging.info('Uploaded model for training %s, format %s. Response is: %s',
+                     training_number, mformat, uploaded_model)
         return uploaded_model['id']
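The edits in this file complete logging calls whose 0.11.1 versions appear truncated in this listing; the new calls pass their values as %-style arguments, so formatting is deferred to the logging framework. A standalone illustration of the pattern (logger configuration and values are made up):

    import logging

    logging.basicConfig(level=logging.INFO)
    num_new_image_ids, image_folder = 42, '/tmp/images'

    # arguments are only interpolated if the record is actually emitted
    logging.info('Downloading %s new images to %s..', num_new_image_ids, image_folder)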
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/helpers/environment_reader.py
RENAMED
@@ -11,14 +11,14 @@ def read_from_env(possible_names: List[str], ignore_errors: bool = True) -> Opti
     # Possible error: no values are set
     if not values:
         if ignore_errors:
-            logging.warning(
+            logging.warning('no environment variable set for %s', possible_names)
             return None
         raise ValueError(f'no environment variable set for {possible_names}')
 
     # Possible error: multiple values are not None and not equal
     if len(values) > 1 and len(set(values)) > 1:
         if ignore_errors:
-            logging.warning(
+            logging.warning('different environment variables set for %s: %s', possible_names, values)
             return None
         raise ValueError(f'different environment variables set for {possible_names}: {values}')
 
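For context, this helper resolves one setting from several candidate environment variable names and, with the default `ignore_errors=True`, merely warns and returns `None` on missing or conflicting values. A hedged usage sketch; the candidate names are illustrative:

    import os
    from learning_loop_node.helpers.environment_reader import read_from_env

    os.environ['LOOP_ORGANIZATION'] = 'zauberzeug'

    # expected to yield 'zauberzeug'; with no variable set it would only log a warning and return None
    organization = read_from_env(['LOOP_ORGANIZATION', 'ORGANIZATION'])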
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/helpers/log_conf.py
RENAMED
@@ -2,7 +2,7 @@ import logging.config
 
 LOGGING_CONF = {
     'version': 1,
-    'disable_existing_loggers':
+    'disable_existing_loggers': False,  # to make sure this config is used
     'formatters': {
         'default': {
             'format': '%(asctime)s,%(msecs)01d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
@@ -34,3 +34,6 @@ LOGGING_CONF = {
 
 def init():
     logging.config.dictConfig(LOGGING_CONF)
+
+
+init()
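Since `init()` now runs at module level, importing the module is enough to apply `LOGGING_CONF`; `node.py` below relies on exactly that by importing `log_conf` before anything else. A minimal sketch of the effect (the logger name is arbitrary):

    import logging

    from learning_loop_node.helpers import log_conf  # noqa: F401  # importing applies LOGGING_CONF via init()

    logging.getLogger('my_node').info('handlers and formatters from LOGGING_CONF are already in place')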
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/helpers/misc.py
RENAMED
@@ -14,7 +14,8 @@ from uuid import UUID, uuid4
 
 import pynvml
 
-from ..data_classes import Context
+from ..data_classes.general import Context
+from ..data_classes.socket_response import SocketResponse
 from ..globals import GLOBALS
 
 T = TypeVar('T')
@@ -81,11 +82,11 @@ async def delete_corrupt_images(image_folder: str, check_jpeg: bool = False) ->
     n_deleted = 0
     for image in glob(f'{image_folder}/*.jpg'):
         if not await is_valid_image(image, check_jpeg):
-            logging.debug(
+            logging.debug(' deleting image %s', image)
             os.remove(image)
             n_deleted += 1
 
-    logging.info(
+    logging.info('deleted %s images', n_deleted)
 
 
 def create_resource_paths(organization_name: str, project_name: str, image_ids: List[str]) -> Tuple[List[str], List[str]]:
@@ -144,7 +145,7 @@ def ensure_socket_response(func):
                 raise Exception(
                     f"Return type for sio must be str, bool, SocketResponse or None', but was {type(value)}'")
         except Exception as e:
-            logging.exception(
+            logging.exception('An error occured for %s', args[0])
 
             return asdict(SocketResponse.for_failure(str(e)))
 
@@ -183,26 +184,15 @@ def activate_asyncio_warnings() -> None:
 
 
 def images_for_ids(image_ids, image_folder) -> List[str]:
-    logging.info(
+    logging.info('### Going to get images for %s images ids', len(image_ids))
     start = perf_counter()
     images = [img for img in glob(f'{image_folder}/**/*.*', recursive=True)
               if os.path.splitext(os.path.basename(img))[0] in image_ids]
     end = perf_counter()
-    logging.info(
+    logging.info('found %s images for %s image ids, which took %.2f seconds', len(images), len(image_ids), end-start)
     return images
 
 
-def generate_training(project_folder: str, context: Context) -> Training:
-    training_uuid = str(uuid4())
-    return Training(
-        id=training_uuid,
-        context=context,
-        project_folder=project_folder,
-        images_folder=create_image_folder(project_folder),
-        training_folder=create_training_folder(project_folder, training_uuid)
-    )
-
-
 def delete_all_training_folders(project_folder: str):
     if not os.path.exists(f'{project_folder}/trainings'):
         return
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/loop_communication.py
RENAMED
@@ -94,8 +94,7 @@ class LoopCommunicator():
         if requires_login:
             await self.ensure_login()
             return await self.retry_on_401(self._get, path, api_prefix)
-
-        return await self._get(path, api_prefix)
+        return await self._get(path, api_prefix)
 
     async def _get(self, path: str, api_prefix: str) -> httpx.Response:
         return await self.async_client.get(api_prefix+path)
@@ -104,8 +103,7 @@ class LoopCommunicator():
         if requires_login:
             await self.ensure_login()
             return await self.retry_on_401(self._put, path, files, api_prefix, **kwargs)
-
-        return await self._put(path, files, api_prefix, **kwargs)
+        return await self._put(path, files, api_prefix, **kwargs)
 
     async def _put(self, path: str, files: Optional[List[str]], api_prefix: str, **kwargs) -> httpx.Response:
         if files is None:
@@ -133,8 +131,7 @@ class LoopCommunicator():
         if requires_login:
             await self.ensure_login()
             return await self.retry_on_401(self._post, path, api_prefix, **kwargs)
-
-        return await self._post(path, api_prefix, **kwargs)
+        return await self._post(path, api_prefix, **kwargs)
 
     async def _post(self, path, api_prefix='/api', **kwargs) -> httpx.Response:
         return await self.async_client.post(api_prefix+path, **kwargs)
@@ -143,8 +140,7 @@ class LoopCommunicator():
         if requires_login:
             await self.ensure_login()
             return await self.retry_on_401(self._delete, path, api_prefix, **kwargs)
-
-        return await self._delete(path, api_prefix, **kwargs)
+        return await self._delete(path, api_prefix, **kwargs)
 
     async def _delete(self, path, api_prefix, **kwargs) -> httpx.Response:
         return await self.async_client.delete(api_prefix+path, **kwargs)
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/node.py
RENAMED
@@ -1,3 +1,10 @@
+
+# NOTE: log_conf is imported first to initialize the loggers before they are created
+from .helpers import log_conf  # pylint: disable=unused-import
+
+# isort: split
+# pylint: disable=wrong-import-order,ungrouped-imports
+
 import asyncio
 import logging
 import ssl
@@ -14,7 +21,6 @@ from socketio import AsyncClient
 
 from .data_classes import NodeStatus
 from .data_exchanger import DataExchanger
-from .helpers import log_conf
 from .helpers.misc import ensure_socket_response, read_or_create_uuid
 from .loop_communication import LoopCommunicator
 from .rest import router
@@ -39,7 +45,6 @@ class Node(FastAPI):
         """
 
         super().__init__(lifespan=self.lifespan)
-        log_conf.init()
 
         self.name = name
         self.uuid = uuid or read_or_create_uuid(self.name)
@@ -98,13 +103,14 @@ class Node(FastAPI):
         pass
 
     async def _on_startup(self):
-        self.log.info('received "startup" lifecycle-event')
+        self.log.info('received "startup" lifecycle-event - connecting to loop')
         try:
             await self.reconnect_to_loop()
         except Exception:
             self.log.warning('Could not establish sio connection to loop during startup')
-        self.log.info('
+        self.log.info('successfully connected to loop - calling on_startup')
         await self.on_startup()
+        self.log.info('successfully finished on_startup')
 
     async def _on_shutdown(self):
         self.log.info('received "shutdown" lifecycle-event')
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/rest.py
RENAMED
@@ -1,4 +1,5 @@
 import logging
+from logging import Logger
 from typing import TYPE_CHECKING
 
 from fastapi import APIRouter, HTTPException, Request
@@ -20,14 +21,15 @@ async def _debug_logging(request: Request) -> str:
     '''
     state = str(await request.body(), 'utf-8')
     node: 'Node' = request.app
+    log: Logger = node.log  # type: ignore
 
     if state == 'off':
         logger.info('turning debug logging off')
-
+        log.setLevel('INFO')
         return 'off'
     if state == 'on':
         logger.info('turning debug logging on')
-
+        log.setLevel('DEBUG')
         return 'on'
     raise HTTPException(status_code=400, detail='Invalid state')
 
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/detector/conftest.py
RENAMED
@@ -118,34 +118,30 @@ def get_outbox_files(outbox: Outbox):
     return [file for file in files if os.path.isfile(file)]
 
 
-
-def
-
-
-
-
-
-
-
-            model_name="mock",
-        )])
-
-    @property
-    def is_initialized(self):
-        return True
+class MockDetectorLogic(DetectorLogic):  # pylint: disable=abstract-method
+    def __init__(self):
+        super().__init__('mock')
+        self.image_metadata = ImageMetadata(
+            box_detections=[BoxDetection(category_name="test",
+                                         category_id="1",
+                                         confidence=0.9,
+                                         x=0, y=0, width=10, height=10,
+                                         model_name="mock",
+                                         )])
 
-
-
+    @property
+    def is_initialized(self):
+        return True
 
-
+    def evaluate_with_all_info(self, image: np.ndarray, tags: List[str], source: Optional[str] = None, creation_date: Optional[str] = None):
+        return self.image_metadata
 
 
 @pytest.fixture
-def detector_node(
+def detector_node():
     os.environ['LOOP_ORGANIZATION'] = 'test_organization'
     os.environ['LOOP_PROJECT'] = 'test_project'
-    return DetectorNode(name="test_node", detector=
+    return DetectorNode(name="test_node", detector=MockDetectorLogic())
 
 # ====================================== REDUNDANT FIXTURES IN ALL CONFTESTS ! ======================================
 
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/conftest.py
RENAMED
@@ -30,12 +30,15 @@ async def test_initialized_trainer_node():
     node = TrainerNode(name='test', trainer_logic=trainer, uuid='NOD30000-0000-0000-0000-000000000000')
     trainer._node = node
     trainer._init_new_training(context=Context(organization='zauberzeug', project='demo'),
-
-
-
-
-
-
+                               training_config={'categories': [],
+                                                'id': '00000000-0000-0000-0000-000000000012',  # version 1.2 of demo project
+                                                'training_number': 0,
+                                                'model_variant': '',
+                                                'hyperparameters': {
+                                                    'resolution': 800,
+                                                    'flip_rl': False,
+                                                    'flip_ud': False}
+                                                })
     await node._on_startup()
     yield node
     await node._on_shutdown()
@@ -50,12 +53,15 @@ async def test_initialized_trainer():
     await node._on_startup()
     trainer._node = node
     trainer._init_new_training(context=Context(organization='zauberzeug', project='demo'),
-
-
-
-
-
-
+                               training_config={'categories': [],
+                                                'id': '00000000-0000-0000-0000-000000000012',  # version 1.2 of demo project
+                                                'training_number': 0,
+                                                'model_variant': '',
+                                                'hyperparameters': {
+                                                    'resolution': 800,
+                                                    'flip_rl': False,
+                                                    'flip_ud': False}
+                                                })
     yield trainer
     try:
         await node._on_shutdown()
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_download_train_model.py
RENAMED
@@ -3,6 +3,7 @@ import asyncio
 import os
 
 from ....data_classes import TrainerState
+from ... import test_helper
 from ..state_helper import assert_training_state, create_active_training_file
 from ..testing_trainer_logic import TestingTrainerLogic
 
@@ -11,9 +12,12 @@ from ..testing_trainer_logic import TestingTrainerLogic
 
 async def test_downloading_is_successful(test_initialized_trainer: TestingTrainerLogic):
     trainer = test_initialized_trainer
-    create_active_training_file(trainer, training_state=TrainerState.DataDownloaded)
 
-
+    model_id = await test_helper.get_latest_model_id(project='demo')
+    create_active_training_file(trainer,
+                                base_model_uuid=model_id,
+                                training_state=TrainerState.DataDownloaded)
+
     trainer._init_from_last_training()
 
     asyncio.get_running_loop().create_task(
@@ -50,7 +54,7 @@ async def test_abort_download_model(test_initialized_trainer: TestingTrainerLogi
 async def test_downloading_failed(test_initialized_trainer: TestingTrainerLogic):
     trainer = test_initialized_trainer
     create_active_training_file(trainer, training_state=TrainerState.DataDownloaded,
-
+                                base_model_uuid='00000000-0000-0000-0000-000000000000')  # bad model id)
     trainer._init_from_last_training()
 
     trainer._begin_training_task()
{learning_loop_node-0.11.1 → learning_loop_node-0.12.0}/learning_loop_node/tests/trainer/states/test_state_prepare.py
RENAMED
@@ -20,7 +20,6 @@ async def test_preparing_is_successful(test_initialized_trainer: TestingTrainerL
     await trainer._perform_state('prepare', TrainerState.DataDownloading, TrainerState.DataDownloaded, trainer._prepare)
     assert trainer_has_prepare_error(trainer) is False
     assert trainer.training.training_state == TrainerState.DataDownloaded
-    assert trainer.training.data is not None
     assert trainer.node.last_training_io.load() == trainer.training
 
 