synapse-sdk 1.0.0a48__py3-none-any.whl → 1.0.0a50__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of synapse-sdk might be problematic.

synapse_sdk/loggers.py CHANGED
@@ -1,21 +1,48 @@
  import datetime
  import time
+ from typing import Any, Dict
 
  from synapse_sdk.clients.exceptions import ClientError
 
 
  class BaseLogger:
+     """Base class for logging progress and events.
+ 
+     Attributes:
+         progress_record (dict): Progress record used to track the progress of a task.
+         progress_categories (dict | None): Categories for progress tracking.
+         metrics_categories (dict | None): Categories for metrics tracking.
+         current_progress_category (str | None): Current progress category.
+         time_begin_per_category (dict): Start time tracked for each category.
+         metrics_record (dict): Recorded metrics.
+     """
+ 
      progress_record = {}
+     metrics_record = {}
      progress_categories = None
-     current_category = None
+     metrics_categories = None
+     current_progress_category = None
      time_begin_per_category = {}
 
-     def __init__(self, progress_categories=None):
+     def __init__(self, progress_categories=None, metrics_categories=None):
+         # Setup progress categories
          self.progress_categories = progress_categories
          if progress_categories:
              self.progress_record['categories'] = progress_categories
 
-     def set_progress(self, current, total, category=None):
+         # Setup metrics categories
+         self.metrics_categories = metrics_categories
+         if metrics_categories:
+             self.metrics_record['categories'] = metrics_categories
+ 
+     def set_progress(self, current: int, total: int, category: str | None = None):
+         """Set progress for a plugin run.
+ 
+         Args:
+             current (int): Current progress value.
+             total (int): Total progress value.
+             category (str | None): Progress category.
+         """
          assert 0 <= current <= total and total > 0
          assert category is not None or 'categories' not in self.progress_record
 
@@ -32,7 +59,7 @@ class BaseLogger:
          current_progress = {'percent': percent, 'time_remaining': time_remaining}
 
          if category:
-             self.current_category = category
+             self.current_progress_category = category
              self.progress_record['categories'][category].update(current_progress)
          else:
              self.progress_record.update(current_progress)
@@ -45,15 +72,15 @@
 
          overall = 0
          for category, category_record in categories.items():
-             if category == self.current_category:
+             if category == self.current_progress_category:
                  break
              overall += category_record['proportion']
 
-         category_record = categories[self.current_category]
+         category_record = categories[self.current_progress_category]
          category_percent = category_record.get('percent', 0)
          if not category_progress and 'percent' in category_record:
              category_progress = {
-                 'category': self.current_category,
+                 'category': self.current_progress_category,
                  'percent': category_percent,
                  'time_remaining': category_record.get('time_remaining'),
              }
@@ -68,6 +95,23 @@
 
          return progress
 
+     def set_metrics(self, value: Dict[Any, Any], category: str):
+         """Set metrics for a plugin run.
+ 
+         * Metrics that represent the progress of the plugin run should be stored in metrics_record.
+ 
+         Args:
+             value (Dict[Any, Any]): Metrics value.
+             category (str): Metrics category.
+         """
+         assert category is not None and category != '', 'The category argument must be a non-empty string.'
+         assert isinstance(value, dict), f'The value argument must be a dictionary, but got {type(value).__name__}.'
+ 
+         if 'categories' not in self.metrics_record:
+             self.metrics_record['categories'] = {}
+ 
+         self.metrics_record['categories'][category] = value
+ 
      def log(self, action, data, file=None):
          raise NotImplementedError
 
@@ -77,6 +121,10 @@ class ConsoleLogger(BaseLogger):
          super().set_progress(current, total, category=category)
          print(self.get_current_progress())
 
+     def set_metrics(self, value: Dict[Any, Any], category: str):
+         super().set_metrics(value, category)
+         print(self.metrics_record)
+ 
      def log(self, action, data, file=None):
          print(action, data)
 
@@ -102,6 +150,16 @@ class BackendLogger(BaseLogger):
          except ClientError:
              pass
 
+     def set_metrics(self, value: Dict[Any, Any], category: str):
+         super().set_metrics(value, category)
+         try:
+             metrics_record = {
+                 'record': self.metrics_record,
+             }
+             self.client.update_job(self.job_id, data={'metrics_record': metrics_record})
+         except ClientError:
+             pass
+ 
      def log(self, event, data, file=None):
          print(event, data)
 
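The new metrics API mirrors the existing progress API: set_metrics stores a dict per category on the logger, ConsoleLogger prints the accumulated record, and BackendLogger pushes it to the job via client.update_job. A minimal sketch of how a plugin might exercise it, assuming ConsoleLogger inherits BaseLogger.__init__ unchanged (the category names here are illustrative, not part of the SDK):

    from synapse_sdk.loggers import ConsoleLogger

    # 'convert' and 'files' are hypothetical category names for this sketch.
    logger = ConsoleLogger(
        progress_categories={'convert': {'proportion': 100}},
        metrics_categories={'files': {}},
    )

    logger.set_progress(5, 10, category='convert')
    # Any dict-shaped payload is accepted; it is stored under
    # metrics_record['categories']['files'] and printed by ConsoleLogger.
    logger.set_metrics({'stand_by': 5, 'success': 4, 'failed': 1}, category='files')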

synapse_sdk/plugins/categories/base.py CHANGED
@@ -25,7 +25,8 @@ class Action:
          method (RunMethod): The method to run the action.
          run_class (Run): The class to run the action.
          params_model (BaseModel): The model to validate the params.
-         progress_categories (List[str]): The categories to update the progress.
+         progress_categories (dict | None): The categories to update the progress.
+         metrics_categories (dict | None): The categories to update the metrics.
          params (Dict): The params to run the action.
          plugin_config (Dict): The plugin config.
          plugin_release (PluginRelease): The plugin release.
@@ -48,6 +49,7 @@ class Action:
      run_class = Run
      params_model = None
      progress_categories = None
+     metrics_categories = None
 
      # init variables
      params = None
@@ -142,6 +144,7 @@ class Action:
          context = {
              'plugin_release': self.plugin_release,
              'progress_categories': self.progress_categories,
+             'metrics_categories': self.metrics_categories,
              'params': self.params,
              'envs': self.envs,
              'debug': self.debug,

synapse_sdk/plugins/categories/export/actions/export.py CHANGED
@@ -27,6 +27,13 @@ class ExportRun(Run):
          error: str | None = None
          created: str
 
+     class MetricsRecord(BaseModel):
+         """Metrics record model."""
+ 
+         stand_by: int
+         failed: int
+         success: int
+ 
      def log_file(
          self, log_type: str, target_id: int, data_file_info: dict, status: ExportStatus, error: str | None = None
      ):
@@ -51,6 +58,16 @@ class ExportRun(Run):
              ).model_dump(),
          )
 
+     def log_metrics(self, record: MetricsRecord, category: str):
+         """Log export metrics.
+ 
+         Args:
+             record (MetricsRecord): The metrics record to log.
+             category (str): The category of the metrics.
+         """
+         record = self.MetricsRecord.model_validate(record)
+         self.set_metrics(value=record.dict(), category=category)
+ 
      def export_log_json_file(
          self,
          target_id: int,
@@ -263,6 +280,7 @@ class ExportAction(Action):
              'proportion': 100,
          }
      }
+     metrics_categories = {'data_file', 'original_file'}
 
      def get_filtered_results(self, filters, handler):
          """Get filtered target results."""

synapse_sdk/plugins/categories/export/templates/plugin/export.py CHANGED
@@ -42,6 +42,8 @@ def export(run, export_items, path_root, **params):
      origin_files_output_path.mkdir(parents=True, exist_ok=True)
 
      total = params['count']
+     original_file_metrics_record = run.MetricsRecord(stand_by=total, success=0, failed=0)
+     data_file_metrics_record = run.MetricsRecord(stand_by=total, success=0, failed=0)
      # progress init
      run.set_progress(0, total, category='dataset_conversion')
      for no, export_item in enumerate(export_items, start=1):
@@ -56,12 +58,30 @@ def export(run, export_items, path_root, **params):
          if save_original_file_flag:
              if no == 1:
                  run.log_message('Saving original file.')
-             save_original_file(run, final_data, origin_files_output_path, errors_original_file_list)
+             original_status = save_original_file(run, final_data, origin_files_output_path, errors_original_file_list)
+ 
+             original_file_metrics_record.stand_by -= 1
+             if original_status == ExportStatus.FAILED:
+                 original_file_metrics_record.failed += 1
+                 continue
+             else:
+                 original_file_metrics_record.success += 1
+ 
+             run.log_metrics(record=original_file_metrics_record, category='original_file')
 
          # Extract data as JSON files
          if no == 1:
              run.log_message('Saving json file.')
-         save_as_json(run, final_data, json_output_path, errors_json_file_list)
+         data_status = save_as_json(run, final_data, json_output_path, errors_json_file_list)
+ 
+         data_file_metrics_record.stand_by -= 1
+         if data_status == ExportStatus.FAILED:
+             data_file_metrics_record.failed += 1
+             continue
+         else:
+             data_file_metrics_record.success += 1
+ 
+         run.log_metrics(record=data_file_metrics_record, category='data_file')
 
      run.end_log()
 
@@ -89,7 +109,7 @@ def after_convert(data):
      return data
 
 
- def get_original_file_pathlib(files):
+ def get_original_file_name(files):
      """Retrieve the original file path from the given file information.
 
      Args:
@@ -97,9 +117,9 @@ def get_original_file_pathlib(files):
              original file path, metadata, etc.
 
      Returns:
-         pathlib.Path: The original file path extracted from the metadata.
+         file_name (str): The original file name extracted from the file information.
      """
-     return Path(files['meta']['path_original'])
+     return files['file_name_original']
 
 
  def save_original_file(run, result, base_path, error_file_list):
@@ -112,7 +132,7 @@ def save_original_file(run, result, base_path, error_file_list):
          error_file_list (list): A list to store error files.
      """
      file_url = result['files']['url']
-     file_name = get_original_file_pathlib(result['files']).name
+     file_name = get_original_file_name(result['files'])
      response = requests.get(file_url)
      file_info = {'file_name': file_name}
      error_msg = ''
@@ -126,6 +146,7 @@ def save_original_file(run, result, base_path, error_file_list):
          status = ExportStatus.FAILED
 
      run.export_log_original_file(result['id'], file_info, status, error_msg)
+     return status
 
 
  def save_as_json(run, result, base_path, error_file_list):
@@ -138,7 +159,7 @@ def save_as_json(run, result, base_path, error_file_list):
          error_file_list (list): A list to store error files.
      """
      # Default save file name: original file name
-     file_name = get_original_file_pathlib(result['files']).stem
+     file_name = Path(get_original_file_name(result['files'])).stem
      json_data = result['data']
      file_info = {'file_name': f'{file_name}.json'}
      error_msg = ''
@@ -152,3 +173,4 @@ def save_as_json(run, result, base_path, error_file_list):
          status = ExportStatus.FAILED
 
      run.export_log_json_file(result['id'], file_info, status, error_msg)
+     return status

synapse_sdk/plugins/categories/neural_net/actions/train.py CHANGED
@@ -21,9 +21,9 @@ class TrainRun(Run):
          # TODO validate input via plugin config
          self.log('metric', {'category': category, 'key': key, 'value': value, 'metrics': metrics})
 
-     def log_visualization(self, category, group, image, **meta):
+     def log_visualization(self, category, group, index, image, **meta):
          # TODO validate input via plugin config
-         self.log('visualization', {'category': category, 'group': group, **meta}, file=image)
+         self.log('visualization', {'category': category, 'group': group, 'index': index, **meta}, file=image)
 
 
  class Hyperparameter(BaseModel):
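The added index parameter threads a per-image position into the visualization log entry, alongside any extra keyword metadata. A minimal sketch of a call site (argument values are illustrative):

    # `run` is a TrainRun; `image` is the file object to attach; epoch=3 lands in **meta.
    run.log_visualization(category='validation', group='predictions', index=0, image=image, epoch=3)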

synapse_sdk/plugins/categories/neural_net/actions/tune.py CHANGED
@@ -22,11 +22,23 @@ class TuneRun(TrainRun):
      checkpoint_output = None
 
 
+ class SearchAlgo(BaseModel):
+     name: str
+     points_to_evaluate: Optional[dict] = None
+ 
+ 
+ class Scheduler(BaseModel):
+     name: str
+     options: Optional[str] = None
+ 
+ 
  class TuneConfig(BaseModel):
      mode: Optional[str] = None
      metric: Optional[str] = None
      num_samples: int = 1
      max_concurrent_trials: Optional[int] = None
+     search_alg: Optional[SearchAlgo] = None
+     scheduler: Optional[Scheduler] = None
 
 
  class TuneParams(BaseModel):
@@ -170,8 +182,12 @@ class TuneAction(TrainAction):
          entrypoint = _tune
 
          trainable = tune.with_parameters(entrypoint, run=self.run, dataset=input_dataset, checkpoint=checkpoint)
+ 
          tune_config = self.params['tune_config']
 
+         tune_config['search_alg'] = self.convert_tune_search_alg(tune_config)
+         tune_config['scheduler'] = self.convert_tune_scheduler(tune_config)
+ 
          hyperparameter = self.params['hyperparameter']
          param_space = self.convert_tune_params(hyperparameter)
          temp_path = tempfile.TemporaryDirectory()
@@ -230,6 +246,91 @@ class TuneAction(TrainAction):
              **params,
          })
 
+     @staticmethod
+     def convert_tune_scheduler(tune_config):
+         """Convert YAML hyperparameter configuration to a Ray Tune scheduler.
+ 
+         Args:
+             tune_config (dict): Hyperparameter configuration.
+ 
+         Returns:
+             object: Ray Tune scheduler instance, or None if no scheduler is configured.
+         """
+         from ray.tune.schedulers import (
+             ASHAScheduler,
+             HyperBandScheduler,
+             MedianStoppingRule,
+             PopulationBasedTraining,
+             FIFOScheduler,
+         )
+ 
+         if tune_config.get('scheduler') is None:
+             return None
+ 
+         scheduler_map = {
+             'fifo': FIFOScheduler,
+             'asha': ASHAScheduler,
+             'hyperband': HyperBandScheduler,
+             'pbt': PopulationBasedTraining,
+             'median': MedianStoppingRule,
+         }
+ 
+         scheduler_type = tune_config['scheduler'].get('name', 'fifo').lower()
+         scheduler_class = scheduler_map.get(scheduler_type, ASHAScheduler)
+ 
+         # Pass options to the constructor when present; otherwise use the defaults
+         options = tune_config['scheduler'].get('options')
+ 
+         # Only forward options when it is neither None nor an empty dict
+         scheduler = scheduler_class(**options) if options else scheduler_class()
+ 
+         return scheduler
+ 
+     @staticmethod
+     def convert_tune_search_alg(tune_config):
+         """Convert YAML hyperparameter configuration to a Ray Tune search algorithm.
+ 
+         Args:
+             tune_config (dict): Hyperparameter configuration.
+ 
+         Returns:
+             object: Ray Tune search algorithm instance, or None if no search algorithm is configured.
+         """
+         if tune_config.get('search_alg') is None:
+             return None
+ 
+         search_alg_name = tune_config['search_alg']['name'].lower()
+         metric = tune_config['metric']
+         mode = tune_config['mode']
+         points_to_evaluate = tune_config['search_alg'].get('points_to_evaluate', None)
+ 
+         if search_alg_name == 'axsearch':
+             from ray.tune.search.ax import AxSearch
+ 
+             search_alg = AxSearch(metric=metric, mode=mode)
+         elif search_alg_name == 'bayesoptsearch':
+             from ray.tune.search.bayesopt import BayesOptSearch
+ 
+             search_alg = BayesOptSearch(metric=metric, mode=mode)
+         elif search_alg_name == 'hyperoptsearch':
+             from ray.tune.search.hyperopt import HyperOptSearch
+ 
+             search_alg = HyperOptSearch(metric=metric, mode=mode)
+         elif search_alg_name == 'optunasearch':
+             from ray.tune.search.optuna import OptunaSearch
+ 
+             search_alg = OptunaSearch(metric=metric, mode=mode)
+         elif search_alg_name == 'basicvariantgenerator':
+             from ray.tune.search.basic_variant import BasicVariantGenerator
+ 
+             search_alg = BasicVariantGenerator(points_to_evaluate=points_to_evaluate)
+ 
+         return search_alg
+ 
      @staticmethod
      def convert_tune_params(param_list):
          """
@@ -243,19 +344,31 @@ class TuneAction(TrainAction):
          """
          from ray import tune
 
+         param_handlers = {
+             'uniform': lambda p: tune.uniform(p['min'], p['max']),
+             'quniform': lambda p: tune.quniform(p['min'], p['max']),
+             'loguniform': lambda p: tune.loguniform(p['min'], p['max'], p['base']),
+             'qloguniform': lambda p: tune.qloguniform(p['min'], p['max'], p['base']),
+             'randn': lambda p: tune.randn(p['mean'], p['sd']),
+             'qrandn': lambda p: tune.qrandn(p['mean'], p['sd']),
+             'randint': lambda p: tune.randint(p['min'], p['max']),
+             'qrandint': lambda p: tune.qrandint(p['min'], p['max']),
+             'lograndint': lambda p: tune.lograndint(p['min'], p['max'], p['base']),
+             'qlograndint': lambda p: tune.qlograndint(p['min'], p['max'], p['base']),
+             'choice': lambda p: tune.choice(p['options']),
+             'grid_search': lambda p: tune.grid_search(p['options']),
+         }
+ 
          param_space = {}
 
          for param in param_list:
              name = param['name']
              param_type = param['type']
 
-             if param_type == 'loguniform':
-                 param_space[name] = tune.loguniform(param['min'], param['max'])
-             elif param_type == 'choice':
-                 param_space[name] = tune.choice(param['options'])
-             elif param_type == 'randint':
-                 param_space[name] = tune.randint(param['min'], param['max'])
-                 # Add more type handlers as needed
+             if param_type in param_handlers:
+                 param_space[name] = param_handlers[param_type](param)
+             else:
+                 raise ValueError(f'Unknown parameter type: {param_type}')
 
          return param_space
 

synapse_sdk/plugins/categories/neural_net/templates/config.yaml CHANGED
@@ -5,7 +5,7 @@ readme: README.md
  description: This is plugin_name plugin
  category: neural_net
  tasks:
-   - object_detection
+   - image.object_detection
  data_type: image
  package_manager: uv
  actions:
@@ -19,6 +19,8 @@ actions:
      hyperparameters:
        ui_schema: |
          Dumped FormKit Schema for hyperparameters
+     options:
+       visualize: false  # Whether to visualize the training process
    deployment:
      entrypoint: plugin.inference.MockNetInference
    inference:

synapse_sdk/plugins/categories/upload/actions/task_pre_annotation.py ADDED
@@ -0,0 +1,100 @@
+ from enum import Enum
+ from typing import Annotated
+ 
+ from pydantic import AfterValidator, BaseModel, field_validator
+ from pydantic_core import PydanticCustomError
+ 
+ from synapse_sdk.clients.exceptions import ClientError
+ from synapse_sdk.plugins.categories.decorators import register_action
+ from synapse_sdk.plugins.enums import PluginCategory, RunMethod
+ from synapse_sdk.plugins.models import Run
+ from synapse_sdk.utils.pydantic.validators import non_blank
+ 
+ 
+ class TaskDataAnnotationType(str, Enum):
+     FILE = 'file'
+     INFERENCE = 'inference'
+ 
+ 
+ class TaskPreAnnotationRun(Run):
+     pass
+ 
+ 
+ class TaskPreAnnotationParams(BaseModel):
+     """TaskPreAnnotation action parameters.
+ 
+     Args:
+         name (str): The name of the action.
+         description (str | None): The description of the action.
+         project (int): The project ID.
+         data_collection (int): The data collection ID.
+         task_data_annotation_type (TaskDataAnnotationType): The type of task data annotation.
+     """
+ 
+     name: Annotated[str, AfterValidator(non_blank)]
+     description: str | None
+     project: int
+     data_collection: int
+     task_data_annotation_type: TaskDataAnnotationType
+ 
+     @field_validator('data_collection', mode='before')
+     @classmethod
+     def check_data_collection_exists(cls, value: str, info) -> str:
+         """Validate that the synapse-backend collection exists."""
+         action = info.context['action']
+         client = action.client
+         try:
+             client.get_dataset(value)
+         except ClientError:
+             raise PydanticCustomError('client_error', 'Error occurred while checking data collection exists.')
+         return value
+ 
+     @field_validator('project', mode='before')
+     @classmethod
+     def check_project_exists(cls, value: str, info) -> str:
+         """Validate that the synapse-backend project exists."""
+         if not value:
+             return value
+ 
+         action = info.context['action']
+         client = action.client
+         try:
+             client.get_project(value)
+         except ClientError:
+             raise PydanticCustomError('client_error', 'Error occurred while checking project exists.')
+         return value
+ 
+ 
+ @register_action
+ class TaskPreAnnotationAction(TaskPreAnnotationRun):
+     """TaskPreAnnotation action class.
+ 
+     * Annotate data to tasks.
+     """
+ 
+     name = 'task_pre_annotation'
+     category = PluginCategory.UPLOAD
+     method = RunMethod.JOB
+     run_class = TaskPreAnnotationRun
+     progress_categories = {
+         'generate_tasks': {
+             'proportion': 10,
+         },
+         'annotate_task_data': {
+             'proportion': 90,
+         },
+     }
+ 
+     def start(self):
+         """Start the task_pre_annotation action.
+ 
+         * Generate tasks.
+         * Annotate data to tasks.
+         """
+         task_pre_annotation = self.get_task_pre_annotation()
+         task_pre_annotation.handle_annotate_data_from_files()
+         return {}
+ 
+     def get_task_pre_annotation(self):
+         """Get the task pre annotation entrypoint."""
+         return self.entrypoint()
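
Since both field validators pull the client from info.context['action'], the params model has to be validated with a context that carries the action instance. A sketch (the IDs and the action object are illustrative):

    # `action` is a TaskPreAnnotationAction instance with a configured client.
    params = TaskPreAnnotationParams.model_validate(
        {
            'name': 'pre-annotate-batch',
            'description': None,
            'project': 12,
            'data_collection': 34,
            'task_data_annotation_type': 'file',  # or 'inference'
        },
        context={'action': action},
    )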

synapse_sdk/plugins/categories/upload/actions/upload.py CHANGED
@@ -105,8 +105,6 @@ class UploadParams(BaseModel):
      storage: int
      collection: int
      project: int | None
-     is_generate_tasks: bool = False
-     is_generate_ground_truths: bool = False
 
      @field_validator('storage', mode='before')
      @classmethod
@@ -165,8 +163,6 @@ class UploadAction(Action):
          analyze_collection: The progress category for the analyze collection process.
          data_file_upload: The progress category for the upload process.
          generate_data_units: The progress category for the generate data units process.
-         generate_tasks: The progress category for the generate tasks process.
-         generate_ground_truths: The progress category for the generate ground truths process.
      """
 
      name = 'upload'
@@ -175,19 +171,13 @@ class UploadAction(Action):
      run_class = UploadRun
      progress_categories = {
          'analyze_collection': {
-             'proportion': 5,
+             'proportion': 10,
          },
          'upload_data_files': {
-             'proportion': 35,
+             'proportion': 50,
          },
          'generate_data_units': {
-             'proportion': 20,
-         },
-         'generate_tasks': {
-             'proportion': 20,
-         },
-         'generate_ground_truths': {
-             'proportion': 20,
+             'proportion': 40,
          },
      }
 
@@ -239,26 +229,6 @@ class UploadAction(Action):
          generated_data_units = self._generate_data_units(uploaded_files, upload_result_count)
          result['generated_data_units_count'] = len(generated_data_units)
 
-         # Setup task with uploaded synapse-backend data units.
-         if not len(generated_data_units):
-             self.run.log_message('No data units were generated.', context=Context.WARNING.value)
-             self.run.end_log()
-             return result
- 
-         if self.config['options']['allow_generate_tasks'] and self.params['is_generate_tasks']:
-             generated_tasks = self._generate_tasks(generated_data_units)
-             result['generated_tasks_count'] = len(generated_tasks)
-         else:
-             self.run.log_message('Generating tasks process has passed.')
- 
-         # Generate ground truths for the uploaded data.
-         # TODO: Need to add ground truths generation logic later.
-         if self.config['options']['allow_generate_ground_truths'] and self.params['is_generate_ground_truths']:
-             generated_ground_truths = self._generate_ground_truths()
-             result['generated_ground_truths_count'] = len(generated_ground_truths)
-         else:
-             self.run.log_message('Generating ground truths process has passed.')
- 
          self.run.end_log()
          return result
 
@@ -343,53 +313,3 @@ class UploadAction(Action):
          self.run.set_progress(upload_result_count, upload_result_count, category='generate_data_units')
 
          return sum(generated_data_units, [])
- 
-     def _generate_tasks(self, generated_data_units: List) -> List:
-         """Setup task with uploaded synapse-backend data units.
- 
-         TODO: make batch size configurable.
-         """
-         # Initialize progress
-         self.run.set_progress(0, 1, category='generate_tasks')
-         self.run.log_message('Generating tasks with data files...')
- 
-         # Prepare batches for processing
-         client = self.run.client
-         project_id = self.params['project']
-         current_progress = 0
- 
-         # Generate tasks
-         generated_tasks = []
-         generated_data_units_count = len(generated_data_units)
-         for data_unit in generated_data_units:
-             tasks_data = []
-             task_data = {'project': project_id, 'data_unit': data_unit['id']}
-             tasks_data.append(task_data)
-             if tasks_data:
-                 created_tasks = client.create_tasks(tasks_data)
-                 created_task_ids = [created_task['id'] for created_task in created_tasks]
-                 generated_tasks.append(created_task_ids)
-                 for created_task_id in created_task_ids:
-                     self.run.log_task(created_task_id, UploadStatus.SUCCESS)
- 
-             self.run.set_progress(current_progress, generated_data_units_count, category='generate_tasks')
-             current_progress += 1
- 
-         # Finish progress
-         self.run.log_message('Generating tasks completed')
-         self.run.set_progress(1, 1, category='generate_tasks')
- 
-         return sum(generated_tasks, [])
- 
-     def _generate_ground_truths(self):
-         """Generate ground truths for the uploaded data.
- 
-         TODO: Need to add ground truths generation logic later.
-         """
-         # Initialize progress
-         self.run.set_progress(0, 1, category='generate_ground_truths')
-         self.run.log_message('Generating ground truths...')
- 
-         # Finish progress
-         self.run.log_message('Generating ground truths completed')
-         self.run.set_progress(1, 1, category='generate_ground_truths')

synapse_sdk/plugins/categories/upload/templates/config.yaml CHANGED
@@ -1,8 +1,10 @@
  actions:
    upload:
      entrypoint: plugin.upload.Uploader
-     options:
-       allow_generate_tasks: false
-       allow_generate_ground_truths: false
+     supported_data_type: image  # The primary data type of the synapse-backend collection (e.g. 'image', 'text', 'video', 'pcd', 'audio')
+     ui_schema: |
+       Dumped FormKit Schema for upload plugin custom options
+   task_pre_annotation:
+     entrypoint: plugin.upload.TaskPreAnnotation
      ui_schema: |
        Dumped FormKit Schema for upload plugin custom options

synapse_sdk/plugins/categories/upload/templates/plugin/task_pre_annotation.py ADDED
@@ -0,0 +1,14 @@
+ class TaskPreAnnotation:
+     def __init__(self, run, *args, **kwargs):
+         """Initialize the plugin task pre annotation action class.
+ 
+         Args:
+             run: Plugin run object.
+         """
+         self.run = run
+ 
+     def handle_annotate_data_from_files(self):
+         pass
+ 
+     def handle_annotate_data_with_inference(self):
+         pass

synapse_sdk/plugins/models.py CHANGED
@@ -128,15 +128,11 @@ class Run:
          self.logger = ConsoleLogger(**kwargs)
 
      def set_progress(self, current, total, category=''):
-         """Set progress for plugin run.
- 
-         Args:
-             current: current progress value
-             total: total progress value
-             category: progress category
-         """
          self.logger.set_progress(current, total, category)
 
+     def set_metrics(self, value: Dict[Any, Any], category: str):
+         self.logger.set_metrics(value, category)
+ 
      def log(self, event, data, file=None):
          self.logger.log(event, data, file=file)
 

synapse_sdk-1.0.0a50.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: synapse-sdk
- Version: 1.0.0a48
+ Version: 1.0.0a50
  Summary: synapse sdk
  Author-email: datamaker <developer@datamaker.io>
  License: MIT
@@ -22,6 +22,8 @@ Requires-Dist: fsspec[gcs,s3,sftp]
  Provides-Extra: all
  Requires-Dist: ray[all]; extra == "all"
  Requires-Dist: python-nmap; extra == "all"
+ Requires-Dist: hyperopt; extra == "all"
+ Requires-Dist: bayesian-optimization==1.4.3; extra == "all"
  Dynamic: license-file
 
  This is the SDK to develop synapse plugins

synapse_sdk-1.0.0a50.dist-info/RECORD CHANGED
@@ -4,7 +4,7 @@ locale/ko/LC_MESSAGES/messages.mo,sha256=7HJEJA0wKlN14xQ5VF4FCNet54tjw6mfWYj3IaB
  locale/ko/LC_MESSAGES/messages.po,sha256=TFii_RbURDH-Du_9ZQf3wNh-2briGk1IqY33-9GKrMU,1126
  synapse_sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synapse_sdk/i18n.py,sha256=VXMR-Zm_1hTAg9iPk3YZNNq-T1Bhx1J2fEtRT6kyYbg,766
- synapse_sdk/loggers.py,sha256=OSTDDhEAvj8fiAuYNZqsZ9bygGM20sMC5yJ_nOaLDDU,4155
+ synapse_sdk/loggers.py,sha256=hi-m5y4x0OcR2Esf7o849fWpdKrzCGOR4AxazbB7ShE,6528
  synapse_sdk/types.py,sha256=khzn8KpgxFdn1SrpbcuX84m_Md1Mz_HIoUoPq8uok40,698
  synapse_sdk/cli/__init__.py,sha256=P-_FXCqb_nTVdQznuHop6kDXF_JuncZpeAmgHiGoILQ,152
  synapse_sdk/cli/alias/__init__.py,sha256=jDy8N_KupVy7n_jKKWhjQOj76-mR-uoVvMoyzObUkuI,405
@@ -44,11 +44,11 @@ synapse_sdk/clients/validators/collections.py,sha256=LtnwvutsScubOUcZ2reGHLCzseX
  synapse_sdk/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synapse_sdk/plugins/enums.py,sha256=ibixwqA3sCNSriG1jAtL54JQc_Zwo3MufwYUqGhVncc,523
  synapse_sdk/plugins/exceptions.py,sha256=Qs7qODp_RRLO9y2otU2T4ryj5LFwIZODvSIXkAh91u0,691
- synapse_sdk/plugins/models.py,sha256=FI_6Hr4q4hGj-GwHjucOfX4HYsUpraGd2yeuy4FjjC0,4438
+ synapse_sdk/plugins/models.py,sha256=QSsF9A3MSbujdKEeVgkt7vpm0LGQYg_9P_T05WVSmCA,4362
  synapse_sdk/plugins/upload.py,sha256=VJOotYMayylOH0lNoAGeGHRkLdhP7jnC_A0rFQMvQpQ,3228
  synapse_sdk/plugins/utils.py,sha256=4_K6jIl0WrsXOEhFp94faMOriSsddOhIiaXcawYYUUA,3300
  synapse_sdk/plugins/categories/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- synapse_sdk/plugins/categories/base.py,sha256=ATI1VjBWm2rimSkNiiCjfZn7FO4x2oltmh81pJJGL0w,10389
+ synapse_sdk/plugins/categories/base.py,sha256=akaBeRK23knqyipTohWu5dKNU4WmPJtxRLu9cdV-xOU,10570
  synapse_sdk/plugins/categories/decorators.py,sha256=Gw6T-UHwpCKrSt596X-g2sZbY_Z1zbbogowClj7Pr5Q,518
  synapse_sdk/plugins/categories/registry.py,sha256=KdQR8SUlLT-3kgYzDNWawS1uJnAhrcw2j4zFaTpilRs,636
  synapse_sdk/plugins/categories/templates.py,sha256=FF5FerhkZMeW1YcKLY5cylC0SkWSYdJODA_Qcm4OGYQ,887
@@ -61,21 +61,21 @@ synapse_sdk/plugins/categories/data_validation/templates/plugin/validation.py,sh
  synapse_sdk/plugins/categories/export/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synapse_sdk/plugins/categories/export/enums.py,sha256=gtyngvQ1DKkos9iKGcbecwTVQQ6sDwbrBPSGPNb5Am0,127
  synapse_sdk/plugins/categories/export/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- synapse_sdk/plugins/categories/export/actions/export.py,sha256=xqPB_MufeMP3riaKCbGVFGukV8RdXcg6-zUrkw4t1-A,9922
+ synapse_sdk/plugins/categories/export/actions/export.py,sha256=2lIjur8EiwTB9sc16FV8ZaPXFxUtGRPx9hreG_DKLQA,10483
  synapse_sdk/plugins/categories/export/templates/config.yaml,sha256=N7YmnFROb3s3M35SA9nmabyzoSb5O2t2TRPicwFNN2o,56
  synapse_sdk/plugins/categories/export/templates/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- synapse_sdk/plugins/categories/export/templates/plugin/export.py,sha256=UzPOYvH2rwUlTUpMgcbn7mBsenf3hmWytE-eD_cB_9A,5202
+ synapse_sdk/plugins/categories/export/templates/plugin/export.py,sha256=zG8mSn7ZGIj8cttWmb7GEPcGgQRbZ97brJCzkuK7RP8,6106
  synapse_sdk/plugins/categories/neural_net/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synapse_sdk/plugins/categories/neural_net/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synapse_sdk/plugins/categories/neural_net/actions/deployment.py,sha256=y2LrS-pwazqRI5O0q1NUy45NQYsBj6ykbrXnDMs_fqE,1987
  synapse_sdk/plugins/categories/neural_net/actions/gradio.py,sha256=jBkonh0JHRIKFPxv-XBBFM8Da3dSJs-vyJu_KGT73DQ,4508
  synapse_sdk/plugins/categories/neural_net/actions/inference.py,sha256=0a655ELqNVjPFZTJDiw4EUdcMCPGveUEKyoYqpwMFBU,1019
  synapse_sdk/plugins/categories/neural_net/actions/test.py,sha256=JY25eg-Fo6WbgtMkGoo_qNqoaZkp3AQNEypJmeGzEog,320
- synapse_sdk/plugins/categories/neural_net/actions/train.py,sha256=kve6iTCg2kUeavMQTR2JFuoYDu-QWZFFlB58ZICQtdM,5406
- synapse_sdk/plugins/categories/neural_net/actions/tune.py,sha256=XJczlLDF8FOJXA-7TXNZa3npWhMsT0wGqQwYW3w5TDo,9475
+ synapse_sdk/plugins/categories/neural_net/actions/train.py,sha256=i406Ar0V74QwdvqI_g_DgHblB_SoGRPMsuwWcxfoeew,5429
+ synapse_sdk/plugins/categories/neural_net/actions/tune.py,sha256=UoXVW0KKUEDeHIk5I77KG1AWLSCPAwbmQFLsYwtHenU,13494
  synapse_sdk/plugins/categories/neural_net/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synapse_sdk/plugins/categories/neural_net/base/inference.py,sha256=R5DASI6-5vzsjDOYxqeGGMBjnav5qHF4hNJT8zNUR3I,1097
- synapse_sdk/plugins/categories/neural_net/templates/config.yaml,sha256=uZVuXjIfsd_pTaSKptHeHn1TN2FIiLrvvpkClToc6po,596
+ synapse_sdk/plugins/categories/neural_net/templates/config.yaml,sha256=VUCMN1_c6m6VUZqbt2zMwAfdBlusb1SE4TldEbw4498,682
  synapse_sdk/plugins/categories/neural_net/templates/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synapse_sdk/plugins/categories/neural_net/templates/plugin/inference.py,sha256=InfqKWJYi6sqiUnfPKHC5KYGhxckDaWZNQ202u-uVP4,366
  synapse_sdk/plugins/categories/neural_net/templates/plugin/test.py,sha256=kYyk7l4UtcDUAH4nkdVUGrHHHjxI4p1U13HSLnmGPyE,53
@@ -100,9 +100,11 @@ synapse_sdk/plugins/categories/smart_tool/templates/plugin/__init__.py,sha256=47
  synapse_sdk/plugins/categories/smart_tool/templates/plugin/auto_label.py,sha256=eevNg0nOcYFR4z_L_R-sCvVOYoLWSAH1jwDkAf3YCjY,320
  synapse_sdk/plugins/categories/upload/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  synapse_sdk/plugins/categories/upload/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- synapse_sdk/plugins/categories/upload/actions/upload.py,sha256=9DIH4Aw70LxDpfhrpD0MfncE1m9oj-v52FpaChkVEnA,14755
- synapse_sdk/plugins/categories/upload/templates/config.yaml,sha256=PARk7_F5zNqMK6VMXiMtzG16Um9i0-f-tHYa38Nf498,225
+ synapse_sdk/plugins/categories/upload/actions/task_pre_annotation.py,sha256=rzHpxz9fOEcp6S_YDkKFoEtfa1ZruawiJSF7yVqaKDA,3017
+ synapse_sdk/plugins/categories/upload/actions/upload.py,sha256=3qjuvH28BfMdBK2bOTo-GlqoF24eKPX10hFhha0-GEk,11278
+ synapse_sdk/plugins/categories/upload/templates/config.yaml,sha256=1O0kMfkFMGYwnpBcttrlC9bu4xzU9docw2MBOq_Elmo,417
  synapse_sdk/plugins/categories/upload/templates/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ synapse_sdk/plugins/categories/upload/templates/plugin/task_pre_annotation.py,sha256=9XkUZu7USjVjDPufM0NlYmkdKfV7Hf_9v5GN1RgZzS0,350
  synapse_sdk/plugins/categories/upload/templates/plugin/upload.py,sha256=dnK8gy33GjG5ettayawDJv1gM3xCm1K6lM-PfeeTjQw,1163
  synapse_sdk/plugins/templates/cookiecutter.json,sha256=NxOWk9A_v1pO0Ny4IYT9Cj5iiJ16--cIQrGC67QdR0I,396
  synapse_sdk/plugins/templates/hooks/post_gen_project.py,sha256=jqlYkY1O2TxIR-Vh3gnwILYy8k-D39Xx66d2KNQVMCs,147
@@ -134,9 +136,9 @@ synapse_sdk/utils/storage/providers/__init__.py,sha256=x7RGwZryT2FpVxS7fGWryRVpq
  synapse_sdk/utils/storage/providers/gcp.py,sha256=i2BQCu1Kej1If9SuNr2_lEyTcr5M_ncGITZrL0u5wEA,363
  synapse_sdk/utils/storage/providers/s3.py,sha256=W94rQvhGRXti3R4mYP7gmU5pcyCQpGFIBLvxxqLVdRM,2231
  synapse_sdk/utils/storage/providers/sftp.py,sha256=_8s9hf0JXIO21gvm-JVS00FbLsbtvly4c-ETLRax68A,1426
- synapse_sdk-1.0.0a48.dist-info/licenses/LICENSE,sha256=bKzmC5YAg4V1Fhl8OO_tqY8j62hgdncAkN7VrdjmrGk,1101
- synapse_sdk-1.0.0a48.dist-info/METADATA,sha256=d3PEB2-ivG8oy4s1Kbm78h_KPA-kP_5YM3PtqV8vRW0,1203
- synapse_sdk-1.0.0a48.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
- synapse_sdk-1.0.0a48.dist-info/entry_points.txt,sha256=VNptJoGoNJI8yLXfBmhgUefMsmGI0m3-0YoMvrOgbxo,48
- synapse_sdk-1.0.0a48.dist-info/top_level.txt,sha256=ytgJMRK1slVOKUpgcw3LEyHHP7S34J6n_gJzdkcSsw8,12
- synapse_sdk-1.0.0a48.dist-info/RECORD,,
+ synapse_sdk-1.0.0a50.dist-info/licenses/LICENSE,sha256=bKzmC5YAg4V1Fhl8OO_tqY8j62hgdncAkN7VrdjmrGk,1101
+ synapse_sdk-1.0.0a50.dist-info/METADATA,sha256=ykGGzwz-t_btKYxsEGDVbLbC4gTFsGMfd2pVt1LKZmQ,1303
+ synapse_sdk-1.0.0a50.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+ synapse_sdk-1.0.0a50.dist-info/entry_points.txt,sha256=VNptJoGoNJI8yLXfBmhgUefMsmGI0m3-0YoMvrOgbxo,48
+ synapse_sdk-1.0.0a50.dist-info/top_level.txt,sha256=ytgJMRK1slVOKUpgcw3LEyHHP7S34J6n_gJzdkcSsw8,12
+ synapse_sdk-1.0.0a50.dist-info/RECORD,,

synapse_sdk-1.0.0a50.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (79.0.0)
+ Generator: setuptools (80.8.0)
  Root-Is-Purelib: true
  Tag: py3-none-any