synapse-sdk 1.0.0a70__py3-none-any.whl → 1.0.0a72__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- synapse_sdk/loggers.py +1 -1
- synapse_sdk/plugins/categories/export/actions/export.py +12 -1
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task.py +189 -9
- synapse_sdk/plugins/categories/pre_annotation/templates/plugin/to_task.py +16 -0
- synapse_sdk/plugins/categories/upload/actions/upload.py +12 -1
- synapse_sdk/plugins/models.py +4 -1
- synapse_sdk/utils/converters/__init__.py +6 -5
- synapse_sdk/utils/converters/coco/from_dm.py +3 -3
- synapse_sdk/utils/converters/yolo/__init__.py +0 -0
- synapse_sdk/utils/converters/yolo/from_dm.py +296 -0
- {synapse_sdk-1.0.0a70.dist-info → synapse_sdk-1.0.0a72.dist-info}/METADATA +1 -1
- {synapse_sdk-1.0.0a70.dist-info → synapse_sdk-1.0.0a72.dist-info}/RECORD +16 -14
- {synapse_sdk-1.0.0a70.dist-info → synapse_sdk-1.0.0a72.dist-info}/WHEEL +0 -0
- {synapse_sdk-1.0.0a70.dist-info → synapse_sdk-1.0.0a72.dist-info}/entry_points.txt +0 -0
- {synapse_sdk-1.0.0a70.dist-info → synapse_sdk-1.0.0a72.dist-info}/licenses/LICENSE +0 -0
- {synapse_sdk-1.0.0a70.dist-info → synapse_sdk-1.0.0a72.dist-info}/top_level.txt +0 -0
synapse_sdk/loggers.py
CHANGED

@@ -110,7 +110,7 @@ class BaseLogger:
         if 'categories' not in self.metrics_record:
             self.metrics_record['categories'] = {}

-        self.metrics_record['categories']
+        self.metrics_record['categories'].setdefault(category, {}).update(value)

     def log(self, action, data, file=None):
         raise NotImplementedError
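The loggers.py change replaces a previously truncated, no-op attribute lookup with a real merge: new metric values for a category are folded into any existing record instead of being dropped. A minimal sketch of the resulting merge semantics, using a plain dict and a hypothetical `set_metrics` helper (the real BaseLogger method name is not visible in this diff):

```python
# Sketch of the merge semantics introduced by the loggers.py fix.
# `metrics_record` stands in for BaseLogger.metrics_record; the helper
# name is an assumption for illustration.
metrics_record = {}

def set_metrics(category, value):
    if 'categories' not in metrics_record:
        metrics_record['categories'] = {}
    # setdefault(...).update(...) merges new keys into an existing
    # category instead of replacing the category dict wholesale.
    metrics_record['categories'].setdefault(category, {}).update(value)

set_metrics('data_file', {'stand_by': 3})
set_metrics('data_file', {'success': 1})
print(metrics_record)
# {'categories': {'data_file': {'stand_by': 3, 'success': 1}}}
```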

synapse_sdk/plugins/categories/export/actions/export.py
CHANGED

@@ -280,7 +280,18 @@ class ExportAction(Action):
             'proportion': 100,
         }
     }
-    metrics_categories = {
+    metrics_categories = {
+        'data_file': {
+            'stand_by': 0,
+            'failed': 0,
+            'success': 0,
+        },
+        'original_file': {
+            'stand_by': 0,
+            'failed': 0,
+            'success': 0,
+        },
+    }

     def get_filtered_results(self, filters, handler):
         """Get filtered target results."""

synapse_sdk/plugins/categories/pre_annotation/actions/to_task.py
CHANGED

@@ -67,9 +67,11 @@ class ToTaskParams(BaseModel):
         name (str): The name of the action.
         description (str | None): The description of the action.
         project (int): The project ID.
+        agent (int): The agent ID.
         task_filters (dict): The filters of tasks.
         method (AnnotationMethod): The method of annotation.
         target_specification_name (str | None): The name of the target specification.
+        model (int): The model ID.
         pre_processor (int | None): The pre processor ID.
         pre_processor_params (dict): The params of the pre processor.
     """

@@ -77,9 +79,11 @@ class ToTaskParams(BaseModel):
     name: Annotated[str, AfterValidator(non_blank)]
     description: Optional[str] = None
     project: int
+    agent: int
     task_filters: Dict[str, Any]
     method: Optional[AnnotationMethod] = None
     target_specification_name: Optional[str] = None
+    model: Optional[int] = None
     pre_processor: Optional[int] = None
     pre_processor_params: Dict[str, Any]
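With the new `agent` and `model` fields, a to_task params payload for the inference method would look roughly like this. All IDs and filter values are hypothetical, and the enum value for `method` is an assumption; validation follows the ToTaskParams model above:

```python
# Hypothetical payload validated by ToTaskParams; IDs are made up.
params = {
    'name': 'pre-annotate-batch-7',
    'description': None,
    'project': 12,
    'agent': 3,                       # new in a72: agent ID
    'task_filters': {'status': 'stand_by'},
    'method': 'inference',            # AnnotationMethod value, assumed spelling
    'target_specification_name': None,
    'model': 45,                      # new in a72: required for inference
    'pre_processor': 8,
    'pre_processor_params': {'confidence_threshold': 0.5},
}
```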
@@ -142,7 +146,13 @@ class ToTaskAction(Action):
             'proportion': 100,
         },
     }
-    metrics_categories = {
+    metrics_categories = {
+        'annotate_task_data': {
+            'stand_by': 0,
+            'failed': 0,
+            'success': 0,
+        }
+    }

     def start(self):
         """Start to_task action.

@@ -256,7 +266,9 @@ class ToTaskAction(Action):
         # Process each task
         for task_id in task_ids:
             try:
-                result = self._process_single_task(
+                result = self._process_single_task(
+                    client, task_id, task_params, target_specification_name, AnnotationMethod.FILE
+                )
                 if result['success']:
                     success_count += 1
                 else:

@@ -279,7 +291,12 @@ class ToTaskAction(Action):
         self.run.log_message(f'Annotation completed. Success: {success_count}, Failed: {failed_count}')

     def _process_single_task(
-        self,
+        self,
+        client: BackendClient,
+        task_id: int,
+        task_params: Dict[str, Any],
+        target_specification_name: Optional[str],
+        method: AnnotationMethod,
     ) -> Dict[str, Any]:
         """Process a single task for annotation.

@@ -287,7 +304,8 @@ class ToTaskAction(Action):
             client (BackendClient): The backend client instance.
             task_id (int): The task ID to process.
             task_params (Dict[str, Any]): Parameters for getting task data.
-            target_specification_name (str): The name of the target specification.
+            target_specification_name (Optional[str]): The name of the target specification.
+            method (AnnotationMethod): The annotation method to use.

         Returns:
             Dict[str, Any]: Result dictionary with 'success' boolean and optional 'error' message.

@@ -308,6 +326,41 @@ class ToTaskAction(Action):

         task: Dict[str, Any] = task_response

+        if method == AnnotationMethod.FILE:
+            if not target_specification_name:
+                error_msg = 'Target specification name is required for file annotation method'
+                self.run.log_message(f'{error_msg} for task {task_id}')
+                self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
+                return {'success': False, 'error': error_msg}
+            return self._process_single_task_with_file(client, task_id, task, target_specification_name)
+        elif method == AnnotationMethod.INFERENCE:
+            return self._process_single_task_with_inference(client, task_id, task)
+        else:
+            error_msg = f'Unsupported annotation method: {method}'
+            self.run.log_message(f'{error_msg} for task {task_id}')
+            self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
+            return {'success': False, 'error': error_msg}
+
+    def _process_single_task_with_file(
+        self, client: BackendClient, task_id: int, task: Dict[str, Any], target_specification_name: str
+    ) -> Dict[str, Any]:
+        """Process a single task for file-based annotation.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            task_id (int): The task ID to process.
+            task (Dict[str, Any]): The task data.
+            target_specification_name (str): The name of the target specification.
+
+        Returns:
+            Dict[str, Any]: Result dictionary with 'success' boolean and optional 'error' message.
+        """
+        if not self.run:
+            raise ValueError('Run instance not properly initialized')
+
+        # Type assertion to help the linter
+        assert isinstance(self.run, ToTaskRun)
+
         # Extract data file information
         data_unit = task.get('data_unit', {})
         files = data_unit.get('files', {})

@@ -332,8 +385,12 @@ class ToTaskAction(Action):
             response.raise_for_status()
             data = json.loads(response.content)

+            # Convert data to task object
+            annotation_to_task = self.entrypoint(self.run)
+            converted_data = annotation_to_task.convert_data_from_file(data)
+
             # Submit annotation data
-            client.annotate_task_data(task_id, data={'action': 'submit', 'data':
+            client.annotate_task_data(task_id, data={'action': 'submit', 'data': converted_data})

             # Log success
             self.run.log_annotate_task_data({'task_id': task_id, 'url': url}, AnnotateTaskDataStatus.SUCCESS)

@@ -350,6 +407,84 @@ class ToTaskAction(Action):
             self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
             return {'success': False, 'error': error_msg}

+    def _process_single_task_with_inference(
+        self, client: BackendClient, task_id: int, task: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """Process a single task for inference-based annotation.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            task_id (int): The task ID to process.
+            task (Dict[str, Any]): The task data.
+
+        Returns:
+            Dict[str, Any]: Result dictionary with 'success' boolean and optional 'error' message.
+        """
+        if not self.run or not self.params:
+            raise ValueError('Run instance or parameters not properly initialized')
+
+        # Type assertion to help the linter
+        assert isinstance(self.run, ToTaskRun)
+
+        try:
+            # Get pre-processor information
+            pre_processor_id = self.params.get('pre_processor')
+
+            if not pre_processor_id:
+                error_msg = 'Pre-processor ID is required for inference annotation method'
+                self.run.log_message(f'{error_msg} for task {task_id}')
+                self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
+                return {'success': False, 'error': error_msg}
+
+            # Call inference pre processor if specified.
+            pre_processor = client.get_plugin_release(pre_processor_id)
+            pre_processor_code = pre_processor['config']['code']
+            pre_processor_version = pre_processor['version']
+
+            # Extract task data for inference
+            data_unit = task.get('data_unit', {})
+            files = data_unit.get('files', {})
+
+            # Find primary image URL from files
+            primary_file_url = ''
+            for file_info in files.values():
+                if file_info.get('is_primary') and file_info.get('url'):
+                    primary_file_url = file_info['url']
+                    break
+
+            pre_processor_params = self.params.get('pre_processor_params', {})
+            pre_processor_params['image_path'] = primary_file_url
+            inference_payload = {
+                'agent': 1,
+                'action': 'inference',
+                'version': pre_processor_version,
+                'params': {
+                    'model': self.params.get('model'),
+                    'method': 'post',
+                    'json': pre_processor_params,
+                },
+            }
+            inference_data = client.run_plugin(pre_processor_code, inference_payload)
+
+            # Convert data to task object
+            annotation_to_task = self.entrypoint(self.run)
+            converted_result = annotation_to_task.convert_data_from_inference(inference_data)
+
+            # Submit inference annotation data
+            client.annotate_task_data(task_id, data={'action': 'submit', 'data': converted_result})
+
+            # Log success
+            self.run.log_annotate_task_data(
+                {'task_id': task_id, 'pre_processor_id': pre_processor_id}, AnnotateTaskDataStatus.SUCCESS
+            )
+            return {'success': True}
+
+        except Exception as e:
+            error_msg = f'Failed to process inference for task {task_id}: {str(e)}'
+            self.run.log_message(error_msg)
+            self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
+            return {'success': False, 'error': error_msg}
+
     def _update_metrics(self, total_tasks: int, success_count: int, failed_count: int):
         """Update metrics for task annotation progress.

@@ -375,8 +510,53 @@ class ToTaskAction(Action):
         Args:
             task_ids (List[int]): List of task IDs to annotate data to.
         """
-        if not self.run:
-            raise ValueError('Run instance not properly initialized')
+        if not self.run or not self.params:
+            raise ValueError('Run instance or parameters not properly initialized')

-        self.
-
+        if not self.params.get('model'):
+            raise ValueError('Model is required for inference annotation method')
+
+        # Type assertion to help the linter
+        assert isinstance(self.run, ToTaskRun)
+        assert isinstance(self.run.client, BackendClient)
+
+        client: BackendClient = self.run.client
+        task_params = {
+            'fields': 'id,data,data_unit',
+            'expand': 'data_unit',
+        }
+
+        total_tasks = len(task_ids)
+        success_count = 0
+        failed_count = 0
+        current_progress = 0
+
+        # Initialize metrics and progress
+        self._update_metrics(total_tasks, success_count, failed_count)
+        self.run.set_progress(0, total_tasks, category='annotate_task_data')
+        self.run.log_message('Annotating data to tasks using inference...')
+
+        # Process each task
+        for task_id in task_ids:
+            try:
+                result = self._process_single_task(client, task_id, task_params, None, AnnotationMethod.INFERENCE)
+                if result['success']:
+                    success_count += 1
+                else:
+                    failed_count += 1
+
+                current_progress += 1
+                self._update_metrics(total_tasks, success_count, failed_count)
+                self.run.set_progress(current_progress, total_tasks, category='annotate_task_data')
+
+            except Exception as e:
+                self.run.log_message(f'Failed to process task {task_id}: {str(e)}')
+                self.run.log_annotate_task_data({'task_id': task_id, 'error': str(e)}, AnnotateTaskDataStatus.FAILED)
+                failed_count += 1
+                current_progress += 1
+                self._update_metrics(total_tasks, success_count, failed_count)
+                self.run.set_progress(current_progress, total_tasks, category='annotate_task_data')
+
+        # Finalize progress
+        self.run.set_progress(total_tasks, total_tasks, category='annotate_task_data')
+        self.run.log_message(f'Inference annotation completed. Success: {success_count}, Failed: {failed_count}')
synapse_sdk/plugins/categories/pre_annotation/templates/plugin/to_task.py
CHANGED

@@ -6,3 +6,19 @@ class AnnotationToTask:
             run: Plugin run object.
         """
         self.run = run
+
+    def convert_data_from_file(self, data: dict):
+        """Convert the data from a file to a task object.
+
+        Args:
+            data: Converted data.
+        """
+        return data
+
+    def convert_data_from_inference(self, data: dict):
+        """Convert the data from inference result to a task object.
+
+        Args:
+            data: Converted data.
+        """
+        return data
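Both template hooks are identity functions by default; a plugin is expected to override them to reshape raw file or inference output into whatever structure `annotate_task_data` submits. A hedged sketch of such an override, with a local stand-in for the template class; the `predictions`/`label`/`box` field names are illustrative assumptions, not a documented schema:

```python
class AnnotationToTask:  # stand-in for the SDK template class above
    def __init__(self, run):
        self.run = run

    def convert_data_from_inference(self, data: dict):
        return data


class MyAnnotationToTask(AnnotationToTask):
    def convert_data_from_inference(self, data: dict):
        # Hypothetical mapping: reshape a model's prediction list into
        # a bounding_box annotation structure before submission.
        return {
            'bounding_box': [
                {'classification': p['label'], 'data': p['box']}
                for p in data.get('predictions', [])
            ]
        }
```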
synapse_sdk/plugins/categories/upload/actions/upload.py
CHANGED

@@ -203,7 +203,18 @@ class UploadAction(Action):
             'proportion': 40,
         },
     }
-    metrics_categories = {
+    metrics_categories = {
+        'data_file': {
+            'stand_by': 0,
+            'failed': 0,
+            'success': 0,
+        },
+        'data_unit': {
+            'stand_by': 0,
+            'failed': 0,
+            'success': 0,
+        },
+    }

     def get_uploader(self, path, file_specification, organized_files):
         """Get uploader from entrypoint."""
synapse_sdk/plugins/models.py
CHANGED

@@ -128,7 +128,10 @@ class Run:
         self.set_logger()

     def set_logger(self):
-        kwargs = {
+        kwargs = {
+            'progress_categories': self.context['progress_categories'],
+            'metrics_categories': self.context['metrics_categories'],
+        }

         if self.job_id:
             self.logger = BackendLogger(self.client, self.job_id, **kwargs)
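This is the plumbing that makes the per-action `metrics_categories` declarations above reach the logger: Run.set_logger now forwards both category maps from the run context into the logger kwargs. A minimal sketch of that flow, where only the two keyword names come from the diff and the logger stub is an assumption for illustration:

```python
# Sketch: how an action's category declarations reach the logger.
class StubLogger:  # stand-in for BackendLogger / console logger
    def __init__(self, progress_categories=None, metrics_categories=None):
        self.progress_categories = progress_categories
        self.metrics_categories = metrics_categories

context = {
    'progress_categories': {'annotate_task_data': {'proportion': 100}},
    'metrics_categories': {'annotate_task_data': {'stand_by': 0, 'failed': 0, 'success': 0}},
}
kwargs = {
    'progress_categories': context['progress_categories'],
    'metrics_categories': context['metrics_categories'],
}
logger = StubLogger(**kwargs)
```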
synapse_sdk/utils/converters/__init__.py
CHANGED

@@ -48,7 +48,7 @@ class BaseConverter:
         else:
             required_dirs = {
                 'json': os.path.join(self.root_dir, 'json'),
-                '
+                'original_files': os.path.join(self.root_dir, 'original_files'),
             }
             self._validate_required_dirs(required_dirs)
             splits['root'] = self.root_dir

@@ -60,10 +60,10 @@ class BaseConverter:
         if split:
             split_dir = os.path.join(self.root_dir, split)
             self.json_dir = os.path.join(split_dir, 'json')
-            self.original_file_dir = os.path.join(split_dir, '
+            self.original_file_dir = os.path.join(split_dir, 'original_files')
         else:
             self.json_dir = os.path.join(self.root_dir, 'json')
-            self.original_file_dir = os.path.join(self.root_dir, '
+            self.original_file_dir = os.path.join(self.root_dir, 'original_files')


 class FromDMConverter(BaseConverter):

@@ -185,7 +185,7 @@ class ToDMConverter(BaseConverter):
         for split, img_dict in self.converted_data.items():
             split_dir = os.path.join(output_dir, split)
             json_dir = os.path.join(split_dir, 'json')
-            original_file_dir = os.path.join(split_dir, '
+            original_file_dir = os.path.join(split_dir, 'original_files')
             self.ensure_dir(json_dir)
             self.ensure_dir(original_file_dir)
             for img_filename, (dm_json, img_src_path) in img_dict.items():

@@ -196,7 +196,7 @@ class ToDMConverter(BaseConverter):
                 shutil.copy(img_src_path, os.path.join(original_file_dir, img_filename))
         else:
             json_dir = os.path.join(output_dir, 'json')
-            original_file_dir = os.path.join(output_dir, '
+            original_file_dir = os.path.join(output_dir, 'original_files')
             self.ensure_dir(json_dir)
             self.ensure_dir(original_file_dir)
             for img_filename, (dm_json, img_src_path) in self.converted_data.items():

@@ -205,4 +205,5 @@ class ToDMConverter(BaseConverter):
                 json.dump(dm_json, jf, indent=2, ensure_ascii=False)
                 if img_src_path and os.path.exists(img_src_path):
                     shutil.copy(img_src_path, os.path.join(original_file_dir, img_filename))
+
         print(f'[DM] Data exported to {output_dir}')
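All five hunks fix paths that were truncated in the old source so the converters now consistently use an `original_files` directory alongside `json`. A small sketch of the directory check this implies, mirroring the `required_dirs` validation above (the root path is hypothetical):

```python
import os

root_dir = '/data/dm_dataset'  # hypothetical dataset root
# Mirrors the required_dirs check in BaseConverter after this change:
# <root>/json holds one JSON per item, <root>/original_files the sources.
required_dirs = {
    'json': os.path.join(root_dir, 'json'),
    'original_files': os.path.join(root_dir, 'original_files'),
}
missing = [name for name, path in required_dirs.items() if not os.path.isdir(path)]
if missing:
    raise ValueError(f'Missing required directories: {missing}')
```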
synapse_sdk/utils/converters/coco/from_dm.py
CHANGED

@@ -11,7 +11,7 @@ from tqdm import tqdm
 from synapse_sdk.utils.converters import FromDMConverter


-class
+class FromDMToCOCOConverter(FromDMConverter):
     """Convert DM (Data Manager) format annotations to COCO format.
     Designed for easy future extensibility to handle various data types.
     """

@@ -242,11 +242,11 @@ class DMToCOCOConverter(FromDMConverter):
             for split, coco_data in self.converted_data.items():
                 split_output_dir = os.path.join(output_dir, split)
                 self._save_annotations_and_images(
-                    coco_data, split_output_dir, os.path.join(self.root_dir, split, '
+                    coco_data, split_output_dir, os.path.join(self.root_dir, split, 'original_files')
                 )
         else:
             self._save_annotations_and_images(
-                self.converted_data, output_dir, os.path.join(self.root_dir, '
+                self.converted_data, output_dir, os.path.join(self.root_dir, 'original_files')
            )

     def _save_annotations_and_images(self, coco_data, output_dir, original_file_dir):
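Note the class is now FromDMToCOCOConverter, matching the new YOLO converter's naming, while the second hunk's context header still shows the old DMToCOCOConverter name. Code that imported the old name would need updating, roughly like this (the import path follows the RECORD entry; adjust if the package re-exports the class elsewhere):

```python
# Assumed import path after the rename.
from synapse_sdk.utils.converters.coco.from_dm import FromDMToCOCOConverter
```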
synapse_sdk/utils/converters/yolo/__init__.py
File without changes (new empty package marker)

synapse_sdk/utils/converters/yolo/from_dm.py
ADDED

@@ -0,0 +1,296 @@
+import json
+import os
+import shutil
+from glob import glob
+from typing import Any, Dict, List, Optional, Union
+
+from PIL import Image
+
+from synapse_sdk.utils.converters import FromDMConverter
+
+
+class FromDMToYOLOConverter(FromDMConverter):
+    """Convert DM dataset format to YOLO format."""
+
+    IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.bmp']
+
+    def __init__(self, root_dir: str, is_categorized_dataset: bool = False):
+        super().__init__(root_dir, is_categorized_dataset)
+        self.class_names: List[str] = []
+        self.class_map: Dict[str, int] = {}
+        self.dataset_yaml_content: str = ''
+
+    @staticmethod
+    def get_all_classes(list_of_dirs: List[str]) -> List[str]:
+        """Collect all unique class names from all splits or the root."""
+        classes = set()
+        for d in list_of_dirs:
+            if not d or not os.path.isdir(d):
+                continue
+            json_dir = os.path.join(d, 'json') if os.path.isdir(os.path.join(d, 'json')) else d
+            for jfile in glob(os.path.join(json_dir, '*.json')):
+                with open(jfile, encoding='utf-8') as jf:
+                    data = json.load(jf)
+                for img_ann in data['images']:
+                    for k in ['bounding_box', 'polygon', 'keypoint']:
+                        if k in img_ann:
+                            for ann in img_ann[k]:
+                                classes.add(ann['classification'])
+        return sorted(list(classes))
+
+    @staticmethod
+    def get_image_size(image_path: str):
+        with Image.open(image_path) as img:
+            return img.size
+
+    @staticmethod
+    def polygon_to_bbox(polygon: list):
+        """Convert polygon points to bounding box [cx, cy, w, h]."""
+        if not polygon or len(polygon) == 0:
+            return None
+        xs = [p[0] for p in polygon]
+        ys = [p[1] for p in polygon]
+        x_min, y_min = min(xs), min(ys)
+        x_max, y_max = max(xs), max(ys)
+        cx = (x_min + x_max) / 2
+        cy = (y_min + y_max) / 2
+        w = x_max - x_min
+        h = y_max - y_min
+        return [cx, cy, w, h]
+
+    @staticmethod
+    def keypoints_to_yolo_string(keypoints: list, width: int, height: int):
+        """Convert keypoints to normalized YOLO keypoint format string (x1 y1 v1 x2 y2 v2 ...)."""
+        kp_strs = []
+        for kp in keypoints:
+            # kp: [x, y, visible]
+            x, y, v = kp
+            x = x / width
+            y = y / height
+            kp_strs.extend([f'{x:.6f}', f'{y:.6f}', str(v)])
+        return ' '.join(kp_strs)
+
+    def _convert_split_dir(self, split_dir: str, split_name: str) -> List[Dict[str, Any]]:
+        """Convert one split folder to YOLO format."""
+        if not self.class_map:
+            raise ValueError('class_map is not initialized. Ensure get_all_classes() is called before this method.')
+
+        json_dir = os.path.join(split_dir, 'json')
+        img_dir = os.path.join(split_dir, 'original_files')
+        entries = []
+        for jfile in glob(os.path.join(json_dir, '*.json')):
+            base = os.path.splitext(os.path.basename(jfile))[0]
+            found_img = None
+            for ext in self.IMG_EXTENSIONS:
+                img_path = os.path.join(img_dir, base + ext)
+                if os.path.exists(img_path):
+                    found_img = img_path
+                    break
+            if not found_img:
+                print(f'[{split_name}] Image for {base} not found, skipping.')
+                continue
+            width, height = self.get_image_size(found_img)
+            with open(jfile, encoding='utf-8') as jf:
+                data = json.load(jf)
+            img_ann = data['images'][0]
+            label_lines = []
+
+            # bbox
+            if 'bounding_box' in img_ann:
+                for box in img_ann['bounding_box']:
+                    cidx = self.class_map[box['classification']]
+                    x, y, w, h = box['data']
+                    cx = x + w / 2
+                    cy = y + h / 2
+                    cx /= width
+                    cy /= height
+                    w /= width
+                    h /= height
+                    label_lines.append(f'{cidx} {cx:.6f} {cy:.6f} {w:.6f} {h:.6f}')
+
+            # polygon
+            if 'polygon' in img_ann:
+                for poly in img_ann['polygon']:
+                    cidx = self.class_map[poly['classification']]
+                    bbox = self.polygon_to_bbox(poly['data'])
+                    if bbox is None:
+                        print(f'[{split_name}] Polygon for {base} is empty, skipping this polygon.')
+                        continue
+                    cx, cy, w, h = bbox
+                    cx /= width
+                    cy /= height
+                    w /= width
+                    h /= height
+                    label_lines.append(f'{cidx} {cx:.6f} {cy:.6f} {w:.6f} {h:.6f}')
+
+            # keypoint
+            if 'keypoint' in img_ann:
+                for kp in img_ann['keypoint']:
+                    cidx = self.class_map[kp['classification']]
+                    # Assume bounding box exists for keypoint, or fallback to full image
+                    if 'bounding_box' in kp:
+                        x, y, w, h = kp['bounding_box']
+                        cx = x + w / 2
+                        cy = y + h / 2
+                        cx /= width
+                        cy /= height
+                        w /= width
+                        h /= height
+                    else:
+                        # fallback to the whole image
+                        cx, cy, w, h = 0.5, 0.5, 1.0, 1.0
+                    kp_str = self.keypoints_to_yolo_string(kp['data'], width, height)
+                    label_lines.append(f'{cidx} {cx:.6f} {cy:.6f} {w:.6f} {h:.6f} {kp_str}')
+
+            entries.append({
+                'img_path': found_img,
+                'img_name': os.path.basename(found_img),
+                'label_name': base + '.txt',
+                'label_lines': label_lines,
+            })
+        return entries
+
+    def _convert_root_dir(self) -> List[Dict[str, Any]]:
+        """Convert non-categorized dataset to YOLO format."""
+        json_dir = os.path.join(self.root_dir, 'json')
+        img_dir = os.path.join(self.root_dir, 'original_files')
+        entries = []
+        for jfile in glob(os.path.join(json_dir, '*.json')):
+            base = os.path.splitext(os.path.basename(jfile))[0]
+            found_img = None
+            for ext in self.IMG_EXTENSIONS:
+                img_path = os.path.join(img_dir, base + ext)
+                if os.path.exists(img_path):
+                    found_img = img_path
+                    break
+            if not found_img:
+                print(f'[single] Image for {base} not found, skipping.')
+                continue
+            width, height = self.get_image_size(found_img)
+            with open(jfile, encoding='utf-8') as jf:
+                data = json.load(jf)
+            img_ann = data['images'][0]
+            label_lines = []
+
+            # bbox
+            if 'bounding_box' in img_ann:
+                for box in img_ann['bounding_box']:
+                    cidx = self.class_map[box['classification']]
+                    x, y, w, h = box['data']
+                    cx = x + w / 2
+                    cy = y + h / 2
+                    cx /= width
+                    cy /= height
+                    w /= width
+                    h /= height
+                    label_lines.append(f'{cidx} {cx:.6f} {cy:.6f} {w:.6f} {h:.6f}')
+
+            # polygon
+            if 'polygon' in img_ann:
+                for poly in img_ann['polygon']:
+                    cidx = self.class_map[poly['classification']]
+                    bbox = self.polygon_to_bbox(poly['data'])
+                    if bbox is None:
+                        print(f'[single] Polygon for {base} is empty, skipping this polygon.')
+                        continue
+                    cx, cy, w, h = bbox
+                    cx /= width
+                    cy /= height
+                    w /= width
+                    h /= height
+                    label_lines.append(f'{cidx} {cx:.6f} {cy:.6f} {w:.6f} {h:.6f}')
+
+            # keypoint
+            if 'keypoint' in img_ann:
+                for kp in img_ann['keypoint']:
+                    cidx = self.class_map[kp['classification']]
+                    if 'bounding_box' in kp:
+                        x, y, w, h = kp['bounding_box']
+                        cx = x + w / 2
+                        cy = y + h / 2
+                        cx /= width
+                        cy /= height
+                        w /= width
+                        h /= height
+                    else:
+                        cx, cy, w, h = 0.5, 0.5, 1.0, 1.0
+                    kp_str = self.keypoints_to_yolo_string(kp['data'], width, height)
+                    label_lines.append(f'{cidx} {cx:.6f} {cy:.6f} {w:.6f} {h:.6f} {kp_str}')
+
+            entries.append({
+                'img_path': found_img,
+                'img_name': os.path.basename(found_img),
+                'label_name': base + '.txt',
+                'label_lines': label_lines,
+            })
+        return entries
+
+    def convert(self) -> Union[Dict[str, List[Dict[str, Any]]], List[Dict[str, Any]]]:
+        """Convert DM format to YOLO format (categorized split or not).
+
+        Returns:
+            - If categorized: dict {split: list of entries}
+            - If not: list of entries
+        """
+        # Prepare dataset.yaml content (for save_to_folder)
+        yaml_lines = [
+            'path: ' + self.root_dir,
+        ]
+
+        if self.is_categorized_dataset:
+            splits = self._validate_splits(required_splits=['train', 'valid'], optional_splits=['test'])
+            self.class_names = self.get_all_classes(list(splits.values()))
+            self.class_map = {name: idx for idx, name in enumerate(self.class_names)}
+            result = {}
+            for split, split_dir in splits.items():
+                result[split] = self._convert_split_dir(split_dir, split)
+            self.converted_data = result
+
+            yaml_lines.append('train: train/images')
+            yaml_lines.append('val: valid/images')
+            if 'test' in splits:
+                yaml_lines.append('test: test/images')
+        else:
+            self._validate_splits(required_splits=[], optional_splits=[])
+            self.class_names = self.get_all_classes([self.root_dir])
+            self.class_map = {name: idx for idx, name in enumerate(self.class_names)}
+            result = self._convert_root_dir()
+            self.converted_data = result
+
+        yaml_lines += ['', f'nc: {len(self.class_names)}', f'names: {self.class_names}', '']
+        self.dataset_yaml_content = '\n'.join(yaml_lines)
+        return result
+
+    def save_to_folder(self, output_dir: Optional[str] = None) -> None:
+        """Save converted YOLO data to the specified folder."""
+        output_dir = output_dir or self.root_dir
+        self.ensure_dir(output_dir)
+        if self.converted_data is None:
+            self.converted_data = self.convert()
+
+        if self.is_categorized_dataset:
+            for split, entries in self.converted_data.items():
+                split_imgs = os.path.join(output_dir, split, 'images')
+                split_labels = os.path.join(output_dir, split, 'labels')
+                self.ensure_dir(split_imgs)
+                self.ensure_dir(split_labels)
+                for entry in entries:
+                    shutil.copy(entry['img_path'], os.path.join(split_imgs, entry['img_name']))
+                    with open(os.path.join(split_labels, entry['label_name']), 'w', encoding='utf-8') as f:
+                        f.write('\n'.join(entry['label_lines']))
+        else:
+            imgs_dir = os.path.join(output_dir, 'images')
+            labels_dir = os.path.join(output_dir, 'labels')
+            self.ensure_dir(imgs_dir)
+            self.ensure_dir(labels_dir)
+            for entry in self.converted_data:
+                shutil.copy(entry['img_path'], os.path.join(imgs_dir, entry['img_name']))
+                with open(os.path.join(labels_dir, entry['label_name']), 'w', encoding='utf-8') as f:
+                    f.write('\n'.join(entry['label_lines']))
+
+        with open(os.path.join(output_dir, 'dataset.yaml'), 'w', encoding='utf-8') as f:
+            f.write(self.dataset_yaml_content)
+        with open(os.path.join(output_dir, 'classes.txt'), 'w', encoding='utf-8') as f:
+            for c in self.class_names:
+                f.write(f'{c}\n')
+        print(f'YOLO data exported to {output_dir}')
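The new converter follows the same convert/save interface as the COCO converter. A hedged usage sketch with made-up paths; the expected input layout is `<root>/{train,valid[,test]}/{json,original_files}` when categorized, or `<root>/{json,original_files}` otherwise:

```python
# Hypothetical usage of the new YOLO converter; paths are made up.
from synapse_sdk.utils.converters.yolo.from_dm import FromDMToYOLOConverter

converter = FromDMToYOLOConverter('/data/dm_dataset', is_categorized_dataset=True)
entries = converter.convert()              # also prepares dataset.yaml content
converter.save_to_folder('/data/yolo_dataset')

# Each label line is '<class_idx> <cx> <cy> <w> <h>', normalized by image
# size. For example, a 100x50 box at (x=200, y=100) in a 640x480 image
# becomes cx=(200+50)/640, cy=(100+25)/480, w=100/640, h=50/480.
```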
{synapse_sdk-1.0.0a70.dist-info → synapse_sdk-1.0.0a72.dist-info}/RECORD
CHANGED

@@ -4,7 +4,7 @@ locale/ko/LC_MESSAGES/messages.mo,sha256=7HJEJA0wKlN14xQ5VF4FCNet54tjw6mfWYj3IaB
 locale/ko/LC_MESSAGES/messages.po,sha256=TFii_RbURDH-Du_9ZQf3wNh-2briGk1IqY33-9GKrMU,1126
 synapse_sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/i18n.py,sha256=VXMR-Zm_1hTAg9iPk3YZNNq-T1Bhx1J2fEtRT6kyYbg,766
-synapse_sdk/loggers.py,sha256=
+synapse_sdk/loggers.py,sha256=xK48h3ZaDDZLaF-qsdnv1-6-4vw_cYlgpSCKHYUQw1g,6549
 synapse_sdk/types.py,sha256=khzn8KpgxFdn1SrpbcuX84m_Md1Mz_HIoUoPq8uok40,698
 synapse_sdk/cli/__init__.py,sha256=RLZwqbtoC90-tw_2ErakY8-GxSNf6Ms2lNePBd_y-9U,9694
 synapse_sdk/cli/config.py,sha256=ooIHI7ZDA1yLtisxk_Xn1ptz4sM5j7TDivxaPvBUONE,11886

@@ -108,7 +108,7 @@ synapse_sdk/devtools/web/src/views/PluginView.jsx,sha256=_-V8elSiEtsvKECeROtQopS
 synapse_sdk/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/enums.py,sha256=ibixwqA3sCNSriG1jAtL54JQc_Zwo3MufwYUqGhVncc,523
 synapse_sdk/plugins/exceptions.py,sha256=Qs7qODp_RRLO9y2otU2T4ryj5LFwIZODvSIXkAh91u0,691
-synapse_sdk/plugins/models.py,sha256=
+synapse_sdk/plugins/models.py,sha256=AKZfVT6hsVEklcEDnHwoVAwvLxydMibfeJetug3Qk0U,4738
 synapse_sdk/plugins/upload.py,sha256=VJOotYMayylOH0lNoAGeGHRkLdhP7jnC_A0rFQMvQpQ,3228
 synapse_sdk/plugins/utils.py,sha256=4_K6jIl0WrsXOEhFp94faMOriSsddOhIiaXcawYYUUA,3300
 synapse_sdk/plugins/categories/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -125,7 +125,7 @@ synapse_sdk/plugins/categories/data_validation/templates/plugin/validation.py,sh
 synapse_sdk/plugins/categories/export/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/categories/export/enums.py,sha256=gtyngvQ1DKkos9iKGcbecwTVQQ6sDwbrBPSGPNb5Am0,127
 synapse_sdk/plugins/categories/export/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-synapse_sdk/plugins/categories/export/actions/export.py,sha256=
+synapse_sdk/plugins/categories/export/actions/export.py,sha256=buCpjRPZ-QPfEANKrSopKjCPHadjF1_A0eAkoezaAFA,10688
 synapse_sdk/plugins/categories/export/templates/config.yaml,sha256=N7YmnFROb3s3M35SA9nmabyzoSb5O2t2TRPicwFNN2o,56
 synapse_sdk/plugins/categories/export/templates/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/categories/export/templates/plugin/export.py,sha256=zG8mSn7ZGIj8cttWmb7GEPcGgQRbZ97brJCzkuK7RP8,6106

@@ -153,11 +153,11 @@ synapse_sdk/plugins/categories/post_annotation/templates/plugin/post_annotation.
 synapse_sdk/plugins/categories/pre_annotation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/categories/pre_annotation/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/categories/pre_annotation/actions/pre_annotation.py,sha256=6ib3RmnGrjpsQ0e_G-mRH1lfFunQ3gh2M831vuDn7HU,344
-synapse_sdk/plugins/categories/pre_annotation/actions/to_task.py,sha256=
+synapse_sdk/plugins/categories/pre_annotation/actions/to_task.py,sha256=7w7CLHrsOjVC_qBjaPA1Vz2L0jyRFNsg05pS72cAr7Y,23331
 synapse_sdk/plugins/categories/pre_annotation/templates/config.yaml,sha256=4SKJe2gF8UCi3oD0kV8B4M2MkYcosz5GZzzAjAg3slc,508
 synapse_sdk/plugins/categories/pre_annotation/templates/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/categories/pre_annotation/templates/plugin/pre_annotation.py,sha256=HBHxHuv2gMBzDB2alFfrzI_SZ1Ztk6mo7eFbR5GqHKw,106
-synapse_sdk/plugins/categories/pre_annotation/templates/plugin/to_task.py,sha256=
+synapse_sdk/plugins/categories/pre_annotation/templates/plugin/to_task.py,sha256=0j01vFZYkaAw8mtf6HYfun3IUDlryTexqvss_JZtc-Y,618
 synapse_sdk/plugins/categories/smart_tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/categories/smart_tool/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/categories/smart_tool/actions/auto_label.py,sha256=fHiqA8ntmzjs2GMVMuByR7Clh2zhLie8OPF9B8OmwxM,1279

@@ -166,7 +166,7 @@ synapse_sdk/plugins/categories/smart_tool/templates/plugin/__init__.py,sha256=47
 synapse_sdk/plugins/categories/smart_tool/templates/plugin/auto_label.py,sha256=eevNg0nOcYFR4z_L_R-sCvVOYoLWSAH1jwDkAf3YCjY,320
 synapse_sdk/plugins/categories/upload/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/categories/upload/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-synapse_sdk/plugins/categories/upload/actions/upload.py,sha256=
+synapse_sdk/plugins/categories/upload/actions/upload.py,sha256=cO0Hl6CyQm3MLVxmy_3LN-X58flCJUh_fJjJmh7Qg3U,18721
 synapse_sdk/plugins/categories/upload/templates/config.yaml,sha256=kwHNWHFYbzDi1mEh40KozatPZbZGH44dlP0t0J7ejJw,483
 synapse_sdk/plugins/categories/upload/templates/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/plugins/categories/upload/templates/plugin/upload.py,sha256=IZU4sdSMSLKPCtlNqF7DP2howTdYR6hr74HCUZsGdPk,1559

@@ -192,10 +192,12 @@ synapse_sdk/utils/http.py,sha256=yRxYfru8tMnBVeBK-7S0Ga13yOf8oRHquG5e8K_FWcI,475
 synapse_sdk/utils/module_loading.py,sha256=chHpU-BZjtYaTBD_q0T7LcKWtqKvYBS4L0lPlKkoMQ8,1020
 synapse_sdk/utils/network.py,sha256=WI8qn6KlKpHdMi45V57ofKJB8zusJrbQsxT74LwVfsY,1000
 synapse_sdk/utils/string.py,sha256=rEwuZ9SAaZLcQ8TYiwNKr1h2u4CfnrQx7SUL8NWmChg,216
-synapse_sdk/utils/converters/__init__.py,sha256=
+synapse_sdk/utils/converters/__init__.py,sha256=jy-BxGUe1NOxQ0g8s9HNoPSYM2Xeq4Rd0vk8HDvFOhs,9926
 synapse_sdk/utils/converters/coco/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-synapse_sdk/utils/converters/coco/from_dm.py,sha256=
+synapse_sdk/utils/converters/coco/from_dm.py,sha256=78aJ_O2_hmkQQ96nrjHY38roETWfBZB8GRHDn7qKEls,9626
 synapse_sdk/utils/converters/coco/to_dm.py,sha256=Ve8LrcKVlzNysam3fidcgP5fdm0_UGbBgSPoj2dT_JA,4906
+synapse_sdk/utils/converters/yolo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+synapse_sdk/utils/converters/yolo/from_dm.py,sha256=e9u4CM4gWnh9_EzYiA-EqL8RICZo5NafWXzaqnrIrdU,12600
 synapse_sdk/utils/pydantic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 synapse_sdk/utils/pydantic/config.py,sha256=1vYOcUI35GslfD1rrqhFkNXXJOXt4IDqOPSx9VWGfNE,123
 synapse_sdk/utils/pydantic/errors.py,sha256=0v0T12eQBr1KrFiEOBu6KMaPK4aPEGEC6etPJGoR5b4,1061

@@ -207,9 +209,9 @@ synapse_sdk/utils/storage/providers/gcp.py,sha256=i2BQCu1Kej1If9SuNr2_lEyTcr5M_n
 synapse_sdk/utils/storage/providers/http.py,sha256=2DhIulND47JOnS5ZY7MZUex7Su3peAPksGo1Wwg07L4,5828
 synapse_sdk/utils/storage/providers/s3.py,sha256=ZmqekAvIgcQBdRU-QVJYv1Rlp6VHfXwtbtjTSphua94,2573
 synapse_sdk/utils/storage/providers/sftp.py,sha256=_8s9hf0JXIO21gvm-JVS00FbLsbtvly4c-ETLRax68A,1426
-synapse_sdk-1.0.
-synapse_sdk-1.0.
-synapse_sdk-1.0.
-synapse_sdk-1.0.
-synapse_sdk-1.0.
-synapse_sdk-1.0.
+synapse_sdk-1.0.0a72.dist-info/licenses/LICENSE,sha256=bKzmC5YAg4V1Fhl8OO_tqY8j62hgdncAkN7VrdjmrGk,1101
+synapse_sdk-1.0.0a72.dist-info/METADATA,sha256=1tbrM3SJ6aPgNShk_AEYZNYLZ2i8iencEXohtAvoVG0,1130
+synapse_sdk-1.0.0a72.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+synapse_sdk-1.0.0a72.dist-info/entry_points.txt,sha256=VNptJoGoNJI8yLXfBmhgUefMsmGI0m3-0YoMvrOgbxo,48
+synapse_sdk-1.0.0a72.dist-info/top_level.txt,sha256=ytgJMRK1slVOKUpgcw3LEyHHP7S34J6n_gJzdkcSsw8,12
+synapse_sdk-1.0.0a72.dist-info/RECORD,,
The remaining dist-info files (WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt) are unchanged between a70 and a72.