synapse-sdk 1.0.0a74__py3-none-any.whl → 1.0.0a75__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of synapse-sdk might be problematic.

@@ -15,10 +15,6 @@ class AnnotationClientMixin(BaseClient):
         path = f'tasks/{pk}/annotate_task_data/'
         return self._put(path, data=data)
 
-    def patch_task(self, pk, data):
-        path = f'tasks/{pk}/'
-        return self._patch(path, data=data)
-
     def get_task_tag(self, pk):
         path = f'task_tags/{pk}/'
         return self._get(path)
@@ -17,6 +17,14 @@ const sidebars: SidebarsConfig = {
     'introduction',
     'installation',
     'quickstart',
+    {
+      type: 'category',
+      label: 'Features',
+      items: [
+        'features/features',
+        'features/converters/converters',
+      ],
+    },
     {
       type: 'category',
       label: 'API Reference',
@@ -13,6 +13,7 @@ from synapse_sdk.plugins.categories.base import Action
 from synapse_sdk.plugins.categories.decorators import register_action
 from synapse_sdk.plugins.enums import PluginCategory, RunMethod
 from synapse_sdk.plugins.models import Run
+from synapse_sdk.shared.enums import Context
 from synapse_sdk.utils.pydantic.validators import non_blank
 
 
@@ -26,6 +27,14 @@ class AnnotateTaskDataStatus(str, Enum):
     FAILED = 'failed'
 
 
+class CriticalError(Exception):
+    """Critical error."""
+
+    def __init__(self, message: str = 'Critical error occured while processing task'):
+        self.message = message
+        super().__init__(self.message)
+
+
 class ToTaskRun(Run):
     class AnnotateTaskDataLog(BaseModel):
         """Log model for annotate task data."""
@@ -278,6 +287,12 @@ class ToTaskAction(Action):
                 self._update_metrics(total_tasks, success_count, failed_count)
                 self.run.set_progress(current_progress, total_tasks, category='annotate_task_data')
 
+            except CriticalError:
+                self.run.log_message(
+                    'Critical error occured while processing task. Stopping the job.', context=Context.DANGER.value
+                )
+                return
+
             except Exception as e:
                 self.run.log_message(f'Failed to process task {task_id}: {str(e)}')
                 self.run.log_annotate_task_data({'task_id': task_id, 'error': str(e)}, AnnotateTaskDataStatus.FAILED)
@@ -427,48 +442,43 @@ class ToTaskAction(Action):
         assert isinstance(self.run, ToTaskRun)
 
         try:
-            # Get pre-processor information
+            # Validate pre-processor ID
             pre_processor_id = self.params.get('pre_processor')
-
             if not pre_processor_id:
                 error_msg = 'Pre-processor ID is required for inference annotation method'
                 self.run.log_message(f'{error_msg} for task {task_id}')
                 self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
                 return {'success': False, 'error': error_msg}
 
-            # Call inference pre processor if specified.
-            pre_processor = client.get_plugin_release(pre_processor_id)
-            pre_processor_code = pre_processor['config']['code']
-            pre_processor_version = pre_processor['version']
-
-            # Extract task data for inference
-            data_unit = task.get('data_unit', {})
-            files = data_unit.get('files', {})
-
-            # Find primary image URL from files
-            primary_file_url = ''
-            for file_info in files.values():
-                if file_info.get('is_primary') and file_info.get('url'):
-                    primary_file_url = file_info['url']
-                    break
+            # Get pre-processor information
+            pre_processor_info = self._get_pre_processor_info(client, pre_processor_id)
+            if not pre_processor_info['success']:
+                return pre_processor_info
+
+            pre_processor_code = pre_processor_info['code']
+            pre_processor_version = pre_processor_info['version']
+
+            # Ensure pre-processor is running
+            pre_processor_status = self._ensure_pre_processor_running(client, pre_processor_code)
+            if not pre_processor_status['success']:
+                return pre_processor_status
+
+            # Extract primary file URL from task data
+            primary_file_url = self._extract_primary_file_url(task)
+            if not primary_file_url:
+                error_msg = 'Primary image URL not found in task data'
+                self.run.log_message(f'{error_msg} for task {task_id}')
+                self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
+                return {'success': False, 'error': error_msg}
 
-            pre_processor_params = self.params.get('pre_processor_params', {})
-            pre_processor_params['image_path'] = primary_file_url
-            inference_payload = {
-                'agent': 1,
-                'action': 'inference',
-                'version': pre_processor_version,
-                'params': {
-                    'model': self.params.get('model'),
-                    'method': 'post',
-                    'json': pre_processor_params,
-                },
-            }
-            inference_data = client.run_plugin(pre_processor_code, inference_payload)
+            # Run inference
+            inference_result = self._run_inference(client, pre_processor_code, pre_processor_version, primary_file_url)
+            if not inference_result['success']:
+                return inference_result
 
-            # Convert data to task object
+            # Convert and submit inference data
             annotation_to_task = self.entrypoint(self.run)
-            converted_result = annotation_to_task.convert_data_from_inference(inference_data)
+            converted_result = annotation_to_task.convert_data_from_inference(inference_result['data'])
 
             # Submit inference annotation data
             client.annotate_task_data(task_id, data={'action': 'submit', 'data': converted_result})
@@ -485,6 +495,172 @@ class ToTaskAction(Action):
             self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
             return {'success': False, 'error': error_msg}
 
+    def _get_pre_processor_info(self, client: BackendClient, pre_processor_id: int) -> Dict[str, Any]:
+        """Get pre-processor information from the backend.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            pre_processor_id (int): The pre-processor ID.
+
+        Returns:
+            Dict[str, Any]: Result dictionary with pre-processor info or error.
+        """
+        try:
+            pre_processor_response = client.get_plugin_release(pre_processor_id)
+            if isinstance(pre_processor_response, str):
+                return {'success': False, 'error': 'Invalid pre-processor response received'}
+
+            pre_processor: Dict[str, Any] = pre_processor_response
+            config = pre_processor.get('config', {})
+            code = config.get('code')
+            version = pre_processor.get('version')
+
+            if not code or not version:
+                return {'success': False, 'error': 'Invalid pre-processor configuration'}
+
+            return {'success': True, 'code': code, 'version': version}
+        except Exception as e:
+            return {'success': False, 'error': f'Failed to get pre-processor info: {str(e)}'}
+
+    def _ensure_pre_processor_running(self, client: BackendClient, pre_processor_code: str) -> Dict[str, Any]:
+        """Ensure the pre-processor is running, restart if necessary.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            pre_processor_code (str): The pre-processor code.
+
+        Returns:
+            Dict[str, Any]: Result dictionary indicating success or failure.
+        """
+        try:
+            # Check if pre-processor is running
+            serve_applications_response = client.list_serve_applications(params={'plugin_code': pre_processor_code})
+            if isinstance(serve_applications_response, str):
+                return {'success': False, 'error': 'Invalid serve applications response'}
+
+            # Handle the response properly - it should be a dict with 'results' key
+            if not isinstance(serve_applications_response, dict):
+                return {'success': False, 'error': 'Unexpected serve applications response format'}
+
+            serve_applications: Dict[str, Any] = serve_applications_response
+            results = serve_applications.get('results', [])
+            running_serve_apps = [app for app in results if isinstance(app, dict) and app.get('status') == 'RUNNING']
+
+            # If not running, restart the pre-processor
+            if not running_serve_apps:
+                restart_result = self._restart_pre_processor(client, pre_processor_code)
+                if not restart_result['success']:
+                    return restart_result
+
+                # Verify restart was successful
+                serve_applications_response = client.list_serve_applications(params={'plugin_code': pre_processor_code})
+                if isinstance(serve_applications_response, str):
+                    return {'success': False, 'error': 'Failed to verify pre-processor restart'}
+
+                if not isinstance(serve_applications_response, dict):
+                    return {'success': False, 'error': 'Unexpected serve applications response format after restart'}
+
+                serve_applications: Dict[str, Any] = serve_applications_response
+                results = serve_applications.get('results', [])
+                running_serve_apps = [
+                    app for app in results if isinstance(app, dict) and app.get('status') == 'RUNNING'
+                ]
+
+                if not running_serve_apps:
+                    return {'success': False, 'error': 'Failed to restart pre-processor'}
+
+            return {'success': True}
+        except Exception as e:
+            return {'success': False, 'error': f'Failed to ensure pre-processor running: {str(e)}'}
+
+    def _restart_pre_processor(self, client: BackendClient, pre_processor_code: str) -> Dict[str, Any]:
+        """Restart the pre-processor.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            pre_processor_code (str): The pre-processor code.
+
+        Returns:
+            Dict[str, Any]: Result dictionary indicating success or failure.
+        """
+        try:
+            if not self.config:
+                return {'success': False, 'error': 'Configuration not available'}
+
+            inference_options = self.config.get('inference_options', {})
+            serve_application_deployment_payload = {
+                'agent': self.params.get('agent') if self.params else None,
+                'action': 'deployment',
+                'params': {
+                    'num_cpus': inference_options.get('required_cpu_count', 2),
+                    'num_gpus': inference_options.get('required_gpu_count', 1),
+                },
+                'debug': True,
+            }
+
+            deployment_result = client.run_plugin(pre_processor_code, serve_application_deployment_payload)
+            if not deployment_result:
+                return {'success': False, 'error': 'Failed to restart pre-processor'}
+
+            return {'success': True}
+        except Exception as e:
+            return {'success': False, 'error': f'Failed to restart pre-processor: {str(e)}'}
+
+    def _extract_primary_file_url(self, task: Dict[str, Any]) -> Optional[str]:
+        """Extract the primary file URL from task data.
+
+        Args:
+            task (Dict[str, Any]): The task data.
+
+        Returns:
+            Optional[str]: The primary file URL if found, None otherwise.
+        """
+        data_unit = task.get('data_unit', {})
+        files = data_unit.get('files', {})
+
+        for file_info in files.values():
+            if isinstance(file_info, dict) and file_info.get('is_primary') and file_info.get('url'):
+                return file_info['url']
+
+        return None
+
+    def _run_inference(
+        self, client: BackendClient, pre_processor_code: str, pre_processor_version: str, primary_file_url: str
+    ) -> Dict[str, Any]:
+        """Run inference using the pre-processor.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            pre_processor_code (str): The pre-processor code.
+            pre_processor_version (str): The pre-processor version.
+            primary_file_url (str): The primary image URL.
+
+        Returns:
+            Dict[str, Any]: Result dictionary with inference data or error.
+        """
+        try:
+            if not self.params:
+                return {'success': False, 'error': 'Parameters not available'}
+
+            pre_processor_params = self.params.get('pre_processor_params', {})
+            pre_processor_params['image_path'] = primary_file_url
+
+            inference_payload = {
+                'agent': 1,
+                'action': 'inference',
+                'version': pre_processor_version,
+                'params': {
+                    'model': self.params['model'],
+                    'method': 'post',
+                    'json': pre_processor_params,
+                },
+            }
+
+            inference_data = client.run_plugin(pre_processor_code, inference_payload)
+            return {'success': True, 'data': inference_data}
+        except Exception as e:
+            return {'success': False, 'error': f'Failed to run inference: {str(e)}'}
+
     def _update_metrics(self, total_tasks: int, success_count: int, failed_count: int):
         """Update metrics for task annotation progress.
 
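Note: all of the new private helpers share one convention, returning a dict with a success flag plus either payload keys or an error message, so callers can short-circuit with early returns. A standalone sketch of that pattern, with invented names, not SDK API:

# Standalone illustration of the {'success': ..., ...} result convention used above.
from typing import Any, Dict


def load_config(path: str) -> Dict[str, Any]:
    """Hypothetical step that either succeeds with data or fails with an error string."""
    if not path.endswith('.yaml'):
        return {'success': False, 'error': f'Unsupported config file: {path}'}
    return {'success': True, 'data': {'required_cpu_count': 1}}


def run(path: str) -> Dict[str, Any]:
    result = load_config(path)
    if not result['success']:
        # Propagate the failure dict unchanged, exactly like the action methods above do.
        return result
    return {'success': True, 'data': result['data']}


print(run('plugin.yaml'))  # {'success': True, 'data': {'required_cpu_count': 1}}
print(run('plugin.json'))  # {'success': False, 'error': 'Unsupported config file: plugin.json'}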
@@ -3,6 +3,9 @@ actions:
     entrypoint: plugin.pre_annotation.pre_annotate
   to_task:
     entrypoint: plugin.to_task.AnnotationToTask
+    inference_options:
+      required_cpu_count: 1
+      required_gpu_count: 0.1
     ui_schema:
       - $formkit: "radio"
         name: "schema_to_convert"
@@ -6,3 +6,4 @@ class Context(str, Enum):
    SUCCESS = 'success'
    WARNING = 'warning'
    DANGER = 'danger'
+   ERROR = 'error'
@@ -192,7 +192,9 @@ class ToDMConverter(BaseConverter):
                 json_filename = os.path.splitext(img_filename)[0] + '.json'
                 with open(os.path.join(json_dir, json_filename), 'w', encoding='utf-8') as jf:
                     json.dump(dm_json, jf, indent=2, ensure_ascii=False)
-                if img_src_path and os.path.exists(img_src_path):
+                if img_src_path:
+                    if not os.path.exists(img_src_path):
+                        raise FileNotFoundError(f'Source file does not exist: {img_src_path}')
                     shutil.copy(img_src_path, os.path.join(original_file_dir, img_filename))
         else:
             json_dir = os.path.join(output_dir, 'json')
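Note: with this change a configured-but-missing source image now raises FileNotFoundError instead of being skipped silently, so callers may need to handle the error. A self-contained stand-in for the copy step; the function name and paths are invented for illustration, not SDK API:

import os
import shutil


def copy_source_image(img_src_path: str, dest_path: str) -> None:
    """Stand-in for the copy step above."""
    if img_src_path:
        if not os.path.exists(img_src_path):
            # As of this release, a missing source file is an error rather than a silent skip.
            raise FileNotFoundError(f'Source file does not exist: {img_src_path}')
        shutil.copy(img_src_path, dest_path)


try:
    copy_source_image('/tmp/does_not_exist.jpg', '/tmp/out.jpg')
except FileNotFoundError as e:
    print(f'Export aborted: {e}')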
@@ -0,0 +1,109 @@
+from abc import ABC, abstractmethod
+
+
+class BaseDMConverter(ABC):
+    """Base class for DM format converters."""
+
+    SUPPORTED_TOOLS = [
+        'bounding_box',
+        'named_entity',
+        'classification',
+        'polyline',
+        'keypoint',
+        '3d_bounding_box',
+        'segmentation',
+        'polygon',
+        'relation',
+        'group',
+    ]
+
+    def __init__(self):
+        """Initialize the base converter."""
+        self.tool_processors = self._setup_tool_processors()
+
+    def _setup_tool_processors(self):
+        """Setup tool processor mapping."""
+        return {
+            'bounding_box': self._process_bounding_box,
+            'named_entity': self._process_named_entity,
+            'classification': self._process_classification,
+            'polyline': self._process_polyline,
+            'keypoint': self._process_keypoint,
+            '3d_bounding_box': self._process_3d_bounding_box,
+            'segmentation': self._process_segmentation,
+            'polygon': self._process_polygon,
+            'relation': self._process_relation,
+            'group': self._process_group,
+        }
+
+    @abstractmethod
+    def convert(self):
+        """Convert data from one format to another."""
+        pass
+
+    @abstractmethod
+    def _process_bounding_box(self, *args, **kwargs):
+        """Process bounding box annotation."""
+        pass
+
+    @abstractmethod
+    def _process_named_entity(self, *args, **kwargs):
+        """Process named entity annotation."""
+        pass
+
+    @abstractmethod
+    def _process_classification(self, *args, **kwargs):
+        """Process classification annotation."""
+        pass
+
+    @abstractmethod
+    def _process_polyline(self, *args, **kwargs):
+        """Process polyline annotation."""
+        pass
+
+    @abstractmethod
+    def _process_keypoint(self, *args, **kwargs):
+        """Process keypoint annotation."""
+        pass
+
+    @abstractmethod
+    def _process_3d_bounding_box(self, *args, **kwargs):
+        """Process 3D bounding box annotation."""
+        pass
+
+    @abstractmethod
+    def _process_segmentation(self, *args, **kwargs):
+        """Process segmentation annotation."""
+        pass
+
+    @abstractmethod
+    def _process_polygon(self, *args, **kwargs):
+        """Process polygon annotation."""
+        pass
+
+    @abstractmethod
+    def _process_relation(self, *args, **kwargs):
+        """Process relation annotation."""
+        pass
+
+    @abstractmethod
+    def _process_group(self, *args, **kwargs):
+        """Process group annotation."""
+        pass
+
+    def _handle_unknown_tool(self, tool_type, item_id=None):
+        """Handle unknown tool types with consistent warning message."""
+        warning_msg = f"Warning: Unknown tool type '{tool_type}'"
+        if item_id:
+            warning_msg += f' for item {item_id}'
+        print(warning_msg)
+
+    def _extract_media_type_info(self, media_id):
+        """Extract media type information from media ID."""
+        media_type = media_id.split('_')[0] if '_' in media_id else media_id
+        media_type_plural = media_type + 's' if not media_type.endswith('s') else media_type
+        return media_type, media_type_plural
+
+    def _singularize_media_type(self, media_type_plural):
+        """Convert plural media type to singular."""
+        return media_type_plural.rstrip('s')
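Note: a minimal sketch of a concrete subclass of the new BaseDMConverter, showing the intended extension pattern: implement the abstract _process_* hooks and dispatch through tool_processors. The import path and the input item shape are assumptions, not part of this diff:

from synapse_sdk.utils.converters.base import BaseDMConverter  # module path assumed


class MyDMConverter(BaseDMConverter):
    """Toy converter: wraps each annotation in a {'type', 'data'} dict."""

    def convert(self, items):
        results = []
        for item in items:
            processor = self.tool_processors.get(item['tool'])
            if processor is None:
                # Unknown tools fall back to the shared warning helper.
                self._handle_unknown_tool(item['tool'], item.get('id'))
                continue
            results.append(processor(item))
        return results

    def _process_bounding_box(self, item):
        return {'type': 'bounding_box', 'data': item}

    def _process_named_entity(self, item):
        return {'type': 'named_entity', 'data': item}

    def _process_classification(self, item):
        return {'type': 'classification', 'data': item}

    def _process_polyline(self, item):
        return {'type': 'polyline', 'data': item}

    def _process_keypoint(self, item):
        return {'type': 'keypoint', 'data': item}

    def _process_3d_bounding_box(self, item):
        return {'type': '3d_bounding_box', 'data': item}

    def _process_segmentation(self, item):
        return {'type': 'segmentation', 'data': item}

    def _process_polygon(self, item):
        return {'type': 'polygon', 'data': item}

    def _process_relation(self, item):
        return {'type': 'relation', 'data': item}

    def _process_group(self, item):
        return {'type': 'group', 'data': item}


converter = MyDMConverter()
print(converter.convert([{'tool': 'bounding_box', 'id': 'ann_1', 'points': [0, 0, 10, 10]}]))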