synapse-sdk 1.0.0a74__py3-none-any.whl → 1.0.0a76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of synapse-sdk has been flagged as possibly problematic in the registry's advisory.
Files changed (27)
  1. synapse_sdk/clients/backend/annotation.py +0 -4
  2. synapse_sdk/devtools/docs/sidebars.ts +9 -1
  3. synapse_sdk/plugins/categories/pre_annotation/actions/to_task.py +208 -32
  4. synapse_sdk/plugins/categories/pre_annotation/templates/config.yaml +3 -0
  5. synapse_sdk/plugins/utils/__init__.py +43 -0
  6. synapse_sdk/plugins/utils/actions.py +119 -0
  7. synapse_sdk/plugins/utils/config.py +203 -0
  8. synapse_sdk/plugins/utils/legacy.py +95 -0
  9. synapse_sdk/plugins/utils/registry.py +58 -0
  10. synapse_sdk/plugins/utils.py +27 -0
  11. synapse_sdk/shared/enums.py +1 -0
  12. synapse_sdk/utils/converters/__init__.py +3 -1
  13. synapse_sdk/utils/converters/dm/__init__.py +109 -0
  14. synapse_sdk/utils/converters/dm/from_v1.py +415 -0
  15. synapse_sdk/utils/converters/dm/to_v1.py +254 -0
  16. synapse_sdk/utils/converters/pascal/__init__.py +0 -0
  17. synapse_sdk/utils/converters/pascal/from_dm.py +177 -0
  18. synapse_sdk/utils/converters/pascal/to_dm.py +135 -0
  19. synapse_sdk/utils/converters/yolo/from_dm.py +24 -18
  20. synapse_sdk/utils/converters/yolo/to_dm.py +185 -0
  21. synapse_sdk-1.0.0a76.dist-info/METADATA +107 -0
  22. {synapse_sdk-1.0.0a74.dist-info → synapse_sdk-1.0.0a76.dist-info}/RECORD +26 -14
  23. synapse_sdk-1.0.0a74.dist-info/METADATA +0 -37
  24. {synapse_sdk-1.0.0a74.dist-info → synapse_sdk-1.0.0a76.dist-info}/WHEEL +0 -0
  25. {synapse_sdk-1.0.0a74.dist-info → synapse_sdk-1.0.0a76.dist-info}/entry_points.txt +0 -0
  26. {synapse_sdk-1.0.0a74.dist-info → synapse_sdk-1.0.0a76.dist-info}/licenses/LICENSE +0 -0
  27. {synapse_sdk-1.0.0a74.dist-info → synapse_sdk-1.0.0a76.dist-info}/top_level.txt +0 -0
synapse_sdk/clients/backend/annotation.py
@@ -15,10 +15,6 @@ class AnnotationClientMixin(BaseClient):
         path = f'tasks/{pk}/annotate_task_data/'
         return self._put(path, data=data)
 
-    def patch_task(self, pk, data):
-        path = f'tasks/{pk}/'
-        return self._patch(path, data=data)
-
     def get_task_tag(self, pk):
         path = f'task_tags/{pk}/'
         return self._get(path)
synapse_sdk/devtools/docs/sidebars.ts
@@ -17,6 +17,15 @@ const sidebars: SidebarsConfig = {
     'introduction',
     'installation',
     'quickstart',
+    {
+      type: 'category',
+      label: 'Features',
+      items: [
+        'features/features',
+        'features/plugins/plugins',
+        'features/converters/converters',
+      ],
+    },
     {
       type: 'category',
       label: 'API Reference',
@@ -28,7 +37,6 @@ const sidebars: SidebarsConfig = {
     'configuration',
     'troubleshooting',
     'faq',
-    'changelog',
     'contributing',
   ],
 };
synapse_sdk/plugins/categories/pre_annotation/actions/to_task.py
@@ -13,6 +13,7 @@ from synapse_sdk.plugins.categories.base import Action
 from synapse_sdk.plugins.categories.decorators import register_action
 from synapse_sdk.plugins.enums import PluginCategory, RunMethod
 from synapse_sdk.plugins.models import Run
+from synapse_sdk.shared.enums import Context
 from synapse_sdk.utils.pydantic.validators import non_blank
 
 
@@ -26,6 +27,14 @@ class AnnotateTaskDataStatus(str, Enum):
     FAILED = 'failed'
 
 
+class CriticalError(Exception):
+    """Critical error."""
+
+    def __init__(self, message: str = 'Critical error occured while processing task'):
+        self.message = message
+        super().__init__(self.message)
+
+
 class ToTaskRun(Run):
     class AnnotateTaskDataLog(BaseModel):
         """Log model for annotate task data."""
@@ -278,6 +287,12 @@ class ToTaskAction(Action):
                 self._update_metrics(total_tasks, success_count, failed_count)
                 self.run.set_progress(current_progress, total_tasks, category='annotate_task_data')
 
+            except CriticalError:
+                self.run.log_message(
+                    'Critical error occured while processing task. Stopping the job.', context=Context.DANGER.value
+                )
+                return
+
             except Exception as e:
                 self.run.log_message(f'Failed to process task {task_id}: {str(e)}')
                 self.run.log_annotate_task_data({'task_id': task_id, 'error': str(e)}, AnnotateTaskDataStatus.FAILED)
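The new CriticalError path changes the failure semantics of the annotation loop: a regular exception marks only the current task as FAILED and the loop continues, while a CriticalError aborts the whole run with a DANGER-level log. A minimal sketch of how a per-task helper might use it; the helper name below is hypothetical, not from the package:

# Hypothetical per-task helper illustrating the two failure modes above.
# CriticalError is the exception class added in to_task.py.
def process_single_task(task_id: int, backend_available: bool) -> dict:
    if not backend_available:
        # Bubbles up to the `except CriticalError` branch, which logs with
        # Context.DANGER and returns from the whole job.
        raise CriticalError('Backend unreachable; aborting the annotation job')
    if task_id < 0:
        # A per-task problem raises a regular exception (or returns an error
        # dict), so only this task is logged as FAILED and the loop continues.
        raise ValueError(f'Invalid task id: {task_id}')
    return {'success': True, 'task_id': task_id}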
@@ -427,48 +442,43 @@ class ToTaskAction(Action):
         assert isinstance(self.run, ToTaskRun)
 
         try:
-            # Get pre-processor information
+            # Validate pre-processor ID
             pre_processor_id = self.params.get('pre_processor')
-
             if not pre_processor_id:
                 error_msg = 'Pre-processor ID is required for inference annotation method'
                 self.run.log_message(f'{error_msg} for task {task_id}')
                 self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
                 return {'success': False, 'error': error_msg}
 
-            # Call inference pre processor if specified.
-            pre_processor = client.get_plugin_release(pre_processor_id)
-            pre_processor_code = pre_processor['config']['code']
-            pre_processor_version = pre_processor['version']
-
-            # Extract task data for inference
-            data_unit = task.get('data_unit', {})
-            files = data_unit.get('files', {})
-
-            # Find primary image URL from files
-            primary_file_url = ''
-            for file_info in files.values():
-                if file_info.get('is_primary') and file_info.get('url'):
-                    primary_file_url = file_info['url']
-                    break
+            # Get pre-processor information
+            pre_processor_info = self._get_pre_processor_info(client, pre_processor_id)
+            if not pre_processor_info['success']:
+                return pre_processor_info
+
+            pre_processor_code = pre_processor_info['code']
+            pre_processor_version = pre_processor_info['version']
+
+            # Ensure pre-processor is running
+            pre_processor_status = self._ensure_pre_processor_running(client, pre_processor_code)
+            if not pre_processor_status['success']:
+                return pre_processor_status
+
+            # Extract primary file URL from task data
+            primary_file_url = self._extract_primary_file_url(task)
+            if not primary_file_url:
+                error_msg = 'Primary image URL not found in task data'
+                self.run.log_message(f'{error_msg} for task {task_id}')
+                self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
+                return {'success': False, 'error': error_msg}
 
-            pre_processor_params = self.params.get('pre_processor_params', {})
-            pre_processor_params['image_path'] = primary_file_url
-            inference_payload = {
-                'agent': 1,
-                'action': 'inference',
-                'version': pre_processor_version,
-                'params': {
-                    'model': self.params.get('model'),
-                    'method': 'post',
-                    'json': pre_processor_params,
-                },
-            }
-            inference_data = client.run_plugin(pre_processor_code, inference_payload)
+            # Run inference
+            inference_result = self._run_inference(client, pre_processor_code, pre_processor_version, primary_file_url)
+            if not inference_result['success']:
+                return inference_result
 
-            # Convert data to task object
+            # Convert and submit inference data
             annotation_to_task = self.entrypoint(self.run)
-            converted_result = annotation_to_task.convert_data_from_inference(inference_data)
+            converted_result = annotation_to_task.convert_data_from_inference(inference_result['data'])
 
             # Submit inference annotation data
             client.annotate_task_data(task_id, data={'action': 'submit', 'data': converted_result})
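The helpers called above (defined in the next hunk) all share the same lightweight result convention: a plain dict with a 'success' flag plus either an 'error' message or payload keys ('code', 'version', 'data'), which is what lets the caller return the first failing dict unchanged. A rough sketch of that chain, assuming a hypothetical wrapper around a ToTaskAction instance and a BackendClient:

# Hypothetical wrapper; `action` is assumed to be a ToTaskAction instance and
# `client` a BackendClient, as in the diff above.
def run_pre_annotation(action, client, task, task_id):
    info = action._get_pre_processor_info(client, action.params['pre_processor'])
    if not info['success']:
        return info  # e.g. {'success': False, 'error': 'Invalid pre-processor configuration'}

    status = action._ensure_pre_processor_running(client, info['code'])
    if not status['success']:
        return status

    url = action._extract_primary_file_url(task)
    if not url:
        return {'success': False, 'error': 'Primary image URL not found in task data'}

    # On success this returns {'success': True, 'data': <inference output>}.
    return action._run_inference(client, info['code'], info['version'], url)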
@@ -485,6 +495,172 @@ class ToTaskAction(Action):
             self.run.log_annotate_task_data({'task_id': task_id, 'error': error_msg}, AnnotateTaskDataStatus.FAILED)
             return {'success': False, 'error': error_msg}
 
+    def _get_pre_processor_info(self, client: BackendClient, pre_processor_id: int) -> Dict[str, Any]:
+        """Get pre-processor information from the backend.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            pre_processor_id (int): The pre-processor ID.
+
+        Returns:
+            Dict[str, Any]: Result dictionary with pre-processor info or error.
+        """
+        try:
+            pre_processor_response = client.get_plugin_release(pre_processor_id)
+            if isinstance(pre_processor_response, str):
+                return {'success': False, 'error': 'Invalid pre-processor response received'}
+
+            pre_processor: Dict[str, Any] = pre_processor_response
+            config = pre_processor.get('config', {})
+            code = config.get('code')
+            version = pre_processor.get('version')
+
+            if not code or not version:
+                return {'success': False, 'error': 'Invalid pre-processor configuration'}
+
+            return {'success': True, 'code': code, 'version': version}
+        except Exception as e:
+            return {'success': False, 'error': f'Failed to get pre-processor info: {str(e)}'}
+
+    def _ensure_pre_processor_running(self, client: BackendClient, pre_processor_code: str) -> Dict[str, Any]:
+        """Ensure the pre-processor is running, restart if necessary.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            pre_processor_code (str): The pre-processor code.
+
+        Returns:
+            Dict[str, Any]: Result dictionary indicating success or failure.
+        """
+        try:
+            # Check if pre-processor is running
+            serve_applications_response = client.list_serve_applications(params={'plugin_code': pre_processor_code})
+            if isinstance(serve_applications_response, str):
+                return {'success': False, 'error': 'Invalid serve applications response'}
+
+            # Handle the response properly - it should be a dict with 'results' key
+            if not isinstance(serve_applications_response, dict):
+                return {'success': False, 'error': 'Unexpected serve applications response format'}
+
+            serve_applications: Dict[str, Any] = serve_applications_response
+            results = serve_applications.get('results', [])
+            running_serve_apps = [app for app in results if isinstance(app, dict) and app.get('status') == 'RUNNING']
+
+            # If not running, restart the pre-processor
+            if not running_serve_apps:
+                restart_result = self._restart_pre_processor(client, pre_processor_code)
+                if not restart_result['success']:
+                    return restart_result
+
+                # Verify restart was successful
+                serve_applications_response = client.list_serve_applications(params={'plugin_code': pre_processor_code})
+                if isinstance(serve_applications_response, str):
+                    return {'success': False, 'error': 'Failed to verify pre-processor restart'}
+
+                if not isinstance(serve_applications_response, dict):
+                    return {'success': False, 'error': 'Unexpected serve applications response format after restart'}
+
+                serve_applications: Dict[str, Any] = serve_applications_response
+                results = serve_applications.get('results', [])
+                running_serve_apps = [
+                    app for app in results if isinstance(app, dict) and app.get('status') == 'RUNNING'
+                ]
+
+                if not running_serve_apps:
+                    return {'success': False, 'error': 'Failed to restart pre-processor'}
+
+            return {'success': True}
+        except Exception as e:
+            return {'success': False, 'error': f'Failed to ensure pre-processor running: {str(e)}'}
+
+    def _restart_pre_processor(self, client: BackendClient, pre_processor_code: str) -> Dict[str, Any]:
+        """Restart the pre-processor.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            pre_processor_code (str): The pre-processor code.
+
+        Returns:
+            Dict[str, Any]: Result dictionary indicating success or failure.
+        """
+        try:
+            if not self.config:
+                return {'success': False, 'error': 'Configuration not available'}
+
+            inference_options = self.config.get('inference_options', {})
+            serve_application_deployment_payload = {
+                'agent': self.params.get('agent') if self.params else None,
+                'action': 'deployment',
+                'params': {
+                    'num_cpus': inference_options.get('required_cpu_count', 2),
+                    'num_gpus': inference_options.get('required_gpu_count', 1),
+                },
+                'debug': True,
+            }
+
+            deployment_result = client.run_plugin(pre_processor_code, serve_application_deployment_payload)
+            if not deployment_result:
+                return {'success': False, 'error': 'Failed to restart pre-processor'}
+
+            return {'success': True}
+        except Exception as e:
+            return {'success': False, 'error': f'Failed to restart pre-processor: {str(e)}'}
+
+    def _extract_primary_file_url(self, task: Dict[str, Any]) -> Optional[str]:
+        """Extract the primary file URL from task data.
+
+        Args:
+            task (Dict[str, Any]): The task data.
+
+        Returns:
+            Optional[str]: The primary file URL if found, None otherwise.
+        """
+        data_unit = task.get('data_unit', {})
+        files = data_unit.get('files', {})
+
+        for file_info in files.values():
+            if isinstance(file_info, dict) and file_info.get('is_primary') and file_info.get('url'):
+                return file_info['url']
+
+        return None
+
+    def _run_inference(
+        self, client: BackendClient, pre_processor_code: str, pre_processor_version: str, primary_file_url: str
+    ) -> Dict[str, Any]:
+        """Run inference using the pre-processor.
+
+        Args:
+            client (BackendClient): The backend client instance.
+            pre_processor_code (str): The pre-processor code.
+            pre_processor_version (str): The pre-processor version.
+            primary_file_url (str): The primary image URL.
+
+        Returns:
+            Dict[str, Any]: Result dictionary with inference data or error.
+        """
+        try:
+            if not self.params:
+                return {'success': False, 'error': 'Parameters not available'}
+
+            pre_processor_params = self.params.get('pre_processor_params', {})
+            pre_processor_params['image_path'] = primary_file_url
+
+            inference_payload = {
+                'agent': 1,
+                'action': 'inference',
+                'version': pre_processor_version,
+                'params': {
+                    'model': self.params['model'],
+                    'method': 'post',
+                    'json': pre_processor_params,
+                },
+            }
+
+            inference_data = client.run_plugin(pre_processor_code, inference_payload)
+            return {'success': True, 'data': inference_data}
+        except Exception as e:
+            return {'success': False, 'error': f'Failed to run inference: {str(e)}'}
+
     def _update_metrics(self, total_tasks: int, success_count: int, failed_count: int):
         """Update metrics for task annotation progress.
 
synapse_sdk/plugins/categories/pre_annotation/templates/config.yaml
@@ -3,6 +3,9 @@ actions:
     entrypoint: plugin.pre_annotation.pre_annotate
   to_task:
     entrypoint: plugin.to_task.AnnotationToTask
+    inference_options:
+      required_cpu_count: 1
+      required_gpu_count: 0.1
     ui_schema:
       - $formkit: "radio"
        name: "schema_to_convert"
synapse_sdk/plugins/utils/__init__.py
@@ -0,0 +1,43 @@
+# New utilities
+from .actions import (
+    get_action,
+    get_action_class,
+    get_available_actions,
+    is_action_available,
+)
+from .config import (
+    get_action_config,
+    get_plugin_actions,
+    get_plugin_metadata,
+    read_plugin_config,
+    validate_plugin_config,
+)
+
+# Import legacy functions for backward compatibility
+from .legacy import read_requirements, run_plugin
+from .registry import (
+    get_category_display_name,
+    get_plugin_categories,
+    is_valid_category,
+)
+
+__all__ = [
+    # Config utilities
+    'get_plugin_actions',
+    'get_action_config',
+    'read_plugin_config',
+    'validate_plugin_config',
+    'get_plugin_metadata',
+    # Action utilities
+    'get_action',
+    'get_action_class',
+    'get_available_actions',
+    'is_action_available',
+    # Registry utilities
+    'get_plugin_categories',
+    'is_valid_category',
+    'get_category_display_name',
+    # Legacy utilities for backward compatibility
+    'read_requirements',
+    'run_plugin',
+]
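The new synapse_sdk.plugins.utils package re-exports the legacy read_requirements and run_plugin alongside the new config, action, and registry helpers, so existing imports of those names should keep working. A hedged usage sketch; the categories and actions actually returned depend on what is registered in a given installation:

# Sketch only: results depend on which plugin categories/actions are
# registered in the running environment.
from synapse_sdk.plugins.utils import (
    get_available_actions,
    is_action_available,
    read_plugin_config,  # re-exported from .config
)

config = read_plugin_config()  # assumed to read the current plugin's config, as in get_action()
if is_action_available('neural_net', 'train'):
    print(get_available_actions('neural_net'))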
synapse_sdk/plugins/utils/actions.py
@@ -0,0 +1,119 @@
+"""Plugin action utilities."""
+
+import json
+from typing import Any, Dict, Union
+
+from synapse_sdk.plugins.categories.registry import _REGISTERED_ACTIONS, register_actions
+from synapse_sdk.utils.file import get_dict_from_file
+
+from .config import read_plugin_config
+
+
+def get_action(action: str, params_data: Union[str, Dict[str, Any]], *args, **kwargs):
+    """Get a plugin action instance with validated parameters.
+
+    Args:
+        action: Name of the action to get.
+        params_data: Parameters as string (JSON/file path) or dictionary.
+        *args: Additional positional arguments.
+        **kwargs: Additional keyword arguments including 'config'.
+
+    Returns:
+        Configured action instance ready for execution.
+
+    Raises:
+        ActionError: If parameters are invalid or action not found.
+    """
+    if isinstance(params_data, str):
+        try:
+            params = json.loads(params_data)
+        except json.JSONDecodeError:
+            params = get_dict_from_file(params_data)
+    else:
+        params = params_data
+
+    config_data = kwargs.pop('config', False)
+    if config_data:
+        if isinstance(config_data, str):
+            config = read_plugin_config(plugin_path=config_data)
+        else:
+            config = config_data
+    else:
+        config = read_plugin_config()
+
+    category = config['category']
+    return get_action_class(category, action)(params, config, *args, **kwargs)
+
+
+def get_action_class(category: str, action: str):
+    """Get action class by category and action name.
+
+    Args:
+        category: Plugin category (e.g., 'neural_net', 'export').
+        action: Action name (e.g., 'train', 'inference').
+
+    Returns:
+        Action class ready for instantiation.
+
+    Raises:
+        KeyError: If category or action not found in registry.
+    """
+    register_actions()
+    try:
+        return _REGISTERED_ACTIONS[category][action]
+    except KeyError as e:
+        if category not in _REGISTERED_ACTIONS:
+            available_categories = list(_REGISTERED_ACTIONS.keys())
+            raise KeyError(f"Category '{category}' not found. Available categories: {available_categories}") from e
+        else:
+            available_actions = list(_REGISTERED_ACTIONS[category].keys())
+            raise KeyError(
+                f"Action '{action}' not found in category '{category}'. Available actions: {available_actions}"
+            ) from e
+
+
+def get_available_actions(category: str) -> list:
+    """Get list of available actions for a plugin category.
+
+    Args:
+        category: Plugin category to get actions for.
+
+    Returns:
+        List of available action names.
+
+    Raises:
+        KeyError: If category not found in registry.
+
+    Examples:
+        >>> get_available_actions('neural_net')
+        ['train', 'inference', 'test', 'deployment', 'gradio', 'tune']
+    """
+    register_actions()
+    if category not in _REGISTERED_ACTIONS:
+        available_categories = list(_REGISTERED_ACTIONS.keys())
+        raise KeyError(f"Category '{category}' not found. Available categories: {available_categories}")
+
+    return list(_REGISTERED_ACTIONS[category].keys())
+
+
+def is_action_available(category: str, action: str) -> bool:
+    """Check if an action is available in a given category.
+
+    Args:
+        category: Plugin category to check.
+        action: Action name to check.
+
+    Returns:
+        True if action is available, False otherwise.
+
+    Examples:
+        >>> is_action_available('neural_net', 'train')
+        True
+        >>> is_action_available('neural_net', 'nonexistent')
+        False
+    """
+    try:
+        available_actions = get_available_actions(category)
+        return action in available_actions
+    except KeyError:
+        return False
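Putting the new module together: get_action accepts parameters as a JSON string, a path readable by get_dict_from_file, or an already-parsed dict, plus an optional config that is either a plugin path or a config dict, and resolves the action class from the category registry. The concrete values below (action name, params, path) are made up for illustration only:

from synapse_sdk.plugins.utils.actions import get_action, get_action_class, is_action_available

# Resolve the class directly from the registry.
InferenceAction = get_action_class('neural_net', 'inference')

# Or build a configured instance; params given as a JSON string here,
# config as a plugin path handed to read_plugin_config(plugin_path=...).
action = get_action(
    'inference',
    '{"model": 42, "image_path": "s3://bucket/sample.jpg"}',
    config='/path/to/my_plugin',
)

# Unknown actions can be checked without catching KeyError.
assert not is_action_available('neural_net', 'nonexistent')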