synapse-sdk 2025.10.1-py3-none-any.whl → 2025.10.4-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

This release of synapse-sdk has been flagged as potentially problematic.

Files changed (54)
  1. synapse_sdk/devtools/docs/docs/plugins/categories/pre-annotation-plugins/pre-annotation-plugin-overview.md +198 -0
  2. synapse_sdk/devtools/docs/docs/plugins/categories/pre-annotation-plugins/to-task-action-development.md +1645 -0
  3. synapse_sdk/devtools/docs/docs/plugins/categories/pre-annotation-plugins/to-task-overview.md +717 -0
  4. synapse_sdk/devtools/docs/docs/plugins/categories/pre-annotation-plugins/to-task-template-development.md +1380 -0
  5. synapse_sdk/devtools/docs/docs/plugins/categories/upload-plugins/upload-plugin-action.md +934 -0
  6. synapse_sdk/devtools/docs/docs/plugins/categories/upload-plugins/upload-plugin-overview.md +560 -0
  7. synapse_sdk/devtools/docs/docs/plugins/categories/upload-plugins/upload-plugin-template.md +715 -0
  8. synapse_sdk/devtools/docs/docs/plugins/plugins.md +12 -5
  9. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current/plugins/categories/pre-annotation-plugins/pre-annotation-plugin-overview.md +198 -0
  10. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current/plugins/categories/pre-annotation-plugins/to-task-action-development.md +1645 -0
  11. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current/plugins/categories/pre-annotation-plugins/to-task-overview.md +717 -0
  12. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current/plugins/categories/pre-annotation-plugins/to-task-template-development.md +1380 -0
  13. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current/plugins/categories/upload-plugins/upload-plugin-action.md +934 -0
  14. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current/plugins/categories/upload-plugins/upload-plugin-overview.md +560 -0
  15. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current/plugins/categories/upload-plugins/upload-plugin-template.md +715 -0
  16. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current.json +16 -4
  17. synapse_sdk/devtools/docs/sidebars.ts +27 -1
  18. synapse_sdk/plugins/README.md +487 -80
  19. synapse_sdk/plugins/categories/export/actions/export/action.py +8 -3
  20. synapse_sdk/plugins/categories/export/actions/export/utils.py +108 -8
  21. synapse_sdk/plugins/categories/pre_annotation/actions/__init__.py +4 -0
  22. synapse_sdk/plugins/categories/pre_annotation/actions/pre_annotation/__init__.py +3 -0
  23. synapse_sdk/plugins/categories/pre_annotation/actions/pre_annotation/action.py +10 -0
  24. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/__init__.py +28 -0
  25. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/action.py +145 -0
  26. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/enums.py +269 -0
  27. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/exceptions.py +14 -0
  28. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/factory.py +76 -0
  29. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/models.py +97 -0
  30. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/orchestrator.py +250 -0
  31. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/run.py +64 -0
  32. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/__init__.py +17 -0
  33. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/annotation.py +284 -0
  34. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/base.py +170 -0
  35. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/extraction.py +83 -0
  36. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/metrics.py +87 -0
  37. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/preprocessor.py +127 -0
  38. synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/validation.py +143 -0
  39. synapse_sdk/plugins/categories/upload/actions/upload/__init__.py +2 -1
  40. synapse_sdk/plugins/categories/upload/actions/upload/models.py +134 -94
  41. synapse_sdk/plugins/categories/upload/actions/upload/steps/cleanup.py +2 -2
  42. synapse_sdk/plugins/categories/upload/actions/upload/steps/metadata.py +106 -14
  43. synapse_sdk/plugins/categories/upload/actions/upload/steps/organize.py +113 -36
  44. synapse_sdk/plugins/categories/upload/templates/README.md +365 -0
  45. {synapse_sdk-2025.10.1.dist-info → synapse_sdk-2025.10.4.dist-info}/METADATA +1 -1
  46. {synapse_sdk-2025.10.1.dist-info → synapse_sdk-2025.10.4.dist-info}/RECORD +50 -22
  47. synapse_sdk/devtools/docs/docs/plugins/developing-upload-template.md +0 -1463
  48. synapse_sdk/devtools/docs/docs/plugins/upload-plugins.md +0 -1964
  49. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current/plugins/developing-upload-template.md +0 -1463
  50. synapse_sdk/devtools/docs/i18n/ko/docusaurus-plugin-content-docs/current/plugins/upload-plugins.md +0 -2077
  51. {synapse_sdk-2025.10.1.dist-info → synapse_sdk-2025.10.4.dist-info}/WHEEL +0 -0
  52. {synapse_sdk-2025.10.1.dist-info → synapse_sdk-2025.10.4.dist-info}/entry_points.txt +0 -0
  53. {synapse_sdk-2025.10.1.dist-info → synapse_sdk-2025.10.4.dist-info}/licenses/LICENSE +0 -0
  54. {synapse_sdk-2025.10.1.dist-info → synapse_sdk-2025.10.4.dist-info}/top_level.txt +0 -0
synapse_sdk/plugins/categories/export/actions/export/action.py

@@ -95,11 +95,11 @@ class ExportAction(Action):
             PydanticCustomError: If data retrieval fails
         """
         try:
-            result_list = handler.get_results(self.client, filters)
+            result_list = handler.get_results(self.client, filters, run=self.run)
             results = result_list[0]
             count = result_list[1]
         except ClientError:
-            raise PydanticCustomError('client_error', _('Unable to get Ground Truth dataset.'))
+            raise PydanticCustomError('client_error', _('Unable to get dataset.'))
         return results, count

     def start(self) -> Dict[str, Any]:

@@ -116,7 +116,12 @@ class ExportAction(Action):
         """
         self.run.log_message_with_code(LogCode.EXPORT_STARTED)

-        filters = {'expand': 'data', **self.params['filter']}
+        # Get expand setting from config, default to True (expand data)
+        filters = {**self.params['filter']}
+        data_expand = self.config.get('data_expand', True)
+        if data_expand:
+            filters['expand'] = 'data'
+
         target = self.params['target']
         handler = TargetHandlerFactory.get_handler(target)
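Note on the second hunk: expand=data is no longer hard-coded; it is applied only when the plugin's data_expand config flag is true, which remains the default. A minimal, self-contained sketch of that logic, with config and params_filter standing in for self.config and self.params['filter'] (the filter values are made up for illustration):

    # Sketch of the new filter-building behaviour; runnable standalone.
    config = {'data_expand': False}  # a plugin opting out of expanding data
    params_filter = {'project': 1}   # illustrative filter values

    filters = {**params_filter}
    if config.get('data_expand', True):  # defaults to expanding, as before
        filters['expand'] = 'data'

    print(filters)  # {'project': 1} -- no 'expand' key when data_expand is False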

synapse_sdk/plugins/categories/export/actions/export/utils.py

@@ -1,10 +1,12 @@
 from abc import ABC, abstractmethod
-from typing import Any
+from typing import Any, Optional
+import time

 from pydantic_core import PydanticCustomError

 from synapse_sdk.clients.exceptions import ClientError
 from synapse_sdk.i18n import gettext as _
+from synapse_sdk.shared.enums import Context


 class ExportTargetHandler(ABC):

@@ -15,6 +17,103 @@ class ExportTargetHandler(ABC):
     of methods to validate filters, retrieve results, and process collections of results.
     """

+    # TODO: This is a temporary workaround and needs improvement in the future
+    def _get_results_chunked(self, list_method, filters, chunk_size=100, max_retries=3, retry_delay=1, run=None):
+        """
+        Retrieve results in chunks to avoid memory and response size limits.
+
+        Args:
+            list_method: The client method to call (e.g., client.list_assignments)
+            filters (dict): The filter criteria to apply
+            chunk_size (int): Number of items to fetch per chunk
+            max_retries (int): Maximum number of retries for failed requests
+            retry_delay (int): Delay in seconds between retries
+            run: Optional run instance used for structured logging
+
+        Returns:
+            tuple: A tuple containing the results list and the total count
+        """
+        filters = filters.copy()
+        filters['page_size'] = chunk_size
+
+        page = 1
+        results = []
+        total_count = 0
+
+        try:
+            while True:
+                filters['page'] = page
+
+                # Retry logic for handling temporary server issues
+                for attempt in range(max_retries + 1):
+                    try:
+                        response = list_method(params=filters, list_all=False)
+                        break
+                    except ClientError as e:
+                        error_msg = str(e)
+
+                        # Use log_dev_event for better debugging and monitoring
+                        if run:
+                            run.log_dev_event(
+                                'Chunked data retrieval error',
+                                {
+                                    'page': page,
+                                    'attempt': attempt + 1,
+                                    'error_message': error_msg,
+                                    'chunk_size': chunk_size,
+                                },
+                                level=Context.WARNING,
+                            )
+
+                        # Check for JSON decode errors specifically
+                        if 'Expecting value' in error_msg or 'JSONDecodeError' in error_msg:
+                            if run:
+                                run.log_dev_event(
+                                    'JSON parsing error - skipping page',
+                                    {'page': page, 'error_type': 'JSON_DECODE_ERROR', 'error_details': error_msg},
+                                    level=Context.DANGER,
+                                )
+                            # Skip this page and continue with next
+                            page += 1
+                            break
+                        elif attempt < max_retries and ('503' in error_msg or 'connection' in error_msg.lower()):
+                            retry_delay_seconds = retry_delay * (2**attempt)
+                            if run:
+                                run.log_dev_event(
+                                    'Server issue - retrying with backoff',
+                                    {
+                                        'page': page,
+                                        'retry_attempt': attempt + 1,
+                                        'max_retries': max_retries,
+                                        'retry_delay_seconds': retry_delay_seconds,
+                                        'error_type': 'SERVER_ISSUE',
+                                    },
+                                    level=Context.INFO,
+                                )
+                            time.sleep(retry_delay_seconds)  # Exponential backoff
+                            continue
+                        else:
+                            raise
+
+                if page == 1:
+                    total_count = response['count']
+
+                current_results = response.get('results', [])
+                results.extend(current_results)
+
+                # Check if we've got all results or if there are no more results
+                if len(current_results) < chunk_size or not response.get('next'):
+                    break
+
+                page += 1
+
+                # Small delay between pages to avoid overwhelming the server
+                time.sleep(0.1)
+
+            return results, total_count
+        except Exception:
+            # Re-raise the exception to be handled by the calling method
+            raise
+
     @abstractmethod
     def validate_filter(self, value: dict, client: Any):
         """

@@ -33,13 +132,14 @@ class ExportTargetHandler(ABC):
         pass

     @abstractmethod
-    def get_results(self, client: Any, filters: dict):
+    def get_results(self, client: Any, filters: dict, run=None):
         """
         Retrieve original data from target sources.

         Args:
             client (Any): The client used to retrieve the results.
             filters (dict): The filter criteria to apply.
+            run: Optional ExportRun instance for logging.

         Returns:
             tuple: A tuple containing the results and the total count of results.

@@ -76,8 +176,8 @@ class AssignmentExportTargetHandler(ExportTargetHandler):
             raise PydanticCustomError('client_error', _('Unable to get Assignment.'))
         return value

-    def get_results(self, client: Any, filters: dict):
-        return client.list_assignments(params=filters, list_all=True)
+    def get_results(self, client: Any, filters: dict, run=None):
+        return self._get_results_chunked(client.list_assignments, filters, run=run)

     def get_export_item(self, results):
         for result in results:

@@ -104,9 +204,9 @@ class GroundTruthExportTargetHandler(ExportTargetHandler):
             raise PydanticCustomError('client_error', _('Unable to get Ground Truth dataset version.'))
         return value

-    def get_results(self, client: Any, filters: dict):
+    def get_results(self, client: Any, filters: dict, run=None):
         filters['ground_truth_dataset_versions'] = filters.pop('ground_truth_dataset_version')
-        return client.list_ground_truth_events(params=filters, list_all=True)
+        return self._get_results_chunked(client.list_ground_truth_events, filters, run=run)

     def get_export_item(self, results):
         for result in results:

@@ -134,9 +234,9 @@ class TaskExportTargetHandler(ExportTargetHandler):
             raise PydanticCustomError('client_error', _('Unable to get Task.'))
         return value

-    def get_results(self, client: Any, filters: dict):
+    def get_results(self, client: Any, filters: dict, run=None):
         filters['expand'] = ['data_unit', 'assignment', 'workshop']
-        return client.list_tasks(params=filters, list_all=True)
+        return self._get_results_chunked(client.list_tasks, filters, run=run)

     def get_export_item(self, results):
         for result in results:
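With these changes, all three handlers route get_results through _get_results_chunked instead of a single list_all=True call: exports now fetch 100 items per request, skip pages whose responses fail JSON decoding, and retry transient 503/connection errors with exponential backoff. A self-contained sketch of that backoff schedule, assuming the method's defaults (max_retries=3, retry_delay=1):

    # Backoff delays grow as retry_delay * 2**attempt: 1s, 2s, 4s.
    max_retries, retry_delay = 3, 1

    for attempt in range(max_retries):
        retry_delay_seconds = retry_delay * (2 ** attempt)
        print(f'attempt {attempt + 1}: wait {retry_delay_seconds}s before retrying')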

synapse_sdk/plugins/categories/pre_annotation/actions/__init__.py

@@ -0,0 +1,4 @@
+from .pre_annotation.action import PreAnnotationAction
+from .to_task.action import ToTaskAction
+
+__all__ = ['PreAnnotationAction', 'ToTaskAction']

synapse_sdk/plugins/categories/pre_annotation/actions/pre_annotation/__init__.py

@@ -0,0 +1,3 @@
+from .action import PreAnnotationAction
+
+__all__ = ['PreAnnotationAction']

synapse_sdk/plugins/categories/pre_annotation/actions/pre_annotation/action.py

@@ -0,0 +1,10 @@
+from synapse_sdk.plugins.categories.base import Action
+from synapse_sdk.plugins.categories.decorators import register_action
+from synapse_sdk.plugins.enums import PluginCategory, RunMethod
+
+
+@register_action
+class PreAnnotationAction(Action):
+    name = 'pre_annotation'
+    category = PluginCategory.PRE_ANNOTATION
+    method = RunMethod.TASK

synapse_sdk/plugins/categories/pre_annotation/actions/to_task/__init__.py

@@ -0,0 +1,28 @@
+from .action import ToTaskAction
+from .enums import AnnotateTaskDataStatus, AnnotationMethod, LogCode
+from .exceptions import CriticalError, PreAnnotationToTaskFailed
+
+# Advanced imports for extending the system
+from .factory import ToTaskStrategyFactory
+from .models import MetricsRecord, ToTaskParams, ToTaskResult
+from .orchestrator import ToTaskOrchestrator
+from .run import ToTaskRun
+from .strategies.base import ToTaskContext
+
+__all__ = [
+    # Core public API (maintains backward compatibility)
+    'ToTaskAction',
+    'ToTaskRun',
+    'ToTaskParams',
+    'ToTaskResult',
+    'AnnotationMethod',
+    'AnnotateTaskDataStatus',
+    'LogCode',
+    'CriticalError',
+    'PreAnnotationToTaskFailed',
+    'MetricsRecord',
+    # Advanced components for customization and testing
+    'ToTaskOrchestrator',
+    'ToTaskContext',
+    'ToTaskStrategyFactory',
+]
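A hypothetical import sketch of the two tiers this __init__.py exposes; the names are taken from the diff, everything around them is illustrative only:

    # Stable surface for plugin authors, plus advanced hooks for testing.
    from synapse_sdk.plugins.categories.pre_annotation.actions.to_task import (
        AnnotationMethod,       # str enum with members FILE and INFERENCE
        ToTaskAction,           # the registered action
        ToTaskStrategyFactory,  # advanced: supply custom strategies
    )

    print([m.value for m in AnnotationMethod])  # ['file', 'inference']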

synapse_sdk/plugins/categories/pre_annotation/actions/to_task/action.py

@@ -0,0 +1,145 @@
+"""Refactored ToTask action using Strategy and Facade patterns."""
+
+from typing import Dict
+
+from synapse_sdk.clients.backend import BackendClient
+from synapse_sdk.clients.backend.models import JobStatus
+from synapse_sdk.plugins.categories.base import Action
+from synapse_sdk.plugins.categories.decorators import register_action
+from synapse_sdk.plugins.enums import PluginCategory, RunMethod
+
+from .enums import LogCode
+from .exceptions import PreAnnotationToTaskFailed
+from .models import ToTaskParams, ToTaskResult
+from .orchestrator import ToTaskOrchestrator
+from .run import ToTaskRun
+from .strategies.base import ToTaskContext
+
+
+@register_action
+class ToTaskAction(Action):
+    """ToTask action for pre-annotation data processing using Strategy and Facade patterns.
+
+    This action handles the process of annotating data to tasks in a project. It supports
+    two annotation methods: file-based annotation and inference-based annotation.
+
+    The action uses a Strategy pattern to handle different annotation methods and validation
+    approaches, coordinated by an Orchestrator (Facade pattern) that manages the complete
+    workflow with rollback capabilities.
+
+    File-based annotation fetches data from file URLs specified in task data units,
+    downloads and processes JSON data, and updates task data with the processed information.
+    It also validates target specification names against file specifications.
+
+    Inference-based annotation uses pre-processor plugins for model inference
+    for automatic data annotation.
+
+    Attrs:
+        name (str): Action name, set to 'to_task'.
+        category (PluginCategory): Plugin category, set to PRE_ANNOTATION.
+        method (RunMethod): Execution method, set to JOB.
+        run_class (Type[ToTaskRun]): Run class for this action.
+        params_model (Type[ToTaskParams]): Parameter validation model.
+        progress_categories (Dict): Progress tracking configuration.
+        metrics_categories (Set[str]): Metrics categories for this action.
+
+    Note:
+        This action requires a valid project with an associated data collection.
+        For file-based annotation, the target_specification_name must exist in the
+        project's file specifications.
+
+    Raises:
+        ValueError: If run instance or parameters are not properly initialized.
+        PreAnnotationToTaskFailed: If the annotation workflow fails.
+    """
+
+    name = 'to_task'
+    category = PluginCategory.PRE_ANNOTATION
+    method = RunMethod.JOB
+    run_class = ToTaskRun
+    params_model = ToTaskParams
+    progress_categories = {
+        'annotate_task_data': {
+            'proportion': 100,
+        },
+    }
+    metrics_categories = {
+        'annotate_task_data': {
+            'stand_by': 0,
+            'failed': 0,
+            'success': 0,
+        }
+    }
+
+    def __init__(self, *args, **kwargs):
+        """Initialize the action with orchestrator context."""
+        super().__init__(*args, **kwargs)
+        self.context = None
+
+    def start(self) -> Dict:
+        """Start to_task action using orchestrator facade.
+
+        The action now uses a simplified workflow:
+        1. Validate initialization
+        2. Create execution context
+        3. Execute workflow through orchestrator
+        4. Handle results and errors
+
+        Returns:
+            dict: Validated result with status and message.
+        """
+        # Validate initialization
+        if not self.run or not self.params:
+            result = ToTaskResult(
+                status=JobStatus.FAILED, message='Run instance or parameters not properly initialized'
+            )
+            raise PreAnnotationToTaskFailed(result.message)
+
+        # Type assertions for better IDE support
+        assert isinstance(self.run, ToTaskRun)
+        assert isinstance(self.run.client, BackendClient)
+
+        # Log action start
+        self.run.log_message_with_code(LogCode.TO_TASK_STARTED)
+
+        try:
+            # Create execution context
+            self.context = ToTaskContext(
+                params=self.params,
+                client=self.run.client,
+                logger=self.run,
+                entrypoint=self.entrypoint,
+                config=self.config,
+                plugin_config=self.plugin_config,
+                job_id=self.job_id,
+                progress_categories=self.progress_categories,
+                metrics_categories=self.metrics_categories,
+            )
+
+            # Create and execute orchestrator
+            orchestrator = ToTaskOrchestrator(self.context)
+            result = orchestrator.execute_workflow()
+
+            # Log successful completion
+            self.run.log_message_with_code(LogCode.TO_TASK_COMPLETED)
+            return result
+
+        except PreAnnotationToTaskFailed as e:
+            # Re-raise pre-annotation specific errors
+            self.run.log_message_with_code(LogCode.TO_TASK_FAILED, str(e))
+            raise e
+
+        except Exception as e:
+            # Handle unexpected errors
+            error_msg = f'ToTask action failed: {str(e)}'
+            self.run.log_message_with_code(LogCode.TO_TASK_FAILED, error_msg)
+            result = ToTaskResult(status=JobStatus.FAILED, message=error_msg)
+            raise PreAnnotationToTaskFailed(result.message)
+
+    def get_context(self) -> ToTaskContext:
+        """Get the current execution context for testing/debugging.
+
+        Returns:
+            ToTaskContext: The current execution context, or None if not initialized.
+        """
+        return self.context
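For orientation, a toy, self-contained rendering of the Strategy-plus-Facade shape the docstring describes; none of these classes are the SDK's own, they only mirror how ToTaskOrchestrator.execute_workflow coordinates a chosen strategy:

    # Toy sketch: one strategy per annotation method, a facade driving the flow.
    class FileStrategy:
        def annotate(self, task_id):
            return f'task {task_id}: annotated from file URL'

    class InferenceStrategy:
        def annotate(self, task_id):
            return f'task {task_id}: annotated via pre-processor inference'

    class Orchestrator:
        def __init__(self, strategy):
            self.strategy = strategy

        def execute_workflow(self, task_ids):
            # the real orchestrator adds validation, progress, metrics and rollback
            return [self.strategy.annotate(t) for t in task_ids]

    print(Orchestrator(FileStrategy()).execute_workflow([1, 2]))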

synapse_sdk/plugins/categories/pre_annotation/actions/to_task/enums.py

@@ -0,0 +1,269 @@
+from enum import Enum
+
+from synapse_sdk.shared.enums import Context
+
+
+class AnnotationMethod(str, Enum):
+    FILE = 'file'
+    INFERENCE = 'inference'
+
+
+class AnnotateTaskDataStatus(str, Enum):
+    SUCCESS = 'success'
+    FAILED = 'failed'
+
+
+class LogCode(str, Enum):
+    """Type-safe logging codes for to_task operations.
+
+    Enumeration of all possible log events during to_task processing. Each code
+    corresponds to a specific event or error state with predefined message
+    templates and log levels.
+
+    The codes are organized by category:
+    - Validation codes (INVALID_PROJECT_RESPONSE, NO_DATA_COLLECTION, etc.)
+    - Processing codes (ANNOTATING_DATA, ANNOTATION_COMPLETED, etc.)
+    - Error codes (CRITICAL_ERROR, TASK_PROCESSING_FAILED, etc.)
+    - Inference codes (ANNOTATING_INFERENCE_DATA, INFERENCE_PROCESSING_FAILED, etc.)
+    """
+
+    INVALID_PROJECT_RESPONSE = 'INVALID_PROJECT_RESPONSE'
+    NO_DATA_COLLECTION = 'NO_DATA_COLLECTION'
+    INVALID_DATA_COLLECTION_RESPONSE = 'INVALID_DATA_COLLECTION_RESPONSE'
+    NO_TASKS_FOUND = 'NO_TASKS_FOUND'
+    TARGET_SPEC_REQUIRED = 'TARGET_SPEC_REQUIRED'
+    TARGET_SPEC_NOT_FOUND = 'TARGET_SPEC_NOT_FOUND'
+    UNSUPPORTED_METHOD = 'UNSUPPORTED_METHOD'
+    ANNOTATING_DATA = 'ANNOTATING_DATA'
+    CRITICAL_ERROR = 'CRITICAL_ERROR'
+    TASK_PROCESSING_FAILED = 'TASK_PROCESSING_FAILED'
+    ANNOTATION_COMPLETED = 'ANNOTATION_COMPLETED'
+    INVALID_TASK_RESPONSE = 'INVALID_TASK_RESPONSE'
+    TARGET_SPEC_REQUIRED_FOR_TASK = 'TARGET_SPEC_REQUIRED_FOR_TASK'
+    UNSUPPORTED_METHOD_FOR_TASK = 'UNSUPPORTED_METHOD_FOR_TASK'
+    PRIMARY_IMAGE_URL_NOT_FOUND = 'PRIMARY_IMAGE_URL_NOT_FOUND'
+    FILE_SPEC_NOT_FOUND = 'FILE_SPEC_NOT_FOUND'
+    FILE_ORIGINAL_NAME_NOT_FOUND = 'FILE_ORIGINAL_NAME_NOT_FOUND'
+    URL_NOT_FOUND = 'URL_NOT_FOUND'
+    FETCH_DATA_FAILED = 'FETCH_DATA_FAILED'
+    CONVERT_DATA_FAILED = 'CONVERT_DATA_FAILED'
+    PREPROCESSOR_ID_REQUIRED = 'PREPROCESSOR_ID_REQUIRED'
+    INFERENCE_PROCESSING_FAILED = 'INFERENCE_PROCESSING_FAILED'
+    ANNOTATING_INFERENCE_DATA = 'ANNOTATING_INFERENCE_DATA'
+    INFERENCE_ANNOTATION_COMPLETED = 'INFERENCE_ANNOTATION_COMPLETED'
+    INFERENCE_PREPROCESSOR_FAILED = 'INFERENCE_PREPROCESSOR_FAILED'
+
+    # Orchestrator workflow codes
+    TO_TASK_STARTED = 'TO_TASK_STARTED'
+    TO_TASK_COMPLETED = 'TO_TASK_COMPLETED'
+    TO_TASK_FAILED = 'TO_TASK_FAILED'
+    STEP_STARTED = 'STEP_STARTED'
+    STEP_COMPLETED = 'STEP_COMPLETED'
+    STEP_FAILED = 'STEP_FAILED'
+    ROLLBACK_FAILED = 'ROLLBACK_FAILED'
+    ROLLBACK_ACTION_FAILED = 'ROLLBACK_ACTION_FAILED'
+
+    # Additional strategy codes
+    VALIDATION_FAILED = 'VALIDATION_FAILED'
+    NO_DATA_UNIT = 'NO_DATA_UNIT'
+    NO_DATA_UNIT_FILES = 'NO_DATA_UNIT_FILES'
+    TARGET_SPEC_URL_NOT_FOUND = 'TARGET_SPEC_URL_NOT_FOUND'
+    DATA_DOWNLOAD_FAILED = 'DATA_DOWNLOAD_FAILED'
+    JSON_DECODE_FAILED = 'JSON_DECODE_FAILED'
+    ANNOTATION_SUBMISSION_FAILED = 'ANNOTATION_SUBMISSION_FAILED'
+    NO_PREPROCESSOR_ID = 'NO_PREPROCESSOR_ID'
+    DATA_EXTRACTION_FAILED = 'DATA_EXTRACTION_FAILED'
+    PROGRESS_UPDATE_FAILED = 'PROGRESS_UPDATE_FAILED'
+    METRICS_RECORDING_FAILED = 'METRICS_RECORDING_FAILED'
+    METRICS_UPDATE_FAILED = 'METRICS_UPDATE_FAILED'
+    METRICS_FINALIZATION_FAILED = 'METRICS_FINALIZATION_FAILED'
+
+
+LOG_MESSAGES = {
+    LogCode.INVALID_PROJECT_RESPONSE: {
+        'message': 'Invalid project response received.',
+        'level': Context.DANGER,
+    },
+    LogCode.NO_DATA_COLLECTION: {
+        'message': 'Project does not have a data collection.',
+        'level': Context.DANGER,
+    },
+    LogCode.INVALID_DATA_COLLECTION_RESPONSE: {
+        'message': 'Invalid data collection response received.',
+        'level': Context.DANGER,
+    },
+    LogCode.NO_TASKS_FOUND: {
+        'message': 'Tasks to annotate not found.',
+        'level': Context.WARNING,
+    },
+    LogCode.TARGET_SPEC_REQUIRED: {
+        'message': 'Target specification name is required for file annotation method.',
+        'level': Context.DANGER,
+    },
+    LogCode.TARGET_SPEC_NOT_FOUND: {
+        'message': 'Target specification name "{}" not found in file specifications',
+        'level': Context.DANGER,
+    },
+    LogCode.UNSUPPORTED_METHOD: {
+        'message': 'Unsupported annotation method: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.ANNOTATING_DATA: {
+        'message': 'Annotating data to tasks...',
+        'level': None,
+    },
+    LogCode.CRITICAL_ERROR: {
+        'message': 'Critical error occurred while processing task. Stopping the job.',
+        'level': Context.DANGER,
+    },
+    LogCode.TASK_PROCESSING_FAILED: {
+        'message': 'Failed to process task {}: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.ANNOTATION_COMPLETED: {
+        'message': 'Annotation completed. Success: {}, Failed: {}',
+        'level': None,
+    },
+    LogCode.INVALID_TASK_RESPONSE: {
+        'message': 'Invalid task response received for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.TARGET_SPEC_REQUIRED_FOR_TASK: {
+        'message': 'Target specification name is required for file annotation method for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.UNSUPPORTED_METHOD_FOR_TASK: {
+        'message': 'Unsupported annotation method: {} for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.PRIMARY_IMAGE_URL_NOT_FOUND: {
+        'message': 'Primary image URL not found in task data for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.FILE_SPEC_NOT_FOUND: {
+        'message': 'File specification not found for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.FILE_ORIGINAL_NAME_NOT_FOUND: {
+        'message': 'File original name not found for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.URL_NOT_FOUND: {
+        'message': 'URL not found for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.FETCH_DATA_FAILED: {
+        'message': 'Failed to fetch data from URL: {} for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.CONVERT_DATA_FAILED: {
+        'message': 'Failed to convert data to task object: {} for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.PREPROCESSOR_ID_REQUIRED: {
+        'message': 'Pre-processor ID is required for inference annotation method for task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.INFERENCE_PROCESSING_FAILED: {
+        'message': 'Failed to process inference for task {}: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.ANNOTATING_INFERENCE_DATA: {
+        'message': 'Annotating data to tasks using inference...',
+        'level': None,
+    },
+    LogCode.INFERENCE_ANNOTATION_COMPLETED: {
+        'message': 'Inference annotation completed. Success: {}, Failed: {}',
+        'level': None,
+    },
+    LogCode.INFERENCE_PREPROCESSOR_FAILED: {
+        'message': 'Inference pre-processor failed for task {}: {}',
+        'level': Context.DANGER,
+    },
+    # Orchestrator workflow messages
+    LogCode.TO_TASK_STARTED: {
+        'message': 'ToTask action started.',
+        'level': Context.INFO,
+    },
+    LogCode.TO_TASK_COMPLETED: {
+        'message': 'ToTask action completed successfully.',
+        'level': Context.SUCCESS,
+    },
+    LogCode.TO_TASK_FAILED: {
+        'message': 'ToTask action failed: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.STEP_STARTED: {
+        'message': 'Starting workflow step: {}',
+        'level': Context.INFO,
+    },
+    LogCode.STEP_COMPLETED: {
+        'message': 'Completed workflow step: {}',
+        'level': Context.INFO,
+    },
+    LogCode.STEP_FAILED: {
+        'message': 'Failed workflow step {}: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.ROLLBACK_FAILED: {
+        'message': 'Failed to rollback step {}: {}',
+        'level': Context.WARNING,
+    },
+    LogCode.ROLLBACK_ACTION_FAILED: {
+        'message': 'Failed to execute rollback action: {}',
+        'level': Context.WARNING,
+    },
+    # Additional strategy messages
+    LogCode.VALIDATION_FAILED: {
+        'message': 'Validation failed: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.NO_DATA_UNIT: {
+        'message': 'Task does not have a data unit',
+        'level': Context.DANGER,
+    },
+    LogCode.NO_DATA_UNIT_FILES: {
+        'message': 'Data unit does not have files',
+        'level': Context.DANGER,
+    },
+    LogCode.TARGET_SPEC_URL_NOT_FOUND: {
+        'message': 'Target specification URL not found for {} in task {}',
+        'level': Context.DANGER,
+    },
+    LogCode.DATA_DOWNLOAD_FAILED: {
+        'message': 'Failed to download data for task {}: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.JSON_DECODE_FAILED: {
+        'message': 'Failed to decode JSON data for task {}: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.ANNOTATION_SUBMISSION_FAILED: {
+        'message': 'Failed to submit annotation data for task {}: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.NO_PREPROCESSOR_ID: {
+        'message': 'Pre-processor ID is required for inference annotation method',
+        'level': Context.DANGER,
+    },
+    LogCode.DATA_EXTRACTION_FAILED: {
+        'message': 'Data extraction failed: {}',
+        'level': Context.DANGER,
+    },
+    LogCode.PROGRESS_UPDATE_FAILED: {
+        'message': 'Progress update failed: {}',
+        'level': Context.WARNING,
+    },
+    LogCode.METRICS_RECORDING_FAILED: {
+        'message': 'Metrics recording failed: {}',
+        'level': Context.WARNING,
+    },
+    LogCode.METRICS_UPDATE_FAILED: {
+        'message': 'Metrics update failed: {}',
+        'level': Context.WARNING,
+    },
+    LogCode.METRICS_FINALIZATION_FAILED: {
+        'message': 'Metrics finalization failed: {}',
+        'level': Context.WARNING,
+    },
+}
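A short sketch of how a logger might resolve a code through LOG_MESSAGES; the table and enum come from the diff, but the lookup shown here is an assumed usage, not an SDK helper:

    from synapse_sdk.plugins.categories.pre_annotation.actions.to_task.enums import (
        LOG_MESSAGES,
        LogCode,
    )

    # Each entry pairs a message template with a Context level (None = default).
    entry = LOG_MESSAGES[LogCode.TASK_PROCESSING_FAILED]
    print(entry['message'].format(42, 'download timed out'))
    # -> Failed to process task 42: download timed out
    print(entry['level'])  # Context.DANGER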

synapse_sdk/plugins/categories/pre_annotation/actions/to_task/exceptions.py

@@ -0,0 +1,14 @@
+class CriticalError(Exception):
+    """Critical error."""
+
+    def __init__(self, message: str = 'Critical error occurred while processing task'):
+        self.message = message
+        super().__init__(self.message)
+
+
+class PreAnnotationToTaskFailed(Exception):
+    """Pre-annotation to task failed."""
+
+    def __init__(self, message: str = 'Pre-annotation to task failed'):
+        self.message = message
+        super().__init__(self.message)
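A brief usage sketch, assuming only what the diff shows: CriticalError marks a stop-the-job failure during task processing, while PreAnnotationToTaskFailed is what ToTaskAction.start() raises outward on any failure:

    from synapse_sdk.plugins.categories.pre_annotation.actions.to_task.exceptions import (
        CriticalError,
        PreAnnotationToTaskFailed,
    )

    try:
        raise CriticalError()  # falls back to the default constructor message
    except CriticalError as exc:
        print(exc.message)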