synapse-sdk 1.0.0b5__py3-none-any.whl → 2025.12.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- synapse_sdk/__init__.py +24 -0
- synapse_sdk/cli/code_server.py +305 -33
- synapse_sdk/clients/agent/__init__.py +2 -1
- synapse_sdk/clients/agent/container.py +143 -0
- synapse_sdk/clients/agent/ray.py +296 -38
- synapse_sdk/clients/backend/annotation.py +1 -1
- synapse_sdk/clients/backend/core.py +31 -4
- synapse_sdk/clients/backend/data_collection.py +82 -7
- synapse_sdk/clients/backend/hitl.py +1 -1
- synapse_sdk/clients/backend/ml.py +1 -1
- synapse_sdk/clients/base.py +211 -61
- synapse_sdk/loggers.py +46 -0
- synapse_sdk/plugins/README.md +1340 -0
- synapse_sdk/plugins/categories/base.py +59 -9
- synapse_sdk/plugins/categories/export/actions/__init__.py +3 -0
- synapse_sdk/plugins/categories/export/actions/export/__init__.py +28 -0
- synapse_sdk/plugins/categories/export/actions/export/action.py +165 -0
- synapse_sdk/plugins/categories/export/actions/export/enums.py +113 -0
- synapse_sdk/plugins/categories/export/actions/export/exceptions.py +53 -0
- synapse_sdk/plugins/categories/export/actions/export/models.py +74 -0
- synapse_sdk/plugins/categories/export/actions/export/run.py +195 -0
- synapse_sdk/plugins/categories/export/actions/export/utils.py +187 -0
- synapse_sdk/plugins/categories/export/templates/config.yaml +19 -1
- synapse_sdk/plugins/categories/export/templates/plugin/__init__.py +390 -0
- synapse_sdk/plugins/categories/export/templates/plugin/export.py +153 -177
- synapse_sdk/plugins/categories/neural_net/actions/train.py +1130 -32
- synapse_sdk/plugins/categories/neural_net/actions/tune.py +157 -4
- synapse_sdk/plugins/categories/neural_net/templates/config.yaml +7 -4
- synapse_sdk/plugins/categories/pre_annotation/actions/__init__.py +4 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/pre_annotation/__init__.py +3 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/pre_annotation/action.py +10 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/__init__.py +28 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/action.py +148 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/enums.py +269 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/exceptions.py +14 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/factory.py +76 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/models.py +100 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/orchestrator.py +248 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/run.py +64 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/__init__.py +17 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/annotation.py +265 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/base.py +170 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/extraction.py +83 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/metrics.py +92 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/preprocessor.py +243 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/validation.py +143 -0
- synapse_sdk/plugins/categories/upload/actions/upload/__init__.py +19 -0
- synapse_sdk/plugins/categories/upload/actions/upload/action.py +236 -0
- synapse_sdk/plugins/categories/upload/actions/upload/context.py +185 -0
- synapse_sdk/plugins/categories/upload/actions/upload/enums.py +493 -0
- synapse_sdk/plugins/categories/upload/actions/upload/exceptions.py +36 -0
- synapse_sdk/plugins/categories/upload/actions/upload/factory.py +138 -0
- synapse_sdk/plugins/categories/upload/actions/upload/models.py +214 -0
- synapse_sdk/plugins/categories/upload/actions/upload/orchestrator.py +183 -0
- synapse_sdk/plugins/categories/upload/actions/upload/registry.py +113 -0
- synapse_sdk/plugins/categories/upload/actions/upload/run.py +179 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/base.py +107 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/cleanup.py +62 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/collection.py +63 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/generate.py +91 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/initialize.py +82 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/metadata.py +235 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/organize.py +201 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/upload.py +104 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/validate.py +71 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/base.py +82 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/data_unit/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/data_unit/batch.py +39 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/data_unit/single.py +29 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/file_discovery/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/file_discovery/flat.py +300 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/file_discovery/recursive.py +287 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/metadata/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/metadata/excel.py +174 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/metadata/none.py +16 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/upload/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/upload/sync.py +84 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/validation/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/validation/default.py +60 -0
- synapse_sdk/plugins/categories/upload/actions/upload/utils.py +250 -0
- synapse_sdk/plugins/categories/upload/templates/README.md +470 -0
- synapse_sdk/plugins/categories/upload/templates/config.yaml +28 -2
- synapse_sdk/plugins/categories/upload/templates/plugin/__init__.py +310 -0
- synapse_sdk/plugins/categories/upload/templates/plugin/upload.py +82 -20
- synapse_sdk/plugins/models.py +111 -9
- synapse_sdk/plugins/templates/plugin-config-schema.json +7 -0
- synapse_sdk/plugins/templates/schema.json +7 -0
- synapse_sdk/plugins/utils/__init__.py +3 -0
- synapse_sdk/plugins/utils/ray_gcs.py +66 -0
- synapse_sdk/shared/__init__.py +25 -0
- synapse_sdk/utils/converters/dm/__init__.py +42 -41
- synapse_sdk/utils/converters/dm/base.py +137 -0
- synapse_sdk/utils/converters/dm/from_v1.py +208 -562
- synapse_sdk/utils/converters/dm/to_v1.py +258 -304
- synapse_sdk/utils/converters/dm/tools/__init__.py +214 -0
- synapse_sdk/utils/converters/dm/tools/answer.py +95 -0
- synapse_sdk/utils/converters/dm/tools/bounding_box.py +132 -0
- synapse_sdk/utils/converters/dm/tools/bounding_box_3d.py +121 -0
- synapse_sdk/utils/converters/dm/tools/classification.py +75 -0
- synapse_sdk/utils/converters/dm/tools/keypoint.py +117 -0
- synapse_sdk/utils/converters/dm/tools/named_entity.py +111 -0
- synapse_sdk/utils/converters/dm/tools/polygon.py +122 -0
- synapse_sdk/utils/converters/dm/tools/polyline.py +124 -0
- synapse_sdk/utils/converters/dm/tools/prompt.py +94 -0
- synapse_sdk/utils/converters/dm/tools/relation.py +86 -0
- synapse_sdk/utils/converters/dm/tools/segmentation.py +141 -0
- synapse_sdk/utils/converters/dm/tools/segmentation_3d.py +83 -0
- synapse_sdk/utils/converters/dm/types.py +168 -0
- synapse_sdk/utils/converters/dm/utils.py +162 -0
- synapse_sdk/utils/converters/dm_legacy/__init__.py +56 -0
- synapse_sdk/utils/converters/dm_legacy/from_v1.py +627 -0
- synapse_sdk/utils/converters/dm_legacy/to_v1.py +367 -0
- synapse_sdk/utils/file/__init__.py +58 -0
- synapse_sdk/utils/file/archive.py +32 -0
- synapse_sdk/utils/file/checksum.py +56 -0
- synapse_sdk/utils/file/chunking.py +31 -0
- synapse_sdk/utils/file/download.py +385 -0
- synapse_sdk/utils/file/encoding.py +40 -0
- synapse_sdk/utils/file/io.py +22 -0
- synapse_sdk/utils/file/upload.py +165 -0
- synapse_sdk/utils/file/video/__init__.py +29 -0
- synapse_sdk/utils/file/video/transcode.py +307 -0
- synapse_sdk/utils/{file.py → file.py.backup} +77 -0
- synapse_sdk/utils/network.py +272 -0
- synapse_sdk/utils/storage/__init__.py +6 -2
- synapse_sdk/utils/storage/providers/file_system.py +6 -0
- {synapse_sdk-1.0.0b5.dist-info → synapse_sdk-2025.12.3.dist-info}/METADATA +19 -2
- {synapse_sdk-1.0.0b5.dist-info → synapse_sdk-2025.12.3.dist-info}/RECORD +134 -74
- synapse_sdk/devtools/docs/.gitignore +0 -20
- synapse_sdk/devtools/docs/README.md +0 -41
- synapse_sdk/devtools/docs/blog/2019-05-28-first-blog-post.md +0 -12
- synapse_sdk/devtools/docs/blog/2019-05-29-long-blog-post.md +0 -44
- synapse_sdk/devtools/docs/blog/2021-08-01-mdx-blog-post.mdx +0 -24
- synapse_sdk/devtools/docs/blog/2021-08-26-welcome/docusaurus-plushie-banner.jpeg +0 -0
- synapse_sdk/devtools/docs/blog/2021-08-26-welcome/index.md +0 -29
- synapse_sdk/devtools/docs/blog/authors.yml +0 -25
- synapse_sdk/devtools/docs/blog/tags.yml +0 -19
- synapse_sdk/devtools/docs/docusaurus.config.ts +0 -138
- synapse_sdk/devtools/docs/package-lock.json +0 -17455
- synapse_sdk/devtools/docs/package.json +0 -47
- synapse_sdk/devtools/docs/sidebars.ts +0 -44
- synapse_sdk/devtools/docs/src/components/HomepageFeatures/index.tsx +0 -71
- synapse_sdk/devtools/docs/src/components/HomepageFeatures/styles.module.css +0 -11
- synapse_sdk/devtools/docs/src/css/custom.css +0 -30
- synapse_sdk/devtools/docs/src/pages/index.module.css +0 -23
- synapse_sdk/devtools/docs/src/pages/index.tsx +0 -21
- synapse_sdk/devtools/docs/src/pages/markdown-page.md +0 -7
- synapse_sdk/devtools/docs/static/.nojekyll +0 -0
- synapse_sdk/devtools/docs/static/img/docusaurus-social-card.jpg +0 -0
- synapse_sdk/devtools/docs/static/img/docusaurus.png +0 -0
- synapse_sdk/devtools/docs/static/img/favicon.ico +0 -0
- synapse_sdk/devtools/docs/static/img/logo.png +0 -0
- synapse_sdk/devtools/docs/static/img/undraw_docusaurus_mountain.svg +0 -171
- synapse_sdk/devtools/docs/static/img/undraw_docusaurus_react.svg +0 -170
- synapse_sdk/devtools/docs/static/img/undraw_docusaurus_tree.svg +0 -40
- synapse_sdk/devtools/docs/tsconfig.json +0 -8
- synapse_sdk/plugins/categories/export/actions/export.py +0 -346
- synapse_sdk/plugins/categories/export/enums.py +0 -7
- synapse_sdk/plugins/categories/neural_net/actions/gradio.py +0 -151
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task.py +0 -943
- synapse_sdk/plugins/categories/upload/actions/upload.py +0 -954
- {synapse_sdk-1.0.0b5.dist-info → synapse_sdk-2025.12.3.dist-info}/WHEEL +0 -0
- {synapse_sdk-1.0.0b5.dist-info → synapse_sdk-2025.12.3.dist-info}/entry_points.txt +0 -0
- {synapse_sdk-1.0.0b5.dist-info → synapse_sdk-2025.12.3.dist-info}/licenses/LICENSE +0 -0
- {synapse_sdk-1.0.0b5.dist-info → synapse_sdk-2025.12.3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Segmentation Tool Processor (Image/Video Unified)
|
|
3
|
+
|
|
4
|
+
Created: 2025-12-12
|
|
5
|
+
|
|
6
|
+
Conversion Rules (see data-model.md 9.3, 9.4):
|
|
7
|
+
|
|
8
|
+
Image Segmentation:
|
|
9
|
+
V1 → V2:
|
|
10
|
+
- pixel_indices [...] → data [...]
|
|
11
|
+
|
|
12
|
+
V2 → V1:
|
|
13
|
+
- data [...] → pixel_indices [...]
|
|
14
|
+
|
|
15
|
+
Video Segmentation:
|
|
16
|
+
V1 → V2:
|
|
17
|
+
- section {startFrame, endFrame} → data {startFrame, endFrame}
|
|
18
|
+
|
|
19
|
+
V2 → V1:
|
|
20
|
+
- data {startFrame, endFrame} → section {startFrame, endFrame}
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
from typing import Any
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class SegmentationProcessor:
    """Convert segmentation annotations between DM schema V1 and V2.

    A single processor handles both image and video segmentation:

    Image segmentation:
        V1 ``pixel_indices``: ``[int, ...]``  <->  V2 ``data``: ``[int, ...]``

    Video segmentation:
        V1 ``section``: ``{startFrame, endFrame}``  <->  V2 ``data``: ``{startFrame, endFrame}``

    The two variants are told apart by the shape of the payload:
    a ``list`` means image (pixel indices), a ``dict`` means video (frame section).
    """

    tool_name = 'segmentation'

    # V1 meta fields that are never copied into V2 attrs.
    _META_FIELDS = {'isLocked', 'isVisible', 'isValid', 'isDrawCompleted', 'label', 'id', 'tool'}

    # Attrs whose name starts with this prefix are internal and are not
    # restored into the V1 classification object.
    _INTERNAL_ATTR_PREFIX = '_'

    def to_v2(self, v1_annotation: dict[str, Any], v1_data: dict[str, Any]) -> dict[str, Any]:
        """Convert a V1 segmentation annotation to V2.

        Args:
            v1_annotation: V1 annotations[] item (meta information).
            v1_data: V1 annotationsData[] item with the same ID.

        Returns:
            V2 format segmentation annotation.
        """
        src_classification = v1_annotation.get('classification') or {}

        # The payload key decides the media kind.
        if 'pixel_indices' in v1_data:
            # Image segmentation: the pixel index list is carried over as-is.
            payload = v1_data.get('pixel_indices', [])
        elif 'section' in v1_data:
            # Video segmentation: keep only the frame range.
            frame_range = v1_data.get('section', {})
            payload = {
                'startFrame': frame_range.get('startFrame', 0),
                'endFrame': frame_range.get('endFrame', 0),
            }
        else:
            # Neither key present: fall back to an empty pixel index list.
            payload = []

        # Every classification property except 'class' becomes a V2 attr.
        extra_attrs: list[dict[str, Any]] = [
            {'name': key, 'value': value}
            for key, value in src_classification.items()
            if key != 'class'
        ]

        return {
            'id': v1_annotation.get('id', ''),
            'classification': src_classification.get('class', ''),
            'attrs': extra_attrs,
            'data': payload,
        }

    def to_v1(self, v2_annotation: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
        """Convert a V2 segmentation annotation back to V1.

        Args:
            v2_annotation: V2 annotation object.

        Returns:
            ``(v1_annotation, v1_data)`` tuple: meta information and
            coordinate data sharing the same ID.
        """
        ann_id = v2_annotation.get('id', '')
        payload = v2_annotation.get('data', [])

        # Rebuild the V1 classification: the class label plus every
        # non-internal attr (names with a '_' prefix are skipped).
        classification: dict[str, Any] = {'class': v2_annotation.get('classification', '')}
        classification.update({
            attr.get('name', ''): attr.get('value')
            for attr in v2_annotation.get('attrs', [])
            if not attr.get('name', '').startswith(self._INTERNAL_ATTR_PREFIX)
        })

        # V1 annotation (meta info).
        v1_annotation: dict[str, Any] = {
            'id': ann_id,
            'tool': self.tool_name,
            'classification': classification,
        }

        # V1 annotationData (coordinate info) — the payload shape again
        # decides the media kind.
        v1_data: dict[str, Any] = {'id': ann_id}
        if isinstance(payload, list):
            # Image segmentation.
            v1_data['pixel_indices'] = payload
        elif isinstance(payload, dict):
            # Video segmentation.
            v1_data['section'] = {
                'startFrame': payload.get('startFrame', 0),
                'endFrame': payload.get('endFrame', 0),
            }
        else:
            # Unknown payload type: default to an empty pixel index list.
            v1_data['pixel_indices'] = []

        return v1_annotation, v1_data
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
"""
|
|
2
|
+
3D Segmentation Tool Processor
|
|
3
|
+
|
|
4
|
+
Created: 2025-12-12
|
|
5
|
+
|
|
6
|
+
Conversion Rules (see data-model.md 9.7):
|
|
7
|
+
V1 → V2:
|
|
8
|
+
- points [...] → data {points: [...]}
|
|
9
|
+
- classification.class → classification
|
|
10
|
+
- classification.{other} → attrs[{name, value}]
|
|
11
|
+
|
|
12
|
+
V2 → V1:
|
|
13
|
+
- data {points: [...]} → points [...]
|
|
14
|
+
- classification → classification.class
|
|
15
|
+
- attrs[{name, value}] → classification.{name: value}
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
from typing import Any
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class Segmentation3DProcessor:
    """Convert 3D segmentation annotations between DM schema V1 and V2.

    V1 annotationData: ``{points: list[int]}``
    V2 data: ``{points: list[int]}``

    Used with the pcd media type.
    """

    tool_name = '3d_segmentation'

    # V1 meta fields that are never copied into V2 attrs.
    _META_FIELDS = {'isLocked', 'isVisible', 'isValid', 'isDrawCompleted', 'label', 'id', 'tool'}
    # Attr names with this prefix are internal and not restored to V1.
    _INTERNAL_ATTR_PREFIX = '_'

    def to_v2(self, v1_annotation: dict[str, Any], v1_data: dict[str, Any]) -> dict[str, Any]:
        """Convert a V1 3D segmentation annotation to V2."""
        src_classification = v1_annotation.get('classification') or {}

        # Every classification property except 'class' becomes a V2 attr.
        extra_attrs: list[dict[str, Any]] = [
            {'name': key, 'value': value}
            for key, value in src_classification.items()
            if key != 'class'
        ]

        return {
            'id': v1_annotation.get('id', ''),
            'classification': src_classification.get('class', ''),
            'attrs': extra_attrs,
            # V2 carries the point indices under a 'points' key.
            'data': {'points': v1_data.get('points', [])},
        }

    def to_v1(self, v2_annotation: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
        """Convert a V2 3D segmentation annotation back to V1."""
        ann_id = v2_annotation.get('id', '')
        payload = v2_annotation.get('data', {})

        # Rebuild the V1 classification: class label plus every
        # non-internal attr (names with a '_' prefix are skipped).
        classification: dict[str, Any] = {'class': v2_annotation.get('classification', '')}
        classification.update({
            attr.get('name', ''): attr.get('value')
            for attr in v2_annotation.get('attrs', [])
            if not attr.get('name', '').startswith(self._INTERNAL_ATTR_PREFIX)
        })

        v1_annotation: dict[str, Any] = {
            'id': ann_id,
            'tool': self.tool_name,
            'classification': classification,
        }

        v1_data: dict[str, Any] = {
            'id': ann_id,
            'points': payload.get('points', []),
        }

        return v1_annotation, v1_data
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""
|
|
2
|
+
DM Schema V1/V2 Converter Type Definitions
|
|
3
|
+
|
|
4
|
+
Created: 2025-12-11
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Any, TypedDict
|
|
8
|
+
|
|
9
|
+
# =============================================================================
|
|
10
|
+
# V1 Type Definitions
|
|
11
|
+
# =============================================================================
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class BoundingBoxCoordinate(TypedDict, total=False):
|
|
15
|
+
"""V1 Bounding Box Coordinate"""
|
|
16
|
+
|
|
17
|
+
x: float
|
|
18
|
+
y: float
|
|
19
|
+
width: float
|
|
20
|
+
height: float
|
|
21
|
+
rotation: float # In radians, optional
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class PolygonPoint(TypedDict):
|
|
25
|
+
"""V1 Polygon Individual Point"""
|
|
26
|
+
|
|
27
|
+
x: float
|
|
28
|
+
y: float
|
|
29
|
+
id: str
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# V1 Polygon Coordinate Type
|
|
33
|
+
PolygonCoordinate = list[PolygonPoint]
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class AnnotationBase(TypedDict, total=False):
|
|
37
|
+
"""V1 Annotation Meta Information"""
|
|
38
|
+
|
|
39
|
+
id: str # 10-character random string
|
|
40
|
+
tool: str # Tool code (bounding_box, polygon, etc.)
|
|
41
|
+
isLocked: bool # Edit lock (default: False)
|
|
42
|
+
isVisible: bool # Display visibility (default: True)
|
|
43
|
+
isValid: bool # Validity (default: False)
|
|
44
|
+
isDrawCompleted: bool # Drawing completed
|
|
45
|
+
classification: dict[str, Any] | None # Classification info
|
|
46
|
+
label: list[str] # Label array
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class AnnotationDataItem(TypedDict, total=False):
|
|
50
|
+
"""V1 Annotation Coordinate Data"""
|
|
51
|
+
|
|
52
|
+
id: str # Matches AnnotationBase.id
|
|
53
|
+
coordinate: BoundingBoxCoordinate | PolygonCoordinate | Any
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class AnnotatorDataV1(TypedDict, total=False):
|
|
57
|
+
"""DM Schema V1 Top-Level Structure"""
|
|
58
|
+
|
|
59
|
+
extra: dict[str, Any] # Per-media metadata
|
|
60
|
+
annotations: dict[str, list[AnnotationBase]] # Annotation meta
|
|
61
|
+
annotationsData: dict[str, list[AnnotationDataItem]] # Coordinate data
|
|
62
|
+
relations: dict[str, list[Any]] # Relations
|
|
63
|
+
annotationGroups: dict[str, list[Any]] # Groups
|
|
64
|
+
assignmentId: int | str | None # Task identifier
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
# =============================================================================
|
|
68
|
+
# V2 Type Definitions
|
|
69
|
+
# =============================================================================
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class V2Attr(TypedDict):
|
|
73
|
+
"""V2 Attribute Object"""
|
|
74
|
+
|
|
75
|
+
name: str
|
|
76
|
+
value: Any
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class V2Annotation(TypedDict, total=False):
|
|
80
|
+
"""V2 Annotation Common Structure"""
|
|
81
|
+
|
|
82
|
+
id: str # Unique identifier (10 chars, alphanumeric)
|
|
83
|
+
classification: str # Class label
|
|
84
|
+
attrs: list[V2Attr] # Additional attributes array
|
|
85
|
+
data: Any # Tool-specific data (type varies)
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class V2MediaItem(TypedDict, total=False):
|
|
89
|
+
"""V2 Media Item (annotation arrays by tool)"""
|
|
90
|
+
|
|
91
|
+
bounding_box: list[V2Annotation]
|
|
92
|
+
polygon: list[V2Annotation]
|
|
93
|
+
polyline: list[V2Annotation]
|
|
94
|
+
keypoint: list[V2Annotation]
|
|
95
|
+
# Other tools can be added as needed
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
class V2AnnotationData(TypedDict, total=False):
|
|
99
|
+
"""V2 Common Annotation Structure (annotation_data)"""
|
|
100
|
+
|
|
101
|
+
classification: dict[str, list[str]] # Class labels by tool
|
|
102
|
+
images: list[V2MediaItem] # Image media
|
|
103
|
+
videos: list[V2MediaItem] # Video media
|
|
104
|
+
pcds: list[V2MediaItem] # PCD media
|
|
105
|
+
texts: list[V2MediaItem] # Text media
|
|
106
|
+
audios: list[V2MediaItem] # Audio media
|
|
107
|
+
prompts: list[V2MediaItem] # Prompt media
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
class AnnotationMeta(TypedDict, total=False):
|
|
111
|
+
"""V1 Top-Level Structure Preserved (annotation_meta)
|
|
112
|
+
|
|
113
|
+
V1 top-level structure preserved during V1→V2 conversion.
|
|
114
|
+
Combined with annotation_data for complete V1 restoration during V2→V1 conversion.
|
|
115
|
+
"""
|
|
116
|
+
|
|
117
|
+
extra: dict[str, Any] # Per-media metadata
|
|
118
|
+
annotations: dict[str, list[AnnotationBase]] # Annotation meta
|
|
119
|
+
annotationsData: dict[str, list[AnnotationDataItem]] # Coordinate data
|
|
120
|
+
relations: dict[str, list[Any]] # Relations
|
|
121
|
+
annotationGroups: dict[str, list[Any]] # Groups
|
|
122
|
+
assignmentId: int | str | None # Task identifier
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
class V2ConversionResult(TypedDict):
|
|
126
|
+
"""V1→V2 Conversion Result (separated structure)
|
|
127
|
+
|
|
128
|
+
V1→V2 conversion result is separated into two parts:
|
|
129
|
+
- annotation_data: V2 common annotation structure (id, classification, attrs, data)
|
|
130
|
+
- annotation_meta: V1 top-level structure preserved
|
|
131
|
+
(extra, annotations, annotationsData, relations, annotationGroups, assignmentId)
|
|
132
|
+
|
|
133
|
+
V2→V1 conversion:
|
|
134
|
+
- If both parts exist, complete V1 restoration is possible
|
|
135
|
+
- If only annotation_data exists, convert to V1 using defaults
|
|
136
|
+
"""
|
|
137
|
+
|
|
138
|
+
annotation_data: V2AnnotationData
|
|
139
|
+
annotation_meta: AnnotationMeta
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
# =============================================================================
|
|
143
|
+
# Bounding Box V2 Data Types
|
|
144
|
+
# =============================================================================
|
|
145
|
+
|
|
146
|
+
# V2 Bounding Box data: [x, y, width, height]
|
|
147
|
+
BoundingBoxData = list[float]
|
|
148
|
+
|
|
149
|
+
# V2 Polygon data: [[x1, y1], [x2, y2], ...]
|
|
150
|
+
PolygonData = list[list[float]]
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
# =============================================================================
|
|
154
|
+
# Media Type Constants
|
|
155
|
+
# =============================================================================
|
|
156
|
+
|
|
157
|
+
SUPPORTED_FILE_TYPES = ('image', 'video', 'pcd', 'text', 'audio', 'prompt')
|
|
158
|
+
|
|
159
|
+
MEDIA_TYPE_MAP = {
|
|
160
|
+
'image': 'images',
|
|
161
|
+
'video': 'videos',
|
|
162
|
+
'pcd': 'pcds',
|
|
163
|
+
'text': 'texts',
|
|
164
|
+
'audio': 'audios',
|
|
165
|
+
'prompt': 'prompts',
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
MEDIA_TYPE_REVERSE_MAP = {v: k for k, v in MEDIA_TYPE_MAP.items()}
|
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
"""
|
|
2
|
+
DM Schema V1/V2 Converter Utility Functions
|
|
3
|
+
|
|
4
|
+
Created: 2025-12-11
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import random
|
|
8
|
+
import string
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from .types import MEDIA_TYPE_MAP, MEDIA_TYPE_REVERSE_MAP
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def generate_random_id(length: int = 10) -> str:
    """Generate a random ID compatible with the V1 format.

    Args:
        length: ID length (default: 10).

    Returns:
        Random alphanumeric string.

    Example:
        >>> generate_random_id()  # doctest: +SKIP
        'Cd1qfFQFI4'
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def extract_media_type_info(media_id: str) -> tuple[str, str]:
    """Extract type information from a media ID.

    Args:
        media_id: Media ID (e.g., 'image_1', 'video_2').

    Returns:
        ``(singular, plural)`` tuple (e.g., ``('image', 'images')``).

    Raises:
        ValueError: Unknown media type.

    Example:
        >>> extract_media_type_info('image_1')
        ('image', 'images')
        >>> extract_media_type_info('video_2')
        ('video', 'videos')
    """
    # A media ID is prefixed with its singular media type ('image_1' -> 'image').
    matched = next((kind for kind in MEDIA_TYPE_MAP if media_id.startswith(kind)), None)
    if matched is None:
        raise ValueError(f'Unknown media type: {media_id}')
    return matched, MEDIA_TYPE_MAP[matched]
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def detect_file_type(data: dict[str, Any], is_v2: bool = False) -> str:
    """Auto-detect the file type from V1 or V2 data.

    Args:
        data: Input data (V1 or V2).
        is_v2: Whether the format is V2.

    Returns:
        Detected file type ('image', 'video', ...).

    Raises:
        ValueError: Unable to detect the file type.
    """
    if is_v2:
        # V2: look for the first non-empty plural media key, either at the
        # top level or nested under 'annotation_data'.
        candidate = data.get('annotation_data', data)
        for plural in MEDIA_TYPE_REVERSE_MAP:
            if candidate.get(plural):
                return MEDIA_TYPE_REVERSE_MAP[plural]
    else:
        # V1: derive the type from the first media ID found in the
        # 'annotations' or 'annotationsData' mapping.
        for section in ('annotations', 'annotationsData'):
            mapping = data.get(section)
            if mapping:
                media_id = next(iter(mapping), None)
                if media_id:
                    singular, _ = extract_media_type_info(media_id)
                    return singular

    raise ValueError('Unable to detect file type')
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def get_attr_value(attrs: list[dict[str, Any]], name: str, default: Any = None) -> Any:
    """Extract the value for a given name from an attrs list.

    Args:
        attrs: V2 attrs list ``[{"name": "...", "value": ...}, ...]``.
        name: Attribute name to look up.
        default: Value returned when the name is absent.

    Returns:
        The first matching value, or *default*.
    """
    return next(
        (attr.get('value', default) for attr in attrs if attr.get('name') == name),
        default,
    )
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def set_attr_value(attrs: list[dict[str, Any]], name: str, value: Any) -> list[dict[str, Any]]:
    """Add or update an attribute in an attrs list (in place).

    Args:
        attrs: V2 attrs list.
        name: Attribute name.
        value: Attribute value.

    Returns:
        The same attrs list, updated.
    """
    existing = next((attr for attr in attrs if attr.get('name') == name), None)
    if existing is None:
        # Not present yet: append a new entry.
        attrs.append({'name': name, 'value': value})
    else:
        # Present: overwrite the first matching entry's value.
        existing['value'] = value
    return attrs
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def build_v1_annotation_base(
|
|
129
|
+
annotation_id: str,
|
|
130
|
+
tool: str,
|
|
131
|
+
classification: dict[str, Any] | None = None,
|
|
132
|
+
is_locked: bool = False,
|
|
133
|
+
is_visible: bool = True,
|
|
134
|
+
is_valid: bool = False,
|
|
135
|
+
is_draw_completed: bool = True,
|
|
136
|
+
label: list[str] | None = None,
|
|
137
|
+
) -> dict[str, Any]:
|
|
138
|
+
"""Create V1 AnnotationBase object
|
|
139
|
+
|
|
140
|
+
Args:
|
|
141
|
+
annotation_id: Annotation ID
|
|
142
|
+
tool: Tool code
|
|
143
|
+
classification: Classification info
|
|
144
|
+
is_locked: Edit lock
|
|
145
|
+
is_visible: Display visibility
|
|
146
|
+
is_valid: Validity
|
|
147
|
+
is_draw_completed: Drawing completed
|
|
148
|
+
label: Label array
|
|
149
|
+
|
|
150
|
+
Returns:
|
|
151
|
+
V1 AnnotationBase dictionary
|
|
152
|
+
"""
|
|
153
|
+
return {
|
|
154
|
+
'id': annotation_id,
|
|
155
|
+
'tool': tool,
|
|
156
|
+
'isLocked': is_locked,
|
|
157
|
+
'isVisible': is_visible,
|
|
158
|
+
'isValid': is_valid,
|
|
159
|
+
'isDrawCompleted': is_draw_completed,
|
|
160
|
+
'classification': classification,
|
|
161
|
+
'label': label or [],
|
|
162
|
+
}
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
|
|
3
|
+
from synapse_sdk.shared.enums import SupportedTools
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class BaseDMConverter(ABC):
    """Base class for DM format converters.

    Subclasses implement :meth:`convert` and optionally per-tool
    ``_convert_<tool>`` methods, which are discovered dynamically from
    the configured file type.
    """

    SUPPORTED_TOOLS = SupportedTools.get_all_values()

    def __init__(self, file_type=None):
        """Initialize the base converter.

        Args:
            file_type (str, optional): Type of file being converted
                (image, video, pcd, text, audio)
        """
        self.file_type = file_type
        self.tool_processors = self._setup_tool_processors()

    def _setup_tool_processors(self):
        """Setup tool processor mapping dynamically based on file_type.

        Returns:
            dict: Mapping of annotation tool name to the bound
            ``_convert_*`` method; empty when no file_type is set.
        """
        if not self.file_type:
            return {}

        processors = {}
        tools = SupportedTools.get_tools_for_file_type(self.file_type)

        for tool in tools:
            # Processors follow the '_convert_<method_name>' naming
            # convention; tools without a matching method are skipped.
            method_name = f'_convert_{tool.method_name}'

            if hasattr(self, method_name):
                processors[tool.annotation_tool] = getattr(self, method_name)

        return processors

    @abstractmethod
    def convert(self):
        """Convert data from one format to another."""

    def _handle_unknown_tool(self, tool_type, item_id=None):
        """Handle unknown tool types with consistent warning message."""
        warning_msg = f"Warning: Unknown tool type '{tool_type}'"
        if item_id:
            warning_msg += f' for item {item_id}'
        print(warning_msg)

    def _extract_media_type_info(self, media_id):
        """Extract (singular, plural) media type information from a media ID."""
        media_type = media_id.split('_')[0] if '_' in media_id else media_id
        media_type_plural = media_type + 's' if not media_type.endswith('s') else media_type
        return media_type, media_type_plural

    def _singularize_media_type(self, media_type_plural):
        """Convert plural media type to singular.

        Uses ``removesuffix`` so that only a single trailing 's' is
        dropped; the previous ``rstrip('s')`` stripped *every* trailing
        's' character (e.g. a plural ending in 'ss' would be mangled).
        """
        return media_type_plural.removesuffix('s')
|