synapse-sdk 1.0.0a23__py3-none-any.whl → 2025.12.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- synapse_sdk/__init__.py +24 -0
- synapse_sdk/cli/__init__.py +310 -5
- synapse_sdk/cli/alias/__init__.py +22 -0
- synapse_sdk/cli/alias/create.py +36 -0
- synapse_sdk/cli/alias/dataclass.py +31 -0
- synapse_sdk/cli/alias/default.py +16 -0
- synapse_sdk/cli/alias/delete.py +15 -0
- synapse_sdk/cli/alias/list.py +19 -0
- synapse_sdk/cli/alias/read.py +15 -0
- synapse_sdk/cli/alias/update.py +17 -0
- synapse_sdk/cli/alias/utils.py +61 -0
- synapse_sdk/cli/code_server.py +687 -0
- synapse_sdk/cli/config.py +440 -0
- synapse_sdk/cli/devtools.py +90 -0
- synapse_sdk/cli/plugin/__init__.py +33 -0
- synapse_sdk/cli/{create_plugin.py → plugin/create.py} +2 -2
- synapse_sdk/{plugins/cli → cli/plugin}/publish.py +23 -15
- synapse_sdk/clients/agent/__init__.py +9 -3
- synapse_sdk/clients/agent/container.py +143 -0
- synapse_sdk/clients/agent/core.py +19 -0
- synapse_sdk/clients/agent/ray.py +298 -9
- synapse_sdk/clients/backend/__init__.py +30 -12
- synapse_sdk/clients/backend/annotation.py +13 -5
- synapse_sdk/clients/backend/core.py +31 -4
- synapse_sdk/clients/backend/data_collection.py +186 -0
- synapse_sdk/clients/backend/hitl.py +17 -0
- synapse_sdk/clients/backend/integration.py +16 -1
- synapse_sdk/clients/backend/ml.py +5 -1
- synapse_sdk/clients/backend/models.py +78 -0
- synapse_sdk/clients/base.py +384 -41
- synapse_sdk/clients/ray/serve.py +2 -0
- synapse_sdk/clients/validators/collections.py +31 -0
- synapse_sdk/devtools/config.py +94 -0
- synapse_sdk/devtools/server.py +41 -0
- synapse_sdk/devtools/streamlit_app/__init__.py +5 -0
- synapse_sdk/devtools/streamlit_app/app.py +128 -0
- synapse_sdk/devtools/streamlit_app/services/__init__.py +11 -0
- synapse_sdk/devtools/streamlit_app/services/job_service.py +233 -0
- synapse_sdk/devtools/streamlit_app/services/plugin_service.py +236 -0
- synapse_sdk/devtools/streamlit_app/services/serve_service.py +95 -0
- synapse_sdk/devtools/streamlit_app/ui/__init__.py +15 -0
- synapse_sdk/devtools/streamlit_app/ui/config_tab.py +76 -0
- synapse_sdk/devtools/streamlit_app/ui/deployment_tab.py +66 -0
- synapse_sdk/devtools/streamlit_app/ui/http_tab.py +125 -0
- synapse_sdk/devtools/streamlit_app/ui/jobs_tab.py +573 -0
- synapse_sdk/devtools/streamlit_app/ui/serve_tab.py +346 -0
- synapse_sdk/devtools/streamlit_app/ui/status_bar.py +118 -0
- synapse_sdk/devtools/streamlit_app/utils/__init__.py +40 -0
- synapse_sdk/devtools/streamlit_app/utils/json_viewer.py +197 -0
- synapse_sdk/devtools/streamlit_app/utils/log_formatter.py +38 -0
- synapse_sdk/devtools/streamlit_app/utils/styles.py +241 -0
- synapse_sdk/devtools/streamlit_app/utils/ui_components.py +289 -0
- synapse_sdk/devtools/streamlit_app.py +10 -0
- synapse_sdk/loggers.py +120 -9
- synapse_sdk/plugins/README.md +1340 -0
- synapse_sdk/plugins/__init__.py +0 -13
- synapse_sdk/plugins/categories/base.py +117 -11
- synapse_sdk/plugins/categories/data_validation/actions/validation.py +72 -0
- synapse_sdk/plugins/categories/data_validation/templates/plugin/validation.py +33 -5
- synapse_sdk/plugins/categories/export/actions/__init__.py +3 -0
- synapse_sdk/plugins/categories/export/actions/export/__init__.py +28 -0
- synapse_sdk/plugins/categories/export/actions/export/action.py +165 -0
- synapse_sdk/plugins/categories/export/actions/export/enums.py +113 -0
- synapse_sdk/plugins/categories/export/actions/export/exceptions.py +53 -0
- synapse_sdk/plugins/categories/export/actions/export/models.py +74 -0
- synapse_sdk/plugins/categories/export/actions/export/run.py +195 -0
- synapse_sdk/plugins/categories/export/actions/export/utils.py +187 -0
- synapse_sdk/plugins/categories/export/templates/config.yaml +21 -0
- synapse_sdk/plugins/categories/export/templates/plugin/__init__.py +390 -0
- synapse_sdk/plugins/categories/export/templates/plugin/export.py +160 -0
- synapse_sdk/plugins/categories/neural_net/actions/deployment.py +13 -12
- synapse_sdk/plugins/categories/neural_net/actions/train.py +1134 -31
- synapse_sdk/plugins/categories/neural_net/actions/tune.py +534 -0
- synapse_sdk/plugins/categories/neural_net/base/inference.py +1 -1
- synapse_sdk/plugins/categories/neural_net/templates/config.yaml +32 -4
- synapse_sdk/plugins/categories/neural_net/templates/plugin/inference.py +26 -10
- synapse_sdk/plugins/categories/pre_annotation/actions/__init__.py +4 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/pre_annotation/__init__.py +3 -0
- synapse_sdk/plugins/categories/{export/actions/export.py → pre_annotation/actions/pre_annotation/action.py} +4 -4
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/__init__.py +28 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/action.py +148 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/enums.py +269 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/exceptions.py +14 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/factory.py +76 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/models.py +100 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/orchestrator.py +248 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/run.py +64 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/__init__.py +17 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/annotation.py +265 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/base.py +170 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/extraction.py +83 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/metrics.py +92 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/preprocessor.py +243 -0
- synapse_sdk/plugins/categories/pre_annotation/actions/to_task/strategies/validation.py +143 -0
- synapse_sdk/plugins/categories/pre_annotation/templates/config.yaml +19 -0
- synapse_sdk/plugins/categories/pre_annotation/templates/plugin/to_task.py +40 -0
- synapse_sdk/plugins/categories/smart_tool/templates/config.yaml +2 -0
- synapse_sdk/plugins/categories/upload/__init__.py +0 -0
- synapse_sdk/plugins/categories/upload/actions/__init__.py +0 -0
- synapse_sdk/plugins/categories/upload/actions/upload/__init__.py +19 -0
- synapse_sdk/plugins/categories/upload/actions/upload/action.py +236 -0
- synapse_sdk/plugins/categories/upload/actions/upload/context.py +185 -0
- synapse_sdk/plugins/categories/upload/actions/upload/enums.py +493 -0
- synapse_sdk/plugins/categories/upload/actions/upload/exceptions.py +36 -0
- synapse_sdk/plugins/categories/upload/actions/upload/factory.py +138 -0
- synapse_sdk/plugins/categories/upload/actions/upload/models.py +214 -0
- synapse_sdk/plugins/categories/upload/actions/upload/orchestrator.py +183 -0
- synapse_sdk/plugins/categories/upload/actions/upload/registry.py +113 -0
- synapse_sdk/plugins/categories/upload/actions/upload/run.py +179 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/base.py +107 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/cleanup.py +62 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/collection.py +63 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/generate.py +91 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/initialize.py +82 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/metadata.py +235 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/organize.py +201 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/upload.py +104 -0
- synapse_sdk/plugins/categories/upload/actions/upload/steps/validate.py +71 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/base.py +82 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/data_unit/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/data_unit/batch.py +39 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/data_unit/single.py +29 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/file_discovery/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/file_discovery/flat.py +300 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/file_discovery/recursive.py +287 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/metadata/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/metadata/excel.py +174 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/metadata/none.py +16 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/upload/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/upload/sync.py +84 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/validation/__init__.py +1 -0
- synapse_sdk/plugins/categories/upload/actions/upload/strategies/validation/default.py +60 -0
- synapse_sdk/plugins/categories/upload/actions/upload/utils.py +250 -0
- synapse_sdk/plugins/categories/upload/templates/README.md +470 -0
- synapse_sdk/plugins/categories/upload/templates/config.yaml +33 -0
- synapse_sdk/plugins/categories/upload/templates/plugin/__init__.py +310 -0
- synapse_sdk/plugins/categories/upload/templates/plugin/upload.py +102 -0
- synapse_sdk/plugins/enums.py +3 -1
- synapse_sdk/plugins/models.py +148 -11
- synapse_sdk/plugins/templates/plugin-config-schema.json +406 -0
- synapse_sdk/plugins/templates/schema.json +491 -0
- synapse_sdk/plugins/templates/synapse-{{cookiecutter.plugin_code}}-plugin/config.yaml +1 -0
- synapse_sdk/plugins/templates/synapse-{{cookiecutter.plugin_code}}-plugin/requirements.txt +1 -1
- synapse_sdk/plugins/utils/__init__.py +46 -0
- synapse_sdk/plugins/utils/actions.py +119 -0
- synapse_sdk/plugins/utils/config.py +203 -0
- synapse_sdk/plugins/{utils.py → utils/legacy.py} +26 -46
- synapse_sdk/plugins/utils/ray_gcs.py +66 -0
- synapse_sdk/plugins/utils/registry.py +58 -0
- synapse_sdk/shared/__init__.py +25 -0
- synapse_sdk/shared/enums.py +93 -0
- synapse_sdk/types.py +19 -0
- synapse_sdk/utils/converters/__init__.py +240 -0
- synapse_sdk/utils/converters/coco/__init__.py +0 -0
- synapse_sdk/utils/converters/coco/from_dm.py +322 -0
- synapse_sdk/utils/converters/coco/to_dm.py +215 -0
- synapse_sdk/utils/converters/dm/__init__.py +57 -0
- synapse_sdk/utils/converters/dm/base.py +137 -0
- synapse_sdk/utils/converters/dm/from_v1.py +273 -0
- synapse_sdk/utils/converters/dm/to_v1.py +321 -0
- synapse_sdk/utils/converters/dm/tools/__init__.py +214 -0
- synapse_sdk/utils/converters/dm/tools/answer.py +95 -0
- synapse_sdk/utils/converters/dm/tools/bounding_box.py +132 -0
- synapse_sdk/utils/converters/dm/tools/bounding_box_3d.py +121 -0
- synapse_sdk/utils/converters/dm/tools/classification.py +75 -0
- synapse_sdk/utils/converters/dm/tools/keypoint.py +117 -0
- synapse_sdk/utils/converters/dm/tools/named_entity.py +111 -0
- synapse_sdk/utils/converters/dm/tools/polygon.py +122 -0
- synapse_sdk/utils/converters/dm/tools/polyline.py +124 -0
- synapse_sdk/utils/converters/dm/tools/prompt.py +94 -0
- synapse_sdk/utils/converters/dm/tools/relation.py +86 -0
- synapse_sdk/utils/converters/dm/tools/segmentation.py +141 -0
- synapse_sdk/utils/converters/dm/tools/segmentation_3d.py +83 -0
- synapse_sdk/utils/converters/dm/types.py +168 -0
- synapse_sdk/utils/converters/dm/utils.py +162 -0
- synapse_sdk/utils/converters/dm_legacy/__init__.py +56 -0
- synapse_sdk/utils/converters/dm_legacy/from_v1.py +627 -0
- synapse_sdk/utils/converters/dm_legacy/to_v1.py +367 -0
- synapse_sdk/utils/converters/pascal/__init__.py +0 -0
- synapse_sdk/utils/converters/pascal/from_dm.py +244 -0
- synapse_sdk/utils/converters/pascal/to_dm.py +214 -0
- synapse_sdk/utils/converters/yolo/__init__.py +0 -0
- synapse_sdk/utils/converters/yolo/from_dm.py +384 -0
- synapse_sdk/utils/converters/yolo/to_dm.py +267 -0
- synapse_sdk/utils/dataset.py +46 -0
- synapse_sdk/utils/encryption.py +158 -0
- synapse_sdk/utils/file/__init__.py +58 -0
- synapse_sdk/utils/file/archive.py +32 -0
- synapse_sdk/utils/file/checksum.py +56 -0
- synapse_sdk/utils/file/chunking.py +31 -0
- synapse_sdk/utils/file/download.py +385 -0
- synapse_sdk/utils/file/encoding.py +40 -0
- synapse_sdk/utils/file/io.py +22 -0
- synapse_sdk/utils/file/upload.py +165 -0
- synapse_sdk/utils/file/video/__init__.py +29 -0
- synapse_sdk/utils/file/video/transcode.py +307 -0
- synapse_sdk/utils/file.py.backup +301 -0
- synapse_sdk/utils/http.py +138 -0
- synapse_sdk/utils/network.py +309 -0
- synapse_sdk/utils/storage/__init__.py +72 -0
- synapse_sdk/utils/storage/providers/__init__.py +183 -0
- synapse_sdk/utils/storage/providers/file_system.py +134 -0
- synapse_sdk/utils/storage/providers/gcp.py +13 -0
- synapse_sdk/utils/storage/providers/http.py +190 -0
- synapse_sdk/utils/storage/providers/s3.py +91 -0
- synapse_sdk/utils/storage/providers/sftp.py +47 -0
- synapse_sdk/utils/storage/registry.py +17 -0
- synapse_sdk-2025.12.3.dist-info/METADATA +123 -0
- synapse_sdk-2025.12.3.dist-info/RECORD +279 -0
- {synapse_sdk-1.0.0a23.dist-info → synapse_sdk-2025.12.3.dist-info}/WHEEL +1 -1
- synapse_sdk/clients/backend/dataset.py +0 -51
- synapse_sdk/plugins/categories/import/actions/import.py +0 -10
- synapse_sdk/plugins/cli/__init__.py +0 -21
- synapse_sdk/plugins/templates/synapse-{{cookiecutter.plugin_code}}-plugin/.env +0 -24
- synapse_sdk/plugins/templates/synapse-{{cookiecutter.plugin_code}}-plugin/.env.dist +0 -24
- synapse_sdk/plugins/templates/synapse-{{cookiecutter.plugin_code}}-plugin/main.py +0 -4
- synapse_sdk/utils/file.py +0 -168
- synapse_sdk/utils/storage.py +0 -91
- synapse_sdk-1.0.0a23.dist-info/METADATA +0 -44
- synapse_sdk-1.0.0a23.dist-info/RECORD +0 -114
- /synapse_sdk/{plugins/cli → cli/plugin}/run.py +0 -0
- /synapse_sdk/{plugins/categories/import → clients/validators}/__init__.py +0 -0
- /synapse_sdk/{plugins/categories/import/actions → devtools}/__init__.py +0 -0
- {synapse_sdk-1.0.0a23.dist-info → synapse_sdk-2025.12.3.dist-info}/entry_points.txt +0 -0
- {synapse_sdk-1.0.0a23.dist-info → synapse_sdk-2025.12.3.dist-info/licenses}/LICENSE +0 -0
- {synapse_sdk-1.0.0a23.dist-info → synapse_sdk-2025.12.3.dist-info}/top_level.txt +0 -0

synapse_sdk/utils/converters/dm/tools/polyline.py
@@ -0,0 +1,124 @@
"""
Polyline Tool Processor

Created: 2025-12-12

Conversion Rules (see data-model.md 4.2):
V1 → V2:
- coordinate [{x, y, id}, ...] → data [[x, y], ...]
- classification.class → classification
- classification.{other} → attrs[{name, value}]

V2 → V1:
- data [[x, y], ...] → coordinate [{x, y, id}, ...]
- classification → classification.class
- attrs[{name, value}] → classification.{name: value} (excluding special attrs)
"""

from typing import Any

from ..utils import generate_random_id


class PolylineProcessor:
    """Polyline Tool Processor

    V1 coordinate: [{x, y, id}, ...]
    V2 data: [[x, y], ...]

    Same data structure as polygon, only tool_name differs.
    """

    tool_name = 'polyline'

    # V1 meta fields (not stored in attrs)
    _META_FIELDS = {'isLocked', 'isVisible', 'isValid', 'isDrawCompleted', 'label', 'id', 'tool'}

    # Special attrs not restored to V1 classification (_ prefix)
    _INTERNAL_ATTR_PREFIX = '_'

    def to_v2(self, v1_annotation: dict[str, Any], v1_data: dict[str, Any]) -> dict[str, Any]:
        """Convert V1 polyline to V2

        Args:
            v1_annotation: V1 annotations[] item
            v1_data: V1 annotationsData[] item (same ID)

        Returns:
            V2 format polyline annotation
        """
        coordinate = v1_data.get('coordinate', [])
        classification_obj = v1_annotation.get('classification') or {}

        # V2 data: [[x, y], ...] - extract only x, y from coordinate array
        data = []
        for point in coordinate:
            if isinstance(point, dict):
                data.append([point.get('x', 0), point.get('y', 0)])

        # Build V2 attrs
        attrs: list[dict[str, Any]] = []

        # Add other classification properties to attrs (excluding class)
        for key, value in classification_obj.items():
            if key != 'class':
                attrs.append({'name': key, 'value': value})

        # Build V2 annotation
        return {
            'id': v1_annotation.get('id', ''),
            'classification': classification_obj.get('class', ''),
            'attrs': attrs,
            'data': data,
        }

    def to_v1(self, v2_annotation: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
        """Convert V2 polyline to V1

        Args:
            v2_annotation: V2 annotation object

        Returns:
            (V1 annotation, V1 annotationData) tuple
        """
        annotation_id = v2_annotation.get('id', '')
        classification_str = v2_annotation.get('classification', '')
        attrs = v2_annotation.get('attrs', [])
        data = v2_annotation.get('data', [])

        # Build V1 coordinate: [[x, y], ...] → [{x, y, id}, ...]
        coordinate: list[dict[str, Any]] = []
        for point in data:
            if isinstance(point, list) and len(point) >= 2:
                coordinate.append({
                    'x': point[0],
                    'y': point[1],
                    'id': generate_random_id(),
                })

        # Build V1 classification
        classification: dict[str, Any] = {'class': classification_str}

        # Restore properties from attrs
        for attr in attrs:
            name = attr.get('name', '')
            value = attr.get('value')

            if not name.startswith(self._INTERNAL_ATTR_PREFIX):
                # Add non-internal attrs to classification
                classification[name] = value

        # V1 annotation (meta info)
        v1_annotation: dict[str, Any] = {
            'id': annotation_id,
            'tool': self.tool_name,
            'classification': classification,
        }

        # V1 annotationData (coordinate info)
        v1_data: dict[str, Any] = {
            'id': annotation_id,
            'coordinate': coordinate,
        }

        return v1_annotation, v1_data
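
A minimal round-trip sketch of the new polyline processor (not part of the package; the import path follows the file list above, and the sample annotation is hypothetical):

from synapse_sdk.utils.converters.dm.tools.polyline import PolylineProcessor

processor = PolylineProcessor()
v1_annotation = {'id': 'a1b2c3d4e5', 'tool': 'polyline', 'classification': {'class': 'lane', 'color': 'yellow'}}
v1_data = {'id': 'a1b2c3d4e5', 'coordinate': [{'x': 10, 'y': 20, 'id': 'p1'}, {'x': 30, 'y': 40, 'id': 'p2'}]}

v2 = processor.to_v2(v1_annotation, v1_data)
# {'id': 'a1b2c3d4e5', 'classification': 'lane',
#  'attrs': [{'name': 'color', 'value': 'yellow'}], 'data': [[10, 20], [30, 40]]}

annotation, data = processor.to_v1(v2)
# Point ids are regenerated via generate_random_id(), so the round trip preserves
# geometry and classification but not the original per-point ids.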

synapse_sdk/utils/converters/dm/tools/prompt.py
@@ -0,0 +1,94 @@
"""
Prompt Tool Processor

Created: 2025-12-12

Conversion Rules (see data-model.md 9.10):
V1 → V2:
- input → data.input
- Other fields from annotationsData → preserved in data
- classification.class → classification
- classification.{other} → attrs[{name, value}]

V2 → V1:
- data.input → input
- Other fields from data → preserved in annotationsData
- classification → classification.class
- attrs[{name, value}] → classification.{name: value}
"""

from typing import Any


class PromptProcessor:
    """Prompt Tool Processor

    V1 annotationsData: {id, tool, input: [{type, value, changeHistory}]}
    V2 data: {input: [...], model?, displayName?, generatedBy?, timestamp?}

    Prompt annotation data conversion.
    """

    tool_name = 'prompt'

    _META_FIELDS = {'isLocked', 'isVisible', 'isValid', 'isDrawCompleted', 'label', 'id', 'tool'}
    _INTERNAL_ATTR_PREFIX = '_'
    # Fields to copy from annotationsData to V2 data
    _DATA_FIELDS = {'input', 'model', 'displayName', 'generatedBy', 'timestamp'}

    def to_v2(self, v1_annotation: dict[str, Any], v1_data: dict[str, Any]) -> dict[str, Any]:
        """Convert V1 prompt to V2"""
        classification_obj = v1_annotation.get('classification') or {}

        # Build V2 data (input and other fields)
        data: dict[str, Any] = {}
        for key in self._DATA_FIELDS:
            if key in v1_data:
                data[key] = v1_data[key]

        # Build V2 attrs (all classification properties excluding class)
        attrs: list[dict[str, Any]] = []
        for key, value in classification_obj.items():
            if key != 'class':
                attrs.append({'name': key, 'value': value})

        return {
            'id': v1_annotation.get('id', ''),
            'classification': classification_obj.get('class', ''),
            'attrs': attrs,
            'data': data,
        }

    def to_v1(self, v2_annotation: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
        """Convert V2 prompt to V1"""
        annotation_id = v2_annotation.get('id', '')
        classification_str = v2_annotation.get('classification', '')
        attrs = v2_annotation.get('attrs', [])
        data = v2_annotation.get('data', {})

        # Build V1 classification
        classification: dict[str, Any] = {'class': classification_str}
        for attr in attrs:
            name = attr.get('name', '')
            value = attr.get('value')
            if not name.startswith(self._INTERNAL_ATTR_PREFIX):
                classification[name] = value

        v1_annotation: dict[str, Any] = {
            'id': annotation_id,
            'tool': self.tool_name,
            'classification': classification,
        }

        # Build V1 annotationsData
        v1_data: dict[str, Any] = {
            'id': annotation_id,
            'tool': self.tool_name,
        }

        # Copy fields from data
        for key in self._DATA_FIELDS:
            if key in data:
                v1_data[key] = data[key]

        return v1_annotation, v1_data
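
A short sketch of how the _DATA_FIELDS whitelist plays out (sample values are hypothetical; import path taken from the file list above):

from synapse_sdk.utils.converters.dm.tools.prompt import PromptProcessor

processor = PromptProcessor()
v2 = processor.to_v2(
    {'id': 'p1', 'tool': 'prompt', 'classification': {'class': 'qa', 'reviewed': True}},
    {'id': 'p1', 'tool': 'prompt', 'input': [{'type': 'text', 'value': 'Describe the scene'}], 'model': 'some-model'},
)
# Only whitelisted keys survive into V2: v2['data'] == {'input': [...], 'model': 'some-model'},
# and the extra classification property becomes v2['attrs'] == [{'name': 'reviewed', 'value': True}]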

synapse_sdk/utils/converters/dm/tools/relation.py
@@ -0,0 +1,86 @@
"""
Relation Tool Processor

Created: 2025-12-12

Conversion Rules (see data-model.md 9.9):
V1 → V2:
- annotationId, targetAnnotationId → data [from_id, to_id]
- classification.class → classification
- classification.{other} → attrs[{name, value}]

V2 → V1:
- data [from_id, to_id] → annotationId, targetAnnotationId
- classification → classification.class
- attrs[{name, value}] → classification.{name: value}
"""

from typing import Any


class RelationProcessor:
    """Relation Tool Processor

    V1 annotationData: {annotationId, targetAnnotationId}
    V2 data: [from_id, to_id]

    Represents relationship between two annotations.
    """

    tool_name = 'relation'

    _META_FIELDS = {'isLocked', 'isVisible', 'isValid', 'isDrawCompleted', 'label', 'id', 'tool'}
    _INTERNAL_ATTR_PREFIX = '_'

    def to_v2(self, v1_annotation: dict[str, Any], v1_data: dict[str, Any]) -> dict[str, Any]:
        """Convert V1 relation to V2"""
        classification_obj = v1_annotation.get('classification') or {}

        # V2 data: [from_id, to_id]
        data = [
            v1_data.get('annotationId', ''),
            v1_data.get('targetAnnotationId', ''),
        ]

        # Build V2 attrs
        attrs: list[dict[str, Any]] = []
        for key, value in classification_obj.items():
            if key != 'class':
                attrs.append({'name': key, 'value': value})

        return {
            'id': v1_annotation.get('id', ''),
            'classification': classification_obj.get('class', ''),
            'attrs': attrs,
            'data': data,
        }

    def to_v1(self, v2_annotation: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
        """Convert V2 relation to V1"""
        annotation_id = v2_annotation.get('id', '')
        classification_str = v2_annotation.get('classification', '')
        attrs = v2_annotation.get('attrs', [])
        data = v2_annotation.get('data', [])

        # Build V1 classification
        classification: dict[str, Any] = {'class': classification_str}
        for attr in attrs:
            name = attr.get('name', '')
            value = attr.get('value')
            if not name.startswith(self._INTERNAL_ATTR_PREFIX):
                classification[name] = value

        v1_annotation: dict[str, Any] = {
            'id': annotation_id,
            'tool': self.tool_name,
            'classification': classification,
        }

        # V1 annotationData
        v1_data: dict[str, Any] = {
            'id': annotation_id,
            'annotationId': data[0] if len(data) > 0 else '',
            'targetAnnotationId': data[1] if len(data) > 1 else '',
        }

        return v1_annotation, v1_data
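
Illustrative use of the relation processor to link two other annotations by id (the ids are made up; the import path follows the file list above):

from synapse_sdk.utils.converters.dm.tools.relation import RelationProcessor

processor = RelationProcessor()
v2 = processor.to_v2(
    {'id': 'r1', 'tool': 'relation', 'classification': {'class': 'belongs_to'}},
    {'id': 'r1', 'annotationId': 'box_01', 'targetAnnotationId': 'box_02'},
)
# v2['data'] == ['box_01', 'box_02']  # [from_id, to_id]

annotation, data = processor.to_v1(v2)
# data == {'id': 'r1', 'annotationId': 'box_01', 'targetAnnotationId': 'box_02'}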

synapse_sdk/utils/converters/dm/tools/segmentation.py
@@ -0,0 +1,141 @@
"""
Segmentation Tool Processor (Image/Video Unified)

Created: 2025-12-12

Conversion Rules (see data-model.md 9.3, 9.4):

Image Segmentation:
V1 → V2:
- pixel_indices [...] → data [...]

V2 → V1:
- data [...] → pixel_indices [...]

Video Segmentation:
V1 → V2:
- section {startFrame, endFrame} → data {startFrame, endFrame}

V2 → V1:
- data {startFrame, endFrame} → section {startFrame, endFrame}
"""

from typing import Any


class SegmentationProcessor:
    """Segmentation Tool Processor (Image/Video Unified)

    Image Segmentation:
        V1 pixel_indices: [int, ...]
        V2 data: [int, ...]

    Video Segmentation:
        V1 section: {startFrame, endFrame}
        V2 data: {startFrame, endFrame}

    Differentiate image/video by data structure:
    - list: Image segmentation (pixel_indices)
    - dict: Video segmentation (section)
    """

    tool_name = 'segmentation'

    # V1 meta fields (not stored in attrs)
    _META_FIELDS = {'isLocked', 'isVisible', 'isValid', 'isDrawCompleted', 'label', 'id', 'tool'}

    # Special attrs not restored to V1 classification (_ prefix)
    _INTERNAL_ATTR_PREFIX = '_'

    def to_v2(self, v1_annotation: dict[str, Any], v1_data: dict[str, Any]) -> dict[str, Any]:
        """Convert V1 segmentation to V2

        Args:
            v1_annotation: V1 annotations[] item
            v1_data: V1 annotationsData[] item (same ID)

        Returns:
            V2 format segmentation annotation
        """
        classification_obj = v1_annotation.get('classification') or {}

        # Process based on data type
        if 'pixel_indices' in v1_data:
            # Image segmentation
            data = v1_data.get('pixel_indices', [])
        elif 'section' in v1_data:
            # Video segmentation
            section = v1_data.get('section', {})
            data = {
                'startFrame': section.get('startFrame', 0),
                'endFrame': section.get('endFrame', 0),
            }
        else:
            # Default (empty array)
            data = []

        # Build V2 attrs
        attrs: list[dict[str, Any]] = []

        # Add other classification properties to attrs (excluding class)
        for key, value in classification_obj.items():
            if key != 'class':
                attrs.append({'name': key, 'value': value})

        # Build V2 annotation
        return {
            'id': v1_annotation.get('id', ''),
            'classification': classification_obj.get('class', ''),
            'attrs': attrs,
            'data': data,
        }

    def to_v1(self, v2_annotation: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
        """Convert V2 segmentation to V1

        Args:
            v2_annotation: V2 annotation object

        Returns:
            (V1 annotation, V1 annotationData) tuple
        """
        annotation_id = v2_annotation.get('id', '')
        classification_str = v2_annotation.get('classification', '')
        attrs = v2_annotation.get('attrs', [])
        data = v2_annotation.get('data', [])

        # Build V1 classification
        classification: dict[str, Any] = {'class': classification_str}

        # Restore properties from attrs
        for attr in attrs:
            name = attr.get('name', '')
            value = attr.get('value')

            if not name.startswith(self._INTERNAL_ATTR_PREFIX):
                classification[name] = value

        # V1 annotation (meta info)
        v1_annotation: dict[str, Any] = {
            'id': annotation_id,
            'tool': self.tool_name,
            'classification': classification,
        }

        # V1 annotationData (coordinate info) - process based on data type
        v1_data: dict[str, Any] = {'id': annotation_id}

        if isinstance(data, list):
            # Image segmentation
            v1_data['pixel_indices'] = data
        elif isinstance(data, dict):
            # Video segmentation
            v1_data['section'] = {
                'startFrame': data.get('startFrame', 0),
                'endFrame': data.get('endFrame', 0),
            }
        else:
            # Default
            v1_data['pixel_indices'] = []

        return v1_annotation, v1_data
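
A sketch of the list-vs-dict dispatch described in the docstring (sample data is hypothetical; import path from the file list above):

from synapse_sdk.utils.converters.dm.tools.segmentation import SegmentationProcessor

processor = SegmentationProcessor()

# Image: pixel_indices stays a flat list in V2 data.
image_v2 = processor.to_v2(
    {'id': 's1', 'classification': {'class': 'road'}},
    {'id': 's1', 'pixel_indices': [101, 102, 103]},
)
assert image_v2['data'] == [101, 102, 103]

# Video: the section dict is carried over, so to_v1 can tell the two apart
# purely from the type of data (list → pixel_indices, dict → section).
video_v2 = processor.to_v2(
    {'id': 's2', 'classification': {'class': 'road'}},
    {'id': 's2', 'section': {'startFrame': 10, 'endFrame': 42}},
)
assert video_v2['data'] == {'startFrame': 10, 'endFrame': 42}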

synapse_sdk/utils/converters/dm/tools/segmentation_3d.py
@@ -0,0 +1,83 @@
"""
3D Segmentation Tool Processor

Created: 2025-12-12

Conversion Rules (see data-model.md 9.7):
V1 → V2:
- points [...] → data {points: [...]}
- classification.class → classification
- classification.{other} → attrs[{name, value}]

V2 → V1:
- data {points: [...]} → points [...]
- classification → classification.class
- attrs[{name, value}] → classification.{name: value}
"""

from typing import Any


class Segmentation3DProcessor:
    """3D Segmentation Tool Processor

    V1 annotationData: {points: list[int]}
    V2 data: {points: list[int]}

    Used with pcd media type.
    """

    tool_name = '3d_segmentation'

    _META_FIELDS = {'isLocked', 'isVisible', 'isValid', 'isDrawCompleted', 'label', 'id', 'tool'}
    _INTERNAL_ATTR_PREFIX = '_'

    def to_v2(self, v1_annotation: dict[str, Any], v1_data: dict[str, Any]) -> dict[str, Any]:
        """Convert V1 3D segmentation to V2"""
        classification_obj = v1_annotation.get('classification') or {}

        # V2 data: {points: [...]}
        data = {
            'points': v1_data.get('points', []),
        }

        # Build V2 attrs
        attrs: list[dict[str, Any]] = []
        for key, value in classification_obj.items():
            if key != 'class':
                attrs.append({'name': key, 'value': value})

        return {
            'id': v1_annotation.get('id', ''),
            'classification': classification_obj.get('class', ''),
            'attrs': attrs,
            'data': data,
        }

    def to_v1(self, v2_annotation: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
        """Convert V2 3D segmentation to V1"""
        annotation_id = v2_annotation.get('id', '')
        classification_str = v2_annotation.get('classification', '')
        attrs = v2_annotation.get('attrs', [])
        data = v2_annotation.get('data', {})

        # Build V1 classification
        classification: dict[str, Any] = {'class': classification_str}
        for attr in attrs:
            name = attr.get('name', '')
            value = attr.get('value')
            if not name.startswith(self._INTERNAL_ATTR_PREFIX):
                classification[name] = value

        v1_annotation: dict[str, Any] = {
            'id': annotation_id,
            'tool': self.tool_name,
            'classification': classification,
        }

        v1_data: dict[str, Any] = {
            'id': annotation_id,
            'points': data.get('points', []),
        }

        return v1_annotation, v1_data
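
The 3D variant differs from the 2D image case mainly in how data is shaped; a quick hypothetical check (import path from the file list above):

from synapse_sdk.utils.converters.dm.tools.segmentation_3d import Segmentation3DProcessor

processor = Segmentation3DProcessor()
v2 = processor.to_v2(
    {'id': 'c1', 'classification': {'class': 'car'}},
    {'id': 'c1', 'points': [15, 16, 42]},
)
# Unlike the 2D image case, the indices are wrapped in a dict: v2['data'] == {'points': [15, 16, 42]}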

synapse_sdk/utils/converters/dm/types.py
@@ -0,0 +1,168 @@
"""
DM Schema V1/V2 Converter Type Definitions

Created: 2025-12-11
"""

from typing import Any, TypedDict

# =============================================================================
# V1 Type Definitions
# =============================================================================


class BoundingBoxCoordinate(TypedDict, total=False):
    """V1 Bounding Box Coordinate"""

    x: float
    y: float
    width: float
    height: float
    rotation: float  # In radians, optional


class PolygonPoint(TypedDict):
    """V1 Polygon Individual Point"""

    x: float
    y: float
    id: str


# V1 Polygon Coordinate Type
PolygonCoordinate = list[PolygonPoint]


class AnnotationBase(TypedDict, total=False):
    """V1 Annotation Meta Information"""

    id: str  # 10-character random string
    tool: str  # Tool code (bounding_box, polygon, etc.)
    isLocked: bool  # Edit lock (default: False)
    isVisible: bool  # Display visibility (default: True)
    isValid: bool  # Validity (default: False)
    isDrawCompleted: bool  # Drawing completed
    classification: dict[str, Any] | None  # Classification info
    label: list[str]  # Label array


class AnnotationDataItem(TypedDict, total=False):
    """V1 Annotation Coordinate Data"""

    id: str  # Matches AnnotationBase.id
    coordinate: BoundingBoxCoordinate | PolygonCoordinate | Any


class AnnotatorDataV1(TypedDict, total=False):
    """DM Schema V1 Top-Level Structure"""

    extra: dict[str, Any]  # Per-media metadata
    annotations: dict[str, list[AnnotationBase]]  # Annotation meta
    annotationsData: dict[str, list[AnnotationDataItem]]  # Coordinate data
    relations: dict[str, list[Any]]  # Relations
    annotationGroups: dict[str, list[Any]]  # Groups
    assignmentId: int | str | None  # Task identifier


# =============================================================================
# V2 Type Definitions
# =============================================================================


class V2Attr(TypedDict):
    """V2 Attribute Object"""

    name: str
    value: Any


class V2Annotation(TypedDict, total=False):
    """V2 Annotation Common Structure"""

    id: str  # Unique identifier (10 chars, alphanumeric)
    classification: str  # Class label
    attrs: list[V2Attr]  # Additional attributes array
    data: Any  # Tool-specific data (type varies)


class V2MediaItem(TypedDict, total=False):
    """V2 Media Item (annotation arrays by tool)"""

    bounding_box: list[V2Annotation]
    polygon: list[V2Annotation]
    polyline: list[V2Annotation]
    keypoint: list[V2Annotation]
    # Other tools can be added as needed


class V2AnnotationData(TypedDict, total=False):
    """V2 Common Annotation Structure (annotation_data)"""

    classification: dict[str, list[str]]  # Class labels by tool
    images: list[V2MediaItem]  # Image media
    videos: list[V2MediaItem]  # Video media
    pcds: list[V2MediaItem]  # PCD media
    texts: list[V2MediaItem]  # Text media
    audios: list[V2MediaItem]  # Audio media
    prompts: list[V2MediaItem]  # Prompt media


class AnnotationMeta(TypedDict, total=False):
    """V1 Top-Level Structure Preserved (annotation_meta)

    V1 top-level structure preserved during V1→V2 conversion.
    Combined with annotation_data for complete V1 restoration during V2→V1 conversion.
    """

    extra: dict[str, Any]  # Per-media metadata
    annotations: dict[str, list[AnnotationBase]]  # Annotation meta
    annotationsData: dict[str, list[AnnotationDataItem]]  # Coordinate data
    relations: dict[str, list[Any]]  # Relations
    annotationGroups: dict[str, list[Any]]  # Groups
    assignmentId: int | str | None  # Task identifier


class V2ConversionResult(TypedDict):
    """V1→V2 Conversion Result (separated structure)

    V1→V2 conversion result is separated into two parts:
    - annotation_data: V2 common annotation structure (id, classification, attrs, data)
    - annotation_meta: V1 top-level structure preserved
      (extra, annotations, annotationsData, relations, annotationGroups, assignmentId)

    V2→V1 conversion:
    - If both parts exist, complete V1 restoration is possible
    - If only annotation_data exists, convert to V1 using defaults
    """

    annotation_data: V2AnnotationData
    annotation_meta: AnnotationMeta


# =============================================================================
# Bounding Box V2 Data Types
# =============================================================================

# V2 Bounding Box data: [x, y, width, height]
BoundingBoxData = list[float]

# V2 Polygon data: [[x1, y1], [x2, y2], ...]
PolygonData = list[list[float]]


# =============================================================================
# Media Type Constants
# =============================================================================

SUPPORTED_FILE_TYPES = ('image', 'video', 'pcd', 'text', 'audio', 'prompt')

MEDIA_TYPE_MAP = {
    'image': 'images',
    'video': 'videos',
    'pcd': 'pcds',
    'text': 'texts',
    'audio': 'audios',
    'prompt': 'prompts',
}

MEDIA_TYPE_REVERSE_MAP = {v: k for k, v in MEDIA_TYPE_MAP.items()}
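
A small sketch showing how these types and constants fit together when assembling a conversion result (the payload is hypothetical; module path from the file list above):

from synapse_sdk.utils.converters.dm.types import (
    MEDIA_TYPE_MAP,
    MEDIA_TYPE_REVERSE_MAP,
    V2ConversionResult,
)

# Singular file types map to the plural keys used inside V2AnnotationData.
media_key = MEDIA_TYPE_MAP['image']            # 'images'
assert MEDIA_TYPE_REVERSE_MAP[media_key] == 'image'

result: V2ConversionResult = {
    'annotation_data': {
        'classification': {'polyline': ['lane']},
        'images': [{'polyline': []}],
    },
    'annotation_meta': {
        'extra': {},
        'annotations': {},
        'annotationsData': {},
        'relations': {},
        'annotationGroups': {},
        'assignmentId': None,
    },
}
# With both annotation_data and annotation_meta present, a V2→V1 conversion can
# restore the original V1 document; with annotation_data alone it falls back to defaults.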