synapse-sdk 1.0.0a74__py3-none-any.whl → 1.0.0a75__py3-none-any.whl

This diff shows the content of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of synapse-sdk might be problematic.

@@ -0,0 +1,415 @@
+ from . import BaseDMConverter
+
+
+ class DMV1ToV2Converter(BaseDMConverter):
+     """DM v1 to v2 format converter class."""
+
+     def __init__(self, old_dm_data=None):
+         """Initialize the converter.
+
+         Args:
+             old_dm_data (dict, optional): DM v1 format data to be converted
+         """
+         super().__init__()
+         self.old_dm_data = old_dm_data if old_dm_data is not None else {}
+         self.classification_info = {}
+         self.media_data = {}
+
+     def convert(self):
+         """Convert DM v1 data to v2 format.
+
+         Returns:
+             dict: Converted data in DM v2 format
+         """
+         # Reset state
+         old_dm_data = self.old_dm_data
+         self.classification_info = {}
+         self.media_data = {}
+
+         # Extract media IDs from annotations key
+         media_ids = list(old_dm_data.get('annotations', {}).keys())
+
+         for media_id in media_ids:
+             self._process_media_item(old_dm_data, media_id)
+
+         # Build final result (put classification at the front)
+         result = {'classification': self.classification_info}
+         result.update(self.media_data)
+
+         return result
+
+     def _process_media_item(self, old_dm_data, media_id):
+         """Process a single media item.
+
+         Args:
+             old_dm_data (dict): Original DM v1 data
+             media_id (str): ID of the media item to process
+         """
+         # Extract media type (e.g., "video_1" -> "videos", "image_2" -> "images")
+         media_type, media_type_plural = self._extract_media_type_info(media_id)
+
+         # Create list for this media type if it doesn't exist
+         if media_type_plural not in self.media_data:
+             self.media_data[media_type_plural] = []
+
+         # Create id -> class and tool mappings
+         id_to_class = {
+             annotation['id']: annotation['classification']['class']
+             for annotation in old_dm_data['annotations'][media_id]
+         }
+
+         id_to_tool = {annotation['id']: annotation['tool'] for annotation in old_dm_data['annotations'][media_id]}
+
+         # Create id -> full classification mapping (including additional attributes)
+         id_to_full_classification = {
+             annotation['id']: annotation['classification'] for annotation in old_dm_data['annotations'][media_id]
+         }
+
+         # Initialize current media item
+         media_item = {}
+
+         # Process data from annotationsData for this media
+         annotations_data = old_dm_data.get('annotationsData', {}).get(media_id, [])
+
+         # Group by annotation tool type
+         tools_data = {}
+
+         for item in annotations_data:
+             item_id = item.get('id', '')
+             # Get tool and classification info from annotations
+             tool_type = id_to_tool.get(item_id, '')
+             classification = id_to_class.get(item_id, '')
+
+             # Collect classification info (maintain existing ID)
+             if tool_type not in self.classification_info:
+                 self.classification_info[tool_type] = []
+
+             # Add only non-duplicate classifications
+             if classification and classification not in self.classification_info[tool_type]:
+                 self.classification_info[tool_type].append(classification)
+
+             # Process by each tool type
+             self._process_annotation_item(
+                 item, item_id, tool_type, classification, id_to_full_classification, tools_data
+             )
+
+         # Add processed tool data to media item
+         for tool_type, tool_data in tools_data.items():
+             if tool_data:  # Only add if data exists
+                 media_item[tool_type] = tool_data
+
+         # Add media item to result (only if data exists)
+         if media_item:
+             self.media_data[media_type_plural].append(media_item)
+
+     def _process_annotation_item(self, item, item_id, tool_type, classification, id_to_full_classification, tools_data):
+         """Process a single annotation item based on its tool type.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             tool_type (str): Type of annotation tool
+             classification (str): Classification label
+             id_to_full_classification (dict): Mapping of ID to full classification data
+             tools_data (dict): Dictionary to store processed tool data
+         """
+         processor = self.tool_processors.get(tool_type)
+         if processor:
+             processor(item, item_id, classification, tools_data, id_to_full_classification)
+         else:
+             # Handle unknown tool_type
+             self._handle_unknown_tool(tool_type, item_id)
+
+     def _process_bounding_box(self, item, item_id, classification, tools_data, id_to_full_classification=None):
+         """Process bounding box annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict, optional): Full classification mapping
+         """
+         if 'bounding_box' not in tools_data:
+             tools_data['bounding_box'] = []
+
+         # Process coordinate or coordinates
+         coord_data = None
+         if 'coordinate' in item and isinstance(item['coordinate'], dict):
+             # Single coordinate structure (dictionary)
+             coord_data = item['coordinate']
+         elif 'coordinates' in item:
+             # Multiple coordinates structure (video etc.)
+             coords_data = item['coordinates']
+             if coords_data:
+                 # Use coordinate data from first key
+                 first_key = list(coords_data.keys())[0]
+                 coord_data = coords_data[first_key]
+
+         if coord_data and 'width' in coord_data and 'height' in coord_data:
+             data = [
+                 coord_data['x'],
+                 coord_data['y'],
+                 coord_data['width'],
+                 coord_data['height'],
+             ]
+
+             tools_data['bounding_box'].append({
+                 'id': item_id,
+                 'classification': classification,
+                 'attrs': [],
+                 'data': data,
+             })
+
+     def _process_named_entity(self, item, item_id, classification, tools_data, id_to_full_classification=None):
+         """Process named entity annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict, optional): Full classification mapping
+         """
+         if 'named_entity' not in tools_data:
+             tools_data['named_entity'] = []
+
+         # Process named_entity ranges and content
+         entity_data = {}
+         if 'ranges' in item and isinstance(item['ranges'], list):
+             # Store ranges information
+             entity_data['ranges'] = item['ranges']
+
+         if 'content' in item:
+             # Store selected text content
+             entity_data['content'] = item['content']
+
+         tools_data['named_entity'].append({
+             'id': item_id,
+             'classification': classification,
+             'attrs': [],
+             'data': entity_data,  # Format: {ranges: [...], content: "..."}
+         })
+
+     def _process_classification(self, item, item_id, classification, tools_data, id_to_full_classification):
+         """Process classification annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict): Full classification mapping
+         """
+         if 'classification' not in tools_data:
+             tools_data['classification'] = []
+
+         # Get full classification info (including additional attributes)
+         full_classification = id_to_full_classification.get(item_id, {})
+
+         # Store additional attributes in attrs array
+         attrs = []
+         classification_data = {}
+
+         for key, value in full_classification.items():
+             if key != 'class':  # class is already stored in classification field
+                 if isinstance(value, list) and len(value) > 0:
+                     # Array attributes like multiple
+                     attrs.append({'name': key, 'value': value})
+                 elif isinstance(value, str) and value.strip():
+                     # String attributes like text, single_radio, single_dropdown
+                     attrs.append({'name': key, 'value': value})
+
+         tools_data['classification'].append({
+             'id': item_id,
+             'classification': classification,
+             'attrs': attrs,
+             'data': classification_data,  # Empty object for full text classification
+         })
+
+     def _process_polyline(self, item, item_id, classification, tools_data, id_to_full_classification=None):
+         """Process polyline annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict, optional): Full classification mapping
+         """
+         if 'polyline' not in tools_data:
+             tools_data['polyline'] = []
+
+         # Process polyline coordinates
+         polyline_data = []
+         if 'coordinate' in item and isinstance(item['coordinate'], list):
+             # Convert each coordinate point to [x, y] format
+             for point in item['coordinate']:
+                 if 'x' in point and 'y' in point:
+                     polyline_data.extend([point['x'], point['y']])
+
+         tools_data['polyline'].append({
+             'id': item_id,
+             'classification': classification,
+             'attrs': [],
+             'data': polyline_data,  # Format: [x1, y1, x2, y2, x3, y3, ...]
+         })
+
+     def _process_keypoint(self, item, item_id, classification, tools_data, id_to_full_classification=None):
+         """Process keypoint annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict, optional): Full classification mapping
+         """
+         if 'keypoint' not in tools_data:
+             tools_data['keypoint'] = []
+
+         # Process keypoint coordinate (single point)
+         keypoint_data = []
+         if 'coordinate' in item and isinstance(item['coordinate'], dict):
+             coord = item['coordinate']
+             if 'x' in coord and 'y' in coord:
+                 keypoint_data = [coord['x'], coord['y']]
+
+         tools_data['keypoint'].append({
+             'id': item_id,
+             'classification': classification,
+             'attrs': [],
+             'data': keypoint_data,  # Format: [x, y]
+         })
+
+     def _process_3d_bounding_box(self, item, item_id, classification, tools_data, id_to_full_classification=None):
+         """Process 3D bounding box annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict, optional): Full classification mapping
+         """
+         if '3d_bounding_box' not in tools_data:
+             tools_data['3d_bounding_box'] = []
+
+         # Process 3d_bounding_box psr (position, scale, rotation)
+         psr_data = {}
+         if 'psr' in item and isinstance(item['psr'], dict):
+             psr_data = item['psr']
+
+         tools_data['3d_bounding_box'].append({
+             'id': item_id,
+             'classification': classification,
+             'attrs': [],
+             'data': psr_data,  # Format: {position: {x,y,z}, scale: {x,y,z}, rotation: {x,y,z}}
+         })
+
+     def _process_segmentation(self, item, item_id, classification, tools_data, id_to_full_classification=None):
+         """Process segmentation annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict, optional): Full classification mapping
+         """
+         if 'segmentation' not in tools_data:
+             tools_data['segmentation'] = []
+
+         # Process segmentation pixel_indices or section
+         segmentation_data = {}
+         if 'pixel_indices' in item and isinstance(item['pixel_indices'], list):
+             # Pixel-based segmentation (images)
+             segmentation_data = item['pixel_indices']
+         elif 'section' in item and isinstance(item['section'], dict):
+             # Frame section-based segmentation (videos)
+             segmentation_data = item['section']
+
+         tools_data['segmentation'].append({
+             'id': item_id,
+             'classification': classification,
+             'attrs': [],
+             'data': segmentation_data,  # Format: [pixel_indices...] or {startFrame: x, endFrame: y}
+         })
+
+     def _process_polygon(self, item, item_id, classification, tools_data, id_to_full_classification=None):
+         """Process polygon annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict, optional): Full classification mapping
+         """
+         if 'polygon' not in tools_data:
+             tools_data['polygon'] = []
+
+         # Process polygon coordinates
+         polygon_data = []
+         if 'coordinate' in item and isinstance(item['coordinate'], list):
+             # Convert each coordinate point to [x, y] format
+             for point in item['coordinate']:
+                 if 'x' in point and 'y' in point:
+                     polygon_data.extend([point['x'], point['y']])
+
+         tools_data['polygon'].append({
+             'id': item_id,
+             'classification': classification,
+             'attrs': [],
+             'data': polygon_data,  # Format: [x1, y1, x2, y2, x3, y3, ...]
+         })
+
+     def _process_relation(self, item, item_id, classification, tools_data, id_to_full_classification=None):
+         """Process relation annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict, optional): Full classification mapping
+         """
+         if 'relation' not in tools_data:
+             tools_data['relation'] = []
+
+         # Process relation data (needs adjustment based on actual relation data structure)
+         relation_data = []
+         if 'data' in item:
+             relation_data = item['data']
+
+         tools_data['relation'].append({
+             'id': item_id,
+             'classification': classification,
+             'attrs': [],
+             'data': relation_data,  # Format: ['from_id', 'to_id']
+         })
+
+     def _process_group(self, item, item_id, classification, tools_data, id_to_full_classification=None):
+         """Process group annotation.
+
+         Args:
+             item (dict): Annotation item data
+             item_id (str): ID of the annotation item
+             classification (str): Classification label
+             tools_data (dict): Dictionary to store processed tool data
+             id_to_full_classification (dict, optional): Full classification mapping
+         """
+         if 'group' not in tools_data:
+             tools_data['group'] = []
+
+         # Process group data (needs adjustment based on actual group data structure)
+         group_data = []
+         if 'data' in item:
+             group_data = item['data']
+
+         tools_data['group'].append({
+             'id': item_id,
+             'classification': classification,
+             'attrs': [],
+             'data': group_data,  # Format: ['id1', 'id2', 'id3', ...]
+         })
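
For orientation, here is a minimal usage sketch of the new v1-to-v2 converter. The import path is illustrative (the file path is not shown in this diff), and the exact output depends on helpers defined on BaseDMConverter (_extract_media_type_info, tool_processors), which are also not part of this diff, so both are assumptions.

# Hypothetical usage sketch; module path below is illustrative, not taken from this diff.
from synapse_sdk.converters.dm import DMV1ToV2Converter  # assumed location

v1_payload = {
    'annotations': {
        'image_1': [
            {'id': 'a1', 'tool': 'bounding_box', 'classification': {'class': 'car'}},
        ],
    },
    'annotationsData': {
        'image_1': [
            {'id': 'a1', 'coordinate': {'x': 10, 'y': 20, 'width': 30, 'height': 40}},
        ],
    },
}

v2_payload = DMV1ToV2Converter(v1_payload).convert()
# Assuming _extract_media_type_info('image_1') yields ('image', 'images'), this produces roughly:
# {
#     'classification': {'bounding_box': ['car']},
#     'images': [{'bounding_box': [
#         {'id': 'a1', 'classification': 'car', 'attrs': [], 'data': [10, 20, 30, 40]},
#     ]}],
# }
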
@@ -0,0 +1,254 @@
+ import random
+ import string
+
+ from . import BaseDMConverter
+
+
+ class DMV2ToV1Converter(BaseDMConverter):
+     """DM v2 to v1 format converter class."""
+
+     def __init__(self, new_dm_data=None):
+         """Initialize the converter.
+
+         Args:
+             new_dm_data (dict, optional): DM v2 format data to be converted
+         """
+         super().__init__()
+         self.new_dm_data = new_dm_data if new_dm_data is not None else {}
+         self.annotations = {}
+         self.annotations_data = {}
+         self.extra = {}
+         self.relations = {}
+         self.annotation_groups = {}
+
+     def convert(self):
+         """Convert DM v2 data to v1 format.
+
+         Returns:
+             dict: Converted data in DM v1 format
+         """
+         # Reset state
+         new_dm_data = self.new_dm_data
+         self.annotations = {}
+         self.annotations_data = {}
+         self.extra = {}
+         self.relations = {}
+         self.annotation_groups = {}
+
+         # Process each media type (images, videos, etc.)
+         for media_type_plural, media_items in new_dm_data.items():
+             if media_type_plural == 'classification':
+                 continue
+
+             media_type = self._singularize_media_type(media_type_plural)
+
+             for index, media_item in enumerate(media_items, 1):
+                 media_id = f'{media_type}_{index}'
+
+                 # Initialize structures for this media
+                 self.annotations[media_id] = []
+                 self.annotations_data[media_id] = []
+                 self.extra[media_id] = {}
+                 self.relations[media_id] = []
+                 self.annotation_groups[media_id] = []
+
+                 # Process each tool type in the media item
+                 for tool_type, tool_data in media_item.items():
+                     self._process_tool_data(media_id, tool_type, tool_data)
+
+         # Build final result
+         result = {
+             'extra': self.extra,
+             'relations': self.relations,
+             'annotations': self.annotations,
+             'annotationsData': self.annotations_data,
+             'annotationGroups': self.annotation_groups,
+         }
+
+         return result
+
+     def _process_tool_data(self, media_id, tool_type, tool_data):
+         """Process tool data for a specific media item.
+
+         Args:
+             media_id (str): ID of the media item
+             tool_type (str): Type of annotation tool
+             tool_data (list): List of annotation data for this tool
+         """
+         for annotation in tool_data:
+             annotation_id = annotation['id']
+             classification = annotation['classification']
+             attrs = annotation.get('attrs', [])
+             data = annotation.get('data', {})
+
+             # Create annotation entry
+             annotation_entry = {
+                 'id': annotation_id,
+                 'tool': tool_type,
+                 'isLocked': False,
+                 'isVisible': True,
+                 'classification': {'class': classification},
+             }
+
+             # Add additional classification attributes from attrs
+             for attr in attrs:
+                 attr_name = attr.get('name')
+                 attr_value = attr.get('value')
+                 if attr_name and attr_value is not None:
+                     annotation_entry['classification'][attr_name] = attr_value
+
+             # Add special attributes for specific tools
+             if tool_type == 'keypoint':
+                 annotation_entry['shape'] = 'circle'
+
+             self.annotations[media_id].append(annotation_entry)
+
+             # Create annotations data entry using tool processor
+             processor = self.tool_processors.get(tool_type)
+             if processor:
+                 processor(annotation_id, data, self.annotations_data[media_id])
+             else:
+                 self._handle_unknown_tool(tool_type, annotation_id)
+
+     def _process_bounding_box(self, annotation_id, data, annotations_data):
+         """Process bounding box annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (list): Bounding box data [x, y, width, height]
+             annotations_data (list): List to append the processed data
+         """
+         if len(data) >= 4:
+             x, y, width, height = data[:4]
+             coordinate = {'x': x, 'y': y, 'width': width, 'height': height}
+
+             annotations_data.append({'id': annotation_id, 'coordinate': coordinate})
+
+     def _process_named_entity(self, annotation_id, data, annotations_data):
+         """Process named entity annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (dict): Named entity data with ranges and content
+             annotations_data (list): List to append the processed data
+         """
+         entity_data = {'id': annotation_id}
+
+         if 'ranges' in data:
+             entity_data['ranges'] = data['ranges']
+
+         if 'content' in data:
+             entity_data['content'] = data['content']
+
+         annotations_data.append(entity_data)
+
+     def _process_classification(self, annotation_id, data, annotations_data):
+         """Process classification annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (dict): Classification data (usually empty)
+             annotations_data (list): List to append the processed data
+         """
+         # Classification data is typically empty in v2, so we just add the ID
+         annotations_data.append({'id': annotation_id})
+
+     def _process_polyline(self, annotation_id, data, annotations_data):
+         """Process polyline annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (list): Polyline data [x1, y1, x2, y2, ...]
+             annotations_data (list): List to append the processed data
+         """
+         # Convert flat array to coordinate objects
+         coordinates = []
+         for i in range(0, len(data), 2):
+             if i + 1 < len(data):
+                 coordinates.append({'x': data[i], 'y': data[i + 1], 'id': self._generate_random_id()})
+
+         annotations_data.append({'id': annotation_id, 'coordinate': coordinates})
+
+     def _process_keypoint(self, annotation_id, data, annotations_data):
+         """Process keypoint annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (list): Keypoint data [x, y]
+             annotations_data (list): List to append the processed data
+         """
+         if len(data) >= 2:
+             coordinate = {'x': data[0], 'y': data[1]}
+
+             annotations_data.append({'id': annotation_id, 'coordinate': coordinate})
+
+     def _process_3d_bounding_box(self, annotation_id, data, annotations_data):
+         """Process 3D bounding box annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (dict): 3D bounding box PSR data
+             annotations_data (list): List to append the processed data
+         """
+         annotations_data.append({'id': annotation_id, 'psr': data})
+
+     def _process_segmentation(self, annotation_id, data, annotations_data):
+         """Process segmentation annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (list or dict): Segmentation data (pixel_indices or section)
+             annotations_data (list): List to append the processed data
+         """
+         annotation_data = {'id': annotation_id}
+
+         if isinstance(data, list):
+             # Pixel-based segmentation
+             annotation_data['pixel_indices'] = data
+         elif isinstance(data, dict):
+             # Section-based segmentation (video)
+             annotation_data['section'] = data
+
+         annotations_data.append(annotation_data)
+
+     def _process_polygon(self, annotation_id, data, annotations_data):
+         """Process polygon annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (list): Polygon data [x1, y1, x2, y2, ...]
+             annotations_data (list): List to append the processed data
+         """
+         # Convert flat array to coordinate objects
+         coordinates = []
+         for i in range(0, len(data), 2):
+             if i + 1 < len(data):
+                 coordinates.append({'x': data[i], 'y': data[i + 1], 'id': self._generate_random_id()})
+
+         annotations_data.append({'id': annotation_id, 'coordinate': coordinates})
+
+     def _process_relation(self, annotation_id, data, annotations_data):
+         """Process relation annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (list): Relation data
+             annotations_data (list): List to append the processed data
+         """
+         annotations_data.append({'id': annotation_id, 'data': data})
+
+     def _process_group(self, annotation_id, data, annotations_data):
+         """Process group annotation data.
+
+         Args:
+             annotation_id (str): ID of the annotation
+             data (list): Group data
+             annotations_data (list): List to append the processed data
+         """
+         annotations_data.append({'id': annotation_id, 'data': data})
+
+     def _generate_random_id(self):
+         """Generate a random ID similar to the original format."""
+         # Generate a 10-character random string of letters, digits, hyphens, and underscores
+         chars = string.ascii_letters + string.digits + '-_'
+         return ''.join(random.choices(chars, k=10))
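
Conversely, a minimal sketch of the v2-to-v1 direction, with the same caveats: the import path is illustrative, and _singularize_media_type and tool_processors come from BaseDMConverter, which is not shown in this diff.

# Hypothetical usage sketch; import path is illustrative.
from synapse_sdk.converters.dm import DMV2ToV1Converter  # assumed location

v2_payload = {
    'classification': {'bounding_box': ['car']},
    'images': [
        {'bounding_box': [
            {'id': 'a1', 'classification': 'car', 'attrs': [], 'data': [10, 20, 30, 40]},
        ]},
    ],
}

v1_payload = DMV2ToV1Converter(v2_payload).convert()
# convert() always returns the five top-level v1 keys:
# 'extra', 'relations', 'annotations', 'annotationsData', 'annotationGroups'.
# Assuming _singularize_media_type('images') yields 'image', the sample above gives
# v1_payload['annotations']['image_1'][0] ==
#     {'id': 'a1', 'tool': 'bounding_box', 'isLocked': False, 'isVisible': True,
#      'classification': {'class': 'car'}}
# and v1_payload['annotationsData']['image_1'][0] ==
#     {'id': 'a1', 'coordinate': {'x': 10, 'y': 20, 'width': 30, 'height': 40}}.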