anatools 6.0.1__py3-none-any.whl → 6.0.3__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- anatools/__init__.py +1 -1
- anatools/anaclient/api/graphs.py +8 -1
- anatools/anaclient/api/preview.py +24 -3
- anatools/anaclient/api/services.py +12 -4
- anatools/anaclient/api/workspaces.py +7 -1
- anatools/anaclient/preview.py +26 -4
- anatools/anaclient/volumes.py +20 -13
- {anatools-6.0.1.data → anatools-6.0.3.data}/scripts/anadeploy +1 -1
- {anatools-6.0.1.data → anatools-6.0.3.data}/scripts/anarules +6 -4
- {anatools-6.0.1.data → anatools-6.0.3.data}/scripts/renderedai +462 -68
- {anatools-6.0.1.dist-info → anatools-6.0.3.dist-info}/METADATA +2 -2
- {anatools-6.0.1.dist-info → anatools-6.0.3.dist-info}/RECORD +22 -22
- {anatools-6.0.1.data → anatools-6.0.3.data}/scripts/ana +0 -0
- {anatools-6.0.1.data → anatools-6.0.3.data}/scripts/anamount +0 -0
- {anatools-6.0.1.data → anatools-6.0.3.data}/scripts/anaprofile +0 -0
- {anatools-6.0.1.data → anatools-6.0.3.data}/scripts/anaserver +0 -0
- {anatools-6.0.1.data → anatools-6.0.3.data}/scripts/anatransfer +0 -0
- {anatools-6.0.1.data → anatools-6.0.3.data}/scripts/anautils +0 -0
- {anatools-6.0.1.dist-info → anatools-6.0.3.dist-info}/WHEEL +0 -0
- {anatools-6.0.1.dist-info → anatools-6.0.3.dist-info}/entry_points.txt +0 -0
- {anatools-6.0.1.dist-info → anatools-6.0.3.dist-info}/licenses/LICENSE +0 -0
- {anatools-6.0.1.dist-info → anatools-6.0.3.dist-info}/top_level.txt +0 -0
@@ -167,6 +167,7 @@ def cmd_workspaces_get(args):
         workspaceId=args.workspaceid,
         organizationId=args.orgid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
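This hunk, and the many like it that follow, threads a new `cursor` argument through each `get_*` call, switching the CLI to cursor-based pagination. Below is a minimal sketch of paging through results with the anatools client under the convention stated in the new `--cursor` help text (the cursor is the last item ID from the previous page); the `anatools.client()` constructor, the `workspaceId` field name, and the empty-page stop condition are assumptions, not taken from this diff:

```python
# Hypothetical pagination loop over workspaces; adjust names to your setup.
import anatools

client = anatools.client()   # assumed SDK entry point; credentials resolved by your environment
cursor = None
items = []
while True:
    # get_workspaces is assumed to accept cursor=, as the CLI above now passes it through
    page = client.get_workspaces(limit=50, cursor=cursor)
    if not page:
        break
    items.extend(page)
    cursor = page[-1].get('workspaceId')   # "last item ID from previous page"
    if len(page) < 50:
        break
print(f"fetched {len(items)} workspaces")
```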
@@ -225,6 +226,7 @@ def cmd_organizations_get(args):
     result = client.get_organizations(
         organizationId=args.orgid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -242,6 +244,7 @@ def cmd_members_get(args):
     result = client.get_organization_members(
         organizationId=org_id,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -260,6 +263,7 @@ def cmd_datasets_get(args):
         workspaceId=workspace_id,
         datasetId=args.datasetid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -329,6 +333,8 @@ def cmd_datasets_cancel(args):
 
 def cmd_datasets_download(args):
     """Download a dataset or a single file from a dataset."""
+    import zipfile
+
     client = get_client()
     workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
     dataset_id = require_arg(args, 'datasetid', 'Dataset ID')
@@ -347,7 +353,15 @@ def cmd_datasets_download(args):
         datasetId=dataset_id,
         localDir=args.outputdir
     )
-
+
+    if args.extract and result and result.endswith('.zip') and os.path.isfile(result):
+        extract_dir = os.path.splitext(result)[0]
+        with zipfile.ZipFile(result, 'r') as zf:
+            zf.extractall(extract_dir)
+        os.remove(result)
+        output_json({"downloadPath": extract_dir, "extracted": True})
+    else:
+        output_json({"downloadPath": result})
 
 
 def cmd_datasets_upload(args):
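The new `--extract` flag on `datasets download` (and on `annotations download` further below) unzips the downloaded archive into a sibling directory, deletes the `.zip`, and reports the result as JSON via `output_json`. A hedged sketch of driving this from a script; it assumes the `renderedai` entry point from this wheel is on PATH, that the top-level subcommand is `datasets` as the parser variable names suggest, and that the JSON payload contains only the `downloadPath`/`extracted` keys shown above:

```python
# Sketch: call the CLI with --extract and read its JSON result from stdout.
import json
import subprocess

proc = subprocess.run(
    [
        "renderedai", "datasets", "download",
        "--workspaceid", "<workspace-id>",   # placeholder IDs
        "--datasetid", "<dataset-id>",
        "--outputdir", "./downloads",
        "--extract",
    ],
    capture_output=True, text=True, check=True,
)
info = json.loads(proc.stdout)
print(info.get("downloadPath"), info.get("extracted", False))
```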
@@ -406,7 +420,8 @@ def cmd_datasets_files(args):
         workspaceId=workspace_id,
         datasetId=dataset_id,
         path=args.path,
-        limit=args.limit
+        limit=args.limit,
+        cursor=args.cursor
     )
     output_json(result)
 
@@ -420,6 +435,7 @@ def cmd_datasets_jobs(args):
         organizationId=args.orgid,
         datasetId=args.datasetid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -456,6 +472,7 @@ def cmd_volumes_get(args):
         workspaceId=args.workspaceid,
         organizationId=args.orgid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -514,7 +531,8 @@ def cmd_volume_data_get(args):
         dir=args.dir,
         files=parse_list_arg(args.files) if args.files else None,
         recursive=args.recursive,
-        limit=args.limit
+        limit=args.limit,
+        cursor=args.cursor
     )
     output_json(result)
 
@@ -573,7 +591,8 @@ def cmd_volume_data_search(args):
         keywords=parse_list_arg(args.keywords) if args.keywords else None,
         fileformats=parse_list_arg(args.formats) if args.formats else None,
         filetypes=parse_list_arg(args.types) if args.types else None,
-        limit=args.limit
+        limit=args.limit,
+        cursor=args.cursor
     )
     output_json(result)
 
@@ -986,6 +1005,7 @@ def cmd_graphs_get(args):
         graphId=args.graphid,
         staged=args.staged,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -1085,46 +1105,271 @@ def cmd_graphs_stage(args):
     output_json({"graphId": result})
 
 
+# =============================================================================
+# DATASET-VIEWER
+# =============================================================================
+
+DATASET_VIEWER_TRIGGER_PATH = os.path.join(os.path.expanduser('~'), '.theia', 'dataset-viewer-open')
+DATASET_VIEWER_STATUS_PATH = os.path.join(os.path.expanduser('~'), '.theia', 'dataset-viewer-status.json')
+
+
+def _write_dataset_viewer_trigger(action: str, payload: dict):
+    """Write a trigger file for the dataset viewer extension to pick up."""
+    trigger_data = {"action": action, **payload}
+    trigger_dir = os.path.dirname(DATASET_VIEWER_TRIGGER_PATH)
+    os.makedirs(trigger_dir, exist_ok=True)
+    with open(DATASET_VIEWER_TRIGGER_PATH, 'w') as f:
+        json.dump(trigger_data, f, indent=2)
+    return trigger_data
+
+
+def cmd_dataset_viewer_open(args):
+    """Open a dataset folder in the Annotation Viewer."""
+    dataset_path = getattr(args, 'path', None)
+    if not dataset_path:
+        output_error("Dataset path is required. Use --path", "MISSING_PATH")
+        sys.exit(1)
+
+    dataset_path = os.path.abspath(dataset_path)
+    if not os.path.isdir(dataset_path):
+        output_error(f"Directory not found: {dataset_path}", "PATH_NOT_FOUND")
+        sys.exit(1)
+
+    images_dir = os.path.join(dataset_path, 'images')
+    if not os.path.isdir(images_dir):
+        output_error(f"No images/ directory found in {dataset_path}", "NO_IMAGES_DIR")
+        sys.exit(1)
+
+    image_index = getattr(args, 'index', None) or 0
+
+    trigger_data = _write_dataset_viewer_trigger('open', {
+        'datasetPath': dataset_path,
+        'imageIndex': int(image_index),
+    })
+
+    output_json({
+        "status": "ok",
+        "action": "open",
+        "datasetPath": dataset_path,
+        "imageIndex": int(image_index),
+        "triggerPath": DATASET_VIEWER_TRIGGER_PATH,
+    })
+
+
+def cmd_dataset_viewer_next(args):
+    """Navigate to the next image in the dataset viewer."""
+    trigger_data = _write_dataset_viewer_trigger('next', {})
+    output_json({"status": "ok", "action": "next", "triggerPath": DATASET_VIEWER_TRIGGER_PATH})
+
+
+def cmd_dataset_viewer_prev(args):
+    """Navigate to the previous image in the dataset viewer."""
+    trigger_data = _write_dataset_viewer_trigger('prev', {})
+    output_json({"status": "ok", "action": "prev", "triggerPath": DATASET_VIEWER_TRIGGER_PATH})
+
+
+def cmd_dataset_viewer_goto(args):
+    """Navigate to a specific image by index or name."""
+    index = getattr(args, 'index', None)
+    name = getattr(args, 'name', None)
+
+    if index is None and not name:
+        output_error("Specify --index or --name", "MISSING_TARGET")
+        sys.exit(1)
+
+    payload = {}
+    if index is not None:
+        payload['imageIndex'] = int(index)
+    if name:
+        payload['imageName'] = name
+
+    trigger_data = _write_dataset_viewer_trigger('goto', payload)
+    output_json({
+        "status": "ok",
+        "action": "goto",
+        **payload,
+        "triggerPath": DATASET_VIEWER_TRIGGER_PATH,
+    })
+
+
+def cmd_dataset_viewer_annotations(args):
+    """Set which annotation types are enabled in the viewer."""
+    types_str = getattr(args, 'types', None)
+    if not types_str:
+        output_error("Annotation types required. Use --types (comma-separated: bbox,bbox3d,segmentation,centroid,mask)", "MISSING_TYPES")
+        sys.exit(1)
+
+    valid_types = {'bbox', 'bbox3d', 'segmentation', 'centroid', 'mask'}
+    types = [t.strip() for t in types_str.split(',')]
+    invalid = [t for t in types if t not in valid_types]
+    if invalid:
+        output_error(f"Invalid annotation types: {', '.join(invalid)}. Valid: {', '.join(sorted(valid_types))}", "INVALID_TYPES")
+        sys.exit(1)
+
+    trigger_data = _write_dataset_viewer_trigger('setAnnotations', {'annotations': types})
+    output_json({
+        "status": "ok",
+        "action": "setAnnotations",
+        "annotations": types,
+        "triggerPath": DATASET_VIEWER_TRIGGER_PATH,
+    })
+
+
+def cmd_dataset_viewer_filter(args):
+    """Set object type filter in the viewer."""
+    types_str = getattr(args, 'types', None)
+
+    if types_str:
+        types = [t.strip() for t in types_str.split(',')]
+    else:
+        # No types = show all (clear filter)
+        types = []
+
+    trigger_data = _write_dataset_viewer_trigger('setFilter', {'objectTypes': types})
+    output_json({
+        "status": "ok",
+        "action": "setFilter",
+        "objectTypes": types,
+        "triggerPath": DATASET_VIEWER_TRIGGER_PATH,
+    })
+
+
+def cmd_dataset_viewer_status(args):
+    """Get the current status of the dataset annotation viewer."""
+    if not os.path.exists(DATASET_VIEWER_STATUS_PATH):
+        output_json({
+            "status": "no_status_file",
+            "message": "No dataset viewer status file found. The viewer may not have been opened yet.",
+            "statusPath": DATASET_VIEWER_STATUS_PATH,
+            "sessions": [],
+        })
+        return
+
+    try:
+        with open(DATASET_VIEWER_STATUS_PATH, 'r') as f:
+            status_data = json.load(f)
+    except json.JSONDecodeError as e:
+        output_error(f"Failed to parse status file: {e}", "PARSE_ERROR")
+        return
+    except IOError as e:
+        output_error(f"Failed to read status file: {e}", "READ_ERROR")
+        return
+
+    # Optionally filter by dataset path
+    filter_path = getattr(args, 'path', None)
+    if filter_path:
+        filter_path = os.path.abspath(filter_path)
+        sessions = status_data.get('sessions', {})
+        if isinstance(sessions, dict):
+            filtered = {k: v for k, v in sessions.items() if os.path.abspath(k) == filter_path}
+            status_data['sessions'] = filtered
+
+    output_json({
+        "status": "ok",
+        "statusPath": DATASET_VIEWER_STATUS_PATH,
+        **status_data,
+    })
+
+
 # =============================================================================
 # GRAPH-EDITOR
 # =============================================================================
 
 def cmd_graph_editor_open(args):
-    """
-    client = get_client()
-    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
-    graph_id = require_arg(args, 'graphid', 'Graph ID')
-    directory = args.outputdir or os.getcwd()
+    """Open a graph in the graph editor.
 
-
-
-
-
+    Supports two modes:
+    1. Download from platform: --workspaceid and --graphid (downloads graph and schema)
+    2. Local files: --graphfile and --schemafile (uses existing local files)
+    """
+    # Get arguments
+    workspace_id = getattr(args, 'workspaceid', None)
+    graph_id = getattr(args, 'graphid', None)
+    graph_file = getattr(args, 'graphfile', None)
+    schema_file = getattr(args, 'schemafile', None)
+
+    # Determine mode and validate arguments
+    has_platform_args = workspace_id or graph_id
+    has_local_args = graph_file or schema_file
+
+    if has_platform_args and has_local_args:
+        output_error(
+            "Cannot use both platform arguments (--workspaceid/--graphid) and local file arguments (--graphfile/--schemafile)",
+            "INVALID_ARGS"
+        )
         return
 
-
-
-
-
+    if not has_platform_args and not has_local_args:
+        output_error(
+            "Must provide either --workspaceid and --graphid (to download from platform) or --graphfile and --schemafile (to use local files)",
+            "MISSING_ARGS"
+        )
        return
 
-    #
-
+    # Mode 1: Local files
+    if has_local_args:
+        if not graph_file:
+            output_error("--graphfile is required when using local files", "MISSING_GRAPHFILE")
+            return
+        if not schema_file:
+            output_error("--schemafile is required when using local files", "MISSING_SCHEMAFILE")
+            return
+
+        # Validate files exist
+        if not os.path.exists(graph_file):
+            output_error(f"Graph file not found: {graph_file}", "FILE_NOT_FOUND")
+            return
+        if not os.path.exists(schema_file):
+            output_error(f"Schema file not found: {schema_file}", "FILE_NOT_FOUND")
+            return
 
-
-
-
-
+        graph_path = graph_file
+        schema_path = schema_file
+        graph_id = None
+        channel_id = None
+        graph_name = os.path.basename(graph_file)
 
-    # Download
-
-
-
-
+    # Mode 2: Download from platform
+    else:
+        if not workspace_id:
+            output_error("--workspaceid is required when downloading from platform", "MISSING_WORKSPACEID")
+            return
+        if not graph_id:
+            output_error("--graphid is required when downloading from platform", "MISSING_GRAPHID")
+            return
+
+        client = get_client()
+        directory = args.outputdir or os.getcwd()
+
+        # Get graph metadata to find channelId
+        graphs = client.get_graphs(workspaceId=workspace_id, graphId=graph_id)
+        if not graphs:
+            output_error(f"Graph {graph_id} not found", "GRAPH_NOT_FOUND")
+            return
 
-
-
-
+        graph_info = graphs[0]
+        channel_id = graph_info.get('channelId')
+        if not channel_id:
+            output_error("Graph has no associated channel", "NO_CHANNEL")
+            return
+
+        # Create output directory if needed
+        os.makedirs(directory, exist_ok=True)
+
+        # Download graph
+        graph_name = graph_info.get('name', graph_id).replace(' ', '_')
+        graph_path = os.path.join(directory, f"{graph_name}.yaml")
+        client.download_graph(workspaceId=workspace_id, graphId=graph_id, filepath=graph_path)
+
+        # Download channel schema
+        schema = client.get_channel_nodes(channelId=channel_id)
+        if not schema:
+            output_error("Failed to fetch channel schema", "SCHEMA_ERROR")
+            return
+
+        schema_path = os.path.join(directory, f"{channel_id}_schema.json")
+        with open(schema_path, 'w') as f:
+            json.dump(schema, f, indent=2)
 
     # Write trigger file to open in graph editor
     trigger_path = os.path.join(os.path.expanduser('~'), '.theia', 'graph-editor-open')
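The dataset-viewer commands added above talk to the IDE extension through a single JSON trigger file under `~/.theia/`; the CLI only writes the file and prints an acknowledgement. Below is a minimal sketch of the consuming side — a watcher that polls for the trigger, dispatches on its `action` field, and clears it. Only the file path and payload shape come from the diff; the polling loop and the delete-after-handling step are assumptions about how an extension might behave:

```python
# Hypothetical consumer of the ~/.theia/dataset-viewer-open trigger file.
import json
import os
import time

TRIGGER = os.path.join(os.path.expanduser('~'), '.theia', 'dataset-viewer-open')

def poll_trigger(handle, interval=0.5):
    """Poll for the trigger file, hand its payload to `handle`, then remove it."""
    while True:
        if os.path.exists(TRIGGER):
            with open(TRIGGER) as f:
                trigger = json.load(f)
            handle(trigger.get('action'), trigger)   # e.g. 'open', 'next', 'goto', 'setFilter'
            os.remove(TRIGGER)                       # assumed: the consumer clears the trigger
        time.sleep(interval)

# Example: poll_trigger(lambda action, data: print(action, data))
```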
@@ -1139,14 +1384,25 @@ def cmd_graph_editor_open(args):
     with open(trigger_path, 'w') as f:
         json.dump(trigger_data, f)
 
-
+    # Build output
+    result = {
         "graphPath": os.path.abspath(graph_path),
         "schemaPath": os.path.abspath(schema_path),
-        "triggerPath": trigger_path
-
-
-
-
+        "triggerPath": trigger_path
+    }
+
+    # Add platform-specific fields if downloaded
+    if graph_id:
+        result["graphId"] = graph_id
+    if channel_id:
+        result["channelId"] = channel_id
+    if has_local_args:
+        result["mode"] = "local"
+    else:
+        result["mode"] = "platform"
+        result["graphName"] = graph_info.get('name')
+
+    output_json(result)
 
 
 def cmd_graph_editor_edit_node(args):
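After this rewrite, `graph-editor open` accepts either platform coordinates or local files and reports which path it took in the `mode` field of its JSON output. Two illustrative invocations follow; the `renderedai` entry point and the `graph-editor` command name follow the script and parser names in this diff, while the IDs and file paths are placeholders:

```python
# Sketch: the two mutually exclusive modes of `graph-editor open`.
import subprocess

# "platform" mode: download the graph and its channel schema, then open them.
subprocess.run([
    "renderedai", "graph-editor", "open",
    "--workspaceid", "<workspace-id>",
    "--graphid", "<graph-id>",
    "--outputdir", "./graphs",
], check=True)

# "local" mode: open a graph and schema that are already on disk.
subprocess.run([
    "renderedai", "graph-editor", "open",
    "--graphfile", "./graphs/my_graph.yaml",
    "--schemafile", "./graphs/<channel-id>_schema.json",
], check=True)
```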
@@ -2003,6 +2259,7 @@ def cmd_channels_get(args):
         organizationId=args.orgid,
         channelId=args.channelid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2135,6 +2392,7 @@ def cmd_services_get(args):
         organizationId=args.orgid,
         serviceId=args.serviceid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2191,6 +2449,7 @@ def cmd_services_jobs(args):
         workspaceId=workspace_id,
         jobId=args.jobid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2387,6 +2646,8 @@ def cmd_annotations_formats(args):
 
 def cmd_annotations_download(args):
     """Download an annotation."""
+    import zipfile
+
     client = get_client()
     workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
     annotation_id = require_arg(args, 'annotationid', 'Annotation ID')
@@ -2395,7 +2656,15 @@ def cmd_annotations_download(args):
         workspaceId=workspace_id,
         annotationId=annotation_id
     )
-
+
+    if args.extract and result and result.endswith('.zip') and os.path.isfile(result):
+        extract_dir = os.path.splitext(result)[0]
+        with zipfile.ZipFile(result, 'r') as zf:
+            zf.extractall(extract_dir)
+        os.remove(result)
+        output_json({"downloadPath": extract_dir, "extracted": True})
+    else:
+        output_json({"downloadPath": result})
 
 
 def cmd_annotations_edit(args):
@@ -2588,6 +2857,7 @@ def cmd_gan_datasets_get(args):
         datasetId=args.datasetid,
         gandatasetId=args.gandatasetid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2633,6 +2903,7 @@ def cmd_gan_models_get(args):
         workspaceId=args.workspaceid,
         modelId=args.modelid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2681,6 +2952,7 @@ def cmd_umap_get(args):
         umapId=args.umapid,
         datasetId=args.datasetid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2732,6 +3004,7 @@ def cmd_servers_get(args):
         workspaceId=args.workspaceid,
         serverId=args.serverid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2801,6 +3074,7 @@ def cmd_ml_models_get(args):
         datasetId=args.datasetid,
         modelId=args.modelid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2852,6 +3126,7 @@ def cmd_ml_inferences_get(args):
         datasetId=args.datasetid,
         modelId=args.modelid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2887,6 +3162,7 @@ def cmd_inpaint_get(args):
         volumeId=volume_id,
         inpaintId=args.inpaintid,
         limit=args.limit,
+        cursor=args.cursor,
         fields=parse_list_arg(args.fields) if args.fields else None
     )
     output_json(result)
@@ -2947,12 +3223,40 @@ def cmd_preview_get(args):
     workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
     preview_id = require_arg(args, 'previewid', 'Preview ID')
 
-
-
-
-
-
-
+    if args.download:
+        # Fetch preview with thumbnail and status fields for download
+        fields = parse_list_arg(args.fields) if args.fields else None
+        if fields is not None:
+            for f in ['thumbnail', 'status']:
+                if f not in fields:
+                    fields.append(f)
+        result = client.get_preview(
+            workspaceId=workspace_id,
+            previewId=preview_id,
+            fields=fields
+        )
+        status = result.get('status') if isinstance(result, dict) else None
+        thumbnail = result.get('thumbnail') if isinstance(result, dict) else None
+        if status != 'success':
+            output_error(f"Preview is not complete (status: {status}). Cannot download.", "PREVIEW_NOT_READY")
+            sys.exit(1)
+        if not thumbnail:
+            output_error("Preview has no thumbnail URL available.", "NO_THUMBNAIL")
+            sys.exit(1)
+        from anatools.lib.download import download_file
+        # Derive filename from URL or use default
+        from urllib.parse import urlparse
+        url_path = urlparse(thumbnail).path
+        fname = os.path.basename(url_path) if os.path.basename(url_path) else 'preview.png'
+        download_path = download_file(url=thumbnail, fname=fname, localDir=args.outputdir)
+        output_json({"downloadPath": download_path})
+    else:
+        result = client.get_preview(
+            workspaceId=workspace_id,
+            previewId=preview_id,
+            fields=parse_list_arg(args.fields) if args.fields else None
+        )
+        output_json(result)
 
 
 def cmd_preview_create(args):
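`preview get --download` only succeeds once the preview's `status` is `success` and a `thumbnail` URL is present, so callers typically poll before downloading. A hedged sketch of that pattern using the SDK client directly, mirroring the logic above; the `anatools.client()` constructor and the polling interval are assumptions, while the `get_preview` keyword arguments and the `status`/`thumbnail` fields are the ones used in this diff:

```python
# Sketch: wait for a preview to complete, then print its thumbnail URL.
import time
import anatools

client = anatools.client()   # assumed SDK entry point
workspace_id, preview_id = "<workspace-id>", "<preview-id>"

for _ in range(30):          # roughly one minute at 2 s per poll
    preview = client.get_preview(
        workspaceId=workspace_id,
        previewId=preview_id,
        fields=['status', 'thumbnail'],
    )
    if isinstance(preview, dict) and preview.get('status') == 'success':
        print("thumbnail URL:", preview.get('thumbnail'))
        break
    time.sleep(2)
else:
    print("preview did not complete in time")
```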
@@ -2968,6 +3272,20 @@ def cmd_preview_create(args):
     output_json({"previewId": result})
 
 
+def cmd_preview_log(args):
+    """Get preview job log."""
+    client = get_client()
+    workspace_id = require_arg(args, 'workspaceid', 'Workspace ID')
+    preview_id = require_arg(args, 'previewid', 'Preview ID')
+
+    result = client.get_preview_log(
+        workspaceId=workspace_id,
+        previewId=preview_id,
+        fields=parse_list_arg(args.fields) if args.fields else None
+    )
+    output_json(result)
+
+
 # =============================================================================
 # AGENTS
 # =============================================================================
@@ -3108,7 +3426,8 @@ Examples:
     ws_get = workspaces_sub.add_parser('get', help='Get workspaces')
     ws_get.add_argument('--workspaceid', help='Filter by workspace ID')
     ws_get.add_argument('--orgid', help='Filter by organization ID')
-    ws_get.add_argument('--limit', type=int, help='Maximum results')
+    ws_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    ws_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     ws_get.add_argument('--fields', help='Comma-separated fields to return')
     ws_get.set_defaults(func=cmd_workspaces_get)
 
@@ -3158,7 +3477,8 @@ Examples:
     # organizations get
     org_get = organizations_sub.add_parser('get', help='Get organizations')
     org_get.add_argument('--orgid', help='Filter by organization ID')
-    org_get.add_argument('--limit', type=int, help='Maximum results')
+    org_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    org_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     org_get.add_argument('--fields', help='Comma-separated fields to return')
     org_get.set_defaults(func=cmd_organizations_get)
 
@@ -3171,7 +3491,8 @@ Examples:
     # members get
     members_get = members_sub.add_parser('get', help='Get organization members')
     members_get.add_argument('--orgid', required=True, help='Organization ID')
-    members_get.add_argument('--limit', type=int, help='Maximum results')
+    members_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    members_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     members_get.add_argument('--fields', help='Comma-separated fields to return')
     members_get.set_defaults(func=cmd_members_get)
 
@@ -3185,7 +3506,8 @@ Examples:
     ds_get = datasets_sub.add_parser('get', help='Get datasets')
     ds_get.add_argument('--workspaceid', required=True, help='Workspace ID')
     ds_get.add_argument('--datasetid', help='Filter by dataset ID')
-    ds_get.add_argument('--limit', type=int, help='Maximum results')
+    ds_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    ds_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     ds_get.add_argument('--fields', help='Comma-separated fields to return')
     ds_get.set_defaults(func=cmd_datasets_get)
 
@@ -3230,6 +3552,7 @@ Examples:
     ds_download.add_argument('--datasetid', required=True, help='Dataset ID')
     ds_download.add_argument('--filepath', help='Relative path to a specific file within the dataset (e.g., "images/000000-1-image.png"). If not provided, downloads the entire dataset.')
     ds_download.add_argument('--outputdir', help='Output directory')
+    ds_download.add_argument('--extract', action='store_true', help='Extract the downloaded zip file and remove the archive')
     ds_download.set_defaults(func=cmd_datasets_download)
 
     # datasets upload
@@ -3261,7 +3584,8 @@ Examples:
     ds_files.add_argument('--workspaceid', required=True, help='Workspace ID')
     ds_files.add_argument('--datasetid', required=True, help='Dataset ID')
     ds_files.add_argument('--path', help='Path within dataset')
-    ds_files.add_argument('--limit', type=int, default=
+    ds_files.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    ds_files.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     ds_files.set_defaults(func=cmd_datasets_files)
 
     # datasets jobs
@@ -3269,7 +3593,8 @@ Examples:
     ds_jobs.add_argument('--workspaceid', help='Workspace ID')
     ds_jobs.add_argument('--orgid', help='Organization ID')
     ds_jobs.add_argument('--datasetid', help='Filter by dataset ID')
-    ds_jobs.add_argument('--limit', type=int, help='Maximum results')
+    ds_jobs.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    ds_jobs.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     ds_jobs.add_argument('--fields', help='Comma-separated fields to return')
     ds_jobs.set_defaults(func=cmd_datasets_jobs)
 
@@ -3294,7 +3619,8 @@ Examples:
     vol_get.add_argument('--volumeid', help='Filter by volume ID')
     vol_get.add_argument('--workspaceid', help='Filter by workspace ID')
     vol_get.add_argument('--orgid', help='Filter by organization ID')
-    vol_get.add_argument('--limit', type=int, help='Maximum results')
+    vol_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    vol_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     vol_get.add_argument('--fields', help='Comma-separated fields to return')
     vol_get.set_defaults(func=cmd_volumes_get)
 
@@ -3345,7 +3671,8 @@ Examples:
     vd_get.add_argument('--dir', help='Directory path')
     vd_get.add_argument('--files', help='Comma-separated file paths')
    vd_get.add_argument('--recursive', action='store_true', help='Recursive listing')
-    vd_get.add_argument('--limit', type=int, help='Maximum results')
+    vd_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    vd_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     vd_get.set_defaults(func=cmd_volume_data_get)
 
     # volume-data upload
@@ -3380,7 +3707,8 @@ Examples:
     vd_search.add_argument('--keywords', help='Comma-separated keywords')
     vd_search.add_argument('--formats', help='Comma-separated file formats (e.g., png,jpg)')
     vd_search.add_argument('--types', help='Comma-separated file types (e.g., Image,3D)')
-    vd_search.add_argument('--limit', type=int, help='Maximum results')
+    vd_search.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    vd_search.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     vd_search.set_defaults(func=cmd_volume_data_search)
 
     # -------------------------------------------------------------------------
@@ -3394,7 +3722,8 @@ Examples:
     gr_get.add_argument('--workspaceid', required=True, help='Workspace ID')
     gr_get.add_argument('--graphid', help='Filter by graph ID')
     gr_get.add_argument('--staged', action='store_true', help='Only staged graphs')
-    gr_get.add_argument('--limit', type=int, help='Maximum results')
+    gr_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    gr_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     gr_get.add_argument('--fields', help='Comma-separated fields to return')
     gr_get.set_defaults(func=cmd_graphs_get)
 
@@ -3444,10 +3773,14 @@ Examples:
     graph_editor_sub = graph_editor.add_subparsers(dest='action', help='Action')
 
     # graph-editor open
-    ge_open = graph_editor_sub.add_parser('open', help='
-
-    ge_open.add_argument('--
-    ge_open.add_argument('--
+    ge_open = graph_editor_sub.add_parser('open', help='Open graph in editor (download from platform or use local files)')
+    # Option 1: Download from platform
+    ge_open.add_argument('--workspaceid', help='Workspace ID (use with --graphid to download from platform)')
+    ge_open.add_argument('--graphid', help='Graph ID (use with --workspaceid to download from platform)')
+    ge_open.add_argument('--outputdir', help='Output directory for downloaded files (default: current directory)')
+    # Option 2: Use local files
+    ge_open.add_argument('--graphfile', help='Path to local graph file (use with --schemafile)')
+    ge_open.add_argument('--schemafile', help='Path to local schema file (use with --graphfile)')
     ge_open.set_defaults(func=cmd_graph_editor_open)
 
     # graph-editor edit-node
@@ -3544,6 +3877,47 @@ Examples:
     ge_status.add_argument('--file', help='Filter to a specific graph file path')
     ge_status.set_defaults(func=cmd_graph_editor_status)
 
+    # -------------------------------------------------------------------------
+    # DATASET-VIEWER
+    # -------------------------------------------------------------------------
+    dataset_viewer = subparsers.add_parser('dataset-viewer', help='Dataset annotation viewer integration')
+    dv_sub = dataset_viewer.add_subparsers(dest='action', help='Action')
+
+    # dataset-viewer open
+    dv_open = dv_sub.add_parser('open', help='Open a dataset folder in the Annotation Viewer')
+    dv_open.add_argument('--path', required=True, help='Path to dataset directory (must contain images/ subdirectory)')
+    dv_open.add_argument('--index', type=int, default=0, help='Initial image index (default: 0)')
+    dv_open.set_defaults(func=cmd_dataset_viewer_open)
+
+    # dataset-viewer next
+    dv_next = dv_sub.add_parser('next', help='Navigate to the next image')
+    dv_next.set_defaults(func=cmd_dataset_viewer_next)
+
+    # dataset-viewer prev
+    dv_prev = dv_sub.add_parser('prev', help='Navigate to the previous image')
+    dv_prev.set_defaults(func=cmd_dataset_viewer_prev)
+
+    # dataset-viewer goto
+    dv_goto = dv_sub.add_parser('goto', help='Navigate to a specific image by index or name')
+    dv_goto.add_argument('--index', type=int, help='Image index (0-based)')
+    dv_goto.add_argument('--name', help='Image filename (or partial match)')
+    dv_goto.set_defaults(func=cmd_dataset_viewer_goto)
+
+    # dataset-viewer annotations
+    dv_annotations = dv_sub.add_parser('annotations', help='Set which annotation types are displayed')
+    dv_annotations.add_argument('--types', required=True, help='Comma-separated annotation types: bbox,bbox3d,segmentation,centroid,mask')
+    dv_annotations.set_defaults(func=cmd_dataset_viewer_annotations)
+
+    # dataset-viewer filter
+    dv_filter = dv_sub.add_parser('filter', help='Filter visible objects by type (omit --types to show all)')
+    dv_filter.add_argument('--types', help='Comma-separated object type names to show (omit to clear filter)')
+    dv_filter.set_defaults(func=cmd_dataset_viewer_filter)
+
+    # dataset-viewer status
+    dv_status = dv_sub.add_parser('status', help='Get dataset viewer status')
+    dv_status.add_argument('--path', help='Filter to a specific dataset path')
+    dv_status.set_defaults(func=cmd_dataset_viewer_status)
+
     # -------------------------------------------------------------------------
     # CHANNELS
     # -------------------------------------------------------------------------
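Taken together, the new `dataset-viewer` subcommands form a small remote control for the Annotation Viewer: each one just writes the trigger file shown earlier and prints a JSON acknowledgement. Below is a typical session sketched as subprocess calls; the `renderedai` entry point is assumed to be on PATH, and the dataset path and object-type names are placeholders:

```python
# Sketch: drive the Annotation Viewer from a script.
import subprocess

def dv(*args):
    """Run a dataset-viewer subcommand; its JSON acknowledgement goes to stdout."""
    subprocess.run(["renderedai", "dataset-viewer", *args], check=True)

dv("open", "--path", "./downloads/my_dataset", "--index", "0")
dv("annotations", "--types", "bbox,segmentation")   # valid types per the parser above
dv("filter", "--types", "car,person")               # object-type names are dataset-specific
dv("next")
dv("goto", "--name", "000042")
dv("status")
```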
@@ -3555,7 +3929,8 @@ Examples:
     ch_get.add_argument('--workspaceid', help='Filter by workspace ID')
     ch_get.add_argument('--orgid', help='Filter by organization ID')
     ch_get.add_argument('--channelid', help='Filter by channel ID')
-    ch_get.add_argument('--limit', type=int, help='Maximum results')
+    ch_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    ch_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     ch_get.add_argument('--fields', help='Comma-separated fields to return')
     ch_get.set_defaults(func=cmd_channels_get)
 
@@ -3605,7 +3980,8 @@ Examples:
     svc_get.add_argument('--workspaceid', help='Filter by workspace ID')
     svc_get.add_argument('--orgid', help='Filter by organization ID')
     svc_get.add_argument('--serviceid', help='Filter by service ID')
-    svc_get.add_argument('--limit', type=int, help='Maximum results')
+    svc_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    svc_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     svc_get.add_argument('--fields', help='Comma-separated fields to return')
     svc_get.set_defaults(func=cmd_services_get)
 
@@ -3645,7 +4021,8 @@ Examples:
     svc_jobs_get = service_jobs_sub.add_parser('get', help='Get service jobs')
     svc_jobs_get.add_argument('--workspaceid', required=True, help='Workspace ID')
     svc_jobs_get.add_argument('--jobid', help='Filter by job ID')
-    svc_jobs_get.add_argument('--limit', type=int, help='Maximum results')
+    svc_jobs_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    svc_jobs_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     svc_jobs_get.add_argument('--fields', help='Comma-separated fields to return')
     svc_jobs_get.set_defaults(func=cmd_services_jobs)
 
@@ -3747,6 +4124,7 @@ Examples:
     ann_download = annotations_sub.add_parser('download', help='Download an annotation')
     ann_download.add_argument('--workspaceid', required=True, help='Workspace ID')
     ann_download.add_argument('--annotationid', required=True, help='Annotation ID')
+    ann_download.add_argument('--extract', action='store_true', help='Extract the downloaded zip file and remove the archive')
     ann_download.set_defaults(func=cmd_annotations_download)
 
     # annotations formats
@@ -3827,7 +4205,8 @@ Examples:
     gan_models_get.add_argument('--orgid', help='Organization ID')
     gan_models_get.add_argument('--workspaceid', help='Workspace ID')
     gan_models_get.add_argument('--modelid', help='Model ID')
-    gan_models_get.add_argument('--limit', type=int, help='Maximum results')
+    gan_models_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    gan_models_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     gan_models_get.add_argument('--fields', help='Comma-separated fields')
     gan_models_get.set_defaults(func=cmd_gan_models_get)
 
@@ -3858,7 +4237,8 @@ Examples:
     gan_ds_get.add_argument('--workspaceid', required=True, help='Workspace ID')
     gan_ds_get.add_argument('--datasetid', help='Dataset ID')
     gan_ds_get.add_argument('--gandatasetid', help='GAN dataset ID')
-    gan_ds_get.add_argument('--limit', type=int, help='Maximum results')
+    gan_ds_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    gan_ds_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     gan_ds_get.add_argument('--fields', help='Comma-separated fields')
     gan_ds_get.set_defaults(func=cmd_gan_datasets_get)
 
@@ -3889,7 +4269,8 @@ Examples:
     umap_get.add_argument('--workspaceid', required=True, help='Workspace ID')
     umap_get.add_argument('--umapid', help='UMAP ID')
     umap_get.add_argument('--datasetid', help='Dataset ID')
-    umap_get.add_argument('--limit', type=int, help='Maximum results')
+    umap_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    umap_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     umap_get.add_argument('--fields', help='Comma-separated fields')
     umap_get.set_defaults(func=cmd_umap_get)
 
@@ -3921,7 +4302,8 @@ Examples:
     srv_get.add_argument('--orgid', help='Organization ID')
     srv_get.add_argument('--workspaceid', help='Workspace ID')
     srv_get.add_argument('--serverid', help='Server ID')
-    srv_get.add_argument('--limit', type=int, help='Maximum results')
+    srv_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    srv_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     srv_get.add_argument('--fields', help='Comma-separated fields')
     srv_get.set_defaults(func=cmd_servers_get)
 
@@ -3964,7 +4346,8 @@ Examples:
     ml_models_get.add_argument('--workspaceid', required=True, help='Workspace ID')
     ml_models_get.add_argument('--datasetid', help='Dataset ID')
     ml_models_get.add_argument('--modelid', help='Model ID')
-    ml_models_get.add_argument('--limit', type=int, help='Maximum results')
+    ml_models_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    ml_models_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     ml_models_get.add_argument('--fields', help='Comma-separated fields')
     ml_models_get.set_defaults(func=cmd_ml_models_get)
 
@@ -3999,7 +4382,8 @@ Examples:
     ml_inf_get.add_argument('--inferenceid', help='Inference ID')
     ml_inf_get.add_argument('--datasetid', help='Dataset ID')
     ml_inf_get.add_argument('--modelid', help='Model ID')
-    ml_inf_get.add_argument('--limit', type=int, help='Maximum results')
+    ml_inf_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    ml_inf_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     ml_inf_get.add_argument('--fields', help='Comma-separated fields')
     ml_inf_get.set_defaults(func=cmd_ml_inferences_get)
 
@@ -4022,7 +4406,8 @@ Examples:
     inp_get = inpaint_sub.add_parser('get', help='Get inpaint jobs')
     inp_get.add_argument('--volumeid', required=True, help='Volume ID')
     inp_get.add_argument('--inpaintid', help='Inpaint ID')
-    inp_get.add_argument('--limit', type=int, help='Maximum results')
+    inp_get.add_argument('--limit', type=int, default=50, help='Maximum results (default: 50)')
+    inp_get.add_argument('--cursor', help='Cursor for pagination (use last item ID from previous page)')
     inp_get.add_argument('--fields', help='Comma-separated fields')
     inp_get.set_defaults(func=cmd_inpaint_get)
 
@@ -4061,6 +4446,8 @@ Examples:
     prv_get.add_argument('--workspaceid', required=True, help='Workspace ID')
     prv_get.add_argument('--previewid', required=True, help='Preview ID')
     prv_get.add_argument('--fields', help='Comma-separated fields')
+    prv_get.add_argument('--download', action='store_true', help='Download the preview thumbnail image')
+    prv_get.add_argument('--outputdir', help='Output directory for downloaded preview (default: current directory)')
     prv_get.set_defaults(func=cmd_preview_get)
 
     # preview create
@@ -4069,6 +4456,13 @@ Examples:
     prv_create.add_argument('--graphid', required=True, help='Graph ID')
     prv_create.set_defaults(func=cmd_preview_create)
 
+    # preview log
+    prv_log = preview_sub.add_parser('log', help='Get preview job log')
+    prv_log.add_argument('--workspaceid', required=True, help='Workspace ID')
+    prv_log.add_argument('--previewid', required=True, help='Preview ID')
+    prv_log.add_argument('--fields', help='Comma-separated fields')
+    prv_log.set_defaults(func=cmd_preview_log)
+
     # -------------------------------------------------------------------------
     # AGENTS
     # -------------------------------------------------------------------------