cognite-toolkit 0.7.54__py3-none-any.whl → 0.7.56__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- cognite_toolkit/_cdf_tk/apps/_download_app.py +19 -42
- cognite_toolkit/_cdf_tk/apps/_migrate_app.py +28 -36
- cognite_toolkit/_cdf_tk/apps/_purge.py +14 -15
- cognite_toolkit/_cdf_tk/apps/_upload_app.py +3 -9
- cognite_toolkit/_cdf_tk/client/http_client/__init__.py +0 -38
- cognite_toolkit/_cdf_tk/client/http_client/_client.py +4 -161
- cognite_toolkit/_cdf_tk/client/http_client/_data_classes2.py +18 -18
- cognite_toolkit/_cdf_tk/client/resource_classes/filemetadata.py +7 -1
- cognite_toolkit/_cdf_tk/commands/_migrate/command.py +8 -8
- cognite_toolkit/_cdf_tk/commands/_migrate/migration_io.py +26 -25
- cognite_toolkit/_cdf_tk/commands/_profile.py +1 -1
- cognite_toolkit/_cdf_tk/commands/_purge.py +20 -21
- cognite_toolkit/_cdf_tk/commands/_upload.py +4 -6
- cognite_toolkit/_cdf_tk/commands/auth.py +12 -15
- cognite_toolkit/_cdf_tk/commands/clean.py +2 -1
- cognite_toolkit/_cdf_tk/commands/dump_resource.py +30 -19
- cognite_toolkit/_cdf_tk/commands/init.py +3 -3
- cognite_toolkit/_cdf_tk/commands/modules.py +17 -10
- cognite_toolkit/_cdf_tk/commands/pull.py +2 -2
- cognite_toolkit/_cdf_tk/commands/repo.py +1 -1
- cognite_toolkit/_cdf_tk/commands/resources.py +8 -5
- cognite_toolkit/_cdf_tk/commands/run.py +8 -7
- cognite_toolkit/_cdf_tk/protocols.py +3 -1
- cognite_toolkit/_cdf_tk/storageio/_applications.py +3 -3
- cognite_toolkit/_cdf_tk/storageio/_base.py +16 -11
- cognite_toolkit/_cdf_tk/storageio/_datapoints.py +37 -25
- cognite_toolkit/_cdf_tk/storageio/_file_content.py +39 -35
- cognite_toolkit/_cdf_tk/storageio/_raw.py +6 -5
- cognite_toolkit/_cdf_tk/utils/auth.py +7 -7
- cognite_toolkit/_cdf_tk/utils/interactive_select.py +49 -49
- cognite_toolkit/_repo_files/GitHub/.github/workflows/deploy.yaml +1 -1
- cognite_toolkit/_repo_files/GitHub/.github/workflows/dry-run.yaml +1 -1
- cognite_toolkit/_resources/cdf.toml +1 -1
- cognite_toolkit/_version.py +1 -1
- {cognite_toolkit-0.7.54.dist-info → cognite_toolkit-0.7.56.dist-info}/METADATA +1 -1
- {cognite_toolkit-0.7.54.dist-info → cognite_toolkit-0.7.56.dist-info}/RECORD +38 -39
- cognite_toolkit/_cdf_tk/client/http_client/_data_classes.py +0 -428
- {cognite_toolkit-0.7.54.dist-info → cognite_toolkit-0.7.56.dist-info}/WHEEL +0 -0
- {cognite_toolkit-0.7.54.dist-info → cognite_toolkit-0.7.56.dist-info}/entry_points.txt +0 -0
@@ -136,7 +136,9 @@ class DataModelFinder(ResourceFinder[DataModelId]):
        if len(available_spaces) == 1:
            selected_space = available_spaces[0]
        else:
-            selected_space = questionary.select(
+            selected_space = questionary.select(
+                "In which space is your data model located?", available_spaces
+            ).unsafe_ask()
        data_model_ids = sorted(
            [model for model in data_model_ids if model.space == selected_space], key=lambda model: model.as_tuple()
        )
@@ -147,7 +149,7 @@ class DataModelFinder(ResourceFinder[DataModelId]):
                Choice(f"{model_id!r}", value=model_id)
                for model_id in sorted(data_model_ids, key=lambda model: model.as_tuple())
            ],
-        ).
+        ).unsafe_ask()

        retrieved_models = self.client.data_modeling.data_models.retrieve(
            (selected_data_model.space, selected_data_model.external_id), inline_views=False
@@ -165,7 +167,7 @@ class DataModelFinder(ResourceFinder[DataModelId]):
        if not questionary.confirm(
            f"Would you like to select a different version than {selected_data_model.version} of the data model",
            default=False,
-        ).
+        ).unsafe_ask():
            self.data_model = models_by_version[cast(str, selected_data_model.version)]
            return selected_data_model

@@ -175,7 +177,7 @@ class DataModelFinder(ResourceFinder[DataModelId]):
                Choice(f"{version} ({len(model.views)} views)", value=version)
                for version, model in models_by_version.items()
            ],
-        ).
+        ).unsafe_ask()
        self.data_model = models_by_version[selected_model]
        return self.data_model.as_id()

@@ -240,7 +242,7 @@ class WorkflowFinder(ResourceFinder[WorkflowVersionId]):
        selected_workflow_id: str = questionary.select(
            "Which workflow would you like to dump?",
            [Choice(workflow_id, value=workflow_id) for workflow_id in workflows.as_external_ids()],
-        ).
+        ).unsafe_ask()
        for workflow in workflows:
            if workflow.external_id == selected_workflow_id:
                self._workflow = workflow
@@ -256,7 +258,7 @@ class WorkflowFinder(ResourceFinder[WorkflowVersionId]):
        selected_version: WorkflowVersionId = questionary.select(
            "Which version would you like to dump?",
            [Choice(f"{version!r}", value=version) for version in versions.as_ids()],
-        ).
+        ).unsafe_ask()
        for version in versions:
            if version.version == selected_version.version:
                self._workflow_version = version
@@ -309,7 +311,8 @@ class TransformationFinder(ResourceFinder[tuple[str, ...]]):
        selected_transformation_ids: tuple[str, ...] | None = questionary.checkbox(
            "Which transformation(s) would you like to dump?",
            choices=choices,
-
+            validate=lambda choices: True if choices else "You must select at least one transformation.",
+        ).unsafe_ask()
        if not selected_transformation_ids:
            raise ToolkitValueError(f"No transformations selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
        return tuple(selected_transformation_ids)
@@ -354,7 +357,8 @@ class GroupFinder(ResourceFinder[tuple[str, ...]]):
                Choice(f"{group_name} ({len(group_list)} group{'s' if len(group_list) > 1 else ''})", value=group_list)
                for group_name, group_list in sorted(groups_by_name.items())
            ],
-
+            validate=lambda choices: True if choices else "You must select at least one group.",
+        ).unsafe_ask()
        if not selected_groups:
            raise ToolkitValueError(f"No group selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
        self.groups = [group for group_list in selected_groups for group in group_list]
@@ -387,7 +391,8 @@ class AgentFinder(ResourceFinder[tuple[str, ...]]):
        selected_agent_ids: list[str] | None = questionary.checkbox(
            "Which agent(s) would you like to dump?",
            choices=choices,
-
+            validate=lambda choices: True if choices else "You must select at least one agent.",
+        ).unsafe_ask()
        if not selected_agent_ids:
            raise ToolkitValueError(f"No agents selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
        return tuple(selected_agent_ids)
@@ -418,7 +423,7 @@ class NodeFinder(ResourceFinder[dm.ViewId]):
            raise ToolkitMissingResourceError("No spaces found")
        selected_space: str = questionary.select(
            "In which space is your node property view located?", [space.space for space in spaces]
-        ).
+        ).unsafe_ask()

        views = self.client.data_modeling.views.list(space=selected_space, limit=-1, all_versions=False)
        if not views:
@@ -428,7 +433,7 @@ class NodeFinder(ResourceFinder[dm.ViewId]):
        selected_view_id: dm.ViewId = questionary.select(
            "Which node property view would you like to dump?",
            [Choice(repr(view), value=view) for view in views.as_ids()],
-        ).
+        ).unsafe_ask()
        return selected_view_id

    def __iter__(self) -> Iterator[tuple[list[Hashable], CogniteResourceList | None, ResourceCRUD, None | str]]:
@@ -444,7 +449,7 @@ class NodeFinder(ResourceFinder[dm.ViewId]):
        if not questionary.confirm(
            f"Are you sure you want to dump {count} nodes? This may take a while.",
            default=False,
-        ).
+        ).unsafe_ask():
            typer.Exit(0)
        nodes = dm.NodeList[dm.Node](list(loader.iterate()))
        yield [], nodes, loader, None
@@ -463,7 +468,8 @@ class LocationFilterFinder(ResourceFinder[tuple[str, ...]]):
        selected_filter_ids: tuple[str, ...] | None = questionary.checkbox(
            "Which filters would you like to dump?",
            choices=[Choice(name, value=id_) for name, id_ in id_by_display_name.items()],
-
+            validate=lambda choices: True if choices else "You must select at least one filter.",
+        ).unsafe_ask()
        if not selected_filter_ids:
            raise ToolkitValueError(f"No filters selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
        return tuple(selected_filter_ids)
@@ -499,7 +505,8 @@ class ExtractionPipelineFinder(ResourceFinder[tuple[str, ...]]):
        selected_pipeline_ids: tuple[str, ...] | None = questionary.checkbox(
            "Which extraction pipeline(s) would you like to dump?",
            choices=choices,
-
+            validate=lambda choices: True if choices else "You must select at least one pipeline.",
+        ).unsafe_ask()
        if not selected_pipeline_ids:
            raise ToolkitValueError(f"No extraction pipelines selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
        return tuple(selected_pipeline_ids)
@@ -538,7 +545,8 @@ class DataSetFinder(ResourceFinder[tuple[str, ...]]):
        selected_dataset_ids: tuple[str, ...] | None = questionary.checkbox(
            "Which dataset(s) would you like to dump?",
            choices=choices,
-
+            validate=lambda choices: True if choices else "You must select at least one dataset.",
+        ).unsafe_ask()
        if not selected_dataset_ids:
            raise ToolkitValueError(f"No datasets selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
        return tuple(selected_dataset_ids)
@@ -574,7 +582,8 @@ class FunctionFinder(ResourceFinder[tuple[str, ...]]):
        selected_function_ids: tuple[str, ...] | None = questionary.checkbox(
            "Which function(s) would you like to dump?",
            choices=choices,
-
+            validate=lambda choices: True if choices else "You must select at least one function.",
+        ).unsafe_ask()
        if not selected_function_ids:
            raise ToolkitValueError(f"No functions selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
        return tuple(selected_function_ids)
@@ -635,7 +644,7 @@ class StreamlitFinder(ResourceFinder[tuple[str, ...]]):
                Choice(f"{item.value} ({item.count})", value=item.value)
                for item in sorted(result, key=lambda r: (r.count, str(r.value) or ""))
            ],
-        ).
+        ).unsafe_ask()
        files = self.client.files.list(
            limit=-1, directory_prefix="/streamlit-apps/", metadata={"creator": str(selected_creator)}
        )
@@ -653,7 +662,8 @@ class StreamlitFinder(ResourceFinder[tuple[str, ...]]):
                )
                for app in sorted(self.apps, key=lambda a: a.name)
            ],
-
+            validate=lambda choices: True if choices else "You must select at least one Streamlit app.",
+        ).unsafe_ask()
        if not selected_ids:
            raise ToolkitValueError(f"No Streamlit app selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
        return tuple(selected_ids)
@@ -772,7 +782,8 @@ class SearchConfigFinder(ResourceFinder[tuple[SearchConfigViewId, ...]]):
        selected_view_ids: list[SearchConfigViewId] | None = questionary.checkbox(
            "For which view would you like to dump the search configuration?",
            choices=choices,
-
+            validate=lambda choices: True if choices else "You must select at least one view.",
+        ).unsafe_ask()
        if not selected_view_ids:
            raise ToolkitValueError("No view selected for dumping the search configuration.")
        return tuple(selected_view_ids)
@@ -129,7 +129,7 @@ class InitCommand(ToolkitCommand):
            "Select a task:",
            choices=choices,
            default=default_value,
-        ).
+        ).unsafe_ask()

        # User cancelled (Ctrl+C or similar)
        if selected is None:
@@ -161,7 +161,7 @@ class InitCommand(ToolkitCommand):
            confirm = questionary.confirm(
                f"'{selected_item.description}' was already run {status_text}",
                default=False,
-            ).
+            ).unsafe_ask()
            if not confirm:
                continue

@@ -218,7 +218,7 @@ class InitCommand(ToolkitCommand):
        opt_in = questionary.confirm(
            "Do you want to opt in to collect usage statistics? This will help us improve the Toolkit.",
            default=True,
-        ).
+        ).unsafe_ask()
        if dry_run:
            print("Would opt in to collect data" if opt_in else "Would not opt in to collect data")
            return
@@ -181,7 +181,7 @@ class ModulesCommand(ToolkitCommand):
            if questionary.confirm(
                f"{INDENT}Module {module.name} already exists in folder {target_dir}. Would you like to overwrite?",
                default=False,
-            ).
+            ).unsafe_ask():
                safe_rmtree(target_dir)
            else:
                continue
@@ -351,7 +351,10 @@ class ModulesCommand(ToolkitCommand):
        # Need to rerun the mode as bootcamp has a hardcoded organization directory
        mode = self._verify_clean(bootcamp_org / MODULES, clean)
        # We only ask to verify if the user has not already selected overwrite in the _verify_clean method
-        if
+        if (
+            mode == "clean"
+            or questionary.confirm("Would you like to continue with creation?", default=True).unsafe_ask()
+        ):
            self._create(
                organization_dir=bootcamp_org,
                selected_packages=selected,
@@ -363,7 +366,7 @@ class ModulesCommand(ToolkitCommand):

        if (
            not is_interactive
-            and not questionary.confirm("Would you like to continue with creation?", default=True).
+            and not questionary.confirm("Would you like to continue with creation?", default=True).unsafe_ask()
        ):
            print("Exiting...")
            raise typer.Exit()
@@ -380,7 +383,8 @@ class ModulesCommand(ToolkitCommand):
                qmark=INDENT,
                pointer=POINTER,
                style=custom_style_fancy,
-
+                validate=lambda choices: True if choices else "You must select at least one environment.",
+            ).unsafe_ask()
        else:
            environments = user_environments

@@ -425,7 +429,7 @@ class ModulesCommand(ToolkitCommand):
            )
        )

-        organization_dir_raw = questionary.text(message="", default="").
+        organization_dir_raw = questionary.text(message="", default="").unsafe_ask()
        return Path(organization_dir_raw.strip())

    @staticmethod
@@ -438,7 +442,7 @@ class ModulesCommand(ToolkitCommand):
        download_data = questionary.confirm(
            f"The modules {humanize_collection(example_data)} has example data. Would you like to download it?",
            default=True,
-        ).
+        ).unsafe_ask()
        return download_data

    def _select_modules_in_package(self, package: Package) -> list[ModuleLocation]:
@@ -467,7 +471,8 @@ class ModulesCommand(ToolkitCommand):
            qmark=INDENT,
            pointer=POINTER,
            style=custom_style_fancy,
-
+            validate=lambda choices: True if choices else "You must select at least one module.",
+        ).unsafe_ask()

    def _select_packages(self, packages: Packages, existing_module_names: list[str] | None = None) -> Packages:
        adding_to_existing = False
@@ -488,7 +493,9 @@ class ModulesCommand(ToolkitCommand):
            print(Padding.indent(tree, 5))
            print("\n")

-            if not questionary.confirm(
+            if not questionary.confirm(
+                "Would you like to make changes to the selection?", default=False
+            ).unsafe_ask():
                break

        if not any([len(package.modules) > 0 for package in packages.values()]):
@@ -510,7 +517,7 @@ class ModulesCommand(ToolkitCommand):
            choices=choices,
            pointer=POINTER,
            style=custom_style_fancy,
-        ).
+        ).unsafe_ask()

        if package is None:
            raise typer.Exit(code=0)
@@ -545,7 +552,7 @@ class ModulesCommand(ToolkitCommand):
            pointer=POINTER,
            style=custom_style_fancy,
            instruction="use arrow up/down and " + "⮐ " + " to save",
-        ).
+        ).unsafe_ask()
        if user_selection == "abort":
            print("Aborting...")
            raise typer.Exit()
@@ -436,7 +436,7 @@ class PullCommand(ToolkitCommand):
            selected = questionary.select(
                "Select a module to pull",
                choices=[Choice(title=module.name, value=module.name) for module in modules],
-            ).
+            ).unsafe_ask()
        else:
            selected = parse_user_selected_modules([module_name_or_path])[0]
        build_module: str | Path
@@ -629,7 +629,7 @@ class PullCommand(ToolkitCommand):
            return questionary.select(
                f"Select a {loader.display_name} to pull",
                choices=[Choice(title=f"{r.identifier!r} - ({r.module_name})", value=r) for r in local_resources],
-            ).
+            ).unsafe_ask()
        if id_ not in local_resources.identifiers:
            raise ToolkitMissingResourceError(
                f"No {loader.display_name} with external id {id_} found in the current configuration in {organization_dir}."
@@ -52,7 +52,7 @@ class RepoCommand(ToolkitCommand):
        )

        if host is None:
-            repo_host = questionary.select("Where do are you hosting the repository?", REPOSITORY_HOSTING).
+            repo_host = questionary.select("Where do are you hosting the repository?", REPOSITORY_HOSTING).unsafe_ask()
        else:
            repo_host = next(
                (provider for provider in REPOSITORY_HOSTING if provider.casefold() == host.casefold()), "Other"
@@ -32,7 +32,7 @@ class ResourcesCommand(ToolkitCommand):
            if mod.name.casefold() == module.casefold():
                return mod.dir

-        if questionary.confirm(f"{module} module not found. Do you want to create a new one?").
+        if questionary.confirm(f"{module} module not found. Do you want to create a new one?").unsafe_ask():
            return organization_dir / MODULES / module

        if verbose:
@@ -44,10 +44,10 @@ class ResourcesCommand(ToolkitCommand):
        choices = [Choice(title=mod.name, value=mod.dir) for mod in present_modules]
        choices.append(Choice(title="<Create new module>", value="NEW"))

-        selected = questionary.select("Select a module:", choices=choices).
+        selected = questionary.select("Select a module:", choices=choices).unsafe_ask()

        if selected == "NEW":
-            new_module_name = questionary.text("Enter name for new module:").
+            new_module_name = questionary.text("Enter name for new module:").unsafe_ask()
            if not new_module_name:
                print("[red]No module name provided. Aborting...[/red]")
                raise typer.Exit()
@@ -69,7 +69,7 @@ class ResourcesCommand(ToolkitCommand):
        sorted_cruds = sorted(RESOURCE_CRUD_LIST, key=lambda x: x.kind)
        choices = [Choice(title=crud.kind, value=crud) for crud in sorted_cruds]

-        selected = questionary.select("Select resource type:", choices=choices).
+        selected = questionary.select("Select resource type:", choices=choices).unsafe_ask()
        if not selected:
            print("[red]No resource type selected. Aborting...[/red]")
            raise typer.Exit()
@@ -142,7 +142,10 @@ class ResourcesCommand(ToolkitCommand):
        file_name = f"{final_prefix}.{resource_crud.kind}.yaml"
        file_path: Path = resource_dir / file_name

-        if
+        if (
+            file_path.exists()
+            and not questionary.confirm(f"{file_path.name} file already exists. Overwrite?").unsafe_ask()
+        ):
            print("[red]Skipping...[/red]")
            return

@@ -153,7 +153,7 @@ if __name__ == "__main__":
        )

        if is_interactive:
-            wait = questionary.confirm("Do you want to wait for the function to complete?").
+            wait = questionary.confirm("Do you want to wait for the function to complete?").unsafe_ask()

        # Todo: Get one shot token using the call_args.authentication
        session = client.iam.sessions.create(session_type="ONESHOT_TOKEN_EXCHANGE")
@@ -220,7 +220,7 @@ if __name__ == "__main__":
            # Interactive mode
            external_id = questionary.select(
                "Select function to run", choices=list(function_builds_by_identifier.keys())
-            ).
+            ).unsafe_ask()
        elif external_id not in function_builds_by_identifier.keys():
            raise ToolkitMissingResourceError(f"Could not find function with external id {external_id}")
        return function_builds_by_identifier[external_id]
@@ -235,7 +235,8 @@ if __name__ == "__main__":
        is_interactive: bool,
    ) -> FunctionCallArgs:
        if data_source is None and (
-            not is_interactive
+            not is_interactive
+            or not questionary.confirm("Do you want to provide input data for the function?").unsafe_ask()
        ):
            return FunctionCallArgs()
        if is_interactive:
@@ -315,7 +316,7 @@ if __name__ == "__main__":
        if len(options) == 0:
            print(f"No schedules or workflows found for this {function_external_id} function.")
            return {}, None
-        selected_name: str = questionary.select("Select schedule to run", choices=options).
+        selected_name: str = questionary.select("Select schedule to run", choices=options).unsafe_ask()  # type: ignore[arg-type]
        selected = options[selected_name]
        if isinstance(selected, BuiltResourceFull):
            # Schedule
@@ -689,7 +690,7 @@ class RunWorkflowCommand(ToolkitCommand):
        if external_id is None:
            # Interactive mode
            choices = [questionary.Choice(title=f"{build.identifier!r}", value=build) for build in workflows]
-            selected = questionary.select("Select workflow to run", choices=choices).
+            selected = questionary.select("Select workflow to run", choices=choices).unsafe_ask()
        else:
            selected_ = next(
                (
@@ -719,7 +720,7 @@ class RunWorkflowCommand(ToolkitCommand):
                is_interactive
                and not questionary.confirm(
                    "Do you want to use input data and authentication from this trigger?"
-                ).
+                ).unsafe_ask()
            ):
                break
            credentials = (
@@ -750,7 +751,7 @@ class RunWorkflowCommand(ToolkitCommand):
            raise AuthorizationError(f"Could not create oneshot session for workflow {id_!r}: {e!s}") from e

        if is_interactive:
-            wait = questionary.confirm("Do you want to wait for the workflow to complete?").
+            wait = questionary.confirm("Do you want to wait for the workflow to complete?").unsafe_ask()
        if id_.version is None:
            raise ToolkitValueError("Version is required for workflow.")
        execution = client.workflows.executions.run(
@@ -1,6 +1,6 @@
 import sys
 from collections.abc import Collection, Iterator
-from typing import Any, Generic, Protocol, TypeVar
+from typing import Any, Generic, Protocol, TypeVar, runtime_checkable

 from cognite.client import CogniteClient

@@ -10,6 +10,7 @@ else:
    from typing_extensions import Self


+@runtime_checkable
 class ResourceRequestProtocol(Protocol):
    @classmethod
    def _load(cls, data: dict[str, Any]) -> Self: ...
@@ -17,6 +18,7 @@ class ResourceRequestProtocol(Protocol):
    def dump(self, camel_case: bool = True) -> dict[str, Any]: ...


+@runtime_checkable
 class ResourceResponseProtocol(Protocol):
    def as_write(self) -> ResourceRequestProtocol: ...

@@ -7,11 +7,11 @@ from pydantic import JsonValue
 from cognite_toolkit._cdf_tk.client import ToolkitClient
 from cognite_toolkit._cdf_tk.client.http_client import (
    HTTPClient,
-    HTTPMessage,
    HTTPResult2,
    RequestMessage2,
    SuccessResponse2,
 )
+from cognite_toolkit._cdf_tk.client.http_client._item_classes import ItemsResultList
 from cognite_toolkit._cdf_tk.client.resource_classes.legacy.canvas import (
    IndustrialCanvas,
    IndustrialCanvasApply,
@@ -189,9 +189,9 @@ class CanvasIO(UploadableStorageIO[CanvasSelector, IndustrialCanvas, IndustrialC
        data_chunk: Sequence[UploadItem[IndustrialCanvasApply]],
        http_client: HTTPClient,
        selector: CanvasSelector | None = None,
-    ) ->
+    ) -> ItemsResultList:
        config = http_client.config
-        results
+        results = ItemsResultList()
        for item in data_chunk:
            instances = item.item.as_instances()
            upsert_items: list[dict[str, JsonValue]] = []
@@ -1,10 +1,14 @@
 from abc import ABC, abstractmethod
 from collections.abc import Iterable, Mapping, Sequence, Sized
 from dataclasses import dataclass
-from typing import ClassVar, Generic, Literal, TypeVar
+from typing import Any, ClassVar, Generic, Literal, TypeVar
+
+from pydantic import ConfigDict

 from cognite_toolkit._cdf_tk.client import ToolkitClient
-from cognite_toolkit._cdf_tk.client.
+from cognite_toolkit._cdf_tk.client._resource_base import RequestItem
+from cognite_toolkit._cdf_tk.client.http_client import HTTPClient
+from cognite_toolkit._cdf_tk.client.http_client._item_classes import ItemsRequest2, ItemsResultList
 from cognite_toolkit._cdf_tk.exceptions import ToolkitNotImplementedError
 from cognite_toolkit._cdf_tk.protocols import T_ResourceRequest, T_ResourceResponse
 from cognite_toolkit._cdf_tk.utils.collection import chunker
@@ -36,8 +40,7 @@ class Page(Generic[T_ResourceResponse], Sized):
        return len(self.items)


-
-class UploadItem(Generic[T_ResourceRequest]):
+class UploadItem(RequestItem, Generic[T_ResourceRequest]):
    """An item to be uploaded to CDF, consisting of a source ID and the writable Cognite resource.

    Attributes:
@@ -45,14 +48,16 @@ class UploadItem(Generic[T_ResourceRequest]):
        item: The writable Cognite resource to be uploaded.
    """

+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
    source_id: str
    item: T_ResourceRequest

-    def
+    def __str__(self) -> str:
        return self.source_id

-    def dump(self) ->
-        return self.item.dump(camel_case=
+    def dump(self, camel_case: bool = True, exclude_extra: bool = False) -> dict[str, Any]:
+        return self.item.dump(camel_case=camel_case)


 class StorageIO(ABC, Generic[T_Selector, T_ResourceResponse]):
@@ -155,7 +160,7 @@ class UploadableStorageIO(
        data_chunk: Sequence[UploadItem[T_ResourceRequest]],
        http_client: HTTPClient,
        selector: T_Selector | None = None,
-    ) ->
+    ) -> ItemsResultList:
        """Upload a chunk of data to the storage using a custom HTTP client.
        This ensures that even if one item in the chunk fails, the rest will still be uploaded.

@@ -179,11 +184,11 @@ class UploadableStorageIO(
        else:
            raise ToolkitNotImplementedError(f"Unsupported UPLOAD_ENDPOINT_TYPE {self.UPLOAD_ENDPOINT_TYPE!r}.")

-        return http_client.
-            message=
+        return http_client.request_items_retries(
+            message=ItemsRequest2(
                endpoint_url=url,
                method=self.UPLOAD_ENDPOINT_METHOD,
-                items=
+                items=data_chunk,
                extra_body_fields=dict(self.UPLOAD_EXTRA_ARGS or {}),
            )
        )