cognite-toolkit 0.7.54__py3-none-any.whl → 0.7.55__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
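
The recurring change in this release is a switch of the interactive prompts from questionary's .ask() to .unsafe_ask(): .ask() swallows Ctrl-C and returns None, which forced a None check and typer.Abort()/typer.Exit() after every prompt, while .unsafe_ask() lets KeyboardInterrupt propagate so those checks can be dropped. Free-text retry loops are likewise replaced with validate= callables. A minimal sketch of the pattern, assuming only that questionary is installed; the prompt text, default, and variable names below are illustrative, not taken from the package:

import questionary

# Confirm prompt: .unsafe_ask() raises KeyboardInterrupt on Ctrl-C instead of
# returning None, so no per-prompt None handling is needed.
dry_run = questionary.confirm("Do you want to perform a dry run?", default=True).unsafe_ask()

# Text prompt: the validate= callable re-asks until it returns True, replacing
# the old while-True / try-except retry loop around int() parsing.
limit = int(
    questionary.text(
        "Maximum number of items to download (-1 for all):",
        default="-1",
        validate=lambda value: value.lstrip("-").isdigit(),
    ).unsafe_ask()
)
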
@@ -319,37 +319,26 @@ class DownloadApp(typer.Typer):
  f"Select format to download the {display_name} in:",
  choices=[Choice(title=format_.value, value=format_) for format_ in available_formats],
  default=file_format,
- ).ask()
+ ).unsafe_ask()
  compression = questionary.select(
  f"Select compression format to use when downloading the {display_name}:",
  choices=[Choice(title=comp.value, value=comp) for comp in CompressionFormat],
  default=compression,
- ).ask()
+ ).unsafe_ask()
  output_dir = Path(
  questionary.path(
  f"Where to download the {display_name}:",
  default=str(output_dir),
  only_directories=True,
- ).ask()
+ ).unsafe_ask()
  )
- while True:
- limit_str = questionary.text(
+ limit = int(
+ questionary.text(
  f"The maximum number of {display_name} to download from each dataset. Use -1 to download all {display_name}.",
  default=str(limit),
- ).ask()
- if limit_str is None:
- raise typer.Abort()
- try:
- limit = int(limit_str)
- except ValueError:
- print("[red]Please enter a valid integer for the limit.[/]")
- else:
- if max_limit is not None and limit > max_limit:
- print(
- f"[red]The maximum limit for downloading {display_name} is {max_limit}. Please enter a lower value.[/]"
- )
- else:
- break
+ validate=lambda value: value.lstrip("-").isdigit() and (max_limit is None or int(value) <= max_limit),
+ ).unsafe_ask()
+ )
  return data_sets, file_format, compression, output_dir, limit

  def download_timeseries_cmd(
@@ -584,7 +573,7 @@ class DownloadApp(typer.Typer):
  Choice(title="Yes", value=True),
  Choice(title="No", value=False),
  ],
- ).ask()
+ ).unsafe_ask()
  else:
  include_file_contents = False

@@ -933,52 +922,40 @@ class DownloadApp(typer.Typer):
  "Select the type of datapoints to download:",
  choices=[Choice(title=dt.value, value=dt) for dt in DatapointsDataTypes],
  default=datapoint_type,
- ).ask()
+ ).unsafe_ask()

  start_time = (
  questionary.text(
  "Enter the start time for the datapoints to download (RFC3339 format or relative time, e.g., '1d-ago'). Leave empty to download from the beginning.",
  default=start_time or "",
- ).ask()
+ ).unsafe_ask()
  or None
  )
  end_time = (
  questionary.text(
  "Enter the end time for the datapoints to download (RFC3339 format or relative time, e.g., '1d-ago'). Leave empty to download up to the latest.",
  default=end_time or "",
- ).ask()
+ ).unsafe_ask()
  or None
  )
  file_format = questionary.select(
  "Select format to download the datapoints in:",
  choices=[Choice(title=format_.value, value=format_) for format_ in DatapointFormats],
  default=file_format,
- ).ask()
+ ).unsafe_ask()
  output_dir = Path(
  questionary.path(
  "Where to download the datapoints:", default=str(output_dir), only_directories=True
- ).ask()
+ ).unsafe_ask()
  )
- while True:
- limit_str = questionary.text(
+ limit = int(
+ questionary.text(
  "The maximum number of timeseries to download datapoints from. Use -1 to download all timeseries."
  "The maximum number of datapoints in total is 10 million and 100 000 per timeseries.",
  default=str(limit),
- ).ask()
- if limit_str is None:
- raise typer.Abort()
- try:
- limit = int(limit_str)
- except ValueError:
- print("[red]Please enter a valid integer for the limit.[/]")
- else:
- if limit != -1 and limit < 1:
- print("[red]Please enter a valid integer greater than 0 or -1 for unlimited.[/]")
- else:
- break
- verbose = questionary.confirm(
- "Turn on to get more verbose output when running the command?", default=verbose
- ).ask()
+ validate=lambda value: value.lstrip("-").isdigit() and (int(value) == -1 or int(value) > 0),
+ ).unsafe_ask()
+ )

  cmd = DownloadCommand()
  selector = DataPointsDataSetSelector(
@@ -159,14 +159,13 @@ class MigrateApp(typer.Typer):
  # Interactive model
  selector = AssetInteractiveSelect(client, "migrate")
  data_set = selector.select_data_sets()
- dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).ask()
- output_dir = questionary.path(
- "Specify output directory for instance space definitions:", default=str(output_dir)
- ).ask()
- verbose = questionary.confirm("Do you want verbose output?", default=verbose).ask()
- if any(res is None for res in [dry_run, output_dir, verbose]):
- raise typer.Abort()
- output_dir = Path(output_dir)
+ dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).unsafe_ask()
+ output_dir = Path(
+ questionary.path(
+ "Specify output directory for instance space definitions:", default=str(output_dir)
+ ).unsafe_ask()
+ )
+ verbose = questionary.confirm("Do you want verbose output?", default=verbose).unsafe_ask()

  cmd = MigrationCommand()
  cmd.run(
@@ -235,14 +234,13 @@ class MigrateApp(typer.Typer):
  message="In which instance space do you want to create the source system?",
  include_empty=True,
  )
- dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).ask()
- output_dir = questionary.path(
- "Specify output directory for instance space definitions:", default=str(output_dir)
- ).ask()
- verbose = questionary.confirm("Do you want verbose output?", default=verbose).ask()
- if any(res is None for res in [instance_space, dry_run, output_dir, verbose]):
- raise typer.Abort()
- output_dir = Path(output_dir)
+ dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).unsafe_ask()
+ output_dir = Path(
+ questionary.path(
+ "Specify output directory for instance space definitions:", default=str(output_dir)
+ ).unsafe_ask()
+ )
+ verbose = questionary.confirm("Do you want verbose output?", default=verbose).unsafe_ask()
  elif data_set is None or instance_space is None:
  raise typer.BadParameter("Both data_set and instance_space must be provided together.")

@@ -399,10 +397,9 @@ class MigrateApp(typer.Typer):
  ingestion_mapping=asset_mapping.external_id,
  preferred_consumer_view=preferred_consumer_view,
  )
- dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).ask()
- verbose = questionary.confirm("Do you want verbose output?", default=verbose).ask()
- if any(res is None for res in [dry_run, verbose]):
- raise typer.Abort()
+ dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).unsafe_ask()
+ verbose = questionary.confirm("Do you want verbose output?", default=verbose).unsafe_ask()
+
  return selected, dry_run, verbose

  @classmethod
@@ -593,7 +590,7 @@ class MigrateApp(typer.Typer):
  if data_set_id is None and mapping_file is None:
  skip_linking = not questionary.confirm(
  "Do you want to link old and new TimeSeries?", default=not skip_linking
- ).ask()
+ ).unsafe_ask()

  cmd = MigrationCommand()
  cmd.run(
@@ -702,7 +699,7 @@ class MigrateApp(typer.Typer):
  if data_set_id is None:
  skip_linking = not questionary.confirm(
  "Do you want to link old and new Files?", default=not skip_linking
- ).ask()
+ ).unsafe_ask()

  cmd.run(
  lambda: cmd.migrate(
@@ -841,10 +838,8 @@ class MigrateApp(typer.Typer):
  default_file_annotation_mapping=file_annotation_mapping,
  )

- dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).ask()
- verbose = questionary.confirm("Do you want verbose output?", default=verbose).ask()
- if any(res is None for res in [dry_run, verbose]):
- raise typer.Abort()
+ dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).unsafe_ask()
+ verbose = questionary.confirm("Do you want verbose output?", default=verbose).unsafe_ask()

  cmd = MigrationCommand()
  cmd.run(
@@ -913,12 +908,11 @@ class MigrateApp(typer.Typer):
  if external_id is None:
  interactive = InteractiveCanvasSelect(client)
  external_id = interactive.select_external_ids()
- log_dir = questionary.path("Specify log directory for migration logs:", default=str(log_dir)).ask()
- dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).ask()
- verbose = questionary.confirm("Do you want verbose output?", default=verbose).ask()
- if any(res is None for res in [log_dir, dry_run, verbose]):
- raise typer.Abort()
- log_dir = Path(log_dir)
+ log_dir = Path(
+ questionary.path("Specify log directory for migration logs:", default=str(log_dir)).unsafe_ask()
+ )
+ dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).unsafe_ask()
+ verbose = questionary.confirm("Do you want verbose output?", default=verbose).unsafe_ask()

  cmd = MigrationCommand()
  selector = CanvasExternalIdSelector(external_ids=tuple(external_id))
@@ -1131,10 +1125,8 @@ class MigrateApp(typer.Typer):
  message="In which instance space do you want to create the CAD Node nodes?",
  include_empty=False,
  )
- dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).ask()
- verbose = questionary.confirm("Do you want verbose output?", default=verbose).ask()
- if any(res is None for res in [dry_run, verbose]):
- raise typer.Abort()
+ dry_run = questionary.confirm("Do you want to perform a dry run?", default=dry_run).unsafe_ask()
+ verbose = questionary.confirm("Do you want verbose output?", default=verbose).unsafe_ask()

  if object_3D_space is None or cad_node_space is None:
  raise typer.BadParameter(
@@ -116,24 +116,21 @@ class PurgeApp(typer.Typer):
  skip_data = not questionary.confirm(
  "Delete data in the dataset (time series, events, files, assets, sequences, relationships, labels, 3D models)?",
  default=True,
- ).ask()
+ ).unsafe_ask()
  include_configurations = questionary.confirm(
  "Delete configurations (workflows, extraction pipelines and transformations) in the dataset?",
  default=False,
- ).ask()
+ ).unsafe_ask()
  asset_recursive = questionary.confirm(
  "When deleting assets, delete all child assets recursively? (WARNING: This can lead "
  "to assets not in the selected dataset being deleted if they are children of assets in the dataset.)",
  default=False,
- ).ask()
- archive_dataset = questionary.confirm("Archive the dataset itself after purging?", default=False).ask()
- dry_run = questionary.confirm("Dry run?", default=True).ask()
- verbose = questionary.confirm("Verbose?", default=True).ask()
-
- user_options = [archive_dataset, dry_run, verbose, skip_data, include_configurations, asset_recursive]
-
- if any(selected is None for selected in user_options):
- raise typer.Abort("Aborted by user.")
+ ).unsafe_ask()
+ archive_dataset = questionary.confirm(
+ "Archive the dataset itself after purging?", default=False
+ ).unsafe_ask()
+ dry_run = questionary.confirm("Dry run?", default=True).unsafe_ask()
+ verbose = questionary.confirm("Verbose?", default=True).unsafe_ask()

  cmd.run(
  lambda: cmd.dataset(
@@ -222,11 +219,11 @@ class PurgeApp(typer.Typer):
  space = interactive.select_schema_space(include_global=False).space
  else:
  raise ToolkitValueError("Invalid space type selected.")
- dry_run = questionary.confirm("Dry run?", default=True).ask()
+ dry_run = questionary.confirm("Dry run?", default=True).unsafe_ask()
  if space_type == "empty":
  include_space = True
  else:
- include_space = questionary.confirm("Delete the space itself?", default=False).ask()
+ include_space = questionary.confirm("Delete the space itself?", default=False).unsafe_ask()

  cmd.run(
  lambda: cmd.space(
@@ -339,8 +336,10 @@ class PurgeApp(typer.Typer):
  instance_type=selected_instance_type,
  instance_spaces=tuple(instance_space) if instance_space else None,
  )
- dry_run = questionary.confirm("Dry run?", default=True).ask()
- unlink = questionary.confirm("Unlink instances connected to timeseries or files?", default=True).ask()
+ dry_run = questionary.confirm("Dry run?", default=True).unsafe_ask()
+ unlink = questionary.confirm(
+ "Unlink instances connected to timeseries or files?", default=True
+ ).unsafe_ask()
  elif instance_list is not None:
  selector = InstanceFileSelector(datafile=instance_list)
  elif view is not None:
@@ -73,11 +73,8 @@ class UploadApp(typer.Typer):
  input_dir = questionary.select(
  "Select the input directory containing the data to upload:",
  choices=[Choice(str(option.name), value=option) for option in input_candidate],
- ).ask()
- if input_dir is None:
- typer.echo("No input directory selected. Exiting.")
- raise typer.Exit(code=1)
- dry_run = questionary.confirm("Proceed with dry run?", default=dry_run).ask()
+ ).unsafe_ask()
+ dry_run = questionary.confirm("Proceed with dry run?", default=dry_run).unsafe_ask()
  if dry_run is None:
  typer.echo("No selection made for dry run. Exiting.")
  raise typer.Exit(code=1)
@@ -90,10 +87,7 @@ class UploadApp(typer.Typer):

  deploy_resources = questionary.confirm(
  f"Deploy resources found in {display_name!r}?", default=deploy_resources
- ).ask()
- if deploy_resources is None:
- typer.echo("No selection made for deploying resources. Exiting.")
- raise typer.Exit(code=1)
+ ).unsafe_ask()

  client = EnvironmentVariables.create_from_environment().get_client()
  cmd.run(
@@ -329,7 +329,7 @@ class ProfileCommand(ToolkitCommand, ABC, Generic[T_Index]):
  )

  def _ask_store_file(self) -> None:
- if file_path := questionary.path("Where do you want to save the profile?").ask():
+ if file_path := questionary.path("Where do you want to save the profile?").unsafe_ask():
  self.output_spreadsheet = Path(file_path)

@@ -222,7 +222,7 @@ class PurgeCommand(ToolkitCommand):
  if not dry_run and not auto_yes:
  confirm = questionary.confirm(
  f"Are you really sure you want to purge the {selected_space!r} space?", default=False
- ).ask()
+ ).unsafe_ask()
  if not confirm:
  return DeployResults([], "purge", dry_run=dry_run)

@@ -423,7 +423,7 @@ class PurgeCommand(ToolkitCommand):
  if not dry_run and not auto_yes:
  confirm = questionary.confirm(
  f"Are you really sure you want to purge the {selected_data_set_external_id!r} dataSet?", default=False
- ).ask()
+ ).unsafe_ask()
  if not confirm:
  return DeployResults([], "purge", dry_run=dry_run)

@@ -595,7 +595,7 @@ class PurgeCommand(ToolkitCommand):
  confirm = questionary.confirm(
  f"Are you really sure you want to purge all {total:,} instances in {selector!s}?",
  default=False,
- ).ask()
+ ).unsafe_ask()
  if not confirm:
  return DeleteResults()

@@ -84,7 +84,7 @@ class AuthCommand(ToolkitCommand):
  ask_user = True
  if env_vars and not env_vars.get_missing_vars():
  print("Auth variables are already set.")
- ask_user = questionary.confirm("Do you want to reconfigure the auth variables?", default=False).ask()
+ ask_user = questionary.confirm("Do you want to reconfigure the auth variables?", default=False).unsafe_ask()

  if ask_user or not env_vars:
  env_vars = prompt_user_environment_variables(env_vars)
@@ -111,10 +111,10 @@ class AuthCommand(ToolkitCommand):
  if questionary.confirm(
  f"Do you want to overwrite the existing '.env' file? The existing will be renamed to {filename}",
  default=False,
- ).ask():
+ ).unsafe_ask():
  shutil.move(".env", filename)
  Path(".env").write_text(new_env_file, encoding="utf-8")
- elif questionary.confirm("Do you want to save these to .env file for next time?", default=True).ask():
+ elif questionary.confirm("Do you want to save these to .env file for next time?", default=True).unsafe_ask():
  Path(".env").write_text(new_env_file, encoding="utf-8")

  def verify(
@@ -197,7 +197,7 @@ class AuthCommand(ToolkitCommand):
  if (
  is_interactive
  and missing_capabilities
- and questionary.confirm("Do you want to update the group with the missing capabilities?").ask()
+ and questionary.confirm("Do you want to update the group with the missing capabilities?").unsafe_ask()
  ) or is_demo:
  has_added_capabilities = self._update_missing_capabilities(
  client, cdf_toolkit_group, missing_capabilities, dry_run
@@ -213,7 +213,7 @@ class AuthCommand(ToolkitCommand):
  if (
  is_interactive
  and missing_capabilities
- and questionary.confirm("Do you want to update the group with the missing capabilities?").ask()
+ and questionary.confirm("Do you want to update the group with the missing capabilities?").unsafe_ask()
  ):
  self._update_missing_capabilities(client, cdf_toolkit_group, missing_capabilities, dry_run)
  elif is_demo:
@@ -246,7 +246,7 @@ class AuthCommand(ToolkitCommand):
  if extra := self.check_duplicated_names(all_groups, cdf_toolkit_group):
  if (
  is_interactive
- and questionary.confirm("Do you want to delete the extra groups?", default=True).ask()
+ and questionary.confirm("Do you want to delete the extra groups?", default=True).unsafe_ask()
  ):
  try:
  client.iam.groups.delete(extra.as_ids())
@@ -274,7 +274,7 @@ class AuthCommand(ToolkitCommand):
  if not questionary.confirm(
  "Do you want to create it?",
  default=True,
- ).ask():
+ ).unsafe_ask():
  return None

  if dry_run:
@@ -283,13 +283,10 @@ class AuthCommand(ToolkitCommand):
  )
  return None

- while True:
- source_id = questionary.text(
- "What is the source id for the new group (typically a group id in the identity provider)?"
- ).ask()
- if source_id:
- break
- print("Source id cannot be empty.")
+ source_id = questionary.text(
+ "What is the source id for the new group (typically a group id in the identity provider)?",
+ validate=lambda value: value.strip() != "",
+ ).unsafe_ask()

  toolkit_group.source_id = source_id
  if already_used := [group.name for group in all_groups if group.source_id == source_id]:
@@ -298,7 +295,7 @@ class AuthCommand(ToolkitCommand):
  f"The source id {source_id!r} is already used by the groups: {humanize_collection(already_used)!r}."
  )
  )
- if not questionary.confirm("This is NOT recommended. Do you want to continue?", default=False).ask():
+ if not questionary.confirm("This is NOT recommended. Do you want to continue?", default=False).unsafe_ask():
  return None

  return self._create_toolkit_group_in_cdf(client, toolkit_group)
@@ -197,7 +197,8 @@ class CleanCommand(ToolkitCommand):
  "Which modules would you like to clean?",
  instruction="Use arrow up/down, press space to select item(s) and enter to save",
  choices=choices,
- ).ask()
+ validate=lambda choice: "You must select at least one module." if len(choice) == 0 else True,
+ ).unsafe_ask()

  if not selected_modules:
  return None
@@ -136,7 +136,9 @@ class DataModelFinder(ResourceFinder[DataModelId]):
  if len(available_spaces) == 1:
  selected_space = available_spaces[0]
  else:
- selected_space = questionary.select("In which space is your data model located?", available_spaces).ask()
+ selected_space = questionary.select(
+ "In which space is your data model located?", available_spaces
+ ).unsafe_ask()
  data_model_ids = sorted(
  [model for model in data_model_ids if model.space == selected_space], key=lambda model: model.as_tuple()
  )
@@ -147,7 +149,7 @@ class DataModelFinder(ResourceFinder[DataModelId]):
  Choice(f"{model_id!r}", value=model_id)
  for model_id in sorted(data_model_ids, key=lambda model: model.as_tuple())
  ],
- ).ask()
+ ).unsafe_ask()

  retrieved_models = self.client.data_modeling.data_models.retrieve(
  (selected_data_model.space, selected_data_model.external_id), inline_views=False
@@ -165,7 +167,7 @@ class DataModelFinder(ResourceFinder[DataModelId]):
  if not questionary.confirm(
  f"Would you like to select a different version than {selected_data_model.version} of the data model",
  default=False,
- ).ask():
+ ).unsafe_ask():
  self.data_model = models_by_version[cast(str, selected_data_model.version)]
  return selected_data_model

@@ -175,7 +177,7 @@ class DataModelFinder(ResourceFinder[DataModelId]):
  Choice(f"{version} ({len(model.views)} views)", value=version)
  for version, model in models_by_version.items()
  ],
- ).ask()
+ ).unsafe_ask()
  self.data_model = models_by_version[selected_model]
  return self.data_model.as_id()

@@ -240,7 +242,7 @@ class WorkflowFinder(ResourceFinder[WorkflowVersionId]):
  selected_workflow_id: str = questionary.select(
  "Which workflow would you like to dump?",
  [Choice(workflow_id, value=workflow_id) for workflow_id in workflows.as_external_ids()],
- ).ask()
+ ).unsafe_ask()
  for workflow in workflows:
  if workflow.external_id == selected_workflow_id:
  self._workflow = workflow
@@ -256,7 +258,7 @@ class WorkflowFinder(ResourceFinder[WorkflowVersionId]):
  selected_version: WorkflowVersionId = questionary.select(
  "Which version would you like to dump?",
  [Choice(f"{version!r}", value=version) for version in versions.as_ids()],
- ).ask()
+ ).unsafe_ask()
  for version in versions:
  if version.version == selected_version.version:
  self._workflow_version = version
@@ -309,7 +311,8 @@ class TransformationFinder(ResourceFinder[tuple[str, ...]]):
  selected_transformation_ids: tuple[str, ...] | None = questionary.checkbox(
  "Which transformation(s) would you like to dump?",
  choices=choices,
- ).ask()
+ validate=lambda choices: True if choices else "You must select at least one transformation.",
+ ).unsafe_ask()
  if not selected_transformation_ids:
  raise ToolkitValueError(f"No transformations selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
  return tuple(selected_transformation_ids)
@@ -354,7 +357,8 @@ class GroupFinder(ResourceFinder[tuple[str, ...]]):
  Choice(f"{group_name} ({len(group_list)} group{'s' if len(group_list) > 1 else ''})", value=group_list)
  for group_name, group_list in sorted(groups_by_name.items())
  ],
- ).ask()
+ validate=lambda choices: True if choices else "You must select at least one group.",
+ ).unsafe_ask()
  if not selected_groups:
  raise ToolkitValueError(f"No group selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
  self.groups = [group for group_list in selected_groups for group in group_list]
@@ -387,7 +391,8 @@ class AgentFinder(ResourceFinder[tuple[str, ...]]):
  selected_agent_ids: list[str] | None = questionary.checkbox(
  "Which agent(s) would you like to dump?",
  choices=choices,
- ).ask()
+ validate=lambda choices: True if choices else "You must select at least one agent.",
+ ).unsafe_ask()
  if not selected_agent_ids:
  raise ToolkitValueError(f"No agents selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
  return tuple(selected_agent_ids)
@@ -418,7 +423,7 @@ class NodeFinder(ResourceFinder[dm.ViewId]):
  raise ToolkitMissingResourceError("No spaces found")
  selected_space: str = questionary.select(
  "In which space is your node property view located?", [space.space for space in spaces]
- ).ask()
+ ).unsafe_ask()

  views = self.client.data_modeling.views.list(space=selected_space, limit=-1, all_versions=False)
  if not views:
@@ -428,7 +433,7 @@ class NodeFinder(ResourceFinder[dm.ViewId]):
  selected_view_id: dm.ViewId = questionary.select(
  "Which node property view would you like to dump?",
  [Choice(repr(view), value=view) for view in views.as_ids()],
- ).ask()
+ ).unsafe_ask()
  return selected_view_id

  def __iter__(self) -> Iterator[tuple[list[Hashable], CogniteResourceList | None, ResourceCRUD, None | str]]:
@@ -444,7 +449,7 @@ class NodeFinder(ResourceFinder[dm.ViewId]):
  if not questionary.confirm(
  f"Are you sure you want to dump {count} nodes? This may take a while.",
  default=False,
- ).ask():
+ ).unsafe_ask():
  typer.Exit(0)
  nodes = dm.NodeList[dm.Node](list(loader.iterate()))
  yield [], nodes, loader, None
@@ -463,7 +468,8 @@ class LocationFilterFinder(ResourceFinder[tuple[str, ...]]):
  selected_filter_ids: tuple[str, ...] | None = questionary.checkbox(
  "Which filters would you like to dump?",
  choices=[Choice(name, value=id_) for name, id_ in id_by_display_name.items()],
- ).ask()
+ validate=lambda choices: True if choices else "You must select at least one filter.",
+ ).unsafe_ask()
  if not selected_filter_ids:
  raise ToolkitValueError(f"No filters selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
  return tuple(selected_filter_ids)
@@ -499,7 +505,8 @@ class ExtractionPipelineFinder(ResourceFinder[tuple[str, ...]]):
  selected_pipeline_ids: tuple[str, ...] | None = questionary.checkbox(
  "Which extraction pipeline(s) would you like to dump?",
  choices=choices,
- ).ask()
+ validate=lambda choices: True if choices else "You must select at least one pipeline.",
+ ).unsafe_ask()
  if not selected_pipeline_ids:
  raise ToolkitValueError(f"No extraction pipelines selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
  return tuple(selected_pipeline_ids)
@@ -538,7 +545,8 @@ class DataSetFinder(ResourceFinder[tuple[str, ...]]):
  selected_dataset_ids: tuple[str, ...] | None = questionary.checkbox(
  "Which dataset(s) would you like to dump?",
  choices=choices,
- ).ask()
+ validate=lambda choices: True if choices else "You must select at least one dataset.",
+ ).unsafe_ask()
  if not selected_dataset_ids:
  raise ToolkitValueError(f"No datasets selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
  return tuple(selected_dataset_ids)
@@ -574,7 +582,8 @@ class FunctionFinder(ResourceFinder[tuple[str, ...]]):
  selected_function_ids: tuple[str, ...] | None = questionary.checkbox(
  "Which function(s) would you like to dump?",
  choices=choices,
- ).ask()
+ validate=lambda choices: True if choices else "You must select at least one function.",
+ ).unsafe_ask()
  if not selected_function_ids:
  raise ToolkitValueError(f"No functions selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
  return tuple(selected_function_ids)
@@ -635,7 +644,7 @@ class StreamlitFinder(ResourceFinder[tuple[str, ...]]):
  Choice(f"{item.value} ({item.count})", value=item.value)
  for item in sorted(result, key=lambda r: (r.count, str(r.value) or ""))
  ],
- ).ask()
+ ).unsafe_ask()
  files = self.client.files.list(
  limit=-1, directory_prefix="/streamlit-apps/", metadata={"creator": str(selected_creator)}
  )
@@ -653,7 +662,8 @@ class StreamlitFinder(ResourceFinder[tuple[str, ...]]):
  )
  for app in sorted(self.apps, key=lambda a: a.name)
  ],
- ).ask()
+ validate=lambda choices: True if choices else "You must select at least one Streamlit app.",
+ ).unsafe_ask()
  if not selected_ids:
  raise ToolkitValueError(f"No Streamlit app selected for dumping.{_INTERACTIVE_SELECT_HELPER_TEXT}")
  return tuple(selected_ids)
@@ -772,7 +782,8 @@ class SearchConfigFinder(ResourceFinder[tuple[SearchConfigViewId, ...]]):
  selected_view_ids: list[SearchConfigViewId] | None = questionary.checkbox(
  "For which view would you like to dump the search configuration?",
  choices=choices,
- ).ask()
+ validate=lambda choices: True if choices else "You must select at least one view.",
+ ).unsafe_ask()
  if not selected_view_ids:
  raise ToolkitValueError("No view selected for dumping the search configuration.")
  return tuple(selected_view_ids)
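
The checkbox prompts in the dump commands follow the same pattern, with validate= callables that return True when at least one item is checked and an error-message string otherwise. A minimal sketch under the same assumptions (the choice labels are illustrative, not taken from the package):

import questionary

selected = questionary.checkbox(
    "Which dataset(s) would you like to dump?",
    choices=["dataset_a", "dataset_b"],  # illustrative choices
    # questionary re-prompts and shows the returned string as the error message
    # until the callable returns True.
    validate=lambda choices: True if choices else "You must select at least one dataset.",
).unsafe_ask()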