cognite-toolkit 0.7.30__py3-none-any.whl → 0.7.32__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. cognite_toolkit/_cdf.py +5 -6
  2. cognite_toolkit/_cdf_tk/apps/__init__.py +2 -0
  3. cognite_toolkit/_cdf_tk/apps/_core_app.py +7 -1
  4. cognite_toolkit/_cdf_tk/apps/_import_app.py +41 -0
  5. cognite_toolkit/_cdf_tk/client/api/extended_functions.py +9 -9
  6. cognite_toolkit/_cdf_tk/client/api/infield.py +23 -17
  7. cognite_toolkit/_cdf_tk/client/api/project.py +8 -7
  8. cognite_toolkit/_cdf_tk/client/api/streams.py +19 -14
  9. cognite_toolkit/_cdf_tk/client/api/three_d.py +5 -5
  10. cognite_toolkit/_cdf_tk/client/data_classes/base.py +2 -22
  11. cognite_toolkit/_cdf_tk/client/data_classes/instance_api.py +1 -1
  12. cognite_toolkit/_cdf_tk/client/data_classes/three_d.py +3 -0
  13. cognite_toolkit/_cdf_tk/commands/__init__.py +1 -0
  14. cognite_toolkit/_cdf_tk/commands/build_v2/build_cmd.py +241 -0
  15. cognite_toolkit/_cdf_tk/commands/build_v2/build_input.py +85 -0
  16. cognite_toolkit/_cdf_tk/commands/build_v2/build_issues.py +27 -0
  17. cognite_toolkit/_cdf_tk/cruds/_resource_cruds/transformation.py +48 -13
  18. cognite_toolkit/_cdf_tk/resource_classes/workflow_version.py +164 -5
  19. cognite_toolkit/_cdf_tk/utils/http_client/__init__.py +28 -0
  20. cognite_toolkit/_cdf_tk/utils/http_client/_client.py +3 -2
  21. cognite_toolkit/_cdf_tk/utils/http_client/_data_classes2.py +69 -7
  22. cognite_toolkit/_cdf_tk/validation.py +4 -0
  23. cognite_toolkit/_repo_files/GitHub/.github/workflows/deploy.yaml +1 -1
  24. cognite_toolkit/_repo_files/GitHub/.github/workflows/dry-run.yaml +1 -1
  25. cognite_toolkit/_resources/cdf.toml +1 -1
  26. cognite_toolkit/_version.py +1 -1
  27. {cognite_toolkit-0.7.30.dist-info → cognite_toolkit-0.7.32.dist-info}/METADATA +1 -1
  28. {cognite_toolkit-0.7.30.dist-info → cognite_toolkit-0.7.32.dist-info}/RECORD +32 -29
  29. {cognite_toolkit-0.7.30.dist-info → cognite_toolkit-0.7.32.dist-info}/WHEEL +1 -1
  30. cognite_toolkit/_cdf_tk/prototypes/import_app.py +0 -41
  31. /cognite_toolkit/_cdf_tk/{prototypes/commands/import_.py → commands/_import_cmd.py} +0 -0
  32. /cognite_toolkit/_cdf_tk/{prototypes/commands → commands/build_v2}/__init__.py +0 -0
  33. {cognite_toolkit-0.7.30.dist-info → cognite_toolkit-0.7.32.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,241 @@
1
+ from pathlib import Path
2
+ from typing import Any, Literal, TypedDict
3
+
4
+ from rich import print
5
+ from rich.panel import Panel
6
+
7
+ from cognite_toolkit._cdf_tk.client import ToolkitClient
8
+ from cognite_toolkit._cdf_tk.commands._base import ToolkitCommand
9
+ from cognite_toolkit._cdf_tk.commands.build_cmd import BuildCommand as OldBuildCommand
10
+ from cognite_toolkit._cdf_tk.commands.build_v2.build_input import BuildInput
11
+ from cognite_toolkit._cdf_tk.commands.build_v2.build_issues import BuildIssue, BuildIssueList
12
+ from cognite_toolkit._cdf_tk.data_classes import (
13
+ BuildConfigYAML,
14
+ BuildVariables,
15
+ BuiltModuleList,
16
+ ModuleDirectories,
17
+ )
18
+ from cognite_toolkit._cdf_tk.exceptions import ToolkitError
19
+ from cognite_toolkit._cdf_tk.hints import verify_module_directory
20
+ from cognite_toolkit._cdf_tk.tk_warnings import ToolkitWarning, WarningList
21
+ from cognite_toolkit._cdf_tk.utils.file import safe_rmtree
22
+ from cognite_toolkit._cdf_tk.validation import validate_module_selection, validate_modules_variables
23
+ from cognite_toolkit._version import __version__
24
+
25
+
26
class BuildWarnings(TypedDict):
    """Pairs a toolkit warning with a list of paths.

    NOTE(review): presumably the files the warning was raised for — confirm
    against the caller; this type is not used in the visible code.
    """

    warning: ToolkitWarning
    location: list[Path]
29
+
30
+
31
class BuildCommand(ToolkitCommand):
    """Build toolkit modules into deployable artifacts (v2 pipeline).

    Orchestrates validation, build-directory preparation and compilation,
    collecting problems as ``BuildIssue`` objects in ``self.issues`` instead of
    printing warnings directly. Compilation itself is still delegated to the
    legacy ``OldBuildCommand``.
    """

    def __init__(self, print_warning: bool = True, skip_tracking: bool = False, silent: bool = False) -> None:
        super().__init__(print_warning, skip_tracking, silent)
        # Accumulates issues from all phases of the build.
        self.issues = BuildIssueList()

    def execute(
        self,
        verbose: bool,
        organization_dir: Path,
        build_dir: Path,
        selected: list[str | Path] | None,
        build_env_name: str | None,
        no_clean: bool,
        client: ToolkitClient | None = None,
        on_error: Literal["continue", "raise"] = "continue",
    ) -> BuiltModuleList:
        """Build the resources into deployable artifacts in the build directory.

        Args:
            verbose: Print extra information during the build.
            organization_dir: Root directory of the organization's modules.
            build_dir: Directory the built artifacts are written to.
            selected: Optional subset of modules (names or paths) to build.
            build_env_name: Name of the environment/config to build for.
            no_clean: If True, refuse to clear a non-empty build directory.
            client: Optional client, used for usage tracking and lookups.
            on_error: Whether to "continue" or "raise" on build errors.

        Returns:
            The modules that were built.

        Raises:
            ToolkitError: If the build directory is non-empty and no_clean is set.
        """
        self.verbose = verbose
        self.on_error = on_error

        # Record the project and cluster for usage tracking.
        if client:
            self._additional_tracking_info.project = client.config.project
            self._additional_tracking_info.cluster = client.config.cdf_cluster

        # Resolve all parameters for the build up front.
        # (Named build_input rather than `input` to avoid shadowing the builtin.)
        build_input = BuildInput.load(organization_dir, build_dir, build_env_name, client, selected)

        if self.verbose:
            self._print_build_input(build_input)

        # Capture warnings from module structure integrity.
        if module_selection_issues := self._validate_modules(build_input):
            self.issues.extend(module_selection_issues)

        # Logistics: clean and create the build directory.
        if prepare_issues := self._prepare_target_directory(build_input, not no_clean):
            self.issues.extend(prepare_issues)

        # Compile the configuration and variables; check syntax on module and
        # resource level for any "compilation" errors and warnings.
        built_modules, build_integrity_issues = self._build_configuration(build_input)
        if build_integrity_issues:
            self.issues.extend(build_integrity_issues)

        # Placeholder: recommendations for the user to improve the build.
        if build_quality_issues := self._verify_build_quality(built_modules):
            self.issues.extend(build_quality_issues)

        # Finally, report warnings grouped by category/code and location.
        self._print_or_log_warnings_by_category(self.issues)

        return built_modules

    def _print_build_input(self, build_input: BuildInput) -> None:
        """Print a summary panel of what is about to be built."""
        print(
            Panel(
                f"Building {build_input.organization_dir!s}:\n - Toolkit Version '{__version__!s}'\n"
                f" - Environment name {build_input.build_env_name!r}, validation-type {build_input.config.environment.validation_type!r}.\n"
                f" - Config '{build_input.config.filepath!s}'",
                expand=False,
            )
        )

    def _prepare_target_directory(self, build_input: BuildInput, clean: bool = False) -> BuildIssueList:
        """Ensure the build directory exists and is empty.

        Args:
            build_input: The resolved build input.
            clean: If True, a non-empty build directory is cleared; if False it
                is an error.

        Raises:
            ToolkitError: If the build directory is non-empty and clean is False.
        """
        issues = BuildIssueList()
        if build_input.build_dir.exists() and any(build_input.build_dir.iterdir()):
            if not clean:
                raise ToolkitError("Build directory is not empty. Run without --no-clean to remove existing files.")

            if self.verbose:
                issues.append(BuildIssue(description=f"Build directory {build_input.build_dir!s} is not empty. Clearing."))
            safe_rmtree(build_input.build_dir)
        build_input.build_dir.mkdir(parents=True, exist_ok=True)
        return issues

    def _validate_modules(self, build_input: BuildInput) -> BuildIssueList:
        """Validate module structure, selection and variables; return any issues."""
        issues = BuildIssueList()
        # Verify that the modules exist, are not duplicates,
        # and that at least one is selected.
        verify_module_directory(build_input.organization_dir, build_input.build_env_name)

        # Validate module selection.
        user_selected_modules = build_input.config.environment.get_selected_modules({})
        module_warnings = validate_module_selection(
            modules=build_input.modules,
            config=build_input.config,
            packages={},
            selected_modules=user_selected_modules,
            organization_dir=build_input.organization_dir,
        )
        if module_warnings:
            issues.extend(BuildIssueList.from_warning_list(module_warnings))

        # Validate variables. Note: this looks for non-replaced template
        # variables <.*?> and can be improved in the future.
        variables_warnings = validate_modules_variables(build_input.variables, build_input.config.filepath)
        if variables_warnings:
            issues.extend(BuildIssueList.from_warning_list(variables_warnings))

        # Track LOC of managed configuration.
        # Note: _track is not implemented yet, so we skip it for now.
        # self._track(build_input)

        return issues

    def _build_configuration(self, build_input: BuildInput) -> tuple[BuiltModuleList, BuildIssueList]:
        """Compile the selected modules by delegating to the legacy build command.

        Returns:
            The built modules and any issues converted from legacy warnings.
        """
        issues = BuildIssueList()
        if not build_input.modules.selected:
            return BuiltModuleList(), issues

        # TODO: parallelism is not implemented yet. Loading YAML and file I/O
        # are believed to be the biggest bottlenecks, so process parallelism is
        # the likely direction for optimization.
        # NOTE(review): the old command is created with skip_tracking=False —
        # confirm this does not double-count tracking with this command.
        old_build_command = OldBuildCommand(print_warning=False, skip_tracking=False)
        built_modules = old_build_command.build_config(
            build_dir=build_input.build_dir,
            organization_dir=build_input.organization_dir,
            config=build_input.config,
            packages={},
            clean=False,
            verbose=self.verbose,
            client=build_input.client,
            progress_bar=False,
            on_error=self.on_error,
        )
        # Copy tracking info from the old command onto self.
        self._additional_tracking_info.package_ids.update(old_build_command._additional_tracking_info.package_ids)
        self._additional_tracking_info.module_ids.update(old_build_command._additional_tracking_info.module_ids)

        # Collect warnings from the old build command and convert them to issues.
        if old_build_command.warning_list:
            issues.extend(BuildIssueList.from_warning_list(old_build_command.warning_list))
        return built_modules, issues

    def _verify_build_quality(self, built_modules: BuiltModuleList) -> BuildIssueList:
        """Placeholder for build-quality recommendations; currently returns no issues."""
        return BuildIssueList()

    def _write(self, build_input: BuildInput) -> None:
        """Write the build to the build directory and track lines of code built.

        Not implemented yet.
        """
        raise NotImplementedError()

    def _track(self, build_input: BuildInput) -> None:
        """Track build telemetry. Not implemented yet."""
        raise NotImplementedError()

    def _print_or_log_warnings_by_category(self, issues: BuildIssueList) -> None:
        """Report warnings grouped by category/code and location. Currently a no-op."""
        pass

    # Delegate to the old BuildCommand for backward compatibility with tests.
    def build_modules(
        self,
        modules: ModuleDirectories,
        build_dir: Path,
        variables: BuildVariables,
        verbose: bool = False,
        progress_bar: bool = False,
        on_error: Literal["continue", "raise"] = "continue",
    ) -> BuiltModuleList:
        """Delegate to old BuildCommand for backward compatibility."""
        old_cmd = OldBuildCommand()
        built_modules = old_cmd.build_modules(modules, build_dir, variables, verbose, progress_bar, on_error)
        # Carry over tracking info and warnings from the delegate.
        self._additional_tracking_info.package_ids.update(old_cmd._additional_tracking_info.package_ids)
        self._additional_tracking_info.module_ids.update(old_cmd._additional_tracking_info.module_ids)
        self.issues.extend(BuildIssueList.from_warning_list(old_cmd.warning_list or WarningList[ToolkitWarning]()))
        return built_modules

    def build_config(
        self,
        build_dir: Path,
        organization_dir: Path,
        config: BuildConfigYAML,
        packages: dict[str, list[str]],
        clean: bool = False,
        verbose: bool = False,
        client: ToolkitClient | None = None,
        progress_bar: bool = False,
        on_error: Literal["continue", "raise"] = "continue",
    ) -> BuiltModuleList:
        """Delegate to old BuildCommand for backward compatibility."""
        old_cmd = OldBuildCommand()
        return old_cmd.build_config(
            build_dir, organization_dir, config, packages, clean, verbose, client, progress_bar, on_error
        )

    def _replace_variables(
        self,
        resource_files: list[Path],
        variables: BuildVariables,
        resource_name: str,
        module_dir: Path,
        verbose: bool = False,
    ) -> list[Any]:
        """Delegate to old BuildCommand for backward compatibility."""
        old_cmd = OldBuildCommand()
        return old_cmd._replace_variables(resource_files, variables, resource_name, module_dir, verbose)
@@ -0,0 +1,85 @@
1
+ import sys
2
+ from functools import cached_property
3
+ from pathlib import Path
4
+
5
+ if sys.version_info >= (3, 11):
6
+ from typing import Self
7
+ else:
8
+ from typing_extensions import Self
9
+
10
+ from pydantic import BaseModel, ConfigDict
11
+
12
+ from cognite_toolkit._cdf_tk.client import ToolkitClient
13
+ from cognite_toolkit._cdf_tk.constants import DEFAULT_ENV
14
+ from cognite_toolkit._cdf_tk.data_classes import (
15
+ BuildConfigYAML,
16
+ BuildVariables,
17
+ ModuleDirectories,
18
+ )
19
+ from cognite_toolkit._cdf_tk.tk_warnings import ToolkitWarning, WarningList
20
+ from cognite_toolkit._cdf_tk.utils.modules import parse_user_selected_modules
21
+
22
+
23
class BuildInput(BaseModel):
    """Immutable input to the build process.

    Bundles the resolved directories, environment, configuration, client, and
    module selection that the build pipeline operates on.
    """

    # Frozen so the build cannot mutate its own input. arbitrary_types_allowed
    # is needed until BuildConfigYAML and ToolkitClient become Pydantic models.
    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)

    organization_dir: Path
    build_dir: Path
    build_env_name: str
    config: BuildConfigYAML
    client: ToolkitClient | None = None
    selected: list[str | Path] | None = None
    warnings: WarningList[ToolkitWarning] | None = None

    @classmethod
    def load(
        cls,
        organization_dir: Path,
        build_dir: Path,
        build_env_name: str | None,
        client: ToolkitClient | None,
        selected: list[str | Path] | None = None,
    ) -> Self:
        """Resolve the organization directory and environment name, load the
        build configuration, and return a fully populated BuildInput.

        Args:
            organization_dir: Module root; "." resolves to the current directory.
            build_dir: Target directory for the build.
            build_env_name: Environment name; falls back to DEFAULT_ENV.
            client: Optional client passed through to the build.
            selected: Optional subset of modules to build.
        """
        # Path(".") and Path("./") compare equal, so a single comparison suffices.
        resolved_org_dir = Path.cwd() if organization_dir == Path(".") else organization_dir
        resolved_env = build_env_name or DEFAULT_ENV
        config, warnings = cls._load_config(resolved_org_dir, resolved_env, selected)
        return cls(
            organization_dir=resolved_org_dir,
            build_dir=build_dir,
            build_env_name=resolved_env,
            config=config,
            client=client,
            selected=selected,
            warnings=warnings,
        )

    @classmethod
    def _load_config(
        cls, organization_dir: Path, build_env_name: str, selected: list[str | Path] | None
    ) -> tuple[BuildConfigYAML, WarningList[ToolkitWarning]]:
        """Load the environment config (or the default) and validate it.

        build_env_name is already resolved by the caller, so no DEFAULT_ENV
        fallback is needed here (the previous `or DEFAULT_ENV` was dead code).
        """
        warnings: WarningList[ToolkitWarning] = WarningList[ToolkitWarning]()
        if (organization_dir / BuildConfigYAML.get_filename(build_env_name)).exists():
            config = BuildConfigYAML.load_from_directory(organization_dir, build_env_name)
        else:
            # Fall back to the default environment configuration.
            config = BuildConfigYAML.load_default(organization_dir)
        if selected:
            config.environment.selected = parse_user_selected_modules(selected, organization_dir)
        config.set_environment_variables()
        if environment_warning := config.validate_environment():
            warnings.append(environment_warning)
        return config, warnings

    @cached_property
    def modules(self) -> ModuleDirectories:
        """Module directories, loaded lazily from the organization directory."""
        user_selected_modules = self.config.environment.get_selected_modules({})
        return ModuleDirectories.load(self.organization_dir, user_selected_modules)

    @cached_property
    def variables(self) -> BuildVariables:
        """Build variables, loaded lazily from the configuration."""
        return BuildVariables.load_raw(
            self.config.variables, self.modules.available_paths, self.modules.selected.available_paths
        )
@@ -0,0 +1,27 @@
1
+ import sys
2
+
3
+ if sys.version_info >= (3, 11):
4
+ from typing import Self
5
+ else:
6
+ from typing_extensions import Self
7
+
8
+ from collections import UserList
9
+
10
+ from pydantic import BaseModel
11
+
12
+ from cognite_toolkit._cdf_tk.tk_warnings import ToolkitWarning, WarningList
13
+
14
+
15
class BuildIssue(BaseModel):
    """A single problem found during the build.

    Can carry a recommendation for the user to improve the build.
    """

    description: str
19
+
20
+
21
class BuildIssueList(UserList[BuildIssue]):
    """A list of :class:`BuildIssue` items collected during a build."""

    @classmethod
    def from_warning_list(cls, warning_list: WarningList[ToolkitWarning]) -> Self:
        """Create a BuildIssueList from a WarningList."""
        issues = (BuildIssue(description=item.get_message()) for item in warning_list)
        return cls(issues)
@@ -26,6 +26,8 @@
26
26
  # limitations under the License.
27
27
 
28
28
 
29
+ import random
30
+ import time
29
31
  import warnings
30
32
  from collections import defaultdict
31
33
  from collections.abc import Callable, Hashable, Iterable, Sequence
@@ -427,25 +429,58 @@ class TransformationCRUD(ResourceCRUD[str, TransformationWrite, Transformation])
427
429
  raise error from e
428
430
  raise e
429
431
  except CogniteAPIError as e:
430
- if "Failed to bind session using nonce for" in e.message and len(chunk) > 1:
431
- MediumSeverityWarning(
432
- f"Failed to create {len(chunk)} transformations in a batch due to nonce binding error. "
433
- "Trying to recover by creating them one by one."
434
- ).print_warning(console=self.console)
435
- # Retry one by one
436
- for item in chunk:
437
- recovered = self._execute_in_batches(items=[item], api_call=api_call)
438
- results.extend(recovered)
439
- if self.console:
440
- self.console.print(
441
- f" [bold green]RECOVERED:[/] Successfully created {len(chunk)} transformations one by one."
442
- )
432
+ if "Failed to bind session using nonce" in e.message and len(chunk) > 1:
433
+ results.extend(self._execute_one_by_one(chunk, api_call))
443
434
  else:
444
435
  raise
445
436
  else:
446
437
  results.extend(chunk_results)
447
438
  return results
448
439
 
440
+ def _execute_one_by_one(
441
+ self,
442
+ chunk: Sequence[TransformationWrite],
443
+ api_call: Callable[[Sequence[TransformationWrite]], TransformationList],
444
+ ) -> TransformationList:
445
+ MediumSeverityWarning(
446
+ f"Failed to create {len(chunk)} transformations in a batch due to nonce binding error. "
447
+ "Trying to recover by creating them one by one."
448
+ ).print_warning(console=self.client.console)
449
+ # Retry one by one
450
+ failed_ids: list[str] = []
451
+ success_count = 0
452
+ delay = 0.3
453
+ self._sleep_with_jitter(delay, delay + 0.3)
454
+ results = TransformationList([])
455
+ for item in chunk:
456
+ try:
457
+ recovered = api_call([item])
458
+ except CogniteAPIError as e:
459
+ if "Failed to bind session using nonce" in e.message:
460
+ failed_ids.append(item.external_id or "<missing>")
461
+ self._sleep_with_jitter(delay, delay + 0.3)
462
+ else:
463
+ raise
464
+ else:
465
+ results.extend(recovered)
466
+ success_count += 1
467
+ message = f" [bold]RECOVERY COMPLETE:[/] Successfully created {success_count:,} transformations"
468
+ if failed_ids:
469
+ message += f", failed to create {len(failed_ids):,} transformations: {humanize_collection(failed_ids)}"
470
+ else:
471
+ message += "."
472
+ if failed_ids:
473
+ HighSeverityWarning(message).print_warning(include_timestamp=True, console=self.client.console)
474
+ else:
475
+ self.client.console.print(message)
476
+ return results
477
+
478
+ @staticmethod
479
+ def _sleep_with_jitter(base_delay: float, max_delay: float) -> None:
480
+ """Sleeps for a random duration between base_delay and max_delay (inclusive)."""
481
+ sleep_time = random.uniform(base_delay, max_delay)
482
+ time.sleep(sleep_time)
483
+
449
484
  def _update_nonce(self, items: Sequence[TransformationWrite]) -> None:
450
485
  for item in items:
451
486
  if not item.external_id:
@@ -1,4 +1,5 @@
1
- from typing import Literal
1
+ from abc import ABC
2
+ from typing import Annotated, Literal
2
3
 
3
4
  from cognite.client.data_classes import WorkflowVersionUpsert
4
5
  from pydantic import Field, JsonValue
@@ -6,18 +7,141 @@ from pydantic import Field, JsonValue
6
7
  from .base import BaseModelResource, ToolkitResource
7
8
 
8
9
 
10
class WorkflowVersionId(BaseModelResource):
    """Identifies a specific version of a workflow (workflow external id + version)."""

    workflow_external_id: str = Field(
        max_length=255,
        description="Identifier for a workflow. Must be unique for the project. No trailing or leading whitespace and no null characters allowed.",
    )
    version: str = Field(
        max_length=255,
        description="Identifier for a version. Must be unique for the workflow. No trailing or leading whitespace and no null characters allowed.",
    )
19
+
20
+
21
class CogniteFunctionRef(BaseModelResource):
    """Reference to a Cognite Function, optionally with input data."""

    external_id: str = Field(
        description="The external id of the Cognite Function in the project. This can be either a function external ID or a reference like ${myTaskExternalId.output.someKey}"
    )
    data: str | JsonValue | None = Field(
        None, description="Input data that will be passed to the Cognite Function. Limited to 100KB in size."
    )
28
+
29
+
30
class FunctionTaskParameters(BaseModelResource):
    """Parameters for a workflow task that executes a Cognite Function."""

    function: CogniteFunctionRef
    is_async_complete: bool = Field(
        False, description="Defines if the execution of the task should be completed asynchronously."
    )
35
+
36
+
37
class TransformationRef(BaseModelResource):
    """Reference to a Transformation, with concurrency and credential options."""

    external_id: str = Field(
        description="The external id of the Transformation in the project. This can be either a transformation external ID or a reference like ${myTaskExternalId.output.someKey}"
    )
    concurrency_policy: Literal["fail", "waitForCurrent", "restartAfterCurrent"] = Field(
        "fail",
        description="""Determines the behavior of the task if the Transformation is already running.

fail: The task fails if another instance of the Transformation is currently running.
waitForCurrent: The task will pause and wait for the already running Transformation to complete. Once completed, the task is completed. This mode is useful for preventing redundant Transformation runs.
restartAfterCurrent: The task waits for the ongoing Transformation to finish. After completion, the task restarts the Transformation. This mode ensures that the most recent data can be used by following tasks.""",
    )
    use_transformation_credentials: bool = Field(
        False,
        description="If set to true, the transformation will run using the client credentials configured on the transformation. If set to false, the transformation will run using the client credentials used to trigger the workflow.",
    )
53
+
54
+
55
class TransformationTaskParameters(BaseModelResource):
    """Parameters for a workflow task that runs a Transformation."""

    transformation: TransformationRef
57
+
58
+
59
class CDFRequest(BaseModelResource):
    """Definition of an HTTP request against the Cognite Data Fusion API."""

    resource_path: str = Field(
        description="The path of the request. The path should be prefixed by {cluster}.cognitedata.com/api/v1/project/{project} based on the relevant cluster and project. It can also contain references like ${myTaskExternalId.output.someKey}"
    )
    # NOTE(review): `Literal[...] | str` validates like plain `str`; the Literal
    # part mainly documents the common values.
    method: Literal["POST", "GET", "PUT"] | str = Field(
        description="The HTTP method of the request. It can also be a reference like ${myTaskExternalId.output.someKey}"
    )
    query_parameters: dict[str, JsonValue] | str | None = Field(
        None,
        description="The query parameters of the request. It can also be a reference like ${myTaskExternalId.output.someKey}",
    )
    body: JsonValue | str | None = Field(
        None, description="The body of the request. It can also be a reference like ${myTaskExternalId.output.someKey}"
    )
    request_timeout_in_millis: float | str | None = Field(
        None,
        description="The timeout for the request in milliseconds. It can also be a reference like ${myTaskExternalId.output.someKey}",
    )
    cdf_version_header: Literal["alpha", "beta"] | str | None = Field(
        None, description="The Cognite Data Fusion version header to use for the request."
    )
80
+
81
+
82
class CDFTaskParameters(BaseModelResource):
    """Parameters for a workflow task that performs a CDF API request."""

    cdf_request: CDFRequest
84
+
85
+
86
class DynamicRef(BaseModelResource):
    """Reference expression resolved to task definitions at execution time."""

    tasks: str = Field(
        description="A Reference is an expression that allows dynamically injecting input to a task during execution. References can be used to reference the input of the Workflow, the output of a previous task in the Workflow, or the input of a previous task in the Workflow. Note that the injected value must be valid in the context of the property it is injected into. Example Task reference: ${myTaskExternalId.output.someKey} Example Workflow input reference: ${workflow.input.myKey}"
    )
90
+
91
+
92
class DynamicTaskParameters(BaseModelResource):
    """Parameters for a workflow task whose definition is resolved dynamically."""

    dynamic: DynamicRef = Field(description="Reference to another task to use as the definition for this task.")
94
+
95
+
96
class SubworkflowRef(BaseModelResource):
    """Reference to a subworkflow, either by version id or as inline tasks."""

    # String annotation: forward reference, since Task is defined later in the module.
    tasks: "WorkflowVersionId | list[Task]" = Field(
        description="Reference to the subworkflow to execute. This can be either a reference to an existing workflow version or an inline definition of tasks."
    )
100
+
101
+
102
class SubworkflowTaskParameters(BaseModelResource):
    """Parameters for a workflow task that executes a subworkflow."""

    subworkflow: SubworkflowRef = Field(description="Reference to the subworkflow to execute.")
104
+
105
+
106
class SimulatorInputUnit(BaseModelResource):
    """Unit of a simulator input value."""

    name: str = Field(description="Name of the unit.")
108
+
109
+
110
class SimulatorInput(BaseModelResource):
    """Override for a single input value of a simulation run."""

    reference_id: str = Field(description="Reference id of the value to override.")
    value: str | int | float | list[str] | list[int] | list[float] = Field(
        description="Override the value used for a simulation run."
    )
    unit: SimulatorInputUnit | None = Field(None, description="Override the unit of the value")
116
+
117
+
118
class SimulationRef(BaseModelResource):
    """Reference to a simulator routine run, with optional time and input overrides."""

    routine_external_id: str = Field(description="The external id of the routine to be executed.")
    run_time: int | None = Field(
        None,
        description="Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling.",
    )
    inputs: list[SimulatorInput] | None = Field(
        None, description="List of inputs to be provided to the simulation.", max_length=200
    )
127
+
128
+
129
class SimulationTaskParameters(BaseModelResource):
    """Parameters for a workflow task that runs a simulation."""

    simulation: SimulationRef = Field(description="Reference to the simulation to execute.")
131
+
132
+
9
133
class TaskId(BaseModelResource):
    """Reference to a task by its external ID."""

    external_id: str = Field(
        max_length=255, description="The external ID provided by the client. Must be unique for the resource type."
    )
13
137
 
14
138
 
15
- class TaskDefinition(BaseModelResource):
139
+ class TaskDefinition(BaseModelResource, ABC):
16
140
  external_id: str = Field(
17
141
  max_length=255,
18
142
  description="Identifier for the task. Must be unique within the version. No trailing or leading whitespace and no null characters allowed.",
19
143
  )
20
- type: Literal["function", "transformation", "cdf", "dynamic", "subworkflow", "simulation"]
144
+ type: str
21
145
  name: str | None = Field(
22
146
  default=None,
23
147
  max_length=255,
@@ -28,7 +152,6 @@ class TaskDefinition(BaseModelResource):
28
152
  max_length=500,
29
153
  description="Description of the intention of the task",
30
154
  )
31
- parameters: JsonValue = Field()
32
155
  retries: int = Field(
33
156
  3,
34
157
  ge=0,
@@ -52,13 +175,49 @@ class TaskDefinition(BaseModelResource):
52
175
  )
53
176
 
54
177
 
178
class FunctionTask(TaskDefinition):
    """Workflow task that executes a Cognite Function."""

    type: Literal["function"] = "function"
    parameters: FunctionTaskParameters
181
+
182
+
183
class TransformationTask(TaskDefinition):
    """Workflow task that runs a Transformation."""

    type: Literal["transformation"] = "transformation"
    parameters: TransformationTaskParameters
186
+
187
+
188
class CDFTask(TaskDefinition):
    """Workflow task that performs an HTTP request against the CDF API.

    NOTE(review): the removed TaskDefinition literal used "cdf" as the type
    value; confirm "cdfRequest" is the task type the API accepts.
    """

    type: Literal["cdfRequest"] = "cdfRequest"
    parameters: CDFTaskParameters
191
+
192
+
193
class DynamicTask(TaskDefinition):
    """Workflow task whose definition is resolved dynamically at run time."""

    type: Literal["dynamic"] = "dynamic"
    parameters: DynamicTaskParameters
196
+
197
+
198
class SubworkflowTask(TaskDefinition):
    """Workflow task that executes a subworkflow."""

    type: Literal["subworkflow"] = "subworkflow"
    parameters: SubworkflowTaskParameters
201
+
202
+
203
class SimulationTask(TaskDefinition):
    """Workflow task that runs a simulation."""

    type: Literal["simulation"] = "simulation"
    parameters: SimulationTaskParameters
206
+
207
+
208
# Discriminated union over the `type` field: Pydantic selects the concrete
# task class based on the value of `type` during validation.
Task = Annotated[
    FunctionTask | TransformationTask | CDFTask | DynamicTask | SubworkflowTask | SimulationTask,
    Field(discriminator="type"),
]
212
+
213
+
55
214
class WorkflowDefinition(BaseModelResource):
    """The definition of a workflow version: its description and its tasks."""

    description: str | None = Field(
        default=None,
        max_length=500,
        description="The description of the workflow version.",
    )
    tasks: list[Task]
62
221
 
63
222
 
64
223
  class WorkflowVersionYAML(ToolkitResource):