gooddata-pipelines 1.50.1.dev1__py3-none-any.whl → 1.51.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of gooddata-pipelines might be problematic.

@@ -6,11 +6,22 @@ from ._version import __version__
6
6
  from .backup_and_restore.backup_manager import BackupManager
7
7
  from .backup_and_restore.models.storage import (
8
8
  BackupRestoreConfig,
9
+ LocalStorageConfig,
10
+ S3StorageConfig,
9
11
  StorageType,
10
12
  )
11
13
  from .backup_and_restore.storage.local_storage import LocalStorage
12
14
  from .backup_and_restore.storage.s3_storage import S3Storage
13
15
 
16
+ # -------- LDM Extension --------
17
+ from .ldm_extension.ldm_extension_manager import LdmExtensionManager
18
+ from .ldm_extension.models.custom_data_object import (
19
+ ColumnDataType,
20
+ CustomDatasetDefinition,
21
+ CustomFieldDefinition,
22
+ CustomFieldType,
23
+ )
24
+
14
25
  # -------- Provisioning --------
15
26
  from .provisioning.entities.user_data_filters.models.udf_models import (
16
27
  UserDataFilterFullLoad,
@@ -51,6 +62,8 @@ __all__ = [
51
62
  "UserIncrementalLoad",
52
63
  "UserGroupIncrementalLoad",
53
64
  "PermissionFullLoad",
65
+ "LocalStorageConfig",
66
+ "S3StorageConfig",
54
67
  "PermissionIncrementalLoad",
55
68
  "UserFullLoad",
56
69
  "UserGroupFullLoad",
@@ -61,5 +74,10 @@ __all__ = [
61
74
  "UserDataFilterProvisioner",
62
75
  "UserDataFilterFullLoad",
63
76
  "EntityType",
77
+ "LdmExtensionManager",
78
+ "CustomDatasetDefinition",
79
+ "CustomFieldDefinition",
80
+ "ColumnDataType",
81
+ "CustomFieldType",
64
82
  "__version__",
65
83
  ]
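
A minimal sketch of what the expanded public interface looks like from the package root; the names follow the `__all__` additions above, while the host and token values are placeholders:

```python
# Minimal sketch: names follow the __all__ additions above.
from gooddata_pipelines import (
    ColumnDataType,
    CustomDatasetDefinition,
    CustomFieldDefinition,
    CustomFieldType,
    LdmExtensionManager,
    LocalStorageConfig,
    S3StorageConfig,
)

# Entry point for the new LDM extension workflow; host and token are placeholders.
manager = LdmExtensionManager.create(
    host="https://example.gooddata.com", token="<api-token>"
)
```
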
@@ -174,6 +174,49 @@ class ApiMethods:
174
174
  )
175
175
  return self._get(endpoint)
176
176
 
177
+ def get_all_metrics(self, workspace_id: str) -> requests.Response:
178
+ """Get all metrics from the specified workspace.
179
+
180
+ Args:
181
+ workspace_id (str): The ID of the workspace to retrieve metrics from.
182
+ Returns:
183
+ requests.Response: The response containing the metrics.
184
+ """
185
+ endpoint = f"/entities/workspaces/{workspace_id}/metrics"
186
+ headers = {**self.headers, "X-GDC-VALIDATE-RELATIONS": "true"}
187
+ return self._get(endpoint, headers=headers)
188
+
189
+ def get_all_visualization_objects(
190
+ self, workspace_id: str
191
+ ) -> requests.Response:
192
+ """Get all visualizations from the specified workspace.
193
+
194
+ Args:
195
+ workspace_id (str): The ID of the workspace to retrieve visualizations from.
196
+ Returns:
197
+ requests.Response: The response containing the visualizations.
198
+ """
199
+ endpoint = f"/entities/workspaces/{workspace_id}/visualizationObjects"
200
+ headers = {**self.headers, "X-GDC-VALIDATE-RELATIONS": "true"}
201
+ return self._get(endpoint, headers=headers)
202
+
203
+ def get_all_dashboards(self, workspace_id: str) -> requests.Response:
204
+ """Get all dashboards from the specified workspace.
205
+
206
+ Args:
207
+ workspace_id (str): The ID of the workspace to retrieve dashboards from.
208
+ Returns:
209
+ requests.Response: The response containing the dashboards.
210
+ """
211
+ endpoint = f"/entities/workspaces/{workspace_id}/analyticalDashboards"
212
+ headers = {**self.headers, "X-GDC-VALIDATE-RELATIONS": "true"}
213
+ return self._get(endpoint, headers=headers)
214
+
215
+ def get_profile(self) -> requests.Response:
216
+ """Returns organization and current user information."""
217
+ endpoint = "/profile"
218
+ return self._get(endpoint)
219
+
177
220
  def _get(
178
221
  self, endpoint: str, headers: dict[str, str] | None = None
179
222
  ) -> requests.Response:
@@ -253,3 +296,15 @@ class ApiMethods:
253
296
  url = self._get_url(endpoint)
254
297
 
255
298
  return requests.delete(url, headers=self.headers, timeout=TIMEOUT)
299
+
300
+ @staticmethod
301
+ def raise_if_response_not_ok(*responses: requests.Response) -> None:
302
+ """Check if responses from API calls are OK.
303
+
304
+ Raises ValueError if any response is not OK (status code not 2xx).
305
+ """
306
+ for response in responses:
307
+ if not response.ok:
308
+ raise ValueError(
309
+ f"Request to {response.url} failed with status code {response.status_code}: {response.text}"
310
+ )
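
For illustration, a standalone sketch that mirrors the new `raise_if_response_not_ok` helper and how it is meant to be fed several responses at once; the function below is a re-implementation for demonstration, not the library's own object:

```python
import requests


def raise_if_response_not_ok(*responses: requests.Response) -> None:
    """Mirror of the helper added to ApiMethods: fail on the first non-2xx response."""
    for response in responses:
        if not response.ok:
            raise ValueError(
                f"Request to {response.url} failed with status code "
                f"{response.status_code}: {response.text}"
            )


# Typical use: gather responses from several endpoint calls, then check them together.
# raise_if_response_not_ok(metrics_response, visualizations_response, dashboards_response)
```
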
@@ -0,0 +1 @@
1
+ # (C) 2025 GoodData Corporation
@@ -0,0 +1,286 @@
1
+ # (C) 2025 GoodData Corporation
2
+ """Module for processing validated custom datasets and fields data.
3
+
4
+ This module is responsible for converting validated custom datasets and fields
5
+ into objects defined in the GoodData Python SDK.
6
+ """
7
+
8
+ from gooddata_sdk.catalog.identifier import (
9
+ CatalogDatasetWorkspaceDataFilterIdentifier,
10
+ CatalogGrainIdentifier,
11
+ CatalogReferenceIdentifier,
12
+ )
13
+ from gooddata_sdk.catalog.workspace.declarative_model.workspace.logical_model.data_filter_references import (
14
+ CatalogDeclarativeWorkspaceDataFilterReferences,
15
+ )
16
+ from gooddata_sdk.catalog.workspace.declarative_model.workspace.logical_model.dataset.dataset import (
17
+ CatalogDataSourceTableIdentifier,
18
+ CatalogDeclarativeAttribute,
19
+ CatalogDeclarativeDataset,
20
+ CatalogDeclarativeDatasetSql,
21
+ CatalogDeclarativeFact,
22
+ CatalogDeclarativeReference,
23
+ CatalogDeclarativeReferenceSource,
24
+ CatalogDeclarativeWorkspaceDataFilterColumn,
25
+ )
26
+ from gooddata_sdk.catalog.workspace.declarative_model.workspace.logical_model.date_dataset.date_dataset import (
27
+ CatalogDeclarativeDateDataset,
28
+ CatalogGranularitiesFormatting,
29
+ )
30
+ from gooddata_sdk.catalog.workspace.declarative_model.workspace.logical_model.ldm import (
31
+ CatalogDeclarativeLdm,
32
+ CatalogDeclarativeModel,
33
+ )
34
+
35
+ from gooddata_pipelines.ldm_extension.models.aliases import DatasetId
36
+ from gooddata_pipelines.ldm_extension.models.custom_data_object import (
37
+ ColumnDataType,
38
+ CustomDataset,
39
+ CustomFieldDefinition,
40
+ CustomFieldType,
41
+ )
42
+
43
+
44
+ class LdmExtensionDataProcessor:
45
+ """Create GoodData LDM from validated custom datasets and fields."""
46
+
47
+ DATE_GRANULARITIES: list[str] = [
48
+ "MINUTE",
49
+ "HOUR",
50
+ "DAY",
51
+ "WEEK",
52
+ "MONTH",
53
+ "QUARTER",
54
+ "YEAR",
55
+ "MINUTE_OF_HOUR",
56
+ "HOUR_OF_DAY",
57
+ "DAY_OF_WEEK",
58
+ "DAY_OF_MONTH",
59
+ "DAY_OF_YEAR",
60
+ "WEEK_OF_YEAR",
61
+ "MONTH_OF_YEAR",
62
+ "QUARTER_OF_YEAR",
63
+ ]
64
+
65
+ @staticmethod
66
+ def _attribute_from_field(
67
+ dataset_name: str,
68
+ custom_field: CustomFieldDefinition,
69
+ ) -> CatalogDeclarativeAttribute:
70
+ """Assign a declarative attribute from a custom field definition."""
71
+ return CatalogDeclarativeAttribute(
72
+ id=custom_field.custom_field_id,
73
+ title=custom_field.custom_field_name,
74
+ source_column=custom_field.custom_field_source_column,
75
+ labels=[],
76
+ source_column_data_type=custom_field.custom_field_source_column_data_type.value,
77
+ tags=[dataset_name],
78
+ )
79
+
80
+ @staticmethod
81
+ def _fact_from_field(
82
+ dataset_name: str,
83
+ custom_field: CustomFieldDefinition,
84
+ ) -> CatalogDeclarativeFact:
85
+ """Assign a declarative fact from a custom field definition."""
86
+ return CatalogDeclarativeFact(
87
+ id=custom_field.custom_field_id,
88
+ title=custom_field.custom_field_name,
89
+ source_column=custom_field.custom_field_source_column,
90
+ source_column_data_type=custom_field.custom_field_source_column_data_type.value,
91
+ tags=[dataset_name],
92
+ )
93
+
94
+ def _date_from_field(
95
+ self,
96
+ dataset_name: str,
97
+ custom_field: CustomFieldDefinition,
98
+ ) -> CatalogDeclarativeDateDataset:
99
+ """Assign a declarative date dataset from a custom field definition."""
100
+
101
+ return CatalogDeclarativeDateDataset(
102
+ id=custom_field.custom_field_id,
103
+ title=custom_field.custom_field_name,
104
+ granularities_formatting=CatalogGranularitiesFormatting(
105
+ title_base="",
106
+ title_pattern="%titleBase - %granularityTitle",
107
+ ),
108
+ granularities=self.DATE_GRANULARITIES,
109
+ tags=[dataset_name],
110
+ )
111
+
112
+ @staticmethod
113
+ def _date_ref_from_field(
114
+ custom_field: CustomFieldDefinition,
115
+ ) -> CatalogDeclarativeReference:
116
+ """Create a date reference from a custom field definition."""
117
+ return CatalogDeclarativeReference(
118
+ identifier=CatalogReferenceIdentifier(
119
+ id=custom_field.custom_field_id
120
+ ),
121
+ multivalue=False,
122
+ sources=[
123
+ CatalogDeclarativeReferenceSource(
124
+ column=custom_field.custom_field_source_column,
125
+ target=CatalogGrainIdentifier(
126
+ id=custom_field.custom_field_id,
127
+ type=CustomFieldType.DATE.value,
128
+ ),
129
+ data_type=custom_field.custom_field_source_column_data_type.value,
130
+ )
131
+ ],
132
+ )
133
+
134
+ @staticmethod
135
+ def _get_sources(
136
+ dataset: CustomDataset,
137
+ ) -> tuple[
138
+ CatalogDataSourceTableIdentifier | None,
139
+ CatalogDeclarativeDatasetSql | None,
140
+ ]:
141
+ """Get the data source table and SQL from the dataset definition."""
142
+ # We will have either a table id or a sql statement. Let's store
143
+ # whatever data is available to variables and pass it to the
144
+ # dataset. Both can be object instances or None, but at least one
145
+ # should be valid as per prior validation.
146
+ dataset_source_table_id = (
147
+ CatalogDataSourceTableIdentifier(
148
+ id=dataset.definition.dataset_source_table,
149
+ data_source_id=dataset.definition.dataset_datasource_id,
150
+ path=[dataset.definition.dataset_source_table],
151
+ )
152
+ if dataset.definition.dataset_source_table
153
+ else None
154
+ )
155
+
156
+ dataset_sql = (
157
+ CatalogDeclarativeDatasetSql(
158
+ statement=dataset.definition.dataset_source_sql,
159
+ data_source_id=dataset.definition.dataset_datasource_id,
160
+ )
161
+ if dataset.definition.dataset_source_sql
162
+ else None
163
+ )
164
+ return dataset_source_table_id, dataset_sql
165
+
166
+ def datasets_to_ldm(
167
+ self, datasets: dict[DatasetId, CustomDataset]
168
+ ) -> CatalogDeclarativeModel:
169
+ """Convert validated datasets to GoodData declarative model.
170
+
171
+ Args:
172
+ datasets (dict[DatasetId, CustomDataset]): Dictionary of validated
173
+ datasets.
174
+ Returns:
175
+ CatalogDeclarativeModel: GoodData declarative model representation
176
+ of the datasets.
177
+ """
178
+
179
+ declarative_datasets: list[CatalogDeclarativeDataset] = []
180
+
181
+ # Date dimensions are not stored in a dataset, but as separate datasets
182
+ # in the `date_instances` object on the LDM
183
+ date_instances: list[CatalogDeclarativeDateDataset] = []
184
+
185
+ for dataset in datasets.values():
186
+ date_references: list[CatalogDeclarativeReference] = []
187
+ attributes: list[CatalogDeclarativeAttribute] = []
188
+ facts: list[CatalogDeclarativeFact] = []
189
+
190
+ # Iterate through the custom fields and create the appropriate objects
191
+ for custom_field in dataset.custom_fields:
192
+ if custom_field.custom_field_type == CustomFieldType.ATTRIBUTE:
193
+ attributes.append(
194
+ self._attribute_from_field(
195
+ dataset.definition.dataset_name, custom_field
196
+ )
197
+ )
198
+
199
+ elif custom_field.custom_field_type == CustomFieldType.FACT:
200
+ facts.append(
201
+ self._fact_from_field(
202
+ dataset.definition.dataset_name, custom_field
203
+ )
204
+ )
205
+
206
+ # Process date dimensions and store them to date_instances. Date
207
+ # dimensions are not stored in a dataset, but as a separate dataset.
208
+ # However, they need to be referenced in the dataset references to
209
+ # create the connection between the dataset and the date dimension
210
+ # in the GoodData Logical Data Model.
211
+ elif custom_field.custom_field_type == CustomFieldType.DATE:
212
+ # Add the date dimension to the date_instances
213
+ date_instances.append(
214
+ self._date_from_field(
215
+ dataset.definition.dataset_name, custom_field
216
+ )
217
+ )
218
+
219
+ # Create a reference so that the date dimension is connected
220
+ # to the dataset in the GoodData Logical Data Model.
221
+ date_references.append(
222
+ self._date_ref_from_field(custom_field)
223
+ )
224
+
225
+ else:
226
+ raise ValueError(
227
+ f"Unsupported custom field type: {custom_field.custom_field_type}"
228
+ )
229
+
230
+ # Get the data source info
231
+ dataset_source_table_id, dataset_sql = self._get_sources(dataset)
232
+
233
+ # Construct the declarative dataset object and append it to the list.
234
+ declarative_datasets.append(
235
+ CatalogDeclarativeDataset(
236
+ id=dataset.definition.dataset_id,
237
+ title=dataset.definition.dataset_name,
238
+ grain=[],
239
+ references=[
240
+ CatalogDeclarativeReference(
241
+ identifier=CatalogReferenceIdentifier(
242
+ id=dataset.definition.parent_dataset_reference,
243
+ ),
244
+ multivalue=True,
245
+ sources=[
246
+ CatalogDeclarativeReferenceSource(
247
+ column=dataset.definition.dataset_reference_source_column,
248
+ data_type=dataset.definition.dataset_reference_source_column_data_type.value,
249
+ target=CatalogGrainIdentifier(
250
+ id=dataset.definition.parent_dataset_reference_attribute_id,
251
+ type=CustomFieldType.ATTRIBUTE.value,
252
+ ),
253
+ )
254
+ ],
255
+ ),
256
+ ]
257
+ + date_references,
258
+ description=None,
259
+ attributes=attributes,
260
+ facts=facts,
261
+ data_source_table_id=dataset_source_table_id,
262
+ sql=dataset_sql,
263
+ workspace_data_filter_columns=[
264
+ CatalogDeclarativeWorkspaceDataFilterColumn(
265
+ name=dataset.definition.workspace_data_filter_column_name,
266
+ data_type=ColumnDataType.STRING.value,
267
+ )
268
+ ],
269
+ workspace_data_filter_references=[
270
+ CatalogDeclarativeWorkspaceDataFilterReferences(
271
+ filter_id=CatalogDatasetWorkspaceDataFilterIdentifier(
272
+ id=dataset.definition.workspace_data_filter_id
273
+ ),
274
+ filter_column=dataset.definition.workspace_data_filter_column_name,
275
+ filter_column_data_type=ColumnDataType.STRING.value,
276
+ )
277
+ ],
278
+ tags=[dataset.definition.dataset_name],
279
+ )
280
+ )
281
+
282
+ # Create the Logical Data Model from the datasets and the date instances.
283
+ ldm = CatalogDeclarativeLdm(
284
+ datasets=declarative_datasets, date_instances=date_instances
285
+ )
286
+ return CatalogDeclarativeModel(ldm=ldm)
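
A hedged sketch of how the processor's output is consumed: `datasets_to_ldm` returns a `CatalogDeclarativeModel` that can be pushed with the GoodData Python SDK. The host, token, and `validated` mapping below are placeholders; normally `validated` comes from the validator in the next file:

```python
from gooddata_sdk.sdk import GoodDataSdk

from gooddata_pipelines.ldm_extension.input_processor import (
    LdmExtensionDataProcessor,
)

processor = LdmExtensionDataProcessor()
sdk = GoodDataSdk.create(host_="https://example.gooddata.com", token_="<api-token>")

# Placeholder; normally produced by LdmExtensionDataValidator().validate(...).
validated = {}  # {workspace_id: {dataset_id: CustomDataset}}

for workspace_id, datasets in validated.items():
    declarative_model = processor.datasets_to_ldm(datasets)
    sdk.catalog_workspace_content.put_declarative_ldm(
        workspace_id=workspace_id, ldm=declarative_model
    )
```
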
@@ -0,0 +1,185 @@
1
+ # (C) 2025 GoodData Corporation
2
+ """Module for validating custom fields input data.
3
+
4
+ This module is responsible for validating custom fields input data, checking for
5
+ row level and aggregated constraints.
6
+ """
7
+
8
+ from collections import Counter
9
+ from typing import Any, TypeVar
10
+
11
+ from pydantic import BaseModel
12
+
13
+ from gooddata_pipelines.ldm_extension.models.aliases import (
14
+ DatasetId,
15
+ WorkspaceId,
16
+ )
17
+ from gooddata_pipelines.ldm_extension.models.custom_data_object import (
18
+ CustomDataset,
19
+ CustomDatasetDefinition,
20
+ CustomFieldDefinition,
21
+ CustomFieldType,
22
+ )
23
+
24
+
25
+ class LdmExtensionDataValidator:
26
+ ModelT = TypeVar("ModelT", bound=BaseModel)
27
+
28
+ def validate(
29
+ self,
30
+ dataset_definitions: list[CustomDatasetDefinition],
31
+ field_definitions: list[CustomFieldDefinition],
32
+ ) -> dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
33
+ """Validate dataset and field definitions.
34
+
35
+ Validates the dataset definitions and field definitions by using Pydantic
36
+ models to check row level constraints, then aggregates the definitions
37
+ per workspace, while checking for integrity on aggregated level, i.e.,
38
+ uniqueness of combinations of identifiers on the workspace level.
39
+
40
+ Args:
41
+ raw_dataset_definitions (list[dict[str, str]]): List of raw dataset definitions to validate.
42
+ raw_field_definitions (list[dict[str, str]]): List of raw field definitions to validate.
43
+ Returns:
44
+ dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
45
+ Dictionary of validated dataset definitions per workspace,
46
+ where each dataset contains its custom fields:
47
+ ```python
48
+ {
49
+ "workspace_id_1": {
50
+ "dataset_id_1": CustomDataset(...),
51
+ "dataset_id_2": CustomDataset(...),
52
+ },
53
+ ...
54
+ }
55
+ ```
56
+ """
57
+
58
+ # First, validate the dataset definitions and aggregate them per workspace.
59
+ validated_data = self._validate_dataset_definitions(dataset_definitions)
60
+
61
+ # Then validate the field definitions and connect them to the datasets
62
+ validated_data = self._validate_field_definitions(
63
+ validated_data, field_definitions
64
+ )
65
+
66
+ return validated_data
67
+
68
+ def _validate_dataset_definitions(
69
+ self,
70
+ dataset_definitions: list[CustomDatasetDefinition],
71
+ ) -> dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
72
+ self._check_dataset_combinations(dataset_definitions)
73
+
74
+ validated_definitions: dict[
75
+ WorkspaceId, dict[DatasetId, CustomDataset]
76
+ ] = {}
77
+ for definition in dataset_definitions:
78
+ validated_definitions.setdefault(definition.workspace_id, {})[
79
+ definition.dataset_id
80
+ ] = CustomDataset(definition=definition, custom_fields=[])
81
+
82
+ return validated_definitions
83
+
84
+ def _check_dataset_combinations(
85
+ self, dataset_definitions: list[CustomDatasetDefinition]
86
+ ) -> None:
87
+ """Check integrity of provided dataset definitions.
88
+
89
+ Validation criteria:
90
+ - workspace_id + dataset_id must be unique across all dataset definitions.
91
+
92
+ Args:
93
+ dataset_definitions (list[CustomDatasetDefinition]): List of dataset definitions to check.
94
+ Raises:
95
+ ValueError: If there are duplicate dataset definitions based on workspace_id and dataset_id.
96
+ """
97
+ workspace_dataset_combinations = [
98
+ (definition.workspace_id, definition.dataset_id)
99
+ for definition in dataset_definitions
100
+ ]
101
+ if len(workspace_dataset_combinations) != len(
102
+ set(workspace_dataset_combinations)
103
+ ):
104
+ duplicates = self._get_duplicates(workspace_dataset_combinations)
105
+ raise ValueError(
106
+ "Duplicate dataset definitions found in the raw dataset "
107
+ + f"definitions (workspace_id, dataset_id): {duplicates}"
108
+ )
109
+
110
+ @staticmethod
111
+ def _get_duplicates(list_to_check: list[Any]) -> list[Any]:
112
+ """Get duplicates from a list.
113
+
114
+ Args:
115
+ list_to_check (list[Any]): List of items to check for duplicates.
116
+ Returns:
117
+ list[Any]: List of duplicate items.
118
+ """
119
+ counts = Counter(list_to_check)
120
+ return [item for item, count in counts.items() if count > 1]
121
+
122
+ def _check_field_combinations(
123
+ self, field_definitions: list[CustomFieldDefinition]
124
+ ) -> None:
125
+ """Check integrity of provided field definitions.
126
+
127
+ Validation criteria (per workspace):
128
+ - unique workspace_id + cf_id combinations (only for attribute and fact custom_field_type)
129
+ - there is no row with the same dataset_id and cf_id (only for date custom_field_type)
130
+
131
+ Args:
132
+ field_definitions (list[CustomFieldDefinition]): List of field definitions to check.
133
+ Raises:
134
+ ValueError: If there are duplicate field definitions based on workspace_id and cf_id.
135
+ """
136
+ workspace_field_combinations: set[tuple[str, str]] = set()
137
+ dataset_field_combinations: set[tuple[str, str]] = set()
138
+
139
+ for field in field_definitions:
140
+ if field.custom_field_type in [
141
+ CustomFieldType.ATTRIBUTE,
142
+ CustomFieldType.FACT,
143
+ ]:
144
+ combination = (field.workspace_id, field.custom_field_id)
145
+ if combination in workspace_field_combinations:
146
+ raise ValueError(
147
+ f"Duplicate custom field found for workspace {field.workspace_id} "
148
+ + f"with field ID {field.custom_field_id}"
149
+ )
150
+ workspace_field_combinations.add(combination)
151
+
152
+ elif field.custom_field_type == CustomFieldType.DATE:
153
+ combination = (field.dataset_id, field.custom_field_id)
154
+ if combination in dataset_field_combinations:
155
+ raise ValueError(
156
+ f"Duplicate custom field found for dataset {field.dataset_id} "
157
+ + f"with field ID {field.custom_field_id}"
158
+ )
159
+ dataset_field_combinations.add(combination)
160
+
161
+ def _validate_field_definitions(
162
+ self,
163
+ validated_definitions: dict[
164
+ WorkspaceId, dict[DatasetId, CustomDataset]
165
+ ],
166
+ field_definitions: list[CustomFieldDefinition],
167
+ ) -> dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
168
+ """Validates custom field definitions amd connects them to the datasets.
169
+
170
+ Args:
171
+ validated_definitions (dict[WorkspaceId, dict[DatasetId, CustomDataset]]):
172
+ Dictionary of validated dataset definitions per workspace.
173
+ field_definitions (list[CustomFieldDefinition]): List of field definitions to validate.
174
+ Returns:
175
+ dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
176
+ Updated dictionary of validated dataset definitions with custom fields added.
177
+ """
178
+ self._check_field_combinations(field_definitions)
179
+
180
+ for field_definition in field_definitions:
181
+ validated_definitions[field_definition.workspace_id][
182
+ field_definition.dataset_id
183
+ ].custom_fields.append(field_definition)
184
+
185
+ return validated_definitions
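
A sketch of the validator in isolation, using hypothetical workspace, dataset, and column names; the field names match the Pydantic models shown later in `custom_data_object.py`:

```python
from gooddata_pipelines import (
    ColumnDataType,
    CustomDatasetDefinition,
    CustomFieldDefinition,
    CustomFieldType,
)
from gooddata_pipelines.ldm_extension.input_validator import (
    LdmExtensionDataValidator,
)

dataset = CustomDatasetDefinition(
    workspace_id="ws_1",  # hypothetical workspace
    dataset_id="custom_orders",
    dataset_name="Custom Orders",
    dataset_datasource_id="my_datasource",
    dataset_source_table="orders",
    dataset_source_sql=None,
    parent_dataset_reference="customers",
    parent_dataset_reference_attribute_id="customer_id",
    dataset_reference_source_column="customer_id",
    dataset_reference_source_column_data_type=ColumnDataType.STRING,
    workspace_data_filter_id="wdf__client",
    workspace_data_filter_column_name="client_id",
)
field = CustomFieldDefinition(
    workspace_id="ws_1",
    dataset_id="custom_orders",
    custom_field_id="order_amount",
    custom_field_name="Order Amount",
    custom_field_type=CustomFieldType.FACT,
    custom_field_source_column="amount",
    custom_field_source_column_data_type=ColumnDataType.NUMERIC,
)

validated = LdmExtensionDataValidator().validate([dataset], [field])
# -> {"ws_1": {"custom_orders": CustomDataset(definition=dataset, custom_fields=[field])}}
```
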
@@ -0,0 +1,283 @@
1
+ # (C) 2025 GoodData Corporation
2
+ """Module orchestrating the custom fields logic."""
3
+
4
+ from pathlib import Path
5
+
6
+ from gooddata_sdk.sdk import GoodDataSdk
7
+ from gooddata_sdk.utils import PROFILES_FILE_PATH, profile_content
8
+
9
+ from gooddata_pipelines.api import GoodDataApi
10
+ from gooddata_pipelines.ldm_extension.input_processor import (
11
+ LdmExtensionDataProcessor,
12
+ )
13
+ from gooddata_pipelines.ldm_extension.input_validator import (
14
+ LdmExtensionDataValidator,
15
+ )
16
+ from gooddata_pipelines.ldm_extension.models.aliases import (
17
+ DatasetId,
18
+ WorkspaceId,
19
+ )
20
+ from gooddata_pipelines.ldm_extension.models.analytical_object import (
21
+ AnalyticalObject,
22
+ AnalyticalObjects,
23
+ )
24
+ from gooddata_pipelines.ldm_extension.models.custom_data_object import (
25
+ CustomDataset,
26
+ CustomDatasetDefinition,
27
+ CustomFieldDefinition,
28
+ )
29
+ from gooddata_pipelines.logger.logger import LogObserver
30
+
31
+
32
+ class LdmExtensionManager:
33
+ """Manager for creating custom datasets and fields in GoodData workspaces."""
34
+
35
+ INDENT = " " * 2
36
+
37
+ @classmethod
38
+ def create(cls, host: str, token: str) -> "LdmExtensionManager":
39
+ return cls(host=host, token=token)
40
+
41
+ @classmethod
42
+ def create_from_profile(
43
+ cls,
44
+ profile: str = "default",
45
+ profiles_path: Path = PROFILES_FILE_PATH,
46
+ ) -> "LdmExtensionManager":
47
+ """Creates a provisioner instance using a GoodData profile file."""
48
+ content = profile_content(profile, profiles_path)
49
+ return cls(host=content["host"], token=content["token"])
50
+
51
+ def __init__(self, host: str, token: str):
52
+ self._validator = LdmExtensionDataValidator()
53
+ self._processor = LdmExtensionDataProcessor()
54
+ self._sdk = GoodDataSdk.create(host_=host, token_=token)
55
+ self._api = GoodDataApi(host=host, token=token)
56
+ self.logger = LogObserver()
57
+
58
+ def _get_objects_with_invalid_relations(
59
+ self, workspace_id: str
60
+ ) -> list[AnalyticalObject]:
61
+ """Check for invalid references in the provided analytical objects.
62
+
63
+ This method checks if the references in the workspace's analytical objects
64
+ are valid. It returns a list of analytical objects that have invalid references.
65
+
66
+ Args:
67
+ workspace_id (str): The ID of the workspace to check.
68
+
69
+ Returns:
70
+ list[AnalyticalObject]: List of analytical objects with invalid references.
71
+ """
72
+
73
+ analytical_objects: list[AnalyticalObject] = (
74
+ self._get_analytical_objects(workspace_id=workspace_id)
75
+ )
76
+
77
+ objects_with_invalid_relations = [
78
+ obj
79
+ for obj in analytical_objects
80
+ if not obj.attributes.are_relations_valid
81
+ ]
82
+ return objects_with_invalid_relations
83
+
84
+ def _get_analytical_objects(
85
+ self, workspace_id: str
86
+ ) -> list[AnalyticalObject]:
87
+ """Get analytical objects in the workspace.
88
+
89
+ This method retrieves all analytical objects (metrics, visualizations, dashboards)
90
+ in the specified workspace and returns them as a list.
91
+
92
+ Args:
93
+ workspace_id (str): The ID of the workspace to retrieve objects from.
94
+
95
+ Returns:
96
+ list[AnalyticalObject]: List of analytical objects in the workspace.
97
+ """
98
+ metrics_response = self._api.get_all_metrics(workspace_id)
99
+ visualizations_response = self._api.get_all_visualization_objects(
100
+ workspace_id
101
+ )
102
+ dashboards_response = self._api.get_all_dashboards(workspace_id)
103
+
104
+ self._api.raise_if_response_not_ok(
105
+ metrics_response,
106
+ visualizations_response,
107
+ dashboards_response,
108
+ )
109
+ metrics = AnalyticalObjects(**metrics_response.json())
110
+ visualizations = AnalyticalObjects(**visualizations_response.json())
111
+ dashboards = AnalyticalObjects(**dashboards_response.json())
112
+
113
+ return metrics.data + visualizations.data + dashboards.data
114
+
115
+ @staticmethod
116
+ def _new_ldm_does_not_invalidate_relations(
117
+ current_invalid_relations: list[AnalyticalObject],
118
+ new_invalid_relations: list[AnalyticalObject],
119
+ ) -> bool:
120
+ """Check if the new LDM does not invalidate any new relations.
121
+
122
+ This method compares the lists of analytical objects containing invalid
123
+ relations. It creates sets of object IDs for each list and compares them.
124
+
125
+ If the set of new invalid relations is a subset of the set of current
126
+ invalid relations (that is before the changes to the LDM), the new LDM
127
+ does not invalidate any new relations and `True` is returned.
128
+
129
+ If the set of new invalid relations is not a subset of the current one,
130
+ it means that the new LDM invalidates new relations and `False` is returned.
131
+
132
+ Args:
133
+ current_invalid_relations (list[AnalyticalObject]): The current (before
134
+ changes to LDM) invalid relations.
135
+ new_invalid_relations (list[AnalyticalObject]): The new (after changes to
136
+ LDM) invalid relations.
137
+
138
+ Returns:
139
+ bool: True if the new LDM does not invalidate any relations, False otherwise.
140
+ """
141
+ # Create a set of IDs for each group, then compare those sets
142
+ set_current_invalid_relations = {
143
+ obj.id for obj in current_invalid_relations
144
+ }
145
+ set_new_invalid_relations = {obj.id for obj in new_invalid_relations}
146
+
147
+ # If the set of new invalid relations is a subset of the current one, no new relations were invalidated.
148
+ return set_new_invalid_relations.issubset(set_current_invalid_relations)
149
+
150
+ def _process_with_relations_check(
151
+ self,
152
+ validated_data: dict[WorkspaceId, dict[DatasetId, CustomDataset]],
153
+ ) -> None:
154
+ """Check whether relations of analytical objects are valid before and after
155
+ updating the LDM in the GoodData workspace.
156
+ """
157
+ # Iterate through the workspaces.
158
+ for workspace_id, datasets in validated_data.items():
159
+ self.logger.info(f"⚙️ Processing workspace {workspace_id}...")
160
+ # Get current workspace layout
161
+ current_layout = (
162
+ self._sdk.catalog_workspace.get_declarative_workspace(
163
+ workspace_id
164
+ )
165
+ )
166
+ # Get a set of objects with invalid relations from current workspace state
167
+ current_invalid_relations = (
168
+ self._get_objects_with_invalid_relations(
169
+ workspace_id=workspace_id
170
+ )
171
+ )
172
+
173
+ # Put the LDM with custom datasets into the GoodData workspace.
174
+ self._sdk.catalog_workspace_content.put_declarative_ldm(
175
+ workspace_id=workspace_id,
176
+ ldm=self._processor.datasets_to_ldm(datasets),
177
+ )
178
+
179
+ # Get a set of objects with invalid relations from the new workspace state
180
+ new_invalid_relations = self._get_objects_with_invalid_relations(
181
+ workspace_id=workspace_id
182
+ )
183
+
184
+ if self._new_ldm_does_not_invalidate_relations(
185
+ current_invalid_relations, new_invalid_relations
186
+ ):
187
+ self._log_success_message(workspace_id)
188
+ continue
189
+
190
+ self.logger.error(
191
+ f"❌ Difference in invalid relations found in workspace {workspace_id}."
192
+ )
193
+ self._log_diff_invalid_relations(
194
+ current_invalid_relations, new_invalid_relations
195
+ )
196
+
197
+ self.logger.info(
198
+ f"{self.INDENT}⚠️ Reverting the workspace layout to the original state."
199
+ )
200
+ # Put the original workspace layout back to the workspace
201
+ try:
202
+ self._sdk.catalog_workspace.put_declarative_workspace(
203
+ workspace_id=workspace_id, workspace=current_layout
204
+ )
205
+ except Exception as e:
206
+ self.logger.error(
207
+ f"Failed to revert workspace layout in {workspace_id}: {e}"
208
+ )
209
+
210
+ def _log_diff_invalid_relations(
211
+ self,
212
+ current_invalid_relations: list[AnalyticalObject],
213
+ new_invalid_relations: list[AnalyticalObject],
214
+ ) -> None:
215
+ """Logs objects with newly invalid relations.
216
+
217
+ Objects which previously did not have invalid relations, but do so after
218
+ updating the LDM, are logged.
219
+ """
220
+ # TODO: test !
221
+ diff_to_log: list[str] = []
222
+ for obj in new_invalid_relations:
223
+ if obj not in current_invalid_relations:
224
+ diff_to_log.append(
225
+ f"{self.INDENT}∙ {obj.id} ({obj.type}) {obj.attributes.title}"
226
+ )
227
+ joined_diff_to_log = "\n".join(diff_to_log)
228
+ error_message = f"{self.INDENT}Objects with newly invalidated relations:\n{joined_diff_to_log}"
229
+
230
+ self.logger.error(error_message)
231
+
232
+ def _process_without_relations_check(
233
+ self,
234
+ validated_data: dict[WorkspaceId, dict[DatasetId, CustomDataset]],
235
+ ) -> None:
236
+ """Update the LDM in the GoodData workspace without checking relations."""
237
+ for workspace_id, datasets in validated_data.items():
238
+ # Put the LDM with custom datasets into the GoodData workspace.
239
+ self._sdk.catalog_workspace_content.put_declarative_ldm(
240
+ workspace_id=workspace_id,
241
+ ldm=self._processor.datasets_to_ldm(datasets),
242
+ )
243
+ self._log_success_message(workspace_id)
244
+
245
+ def _log_success_message(self, workspace_id: str) -> None:
246
+ """Log a success message after updating the workspace LDM."""
247
+ self.logger.info(f"✅ LDM in {workspace_id} updated successfully.")
248
+
249
+ def process(
250
+ self,
251
+ custom_datasets: list[CustomDatasetDefinition],
252
+ custom_fields: list[CustomFieldDefinition],
253
+ check_relations: bool = True,
254
+ ) -> None:
255
+ """Create custom datasets and fields in GoodData workspaces.
256
+
257
+ Creates custom datasets and fields to extend the Logical Data Model (LDM)
258
+ in GoodData workspaces based on the provided raw data definitions. The raw
259
+ data is validated by Pydantic models (CustomDatasetDefinition and CustomFieldDefinition).
260
+ The defined datasets and fields are then uploaded to GoodData Cloud.
261
+
262
+ Args:
263
+ custom_datasets (list[CustomDatasetDefinition]): List of custom dataset definitions.
264
+ custom_fields (list[CustomFieldDefinition]): List of custom field definitions.
265
+ check_relations (bool): If True, checks for invalid relations in the workspace
266
+ after updating the LDM. If the number of invalid relations increases,
267
+ the LDM will be reverted to its previous state. If False, the check
268
+ is skipped and the LDM is updated directly. Defaults to True.
269
+
270
+ Raises:
271
+ ValueError: If there are validation errors in the dataset or field definitions.
272
+ """
273
+ # Validate raw data and aggregate the custom field and dataset
274
+ # definitions per workspace.
275
+ validated_data: dict[WorkspaceId, dict[DatasetId, CustomDataset]] = (
276
+ self._validator.validate(custom_datasets, custom_fields)
277
+ )
278
+
279
+ if check_relations:
280
+ # Process the validated data with relations check.
281
+ self._process_with_relations_check(validated_data)
282
+ else:
283
+ self._process_without_relations_check(validated_data)
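
End to end, the manager wires the validator, the processor, and the relations check together. A hedged sketch, reusing the hypothetical `dataset` and `field` definitions from the validator example above:

```python
from gooddata_pipelines import LdmExtensionManager

manager = LdmExtensionManager.create(
    host="https://example.gooddata.com",  # placeholder host
    token="<api-token>",
)

# Validates the definitions, pushes the extended LDM per workspace, and reverts a
# workspace if the update would newly invalidate metrics, visualizations, or dashboards.
manager.process(
    custom_datasets=[dataset],
    custom_fields=[field],
    check_relations=True,
)
```
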
@@ -0,0 +1 @@
1
+ # (C) 2025 GoodData Corporation
@@ -0,0 +1,9 @@
1
+ # (C) 2025 GoodData Corporation
2
+ """This module defines type aliases intended to improve readability."""
3
+
4
+ from typing import TypeAlias
5
+
6
+ WorkspaceId: TypeAlias = str
7
+ DatasetId: TypeAlias = str
8
+
9
+ __all__ = ["WorkspaceId", "DatasetId"]
@@ -0,0 +1,33 @@
1
+ # (C) 2025 GoodData Corporation
2
+ """This module defines the AnalyticalObjects Pydantic model.
3
+
4
+ The model is used to represent features of analytical objects important for
5
+ checking the validity of references.
6
+ """
7
+
8
+ from pydantic import BaseModel, Field
9
+
10
+
11
+ class Attributes(BaseModel):
12
+ title: str
13
+ are_relations_valid: bool = Field(alias="areRelationsValid")
14
+
15
+
16
+ class AnalyticalObject(BaseModel):
17
+ id: str
18
+ type: str
19
+ attributes: Attributes
20
+
21
+
22
+ class AnalyticalObjects(BaseModel):
23
+ """Simplified model representing response obtained from GoodData API when querying
24
+ analytical objects.
25
+
26
+ This model is used to represent analytical objects such as metrics, visualizations,
27
+ and dashboards in a simplified manner, with the purpose of checking the validity
28
+ of references of these objects.
29
+
30
+ This is not a complete schema of the analytical objects!
31
+ """
32
+
33
+ data: list[AnalyticalObject]
@@ -0,0 +1,90 @@
1
+ # (C) 2025 GoodData Corporation
2
+ """This module defines enums and models used to represent the input data.
3
+
4
+ Models defined here are used to validate and structure the input data before
5
+ further processing.
6
+ """
7
+
8
+ from enum import Enum
9
+
10
+ from pydantic import BaseModel, model_validator
11
+
12
+
13
+ class CustomFieldType(str, Enum):
14
+ """GoodData field types."""
15
+
16
+ # NOTE: Start using StrEnum with Python 3.11
17
+ ATTRIBUTE = "attribute"
18
+ FACT = "fact"
19
+ DATE = "date"
20
+
21
+
22
+ class ColumnDataType(str, Enum):
23
+ """Supported data types"""
24
+
25
+ # NOTE: Start using StrEnum with Python 3.11
26
+ INT = "INT"
27
+ STRING = "STRING"
28
+ DATE = "DATE"
29
+ NUMERIC = "NUMERIC"
30
+ TIMESTAMP = "TIMESTAMP"
31
+ TIMESTAMP_TZ = "TIMESTAMP_TZ"
32
+ BOOLEAN = "BOOLEAN"
33
+
34
+
35
+ class CustomFieldDefinition(BaseModel):
36
+ """Input model for custom field definition."""
37
+
38
+ workspace_id: str
39
+ dataset_id: str
40
+ custom_field_id: str
41
+ custom_field_name: str
42
+ custom_field_type: CustomFieldType
43
+ custom_field_source_column: str
44
+ custom_field_source_column_data_type: ColumnDataType
45
+
46
+ @model_validator(mode="after")
47
+ def check_ids_not_equal(self) -> "CustomFieldDefinition":
48
+ """Check that custom field ID is not the same as dataset ID."""
49
+ if self.custom_field_id == self.dataset_id:
50
+ raise ValueError(
51
+ f"Custom field ID {self.custom_field_id} cannot be the same as dataset ID {self.dataset_id}"
52
+ )
53
+ return self
54
+
55
+
56
+ class CustomDatasetDefinition(BaseModel):
57
+ """Input model for custom dataset definition."""
58
+
59
+ workspace_id: str
60
+ dataset_id: str
61
+ dataset_name: str
62
+ dataset_datasource_id: str
63
+ dataset_source_table: str | None
64
+ dataset_source_sql: str | None
65
+ parent_dataset_reference: str
66
+ parent_dataset_reference_attribute_id: str
67
+ dataset_reference_source_column: str
68
+ dataset_reference_source_column_data_type: ColumnDataType
69
+ workspace_data_filter_id: str
70
+ workspace_data_filter_column_name: str
71
+
72
+ @model_validator(mode="after")
73
+ def check_source(self) -> "CustomDatasetDefinition":
74
+ """At least one of dataset_source_table or dataset_source_sql is provided."""
75
+ if not (self.dataset_source_table or self.dataset_source_sql):
76
+ raise ValueError(
77
+ "One of dataset_source_table and dataset_source_sql must be provided"
78
+ )
79
+ if self.dataset_source_table and self.dataset_source_sql:
80
+ raise ValueError(
81
+ "Only one of dataset_source_table and dataset_source_sql can be provided"
82
+ )
83
+ return self
84
+
85
+
86
+ class CustomDataset(BaseModel):
87
+ """Custom dataset with its definition and custom fields."""
88
+
89
+ definition: CustomDatasetDefinition
90
+ custom_fields: list[CustomFieldDefinition]
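
The two `model_validator` hooks reject obviously inconsistent rows at parse time. A small sketch of the `check_ids_not_equal` rule, with hypothetical identifiers:

```python
from pydantic import ValidationError

from gooddata_pipelines import ColumnDataType, CustomFieldDefinition, CustomFieldType

try:
    CustomFieldDefinition(
        workspace_id="ws_1",
        dataset_id="custom_orders",
        custom_field_id="custom_orders",  # same as dataset_id -> rejected
        custom_field_name="Broken Field",
        custom_field_type=CustomFieldType.ATTRIBUTE,
        custom_field_source_column="some_column",
        custom_field_source_column_data_type=ColumnDataType.STRING,
    )
except ValidationError as error:
    print(error)
```
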
@@ -3,7 +3,16 @@
3
3
  from typing import Any
4
4
 
5
5
  from gooddata_sdk.catalog.user.entity_model.user import CatalogUser
6
- from pydantic import BaseModel
6
+ from pydantic import BaseModel, Field
7
+
8
+
9
+ class UserProfile(BaseModel):
10
+ """Minimal model of api/v1/profile response.
11
+
12
+ Does not contain all fields from the response.
13
+ """
14
+
15
+ user_id: str = Field(alias="userId")
7
16
 
8
17
 
9
18
  class BaseUser(BaseModel):
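
The new `UserProfile` model only needs the `userId` key from the `/api/v1/profile` payload; a sketch with a made-up payload (extra keys are ignored by default):

```python
from gooddata_pipelines.provisioning.entities.users.models.users import UserProfile

# Hypothetical /api/v1/profile payload; only "userId" is read by the model.
payload = {"userId": "service.account@example.com", "organizationId": "org_1"}

profile = UserProfile.model_validate(payload)
print(profile.user_id)  # "service.account@example.com"
```
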
@@ -11,6 +11,7 @@ from gooddata_sdk.catalog.user.entity_model.user_group import CatalogUserGroup
11
11
  from gooddata_pipelines.provisioning.entities.users.models.users import (
12
12
  UserFullLoad,
13
13
  UserIncrementalLoad,
14
+ UserProfile,
14
15
  )
15
16
  from gooddata_pipelines.provisioning.provisioning import Provisioning
16
17
  from gooddata_pipelines.provisioning.utils.context_objects import UserContext
@@ -30,6 +31,8 @@ class UserProvisioner(Provisioning[UserFullLoad, UserIncrementalLoad]):
30
31
  source_group_incremental: list[UserIncrementalLoad]
31
32
  source_group_full: list[UserFullLoad]
32
33
 
34
+ current_user_id: str
35
+
33
36
  FULL_LOAD_TYPE: type[UserFullLoad] = UserFullLoad
34
37
  INCREMENTAL_LOAD_TYPE: type[UserIncrementalLoad] = UserIncrementalLoad
35
38
 
@@ -37,6 +40,19 @@ class UserProvisioner(Provisioning[UserFullLoad, UserIncrementalLoad]):
37
40
  super().__init__(host, token)
38
41
  self.upstream_user_cache: dict[UserId, UserModel] = {}
39
42
 
43
+ def _get_current_user_id(self) -> str:
44
+ """Gets the current user ID."""
45
+
46
+ profile_response = self._api.get_profile()
47
+
48
+ if not profile_response.ok:
49
+ raise Exception("Failed to get current user profile")
50
+
51
+ profile_json = profile_response.json()
52
+ profile = UserProfile.model_validate(profile_json)
53
+
54
+ return profile.user_id
55
+
40
56
  def _try_get_user(
41
57
  self, user: UserModel, model: type[UserModel]
42
58
  ) -> UserModel | None:
@@ -99,6 +115,14 @@ class UserProvisioner(Provisioning[UserFullLoad, UserIncrementalLoad]):
99
115
  for its existence and create it if needed.
100
116
 
101
117
  """
118
+
119
+ if user.user_id == self.current_user_id:
120
+ self.logger.warning(
121
+ f"Skipping creation/update of current user: {user.user_id}. "
122
+ + "Current user should not be modified.",
123
+ )
124
+ return
125
+
102
126
  user_context = UserContext(
103
127
  user_id=user.user_id,
104
128
  user_groups=user.user_groups,
@@ -118,6 +142,13 @@ class UserProvisioner(Provisioning[UserFullLoad, UserIncrementalLoad]):
118
142
 
119
143
  def _delete_user(self, user_id: str) -> None:
120
144
  """Deletes user from the project."""
145
+ if user_id == self.current_user_id:
146
+ self.logger.warning(
147
+ f"Skipping deletion of current user: {user_id}."
148
+ + " Current user should not be deleted.",
149
+ )
150
+ return
151
+
121
152
  try:
122
153
  self._api._sdk.catalog_user.get_user(user_id)
123
154
  except NotFoundException:
@@ -135,6 +166,9 @@ class UserProvisioner(Provisioning[UserFullLoad, UserIncrementalLoad]):
135
166
 
136
167
  def _provision_incremental_load(self) -> None:
137
168
  """Runs the incremental provisioning logic."""
169
+ # Set the current user ID
170
+ self.current_user_id = self._get_current_user_id()
171
+
138
172
  for user in self.source_group_incremental:
139
173
  # Attempt to process each user. On failure, log the error and continue
140
174
  try:
@@ -146,6 +180,10 @@ class UserProvisioner(Provisioning[UserFullLoad, UserIncrementalLoad]):
146
180
 
147
181
  def _provision_full_load(self) -> None:
148
182
  """Runs the full load provisioning logic."""
183
+
184
+ # Set the current user ID
185
+ self.current_user_id = self._get_current_user_id()
186
+
149
187
  # Get all upstream users
150
188
  catalog_upstream_users: list[CatalogUser] = self._api.list_users()
151
189
 
@@ -50,7 +50,7 @@ class Provisioning(Generic[TFullLoadSourceData, TIncrementalSourceData]):
50
50
  ) -> TProvisioning:
51
51
  """Creates a provisioner instance using a GoodData profile file."""
52
52
  content = profile_content(profile, profiles_path)
53
- return cls(**content)
53
+ return cls(host=content["host"], token=content["token"])
54
54
 
55
55
  @staticmethod
56
56
  def _validate_credentials(host: str, token: str) -> None:
@@ -165,5 +165,4 @@ class Provisioning(Generic[TFullLoadSourceData, TIncrementalSourceData]):
165
165
 
166
166
  self.logger.error(exception_message)
167
167
 
168
- if not self.logger.subscribers:
169
- raise Exception(exception_message)
168
+ raise Exception(exception_message)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: gooddata-pipelines
3
- Version: 1.50.1.dev1
3
+ Version: 1.51.0
4
4
  Summary: GoodData Cloud lifecycle automation pipelines
5
5
  Author-email: GoodData <support@gooddata.com>
6
6
  License: MIT
@@ -8,7 +8,7 @@ License-File: LICENSE.txt
8
8
  Requires-Python: >=3.10
9
9
  Requires-Dist: boto3-stubs<2.0.0,>=1.39.3
10
10
  Requires-Dist: boto3<2.0.0,>=1.39.3
11
- Requires-Dist: gooddata-sdk~=1.50.1.dev1
11
+ Requires-Dist: gooddata-sdk~=1.51.0
12
12
  Requires-Dist: pydantic<3.0.0,>=2.11.3
13
13
  Requires-Dist: requests<3.0.0,>=2.32.3
14
14
  Requires-Dist: types-pyyaml<7.0.0,>=6.0.12.20250326
@@ -74,4 +74,12 @@ full_load_data: list[UserFullLoad] = UserFullLoad.from_list_of_dicts(
74
74
  provisioner.full_load(full_load_data)
75
75
  ```
76
76
 
77
- Ready-made scripts covering the basic use cases can be found here in the [GoodData Productivity Tools](https://github.com/gooddata/gooddata-productivity-tools) repository
77
+ ## Bugs & Requests
78
+
79
+ Please use the [GitHub issue tracker](https://github.com/gooddata/gooddata-python-sdk/issues) to submit bugs
80
+ or request features.
81
+
82
+ ## Changelog
83
+
84
+ See [Github releases](https://github.com/gooddata/gooddata-python-sdk/releases) for released versions
85
+ and a list of changes.
@@ -1,9 +1,9 @@
1
- gooddata_pipelines/__init__.py,sha256=AEKIRuGBPMA_RkL14RF-recw9hS4dGV8cVqgDM3XmrA,1931
1
+ gooddata_pipelines/__init__.py,sha256=UyE19wWfPh2R_5O0KSAS4XLllP3km3iGkDzRQFBd7jQ,2415
2
2
  gooddata_pipelines/_version.py,sha256=Zi8Ht5ofjFeSYGG5USixQtJNB1po6okh0Rez8VyAsFM,200
3
3
  gooddata_pipelines/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  gooddata_pipelines/api/__init__.py,sha256=0WaBI2XMdkkZgnUsQ9kqipNzh2l2zamZvUt_qjp8xCk,106
5
5
  gooddata_pipelines/api/exceptions.py,sha256=rddQXfv8Ktckz7RONKBnKfm53M7dzPCh50Dl1k-8hqs,1545
6
- gooddata_pipelines/api/gooddata_api.py,sha256=ALuxTgu3KOK5S2b0C5HpDyvmT_UNfGeF-eqbvxXhDQM,8667
6
+ gooddata_pipelines/api/gooddata_api.py,sha256=8AZ5-mGGvo_4pPFjaf_DxkzQQqp2dRtiRPTM2sIdfYs,10934
7
7
  gooddata_pipelines/api/gooddata_api_wrapper.py,sha256=t7dFrXJ6X4yXS9XDthOmvd2CyzdnDDNPeIngTEW72YU,1152
8
8
  gooddata_pipelines/api/gooddata_sdk.py,sha256=wd5O4e9BQLWUawt6odrs5a51nqFGthBkvqh9WOiW36Q,13734
9
9
  gooddata_pipelines/api/utils.py,sha256=3QY_aYH17I9THoCINE3l-n5oj52k-gNeT1wv6Z_VxN8,1433
@@ -20,10 +20,18 @@ gooddata_pipelines/backup_and_restore/storage/__init__.py,sha256=-BG28PGDbalLyZG
20
20
  gooddata_pipelines/backup_and_restore/storage/base_storage.py,sha256=67wdItlG3neExeb_eCUDQhswdUB62X5Nyj9sOImB_Hg,487
21
21
  gooddata_pipelines/backup_and_restore/storage/local_storage.py,sha256=NvhPRzRAvuSpc5qCDyPqZaMB0i1jeZOZczaSwjUSGEg,1155
22
22
  gooddata_pipelines/backup_and_restore/storage/s3_storage.py,sha256=ZAysu4sPMAvdWs3RUroHHp2XZLHeU_LhJ5qBHlBQ7n4,3732
23
+ gooddata_pipelines/ldm_extension/__init__.py,sha256=-BG28PGDbalLyZGQjpFG0pjdIvtf25ut0r8ZwZVbi4s,32
24
+ gooddata_pipelines/ldm_extension/input_processor.py,sha256=lNIx6YfU4OJpSLyAitCoPwwf6eFIT6OyivRnqYX5O-o,11678
25
+ gooddata_pipelines/ldm_extension/input_validator.py,sha256=sAl-tixrS69G_lP19U9CjKHiWZinXOcjeAqwiydVctQ,7459
26
+ gooddata_pipelines/ldm_extension/ldm_extension_manager.py,sha256=XHNBMAaiUvIzBib3zz9mYcmGk6YOkIhqrxYfQaV9s9Q,11483
27
+ gooddata_pipelines/ldm_extension/models/__init__.py,sha256=-BG28PGDbalLyZGQjpFG0pjdIvtf25ut0r8ZwZVbi4s,32
28
+ gooddata_pipelines/ldm_extension/models/aliases.py,sha256=vmac3fGhTjGQqclW3Be42kE-ooC3ZBtYS8JqpXmBy_g,231
29
+ gooddata_pipelines/ldm_extension/models/analytical_object.py,sha256=biWgRdczuF-IRz7zQNWrWAWmc-r7_OpSdDJA7klI7ME,913
30
+ gooddata_pipelines/ldm_extension/models/custom_data_object.py,sha256=wH2ZrgjKiuFCDB2BTUntyGbEw-oFuwtaepYKdtSwgHY,2771
23
31
  gooddata_pipelines/logger/__init__.py,sha256=W-fJvMStnsDUY52AYFhx_LnS2cSCFNf3bB47Iew2j04,129
24
32
  gooddata_pipelines/logger/logger.py,sha256=yIMdvqsmOSGQLI4U_tQwxX5E2q_FXUu0Ko7Hv39slFM,3549
25
33
  gooddata_pipelines/provisioning/__init__.py,sha256=RZDEiv8nla4Jwa2TZXUdp1NSxg2_-lLqz4h7k2c4v5Y,854
26
- gooddata_pipelines/provisioning/provisioning.py,sha256=Mibf1-ZwPfHzmoAjgIRuYvtakY7LqerDTF36FgPg990,6175
34
+ gooddata_pipelines/provisioning/provisioning.py,sha256=UUHClT0q6O1XDAgiR2M23eFgtU3uEFBp87-b13-m97I,6166
27
35
  gooddata_pipelines/provisioning/assets/wdf_setting.json,sha256=nxOLGZkEQiMdARcUDER5ygqr3Zu-MQlLlUyXVhPUq64,280
28
36
  gooddata_pipelines/provisioning/entities/__init__.py,sha256=-BG28PGDbalLyZGQjpFG0pjdIvtf25ut0r8ZwZVbi4s,32
29
37
  gooddata_pipelines/provisioning/entities/user_data_filters/__init__.py,sha256=-BG28PGDbalLyZGQjpFG0pjdIvtf25ut0r8ZwZVbi4s,32
@@ -33,11 +41,11 @@ gooddata_pipelines/provisioning/entities/user_data_filters/models/udf_models.py,
33
41
  gooddata_pipelines/provisioning/entities/users/__init__.py,sha256=-BG28PGDbalLyZGQjpFG0pjdIvtf25ut0r8ZwZVbi4s,32
34
42
  gooddata_pipelines/provisioning/entities/users/permissions.py,sha256=2k3oPI7WyABcD2TMmLPsMUDrAjnKM7Vw56kz_RWhcmI,7135
35
43
  gooddata_pipelines/provisioning/entities/users/user_groups.py,sha256=-2Nca01ZMjXmnAGDUuKP5G7mqFyn4MnsgZsnS2oy7vg,8511
36
- gooddata_pipelines/provisioning/entities/users/users.py,sha256=TVfOp3fqQYmzA4K03IBGNYJrqGQAzWH_oay0qsvR8Xo,6633
44
+ gooddata_pipelines/provisioning/entities/users/users.py,sha256=BPTbE0-lvwkgoTVwLUbMqmlq7L597nwRCSK5FaM8F4I,7730
37
45
  gooddata_pipelines/provisioning/entities/users/models/__init__.py,sha256=-BG28PGDbalLyZGQjpFG0pjdIvtf25ut0r8ZwZVbi4s,32
38
46
  gooddata_pipelines/provisioning/entities/users/models/permissions.py,sha256=buyNtDShvAJL4mFZSV-UqK_9JAL_2-AaIlGYCHibhHo,7244
39
47
  gooddata_pipelines/provisioning/entities/users/models/user_groups.py,sha256=Odp4yZoK2vC40jgh7FBKmaIINpwffl62uoaT8Xxr-14,1160
40
- gooddata_pipelines/provisioning/entities/users/models/users.py,sha256=lwb8Q-slBELs_0882KOumkMgKiFKCL3ZABONsoT5Nw0,2234
48
+ gooddata_pipelines/provisioning/entities/users/models/users.py,sha256=hR5on68NEpw3KAPooR3Z1TRUzV5nbp0jrrOLUDW8P24,2424
41
49
  gooddata_pipelines/provisioning/entities/workspaces/__init__.py,sha256=-BG28PGDbalLyZGQjpFG0pjdIvtf25ut0r8ZwZVbi4s,32
42
50
  gooddata_pipelines/provisioning/entities/workspaces/models.py,sha256=-ehte9HLNos3l6yLip4mZU6wBcmY_Yzwq0t0m0fhwPI,2031
43
51
  gooddata_pipelines/provisioning/entities/workspaces/workspace.py,sha256=jngaEKNlMfhjRr4rQ2ECQDoh0gk7KaZTMuTazPLECnM,11505
@@ -50,7 +58,7 @@ gooddata_pipelines/provisioning/utils/exceptions.py,sha256=1WnAOlPhqOf0xRcvn70lx
50
58
  gooddata_pipelines/provisioning/utils/utils.py,sha256=uF3k5hmoM5d6UoWWfPGCQgT_861zcU-ACyaQHHOOncY,2434
51
59
  gooddata_pipelines/utils/__init__.py,sha256=s9TtSjKqo1gSGWOVoGrXaGi1TsbRowjRDYKtjmKy7BY,155
52
60
  gooddata_pipelines/utils/rate_limiter.py,sha256=owbcEZhUxlTnE7rRHiWQ8XBC-vML2fVPbt41EeGEM7o,2002
53
- gooddata_pipelines-1.50.1.dev1.dist-info/METADATA,sha256=YcJNGu4zGfjvfMJlzUzm77G5rm0xubtl2-0MUB40jWI,3522
54
- gooddata_pipelines-1.50.1.dev1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
55
- gooddata_pipelines-1.50.1.dev1.dist-info/licenses/LICENSE.txt,sha256=PNC7WXGIo6OKkNoPLRxlVrw6jaLcjSTUsSxy9Xcu9Jo,560365
56
- gooddata_pipelines-1.50.1.dev1.dist-info/RECORD,,
61
+ gooddata_pipelines-1.51.0.dist-info/METADATA,sha256=250PnN2MssPWL7c1O0lcZR3eyoRw0WB_9tpY1POc6yU,3632
62
+ gooddata_pipelines-1.51.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
63
+ gooddata_pipelines-1.51.0.dist-info/licenses/LICENSE.txt,sha256=PNC7WXGIo6OKkNoPLRxlVrw6jaLcjSTUsSxy9Xcu9Jo,560365
64
+ gooddata_pipelines-1.51.0.dist-info/RECORD,,