gooddata-pipelines 1.50.0__py3-none-any.whl → 1.50.1.dev2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,185 @@
+# (C) 2025 GoodData Corporation
+"""Module for validating custom fields input data.
+
+This module is responsible for validating custom fields input data, checking
+row-level and aggregated constraints.
+"""
+
+from collections import Counter
+from typing import Any, TypeVar
+
+from pydantic import BaseModel
+
+from gooddata_pipelines.ldm_extension.models.aliases import (
+    DatasetId,
+    WorkspaceId,
+)
+from gooddata_pipelines.ldm_extension.models.custom_data_object import (
+    CustomDataset,
+    CustomDatasetDefinition,
+    CustomFieldDefinition,
+    CustomFieldType,
+)
+
+
+class LdmExtensionDataValidator:
+    ModelT = TypeVar("ModelT", bound=BaseModel)
+
+    def validate(
+        self,
+        dataset_definitions: list[CustomDatasetDefinition],
+        field_definitions: list[CustomFieldDefinition],
+    ) -> dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
+        """Validate dataset and field definitions.
+
+        Validates the dataset definitions and field definitions by using Pydantic
+        models to check row-level constraints, then aggregates the definitions
+        per workspace, while checking integrity at the aggregated level, i.e.,
+        uniqueness of identifier combinations at the workspace level.
+
+        Args:
+            dataset_definitions (list[CustomDatasetDefinition]): List of dataset definitions to validate.
+            field_definitions (list[CustomFieldDefinition]): List of field definitions to validate.
+        Returns:
+            dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
+                Dictionary of validated dataset definitions per workspace,
+                where each dataset contains its custom fields:
+                ```python
+                {
+                    "workspace_id_1": {
+                        "dataset_id_1": CustomDataset(...),
+                        "dataset_id_2": CustomDataset(...),
+                    },
+                    ...
+                }
+                ```
+        """
+
+        # First, validate the dataset definitions and aggregate them per workspace.
+        validated_data = self._validate_dataset_definitions(dataset_definitions)
+
+        # Then validate the field definitions and connect them to the datasets.
+        validated_data = self._validate_field_definitions(
+            validated_data, field_definitions
+        )
+
+        return validated_data
+
+    def _validate_dataset_definitions(
+        self,
+        dataset_definitions: list[CustomDatasetDefinition],
+    ) -> dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
+        self._check_dataset_combinations(dataset_definitions)
+
+        validated_definitions: dict[
+            WorkspaceId, dict[DatasetId, CustomDataset]
+        ] = {}
+        for definition in dataset_definitions:
+            validated_definitions.setdefault(definition.workspace_id, {})[
+                definition.dataset_id
+            ] = CustomDataset(definition=definition, custom_fields=[])
+
+        return validated_definitions
+
+    def _check_dataset_combinations(
+        self, dataset_definitions: list[CustomDatasetDefinition]
+    ) -> None:
+        """Check integrity of provided dataset definitions.
+
+        Validation criteria:
+            - workspace_id + dataset_id must be unique across all dataset definitions.
+
+        Args:
+            dataset_definitions (list[CustomDatasetDefinition]): List of dataset definitions to check.
+        Raises:
+            ValueError: If there are duplicate dataset definitions based on workspace_id and dataset_id.
+        """
+        workspace_dataset_combinations = [
+            (definition.workspace_id, definition.dataset_id)
+            for definition in dataset_definitions
+        ]
+        if len(workspace_dataset_combinations) != len(
+            set(workspace_dataset_combinations)
+        ):
+            duplicates = self._get_duplicates(workspace_dataset_combinations)
+            raise ValueError(
+                "Duplicate dataset definitions found in the raw dataset "
+                + f"definitions (workspace_id, dataset_id): {duplicates}"
+            )
+
+    @staticmethod
+    def _get_duplicates(list_to_check: list[Any]) -> list[Any]:
+        """Get duplicates from a list.
+
+        Args:
+            list_to_check (list[Any]): List of items to check for duplicates.
+        Returns:
+            list[Any]: List of duplicate items.
+        """
+        counts = Counter(list_to_check)
+        return [item for item, count in counts.items() if count > 1]
+
+    def _check_field_combinations(
+        self, field_definitions: list[CustomFieldDefinition]
+    ) -> None:
+        """Check integrity of provided field definitions.
+
+        Validation criteria (per workspace):
+            - unique workspace_id + cf_id combinations (only for attribute and fact custom_field_type)
+            - there is no row with the same dataset_id and cf_id (only for date custom_field_type)
+
+        Args:
+            field_definitions (list[CustomFieldDefinition]): List of field definitions to check.
+        Raises:
+            ValueError: If there are duplicate field definitions based on workspace_id and cf_id.
+        """
+        workspace_field_combinations: set[tuple[str, str]] = set()
+        dataset_field_combinations: set[tuple[str, str]] = set()
+
+        for field in field_definitions:
+            if field.custom_field_type in [
+                CustomFieldType.ATTRIBUTE,
+                CustomFieldType.FACT,
+            ]:
+                combination = (field.workspace_id, field.custom_field_id)
+                if combination in workspace_field_combinations:
+                    raise ValueError(
+                        f"Duplicate custom field found for workspace {field.workspace_id} "
+                        + f"with field ID {field.custom_field_id}"
+                    )
+                workspace_field_combinations.add(combination)
+
+            elif field.custom_field_type == CustomFieldType.DATE:
+                combination = (field.dataset_id, field.custom_field_id)
+                if combination in dataset_field_combinations:
+                    raise ValueError(
+                        f"Duplicate custom field found for dataset {field.dataset_id} "
+                        + f"with field ID {field.custom_field_id}"
+                    )
+                dataset_field_combinations.add(combination)
+
+    def _validate_field_definitions(
+        self,
+        validated_definitions: dict[
+            WorkspaceId, dict[DatasetId, CustomDataset]
+        ],
+        field_definitions: list[CustomFieldDefinition],
+    ) -> dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
+        """Validates custom field definitions and connects them to the datasets.
+
+        Args:
+            validated_definitions (dict[WorkspaceId, dict[DatasetId, CustomDataset]]):
+                Dictionary of validated dataset definitions per workspace.
+            field_definitions (list[CustomFieldDefinition]): List of field definitions to validate.
+        Returns:
+            dict[WorkspaceId, dict[DatasetId, CustomDataset]]:
+                Updated dictionary of validated dataset definitions with custom fields added.
+        """
+        self._check_field_combinations(field_definitions)
+
+        for field_definition in field_definitions:
+            validated_definitions[field_definition.workspace_id][
+                field_definition.dataset_id
+            ].custom_fields.append(field_definition)
+
+        return validated_definitions
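
For orientation, here is a minimal sketch of the aggregated duplicate check the validator performs. All identifiers and column values below are made up; only the import paths come from the code above.

```python
# Illustrative sketch; every value in `row` is a placeholder.
from gooddata_pipelines.ldm_extension.input_validator import (
    LdmExtensionDataValidator,
)
from gooddata_pipelines.ldm_extension.models.custom_data_object import (
    CustomDatasetDefinition,
)

row = {
    "workspace_id": "ws_1",
    "dataset_id": "custom_orders",
    "dataset_name": "Custom Orders",
    "dataset_datasource_id": "my_datasource",
    "dataset_source_table": "orders",
    "dataset_source_sql": None,
    "parent_dataset_reference": "orders_parent",
    "parent_dataset_reference_attribute_id": "order_id",
    "dataset_reference_source_column": "order_id",
    "dataset_reference_source_column_data_type": "STRING",
    "workspace_data_filter_id": "wdf_client",
    "workspace_data_filter_column_name": "client_id",
}
definitions = [
    CustomDatasetDefinition.model_validate(row),
    CustomDatasetDefinition.model_validate(row),  # same workspace_id + dataset_id
]

validator = LdmExtensionDataValidator()
# The second definition repeats the (workspace_id, dataset_id) pair, so
# _check_dataset_combinations raises ValueError before anything is aggregated.
validator.validate(definitions, field_definitions=[])
```
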
@@ -0,0 +1,283 @@
+# (C) 2025 GoodData Corporation
+"""Module orchestrating the custom fields logic."""
+
+from pathlib import Path
+
+from gooddata_sdk.sdk import GoodDataSdk
+from gooddata_sdk.utils import PROFILES_FILE_PATH, profile_content
+
+from gooddata_pipelines.api import GoodDataApi
+from gooddata_pipelines.ldm_extension.input_processor import (
+    LdmExtensionDataProcessor,
+)
+from gooddata_pipelines.ldm_extension.input_validator import (
+    LdmExtensionDataValidator,
+)
+from gooddata_pipelines.ldm_extension.models.aliases import (
+    DatasetId,
+    WorkspaceId,
+)
+from gooddata_pipelines.ldm_extension.models.analytical_object import (
+    AnalyticalObject,
+    AnalyticalObjects,
+)
+from gooddata_pipelines.ldm_extension.models.custom_data_object import (
+    CustomDataset,
+    CustomDatasetDefinition,
+    CustomFieldDefinition,
+)
+from gooddata_pipelines.logger.logger import LogObserver
+
+
+class LdmExtensionManager:
+    """Manager for creating custom datasets and fields in GoodData workspaces."""
+
+    INDENT = " " * 2
+
+    @classmethod
+    def create(cls, host: str, token: str) -> "LdmExtensionManager":
+        return cls(host=host, token=token)
+
+    @classmethod
+    def create_from_profile(
+        cls,
+        profile: str = "default",
+        profiles_path: Path = PROFILES_FILE_PATH,
+    ) -> "LdmExtensionManager":
+        """Creates an LdmExtensionManager instance using a GoodData profile file."""
+        content = profile_content(profile, profiles_path)
+        return cls(host=content["host"], token=content["token"])
+
+    def __init__(self, host: str, token: str):
+        self._validator = LdmExtensionDataValidator()
+        self._processor = LdmExtensionDataProcessor()
+        self._sdk = GoodDataSdk.create(host_=host, token_=token)
+        self._api = GoodDataApi(host=host, token=token)
+        self.logger = LogObserver()
+
+    def _get_objects_with_invalid_relations(
+        self, workspace_id: str
+    ) -> list[AnalyticalObject]:
+        """Check for invalid references in the workspace's analytical objects.
+
+        This method checks whether the references in the workspace's analytical objects
+        are valid. It returns a list of analytical objects that have invalid references.
+
+        Args:
+            workspace_id (str): The ID of the workspace to check.
+
+        Returns:
+            list[AnalyticalObject]: List of analytical objects with invalid references.
+        """
+
+        analytical_objects: list[AnalyticalObject] = (
+            self._get_analytical_objects(workspace_id=workspace_id)
+        )
+
+        objects_with_invalid_relations = [
+            obj
+            for obj in analytical_objects
+            if not obj.attributes.are_relations_valid
+        ]
+        return objects_with_invalid_relations
+
+    def _get_analytical_objects(
+        self, workspace_id: str
+    ) -> list[AnalyticalObject]:
+        """Get analytical objects in the workspace.
+
+        This method retrieves all analytical objects (metrics, visualizations, dashboards)
+        in the specified workspace and returns them as a list.
+
+        Args:
+            workspace_id (str): The ID of the workspace to retrieve objects from.
+
+        Returns:
+            list[AnalyticalObject]: List of analytical objects in the workspace.
+        """
+        metrics_response = self._api.get_all_metrics(workspace_id)
+        visualizations_response = self._api.get_all_visualization_objects(
+            workspace_id
+        )
+        dashboards_response = self._api.get_all_dashboards(workspace_id)
+
+        self._api.raise_if_response_not_ok(
+            metrics_response,
+            visualizations_response,
+            dashboards_response,
+        )
+        metrics = AnalyticalObjects(**metrics_response.json())
+        visualizations = AnalyticalObjects(**visualizations_response.json())
+        dashboards = AnalyticalObjects(**dashboards_response.json())
+
+        return metrics.data + visualizations.data + dashboards.data
+
+    @staticmethod
+    def _new_ldm_does_not_invalidate_relations(
+        current_invalid_relations: list[AnalyticalObject],
+        new_invalid_relations: list[AnalyticalObject],
+    ) -> bool:
+        """Check that the new LDM does not invalidate any new relations.
+
+        This method compares the lists of analytical objects containing invalid
+        relations. It creates sets of object IDs for each list and compares them.
+
+        If the set of new invalid relations is a subset of the set of current
+        invalid relations (i.e., those present before the LDM changes), the new LDM
+        does not invalidate any new relations and `True` is returned.
+
+        If the set of new invalid relations is not a subset of the current one,
+        the new LDM invalidates new relations and `False` is returned.
+
+        Args:
+            current_invalid_relations (list[AnalyticalObject]): The current (before
+                changes to LDM) invalid relations.
+            new_invalid_relations (list[AnalyticalObject]): The new (after changes to
+                LDM) invalid relations.
+
+        Returns:
+            bool: True if the new LDM does not invalidate any relations, False otherwise.
+        """
+        # Create a set of IDs for each group, then compare those sets.
+        set_current_invalid_relations = {
+            obj.id for obj in current_invalid_relations
+        }
+        set_new_invalid_relations = {obj.id for obj in new_invalid_relations}
+
+        # The new LDM is safe if the new invalid relations are a subset of the current ones.
+        return set_new_invalid_relations.issubset(set_current_invalid_relations)
+
+    def _process_with_relations_check(
+        self,
+        validated_data: dict[WorkspaceId, dict[DatasetId, CustomDataset]],
+    ) -> None:
+        """Check whether relations of analytical objects are valid before and after
+        updating the LDM in the GoodData workspace.
+        """
+        # Iterate through the workspaces.
+        for workspace_id, datasets in validated_data.items():
+            self.logger.info(f"⚙️ Processing workspace {workspace_id}...")
+            # Get current workspace layout
+            current_layout = (
+                self._sdk.catalog_workspace.get_declarative_workspace(
+                    workspace_id
+                )
+            )
+            # Get a set of objects with invalid relations from current workspace state
+            current_invalid_relations = (
+                self._get_objects_with_invalid_relations(
+                    workspace_id=workspace_id
+                )
+            )
+
+            # Put the LDM with custom datasets into the GoodData workspace.
+            self._sdk.catalog_workspace_content.put_declarative_ldm(
+                workspace_id=workspace_id,
+                ldm=self._processor.datasets_to_ldm(datasets),
+            )
+
+            # Get a set of objects with invalid relations from the new workspace state
+            new_invalid_relations = self._get_objects_with_invalid_relations(
+                workspace_id=workspace_id
+            )
+
+            if self._new_ldm_does_not_invalidate_relations(
+                current_invalid_relations, new_invalid_relations
+            ):
+                self._log_success_message(workspace_id)
+                continue
+
+            self.logger.error(
+                f"❌ Difference in invalid relations found in workspace {workspace_id}."
+            )
+            self._log_diff_invalid_relations(
+                current_invalid_relations, new_invalid_relations
+            )
+
+            self.logger.info(
+                f"{self.INDENT}⚠️ Reverting the workspace layout to the original state."
+            )
+            # Put the original workspace layout back to the workspace
+            try:
+                self._sdk.catalog_workspace.put_declarative_workspace(
+                    workspace_id=workspace_id, workspace=current_layout
+                )
+            except Exception as e:
+                self.logger.error(
+                    f"Failed to revert workspace layout in {workspace_id}: {e}"
+                )
+
+    def _log_diff_invalid_relations(
+        self,
+        current_invalid_relations: list[AnalyticalObject],
+        new_invalid_relations: list[AnalyticalObject],
+    ) -> None:
+        """Logs objects with newly invalid relations.
+
+        Objects which previously did not have invalid relations, but do so after
+        updating the LDM, are logged.
+        """
+        # TODO: test !
+        diff_to_log: list[str] = []
+        for obj in new_invalid_relations:
+            if obj not in current_invalid_relations:
+                diff_to_log.append(
+                    f"{self.INDENT}∙ {obj.id} ({obj.type}) {obj.attributes.title}"
+                )
+        joined_diff_to_log = "\n".join(diff_to_log)
+        error_message = f"{self.INDENT}Objects with newly invalidated relations:\n{joined_diff_to_log}"
+
+        self.logger.error(error_message)
+
+    def _process_without_relations_check(
+        self,
+        validated_data: dict[WorkspaceId, dict[DatasetId, CustomDataset]],
+    ) -> None:
+        """Update the LDM in the GoodData workspace without checking relations."""
+        for workspace_id, datasets in validated_data.items():
+            # Put the LDM with custom datasets into the GoodData workspace.
+            self._sdk.catalog_workspace_content.put_declarative_ldm(
+                workspace_id=workspace_id,
+                ldm=self._processor.datasets_to_ldm(datasets),
+            )
+            self._log_success_message(workspace_id)
+
+    def _log_success_message(self, workspace_id: str) -> None:
+        """Log a success message after updating the workspace LDM."""
+        self.logger.info(f"✅ LDM in {workspace_id} updated successfully.")
+
+    def process(
+        self,
+        custom_datasets: list[CustomDatasetDefinition],
+        custom_fields: list[CustomFieldDefinition],
+        check_relations: bool = True,
+    ) -> None:
+        """Create custom datasets and fields in GoodData workspaces.
+
+        Creates custom datasets and fields to extend the Logical Data Model (LDM)
+        in GoodData workspaces based on the provided raw data definitions. The raw
+        data is validated by Pydantic models (CustomDatasetDefinition and CustomFieldDefinition).
+        The defined datasets and fields are then uploaded to GoodData Cloud.
+
+        Args:
+            custom_datasets (list[CustomDatasetDefinition]): List of custom dataset definitions.
+            custom_fields (list[CustomFieldDefinition]): List of custom field definitions.
+            check_relations (bool): If True, checks for invalid relations in the workspace
+                after updating the LDM. If the update newly invalidates any relations,
+                the workspace layout is reverted to its previous state. If False, the
+                check is skipped and the LDM is updated directly. Defaults to True.
+
+        Raises:
+            ValueError: If there are validation errors in the dataset or field definitions.
+        """
+        # Validate raw data and aggregate the custom field and dataset
+        # definitions per workspace.
+        validated_data: dict[WorkspaceId, dict[DatasetId, CustomDataset]] = (
+            self._validator.validate(custom_datasets, custom_fields)
+        )
+
+        if check_relations:
+            # Process the validated data with relations check.
+            self._process_with_relations_check(validated_data)
+        else:
+            self._process_without_relations_check(validated_data)
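
A rough end-to-end sketch of how the manager is intended to be driven. The import path, host, and token are placeholders, and the definition lists are left empty; in practice they would hold `CustomDatasetDefinition` and `CustomFieldDefinition` objects such as those in the validator sketch above.

```python
# Sketch only: the import path, host, and token are assumptions, not verified values.
from gooddata_pipelines import LdmExtensionManager  # adjust to the actual module path

manager = LdmExtensionManager.create(
    host="https://example.gooddata.com", token="***"
)
# Alternatively, read credentials from a GoodData profiles file:
# manager = LdmExtensionManager.create_from_profile(profile="default")

datasets = []  # populate with CustomDatasetDefinition objects
fields = []    # populate with CustomFieldDefinition objects

manager.process(
    custom_datasets=datasets,
    custom_fields=fields,
    check_relations=True,  # revert the LDM if the update newly invalidates relations
)
```
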
@@ -0,0 +1 @@
+# (C) 2025 GoodData Corporation
@@ -0,0 +1,9 @@
+# (C) 2025 GoodData Corporation
+"""This module defines type aliases intended to improve readability."""
+
+from typing import TypeAlias
+
+WorkspaceId: TypeAlias = str
+DatasetId: TypeAlias = str
+
+__all__ = ["WorkspaceId", "DatasetId"]
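
As a small illustration of why the aliases exist, a nested mapping annotated with them reads considerably better than one annotated with bare `str` keys. The helper below is hypothetical:

```python
from gooddata_pipelines.ldm_extension.models.aliases import DatasetId, WorkspaceId

# Hypothetical helper: the aliases document what the dictionary keys mean.
def count_datasets(per_workspace: dict[WorkspaceId, dict[DatasetId, object]]) -> int:
    return sum(len(datasets) for datasets in per_workspace.values())
```
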
@@ -0,0 +1,33 @@
+# (C) 2025 GoodData Corporation
+"""This module defines the AnalyticalObjects Pydantic model.
+
+The model is used to represent features of analytical objects important for
+checking the validity of references.
+"""
+
+from pydantic import BaseModel, Field
+
+
+class Attributes(BaseModel):
+    title: str
+    are_relations_valid: bool = Field(alias="areRelationsValid")
+
+
+class AnalyticalObject(BaseModel):
+    id: str
+    type: str
+    attributes: Attributes
+
+
+class AnalyticalObjects(BaseModel):
+    """Simplified model representing the response obtained from the GoodData API
+    when querying analytical objects.
+
+    This model is used to represent analytical objects such as metrics, visualizations,
+    and dashboards in a simplified manner, with the purpose of checking the validity
+    of references of these objects.
+
+    This is not a complete schema of the analytical objects!
+    """
+
+    data: list[AnalyticalObject]
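
A sketch of how these models digest an API payload and how objects with invalid relations are then filtered out. The JSON below only mimics the fields the models actually read; it is not a complete GoodData API response.

```python
from gooddata_pipelines.ldm_extension.models.analytical_object import (
    AnalyticalObjects,
)

# Illustrative payload; only the fields the model reads are shown.
payload = {
    "data": [
        {
            "id": "revenue",
            "type": "metric",
            "attributes": {"title": "Revenue", "areRelationsValid": True},
        },
        {
            "id": "orders_by_region",
            "type": "visualizationObject",
            "attributes": {"title": "Orders by region", "areRelationsValid": False},
        },
    ]
}

objects = AnalyticalObjects(**payload)
invalid = [o for o in objects.data if not o.attributes.are_relations_valid]
# -> only the "orders_by_region" visualization is reported as having invalid relations
```
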
@@ -0,0 +1,90 @@
+# (C) 2025 GoodData Corporation
+"""This module defines enums and models used to represent the input data.
+
+Models defined here are used to validate and structure the input data before
+further processing.
+"""
+
+from enum import Enum
+
+from pydantic import BaseModel, model_validator
+
+
+class CustomFieldType(str, Enum):
+    """GoodData field types."""
+
+    # NOTE: Start using StrEnum with Python 3.11
+    ATTRIBUTE = "attribute"
+    FACT = "fact"
+    DATE = "date"
+
+
+class ColumnDataType(str, Enum):
+    """Supported data types."""
+
+    # NOTE: Start using StrEnum with Python 3.11
+    INT = "INT"
+    STRING = "STRING"
+    DATE = "DATE"
+    NUMERIC = "NUMERIC"
+    TIMESTAMP = "TIMESTAMP"
+    TIMESTAMP_TZ = "TIMESTAMP_TZ"
+    BOOLEAN = "BOOLEAN"
+
+
+class CustomFieldDefinition(BaseModel):
+    """Input model for custom field definition."""
+
+    workspace_id: str
+    dataset_id: str
+    custom_field_id: str
+    custom_field_name: str
+    custom_field_type: CustomFieldType
+    custom_field_source_column: str
+    custom_field_source_column_data_type: ColumnDataType
+
+    @model_validator(mode="after")
+    def check_ids_not_equal(self) -> "CustomFieldDefinition":
+        """Check that custom field ID is not the same as dataset ID."""
+        if self.custom_field_id == self.dataset_id:
+            raise ValueError(
+                f"Custom field ID {self.custom_field_id} cannot be the same as dataset ID {self.dataset_id}"
+            )
+        return self
+
+
+class CustomDatasetDefinition(BaseModel):
+    """Input model for custom dataset definition."""
+
+    workspace_id: str
+    dataset_id: str
+    dataset_name: str
+    dataset_datasource_id: str
+    dataset_source_table: str | None
+    dataset_source_sql: str | None
+    parent_dataset_reference: str
+    parent_dataset_reference_attribute_id: str
+    dataset_reference_source_column: str
+    dataset_reference_source_column_data_type: ColumnDataType
+    workspace_data_filter_id: str
+    workspace_data_filter_column_name: str
+
+    @model_validator(mode="after")
+    def check_source(self) -> "CustomDatasetDefinition":
+        """Exactly one of dataset_source_table or dataset_source_sql must be provided."""
+        if not (self.dataset_source_table or self.dataset_source_sql):
+            raise ValueError(
+                "One of dataset_source_table and dataset_source_sql must be provided"
+            )
+        if self.dataset_source_table and self.dataset_source_sql:
+            raise ValueError(
+                "Only one of dataset_source_table and dataset_source_sql can be provided"
+            )
+        return self
+
+
+class CustomDataset(BaseModel):
+    """Custom dataset with its definition and custom fields."""
+
+    definition: CustomDatasetDefinition
+    custom_fields: list[CustomFieldDefinition]
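
A short sketch of the row-level validation in action; the values are illustrative. Because the check runs in a Pydantic `model_validator`, the `ValueError` raised inside it surfaces as a `ValidationError` at construction time.

```python
from pydantic import ValidationError

from gooddata_pipelines.ldm_extension.models.custom_data_object import (
    CustomFieldDefinition,
)

# A field whose ID collides with its dataset ID is rejected by check_ids_not_equal.
try:
    CustomFieldDefinition(
        workspace_id="ws_1",
        dataset_id="custom_orders",
        custom_field_id="custom_orders",  # same as dataset_id -> invalid
        custom_field_name="Custom Orders",
        custom_field_type="attribute",
        custom_field_source_column="priority",
        custom_field_source_column_data_type="STRING",
    )
except ValidationError as exc:
    print(exc)  # "... cannot be the same as dataset ID ..."
```

The `check_source` validator on `CustomDatasetDefinition` behaves the same way: a definition with both `dataset_source_table` and `dataset_source_sql` set, or with neither, fails at construction.
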
@@ -3,7 +3,16 @@
 from typing import Any
 
 from gooddata_sdk.catalog.user.entity_model.user import CatalogUser
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
+
+
+class UserProfile(BaseModel):
+    """Minimal model of api/v1/profile response.
+
+    Does not contain all fields from the response.
+    """
+
+    user_id: str = Field(alias="userId")
 
 
 class BaseUser(BaseModel):
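
A quick sketch of the new `UserProfile` model mapping the camelCase API field onto a snake_case attribute. The payload is trimmed to the single field the model reads, and `UserProfile` is assumed to be imported from the module shown in this hunk (its path is not visible in the diff).

```python
# Illustrative payload; the real /api/v1/profile response carries more fields.
profile = UserProfile.model_validate({"userId": "jane.doe"})
print(profile.user_id)  # -> "jane.doe"
```
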