gooddata-pandas 1.15.1.dev4__py3-none-any.whl → 1.34.1.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of gooddata-pandas might be problematic.

```diff
@@ -1,7 +1,4 @@
 # (C) 2021 GoodData Corporation
-try:
-    from importlib import metadata
-except ImportError:
-    import importlib_metadata as metadata  # type: ignore # mypy issue #1153
+from importlib import metadata
 
 __version__: str = metadata.version("gooddata-pandas")
```
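The simplified import assumes Python 3.8+, where `importlib.metadata` ships in the standard library, so the `importlib_metadata` backport fallback is no longer needed. A minimal sketch of the resulting version lookup (the `PackageNotFoundError` fallback is illustrative and not part of the package):

```python
# Minimal sketch of the simplified version lookup; requires Python >= 3.8
# where importlib.metadata is in the standard library.
from importlib import metadata

try:
    __version__: str = metadata.version("gooddata-pandas")
except metadata.PackageNotFoundError:
    # Illustrative fallback for source checkouts where the distribution
    # metadata is not installed; the real module does not catch this.
    __version__ = "0.0.0"
```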
```diff
@@ -6,7 +6,7 @@ from typing import Any, Optional, Union
 from gooddata_sdk import (
     Attribute,
     AttributeFilter,
-    CatalogWorkspaceContent,
+    CatalogAttribute,
     ExecutionDefinition,
     ExecutionResponse,
     Filter,
@@ -16,6 +16,7 @@ from gooddata_sdk import (
     ObjId,
     TableDimension,
 )
+from gooddata_sdk.utils import IdObjType, filter_for_attributes_labels
 
 from gooddata_pandas.utils import (
     ColumnsDef,
@@ -119,10 +120,7 @@ class ExecutionDefinitionBuilder:
         if index_by is None:
             return
 
-        if not isinstance(index_by, dict):
-            _index_by = {self._DEFAULT_INDEX_NAME: index_by}
-        else:
-            _index_by = index_by
+        _index_by = {self._DEFAULT_INDEX_NAME: index_by} if not isinstance(index_by, dict) else index_by
 
         for index_name, index_def in _index_by.items():
             if isinstance(index_def, str) and (index_def in self._col_to_attr_idx):
```
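The four-line `if/else` collapses into a conditional expression with identical behavior: a bare index definition is wrapped into a single-entry dict under the builder's default index name, while dicts pass through. A standalone sketch (the value of `_DEFAULT_INDEX_NAME` is an assumption made for illustration):

```python
# Standalone sketch of the index normalization above. _DEFAULT_INDEX_NAME
# stands in for ExecutionDefinitionBuilder's class attribute (value assumed).
from typing import Union

_DEFAULT_INDEX_NAME = "0"  # assumption for illustration

def normalize_index_by(index_by: Union[str, dict]) -> dict:
    # wrap a bare index definition into a single-entry dict, pass dicts through
    return {_DEFAULT_INDEX_NAME: index_by} if not isinstance(index_by, dict) else index_by

assert normalize_index_by("region") == {"0": "region"}
assert normalize_index_by({"r": "region"}) == {"r": "region"}
```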
```diff
@@ -208,11 +206,14 @@ class ExecutionDefinitionBuilder:
                     raise ValueError(f"AttributeFilter instance referencing metric [{_filter.label}]")
                 else:
                     _filter.label = _str_to_obj_id(_filter.label) or _filter.label
-            elif isinstance(_filter, MetricValueFilter) and isinstance(_filter.metric, str):
-                if _filter.metric in self._col_to_metric_idx:
-                    # Metric is referenced by local_id which was already generated during creation of columns
-                    # When Metric filter contains ObjId reference, it does not need to be modified
-                    _filter.metric = self._metrics[self._col_to_metric_idx[_filter.metric]].local_id
+            elif (
+                isinstance(_filter, MetricValueFilter)
+                and isinstance(_filter.metric, str)
+                and _filter.metric in self._col_to_metric_idx
+            ):
+                # Metric is referenced by local_id which was already generated during creation of columns
+                # When Metric filter contains ObjId reference, it does not need to be modified
+                _filter.metric = self._metrics[self._col_to_metric_idx[_filter.metric]].local_id
 
         return filters
 
```
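Flattening the nested `if` into a single `elif` condition is behavior-preserving: a `MetricValueFilter` whose metric is a string but not a known column falls through untouched in both shapes. A toy sketch demonstrating the equivalence:

```python
# Toy sketch: the flattened condition and the old nested form remap the
# same metrics. Names below are local stand-ins, not the builder's fields.
col_to_metric_idx = {"revenue": 0}
local_ids = ["revenue_local_id"]

def remap_flat(metric):
    if isinstance(metric, str) and metric in col_to_metric_idx:
        return local_ids[col_to_metric_idx[metric]]
    return metric

def remap_nested(metric):
    if isinstance(metric, str):
        if metric in col_to_metric_idx:
            return local_ids[col_to_metric_idx[metric]]
    return metric

for m in ("revenue", "unknown_metric", 42):
    assert remap_flat(m) == remap_nested(m)
```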
```diff
@@ -311,27 +312,32 @@ def _extract_for_metrics_only(response: ExecutionResponse, cols: list, col_to_me
     """
     exec_def = response.exec_def
     result = response.read_result(len(exec_def.metrics))
-    data = dict()
+    if len(result.data) == 0:
+        return {col: [] for col in cols}
+
+    return {col: [result.data[col_to_metric_idx[col]]] for col in cols}
 
-    for col in cols:
-        data[col] = [result.data[col_to_metric_idx[col]]]
 
-    return data
+def _find_attribute(attributes: list[CatalogAttribute], id_obj: IdObjType) -> Union[CatalogAttribute, None]:
+    for attribute in attributes:
+        if attribute.find_label(id_obj) is not None:
+            return attribute
+    return None
 
 
-def _typed_result(catalog: CatalogWorkspaceContent, attribute: Attribute, result_values: list[Any]) -> list[Any]:
+def _typed_result(attributes: list[CatalogAttribute], attribute: Attribute, result_values: list[Any]) -> list[Any]:
     """
     Internal function to convert result_values to proper data types.
 
     Args:
-        catalog (CatalogWorkspaceContent): The catalog workspace content.
+        attributes (list[CatalogAttribute]): The catalog of attributes.
         attribute (Attribute): The attribute for which the typed result will be computed.
         result_values (list[Any]): A list of raw values.
 
     Returns:
         list[Any]: A list of converted values with proper data types.
     """
-    catalog_attribute = catalog.find_label_attribute(attribute.label)
+    catalog_attribute = _find_attribute(attributes, attribute.label)
     if catalog_attribute is None:
         raise ValueError(f"Unable to find attribute {attribute.label} in catalog")
     return [_typed_attribute_value(catalog_attribute, value) for value in result_values]
```
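Besides swapping the full catalog object for a plain attribute list, the metrics-only extraction gains an early return for empty results and is condensed into dict comprehensions. A self-contained sketch of the new logic, with the result object reduced to its `data` arrays:

```python
# Self-contained sketch of the rewritten metrics-only extraction; in the
# package, result_data is ExecutionResult.data (one array per metric).
from typing import Any

def extract_for_metrics_only(result_data: list[list[Any]],
                             cols: list[str],
                             col_to_metric_idx: dict[str, int]) -> dict[str, list[Any]]:
    if len(result_data) == 0:
        return {col: [] for col in cols}  # empty result -> empty column lists
    return {col: [result_data[col_to_metric_idx[col]]] for col in cols}

assert extract_for_metrics_only([], ["m1"], {"m1": 0}) == {"m1": []}
assert extract_for_metrics_only([[42, 7]], ["m1"], {"m1": 0}) == {"m1": [[42, 7]]}
```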
```diff
@@ -339,7 +345,7 @@ def _typed_result(catalog: CatalogWorkspaceContent, attribute: Attribute, result
 
 def _extract_from_attributes_and_maybe_metrics(
     response: ExecutionResponse,
-    catalog: CatalogWorkspaceContent,
+    attributes: list[CatalogAttribute],
     cols: list[str],
     col_to_attr_idx: dict[str, int],
     col_to_metric_idx: dict[str, int],
@@ -382,12 +388,12 @@ def _extract_from_attributes_and_maybe_metrics(
         for idx_name in index:
             rs = result.get_all_header_values(attribute_dim, safe_index_to_attr_idx[idx_name])
             attribute = index_to_attribute[idx_name]
-            index[idx_name] += _typed_result(catalog, attribute, rs)
+            index[idx_name] += _typed_result(attributes, attribute, rs)
         for col in cols:
             if col in col_to_attr_idx:
                 rs = result.get_all_header_values(attribute_dim, col_to_attr_idx[col])
                 attribute = col_to_attribute[col]
-                data[col] += _typed_result(catalog, attribute, rs)
+                data[col] += _typed_result(attributes, attribute, rs)
             elif col_to_metric_idx[col] < len(result.data):
                 data[col] += result.data[col_to_metric_idx[col]]
         if result.is_complete(attribute_dim):
@@ -437,14 +443,18 @@ def compute_and_extract(
     exec_def = response.exec_def
     cols = list(columns.keys())
 
-    catalog = sdk.catalog_workspace_content.get_full_catalog(workspace_id)
-
     if not exec_def.has_attributes():
         return _extract_for_metrics_only(response, cols, col_to_metric_idx), dict()
     else:
+        filter_query = filter_for_attributes_labels(exec_def.attributes)
+        # if there are too many labels, all attributes are fetched and no rsql filter is used;
+        # this prevents 414 Request-URI Too Long
+        attributes = sdk.catalog_workspace_content.get_attributes_catalog(
+            workspace_id, include=["labels", "datasets"], rsql_filter=filter_query
+        )
        return _extract_from_attributes_and_maybe_metrics(
            response,
-            catalog,
+            attributes,
            cols,
            col_to_attr_idx,
            col_to_metric_idx,
```
```diff
@@ -1,8 +1,7 @@
 # (C) 2021 GoodData Corporation
 from __future__ import annotations
 
-from typing import Optional, Tuple, Union
-from warnings import warn
+from typing import Optional, Union
 
 import pandas
 from gooddata_api_client import models
@@ -184,15 +183,6 @@ class DataFrameFactory:
 
         return self.for_items(columns, filter_by=filter_by, auto_index=auto_index)
 
-    def for_insight(self, insight_id: str, auto_index: bool = True) -> pandas.DataFrame:
-        warn(
-            "This method is deprecated and it will be removed in v1.20.0 release. "
-            "Please use 'for_visualization' method instead.",
-            DeprecationWarning,
-            stacklevel=2,
-        )
-        return self.for_visualization(insight_id, auto_index)
-
     def result_cache_metadata_for_exec_result_id(self, result_id: str) -> ResultCacheMetadata:
         """
         Retrieves result cache metadata for given :result_id:
```
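The removal of `for_insight` follows through on its deprecation warning, which promised removal in v1.20.0. Migration is a rename, since the deprecated method simply delegated to `for_visualization` with the same arguments. A sketch (host, token, and ids are placeholders):

```python
# Migration sketch for the removed for_insight; placeholders throughout.
from gooddata_pandas import GoodPandas

frames = GoodPandas(host="https://example.gooddata.com", token="<token>").data_frames("workspace_id")

# before (<= 1.19.x):
# df = frames.for_insight("visualization_id")

# after:
df = frames.for_visualization("visualization_id", auto_index=True)
```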
```diff
@@ -212,7 +202,7 @@ class DataFrameFactory:
         result_size_dimensions_limits: ResultSizeDimensions = (),
         result_size_bytes_limit: Optional[int] = None,
         page_size: int = _DEFAULT_PAGE_SIZE,
-    ) -> Tuple[pandas.DataFrame, DataFrameMetadata]:
+    ) -> tuple[pandas.DataFrame, DataFrameMetadata]:
         """
         Creates a data frame using an execution definition.
 
@@ -269,8 +259,9 @@ class DataFrameFactory:
         result_size_dimensions_limits: ResultSizeDimensions = (),
         result_size_bytes_limit: Optional[int] = None,
         use_local_ids_in_headers: bool = False,
+        use_primary_labels_in_attributes: bool = False,
         page_size: int = _DEFAULT_PAGE_SIZE,
-    ) -> Tuple[pandas.DataFrame, DataFrameMetadata]:
+    ) -> tuple[pandas.DataFrame, DataFrameMetadata]:
         """
         Retrieves a DataFrame and DataFrame metadata for a given execution result identifier.
 
@@ -298,6 +289,7 @@ class DataFrameFactory:
             result_size_dimensions_limits (ResultSizeDimensions): A tuple containing maximum size of result dimensions.
             result_size_bytes_limit (Optional[int]): Maximum size of result in bytes.
             use_local_ids_in_headers (bool): Use local identifier in headers.
+            use_primary_labels_in_attributes (bool): Use primary labels in attributes.
             page_size (int): Number of records per page.
 
         Returns:
@@ -322,5 +314,6 @@ class DataFrameFactory:
             result_size_dimensions_limits=result_size_dimensions_limits,
             result_size_bytes_limit=result_size_bytes_limit,
             use_local_ids_in_headers=use_local_ids_in_headers,
+            use_primary_labels_in_attributes=use_primary_labels_in_attributes,
             page_size=page_size,
         )
```
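The new `use_primary_labels_in_attributes` flag threads from this factory method down to the result conversion (see the result-convertor hunks below). A hedged usage sketch, reusing placeholder credentials:

```python
# Usage sketch for the new flag on for_exec_result_id (the method whose
# signature is extended above); host, token, and ids are placeholders.
from gooddata_pandas import GoodPandas

frames = GoodPandas(host="https://example.gooddata.com", token="<token>").data_frames("workspace_id")
df, metadata = frames.for_exec_result_id(
    result_id="<exec_result_id>",
    use_primary_labels_in_attributes=True,  # attribute headers carry primary label values
)
```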
```diff
@@ -1,14 +1,14 @@
 # (C) 2022 GoodData Corporation
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
+from typing import Any, Callable, Optional, Union, cast
 
 import pandas
 from attrs import define, field, frozen
 from gooddata_sdk import BareExecutionResponse, ExecutionResult, ResultCacheMetadata, ResultSizeDimensions
 
 _DEFAULT_PAGE_SIZE = 100
-_DataHeaders = List[List[Any]]
-_DataArray = List[Union[int, None]]
-LabelOverrides = Dict[str, Dict[str, Dict[str, str]]]
+_DataHeaders = list[list[Any]]
+_DataArray = list[Union[int, None]]
+LabelOverrides = dict[str, dict[str, dict[str, str]]]
 
 
 @frozen
```
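This and the following hunks modernize typing per PEP 585, replacing `typing.List`/`Dict`/`Tuple` with the builtin generics. Note that module-level aliases are evaluated at import time, so subscripting `list`/`dict`/`tuple` directly requires Python 3.9+ (`from __future__ import annotations` only defers annotations, not alias assignments). A compact illustration:

```python
# PEP 585 builtin generics, as used in the new aliases; runs on Python >= 3.9.
from typing import Any, Optional, Union

_DataHeaders = list[list[Any]]
_DataArray = list[Union[int, None]]
LabelOverrides = dict[str, dict[str, dict[str, str]]]

headers: tuple[_DataHeaders, Optional[_DataHeaders]] = ([["a", "b"]], None)
```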
```diff
@@ -26,10 +26,10 @@ class _DataWithHeaders:
         Per-dimension grand total headers.
     """
 
-    data: List[_DataArray]
-    data_headers: Tuple[_DataHeaders, Optional[_DataHeaders]]
-    grand_totals: Tuple[Optional[List[_DataArray]], Optional[List[_DataArray]]]
-    grand_total_headers: Tuple[Optional[List[Dict[str, _DataHeaders]]], Optional[List[Dict[str, _DataHeaders]]]]
+    data: list[_DataArray]
+    data_headers: tuple[_DataHeaders, Optional[_DataHeaders]]
+    grand_totals: tuple[Optional[list[_DataArray]], Optional[list[_DataArray]]]
+    grand_total_headers: tuple[Optional[list[dict[str, _DataHeaders]]], Optional[list[dict[str, _DataHeaders]]]]
 
 
 @define
@@ -46,10 +46,10 @@ class _AccumulatedData:
         grand_totals_headers (List[Optional[_DataHeaders]]): Holds the headers for grand total data arrays.
     """
 
-    data: List[_DataArray] = field(init=False, factory=list)
-    data_headers: List[Optional[_DataHeaders]] = field(init=False, factory=lambda: [None, None])
-    grand_totals: List[Optional[List[_DataArray]]] = field(init=False, factory=lambda: [None, None])
-    grand_totals_headers: List[Optional[List[Dict[str, _DataHeaders]]]] = field(
+    data: list[_DataArray] = field(init=False, factory=list)
+    data_headers: list[Optional[_DataHeaders]] = field(init=False, factory=lambda: [None, None])
+    grand_totals: list[Optional[list[_DataArray]]] = field(init=False, factory=lambda: [None, None])
+    grand_totals_headers: list[Optional[list[dict[str, _DataHeaders]]]] = field(
         init=False, factory=lambda: [None, None]
     )
     total_of_grant_totals_processed: bool = field(init=False, default=False)
@@ -125,7 +125,7 @@ class _AccumulatedData:
         # if dims are empty then data contain total of column and row grandtotals so extend existing data array
         if len(dims) == 0:
             if not self.total_of_grant_totals_processed:
-                grand_totals_item = cast(List[_DataArray], self.grand_totals[0])
+                grand_totals_item = cast(list[_DataArray], self.grand_totals[0])
                 for total_idx, total_data in enumerate(grand_total["data"]):
                     grand_totals_item[total_idx].extend(total_data)
                 self.total_of_grant_totals_processed = True
@@ -151,7 +151,7 @@ class _AccumulatedData:
             # grand totals are already initialized and the code is paging in the direction that reveals
             # additional grand total values; append them accordingly; no need to consider total headers:
             # that is because only the grand total data is subject to paging
-            grand_totals_item = cast(List[_DataArray], self.grand_totals[opposite_dim])
+            grand_totals_item = cast(list[_DataArray], self.grand_totals[opposite_dim])
             if opposite_dim == 0:
                 # have column totals and paging 'to the right'; totals for the new columns are revealed so
                 # extend existing data arrays
@@ -198,14 +198,18 @@ class DataFrameMetadata:
         execution response.
     """
 
-    row_totals_indexes: List[List[int]]
+    row_totals_indexes: list[list[int]]
     execution_response: BareExecutionResponse
+    primary_labels_from_index: dict[int, dict[str, str]]
+    primary_labels_from_columns: dict[int, dict[str, str]]
 
     @classmethod
     def from_data(
         cls,
-        headers: Tuple[_DataHeaders, Optional[_DataHeaders]],
+        headers: tuple[_DataHeaders, Optional[_DataHeaders]],
         execution_response: BareExecutionResponse,
+        primary_labels_from_index: dict[int, dict[str, str]],
+        primary_labels_from_columns: dict[int, dict[str, str]],
     ) -> "DataFrameMetadata":
         """This method constructs a DataFrameMetadata object from data headers and an execution response.
 
@@ -219,6 +223,8 @@ def from_data(
         return cls(
             row_totals_indexes=row_totals_indexes,
             execution_response=execution_response,
+            primary_labels_from_index=primary_labels_from_index,
+            primary_labels_from_columns=primary_labels_from_columns,
        )
 
 
```
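`DataFrameMetadata` now records, for each axis, a per-level mapping from primary label values to the display label values captured while the headers were built. A sketch of consuming these fields (values illustrative):

```python
# Sketch of reading the new metadata fields; the shape is
# {header_level: {primary_label_value: display_label_value}}.
primary_labels_from_index: dict[int, dict[str, str]] = {
    0: {"region/1": "East", "region/2": "West"},  # illustrative values
}

def display_label(level: int, primary_value: str,
                  mapping: dict[int, dict[str, str]]) -> str:
    # fall back to the primary value when no display label was captured
    return mapping.get(level, {}).get(primary_value, primary_value)

assert display_label(0, "region/1", primary_labels_from_index) == "East"
assert display_label(1, "anything", primary_labels_from_index) == "anything"
```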
```diff
@@ -304,8 +310,10 @@ def _read_complete_execution_result(
 def _create_header_mapper(
     response: BareExecutionResponse,
     dim: int,
+    primary_attribute_labels_mapping: dict[int, dict[str, str]],
     label_overrides: Optional[LabelOverrides] = None,
     use_local_ids_in_headers: bool = False,
+    use_primary_labels_in_attributes: bool = False,
 ) -> Callable[[Any, Optional[int]], Optional[str]]:
     """
     Prepares a header mapper function which translates header structures into appropriate labels used
@@ -314,9 +322,12 @@ def _create_header_mapper(
     Args:
         response (BareExecutionResponse): Response structure to gather dimension header details.
         dim (int): Dimension id.
+        primary_attribute_labels_mapping (Dict[int, Dict[str, str]]): Dict to be filled with the mapping of primary
+            labels to custom labels per level (identified by an integer).
         label_overrides (Optional[LabelOverrides]): Label overrides. Defaults to None.
         use_local_ids_in_headers (bool): Use local identifiers of header attributes and metrics. Optional.
             Defaults to False.
+        use_primary_labels_in_attributes (bool): Use primary labels in attributes. Optional. Defaults to False.
 
     Returns:
         Callable[[Any, Optional[int]], Optional[str]]: Mapper function.
@@ -334,7 +345,14 @@ def _create_header_mapper(
             pass
         elif "attributeHeader" in header:
             if "labelValue" in header["attributeHeader"]:
-                label = header["attributeHeader"]["labelValue"]
+                label_value = header["attributeHeader"]["labelValue"]
+                primary_label_value = header["attributeHeader"]["primaryLabelValue"]
+                label = primary_label_value if use_primary_labels_in_attributes else label_value
+                if header_idx is not None:
+                    if header_idx in primary_attribute_labels_mapping:
+                        primary_attribute_labels_mapping[header_idx][primary_label_value] = label_value
+                    else:
+                        primary_attribute_labels_mapping[header_idx] = {primary_label_value: label_value}
                 # explicitly handle '(empty value)' if it's None otherwise it's not recognizable in final MultiIndex
                 # backend represents ^^^ by "" (datasource value is "") or None (datasource value is NULL) therefore
                 # if both representation are used it's necessary to set label to unique header label (space) to avoid
```
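The mapper now reads both `labelValue` and `primaryLabelValue` from each attribute header, returns whichever the `use_primary_labels_in_attributes` flag selects, and records the primary-to-display mapping per header level as a side effect. A distilled sketch, using `dict.setdefault` as a compact equivalent of the hunk's if/else:

```python
# Distilled sketch of the new header-mapping logic; header dicts are stubbed.
def map_attribute_header(header: dict, header_idx: int,
                         mapping: dict[int, dict[str, str]],
                         use_primary: bool) -> str:
    label_value = header["attributeHeader"]["labelValue"]
    primary_label_value = header["attributeHeader"]["primaryLabelValue"]
    # record primary -> display mapping for this header level
    mapping.setdefault(header_idx, {})[primary_label_value] = label_value
    return primary_label_value if use_primary else label_value

mapping: dict[int, dict[str, str]] = {}
h = {"attributeHeader": {"labelValue": "East", "primaryLabelValue": "region/1"}}
assert map_attribute_header(h, 0, mapping, use_primary=True) == "region/1"
assert mapping == {0: {"region/1": "East"}}
```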
```diff
@@ -372,11 +390,12 @@ def _create_header_mapper(
 
 def _headers_to_index(
     dim_idx: int,
-    headers: Tuple[_DataHeaders, Optional[_DataHeaders]],
+    headers: tuple[_DataHeaders, Optional[_DataHeaders]],
     response: BareExecutionResponse,
     label_overrides: LabelOverrides,
     use_local_ids_in_headers: bool = False,
-) -> Optional[pandas.Index]:
+    use_primary_labels_in_attributes: bool = False,
+) -> tuple[Optional[pandas.Index], dict[int, dict[str, str]]]:
     """Converts headers to a pandas MultiIndex.
 
     This function converts the headers present in the response to a pandas MultiIndex (can be used in pandas dataframes)
@@ -388,18 +407,26 @@ def _headers_to_index(
         response (BareExecutionResponse): The execution response object with all data.
         label_overrides (LabelOverrides): A dictionary containing label overrides for the headers.
         use_local_ids_in_headers (bool, optional): If True, uses local Ids in headers, otherwise not. Defaults to False.
+        use_primary_labels_in_attributes (bool, optional): If True, uses primary labels in attributes, otherwise not.
+            Defaults to False.
 
     Returns:
-        Optional[pandas.Index]: A pandas MultiIndex object created from the headers, or None if the headers are empty.
+        Tuple[Optional[pandas.Index], Dict[int, Dict[str, str]]]: A pandas MultiIndex object created from the headers
+            together with the primary attribute labels mapping, or None with an empty dict if the headers are empty.
     """
+    # dict of primary labels and their custom labels for attributes, keyed by level
+    primary_attribute_labels_mapping: dict[int, dict[str, str]] = {}
+
     if len(response.dimensions) <= dim_idx or not len(response.dimensions[dim_idx]["headers"]):
-        return None
+        return None, primary_attribute_labels_mapping
 
     mapper = _create_header_mapper(
         response=response,
         dim=dim_idx,
         label_overrides=label_overrides,
         use_local_ids_in_headers=use_local_ids_in_headers,
+        use_primary_labels_in_attributes=use_primary_labels_in_attributes,
+        primary_attribute_labels_mapping=primary_attribute_labels_mapping,
     )
 
     return pandas.MultiIndex.from_arrays(
```
```diff
@@ -408,10 +435,10 @@ def _headers_to_index(
             for header_idx, header_group in enumerate(cast(_DataHeaders, headers[dim_idx]))
         ],
         names=[mapper(dim_header, None) for dim_header in (response.dimensions[dim_idx]["headers"])],
-    )
+    ), primary_attribute_labels_mapping
 
 
-def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray, List[_DataArray]]:
+def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray, list[_DataArray]]:
     """
     Merges grand totals into the extracted data. This function will mutate the extracted data,
     extending the rows and columns with grand totals. Going with mutation here so as not to copy arrays around.
@@ -422,7 +449,7 @@ def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray
     Returns:
         Union[_DataArray, List[_DataArray]]: Mutated data with rows and columns extended with grand totals.
     """
-    data: List[_DataArray] = extract.data
+    data: list[_DataArray] = extract.data
 
     if extract.grand_totals[0] is not None:
         # column totals are computed into extra rows, one row per column total
@@ -438,7 +465,7 @@ def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray
     return data
 
 
-def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> Tuple[_DataHeaders, Optional[_DataHeaders]]:
+def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> tuple[_DataHeaders, Optional[_DataHeaders]]:
     """Merges grand total headers into data headers. This function will mutate the extracted data.
 
     Args:
@@ -448,12 +475,12 @@ def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> Tuple[
     Returns:
         Tuple[_DataHeaders, Optional[_DataHeaders]]:
             A tuple containing the modified data headers and the grand total headers if present.
     """
-    headers: Tuple[_DataHeaders, Optional[_DataHeaders]] = extract.data_headers
+    headers: tuple[_DataHeaders, Optional[_DataHeaders]] = extract.data_headers
 
     for dim_idx, grand_total_headers in enumerate(extract.grand_total_headers):
         if grand_total_headers is None:
             continue
-        header = cast(List[List[Any]], headers[dim_idx])
+        header = cast(list[list[Any]], headers[dim_idx])
         for level, grand_total_header in enumerate(grand_total_headers):
             header[level].extend(grand_total_header["headers"])
 
@@ -467,8 +494,9 @@ def convert_execution_response_to_dataframe(
     result_size_dimensions_limits: ResultSizeDimensions,
     result_size_bytes_limit: Optional[int] = None,
     use_local_ids_in_headers: bool = False,
+    use_primary_labels_in_attributes: bool = False,
     page_size: int = _DEFAULT_PAGE_SIZE,
-) -> Tuple[pandas.DataFrame, DataFrameMetadata]:
+) -> tuple[pandas.DataFrame, DataFrameMetadata]:
     """
     Converts execution result to a pandas dataframe, maintaining the dimensionality of the result.
 
@@ -480,6 +508,8 @@ def convert_execution_response_to_dataframe(
         result_size_dimensions_limits (ResultSizeDimensions): Dimension limits for the dataframe.
         result_size_bytes_limit (Optional[int], default=None): Size limit in bytes for the dataframe.
         use_local_ids_in_headers (bool, default=False): Use local ids in headers if True, else use default settings.
+        use_primary_labels_in_attributes (bool, default=False): Use primary labels in attributes if True, else use
+            default settings.
         page_size (int, default=_DEFAULT_PAGE_SIZE): Size of the page.
 
     Returns:
@@ -495,22 +525,33 @@ def convert_execution_response_to_dataframe(
     full_data = _merge_grand_totals_into_data(extract)
     full_headers = _merge_grand_total_headers_into_headers(extract)
 
+    index, primary_labels_from_index = _headers_to_index(
+        dim_idx=0,
+        headers=full_headers,
+        response=execution_response,
+        label_overrides=label_overrides,
+        use_local_ids_in_headers=use_local_ids_in_headers,
+        use_primary_labels_in_attributes=use_primary_labels_in_attributes,
+    )
+
+    columns, primary_labels_from_columns = _headers_to_index(
+        dim_idx=1,
+        headers=full_headers,
+        response=execution_response,
+        label_overrides=label_overrides,
+        use_local_ids_in_headers=use_local_ids_in_headers,
+        use_primary_labels_in_attributes=use_primary_labels_in_attributes,
+    )
+
     df = pandas.DataFrame(
         data=full_data,
-        index=_headers_to_index(
-            dim_idx=0,
-            headers=full_headers,
-            response=execution_response,
-            label_overrides=label_overrides,
-            use_local_ids_in_headers=use_local_ids_in_headers,
-        ),
-        columns=_headers_to_index(
-            dim_idx=1,
-            headers=full_headers,
-            response=execution_response,
-            label_overrides=label_overrides,
-            use_local_ids_in_headers=use_local_ids_in_headers,
-        ),
+        index=index,
+        columns=columns,
    )
 
-    return df, DataFrameMetadata.from_data(headers=full_headers, execution_response=execution_response)
+    return df, DataFrameMetadata.from_data(
+        headers=full_headers,
+        execution_response=execution_response,
+        primary_labels_from_index=primary_labels_from_index,
+        primary_labels_from_columns=primary_labels_from_columns,
+    )
```
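With `_headers_to_index` now returning an `(index, mapping)` pair per dimension, the conversion builds the DataFrame from the unpacked indexes and forwards both mappings into `DataFrameMetadata.from_data`. A stubbed sketch of the new contract:

```python
# Stubbed sketch of the two-value _headers_to_index contract and how the
# results feed the DataFrame; the real call sites are in the hunk above.
from typing import Optional

import pandas

def headers_to_index_stub(dim_idx: int) -> tuple[Optional[pandas.Index], dict[int, dict[str, str]]]:
    if dim_idx == 1:
        return None, {}  # e.g. a result with no column headers
    idx = pandas.MultiIndex.from_arrays([["East", "West"]], names=["region"])
    return idx, {0: {"region/1": "East", "region/2": "West"}}

index, primary_labels_from_index = headers_to_index_stub(0)
columns, primary_labels_from_columns = headers_to_index_stub(1)
df = pandas.DataFrame(data=[[1.0], [2.0]], index=index, columns=columns)
```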
gooddata_pandas/utils.py CHANGED
```diff
@@ -3,7 +3,7 @@ from __future__ import annotations
 
 import hashlib
 import uuid
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 import pandas
 from gooddata_sdk import (
@@ -20,8 +20,8 @@ from pandas import Index, MultiIndex
 
 LabelItemDef = Union[Attribute, ObjId, str]
 DataItemDef = Union[Attribute, Metric, ObjId, str]
-IndexDef = Union[LabelItemDef, Dict[str, LabelItemDef]]
-ColumnsDef = Dict[str, DataItemDef]
+IndexDef = Union[LabelItemDef, dict[str, LabelItemDef]]
+ColumnsDef = dict[str, DataItemDef]
 
 # register external pandas types to converters
 IntegerConverter.set_external_fnc(lambda self, value: pandas.to_numeric(value))
```