gooddata-pandas: 1.24.0-py3-none-any.whl → 1.25.0-py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of gooddata-pandas might be problematic.

gooddata_pandas/_version.py CHANGED
@@ -1,7 +1,4 @@
  # (C) 2021 GoodData Corporation
- try:
-     from importlib import metadata
- except ImportError:
-     import importlib_metadata as metadata  # type: ignore # mypy issue #1153
+ from importlib import metadata

  __version__: str = metadata.version("gooddata-pandas")
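Side note: importlib.metadata has been in the standard library since Python 3.8, so with the package's Python floor moving to 3.9 (see the METADATA change below) the importlib_metadata backport fallback is dead code. A minimal sketch of reading an installed version with the stdlib module alone:

    from importlib import metadata

    try:
        print(metadata.version("gooddata-pandas"))
    except metadata.PackageNotFoundError:
        # the distribution is not installed in the current environment
        print("gooddata-pandas is not installed")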
gooddata_pandas/data_access.py CHANGED
@@ -119,10 +119,7 @@ class ExecutionDefinitionBuilder:
          if index_by is None:
              return

-         if not isinstance(index_by, dict):
-             _index_by = {self._DEFAULT_INDEX_NAME: index_by}
-         else:
-             _index_by = index_by
+         _index_by = {self._DEFAULT_INDEX_NAME: index_by} if not isinstance(index_by, dict) else index_by

          for index_name, index_def in _index_by.items():
              if isinstance(index_def, str) and (index_def in self._col_to_attr_idx):
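This hunk only collapses the dict-or-scalar normalization into a conditional expression; behavior is unchanged. A standalone sketch of the same pattern with illustrative names (the real _DEFAULT_INDEX_NAME value is not shown in this diff):

    from typing import Union

    _DEFAULT_INDEX_NAME = "index"  # placeholder value, for illustration only

    def normalize_index_by(index_by: Union[str, dict]) -> dict:
        # wrap a single label in a dict keyed by the default index name; pass dicts through untouched
        return {_DEFAULT_INDEX_NAME: index_by} if not isinstance(index_by, dict) else index_by

    print(normalize_index_by("label/region"))           # {'index': 'label/region'}
    print(normalize_index_by({"reg": "label/region"}))  # {'reg': 'label/region'}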
@@ -208,11 +205,14 @@ class ExecutionDefinitionBuilder:
                      raise ValueError(f"AttributeFilter instance referencing metric [{_filter.label}]")
                  else:
                      _filter.label = _str_to_obj_id(_filter.label) or _filter.label
-             elif isinstance(_filter, MetricValueFilter) and isinstance(_filter.metric, str):
-                 if _filter.metric in self._col_to_metric_idx:
-                     # Metric is referenced by local_id which was already generated during creation of columns
-                     # When Metric filter contains ObjId reference, it does not need to be modified
-                     _filter.metric = self._metrics[self._col_to_metric_idx[_filter.metric]].local_id
+             elif (
+                 isinstance(_filter, MetricValueFilter)
+                 and isinstance(_filter.metric, str)
+                 and _filter.metric in self._col_to_metric_idx
+             ):
+                 # Metric is referenced by local_id which was already generated during creation of columns
+                 # When Metric filter contains ObjId reference, it does not need to be modified
+                 _filter.metric = self._metrics[self._col_to_metric_idx[_filter.metric]].local_id

          return filters

gooddata_pandas/dataframe.py CHANGED
@@ -1,7 +1,7 @@
  # (C) 2021 GoodData Corporation
  from __future__ import annotations

- from typing import Optional, Tuple, Union
+ from typing import Optional, Union

  import pandas
  from gooddata_api_client import models
@@ -202,7 +202,7 @@ class DataFrameFactory:
          result_size_dimensions_limits: ResultSizeDimensions = (),
          result_size_bytes_limit: Optional[int] = None,
          page_size: int = _DEFAULT_PAGE_SIZE,
-     ) -> Tuple[pandas.DataFrame, DataFrameMetadata]:
+     ) -> tuple[pandas.DataFrame, DataFrameMetadata]:
          """
          Creates a data frame using an execution definition.

@@ -261,7 +261,7 @@ class DataFrameFactory:
          use_local_ids_in_headers: bool = False,
          use_primary_labels_in_attributes: bool = False,
          page_size: int = _DEFAULT_PAGE_SIZE,
-     ) -> Tuple[pandas.DataFrame, DataFrameMetadata]:
+     ) -> tuple[pandas.DataFrame, DataFrameMetadata]:
          """
          Retrieves a DataFrame and DataFrame metadata for a given execution result identifier.

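The Tuple → tuple changes here (and the List/Dict changes below) are the PEP 585 cleanup enabled by the new Python >= 3.9 floor: built-in collection types are subscriptable in annotations, so the typing aliases are no longer needed. A minimal before/after sketch with an illustrative function that is not part of the package:

    # Python 3.8 style
    from typing import Dict, List, Tuple

    def pair_up(items: List[Tuple[str, int]]) -> Dict[str, int]:
        return dict(items)

    # Python 3.9+ style (PEP 585); no typing imports required
    def pair_up_585(items: list[tuple[str, int]]) -> dict[str, int]:
        return dict(items)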
gooddata_pandas/result_convertor.py CHANGED
@@ -1,14 +1,14 @@
  # (C) 2022 GoodData Corporation
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
+ from typing import Any, Callable, Optional, Union, cast

  import pandas
  from attrs import define, field, frozen
  from gooddata_sdk import BareExecutionResponse, ExecutionResult, ResultCacheMetadata, ResultSizeDimensions

  _DEFAULT_PAGE_SIZE = 100
- _DataHeaders = List[List[Any]]
- _DataArray = List[Union[int, None]]
- LabelOverrides = Dict[str, Dict[str, Dict[str, str]]]
+ _DataHeaders = list[list[Any]]
+ _DataArray = list[Union[int, None]]
+ LabelOverrides = dict[str, dict[str, dict[str, str]]]


  @frozen
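Unlike annotations, module-level aliases such as _DataHeaders = list[list[Any]] are ordinary assignments evaluated at import time, so they genuinely need Python 3.9+; from __future__ import annotations would not cover them. A small sketch of the distinction, assuming a 3.9+ interpreter for the last line:

    from __future__ import annotations  # defers evaluation of annotations only

    def total(xs: list[list[int]]) -> int:  # accepted even on 3.7/3.8 thanks to the future import
        return sum(len(x) for x in xs)

    Rows = list[list[int]]  # a real expression evaluated at import time: requires Python 3.9+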
@@ -26,10 +26,10 @@ class _DataWithHeaders:
          Per-dimension grand total headers.
      """

-     data: List[_DataArray]
-     data_headers: Tuple[_DataHeaders, Optional[_DataHeaders]]
-     grand_totals: Tuple[Optional[List[_DataArray]], Optional[List[_DataArray]]]
-     grand_total_headers: Tuple[Optional[List[Dict[str, _DataHeaders]]], Optional[List[Dict[str, _DataHeaders]]]]
+     data: list[_DataArray]
+     data_headers: tuple[_DataHeaders, Optional[_DataHeaders]]
+     grand_totals: tuple[Optional[list[_DataArray]], Optional[list[_DataArray]]]
+     grand_total_headers: tuple[Optional[list[dict[str, _DataHeaders]]], Optional[list[dict[str, _DataHeaders]]]]


  @define
@@ -46,10 +46,10 @@ class _AccumulatedData:
          grand_totals_headers (List[Optional[_DataHeaders]]): Holds the headers for grand total data arrays.
      """

-     data: List[_DataArray] = field(init=False, factory=list)
-     data_headers: List[Optional[_DataHeaders]] = field(init=False, factory=lambda: [None, None])
-     grand_totals: List[Optional[List[_DataArray]]] = field(init=False, factory=lambda: [None, None])
-     grand_totals_headers: List[Optional[List[Dict[str, _DataHeaders]]]] = field(
+     data: list[_DataArray] = field(init=False, factory=list)
+     data_headers: list[Optional[_DataHeaders]] = field(init=False, factory=lambda: [None, None])
+     grand_totals: list[Optional[list[_DataArray]]] = field(init=False, factory=lambda: [None, None])
+     grand_totals_headers: list[Optional[list[dict[str, _DataHeaders]]]] = field(
          init=False, factory=lambda: [None, None]
      )
      total_of_grant_totals_processed: bool = field(init=False, default=False)
@@ -125,7 +125,7 @@ class _AccumulatedData:
          # if dims are empty then data contain total of column and row grandtotals so extend existing data array
          if len(dims) == 0:
              if not self.total_of_grant_totals_processed:
-                 grand_totals_item = cast(List[_DataArray], self.grand_totals[0])
+                 grand_totals_item = cast(list[_DataArray], self.grand_totals[0])
                  for total_idx, total_data in enumerate(grand_total["data"]):
                      grand_totals_item[total_idx].extend(total_data)
                  self.total_of_grant_totals_processed = True
@@ -151,7 +151,7 @@ class _AccumulatedData:
              # grand totals are already initialized and the code is paging in the direction that reveals
              # additional grand total values; append them accordingly; no need to consider total headers:
              # that is because only the grand total data is subject to paging
-             grand_totals_item = cast(List[_DataArray], self.grand_totals[opposite_dim])
+             grand_totals_item = cast(list[_DataArray], self.grand_totals[opposite_dim])
              if opposite_dim == 0:
                  # have column totals and paging 'to the right'; totals for the new columns are revealed so
                  # extend existing data arrays
@@ -198,18 +198,18 @@ class DataFrameMetadata:
          execution response.
      """

-     row_totals_indexes: List[List[int]]
+     row_totals_indexes: list[list[int]]
      execution_response: BareExecutionResponse
-     primary_labels_from_index: Dict[int, Dict[str, str]]
-     primary_labels_from_columns: Dict[int, Dict[str, str]]
+     primary_labels_from_index: dict[int, dict[str, str]]
+     primary_labels_from_columns: dict[int, dict[str, str]]

      @classmethod
      def from_data(
          cls,
-         headers: Tuple[_DataHeaders, Optional[_DataHeaders]],
+         headers: tuple[_DataHeaders, Optional[_DataHeaders]],
          execution_response: BareExecutionResponse,
-         primary_labels_from_index: Dict[int, Dict[str, str]],
-         primary_labels_from_columns: Dict[int, Dict[str, str]],
+         primary_labels_from_index: dict[int, dict[str, str]],
+         primary_labels_from_columns: dict[int, dict[str, str]],
      ) -> "DataFrameMetadata":
          """This method constructs a DataFrameMetadata object from data headers and an execution response.

@@ -310,7 +310,7 @@ def _read_complete_execution_result(
  def _create_header_mapper(
      response: BareExecutionResponse,
      dim: int,
-     primary_attribute_labels_mapping: Dict[int, Dict[str, str]],
+     primary_attribute_labels_mapping: dict[int, dict[str, str]],
      label_overrides: Optional[LabelOverrides] = None,
      use_local_ids_in_headers: bool = False,
      use_primary_labels_in_attributes: bool = False,
@@ -347,10 +347,7 @@ def _create_header_mapper(
              if "labelValue" in header["attributeHeader"]:
                  label_value = header["attributeHeader"]["labelValue"]
                  primary_label_value = header["attributeHeader"]["primaryLabelValue"]
-                 if use_primary_labels_in_attributes:
-                     label = primary_label_value
-                 else:
-                     label = label_value
+                 label = primary_label_value if use_primary_labels_in_attributes else label_value
                  if header_idx is not None:
                      if header_idx in primary_attribute_labels_mapping:
                          primary_attribute_labels_mapping[header_idx][primary_label_value] = label_value
@@ -393,12 +390,12 @@ def _create_header_mapper(

  def _headers_to_index(
      dim_idx: int,
-     headers: Tuple[_DataHeaders, Optional[_DataHeaders]],
+     headers: tuple[_DataHeaders, Optional[_DataHeaders]],
      response: BareExecutionResponse,
      label_overrides: LabelOverrides,
      use_local_ids_in_headers: bool = False,
      use_primary_labels_in_attributes: bool = False,
- ) -> Tuple[Optional[pandas.Index], Dict[int, Dict[str, str]]]:
+ ) -> tuple[Optional[pandas.Index], dict[int, dict[str, str]]]:
      """Converts headers to a pandas MultiIndex.

      This function converts the headers present in the response to a pandas MultiIndex (can be used in pandas dataframes)
@@ -418,7 +415,7 @@ def _headers_to_index(
          with primary attribute labels mapping as Dict, or None with empty Dict if the headers are empty.
      """
      # dict of primary labels and it's custom labels for attributes per level as key
-     primary_attribute_labels_mapping: Dict[int, Dict[str, str]] = {}
+     primary_attribute_labels_mapping: dict[int, dict[str, str]] = {}

      if len(response.dimensions) <= dim_idx or not len(response.dimensions[dim_idx]["headers"]):
          return None, primary_attribute_labels_mapping
@@ -441,7 +438,7 @@ def _headers_to_index(
      ), primary_attribute_labels_mapping


- def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray, List[_DataArray]]:
+ def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray, list[_DataArray]]:
      """
      Merges grand totals into the extracted data. This function will mutate the extracted data,
      extending the rows and columns with grand totals. Going with mutation here so as not to copy arrays around.
@@ -452,7 +449,7 @@ def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray
      Returns:
          Union[_DataArray, List[_DataArray]]: Mutated data with rows and columns extended with grand totals.
      """
-     data: List[_DataArray] = extract.data
+     data: list[_DataArray] = extract.data

      if extract.grand_totals[0] is not None:
          # column totals are computed into extra rows, one row per column total
@@ -468,7 +465,7 @@ def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray
      return data


- def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> Tuple[_DataHeaders, Optional[_DataHeaders]]:
+ def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> tuple[_DataHeaders, Optional[_DataHeaders]]:
      """Merges grand total headers into data headers. This function will mutate the extracted data.

      Args:
@@ -478,12 +475,12 @@ def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> Tuple[
          Tuple[_DataHeaders, Optional[_DataHeaders]]:
              A tuple containing the modified data headers and the grand total headers if present.
      """
-     headers: Tuple[_DataHeaders, Optional[_DataHeaders]] = extract.data_headers
+     headers: tuple[_DataHeaders, Optional[_DataHeaders]] = extract.data_headers

      for dim_idx, grand_total_headers in enumerate(extract.grand_total_headers):
          if grand_total_headers is None:
              continue
-         header = cast(List[List[Any]], headers[dim_idx])
+         header = cast(list[list[Any]], headers[dim_idx])
          for level, grand_total_header in enumerate(grand_total_headers):
              header[level].extend(grand_total_header["headers"])

@@ -499,7 +496,7 @@ def convert_execution_response_to_dataframe(
      use_local_ids_in_headers: bool = False,
      use_primary_labels_in_attributes: bool = False,
      page_size: int = _DEFAULT_PAGE_SIZE,
- ) -> Tuple[pandas.DataFrame, DataFrameMetadata]:
+ ) -> tuple[pandas.DataFrame, DataFrameMetadata]:
      """
      Converts execution result to a pandas dataframe, maintaining the dimensionality of the result.

gooddata_pandas/utils.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations

  import hashlib
  import uuid
- from typing import Any, Dict, Optional, Union
+ from typing import Any, Optional, Union

  import pandas
  from gooddata_sdk import (
@@ -20,8 +20,8 @@ from pandas import Index, MultiIndex

  LabelItemDef = Union[Attribute, ObjId, str]
  DataItemDef = Union[Attribute, Metric, ObjId, str]
- IndexDef = Union[LabelItemDef, Dict[str, LabelItemDef]]
- ColumnsDef = Dict[str, DataItemDef]
+ IndexDef = Union[LabelItemDef, dict[str, LabelItemDef]]
+ ColumnsDef = dict[str, DataItemDef]

  # register external pandas types to converters
  IntegerConverter.set_external_fnc(lambda self, value: pandas.to_numeric(value))
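The IndexDef and ColumnsDef aliases describe the shapes accepted by the indexed-frame APIs. A minimal usage sketch based on the package's documented GoodPandas entry point; the host, token, workspace id, and item identifiers are placeholders:

    from gooddata_pandas import GoodPandas

    gp = GoodPandas(host="https://example.gooddata.com", token="<api-token>")
    frames = gp.data_frames("demo_workspace")

    # IndexDef: a single label (or a dict of named labels); ColumnsDef: column name -> metric or label
    df = frames.indexed(
        index_by="label/region",
        columns={"sales": "metric/total_sales"},
    )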
gooddata_pandas-1.25.0.dist-info/METADATA CHANGED
@@ -1,17 +1,16 @@
  Metadata-Version: 2.1
  Name: gooddata-pandas
- Version: 1.24.0
+ Version: 1.25.0
  Summary: GoodData Cloud to pandas
  Author: GoodData
  Author-email: support@gooddata.com
  License: MIT
- Project-URL: Documentation, https://gooddata-pandas.readthedocs.io/en/v1.24.0
+ Project-URL: Documentation, https://gooddata-pandas.readthedocs.io/en/v1.25.0
  Project-URL: Source, https://github.com/gooddata/gooddata-python-sdk
  Keywords: gooddata,pandas,series,data,frame,data_frame,analytics,headless,business,intelligence,headless-bi,cloud,native,semantic,layer,sql,metrics
  Classifier: Development Status :: 5 - Production/Stable
  Classifier: Environment :: Console
  Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
@@ -20,10 +19,10 @@ Classifier: Topic :: Database
  Classifier: Topic :: Scientific/Engineering
  Classifier: Topic :: Software Development
  Classifier: Typing :: Typed
- Requires-Python: >=3.8.0
+ Requires-Python: >=3.9.0
  Description-Content-Type: text/markdown
  License-File: LICENSE.txt
- Requires-Dist: gooddata-sdk ~=1.24.0
+ Requires-Dist: gooddata-sdk ~=1.25.0
  Requires-Dist: pandas <3.0.0,>=2.0.0

  # GoodData Pandas
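The dependency pin uses PEP 440's compatible-release operator: gooddata-sdk ~=1.25.0 means >=1.25.0, ==1.25.*, so the SDK and this wrapper track the same minor version. A quick check with the packaging library, used here purely for illustration:

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet("~=1.25.0")  # compatible release: >=1.25.0, ==1.25.*
    print("1.25.3" in spec)          # True
    print("1.26.0" in spec)          # False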
@@ -38,7 +37,7 @@ See [DOCUMENTATION](https://gooddata-pandas.readthedocs.io/en/latest/) for more
  - GoodData.CN installation; either running on your cloud
    infrastructure or the free Community Edition running on your workstation

- - Python 3.8 or newer
+ - Python 3.9 or newer

  ## Installation

gooddata_pandas-1.25.0.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
+ gooddata_pandas/__init__.py,sha256=Ta3qIIDq7kBRUsYSV3aC69AQBFvFvhtWDQucgP-l88w,297
+ gooddata_pandas/_version.py,sha256=960vTs6l7xsN2BOXWCxOc4PSKdzzKhnNEPTMnmMTCQs,119
+ gooddata_pandas/data_access.py,sha256=nkXYBCnDh8h02n15bk6KUnWWOEtqhU_uTthMnY6f9Ag,18750
+ gooddata_pandas/dataframe.py,sha256=SeNIx-tlf7PjmGppkhNDPEl8RQN6BRKOC3UJ89uFnjo,13000
+ gooddata_pandas/good_pandas.py,sha256=ePEm2Lmeiftz5td0BLC71q7my5Aj8aABn3xV0myRmqI,3444
+ gooddata_pandas/py.typed,sha256=u_MS29sadlaIqGRPYFjWml5u0gQnoQfvbsf9pu3TZJU,94
+ gooddata_pandas/result_convertor.py,sha256=r7uFrjeM6cxMy08YcS3LywF1iUPSyEyG3BAddh0DkIQ,25807
+ gooddata_pandas/series.py,sha256=wTvJR_I0FUteyxo4RwHzP20eU7rei0dP8ZdqfrLbf5c,5759
+ gooddata_pandas/utils.py,sha256=JQZOuGjDfpPZqBnz-KdN8EwjzXXTbQCONWmUEE3cY9M,7217
+ gooddata_pandas-1.25.0.dist-info/LICENSE.txt,sha256=3RjzQk8y9HG1_LgqvbEqWZKJnTQGOO1cpzYzBc13Myk,149825
+ gooddata_pandas-1.25.0.dist-info/METADATA,sha256=c-gXNBSVZU_WfYLZLS1zt0-s4MzHvTeDz70IoeGM4W0,2828
+ gooddata_pandas-1.25.0.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91
+ gooddata_pandas-1.25.0.dist-info/top_level.txt,sha256=B7K_WFxlxplJbEbv5Mf0YhX74dbOpTPgDX-W6I7CssI,16
+ gooddata_pandas-1.25.0.dist-info/RECORD,,
gooddata_pandas-1.25.0.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (72.1.0)
+ Generator: setuptools (73.0.1)
  Root-Is-Purelib: true
  Tag: py3-none-any

gooddata_pandas-1.24.0.dist-info/RECORD DELETED
@@ -1,14 +0,0 @@
- gooddata_pandas/__init__.py,sha256=Ta3qIIDq7kBRUsYSV3aC69AQBFvFvhtWDQucgP-l88w,297
- gooddata_pandas/_version.py,sha256=YxaAwfP9Yw10sit_vyhVfHJQBAzUUwtm5p0BItfBFx8,225
- gooddata_pandas/data_access.py,sha256=X8NKYtwWKFEfXvgrUbybuQmg1cub5pAhFtDCFMyzffY,18748
- gooddata_pandas/dataframe.py,sha256=YwjkuO6PzqSfQTaB1Bsn2VuQHvxyiYL75xIW2iI7t3M,13007
- gooddata_pandas/good_pandas.py,sha256=ePEm2Lmeiftz5td0BLC71q7my5Aj8aABn3xV0myRmqI,3444
- gooddata_pandas/py.typed,sha256=u_MS29sadlaIqGRPYFjWml5u0gQnoQfvbsf9pu3TZJU,94
- gooddata_pandas/result_convertor.py,sha256=6k9-Z6Jgtej2yPcR2iftKd2c6e8OwSEDkiil2o-zjP0,25892
- gooddata_pandas/series.py,sha256=wTvJR_I0FUteyxo4RwHzP20eU7rei0dP8ZdqfrLbf5c,5759
- gooddata_pandas/utils.py,sha256=PpkB6oWacRxYY9S-RbEZm9Jdblo4bgAzrmHzV9MlMPQ,7223
- gooddata_pandas-1.24.0.dist-info/LICENSE.txt,sha256=3RjzQk8y9HG1_LgqvbEqWZKJnTQGOO1cpzYzBc13Myk,149825
- gooddata_pandas-1.24.0.dist-info/METADATA,sha256=KU_mdywEg9E5BasDyZW9-_GVLaQqUzZv7_mGDcX95Cs,2878
- gooddata_pandas-1.24.0.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
- gooddata_pandas-1.24.0.dist-info/top_level.txt,sha256=B7K_WFxlxplJbEbv5Mf0YhX74dbOpTPgDX-W6I7CssI,16
- gooddata_pandas-1.24.0.dist-info/RECORD,,