gooddata-pandas 1.49.0__tar.gz → 1.49.1.dev2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gooddata-pandas might be problematic.

Files changed (21)
  1. {gooddata_pandas-1.49.0/gooddata_pandas.egg-info → gooddata_pandas-1.49.1.dev2}/PKG-INFO +3 -3
  2. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas/dataframe.py +18 -0
  3. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas/result_convertor.py +304 -47
  4. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2/gooddata_pandas.egg-info}/PKG-INFO +3 -3
  5. gooddata_pandas-1.49.1.dev2/gooddata_pandas.egg-info/requires.txt +2 -0
  6. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/setup.py +3 -3
  7. gooddata_pandas-1.49.0/gooddata_pandas.egg-info/requires.txt +0 -2
  8. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/LICENSE.txt +0 -0
  9. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/MANIFEST.in +0 -0
  10. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/README.md +0 -0
  11. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas/__init__.py +0 -0
  12. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas/_version.py +0 -0
  13. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas/data_access.py +0 -0
  14. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas/good_pandas.py +0 -0
  15. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas/py.typed +0 -0
  16. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas/series.py +0 -0
  17. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas/utils.py +0 -0
  18. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas.egg-info/SOURCES.txt +0 -0
  19. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas.egg-info/dependency_links.txt +0 -0
  20. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/gooddata_pandas.egg-info/top_level.txt +0 -0
  21. {gooddata_pandas-1.49.0 → gooddata_pandas-1.49.1.dev2}/setup.cfg +0 -0
@@ -1,11 +1,11 @@
  Metadata-Version: 2.4
  Name: gooddata-pandas
- Version: 1.49.0
+ Version: 1.49.1.dev2
  Summary: GoodData Cloud to pandas
  Author: GoodData
  Author-email: support@gooddata.com
  License: MIT
- Project-URL: Documentation, https://gooddata-pandas.readthedocs.io/en/v1.49.0
+ Project-URL: Documentation, https://gooddata-pandas.readthedocs.io/en/v1.49.1.dev2
  Project-URL: Source, https://github.com/gooddata/gooddata-python-sdk
  Keywords: gooddata,pandas,series,data,frame,data_frame,analytics,headless,business,intelligence,headless-bi,cloud,native,semantic,layer,sql,metrics
  Classifier: Development Status :: 5 - Production/Stable
@@ -23,7 +23,7 @@ Classifier: Typing :: Typed
  Requires-Python: >=3.9.0
  Description-Content-Type: text/markdown
  License-File: LICENSE.txt
- Requires-Dist: gooddata-sdk~=1.49.0
+ Requires-Dist: gooddata-sdk~=1.49.1.dev2
  Requires-Dist: pandas<3.0.0,>=2.0.0
  Dynamic: author
  Dynamic: author-email
@@ -238,6 +238,7 @@ class DataFrameFactory:
          created_visualizations_response: dict,
          on_execution_submitted: Optional[Callable[[Execution], None]] = None,
          is_cancellable: bool = False,
+         optimized: bool = False,
      ) -> tuple[pandas.DataFrame, DataFrameMetadata]:
          """
          Creates a data frame using a created visualization.
@@ -247,6 +248,10 @@ class DataFrameFactory:
              on_execution_submitted (Optional[Callable[[Execution], None]]): Callback to call when the execution was
                  submitted to the backend.
              is_cancellable (bool, optional): Whether the execution should be cancelled when the connection is interrupted.
+             optimized (bool, default=False): Use the memory-optimized accumulator if True; by default, the
+                 accumulator stores headers in memory as lists of dicts, which can consume a lot of memory for large
+                 results. The optimized accumulator stores only unique header values and keeps only references to
+                 them in the list, which can significantly reduce memory usage.
 
          Returns:
              pandas.DataFrame: A DataFrame instance.
@@ -257,6 +262,7 @@ class DataFrameFactory:
          return self.for_exec_def(
              exec_def=execution_definition,
              on_execution_submitted=on_execution_submitted,
+             optimized=optimized,
          )
 
      def result_cache_metadata_for_exec_result_id(self, result_id: str) -> ResultCacheMetadata:
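The new optimized flag is threaded from these factory methods down into the result convertor. A minimal usage sketch of opting in (the host, token, workspace id, and response_dict below are placeholders, not values taken from this diff):

    from gooddata_pandas import GoodPandas

    gp = GoodPandas(host="https://example.gooddata.com", token="<token>")  # placeholder credentials
    frames = gp.data_frames("my_workspace_id")  # placeholder workspace id

    # response_dict is assumed to be a created-visualizations response obtained elsewhere
    df, metadata = frames.for_created_visualization(
        created_visualizations_response=response_dict,
        optimized=True,  # opt in to the memory-optimized header accumulator
    )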
@@ -279,6 +285,7 @@ class DataFrameFactory:
          result_size_bytes_limit: Optional[int] = None,
          page_size: int = _DEFAULT_PAGE_SIZE,
          on_execution_submitted: Optional[Callable[[Execution], None]] = None,
+         optimized: bool = False,
      ) -> tuple[pandas.DataFrame, DataFrameMetadata]:
          """
          Creates a data frame using an execution definition.
@@ -311,6 +318,10 @@ class DataFrameFactory:
              page_size (int): Number of records per page.
              on_execution_submitted (Optional[Callable[[Execution], None]]): Callback to call when the execution was
                  submitted to the backend.
+             optimized (bool, default=False): Use the memory-optimized accumulator if True; by default, the
+                 accumulator stores headers in memory as lists of dicts, which can consume a lot of memory for large
+                 results. The optimized accumulator stores only unique header values and keeps only references to
+                 them in the list, which can significantly reduce memory usage.
 
          Returns:
              Tuple[pandas.DataFrame, DataFrameMetadata]: Tuple holding DataFrame and DataFrame metadata.
@@ -331,6 +342,7 @@ class DataFrameFactory:
              result_size_dimensions_limits=result_size_dimensions_limits,
              result_size_bytes_limit=result_size_bytes_limit,
              page_size=page_size,
+             optimized=optimized,
          )
 
      def for_exec_result_id(
@@ -343,6 +355,7 @@ class DataFrameFactory:
          use_local_ids_in_headers: bool = False,
          use_primary_labels_in_attributes: bool = False,
          page_size: int = _DEFAULT_PAGE_SIZE,
+         optimized: bool = False,
      ) -> tuple[pandas.DataFrame, DataFrameMetadata]:
          """
          Retrieves a DataFrame and DataFrame metadata for a given execution result identifier.
@@ -373,6 +386,10 @@ class DataFrameFactory:
              use_local_ids_in_headers (bool): Use local identifier in headers.
              use_primary_labels_in_attributes (bool): Use primary labels in attributes.
              page_size (int): Number of records per page.
+             optimized (bool, default=False): Use the memory-optimized accumulator if True; by default, the
+                 accumulator stores headers in memory as lists of dicts, which can consume a lot of memory for large
+                 results. The optimized accumulator stores only unique header values and keeps only references to
+                 them in the list, which can significantly reduce memory usage.
 
          Returns:
              Tuple[pandas.DataFrame, DataFrameMetadata]: Tuple holding DataFrame and DataFrame metadata.
@@ -398,4 +415,5 @@ class DataFrameFactory:
              use_local_ids_in_headers=use_local_ids_in_headers,
              use_primary_labels_in_attributes=use_primary_labels_in_attributes,
              page_size=page_size,
+             optimized=optimized,
          )
@@ -1,4 +1,7 @@
  # (C) 2022 GoodData Corporation
+ from abc import ABC, abstractmethod
+ from collections.abc import Iterator
+ from functools import cached_property
  from typing import Any, Callable, Optional, Union, cast
 
  import pandas
@@ -11,6 +14,163 @@ _DataArray = list[Union[int, None]]
  LabelOverrides = dict[str, dict[str, dict[str, str]]]
 
 
+ @define(frozen=True, slots=True)
+ class _Header(ABC):
+     """
+     Abstract base class for headers. There are 4 types of headers:
+     - attribute header with attribute value and primary label value
+     - attribute header with label name and label identifier
+     - measure header
+     - total header
+
+     We convert the dict representation to _Header objects with slots to improve memory usage.
+     """
+
+     @cached_property
+     @abstractmethod
+     def _dict(self) -> dict[str, Any]:
+         pass
+
+     def get(self, key: str, default: Optional[Any] = None) -> Optional[Any]:
+         return self._dict.get(key, default)
+
+
+ @define(frozen=True, slots=True)
+ class _AttributeValuePrimary(_Header):
+     """
+     Attribute header with label value and primary label value.
+     """
+
+     label_value: str
+     primary_label_value: str
+
+     @cached_property
+     def _dict(self) -> dict[str, Any]:
+         return {"attributeHeader": {"labelValue": self.label_value, "primaryLabelValue": self.primary_label_value}}
+
+
+ @define(frozen=True, slots=True)
+ class _AttributeNameLocal(_Header):
+     """
+     Attribute header with label name and label identifier.
+     """
+
+     label_name: str
+     local_identifier: str
+
+     @cached_property
+     def _dict(self) -> dict[str, Any]:
+         return {"attributeHeader": {"labelName": self.label_name, "localIdentifier": self.local_identifier}}
+
+
+ @define(frozen=True, slots=True)
+ class _MeasureHeader(_Header):
+     """
+     Measure header.
+     """
+
+     measure_index: str
+
+     @cached_property
+     def _dict(self) -> dict[str, Any]:
+         return {"measureHeader": {"measureIndex": self.measure_index}}
+
+
+ @define(frozen=True, slots=True)
+ class _TotalHeader(_Header):
+     """
+     Total header.
+     """
+
+     function: str
+
+     @cached_property
+     def _dict(self) -> dict[str, Any]:
+         return {"totalHeader": {"function": self.function}}
+
+
+ def _header_from_dict(d: dict[str, Any]) -> Optional[_Header]:
+     """
+     Convert a dict representation to a _Header object.
+
+     :param d: dictionary representation of a header
+     :return: _Header object, or None if the dictionary does not represent a header or the header type
+         is not supported. However, we expect that all execution results contain correct data.
+     """
+     if attribute_header := d.get("attributeHeader"):
+         if "labelValue" in attribute_header:
+             return _AttributeValuePrimary(
+                 label_value=attribute_header["labelValue"], primary_label_value=attribute_header["primaryLabelValue"]
+             )
+         if "labelName" in attribute_header:
+             return _AttributeNameLocal(
+                 label_name=attribute_header["labelName"], local_identifier=attribute_header["localIdentifier"]
+             )
+         return None
+
+     if measure_header := d.get("measureHeader"):
+         return _MeasureHeader(measure_header["measureIndex"])
+
+     if total_header := d.get("totalHeader"):
+         return _TotalHeader(total_header["function"])
+
+     return None
+
+
+ @define
+ class _HeaderContainer:
+     """
+     Container for headers that improves memory usage.
+     Unique headers are stored as keys in _header_cache and references to them are stored in _headers.
+     This way we avoid storing the same header multiple times, reducing memory allocations,
+     which is important for large data tables with many attributes.
+     """
+
+     _headers: list[_Header] = field(factory=list)
+     _header_cache: dict[_Header, _Header] = field(factory=dict)
+
+     def append(self, header_dict: dict) -> None:
+         """
+         Add a header to the container.
+
+         First, try to convert the header dict to a _Header object, returning early if that is not possible.
+         Then check whether the header is already cached: if it is, append a reference to the existing
+         instance; if it is not, cache it first and then append it.
+         """
+
+         header = _header_from_dict(header_dict)
+         if header is None:
+             return
+
+         if header not in self._header_cache:
+             self._header_cache[header] = header
+         self._headers.append(self._header_cache[header])
+
+     def extend(self, header_dicts: list[dict]) -> None:
+         """
+         Add multiple headers to the container.
+         """
+         for header_dict in header_dicts:
+             self.append(header_dict)
+
+     def __iter__(self) -> Iterator[_Header]:
+         yield from self._headers
+
+     def __len__(self) -> int:
+         return len(self._headers)
+
+     def __getitem__(self, index: int) -> _Header:
+         return self._headers[index]
+
+
+ _DataHeaderContainers = list[_HeaderContainer]
+
+ # Optimized version of _DataWithHeaders uses _HeaderContainer instead of a list of headers
+ _HeadersByAxis = tuple[
+     Union[_DataHeaders, _DataHeaderContainers], Union[Optional[_DataHeaders], Optional[_DataHeaderContainers]]
+ ]
+
+
  @frozen
  class _DataWithHeaders:
      """Extracted data; either array of values for one-dimensional result or array of arrays of values.
@@ -18,7 +178,7 @@ class _DataWithHeaders:
      Attributes:
          data (List[_DataArray]):
              Extracted data; either array of values for one-dimensional result or array of arrays of values.
-         data_headers (Tuple[_DataHeaders, Optional[_DataHeaders]]):
+         data_headers (_HeadersByAxis):
              Per-dimension headers for the data.
          grand_totals (Tuple[Optional[List[_DataArray]], Optional[List[_DataArray]]]):
              Per-dimension grand total data.
@@ -27,32 +187,34 @@ class _DataWithHeaders:
      """
 
      data: list[_DataArray]
-     data_headers: tuple[_DataHeaders, Optional[_DataHeaders]]
+     data_headers: _HeadersByAxis
      grand_totals: tuple[Optional[list[_DataArray]], Optional[list[_DataArray]]]
      grand_total_headers: tuple[Optional[list[dict[str, _DataHeaders]]], Optional[list[dict[str, _DataHeaders]]]]
 
 
  @define
- class _AccumulatedData:
+ class _AbstractAccumulatedData(ABC):
      """
      Utility class to offload code from the function that extracts all data and headers for a
      particular paged result. The method drives the paging and calls out to this class to accumulate
      the essential data and headers from the page.
+     Note that if optimized is enabled, the data_headers are stored in _HeaderContainer objects instead
+     of lists of headers. We do not store grand_totals_headers in _HeaderContainer, as we do not expect
+     them to be large.
 
      Attributes:
          data (List[_DataArray]): Holds the accumulated data arrays from the pages.
-         data_headers (List[Optional[_DataHeaders]]): Holds the headers for data arrays.
+         data_headers (List[Optional[Any]]): Holds the headers for data arrays.
          grand_totals (List[Optional[List[_DataArray]]]): Holds the grand total data arrays.
          grand_totals_headers (List[Optional[_DataHeaders]]): Holds the headers for grand total data arrays.
      """
 
      data: list[_DataArray] = field(init=False, factory=list)
-     data_headers: list[Optional[_DataHeaders]] = field(init=False, factory=lambda: [None, None])
+     data_headers: list[Optional[Any]] = field(init=False, factory=lambda: [None, None])
      grand_totals: list[Optional[list[_DataArray]]] = field(init=False, factory=lambda: [None, None])
+     total_of_grant_totals_processed: bool = field(init=False, default=False)
      grand_totals_headers: list[Optional[list[dict[str, _DataHeaders]]]] = field(
          init=False, factory=lambda: [None, None]
      )
-     total_of_grant_totals_processed: bool = field(init=False, default=False)
 
      def accumulate_data(self, from_result: ExecutionResult) -> None:
          """
@@ -79,24 +241,6 @@ class _AccumulatedData:
              for i in range(len(from_result.data)):
                  self.data[offset + i].extend(from_result.data[i])
 
-     def accumulate_headers(self, from_result: ExecutionResult, from_dim: int) -> None:
-         """
-         Accumulate headers for a particular dimension of a result into the provided `data_headers` array at the index
-         matching the dimension index.
-
-         This will mutate the `data_headers`.
-
-         Args:
-             from_result (ExecutionResult): The result whose headers will be accumulated.
-             from_dim (int): The dimension index.
-         """
-
-         if self.data_headers[from_dim] is None:
-             self.data_headers[from_dim] = from_result.get_all_headers(dim=from_dim)
-         else:
-             for idx, headers in enumerate(from_result.get_all_headers(dim=from_dim)):
-                 cast(_DataHeaders, self.data_headers[from_dim])[idx].extend(headers)
-
      def accumulate_grand_totals(
          self, from_result: ExecutionResult, paging_dim: int, response: BareExecutionResponse
      ) -> None:
@@ -161,6 +305,56 @@ class _AccumulatedData:
                  # have row totals and paging down, keep adding extra rows
                  grand_totals_item.extend(grand_total["data"])
 
+     @abstractmethod
+     def accumulate_headers(self, from_result: ExecutionResult, from_dim: int) -> None:
+         """
+         Accumulate headers for a particular dimension of a result into the provided `data_headers` array at the index
+         matching the dimension index.
+
+         This will mutate the `data_headers`.
+
+         Args:
+             from_result (ExecutionResult): The result whose headers will be accumulated.
+             from_dim (int): The dimension index.
+         """
+
+     @abstractmethod
+     def result(self) -> _DataWithHeaders:
+         """
+         Returns the data with headers.
+
+         Returns:
+             _DataWithHeaders: The data, data headers, grand totals and grand total headers.
+         """
+
+
+ @define
+ class _AccumulatedData(_AbstractAccumulatedData):
+     """
+     Implementation of _AbstractAccumulatedData that uses lists of dicts as storage;
+     used when non-optimized data extraction is selected.
+
+     This implementation may lead to uncontrolled memory usage for large results.
+     """
+
+     def accumulate_headers(self, from_result: ExecutionResult, from_dim: int) -> None:
+         """
+         Accumulate headers for a particular dimension of a result into the provided `data_headers` array at the index
+         matching the dimension index.
+
+         This will mutate the `data_headers`.
+
+         Args:
+             from_result (ExecutionResult): The result whose headers will be accumulated.
+             from_dim (int): The dimension index.
+         """
+
+         if self.data_headers[from_dim] is None:
+             self.data_headers[from_dim] = from_result.get_all_headers(dim=from_dim)
+         else:
+             for idx, headers in enumerate(from_result.get_all_headers(dim=from_dim)):
+                 cast(_DataHeaders, self.data_headers[from_dim])[idx].extend(headers)
+
      def result(self) -> _DataWithHeaders:
          """
          Returns the data with headers.
@@ -176,6 +370,55 @@ class _AccumulatedData:
          )
 
 
+ @define
+ class _OptimizedAccumulatedData(_AbstractAccumulatedData):
+     """
+     Implementation of _AbstractAccumulatedData that stores headers in _HeaderContainer objects;
+     used when optimized data extraction is selected.
+
+     This implementation is more memory efficient than _AccumulatedData.
+     """
+
+     def accumulate_headers(self, from_result: ExecutionResult, from_dim: int) -> None:
+         """
+         Accumulate headers for a particular dimension of a result into the provided `data_headers` array at the index
+         matching the dimension index.
+
+         This will mutate the `data_headers`.
+
+         Args:
+             from_result (ExecutionResult): The result whose headers will be accumulated.
+             from_dim (int): The dimension index.
+         """
+
+         if containers := self.data_headers[from_dim]:
+             for idx, headers in enumerate(from_result.get_all_headers(dim=from_dim)):
+                 containers[idx].extend(headers)
+         else:
+             containers = []
+             for headers in from_result.get_all_headers(dim=from_dim):
+                 hc = _HeaderContainer()
+                 hc.extend(headers)
+                 containers.append(hc)
+             self.data_headers[from_dim] = containers
+
+     def result(self) -> _DataWithHeaders:
+         """
+         Returns the data with headers.
+
+         Returns:
+             _DataWithHeaders: The data, data headers, grand totals and grand total headers.
+         """
+
+         return _DataWithHeaders(
+             data=self.data,
+             data_headers=(cast(_DataHeaderContainers, self.data_headers[0]), self.data_headers[1]),
+             grand_totals=(self.grand_totals[0], self.grand_totals[1]),
+             grand_total_headers=(self.grand_totals_headers[0], self.grand_totals_headers[1]),
+         )
+
+
  @define
  class DataFrameMetadata:
      """
@@ -206,19 +449,20 @@ class DataFrameMetadata:
      @classmethod
      def from_data(
          cls,
-         headers: tuple[_DataHeaders, Optional[_DataHeaders]],
+         headers: _HeadersByAxis,
          execution_response: BareExecutionResponse,
          primary_labels_from_index: dict[int, dict[str, str]],
          primary_labels_from_columns: dict[int, dict[str, str]],
      ) -> "DataFrameMetadata":
          """This method constructs a DataFrameMetadata object from data headers and an execution response.
 
-         Args: headers (Tuple[_DataHeaders, Optional[_DataHeaders]]):
+         Args: headers (_HeadersByAxis):
              A tuple containing data headers. execution_response (BareExecutionResponse): An ExecutionResponse object.
 
          Returns: DataFrameMetadata: An initialized DataFrameMetadata object."""
          row_totals_indexes = [
-             [idx for idx, hdr in enumerate(dim) if hdr is not None and "totalHeader" in hdr] for dim in headers[0]
+             [idx for idx, hdr in enumerate(dim) if hdr is not None and hdr.get("totalHeader") is not None]
+             for dim in headers[0]
          ]
          return cls(
              row_totals_indexes=row_totals_indexes,
@@ -234,6 +478,7 @@ def _read_complete_execution_result(
      result_size_dimensions_limits: ResultSizeDimensions,
      result_size_bytes_limit: Optional[int] = None,
      page_size: int = _DEFAULT_PAGE_SIZE,
+     optimized: bool = False,
  ) -> _DataWithHeaders:
      """
      Extracts all data and headers for an execution result. This does page around the execution result to extract
@@ -245,6 +490,10 @@ def _read_complete_execution_result(
          result_size_dimensions_limits (ResultSizeDimensions): Limits for result size dimensions.
          result_size_bytes_limit (Optional[int], optional): Limit for result size in bytes. Defaults to None.
          page_size (int, optional): Page size to use when reading data. Defaults to _DEFAULT_PAGE_SIZE.
+         optimized (bool, default=False): Use the memory-optimized accumulator if True; by default, the accumulator
+             stores headers in memory as lists of dicts, which can consume a lot of memory for large results.
+             The optimized accumulator stores only unique header values and keeps only references to them in the
+             list, which can significantly reduce memory usage.
 
      Returns:
          _DataWithHeaders: All the data and headers from the execution result.
@@ -252,10 +501,10 @@ def _read_complete_execution_result(
      num_dims = len(execution_response.dimensions)
      offset = [0] * num_dims
      limit = [page_size] * num_dims
-     acc = _AccumulatedData()
 
-     result_size_limits_checked = False
+     acc = _OptimizedAccumulatedData() if optimized else _AccumulatedData()
 
+     result_size_limits_checked = False
      while True:
          # top-level loop pages through the first dimension;
          #
@@ -303,7 +552,6 @@ def _read_complete_execution_result(
              break
          offset = [result.next_page_start(dim=0), 0] if num_dims > 1 else [result.next_page_start(dim=0)]
 
-
      return acc.result()
 
 
@@ -339,14 +587,14 @@ def _create_header_mapper(
      attribute_labels = label_overrides.get("labels", {})
      measure_labels = label_overrides.get("metrics", {})
 
-     def _mapper(header: Any, header_idx: Optional[int]) -> Optional[str]:
+     def _mapper(header: Union[dict, _Header, None], header_idx: Optional[int]) -> Optional[str]:
          label = None
          if header is None:
              pass
-         elif "attributeHeader" in header:
-             if "labelValue" in header["attributeHeader"]:
-                 label_value = header["attributeHeader"]["labelValue"]
-                 primary_label_value = header["attributeHeader"]["primaryLabelValue"]
+         elif attribute_header := header.get("attributeHeader"):
+             if "labelValue" in attribute_header:
+                 label_value = attribute_header["labelValue"]
+                 primary_label_value = attribute_header["primaryLabelValue"]
                  label = primary_label_value if use_primary_labels_in_attributes else label_value
                  if header_idx is not None:
                      if header_idx in primary_attribute_labels_mapping:
@@ -359,17 +607,18 @@ def _create_header_mapper(
                  # Excel formatter apply call failure
                  if label is None:
                      label = " "
-             elif "labelName" in header["attributeHeader"]:
-                 attr_local_id = header["attributeHeader"]["localIdentifier"]
+             elif "labelName" in attribute_header:
+                 attr_local_id = attribute_header["localIdentifier"]
                  if use_local_ids_in_headers:
                      label = attr_local_id
                  else:
                      if attr_local_id in attribute_labels:
                          label = attribute_labels[attr_local_id]["title"]
                      else:
-                         label = header["attributeHeader"]["labelName"]
-         elif "measureHeader" in header and header_idx is not None:
-             measure_idx = header["measureHeader"]["measureIndex"]
+                         label = attribute_header["labelName"]
+
+         elif (measure_header := header.get("measureHeader")) and header_idx is not None:
+             measure_idx = measure_header["measureIndex"]
              measure_descriptor = dim_descriptor["headers"][header_idx]["measureGroupHeaders"][measure_idx]
 
              if use_local_ids_in_headers:
@@ -381,8 +630,9 @@ def _create_header_mapper(
                      label = measure_descriptor["name"]
              else:
                  label = measure_descriptor["localIdentifier"]
-         elif "totalHeader" in header:
-             label = header["totalHeader"]["function"]
+
+         elif total_header := header.get("totalHeader"):
+             label = total_header["function"]
          return label
 
      return _mapper
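The reworked _mapper relies on a small duck-typing contract: plain header dicts and the new _Header objects both expose .get(key, default), so the same lookup code handles either shape. A self-contained sketch (TotalHeaderSketch is an illustrative stand-in for the module's _TotalHeader):

    from typing import Any, Optional

    class TotalHeaderSketch:
        """Stand-in for _TotalHeader; only .get() matters to the mapper."""

        def __init__(self, function: str) -> None:
            self._data = {"totalHeader": {"function": function}}

        def get(self, key: str, default: Optional[Any] = None) -> Optional[Any]:
            return self._data.get(key, default)

    for header in ({"totalHeader": {"function": "sum"}}, TotalHeaderSketch("sum")):
        if total_header := header.get("totalHeader"):
            print(total_header["function"])  # prints "sum" for both shapes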
@@ -390,7 +640,7 @@ def _create_header_mapper(
 
  def _headers_to_index(
      dim_idx: int,
-     headers: tuple[_DataHeaders, Optional[_DataHeaders]],
+     headers: _HeadersByAxis,
      response: BareExecutionResponse,
      label_overrides: LabelOverrides,
      use_local_ids_in_headers: bool = False,
@@ -432,7 +682,7 @@ def _headers_to_index(
      return pandas.MultiIndex.from_arrays(
          [
              tuple(mapper(header, header_idx) for header in header_group)
-             for header_idx, header_group in enumerate(cast(_DataHeaders, headers[dim_idx]))
+             for header_idx, header_group in enumerate(cast(list, headers[dim_idx]))
          ],
          names=[mapper(dim_header, None) for dim_header in (response.dimensions[dim_idx]["headers"])],
      ), primary_attribute_labels_mapping
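For orientation, _headers_to_index ultimately feeds the mapped header labels into pandas.MultiIndex.from_arrays, one array per header group. A tiny runnable illustration with made-up values:

    import pandas

    index = pandas.MultiIndex.from_arrays(
        [
            ("East", "East", "West"),  # first header group, e.g. an attribute
            ("sum", "max", "sum"),     # second header group, e.g. total functions
        ],
        names=["Region", "Function"],
    )
    print(index)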
@@ -465,17 +715,17 @@ def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArra
      return data
 
 
- def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> tuple[_DataHeaders, Optional[_DataHeaders]]:
+ def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> _HeadersByAxis:
      """Merges grand total headers into data headers. This function will mutate the extracted data.
 
      Args:
          extract (_DataWithHeaders): The data along with its headers that need to be merged.
 
      Returns:
-         Tuple[_DataHeaders, Optional[_DataHeaders]]:
+         _HeadersByAxis:
              A tuple containing the modified data headers and the grand total headers if present.
      """
-     headers: tuple[_DataHeaders, Optional[_DataHeaders]] = extract.data_headers
+     headers: _HeadersByAxis = extract.data_headers
 
      for dim_idx, grand_total_headers in enumerate(extract.grand_total_headers):
          if grand_total_headers is None:
@@ -496,6 +746,7 @@ def convert_execution_response_to_dataframe(
      use_local_ids_in_headers: bool = False,
      use_primary_labels_in_attributes: bool = False,
      page_size: int = _DEFAULT_PAGE_SIZE,
+     optimized: bool = False,
  ) -> tuple[pandas.DataFrame, DataFrameMetadata]:
      """
      Converts execution result to a pandas dataframe, maintaining the dimensionality of the result.
@@ -511,6 +762,10 @@ def convert_execution_response_to_dataframe(
          use_primary_labels_in_attributes (bool, default=False): Use primary labels in attributes if True, else use
              default settings.
          page_size (int, default=_DEFAULT_PAGE_SIZE): Size of the page.
+         optimized (bool, default=False): Use the memory-optimized accumulator if True; by default, the accumulator
+             stores headers in memory as lists of dicts, which can consume a lot of memory for large results.
+             The optimized accumulator stores only unique header values and keeps only references to them in the
+             list, which can significantly reduce memory usage.
 
      Returns:
          Tuple[pandas.DataFrame, DataFrameMetadata]: A tuple containing the created dataframe and its metadata.
@@ -521,7 +776,9 @@ def convert_execution_response_to_dataframe(
          result_size_dimensions_limits=result_size_dimensions_limits,
          result_size_bytes_limit=result_size_bytes_limit,
          page_size=page_size,
+         optimized=optimized,
      )
+
      full_data = _merge_grand_totals_into_data(extract)
      full_headers = _merge_grand_total_headers_into_headers(extract)
 
@@ -1,11 +1,11 @@
  Metadata-Version: 2.4
  Name: gooddata-pandas
- Version: 1.49.0
+ Version: 1.49.1.dev2
  Summary: GoodData Cloud to pandas
  Author: GoodData
  Author-email: support@gooddata.com
  License: MIT
- Project-URL: Documentation, https://gooddata-pandas.readthedocs.io/en/v1.49.0
+ Project-URL: Documentation, https://gooddata-pandas.readthedocs.io/en/v1.49.1.dev2
  Project-URL: Source, https://github.com/gooddata/gooddata-python-sdk
  Keywords: gooddata,pandas,series,data,frame,data_frame,analytics,headless,business,intelligence,headless-bi,cloud,native,semantic,layer,sql,metrics
  Classifier: Development Status :: 5 - Production/Stable
@@ -23,7 +23,7 @@ Classifier: Typing :: Typed
  Requires-Python: >=3.9.0
  Description-Content-Type: text/markdown
  License-File: LICENSE.txt
- Requires-Dist: gooddata-sdk~=1.49.0
+ Requires-Dist: gooddata-sdk~=1.49.1.dev2
  Requires-Dist: pandas<3.0.0,>=2.0.0
  Dynamic: author
  Dynamic: author-email
@@ -0,0 +1,2 @@
+ gooddata-sdk~=1.49.1.dev2
+ pandas<3.0.0,>=2.0.0
@@ -7,7 +7,7 @@ this_directory = Path(__file__).parent
  long_description = (this_directory / "README.md").read_text(encoding="utf-8")
 
  REQUIRES = [
-     "gooddata-sdk~=1.49.0",
+     "gooddata-sdk~=1.49.1.dev2",
      "pandas>=2.0.0,<3.0.0",
  ]
 
@@ -16,7 +16,7 @@ setup(
      description="GoodData Cloud to pandas",
      long_description=long_description,
      long_description_content_type="text/markdown",
-     version="1.49.0",
+     version="1.49.1.dev2",
      author="GoodData",
      author_email="support@gooddata.com",
      license="MIT",
@@ -26,7 +26,7 @@ setup(
      packages=find_packages(exclude=["tests*"]),
      python_requires=">=3.9.0",
      project_urls={
-         "Documentation": "https://gooddata-pandas.readthedocs.io/en/v1.49.0",
+         "Documentation": "https://gooddata-pandas.readthedocs.io/en/v1.49.1.dev2",
          "Source": "https://github.com/gooddata/gooddata-python-sdk",
      },
      classifiers=[
@@ -1,2 +0,0 @@
- gooddata-sdk~=1.49.0
- pandas<3.0.0,>=2.0.0