gooddata-pandas 1.10.0__py3-none-any.whl → 1.21.1.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gooddata-pandas might be problematic.

gooddata_pandas/data_access.py CHANGED
@@ -3,15 +3,6 @@ from __future__ import annotations
 
 from typing import Any, Optional, Union
 
-from gooddata_pandas.utils import (
-    ColumnsDef,
-    IndexDef,
-    LabelItemDef,
-    _str_to_obj_id,
-    _to_attribute,
-    _to_item,
-    _typed_attribute_value,
-)
 from gooddata_sdk import (
     Attribute,
     AttributeFilter,
@@ -23,6 +14,17 @@ from gooddata_sdk import (
     Metric,
     MetricValueFilter,
     ObjId,
+    TableDimension,
+)
+
+from gooddata_pandas.utils import (
+    ColumnsDef,
+    IndexDef,
+    LabelItemDef,
+    _str_to_obj_id,
+    _to_attribute,
+    _to_item,
+    _typed_attribute_value,
 )
 
 
@@ -229,8 +231,12 @@ class ExecutionDefinitionBuilder:
         and dimensions.
         """
         dimensions = [
-            ["measureGroup"] if len(self._metrics) else None,
-            [a.local_id for a in self._attributes] if len(self._attributes) else None,
+            TableDimension(
+                item_ids=["measureGroup"] if self._metrics else None,
+            ),
+            TableDimension(
+                item_ids=[a.local_id for a in self._attributes] if self._attributes else None,
+            ),
         ]
 
         filters = self._update_filter_ids(filter_by)
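
The dimension spec now builds gooddata_sdk.TableDimension objects instead of raw lists of item ids. A minimal sketch of the new shape, using made-up local ids (only TableDimension and its item_ids keyword come from the diff):

# Sketch only: the local ids below are placeholders, not values from this package.
from gooddata_sdk import TableDimension

metric_local_ids = ["total_revenue"]        # hypothetical metric local ids
attribute_local_ids = ["label.region"]      # hypothetical attribute local ids

dimensions = [
    # dimension 0 holds the measure group when any metrics are requested
    TableDimension(item_ids=["measureGroup"] if metric_local_ids else None),
    # dimension 1 holds the attribute local ids, or None when there are none
    TableDimension(item_ids=attribute_local_ids if attribute_local_ids else None),
]
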
gooddata_pandas/dataframe.py CHANGED
@@ -4,8 +4,17 @@ from __future__ import annotations
 from typing import Optional, Tuple, Union
 
 import pandas
-
 from gooddata_api_client import models
+from gooddata_sdk import (
+    Attribute,
+    BareExecutionResponse,
+    ExecutionDefinition,
+    Filter,
+    GoodDataSdk,
+    ResultCacheMetadata,
+    ResultSizeDimensions,
+)
+
 from gooddata_pandas.data_access import compute_and_extract
 from gooddata_pandas.result_convertor import (
     _DEFAULT_PAGE_SIZE,
@@ -15,21 +24,12 @@ from gooddata_pandas.result_convertor import (
 )
 from gooddata_pandas.utils import (
     ColumnsDef,
-    DefaultInsightColumnNaming,
+    DefaultVisualizationColumnNaming,
     IndexDef,
     LabelItemDef,
     _to_item,
     make_pandas_index,
 )
-from gooddata_sdk import (
-    Attribute,
-    BareExecutionResponse,
-    ExecutionDefinition,
-    Filter,
-    GoodDataSdk,
-    ResultCacheMetadata,
-    ResultSizeDimensions,
-)
 
 
 class DataFrameFactory:
@@ -43,7 +43,7 @@ class DataFrameFactory:
            -> pandas.DataFrame:
     - for_items(self, items: ColumnsDef, filter_by: Optional[Union[Filter, list[Filter]]] = None,
            auto_index: bool = True) -> pandas.DataFrame:
-    - for_insight(self, insight_id: str, auto_index: bool = True)
+    - for_visualization(self, visualization_id: str, auto_index: bool = True)
            -> pandas.DataFrame:
     - result_cache_metadata_for_exec_result_id(self, result_id: str)
            -> ResultCacheMetadata:
@@ -159,24 +159,26 @@ class DataFrameFactory:
             filter_by=filter_by,
         )
 
-    def for_insight(self, insight_id: str, auto_index: bool = True) -> pandas.DataFrame:
+    def for_visualization(self, visualization_id: str, auto_index: bool = True) -> pandas.DataFrame:
         """
-        Creates a data frame with columns based on the content of the insight with the provided identifier.
+        Creates a data frame with columns based on the content of the visualization with the provided identifier.
 
         Args:
-            insight_id (str): Insight identifier.
+            visualization_id (str): Visualization identifier.
             auto_index (bool): Default True. Enables creation of DataFrame with index depending on the contents
-                of the insight.
+                of the visualization.
 
         Returns:
             pandas.DataFrame: A DataFrame instance.
         """
-        naming = DefaultInsightColumnNaming()
-        insight = self._sdk.insights.get_insight(workspace_id=self._workspace_id, insight_id=insight_id)
-        filter_by = [f.as_computable() for f in insight.filters]
+        naming = DefaultVisualizationColumnNaming()
+        visualization = self._sdk.visualizations.get_visualization(
+            workspace_id=self._workspace_id, visualization_id=visualization_id
+        )
+        filter_by = [f.as_computable() for f in visualization.filters]
         columns: ColumnsDef = {
-            **{naming.col_name_for_attribute(a): a.as_computable() for a in insight.attributes},
-            **{naming.col_name_for_metric(m): m.as_computable() for m in insight.metrics},
+            **{naming.col_name_for_attribute(a): a.as_computable() for a in visualization.attributes},
+            **{naming.col_name_for_metric(m): m.as_computable() for m in visualization.metrics},
         }
 
         return self.for_items(columns, filter_by=filter_by, auto_index=auto_index)
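
For callers, the practical change is the rename of for_insight to for_visualization (with insights becoming visualizations on the SDK side). A hedged usage sketch; the host, token, workspace id and visualization id are placeholders, and the GoodPandas entry point is assumed from the package's public API rather than shown in this diff:

# Usage sketch with placeholder credentials and ids.
from gooddata_pandas import GoodPandas

gp = GoodPandas(host="https://example.gooddata.com", token="<api-token>")
frames = gp.data_frames("demo_workspace_id")

# 1.10.0:  df = frames.for_insight("revenue-by-region")
df = frames.for_visualization("revenue-by-region", auto_index=True)
print(df.head())
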
@@ -257,6 +259,7 @@ class DataFrameFactory:
         result_size_dimensions_limits: ResultSizeDimensions = (),
         result_size_bytes_limit: Optional[int] = None,
         use_local_ids_in_headers: bool = False,
+        use_primary_labels_in_attributes: bool = False,
         page_size: int = _DEFAULT_PAGE_SIZE,
     ) -> Tuple[pandas.DataFrame, DataFrameMetadata]:
         """
@@ -286,6 +289,7 @@ class DataFrameFactory:
             result_size_dimensions_limits (ResultSizeDimensions): A tuple containing maximum size of result dimensions.
             result_size_bytes_limit (Optional[int]): Maximum size of result in bytes.
             use_local_ids_in_headers (bool): Use local identifier in headers.
+            use_primary_labels_in_attributes (bool): Use primary labels in attributes.
             page_size (int): Number of records per page.
 
         Returns:
@@ -310,5 +314,6 @@ class DataFrameFactory:
             result_size_dimensions_limits=result_size_dimensions_limits,
             result_size_bytes_limit=result_size_bytes_limit,
             use_local_ids_in_headers=use_local_ids_in_headers,
+            use_primary_labels_in_attributes=use_primary_labels_in_attributes,
             page_size=page_size,
         )
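
These three hunks thread a new use_primary_labels_in_attributes flag from a DataFrameFactory method down to the result conversion; when enabled, attribute headers keep the primary label values instead of the display labels. The enclosing method's name is not visible in the hunks; the sketch below assumes it is the frame-plus-metadata method that takes an execution result id (placeholder value):

# Hedged sketch: the method name for_exec_result_id is assumed, not shown in
# these hunks; result_id is a placeholder.
df, metadata = frames.for_exec_result_id(
    result_id="<execution-result-id>",
    use_primary_labels_in_attributes=True,  # keep primary label values in attribute headers
)
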
gooddata_pandas/good_pandas.py CHANGED
@@ -4,11 +4,12 @@ from __future__ import annotations
 from pathlib import Path
 from typing import Optional
 
+from gooddata_sdk import GoodDataSdk
+from gooddata_sdk.utils import PROFILES_FILE_PATH, good_pandas_profile_content
+
 from gooddata_pandas import __version__
 from gooddata_pandas.dataframe import DataFrameFactory
 from gooddata_pandas.series import SeriesFactory
-from gooddata_sdk import GoodDataSdk
-from gooddata_sdk.utils import PROFILES_FILE_PATH, good_pandas_profile_content
 
 USER_AGENT = f"gooddata-pandas/{__version__}"
 """Extra segment of the User-Agent header that will be appended to standard gooddata-sdk user agent."""
gooddata_pandas/result_convertor.py CHANGED
@@ -3,7 +3,6 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
 
 import pandas
 from attrs import define, field, frozen
-
 from gooddata_sdk import BareExecutionResponse, ExecutionResult, ResultCacheMetadata, ResultSizeDimensions
 
 _DEFAULT_PAGE_SIZE = 100
@@ -30,7 +29,7 @@ class _DataWithHeaders:
     data: List[_DataArray]
     data_headers: Tuple[_DataHeaders, Optional[_DataHeaders]]
     grand_totals: Tuple[Optional[List[_DataArray]], Optional[List[_DataArray]]]
-    grand_total_headers: Tuple[Optional[_DataHeaders], Optional[_DataHeaders]]
+    grand_total_headers: Tuple[Optional[List[Dict[str, _DataHeaders]]], Optional[List[Dict[str, _DataHeaders]]]]
 
 
 @define
@@ -50,7 +49,10 @@ class _AccumulatedData:
     data: List[_DataArray] = field(init=False, factory=list)
     data_headers: List[Optional[_DataHeaders]] = field(init=False, factory=lambda: [None, None])
     grand_totals: List[Optional[List[_DataArray]]] = field(init=False, factory=lambda: [None, None])
-    grand_totals_headers: List[Optional[_DataHeaders]] = field(init=False, factory=lambda: [None, None])
+    grand_totals_headers: List[Optional[List[Dict[str, _DataHeaders]]]] = field(
+        init=False, factory=lambda: [None, None]
+    )
+    total_of_grant_totals_processed: bool = field(init=False, default=False)
 
     def accumulate_data(self, from_result: ExecutionResult) -> None:
         """
@@ -119,6 +121,16 @@ class _AccumulatedData:
         for grand_total in grand_totals:
             # 2-dim results have always 1-dim grand totals (3-dim results have 2-dim gt but DataFrame stores 2D only)
             dims = grand_total["totalDimensions"]
+
+            # if dims are empty then data contain total of column and row grandtotals so extend existing data array
+            if len(dims) == 0:
+                if not self.total_of_grant_totals_processed:
+                    grand_totals_item = cast(List[_DataArray], self.grand_totals[0])
+                    for total_idx, total_data in enumerate(grand_total["data"]):
+                        grand_totals_item[total_idx].extend(total_data)
+                    self.total_of_grant_totals_processed = True
+                continue
+
             assert len(dims) == 1, "Only 2-dimensional results are supported"
             dim_idx = dim_idx_dict[dims[0]]
             # the dimension id specified on the grand total says from what dimension were
@@ -134,11 +146,7 @@ class _AccumulatedData:
                 # grand totals not initialized yet; initialize both data and headers by making
                 # a shallow copy from the results
                 self.grand_totals[opposite_dim] = grand_total["data"][:]
-                # TODO: row total measure headers are currently not supported (only aggregation info w/o measure label)
-                # measure header defs are under ["headerGroups"][>0]
-                self.grand_totals_headers[opposite_dim] = grand_total["dimensionHeaders"][0]["headerGroups"][0][
-                    "headers"
-                ][:]
+                self.grand_totals_headers[opposite_dim] = grand_total["dimensionHeaders"][0]["headerGroups"]
             elif paging_dim != opposite_dim:
                 # grand totals are already initialized and the code is paging in the direction that reveals
                 # additional grand total values; append them accordingly; no need to consider total headers:
@@ -192,12 +200,16 @@ class DataFrameMetadata:
 
     row_totals_indexes: List[List[int]]
     execution_response: BareExecutionResponse
+    primary_labels_from_index: Dict[int, Dict[str, str]]
+    primary_labels_from_columns: Dict[int, Dict[str, str]]
 
     @classmethod
     def from_data(
         cls,
         headers: Tuple[_DataHeaders, Optional[_DataHeaders]],
         execution_response: BareExecutionResponse,
+        primary_labels_from_index: Dict[int, Dict[str, str]],
+        primary_labels_from_columns: Dict[int, Dict[str, str]],
     ) -> "DataFrameMetadata":
         """This method constructs a DataFrameMetadata object from data headers and an execution response.
 
@@ -211,6 +223,8 @@ class DataFrameMetadata:
         return cls(
             row_totals_indexes=row_totals_indexes,
             execution_response=execution_response,
+            primary_labels_from_index=primary_labels_from_index,
+            primary_labels_from_columns=primary_labels_from_columns,
         )
 
 
@@ -296,8 +310,10 @@ def _read_complete_execution_result(
 def _create_header_mapper(
     response: BareExecutionResponse,
     dim: int,
+    primary_attribute_labels_mapping: Dict[int, Dict[str, str]],
     label_overrides: Optional[LabelOverrides] = None,
     use_local_ids_in_headers: bool = False,
+    use_primary_labels_in_attributes: bool = False,
 ) -> Callable[[Any, Optional[int]], Optional[str]]:
     """
     Prepares a header mapper function which translates header structures into appropriate labels used
@@ -306,9 +322,12 @@ def _create_header_mapper(
     Args:
         response (BareExecutionResponse): Response structure to gather dimension header details.
         dim (int): Dimension id.
+        primary_attribute_labels_mapping (Dict[int, Dict[str, str]]): Dict to be filled by mapping of primary labels to
+            custom labels per level identified by integer.
         label_overrides (Optional[LabelOverrides]): Label overrides. Defaults to None.
         use_local_ids_in_headers (bool): Use local identifiers of header attributes and metrics. Optional.
            Defaults to False.
+        use_primary_labels_in_attributes (bool): Use primary labels in attributes. Optional. Defaults to False.
 
     Returns:
         Callable[[Any, Optional[int]], Optional[str]]: Mapper function.
@@ -326,7 +345,17 @@ def _create_header_mapper(
             pass
         elif "attributeHeader" in header:
             if "labelValue" in header["attributeHeader"]:
-                label = header["attributeHeader"]["labelValue"]
+                label_value = header["attributeHeader"]["labelValue"]
+                primary_label_value = header["attributeHeader"]["primaryLabelValue"]
+                if use_primary_labels_in_attributes:
+                    label = primary_label_value
+                else:
+                    label = label_value
+                if header_idx is not None:
+                    if header_idx in primary_attribute_labels_mapping:
+                        primary_attribute_labels_mapping[header_idx][primary_label_value] = label_value
+                    else:
+                        primary_attribute_labels_mapping[header_idx] = {primary_label_value: label_value}
                # explicitly handle '(empty value)' if it's None otherwise it's not recognizable in final MultiIndex
                # backend represents ^^^ by "" (datasource value is "") or None (datasource value is NULL) therefore
                # if both representation are used it's necessary to set label to unique header label (space) to avoid
@@ -368,7 +397,8 @@ def _headers_to_index(
     response: BareExecutionResponse,
     label_overrides: LabelOverrides,
     use_local_ids_in_headers: bool = False,
-) -> Optional[pandas.Index]:
+    use_primary_labels_in_attributes: bool = False,
+) -> Tuple[Optional[pandas.Index], Dict[int, Dict[str, str]]]:
     """Converts headers to a pandas MultiIndex.
 
     This function converts the headers present in the response to a pandas MultiIndex (can be used in pandas dataframes)
@@ -380,18 +410,26 @@ def _headers_to_index(
         response (BareExecutionResponse): The execution response object with all data.
         label_overrides (LabelOverrides): A dictionary containing label overrides for the headers.
         use_local_ids_in_headers (bool, optional): If True, uses local Ids in headers, otherwise not. Defaults to False.
+        use_primary_labels_in_attributes (bool, optional): If True, uses primary labels in attributes, otherwise not.
+            Defaults to False.
 
     Returns:
-        Optional[pandas.Index]: A pandas MultiIndex object created from the headers, or None if the headers are empty.
+        Tuple[Optional[pandas.Index], Dict[int, Dict[str, str]]: A pandas MultiIndex object created from the headers
+            with primary attribute labels mapping as Dict, or None with empty Dict if the headers are empty.
     """
+    # dict of primary labels and it's custom labels for attributes per level as key
+    primary_attribute_labels_mapping: Dict[int, Dict[str, str]] = {}
+
     if len(response.dimensions) <= dim_idx or not len(response.dimensions[dim_idx]["headers"]):
-        return None
+        return None, primary_attribute_labels_mapping
 
     mapper = _create_header_mapper(
         response=response,
         dim=dim_idx,
         label_overrides=label_overrides,
         use_local_ids_in_headers=use_local_ids_in_headers,
+        use_primary_labels_in_attributes=use_primary_labels_in_attributes,
+        primary_attribute_labels_mapping=primary_attribute_labels_mapping,
     )
 
     return pandas.MultiIndex.from_arrays(
@@ -400,7 +438,7 @@ def _headers_to_index(
             for header_idx, header_group in enumerate(cast(_DataHeaders, headers[dim_idx]))
         ],
         names=[mapper(dim_header, None) for dim_header in (response.dimensions[dim_idx]["headers"])],
-    )
+    ), primary_attribute_labels_mapping
 
 
 def _merge_grand_totals_into_data(extract: _DataWithHeaders) -> Union[_DataArray, List[_DataArray]]:
@@ -446,10 +484,8 @@ def _merge_grand_total_headers_into_headers(extract: _DataWithHeaders) -> Tuple[
         if grand_total_headers is None:
             continue
         header = cast(List[List[Any]], headers[dim_idx])
-        header[0].extend(grand_total_headers)
-        padding = [None] * len(grand_total_headers)
-        for other_headers in header[1:]:
-            other_headers.extend(padding)
+        for level, grand_total_header in enumerate(grand_total_headers):
+            header[level].extend(grand_total_header["headers"])
 
     return headers
 
@@ -461,6 +497,7 @@ def convert_execution_response_to_dataframe(
    result_size_dimensions_limits: ResultSizeDimensions,
    result_size_bytes_limit: Optional[int] = None,
    use_local_ids_in_headers: bool = False,
+    use_primary_labels_in_attributes: bool = False,
    page_size: int = _DEFAULT_PAGE_SIZE,
 ) -> Tuple[pandas.DataFrame, DataFrameMetadata]:
     """
@@ -474,6 +511,8 @@ def convert_execution_response_to_dataframe(
        result_size_dimensions_limits (ResultSizeDimensions): Dimension limits for the dataframe.
        result_size_bytes_limit (Optional[int], default=None): Size limit in bytes for the dataframe.
        use_local_ids_in_headers (bool, default=False): Use local ids in headers if True, else use default settings.
+        use_primary_labels_in_attributes (bool, default=False): Use primary labels in attributes if True, else use
+            default settings.
        page_size (int, default=_DEFAULT_PAGE_SIZE): Size of the page.
 
     Returns:
@@ -489,22 +528,33 @@ def convert_execution_response_to_dataframe(
     full_data = _merge_grand_totals_into_data(extract)
     full_headers = _merge_grand_total_headers_into_headers(extract)
 
+    index, primary_labels_from_index = _headers_to_index(
+        dim_idx=0,
+        headers=full_headers,
+        response=execution_response,
+        label_overrides=label_overrides,
+        use_local_ids_in_headers=use_local_ids_in_headers,
+        use_primary_labels_in_attributes=use_primary_labels_in_attributes,
+    )
+
+    columns, primary_labels_from_columns = _headers_to_index(
+        dim_idx=1,
+        headers=full_headers,
+        response=execution_response,
+        label_overrides=label_overrides,
+        use_local_ids_in_headers=use_local_ids_in_headers,
+        use_primary_labels_in_attributes=use_primary_labels_in_attributes,
+    )
+
     df = pandas.DataFrame(
         data=full_data,
-        index=_headers_to_index(
-            dim_idx=0,
-            headers=full_headers,
-            response=execution_response,
-            label_overrides=label_overrides,
-            use_local_ids_in_headers=use_local_ids_in_headers,
-        ),
-        columns=_headers_to_index(
-            dim_idx=1,
-            headers=full_headers,
-            response=execution_response,
-            label_overrides=label_overrides,
-            use_local_ids_in_headers=use_local_ids_in_headers,
-        ),
+        index=index,
+        columns=columns,
     )
 
-    return df, DataFrameMetadata.from_data(headers=full_headers, execution_response=execution_response)
+    return df, DataFrameMetadata.from_data(
+        headers=full_headers,
+        execution_response=execution_response,
+        primary_labels_from_index=primary_labels_from_index,
+        primary_labels_from_columns=primary_labels_from_columns,
+    )
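
The returned DataFrameMetadata now also records, per index and column level, a mapping from each attribute's primary label value to the label value actually shown. A small sketch of reading that mapping back, assuming df and metadata come from a conversion that ran with use_primary_labels_in_attributes=True:

# Sketch only: relabel one index level of the returned frame using the new
# primary_labels_from_index mapping ({level: {primary_label_value: label_value}}).
def relabel_index_level(df, metadata, level: int):
    mapping = metadata.primary_labels_from_index.get(level, {})
    # fall back to the original value when no mapping was recorded for this level
    return df.rename(index=lambda value: mapping.get(value, value), level=level)

df_readable = relabel_index_level(df, metadata, level=0)
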
gooddata_pandas/series.py CHANGED
@@ -4,10 +4,10 @@ from __future__ import annotations
 from typing import Optional, Union
 
 import pandas
+from gooddata_sdk import Attribute, Filter, GoodDataSdk, ObjId, SimpleMetric
 
 from gooddata_pandas.data_access import compute_and_extract
 from gooddata_pandas.utils import IndexDef, LabelItemDef, make_pandas_index
-from gooddata_sdk import Attribute, Filter, GoodDataSdk, ObjId, SimpleMetric
 
 
 class SeriesFactory:
gooddata_pandas/utils.py CHANGED
@@ -6,17 +6,23 @@ import uuid
 from typing import Any, Dict, Optional, Union
 
 import pandas
-from pandas import Index, MultiIndex
-
-from gooddata_sdk import Attribute, CatalogAttribute, InsightAttribute, InsightMetric, Metric, ObjId, SimpleMetric
+from gooddata_sdk import (
+    Attribute,
+    CatalogAttribute,
+    Metric,
+    ObjId,
+    SimpleMetric,
+    VisualizationAttribute,
+    VisualizationMetric,
+)
 from gooddata_sdk.type_converter import AttributeConverterStore, DateConverter, DatetimeConverter, IntegerConverter
+from pandas import Index, MultiIndex
 
 LabelItemDef = Union[Attribute, ObjId, str]
 DataItemDef = Union[Attribute, Metric, ObjId, str]
 IndexDef = Union[LabelItemDef, Dict[str, LabelItemDef]]
 ColumnsDef = Dict[str, DataItemDef]
 
-
 # register external pandas types to converters
 IntegerConverter.set_external_fnc(lambda self, value: pandas.to_numeric(value))
 DateConverter.set_external_fnc(lambda self, value: pandas.to_datetime(value))
@@ -162,10 +168,10 @@ def make_pandas_index(index: dict) -> Optional[Union[Index, MultiIndex]]:
     return _idx
 
 
-class DefaultInsightColumnNaming:
+class DefaultVisualizationColumnNaming:
     def __init__(self) -> None:
         """
-        Initialize a DefaultInsightColumnNaming instance with an empty dictionary for unique names.
+        Initialize a DefaultVisualizationColumnNaming instance with an empty dictionary for unique names.
         """
         self._uniques: dict[str, int] = dict()
 
@@ -206,24 +212,24 @@ class DefaultInsightColumnNaming:
         self._uniques[unique_candidate] = 1
         return unique_candidate
 
-    def col_name_for_attribute(self, attr: InsightAttribute) -> str:
+    def col_name_for_attribute(self, attr: VisualizationAttribute) -> str:
         """
         Generate a unique column name for the given attribute.
 
         Args:
-            attr (InsightAttribute): The attribute.
+            attr (VisualizationAttribute): The attribute.
 
         Returns:
             str: The unique column name.
         """
         return self._ensure_unique(attr.label_id)
 
-    def col_name_for_metric(self, measure: InsightMetric) -> str:
+    def col_name_for_metric(self, measure: VisualizationMetric) -> str:
        """
        Generate a unique column name for the given metric.
 
        Args:
-            measure (InsightMetric): The metric.
+            measure (VisualizationMetric): The metric.
 
        Returns:
            str: The unique column name.
@@ -1,11 +1,11 @@
 Metadata-Version: 2.1
 Name: gooddata-pandas
-Version: 1.10.0
+Version: 1.21.1.dev3
 Summary: GoodData Cloud to pandas
 Author: GoodData
 Author-email: support@gooddata.com
 License: MIT
-Project-URL: Documentation, https://gooddata-pandas.readthedocs.io/en/v1.10.0
+Project-URL: Documentation, https://gooddata-pandas.readthedocs.io/en/v1.21.1.dev3
 Project-URL: Source, https://github.com/gooddata/gooddata-python-sdk
 Keywords: gooddata,pandas,series,data,frame,data_frame,analytics,headless,business,intelligence,headless-bi,cloud,native,semantic,layer,sql,metrics
 Classifier: Development Status :: 5 - Production/Stable
@@ -15,6 +15,7 @@ Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Database
 Classifier: Topic :: Scientific/Engineering
 Classifier: Topic :: Software Development
@@ -22,8 +23,8 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.8.0
 Description-Content-Type: text/markdown
 License-File: LICENSE.txt
-Requires-Dist: gooddata-sdk ~=1.10.0
-Requires-Dist: pandas <2.0.0,>=1.0.0
+Requires-Dist: gooddata-sdk ~=1.21.1.dev3
+Requires-Dist: pandas <3.0.0,>=2.0.0
 
 # GoodData Pandas
 
@@ -0,0 +1,14 @@
+gooddata_pandas/__init__.py,sha256=Ta3qIIDq7kBRUsYSV3aC69AQBFvFvhtWDQucgP-l88w,297
+gooddata_pandas/_version.py,sha256=YxaAwfP9Yw10sit_vyhVfHJQBAzUUwtm5p0BItfBFx8,225
+gooddata_pandas/data_access.py,sha256=X8NKYtwWKFEfXvgrUbybuQmg1cub5pAhFtDCFMyzffY,18748
+gooddata_pandas/dataframe.py,sha256=YwjkuO6PzqSfQTaB1Bsn2VuQHvxyiYL75xIW2iI7t3M,13007
+gooddata_pandas/good_pandas.py,sha256=ePEm2Lmeiftz5td0BLC71q7my5Aj8aABn3xV0myRmqI,3444
+gooddata_pandas/py.typed,sha256=u_MS29sadlaIqGRPYFjWml5u0gQnoQfvbsf9pu3TZJU,94
+gooddata_pandas/result_convertor.py,sha256=6k9-Z6Jgtej2yPcR2iftKd2c6e8OwSEDkiil2o-zjP0,25892
+gooddata_pandas/series.py,sha256=wTvJR_I0FUteyxo4RwHzP20eU7rei0dP8ZdqfrLbf5c,5759
+gooddata_pandas/utils.py,sha256=PpkB6oWacRxYY9S-RbEZm9Jdblo4bgAzrmHzV9MlMPQ,7223
+gooddata_pandas-1.21.1.dev3.dist-info/LICENSE.txt,sha256=CTs8U6T7MmKBKFFiQYARwgCfWgUzdosq01DI298WFiY,77209
+gooddata_pandas-1.21.1.dev3.dist-info/METADATA,sha256=4DGa5M7y0bwLfmBeSKLoRUVuyOWvSflmtfhjHfU2RHM,2893
+gooddata_pandas-1.21.1.dev3.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
+gooddata_pandas-1.21.1.dev3.dist-info/top_level.txt,sha256=B7K_WFxlxplJbEbv5Mf0YhX74dbOpTPgDX-W6I7CssI,16
+gooddata_pandas-1.21.1.dev3.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.42.0)
+Generator: setuptools (70.2.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 
@@ -1,14 +0,0 @@
-gooddata_pandas/__init__.py,sha256=Ta3qIIDq7kBRUsYSV3aC69AQBFvFvhtWDQucgP-l88w,297
-gooddata_pandas/_version.py,sha256=YxaAwfP9Yw10sit_vyhVfHJQBAzUUwtm5p0BItfBFx8,225
-gooddata_pandas/data_access.py,sha256=8o7N2lC_tUU-3ht2RZm0osB4bYA-YQq8sCQauDaJSs4,18625
-gooddata_pandas/dataframe.py,sha256=bpCpTBpne_qmgLQ44EJxJ8IUC-Kw0UzLaVNt89ugvO0,12655
-gooddata_pandas/good_pandas.py,sha256=9E8sSGppJaxW-eMCdY2rR_7k5hdlAPQqv4djfkvllcg,3443
-gooddata_pandas/py.typed,sha256=u_MS29sadlaIqGRPYFjWml5u0gQnoQfvbsf9pu3TZJU,94
-gooddata_pandas/result_convertor.py,sha256=8G7UMfrACKEWpfAw7C4M0JEnnCqeBLsH7p0zQLl-w7Y,22864
-gooddata_pandas/series.py,sha256=UmfYvX0RhuRsItXPDm-kFo5CBT73qkro_WtYEITgg1g,5759
-gooddata_pandas/utils.py,sha256=RtOnyjHkzsOo8kpAvw_puEkI7EkqdHmOwPD3kV26RS0,7144
-gooddata_pandas-1.10.0.dist-info/LICENSE.txt,sha256=CTs8U6T7MmKBKFFiQYARwgCfWgUzdosq01DI298WFiY,77209
-gooddata_pandas-1.10.0.dist-info/METADATA,sha256=-MIAR9PrLhVeogViGOsHz9AZdNIUnxtU2fghx_Ugf-I,2827
-gooddata_pandas-1.10.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-gooddata_pandas-1.10.0.dist-info/top_level.txt,sha256=B7K_WFxlxplJbEbv5Mf0YhX74dbOpTPgDX-W6I7CssI,16
-gooddata_pandas-1.10.0.dist-info/RECORD,,