vtlengine 1.0.1__py3-none-any.whl → 1.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vtlengine might be problematic.

@@ -32,6 +32,7 @@ LN = "ln"
 POWER = "power"
 LOG = "log"
 SQRT = "sqrt"
+RANDOM = "random"
 # Boolean operators.
 AND = "and"
 OR = "or"
@@ -70,6 +71,7 @@ SYMDIFF = "symdiff"
 IF = "if"
 THEN = "then"
 ELSE = "else"
+CASE = "case"
 NVL = "nvl"
 # Clause Operators.
 FILTER = "filter"
@@ -89,6 +91,16 @@ STOCK_TO_FLOW = "stock_to_flow"
 TIMESHIFT = "timeshift"
 TIME_AGG = "time_agg"
 CURRENT_DATE = "current_date"
+DATE_DIFF = "date_diff"
+DATE_ADD = "date_add"
+YEAR = "year"
+MONTH = "month"
+DAYOFMONTH = "dayofmonth"
+DAYOFYEAR = "dayofyear"
+DAYTOYEAR = "daytoyear"
+DAYTOMONTH = "daytomonth"
+YEARTODAY = "yeartoday"
+MONTHTODAY = "monthtoday"
 # Join Operators.
 INNER_JOIN = "inner_join"
 LEFT_JOIN = "left_join"
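The three hunks above extend the grammar token list: 1.0.2 adds a `random` token, a `case` token for the new conditional, and ten time operators (`date_diff`, `date_add`, `year`, `month`, `dayofmonth`, `dayofyear`, and the duration conversions). As a rough sketch of what this enables, the date extractors can be checked with a structure-only semantic analysis; the `semantic_analysis` entry point and every dataset/component name below are assumptions for illustration, not taken from this diff:

```python
# Sketch only: entry point, dataset and component names are assumed.
from vtlengine import semantic_analysis

script = "DS_r := DS_1[calc Me_year := year(Me_1), Me_days := date_diff(Me_1, Me_2)];"

structures = {
    "datasets": [
        {
            "name": "DS_1",
            "DataStructure": [
                {"name": "Id_1", "role": "Identifier", "type": "Integer", "nullable": False},
                {"name": "Me_1", "role": "Measure", "type": "Date", "nullable": True},
                {"name": "Me_2", "role": "Measure", "type": "Date", "nullable": True},
            ],
        }
    ]
}

# Returns the computed structure of DS_r without needing datapoints.
print(semantic_analysis(script=script, data_structures=structures))
```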
@@ -62,6 +62,10 @@ class VtlVisitor(ParseTreeVisitor):
     def visitIfExpr(self, ctx: Parser.IfExprContext):
         return self.visitChildren(ctx)

+    # Visit a parse tree produced by Parser#caseExpr.
+    def visitCaseExpr(self, ctx: Parser.CaseExprContext):
+        return self.visitChildren(ctx)
+
     # Visit a parse tree produced by Parser#clauseExpr.
     def visitClauseExpr(self, ctx: Parser.ClauseExprContext):
         return self.visitChildren(ctx)
@@ -90,6 +94,10 @@ class VtlVisitor(ParseTreeVisitor):
     def visitIfExprComp(self, ctx: Parser.IfExprCompContext):
         return self.visitChildren(ctx)

+    # Visit a parse tree produced by Parser#caseExprComp.
+    def visitCaseExprComp(self, ctx: Parser.CaseExprCompContext):
+        return self.visitChildren(ctx)
+
     # Visit a parse tree produced by Parser#comparisonExprComp.
     def visitComparisonExprComp(self, ctx: Parser.ComparisonExprCompContext):
         return self.visitChildren(ctx)
@@ -399,7 +407,7 @@ class VtlVisitor(ParseTreeVisitor):
         return self.visitChildren(ctx)

     # Visit a parse tree produced by Parser#periodAtomComponent.
-    def visitPeriodAtomComponent(self, ctx: Parser.PeriodAtomComponentContext):
+    def visitTimeUnaryAtomComponent(self, ctx: Parser.PeriodAtomComponentContext):
         return self.visitChildren(ctx)

     # Visit a parse tree produced by Parser#fillTimeAtomComponent.
vtlengine/AST/__init__.py CHANGED
@@ -344,6 +344,25 @@ class If(AST):
     elseOp: AST


+class CaseObj:
+    condition: AST
+    thenOp: AST
+
+    def __init__(self, condition: AST, thenOp: AST):
+        self.condition = condition
+        self.thenOp = thenOp
+
+
+@dataclass
+class Case(AST):
+    """
+    Case: (condition, thenOp, elseOp)
+    """
+
+    cases: List[CaseObj]
+    elseOp: AST
+
+
 @dataclass
 class Validation(AST):
     """
@@ -145,6 +145,8 @@ class TimePeriodHandler:
     _period_number: int

     def __init__(self, period: str) -> None:
+        if isinstance(period, int):
+            period = str(period)
         if "-" in period:
             self.year, self.period_indicator, self.period_number = (
                 from_input_customer_support_to_internal(period)
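With this coercion, an annual period supplied as a bare integer (as it often arrives from a pandas column) is stringified before the `"-" in period` parsing. A hedged example; the import path is an assumption from the package layout:

```python
# Import path is an assumption; TimePeriodHandler is the class patched above.
from vtlengine.DataTypes.TimeHandling import TimePeriodHandler

TimePeriodHandler("2022-Q1")  # already worked in 1.0.1
TimePeriodHandler(2022)       # 1.0.2: the int is coerced to "2022" instead of
                              # raising TypeError on the "-" membership test
```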
@@ -130,3 +130,47 @@ class InputValidationException(VTLEngineException):
             super().__init__(message, lino, colno, code)
         else:
             super().__init__(message, lino, colno)
+
+
+def check_key(field: str, dict_keys: Any, key: str) -> None:
+    if key not in dict_keys:
+        closest_key = find_closest_key(dict_keys, key)
+        message_append = f". Did you mean {closest_key}?" if closest_key else ""
+        raise SemanticError("0-1-1-13", field=field, key=key, closest_key=message_append)
+
+
+def find_closest_key(dict_keys: Any, key: str) -> Optional[str]:
+    closest_key = None
+    max_distance = 3
+    min_distance = float('inf')
+
+    for dict_key in dict_keys:
+        distance = key_distance(key, dict_key)
+        if distance < min_distance:
+            min_distance = distance
+            closest_key = dict_key
+
+    if min_distance <= max_distance:
+        return closest_key
+    return None
+
+
+def key_distance(key: str, objetive: str) -> int:
+    dp = [[0] * (len(objetive) + 1) for _ in range(len(key) + 1)]
+
+    for i in range(len(key) + 1):
+        dp[i][0] = i
+    for j in range(len(objetive) + 1):
+        dp[0][j] = j
+
+    for i in range(1, len(key) + 1):
+        for j in range(1, len(objetive) + 1):
+            if key[i - 1] == objetive[j - 1]:
+                cost = 0
+            else:
+                cost = 1
+            dp[i][j] = min(dp[i - 1][j] + 1,
+                           dp[i][j - 1] + 1,
+                           dp[i - 1][j - 1] + cost)
+
+    return dp[-1][-1]
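`key_distance` is a textbook Levenshtein distance (insertions, deletions, and substitutions all cost 1), and `find_closest_key` only suggests a key when the best distance is at most 3. The helpers are self-contained, so their behavior is easy to check (import path assumed to be the Exceptions module patched above):

```python
from vtlengine.Exceptions import check_key, find_closest_key, key_distance

keys = ["Identifier", "Attribute", "Measure"]

print(key_distance("Identfier", "Identifier"))  # 1 (one missing character)
print(find_closest_key(keys, "Identfier"))      # "Identifier" (1 <= cutoff 3)
print(find_closest_key(keys, "Dimension"))      # None (nothing within distance 3)

check_key("role", keys, "Mesure")  # raises SemanticError 0-1-1-13 with
                                   # ". Did you mean Measure?" appended
```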
@@ -20,21 +20,21 @@ centralised_messages = {
     # Infer Data Structure errors
     # "0-1-1-1": "A csv file or a dataframe is required.",
     "0-1-1-2": "The provided {source} must have data to can infer the data structure.",
-    "0-1-1-3": "Can not infer data structure: {errors}",
+    "0-1-1-3": "Can not infer data structure: {errors}.",
     "0-1-1-4": "On Dataset {name} loading: An identifier cannot have null values, found null "
                "values on {null_identifier}.",
     "0-1-1-5": "On Dataset {name} loading: Datasets without identifiers must have 0 or "
                "1 datapoints.",
     "0-1-1-6": "Duplicated records. Combination of identifiers are repeated.",
-    "0-1-1-7": "G1 - The provided CSV file is empty",
-    "0-1-1-8": "The following identifiers {ids} were not found , review file {file}",
+    "0-1-1-7": "G1 - The provided CSV file is empty.",
+    "0-1-1-8": "The following identifiers {ids} were not found , review file {file}.",
     "0-1-1-9": "You have a problem related with commas, review rfc4180 standard, review file "
-               "{file}",
+               "{file}.",
     "0-1-1-10": "On Dataset {name} loading: Component {comp_name} is missing in Datapoints.",
-    "0-1-1-11": "Wrong data in the file for this scalardataset {name}",
-    "0-1-1-12": "On Dataset {name} loading: not possible to cast column {column} to {type}",
-    #
-    "0-1-0-1": " Trying to redefine input datasets {dataset}",  # Semantic Error
+    "0-1-1-11": "Wrong data in the file for this scalardataset {name}.",
+    "0-1-1-12": "On Dataset {name} loading: not possible to cast column {column} to {type}.",
+    "0-1-1-13": "Invalid key on {field} field: {key}{closest_key}.",
+    "0-1-0-1": " Trying to redefine input datasets {dataset}.",  # Semantic Error
     # ------------Operators-------------
     # General Semantic errors
     # "1-1-1-1": "At op {op}. Unable to validate types.",
@@ -134,6 +134,14 @@ centralised_messages = {
                 "be Datasets or at least one of them a Scalar.",
     "1-1-9-13": "At op {op}: then {then} and else {else_clause} datasets must contain the same "
                 "number of components.",
+    "2-1-9-1": "At op {op}: Condition operators must have the same operator type.",
+    "2-1-9-2": "At op {op}: Condition {name} it's not a boolean.",
+    "2-1-9-3": "At op {op}: All then and else operands must be scalars.",
+    "2-1-9-4": "At op {op}: Condition {name} must be boolean type.",
+    "2-1-9-5": "At op {op}: Condition Dataset {name} measure must be Boolean.",
+    "2-1-9-6": "At op {op}: At least a then or else operand must be Dataset.",
+    "2-1-9-7": "At op {op}: All Dataset operands must have the same components.",
+
     # Data Validation errors
     "1-1-10-1": "At op {op}: The {op_type} operand must have exactly one measure of type {me_type}",
     "1-1-10-2": "At op {op}: Number of variable has to be equal between the call and signature.",
@@ -18,7 +18,7 @@ from vtlengine.Operators.Aggregation import extract_grouping_identifiers
 from vtlengine.Operators.Assignment import Assignment
 from vtlengine.Operators.CastOperator import Cast
 from vtlengine.Operators.Comparison import Between, ExistIn
-from vtlengine.Operators.Conditional import If
+from vtlengine.Operators.Conditional import If, Case
 from vtlengine.Operators.General import Eval
 from vtlengine.Operators.HROperators import get_measure_from_dataset, HAAssignment, Hierarchy
 from vtlengine.Operators.Numeric import Round, Trunc
@@ -73,6 +73,7 @@ from vtlengine.AST.Grammar.tokens import (
     EQ,
     CURRENT_DATE,
     CALC,
+    COUNT,
 )
 from vtlengine.Exceptions import SemanticError
 from vtlengine.Model import (
@@ -103,7 +104,7 @@ class InterpreterAnalyzer(ASTTemplate):
     # Time Period Representation
     time_period_representation: Optional[TimePeriodRepresentation] = None
     # Flags to change behavior
-    nested_if: Union[str, bool] = False
+    nested_condition: Union[str, bool] = False
     is_from_assignment: bool = False
     is_from_component_assignment: bool = False
     is_from_regular_aggregation: bool = False
@@ -115,7 +116,7 @@ class InterpreterAnalyzer(ASTTemplate):
     is_from_condition: bool = False
     is_from_hr_val: bool = False
     is_from_hr_agg: bool = False
-    if_stack: Optional[List[str]] = None
+    condition_stack: Optional[List[str]] = None
     # Handlers for simplicity
     regular_aggregation_dataset: Optional[Dataset] = None
     aggregation_grouping: Optional[List[str]] = None
@@ -206,10 +207,10 @@ class InterpreterAnalyzer(ASTTemplate):

             # Reset some handlers (joins and if)
             self.is_from_join = False
-            self.if_stack = None
+            self.condition_stack = None
             self.then_condition_dataset = None
             self.else_condition_dataset = None
-            self.nested_if = False
+            self.nested_condition = False

             if result is None:
                 continue
@@ -361,8 +362,8 @@ class InterpreterAnalyzer(ASTTemplate):
         if (
             not self.is_from_condition
            and node.op != MEMBERSHIP
-            and self.if_stack is not None
-            and len(self.if_stack) > 0
+            and self.condition_stack is not None
+            and len(self.condition_stack) > 0
         ):
             is_from_if = self.is_from_if
             self.is_from_if = False
@@ -520,6 +521,7 @@ class InterpreterAnalyzer(ASTTemplate):
         return having

     def visit_Analytic(self, node: AST.Analytic) -> Any:  # noqa: C901
+        component_name = None
         if self.is_from_regular_aggregation:
             if self.regular_aggregation_dataset is None:
                 raise SemanticError("1-1-6-10")
@@ -527,6 +529,7 @@ class InterpreterAnalyzer(ASTTemplate):
                 operand = self.regular_aggregation_dataset
             else:
                 operand_comp = self.visit(node.operand)
+                component_name = operand_comp.name
                 measure_names = self.regular_aggregation_dataset.get_measures_names()
                 dataset_components = self.regular_aggregation_dataset.components.copy()
                 for name in measure_names:
@@ -598,6 +601,7 @@ class InterpreterAnalyzer(ASTTemplate):
             ordering=ordering,
             window=node.window,
             params=params,
+            component_name=component_name,
         )
         if not self.is_from_regular_aggregation:
             return result
@@ -610,7 +614,10 @@ class InterpreterAnalyzer(ASTTemplate):
         )

         # # Extracting the component we need (only measure)
-        measure_name = result.get_measures_names()[0]
+        if component_name is None or node.op == COUNT:
+            measure_name = result.get_measures_names()[0]
+        else:
+            measure_name = component_name
         # Joining the result with the original dataset
         if self.only_semantic:
             data = None
@@ -961,36 +968,52 @@ class InterpreterAnalyzer(ASTTemplate):

         # Analysis for data component and dataset
         else:
-            if self.if_stack is None:
-                self.if_stack = []
+            if self.condition_stack is None:
+                self.condition_stack = []
             if self.then_condition_dataset is None:
                 self.then_condition_dataset = []
             if self.else_condition_dataset is None:
                 self.else_condition_dataset = []
             self.generate_then_else_datasets(copy(condition))

-            self.if_stack.append(THEN_ELSE["then"])
+            self.condition_stack.append(THEN_ELSE["then"])
             self.is_from_if = True
-            self.nested_if = "T" if isinstance(node.thenOp, AST.If) else False
+            self.nested_condition = "T" if isinstance(node.thenOp, AST.If) else False
             thenOp = self.visit(node.thenOp)
             if isinstance(thenOp, Scalar) or not isinstance(node.thenOp, AST.BinOp):
                 self.then_condition_dataset.pop()
-                self.if_stack.pop()
+                self.condition_stack.pop()

-            self.if_stack.append(THEN_ELSE["else"])
+            self.condition_stack.append(THEN_ELSE["else"])
             self.is_from_if = True
-            self.nested_if = "E" if isinstance(node.elseOp, AST.If) else False
+            self.nested_condition = "E" if isinstance(node.elseOp, AST.If) else False
             elseOp = self.visit(node.elseOp)
             if isinstance(elseOp, Scalar) or (
                 not isinstance(node.elseOp, AST.BinOp) and not isinstance(node.elseOp, AST.If)
             ):
                 if len(self.else_condition_dataset) > 0:
                     self.else_condition_dataset.pop()
-                if len(self.if_stack) > 0:
-                    self.if_stack.pop()
+                if len(self.condition_stack) > 0:
+                    self.condition_stack.pop()

         return If.analyze(condition, thenOp, elseOp)

+    def visit_Case(self, node: AST.Case) -> Any:
+        conditions: List[Any] = []
+        thenOps: List[Any] = []
+
+        if self.condition_stack is None:
+            self.condition_stack = []
+
+        while node.cases:
+            case = node.cases.pop(0)
+            self.is_from_condition = True
+            conditions.append(self.visit(case.condition))
+            self.is_from_condition = False
+            thenOps.append(self.visit(case.thenOp))
+
+        return Case.analyze(conditions, thenOps, self.visit(node.elseOp))
+
     def visit_RenameNode(self, node: AST.RenameNode) -> Any:
         if self.udo_params is not None:
             if "#" in node.old_name:
@@ -1323,9 +1346,11 @@ class InterpreterAnalyzer(ASTTemplate):
             if self.rule_data is None:
                 return None
             filtering_indexes = list(filter_comp.data[filter_comp.data == True].index)
+            nan_indexes = list(filter_comp.data[filter_comp.data.isnull()].index)
             # If no filtering indexes, then all datapoints are valid on DPR and HR
             if len(filtering_indexes) == 0 and not (self.is_from_hr_agg or self.is_from_hr_val):
                 self.rule_data["bool_var"] = True
+                self.rule_data.loc[nan_indexes, "bool_var"] = None
                 return self.rule_data
             non_filtering_indexes = list(set(filter_comp.data.index) - set(filtering_indexes))
@@ -1340,6 +1365,7 @@ class InterpreterAnalyzer(ASTTemplate):
                 self.rule_data, how="left", on=original_data.columns.tolist()
             )
             original_data.loc[non_filtering_indexes, "bool_var"] = True
+            original_data.loc[nan_indexes, "bool_var"] = None
             return original_data
         elif node.op in HR_COMP_MAPPING:
             self.is_from_assignment = True
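The two `nan_indexes` lines change how a datapoint ruleset's filter treats nulls: rows whose condition evaluates to null now get `bool_var = None` instead of silently inheriting `True`. The pandas indexing pattern, reduced to a toy example:

```python
import pandas as pd

# Toy stand-ins for filter_comp.data and self.rule_data above.
filter_data = pd.Series([True, False, None, True], dtype="object")
rule_data = pd.DataFrame({"bool_var": [True, True, True, True]})

nan_indexes = list(filter_data[filter_data.isnull()].index)
rule_data.loc[nan_indexes, "bool_var"] = None

print(rule_data["bool_var"].tolist())  # [True, True, None, True]
```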
@@ -1484,10 +1510,10 @@ class InterpreterAnalyzer(ASTTemplate):
         data = condition.data

         if data is not None:
-            if self.nested_if and self.if_stack is not None:
+            if self.nested_condition and self.condition_stack is not None:
                 merge_df = (
                     self.then_condition_dataset[-1]
-                    if self.if_stack[-1] == THEN_ELSE["then"]
+                    if self.condition_stack[-1] == THEN_ELSE["then"]
                     else self.else_condition_dataset[-1]
                 )
                 indexes = merge_df.data[merge_df.data.columns[-1]]
@@ -1540,12 +1566,12 @@ class InterpreterAnalyzer(ASTTemplate):
         if (
             self.then_condition_dataset is None
             or self.else_condition_dataset is None
-            or self.if_stack is None
+            or self.condition_stack is None
         ):
             return left_operand, right_operand
         merge_dataset = (
             self.then_condition_dataset.pop()
-            if self.if_stack.pop() == THEN_ELSE["then"]
+            if self.condition_stack.pop() == THEN_ELSE["then"]
             else (self.else_condition_dataset.pop())
         )
         merge_index = merge_dataset.data[merge_dataset.get_measures_names()[0]].to_list()
@@ -1617,8 +1643,9 @@ class InterpreterAnalyzer(ASTTemplate):

         # Getting Dataset elements
         result_components = {
-            c_name: copy(comp)
-            for c_name, comp in self.ruleset_dataset.components.items()  # type: ignore[union-attr]
+            comp_name: copy(comp)
+            for comp_name, comp in
+            self.ruleset_dataset.components.items()  # type: ignore[union-attr]
         }
         if self.ruleset_signature is not None:
             hr_component = self.ruleset_signature["RULE_COMPONENT"]
@@ -1732,9 +1759,8 @@ class InterpreterAnalyzer(ASTTemplate):
                 signature_values[param["name"]] = self.visit(node.params[i])
             elif param["type"] in ["Dataset", "Component"]:
                 if isinstance(node.params[i], AST.VarID):
-                    signature_values[param["name"]] = node.params[
-                        i
-                    ].value  # type: ignore[attr-defined]
+                    signature_values[param["name"]] = (
+                        node.params[i].value)  # type: ignore[attr-defined]
                 else:
                     param_element = self.visit(node.params[i])
                     if isinstance(param_element, Dataset):
@@ -41,6 +41,13 @@ class Scalar:
         return same_name and same_type and same_value


+Role_keys = [
+    "Identifier",
+    "Attribute",
+    "Measure",
+]
+
+
 class Role(Enum):
     """
     Enum class for the role of a component (Identifier, Attribute, Measure)
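`Role_keys` gives `check_key` (added in the Exceptions hunk above) a canonical list of role strings, so a typo in a user-supplied data structure can produce a suggestion rather than a bare failure. A sketch of the intended pairing; the actual call site is not shown in this diff:

```python
# Assumed pairing of the two additions; the call site is not in this diff.
from vtlengine.Exceptions import check_key
from vtlengine.Model import Role_keys

check_key("role", Role_keys, "Measure")   # valid key, passes silently
check_key("role", Role_keys, "Atribute")  # raises SemanticError 0-1-1-13:
                                          # "Invalid key on role field: Atribute.
                                          #  Did you mean Attribute?"
```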
@@ -58,6 +58,7 @@ class Analytic(Operator.Unary):
         ordering: Optional[List[OrderBy]],
         window: Optional[Windowing],
         params: Optional[List[int]],
+        component_name: Optional[str] = None,
     ) -> Dataset:
         if ordering is None:
             order_components = []
@@ -83,25 +84,51 @@ class Analytic(Operator.Unary):
             raise SemanticError(
                 "1-1-1-10", op=cls.op, comp_name=comp_name, dataset_name=operand.name
             )
-        measures = operand.get_measures()
-        if measures is None:
-            raise SemanticError("1-1-1-8", op=cls.op, name=operand.name)
-        if cls.type_to_check is not None:
-            for measure in measures:
-                unary_implicit_promotion(measure.data_type, cls.type_to_check)
-        if cls.return_type is not None:
-            for measure in measures:
-                new_measure = copy(measure)
-                new_measure.data_type = cls.return_type
-                result_components[measure.name] = new_measure
-        if cls.op == COUNT and len(measures) <= 1:
-            measure_name = COMP_NAME_MAPPING[cls.return_type]
-            nullable = False if len(measures) == 0 else measures[0].nullable
-            if len(measures) == 1:
-                del result_components[measures[0].name]
-            result_components[measure_name] = Component(
-                name=measure_name, data_type=cls.return_type, role=Role.MEASURE, nullable=nullable
-            )
+        if component_name is not None:
+            if cls.type_to_check is not None:
+                unary_implicit_promotion(
+                    operand.components[component_name].data_type, cls.type_to_check
+                )
+            if cls.return_type is not None:
+                result_components[component_name] = Component(
+                    name=component_name,
+                    data_type=cls.return_type,
+                    role=operand.components[component_name].role,
+                    nullable=operand.components[component_name].nullable,
+                )
+            if cls.op == COUNT:
+                measure_name = COMP_NAME_MAPPING[cls.return_type]
+                result_components[measure_name] = Component(
+                    name=measure_name,
+                    data_type=cls.return_type,
+                    role=Role.MEASURE,
+                    nullable=operand.components[component_name].nullable,
+                )
+                if component_name in result_components:
+                    del result_components[component_name]
+        else:
+            measures = operand.get_measures()
+            if len(measures) == 0:
+                raise SemanticError("1-1-1-8", op=cls.op, name=operand.name)
+            if cls.type_to_check is not None:
+                for measure in measures:
+                    unary_implicit_promotion(measure.data_type, cls.type_to_check)
+            if cls.return_type is not None:
+                for measure in measures:
+                    new_measure = copy(measure)
+                    new_measure.data_type = cls.return_type
+                    result_components[measure.name] = new_measure
+            if cls.op == COUNT and len(measures) <= 1:
+                measure_name = COMP_NAME_MAPPING[cls.return_type]
+                nullable = False if len(measures) == 0 else measures[0].nullable
+                if len(measures) == 1:
+                    del result_components[measures[0].name]
+                result_components[measure_name] = Component(
+                    name=measure_name,
+                    data_type=cls.return_type,
+                    role=Role.MEASURE,
+                    nullable=nullable,
+                )

         return Dataset(name="result", components=result_components, data=None)
@@ -205,12 +232,17 @@ class Analytic(Operator.Unary):
         ordering: Optional[List[OrderBy]],
         window: Optional[Windowing],
         params: Optional[List[int]],
+        component_name: Optional[str] = None,
     ) -> Dataset:
-        result = cls.validate(operand, partitioning, ordering, window, params)
+        result = cls.validate(operand, partitioning, ordering, window, params, component_name)
         df = operand.data.copy() if operand.data is not None else pd.DataFrame()
-        measure_names = operand.get_measures_names()
         identifier_names = operand.get_identifiers_names()

+        if component_name is not None:
+            measure_names = [component_name]
+        else:
+            measure_names = operand.get_measures_names()
+
         result.data = cls.analyticfunc(
             df=df,
             partitioning=partitioning,
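Threaded through `visit_Analytic`, `validate`, and `evaluate`, the new `component_name` parameter lets an analytic invoked inside a `calc` clause operate on just the referenced component instead of every measure (with `count` still falling back to the generated count measure). A hedged sketch of the user-facing effect (entry point and names assumed):

```python
# Sketch only: entry point, dataset and component names are assumed.
from vtlengine import semantic_analysis

# The analytic references Me_1 explicitly, so 1.0.2 validates and computes
# it over ["Me_1"] alone rather than over all measures of DS_1.
script = "DS_r := DS_1[calc Me_2 := sum(Me_1) over (partition by Id_1)];"

structures = {
    "datasets": [
        {
            "name": "DS_1",
            "DataStructure": [
                {"name": "Id_1", "role": "Identifier", "type": "Integer", "nullable": False},
                {"name": "Me_1", "role": "Measure", "type": "Number", "nullable": True},
                {"name": "Me_3", "role": "Measure", "type": "Number", "nullable": True},
            ],
        }
    ]
}

print(semantic_analysis(script=script, data_structures=structures))
```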
@@ -31,7 +31,7 @@ class Binary(Operator.Binary):
     @classmethod
     def apply_operation_two_series(cls, left_series: Any, right_series: Any) -> Any:
         result = cls.comp_op(
-            left_series.astype("bool[pyarrow]"), right_series.astype("bool[pyarrow]")
+            left_series.astype("boolean"), right_series.astype("boolean")
         )
         return result.replace({pd.NA: None}).astype(object)
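The comparison operator drops the pyarrow-backed `bool[pyarrow]` dtype for pandas' built-in nullable `"boolean"` dtype, which keeps three-valued logic without requiring pyarrow at runtime. The dtype behavior it now relies on:

```python
import pandas as pd

left = pd.Series([True, None, False]).astype("boolean")
right = pd.Series([True, True, None]).astype("boolean")

# Kleene logic on the nullable dtype: True & NA -> NA, False & NA -> False.
print((left & right).tolist())  # [True, <NA>, False]
```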
37
37