pydpm_xl 0.1.10__py3-none-any.whl → 0.1.39rc23__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (44)
  1. py_dpm/AST/ASTConstructor.py +11 -0
  2. py_dpm/AST/ASTObjects.py +67 -3
  3. py_dpm/AST/ASTTemplate.py +5 -1
  4. py_dpm/AST/MLGeneration.py +12 -0
  5. py_dpm/AST/check_operands.py +301 -78
  6. py_dpm/OperationScopes/OperationScopeService.py +94 -30
  7. py_dpm/Operators/ClauseOperators.py +41 -0
  8. py_dpm/Operators/ConditionalOperators.py +21 -4
  9. py_dpm/{utils → Utils}/ast_serialization.py +289 -37
  10. py_dpm/Utils/operator_mapping.py +3 -2
  11. py_dpm/Utils/tokens.py +1 -0
  12. py_dpm/__init__.py +2 -8
  13. py_dpm/api/__init__.py +73 -4
  14. py_dpm/api/ast_generator.py +6 -3
  15. py_dpm/api/complete_ast.py +558 -21
  16. py_dpm/api/data_dictionary.py +984 -0
  17. py_dpm/api/data_dictionary_validation.py +44 -7
  18. py_dpm/api/migration.py +1 -1
  19. py_dpm/api/operation_scopes.py +1230 -0
  20. py_dpm/api/semantic.py +173 -65
  21. py_dpm/client.py +380 -0
  22. py_dpm/db_utils.py +110 -6
  23. py_dpm/grammar/dist/dpm_xlLexer.interp +5 -1
  24. py_dpm/grammar/dist/dpm_xlLexer.py +882 -708
  25. py_dpm/grammar/dist/dpm_xlLexer.tokens +46 -45
  26. py_dpm/grammar/dist/dpm_xlParser.interp +3 -1
  27. py_dpm/grammar/dist/dpm_xlParser.py +762 -614
  28. py_dpm/grammar/dist/dpm_xlParser.tokens +46 -45
  29. py_dpm/grammar/dist/dpm_xlParserListener.py +11 -3
  30. py_dpm/grammar/dist/dpm_xlParserVisitor.py +7 -3
  31. py_dpm/grammar/dist/listeners.py +1 -1
  32. py_dpm/grammar/dpm_xlLexer.g4 +3 -1
  33. py_dpm/grammar/dpm_xlParser.g4 +5 -2
  34. py_dpm/migration.py +122 -15
  35. py_dpm/models.py +1718 -502
  36. py_dpm/semantics/SemanticAnalyzer.py +26 -9
  37. {pydpm_xl-0.1.10.dist-info → pydpm_xl-0.1.39rc23.dist-info}/METADATA +17 -14
  38. {pydpm_xl-0.1.10.dist-info → pydpm_xl-0.1.39rc23.dist-info}/RECORD +45 -43
  39. {pydpm_xl-0.1.10.dist-info → pydpm_xl-0.1.39rc23.dist-info}/WHEEL +2 -1
  40. pydpm_xl-0.1.39rc23.dist-info/entry_points.txt +2 -0
  41. pydpm_xl-0.1.39rc23.dist-info/top_level.txt +1 -0
  42. py_dpm/utils/__init__.py +0 -0
  43. pydpm_xl-0.1.10.dist-info/entry_points.txt +0 -3
  44. {pydpm_xl-0.1.10.dist-info → pydpm_xl-0.1.39rc23.dist-info/licenses}/LICENSE +0 -0
@@ -1,20 +1,46 @@
 from abc import ABC
 
 import pandas as pd
-
-from py_dpm.AST.ASTObjects import Dimension, OperationRef, PersistentAssignment, PreconditionItem, \
-    Scalar, TemporaryAssignment, VarID, VarRef, WhereClauseOp, WithExpression
+import warnings
+
+# Suppress pandas UserWarning about SQLAlchemy connection types
+warnings.filterwarnings("ignore", message=".*pandas only supports SQLAlchemy.*")
+
+from py_dpm.AST.ASTObjects import (
+    Dimension,
+    GetOp,
+    OperationRef,
+    PersistentAssignment,
+    PreconditionItem,
+    Scalar,
+    TemporaryAssignment,
+    VarID,
+    VarRef,
+    WhereClauseOp,
+    WithExpression,
+)
 from py_dpm.AST.ASTTemplate import ASTTemplate
 from py_dpm.AST.WhereClauseChecker import WhereClauseChecker
 from py_dpm.DataTypes.ScalarTypes import Integer, Mixed, Number, ScalarFactory
 from py_dpm.Exceptions import exceptions
 from py_dpm.Exceptions.exceptions import SemanticError
-from py_dpm.models import ItemCategory, Operation, VariableVersion, ViewDatapoints, \
-    ViewKeyComponents, ViewOpenKeys
+from py_dpm.models import (
+    ItemCategory,
+    Operation,
+    VariableVersion,
+    ViewDatapoints,
+    ViewKeyComponents,
+    ViewOpenKeys,
+    Table,
+    TableVersion,
+    TableVersionHeader,
+    Header,
+    filter_by_release,
+)
 from py_dpm.Utils.operands_mapping import generate_new_label, set_operand_label
 from py_dpm.data_handlers import filter_all_data
 
-operand_elements = ['table', 'rows', 'cols', 'sheets', 'default', 'interval']
+operand_elements = ["table", "rows", "cols", "sheets", "default", "interval"]
 
 
 def _create_operand_label(node):
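The new module-level `warnings.filterwarnings` call silences only the pandas UserWarning about non-SQLAlchemy connections, which this version triggers by handing `pd.read_sql` a raw DBAPI connection further down. A minimal sketch of how such a message-based filter behaves; the warning text below is illustrative, not pandas' exact wording:

```python
import warnings

# The filter matches on the warning text, so only this specific pandas
# UserWarning is silenced; all other warnings still surface.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")  # start from "report everything"
    warnings.filterwarnings("ignore", message=".*pandas only supports SQLAlchemy.*")
    warnings.warn("pandas only supports SQLAlchemy connectables ...", UserWarning)

assert not caught  # the message-specific filter swallowed it
```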
@@ -25,12 +51,12 @@ def _create_operand_label(node):
 def _modify_element_info(new_data, element, table_info):
     if new_data is None and table_info[element] is None:
         pass
-    elif table_info[element] == ['*']:
+    elif table_info[element] == ["*"]:
         pass
     elif new_data is not None and table_info[element] is None:
         table_info[element] = new_data
 
-    elif new_data == ['*']:
+    elif new_data == ["*"]:
         # We have already all data available
         table_info[element] = new_data
 
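The visible branches of `_modify_element_info` treat `["*"]` as "everything already selected": an existing wildcard is never overwritten, and an incoming wildcard always wins. A condensed, runnable copy of just those branches (`_merge_element` is an illustrative name; branches elided from this hunk are omitted):

```python
def _merge_element(new_data, element, table_info):
    # condensed copy of the branches visible in the hunk above
    if new_data is None and table_info[element] is None:
        pass
    elif table_info[element] == ["*"]:
        pass  # existing wildcard already covers all data
    elif new_data is not None and table_info[element] is None:
        table_info[element] = new_data
    elif new_data == ["*"]:
        table_info[element] = new_data  # incoming wildcard wins

info = {"rows": None, "cols": ["0010"], "sheets": ["*"]}
_merge_element(["0020"], "rows", info)    # fills the empty slot
_merge_element(["0030"], "sheets", info)  # "*" already there, unchanged
_merge_element(["*"], "cols", info)       # wildcard replaces ["0010"]
print(info)  # {'rows': ['0020'], 'cols': ['*'], 'sheets': ['*']}
```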
@@ -48,14 +74,61 @@ def _modify_table(node, table_info):
 
 
 def format_missing_data(node):
-    rows = ', '.join([f"r{x}" for x in node.rows]) if node.rows else None
-    cols = ', '.join([f"c{x}" for x in node.cols]) if node.cols else None
-    sheets = ', '.join([f"s{x}" for x in node.sheets]) if node.sheets else None
+    rows = ", ".join([f"r{x}" for x in node.rows]) if node.rows else None
+    cols = ", ".join([f"c{x}" for x in node.cols]) if node.cols else None
+    sheets = ", ".join([f"s{x}" for x in node.sheets]) if node.sheets else None
     op_pos = [node.table, rows, cols, sheets]
     cell_exp = ", ".join(x for x in op_pos if x is not None)
     raise exceptions.SemanticError("1-2", cell_expression=cell_exp)
 
 
+def _has_range_syntax(values):
+    """Check if a list of values contains range syntax (e.g., '0010-0080')."""
+    if not values or not isinstance(values, list):
+        return False
+    return any("-" in str(v) for v in values if v and v != "*")
+
+
+def _expand_ranges_from_data(node, node_data):
+    """
+    Expand range-type values in VarID node's rows/cols/sheets to actual codes from the database.
+
+    When a VarID has range syntax like ['0010-0080'], this function replaces it with
+    the actual codes found in node_data (e.g., ['0010', '0020', '0030', ...]).
+
+    This ensures adam-engine receives scalar values in the JSON output instead of
+    list-type ranges which it cannot parse.
+
+    Args:
+        node: VarID AST node with rows, cols, sheets attributes
+        node_data: DataFrame containing the actual cell data with row_code, column_code, sheet_code
+    """
+    if node_data is None or node_data.empty:
+        return
+
+    # Expand rows if they contain range syntax
+    if _has_range_syntax(node.rows):
+        actual_rows = node_data["row_code"].dropna().unique().tolist()
+        if actual_rows:
+            # Sort to maintain consistent ordering
+            actual_rows = sorted(actual_rows)
+            node.rows = actual_rows
+
+    # Expand cols if they contain range syntax
+    if _has_range_syntax(node.cols):
+        actual_cols = node_data["column_code"].dropna().unique().tolist()
+        if actual_cols:
+            actual_cols = sorted(actual_cols)
+            node.cols = actual_cols
+
+    # Expand sheets if they contain range syntax
+    if _has_range_syntax(node.sheets):
+        actual_sheets = node_data["sheet_code"].dropna().unique().tolist()
+        if actual_sheets:
+            actual_sheets = sorted(actual_sheets)
+            node.sheets = actual_sheets
+
+
 class OperandsChecking(ASTTemplate, ABC):
     def __init__(self, session, expression, ast, release_id, is_scripting=False):
         self.expression = expression
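A minimal usage sketch of the new range-expansion helper, assuming `_expand_ranges_from_data` from the hunk above is in scope; the node is a stand-in carrying only the attributes the helper touches:

```python
from types import SimpleNamespace

import pandas as pd

# '0010-0030' is range syntax; '0010' in cols is a plain code.
node = SimpleNamespace(rows=["0010-0030"], cols=["0010"], sheets=None)

node_data = pd.DataFrame(
    {
        "row_code": ["0010", "0020", "0030", "0020"],  # duplicates collapse
        "column_code": ["0010"] * 4,
        "sheet_code": [None] * 4,
    }
)

_expand_ranges_from_data(node, node_data)
print(node.rows)  # ['0010', '0020', '0030'] -- expanded, sorted, de-duplicated
print(node.cols)  # ['0010'] -- no '-' in the value, so left untouched
```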
@@ -69,7 +142,14 @@ class OperandsChecking(ASTTemplate, ABC):
         self.items = []
         self.preconditions = False
         self.dimension_codes = []
+        self.dimension_nodes = (
+            []
+        )  # Store references to Dimension nodes for property_id enrichment
         self.open_keys = None
+        self.getop_components = []  # Store GetOp component codes for property_id lookup
+        self.getop_nodes = (
+            []
+        )  # Store references to GetOp nodes for property_id enrichment
 
         self.operations = []
         self.operations_data = None
@@ -83,107 +163,228 @@ class OperandsChecking(ASTTemplate, ABC):
         self.check_items()
         self.check_tables()
         self.check_dimensions()
+        self.check_getop_components()
 
         self.check_operations()
 
     def _check_header_present(self, table, header):
-        if (self.partial_selection is not None and self.partial_selection.table == table and
-                getattr(self.partial_selection, header) is not None):
+        if (
+            self.partial_selection is not None
+            and self.partial_selection.table == table
+            and getattr(self.partial_selection, header) is not None
+        ):
             return
         for node in self.operands[table]:
             if getattr(node, header) is None:
-                if header == 'cols':
-                    header = 'columns'
+                if header == "cols":
+                    header = "columns"
                 raise exceptions.SemanticError("1-20", header=header, table=table)
 
     def check_headers(self):
         table_codes = list(self.tables.keys())
         if len(table_codes) == 0:
             return
-        query = """
-        SELECT DISTINCT tv.Code, tv.StartReleaseID, tv.EndReleaseID, h.Direction, t.HasOpenRows, t.HasOpenColumns, t.HasOpenSheets
-        FROM [Table] AS t
-        INNER JOIN TableVersion tv ON t.TableID = tv.TableID
-        INNER JOIN TableVersionHeader tvh ON tv.TableVID = tvh.TableVID
-        INNER JOIN Header h ON h.HeaderID = tvh.HeaderID
-        """
-        codes = [f"{code!r}" for code in table_codes]
-        query += f"WHERE tv.Code IN ({', '.join(codes)})"
-        query += "AND tv.EndReleaseID is null"
-        df_headers = pd.read_sql(query, self.session.connection())
+
+        # Build ORM query
+        query = (
+            self.session.query(
+                TableVersion.code.label("Code"),
+                TableVersion.startreleaseid.label("StartReleaseID"),
+                TableVersion.endreleaseid.label("EndReleaseID"),
+                Header.direction.label("Direction"),
+                Table.hasopenrows.label("HasOpenRows"),
+                Table.hasopencolumns.label("HasOpenColumns"),
+                Table.hasopensheets.label("HasOpenSheets"),
+            )
+            .join(Table, Table.tableid == TableVersion.tableid)
+            .join(
+                TableVersionHeader, TableVersion.tablevid == TableVersionHeader.tablevid
+            )
+            .join(Header, Header.headerid == TableVersionHeader.headerid)
+            .filter(TableVersion.code.in_(table_codes))
+            .distinct()
+        )
+
+        # Apply release filter
+        query = filter_by_release(
+            query,
+            TableVersion.startreleaseid,
+            TableVersion.endreleaseid,
+            self.release_id,
+        )
+
+        # Execute query and convert to DataFrame
+        from py_dpm.models import _compile_query_for_pandas
+
+        compiled_query = _compile_query_for_pandas(query.statement, self.session)
+        df_headers = pd.read_sql(compiled_query, self.session.connection().connection)
+
         for table in table_codes:
-            table_headers = df_headers[df_headers['Code'] == table]
-            open_rows = table_headers['HasOpenRows'].values[0]
-            open_cols = table_headers['HasOpenColumns'].values[0]
-            open_sheets = table_headers['HasOpenSheets'].values[0]
+            table_headers = df_headers[df_headers["Code"] == table]
             if table_headers.empty:
                 continue
-            if "Y" in table_headers['Direction'].values and not open_rows:
-                self._check_header_present(table, 'rows')
-            if "X" in table_headers['Direction'].values and not open_cols:
-                self._check_header_present(table, 'cols')
-            if "Z" in table_headers['Direction'].values and not open_sheets:
-                self._check_header_present(table, 'sheets')
-
+            open_rows = table_headers["HasOpenRows"].values[0]
+            open_cols = table_headers["HasOpenColumns"].values[0]
+            open_sheets = table_headers["HasOpenSheets"].values[0]
+            if "Y" in table_headers["Direction"].values and not open_rows:
+                self._check_header_present(table, "rows")
+            if "X" in table_headers["Direction"].values and not open_cols:
+                self._check_header_present(table, "cols")
+            if "Z" in table_headers["Direction"].values and not open_sheets:
+                self._check_header_present(table, "sheets")
 
     def check_items(self):
         if len(self.items) == 0:
             return
         df_items = ItemCategory.get_items(self.session, self.items, self.release_id)
         if len(df_items.iloc[:, 0].values) < len(set(self.items)):
-            not_found_items = list(set(self.items).difference(set(df_items['Signature'])))
+            not_found_items = list(
+                set(self.items).difference(set(df_items["Signature"]))
+            )
             raise exceptions.SemanticError("1-1", items=not_found_items)
 
     def check_dimensions(self):
         if len(self.dimension_codes) == 0:
             return
-        self.open_keys = ViewOpenKeys.get_keys(self.session, self.dimension_codes, self.release_id)
+        self.open_keys = ViewOpenKeys.get_keys(
+            self.session, self.dimension_codes, self.release_id
+        )
         if len(self.open_keys) < len(self.dimension_codes):
-            not_found_dimensions = list(set(self.dimension_codes).difference(self.open_keys['property_code']))
+            not_found_dimensions = list(
+                set(self.dimension_codes).difference(self.open_keys["property_code"])
+            )
             raise exceptions.SemanticError("1-5", open_keys=not_found_dimensions)
 
+        # Enrich Dimension nodes with property_id from open_keys
+        # This is required by adam-engine for WHERE clause resolution
+        if self.open_keys is not None and not self.open_keys.empty:
+            # Create a mapping from dimension_code to property_id
+            property_id_map = dict(
+                zip(self.open_keys["property_code"], self.open_keys["property_id"])
+            )
+            for node in self.dimension_nodes:
+                if node.dimension_code in property_id_map:
+                    node.property_id = int(property_id_map[node.dimension_code])
+
+    def check_getop_components(self):
+        """Check and enrich GetOp nodes with property_id for their component codes.
+
+        GetOp components (like qEGS, qLGS) are property codes that need to be
+        resolved to property_id for adam-engine to process them correctly.
+        """
+        if len(self.getop_components) == 0:
+            return
+
+        # Query property_ids for GetOp components (same query as dimensions)
+        getop_keys = ViewOpenKeys.get_keys(
+            self.session, self.getop_components, self.release_id
+        )
+
+        if len(getop_keys) < len(self.getop_components):
+            not_found_components = list(
+                set(self.getop_components).difference(getop_keys["property_code"])
+            )
+            raise exceptions.SemanticError("1-5", open_keys=not_found_components)
+
+        # Enrich GetOp nodes with property_id
+        # This is required by adam-engine for [get ...] operations
+        if getop_keys is not None and not getop_keys.empty:
+            # Create a mapping from component code to property_id
+            property_id_map = dict(
+                zip(getop_keys["property_code"], getop_keys["property_id"])
+            )
+            for node in self.getop_nodes:
+                if node.component in property_id_map:
+                    node.property_id = int(property_id_map[node.component])
+
     def check_tables(self):
         for table, value in self.tables.items():
             # Extract all data and filter to get only necessary data
             table_info = value
-            df_table = ViewDatapoints.get_table_data(self.session, table, table_info['rows'],
-                                                     table_info['cols'], table_info['sheets'], self.release_id)
-            if df_table.empty:
-                cell_expression = f'table: {table}'
-                for k, v in table_info.items():
-                    if v:
-                        cell_expression += f' {k}: {v}'
-                raise exceptions.SemanticError("1-2", cell_expression=cell_expression)
-
+            df_table = ViewDatapoints.get_table_data(
+                self.session,
+                table,
+                table_info["rows"],
+                table_info["cols"],
+                table_info["sheets"],
+                self.release_id,
+            )
             # Insert data type on each node by selecting only data required by node
             for node in self.operands[table]:
-                node_data = filter_all_data(df_table, table, node.rows, node.cols, node.sheets)
+                node_data = filter_all_data(
+                    df_table, table, node.rows, node.cols, node.sheets
+                )
                 # Checking grey cells (no variable ID in data for that cell)
-                grey_cells_data = node_data[node_data['variable_id'].isnull()]
+                grey_cells_data = node_data[node_data["variable_id"].isnull()]
                 if not grey_cells_data.empty:
                     if len(grey_cells_data) > 10:
                         list_cells = grey_cells_data["cell_code"].values[:10]
                     else:
                         list_cells = grey_cells_data["cell_code"].values
-                    cell_expression = ', '.join(list_cells)
-                    raise exceptions.SemanticError("1-17", cell_expression=cell_expression)
+                    cell_expression = ", ".join(list_cells)
+                    raise exceptions.SemanticError(
+                        "1-17", cell_expression=cell_expression
+                    )
                 if node_data.empty:
                     format_missing_data(node)
-                extract_data_types(node, node_data['data_type'])
-                if df_table['sheet_code'].isnull().all() and node.sheets is not None and '*' in node.sheets:
-                    raise SemanticError("1-18")
-                if df_table['row_code'].isnull().all() and node.rows is not None and '*' in node.rows:
-                    raise SemanticError("1-19")
-                del node_data
+                extract_data_types(node, node_data["data_type"])
+
+                # Check for invalid sheet wildcards
+                if (
+                    df_table["sheet_code"].isnull().all()
+                    and node.sheets is not None
+                    and "*" in node.sheets
+                ):
+                    # Check if s* is required to avoid duplicate (row, column) combinations
+                    # Group by (row_code, column_code) and check for duplicates
+                    # IMPORTANT: Include NA/NULL values in grouping (dropna=False)
+                    df_without_sheets = df_table.groupby(
+                        ["row_code", "column_code"], dropna=False
+                    ).size()
+                    has_duplicates = (df_without_sheets > 1).any()
+
+                    if not has_duplicates:
+                        # Only raise error if sheets are truly not needed (no duplicates without them)
+                        raise SemanticError("1-18")
+                    # else: s* is required even though sheet_code is NULL, so allow it
+
+                # Check for invalid row wildcards
+                if (
+                    df_table["row_code"].isnull().all()
+                    and node.rows is not None
+                    and "*" in node.rows
+                ):
+                    # Check if r* is required to avoid duplicate (column, sheet) combinations
+                    # IMPORTANT: Include NA/NULL values in grouping (dropna=False)
+                    df_without_rows = df_table.groupby(
+                        ["column_code", "sheet_code"], dropna=False
+                    ).size()
+                    has_duplicates = (df_without_rows > 1).any()
+
+                    if not has_duplicates:
+                        # Only raise error if rows are truly not needed
+                        raise SemanticError("1-19")
+                    # else: r* is required even though row_code is NULL, so allow it
+
+                # Attach node_data to the VarID node for later serialization
+                # This enables the JSON serializer to output actual cell data
+                node.data = node_data
+
+                # Expand range-type values in rows/cols/sheets to actual codes from the database
+                # This ensures adam-engine receives scalar values, not list-type ranges
+                _expand_ranges_from_data(node, node_data)
 
             # Adding data to self.data
             if self.data is None:
                 self.data = df_table
             else:
-                self.data: pd.DataFrame = pd.concat([self.data, df_table], axis=0).reset_index(drop=True)
-
+                self.data: pd.DataFrame = pd.concat(
+                    [self.data, df_table], axis=0
+                ).reset_index(drop=True)
 
-            self.key_components[table] = ViewKeyComponents.get_by_table(self.session, table, self.release_id)
+            self.key_components[table] = ViewKeyComponents.get_by_table(
+                self.session, table, self.release_id
+            )
 
     # Start of visiting nodes
     def visit_WithExpression(self, node: WithExpression):
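The `dropna=False` in the new wildcard checks is load-bearing: when an entire axis is NULL, pandas' default `dropna=True` drops every group keyed on that axis, so duplicates would go undetected and the wildcard would be rejected as redundant. A self-contained illustration of the `r*` case:

```python
import pandas as pd

# Without the row axis, rows are redundant only if each (column, sheet)
# pair occurs exactly once.
df_table = pd.DataFrame(
    {
        "row_code": ["0010", "0020", "0030"],
        "column_code": ["0010", "0010", "0010"],
        "sheet_code": [None, None, None],  # table has no sheet axis
    }
)

# dropna=False keeps the NULL sheet_code as a grouping key.
sizes = df_table.groupby(["column_code", "sheet_code"], dropna=False).size()
print((sizes > 1).any())  # True -> r* is genuinely required

# With the default dropna=True the all-NULL group vanishes entirely,
# hiding the duplicates:
print(df_table.groupby(["column_code", "sheet_code"]).size().empty)  # True
```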
@@ -199,7 +400,10 @@
 
         if self.partial_selection:
             for attribute in operand_elements:
-                if getattr(node, attribute, None) is None and not getattr(self.partial_selection, attribute, None) is None:
+                if (
+                    getattr(node, attribute, None) is None
+                    and not getattr(self.partial_selection, attribute, None) is None
+                ):
                     setattr(node, attribute, getattr(self.partial_selection, attribute))
 
         if not node.table:
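This block fills any selection element the operand leaves unset from the enclosing partial selection. A runnable sketch of that inheritance step with stand-in objects (`partial` and `node` below are illustrative, not py_dpm types):

```python
from types import SimpleNamespace

operand_elements = ["table", "rows", "cols", "sheets", "default", "interval"]

partial = SimpleNamespace(table="T01.00", rows=["0010"], cols=None,
                          sheets=None, default=None, interval=None)
node = SimpleNamespace(table=None, rows=None, cols=["0020"],
                       sheets=None, default=None, interval=None)

for attribute in operand_elements:
    # copy only elements unset on the operand but set on the selection
    if getattr(node, attribute, None) is None and getattr(partial, attribute, None) is not None:
        setattr(node, attribute, getattr(partial, attribute))

print(node.table, node.rows, node.cols)  # T01.00 ['0010'] ['0020']
```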
@@ -208,11 +412,7 @@
         _create_operand_label(node)
         set_operand_label(node.label, node)
 
-        table_info = {
-            'rows': node.rows,
-            'cols': node.cols,
-            'sheets': node.sheets
-        }
+        table_info = {"rows": node.rows, "cols": node.cols, "sheets": node.sheets}
 
         if node.table not in self.tables:
             self.tables[node.table] = table_info
@@ -224,17 +424,32 @@
     def visit_Dimension(self, node: Dimension):
         if node.dimension_code not in self.dimension_codes:
             self.dimension_codes.append(node.dimension_code)
+            # Store reference to node for property_id enrichment
+            self.dimension_nodes.append(node)
+
+    def visit_GetOp(self, node: GetOp):
+        """Visit GetOp nodes to collect component codes for property_id lookup."""
+        if node.component not in self.getop_components:
+            self.getop_components.append(node.component)
+            # Store reference to node for property_id enrichment
+            self.getop_nodes.append(node)
+        # Visit the operand to ensure it gets processed (e.g., VarID nodes inside GetOp)
+        self.visit(node.operand)
 
     def visit_VarRef(self, node: VarRef):
-        if not VariableVersion.check_variable_exists(self.session, node.variable, self.release_id):
-            raise exceptions.SemanticError('1-3', variable=node.variable)
+        if not VariableVersion.check_variable_exists(
+            self.session, node.variable, self.release_id
+        ):
+            raise exceptions.SemanticError("1-3", variable=node.variable)
 
     def visit_PreconditionItem(self, node: PreconditionItem):
 
         if self.is_scripting:
-            raise exceptions.SemanticError('6-3', precondition=node.variable_code)
+            raise exceptions.SemanticError("6-3", precondition=node.variable_code)
 
-        if not VariableVersion.check_variable_exists(self.session, node.variable_code, self.release_id):
+        if not VariableVersion.check_variable_exists(
+            self.session, node.variable_code, self.release_id
+        ):
             raise exceptions.SemanticError("1-3", variable=node.variable_code)
 
         self.preconditions = True
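`visit_GetOp` overrides the default traversal, so it recurses into `node.operand` explicitly; otherwise a `VarID` nested inside a `[get ...]` expression would never be checked. A minimal name-dispatch visitor showing the effect (illustrative only; py_dpm's `ASTTemplate` may dispatch differently):

```python
class GetOp:
    def __init__(self, component, operand):
        self.component = component
        self.operand = operand
        self.property_id = None

class VarID:
    def __init__(self, table):
        self.table = table

class MiniVisitor:
    def visit(self, node):
        # dispatch on the node's class name, mirroring visit_* conventions
        method = getattr(self, f"visit_{type(node).__name__}", None)
        return method(node) if method else None

    def visit_GetOp(self, node):
        print("collect component:", node.component)
        self.visit(node.operand)  # without this, the inner VarID is skipped

    def visit_VarID(self, node):
        print("check table:", node.table)

MiniVisitor().visit(GetOp("qEGS", VarID("T01.00")))
# collect component: qEGS
# check table: T01.00
```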
@@ -242,7 +457,7 @@
         set_operand_label(node.label, node)
 
     def visit_Scalar(self, node: Scalar):
-        if node.item and node.scalar_type == 'Item':
+        if node.item and node.scalar_type == "Item":
             if node.item not in self.items:
                 self.items.append(node.item)
 
@@ -268,11 +483,16 @@
 
     def check_operations(self):
         if len(self.operations):
-            df_operations = Operation.get_operations_from_codes(session=self.session, operation_codes=self.operations,
-                                                                release_id=self.release_id)
+            df_operations = Operation.get_operations_from_codes(
+                session=self.session,
+                operation_codes=self.operations,
+                release_id=self.release_id,
+            )
             if len(df_operations.values) < len(self.operations):
-                not_found_operations = list(set(self.operations).difference(set(df_operations['Code'])))
-                raise exceptions.SemanticError('1-8', operations=not_found_operations)
+                not_found_operations = list(
+                    set(self.operations).difference(set(df_operations["Code"]))
+                )
+                raise exceptions.SemanticError("1-8", operations=not_found_operations)
             self.operations_data = df_operations
 
 
@@ -292,7 +512,10 @@ def extract_data_types(node: VarID, database_types: pd.Series) -> None:
         else:
             setattr(node, "type", data_type())
     else:
-        data_types = {scalar_factory.database_types_mapping(data_type) for data_type in unique_types}
+        data_types = {
+            scalar_factory.database_types_mapping(data_type)
+            for data_type in unique_types
+        }
         if len(data_types) == 1:
             data_type = data_types.pop()
             setattr(node, "type", data_type())
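`extract_data_types` collapses the distinct database types found in an operand's cells into a single scalar type via `ScalarFactory.database_types_mapping`, keeping a mixed type when the mapped types disagree. A sketch of that collapsing pattern; `_DB_TYPE_MAP` and the string type names are hypothetical stand-ins for py_dpm's real factory mapping:

```python
import pandas as pd

_DB_TYPE_MAP = {"i": "Integer", "p": "Number", "m": "Number"}  # illustrative

def collapse_types(database_types: pd.Series) -> str:
    unique_types = database_types.unique()
    # map each raw database type, then deduplicate the results
    mapped = {_DB_TYPE_MAP.get(t, "Mixed") for t in unique_types}
    # exactly one mapped type -> use it; several -> the operand is Mixed
    return mapped.pop() if len(mapped) == 1 else "Mixed"

print(collapse_types(pd.Series(["i", "i"])))  # Integer
print(collapse_types(pd.Series(["p", "m"])))  # Number (both map the same)
print(collapse_types(pd.Series(["i", "p"])))  # Mixed
```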