pydpm_xl 0.1.39rc32-py3-none-any.whl → 0.2.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (123)
  1. py_dpm/__init__.py +1 -1
  2. py_dpm/api/__init__.py +58 -189
  3. py_dpm/api/dpm/__init__.py +20 -0
  4. py_dpm/api/{data_dictionary.py → dpm/data_dictionary.py} +903 -984
  5. py_dpm/api/dpm/explorer.py +236 -0
  6. py_dpm/api/dpm/hierarchical_queries.py +142 -0
  7. py_dpm/api/{migration.py → dpm/migration.py} +16 -19
  8. py_dpm/api/{operation_scopes.py → dpm/operation_scopes.py} +319 -267
  9. py_dpm/api/dpm_xl/__init__.py +25 -0
  10. py_dpm/api/{ast_generator.py → dpm_xl/ast_generator.py} +3 -3
  11. py_dpm/api/{complete_ast.py → dpm_xl/complete_ast.py} +191 -167
  12. py_dpm/api/dpm_xl/semantic.py +354 -0
  13. py_dpm/api/{syntax.py → dpm_xl/syntax.py} +6 -5
  14. py_dpm/api/explorer.py +4 -0
  15. py_dpm/api/semantic.py +30 -306
  16. py_dpm/cli/__init__.py +9 -0
  17. py_dpm/{client.py → cli/main.py} +8 -8
  18. py_dpm/dpm/__init__.py +11 -0
  19. py_dpm/{models.py → dpm/models.py} +112 -88
  20. py_dpm/dpm/queries/base.py +100 -0
  21. py_dpm/dpm/queries/basic_objects.py +33 -0
  22. py_dpm/dpm/queries/explorer_queries.py +352 -0
  23. py_dpm/dpm/queries/filters.py +139 -0
  24. py_dpm/dpm/queries/glossary.py +45 -0
  25. py_dpm/dpm/queries/hierarchical_queries.py +838 -0
  26. py_dpm/dpm/queries/tables.py +133 -0
  27. py_dpm/dpm/utils.py +356 -0
  28. py_dpm/dpm_xl/__init__.py +8 -0
  29. py_dpm/dpm_xl/ast/__init__.py +14 -0
  30. py_dpm/{AST/ASTConstructor.py → dpm_xl/ast/constructor.py} +6 -6
  31. py_dpm/{AST/MLGeneration.py → dpm_xl/ast/ml_generation.py} +137 -87
  32. py_dpm/{AST/ModuleAnalyzer.py → dpm_xl/ast/module_analyzer.py} +7 -7
  33. py_dpm/{AST/ModuleDependencies.py → dpm_xl/ast/module_dependencies.py} +56 -41
  34. py_dpm/{AST/ASTObjects.py → dpm_xl/ast/nodes.py} +1 -1
  35. py_dpm/{AST/check_operands.py → dpm_xl/ast/operands.py} +16 -13
  36. py_dpm/{AST/ASTTemplate.py → dpm_xl/ast/template.py} +2 -2
  37. py_dpm/{AST/WhereClauseChecker.py → dpm_xl/ast/where_clause.py} +2 -2
  38. py_dpm/dpm_xl/grammar/__init__.py +18 -0
  39. py_dpm/dpm_xl/operators/__init__.py +19 -0
  40. py_dpm/{Operators/AggregateOperators.py → dpm_xl/operators/aggregate.py} +7 -7
  41. py_dpm/{Operators/NumericOperators.py → dpm_xl/operators/arithmetic.py} +6 -6
  42. py_dpm/{Operators/Operator.py → dpm_xl/operators/base.py} +5 -5
  43. py_dpm/{Operators/BooleanOperators.py → dpm_xl/operators/boolean.py} +5 -5
  44. py_dpm/{Operators/ClauseOperators.py → dpm_xl/operators/clause.py} +8 -8
  45. py_dpm/{Operators/ComparisonOperators.py → dpm_xl/operators/comparison.py} +5 -5
  46. py_dpm/{Operators/ConditionalOperators.py → dpm_xl/operators/conditional.py} +7 -7
  47. py_dpm/{Operators/StringOperators.py → dpm_xl/operators/string.py} +5 -5
  48. py_dpm/{Operators/TimeOperators.py → dpm_xl/operators/time.py} +6 -6
  49. py_dpm/{semantics/SemanticAnalyzer.py → dpm_xl/semantic_analyzer.py} +168 -68
  50. py_dpm/{semantics/Symbols.py → dpm_xl/symbols.py} +3 -3
  51. py_dpm/dpm_xl/types/__init__.py +13 -0
  52. py_dpm/{DataTypes/TypePromotion.py → dpm_xl/types/promotion.py} +2 -2
  53. py_dpm/{DataTypes/ScalarTypes.py → dpm_xl/types/scalar.py} +2 -2
  54. py_dpm/dpm_xl/utils/__init__.py +14 -0
  55. py_dpm/{data_handlers.py → dpm_xl/utils/data_handlers.py} +2 -2
  56. py_dpm/{Utils → dpm_xl/utils}/operands_mapping.py +1 -1
  57. py_dpm/{Utils → dpm_xl/utils}/operator_mapping.py +8 -8
  58. py_dpm/{OperationScopes/OperationScopeService.py → dpm_xl/utils/scopes_calculator.py} +148 -58
  59. py_dpm/{Utils/ast_serialization.py → dpm_xl/utils/serialization.py} +2 -2
  60. py_dpm/dpm_xl/validation/__init__.py +12 -0
  61. py_dpm/{Utils/ValidationsGenerationUtils.py → dpm_xl/validation/generation_utils.py} +2 -3
  62. py_dpm/{ValidationsGeneration/PropertiesConstraintsProcessor.py → dpm_xl/validation/property_constraints.py} +56 -21
  63. py_dpm/{ValidationsGeneration/auxiliary_functions.py → dpm_xl/validation/utils.py} +2 -2
  64. py_dpm/{ValidationsGeneration/VariantsProcessor.py → dpm_xl/validation/variants.py} +149 -55
  65. py_dpm/exceptions/__init__.py +23 -0
  66. py_dpm/{Exceptions → exceptions}/exceptions.py +7 -2
  67. pydpm_xl-0.2.0.dist-info/METADATA +278 -0
  68. pydpm_xl-0.2.0.dist-info/RECORD +88 -0
  69. pydpm_xl-0.2.0.dist-info/entry_points.txt +2 -0
  70. py_dpm/Exceptions/__init__.py +0 -0
  71. py_dpm/OperationScopes/__init__.py +0 -0
  72. py_dpm/Operators/__init__.py +0 -0
  73. py_dpm/Utils/__init__.py +0 -0
  74. py_dpm/Utils/utils.py +0 -2
  75. py_dpm/ValidationsGeneration/Utils.py +0 -364
  76. py_dpm/ValidationsGeneration/__init__.py +0 -0
  77. py_dpm/api/data_dictionary_validation.py +0 -614
  78. py_dpm/db_utils.py +0 -221
  79. py_dpm/grammar/__init__.py +0 -0
  80. py_dpm/grammar/dist/__init__.py +0 -0
  81. py_dpm/grammar/dpm_xlLexer.g4 +0 -437
  82. py_dpm/grammar/dpm_xlParser.g4 +0 -263
  83. py_dpm/semantics/DAG/DAGAnalyzer.py +0 -158
  84. py_dpm/semantics/DAG/__init__.py +0 -0
  85. py_dpm/semantics/__init__.py +0 -0
  86. py_dpm/views/data_types.sql +0 -12
  87. py_dpm/views/datapoints.sql +0 -65
  88. py_dpm/views/hierarchy_operand_reference.sql +0 -11
  89. py_dpm/views/hierarchy_preconditions.sql +0 -13
  90. py_dpm/views/hierarchy_variables.sql +0 -26
  91. py_dpm/views/hierarchy_variables_context.sql +0 -14
  92. py_dpm/views/key_components.sql +0 -18
  93. py_dpm/views/module_from_table.sql +0 -11
  94. py_dpm/views/open_keys.sql +0 -13
  95. py_dpm/views/operation_info.sql +0 -27
  96. py_dpm/views/operation_list.sql +0 -18
  97. py_dpm/views/operations_versions_from_module_version.sql +0 -30
  98. py_dpm/views/precondition_info.sql +0 -17
  99. py_dpm/views/report_type_operand_reference_info.sql +0 -18
  100. py_dpm/views/subcategory_info.sql +0 -17
  101. py_dpm/views/table_info.sql +0 -19
  102. pydpm_xl-0.1.39rc32.dist-info/METADATA +0 -53
  103. pydpm_xl-0.1.39rc32.dist-info/RECORD +0 -96
  104. pydpm_xl-0.1.39rc32.dist-info/entry_points.txt +0 -2
  105. /py_dpm/{AST → cli/commands}/__init__.py +0 -0
  106. /py_dpm/{migration.py → dpm/migration.py} +0 -0
  107. /py_dpm/{AST/ASTVisitor.py → dpm_xl/ast/visitor.py} +0 -0
  108. /py_dpm/{DataTypes → dpm_xl/grammar/generated}/__init__.py +0 -0
  109. /py_dpm/{grammar/dist → dpm_xl/grammar/generated}/dpm_xlLexer.interp +0 -0
  110. /py_dpm/{grammar/dist → dpm_xl/grammar/generated}/dpm_xlLexer.py +0 -0
  111. /py_dpm/{grammar/dist → dpm_xl/grammar/generated}/dpm_xlLexer.tokens +0 -0
  112. /py_dpm/{grammar/dist → dpm_xl/grammar/generated}/dpm_xlParser.interp +0 -0
  113. /py_dpm/{grammar/dist → dpm_xl/grammar/generated}/dpm_xlParser.py +0 -0
  114. /py_dpm/{grammar/dist → dpm_xl/grammar/generated}/dpm_xlParser.tokens +0 -0
  115. /py_dpm/{grammar/dist → dpm_xl/grammar/generated}/dpm_xlParserListener.py +0 -0
  116. /py_dpm/{grammar/dist → dpm_xl/grammar/generated}/dpm_xlParserVisitor.py +0 -0
  117. /py_dpm/{grammar/dist → dpm_xl/grammar/generated}/listeners.py +0 -0
  118. /py_dpm/{DataTypes/TimeClasses.py → dpm_xl/types/time.py} +0 -0
  119. /py_dpm/{Utils → dpm_xl/utils}/tokens.py +0 -0
  120. /py_dpm/{Exceptions → exceptions}/messages.py +0 -0
  121. {pydpm_xl-0.1.39rc32.dist-info → pydpm_xl-0.2.0.dist-info}/WHEEL +0 -0
  122. {pydpm_xl-0.1.39rc32.dist-info → pydpm_xl-0.2.0.dist-info}/licenses/LICENSE +0 -0
  123. {pydpm_xl-0.1.39rc32.dist-info → pydpm_xl-0.2.0.dist-info}/top_level.txt +0 -0
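Most of this release is a package-layout refactor: the flat top-level modules (AST, Operators, DataTypes, Utils, semantics, ValidationsGeneration, db_utils, models) move under two subpackages, py_dpm.dpm for database/model code and py_dpm.dpm_xl for DPM-XL language code. The diff body below, from the renamed complete_ast.py, is accordingly mostly import-path updates plus Black-style reformatting (single to double quotes, magic trailing commas, rewrapped calls). As a minimal sketch of how downstream imports change, using only module paths that appear in the rename list above (whether 0.2.0 also keeps compatibility re-exports is not shown in this diff):

    # 0.1.39rc32 layout
    from py_dpm.db_utils import get_engine, get_session
    from py_dpm.models import TableVersion, Release
    from py_dpm.Utils.ast_serialization import ASTToJSONVisitor
    from py_dpm.AST.check_operands import OperandsChecking

    # 0.2.0 layout (same symbols, new module paths)
    from py_dpm.dpm.utils import get_engine, get_session
    from py_dpm.dpm.models import TableVersion, Release
    from py_dpm.dpm_xl.utils.serialization import ASTToJSONVisitor
    from py_dpm.dpm_xl.ast.operands import OperandsChecking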
py_dpm/api/{complete_ast.py → dpm_xl/complete_ast.py} +191 -167

@@ -11,14 +11,14 @@ Also provides enrichment functionality to create engine-ready ASTs with framewor
 
 from datetime import datetime
 from typing import Dict, Any, Optional
-from py_dpm.Utils.ast_serialization import ASTToJSONVisitor
+from py_dpm.dpm_xl.utils.serialization import ASTToJSONVisitor
 
 
 def generate_complete_ast(
     expression: str,
     database_path: str = None,
     connection_url: str = None,
-    release_id: Optional[int] = None
+    release_id: Optional[int] = None,
 ):
     """
     Generate complete AST with all data fields, exactly like json_scripts examples.
@@ -45,19 +45,21 @@ def generate_complete_ast(
     try:
         # Import here to avoid circular imports
         from py_dpm.api import API
-        from py_dpm.db_utils import get_engine
+        from py_dpm.dpm.utils import get_engine
 
         # Initialize database connection if provided
         if connection_url or database_path:
             try:
-                engine = get_engine(database_path=database_path, connection_url=connection_url)
+                engine = get_engine(
+                    database_path=database_path, connection_url=connection_url
+                )
             except Exception as e:
                 return {
-                    'success': False,
-                    'ast': None,
-                    'context': None,
-                    'error': f'Database connection failed: {e}',
-                    'data_populated': False
+                    "success": False,
+                    "ast": None,
+                    "context": None,
+                    "error": f"Database connection failed: {e}",
+                    "data_populated": False,
                 }
 
         # Use the legacy API which does complete semantic validation
@@ -68,20 +70,19 @@ def generate_complete_ast(
         # This should populate all data fields on VarID nodes
         semantic_result = api.semantic_validation(expression, release_id=release_id)
 
-
         # Force data population if semantic validation completed successfully
-        if hasattr(api, 'AST') and api.AST and semantic_result:
+        if hasattr(api, "AST") and api.AST and semantic_result:
             try:
-                from py_dpm.AST.check_operands import OperandsChecking
-                from py_dpm.db_utils import get_session
+                from py_dpm.dpm_xl.ast.operands import OperandsChecking
+                from py_dpm.dpm.utils import get_session
 
                 session = get_session()
 
                 # Extract the expression AST
                 def get_inner_ast(ast_obj):
-                    if hasattr(ast_obj, 'children') and len(ast_obj.children) > 0:
+                    if hasattr(ast_obj, "children") and len(ast_obj.children) > 0:
                         child = ast_obj.children[0]
-                        if hasattr(child, 'expression'):
+                        if hasattr(child, "expression"):
                             return child.expression
                         else:
                             return child
@@ -94,33 +95,40 @@ def generate_complete_ast(
                     session=session,
                     expression=expression,
                     ast=inner_ast,
-                    release_id=release_id
+                    release_id=release_id,
                 )
 
                 # Apply the data from operand checker to VarID nodes
-                if hasattr(oc, 'data') and oc.data is not None:
+                if hasattr(oc, "data") and oc.data is not None:
 
                     # Apply data to VarID nodes in the AST
                     def apply_data_to_varids(node):
-                        if hasattr(node, '__class__') and node.__class__.__name__ == 'VarID':
-                            table = getattr(node, 'table', None)
-                            rows = getattr(node, 'rows', None)
-                            cols = getattr(node, 'cols', None)
+                        if (
+                            hasattr(node, "__class__")
+                            and node.__class__.__name__ == "VarID"
+                        ):
+                            table = getattr(node, "table", None)
+                            rows = getattr(node, "rows", None)
+                            cols = getattr(node, "cols", None)
 
                             if table and table in oc.operands:
                                 # Filter data for this specific VarID
                                 # Start with table filter
-                                filter_mask = (oc.data['table_code'] == table)
+                                filter_mask = oc.data["table_code"] == table
 
                                 # Add row filter only if rows is not None and doesn't contain wildcards
                                 # IMPORTANT: If rows contains '*', include all rows (don't filter)
-                                if rows is not None and '*' not in rows:
-                                    filter_mask = filter_mask & (oc.data['row_code'].isin(rows))
+                                if rows is not None and "*" not in rows:
+                                    filter_mask = filter_mask & (
+                                        oc.data["row_code"].isin(rows)
+                                    )
 
                                 # Add column filter only if cols is not None and doesn't contain wildcards
                                 # IMPORTANT: If cols contains '*', include all columns (don't filter)
-                                if cols is not None and '*' not in cols:
-                                    filter_mask = filter_mask & (oc.data['column_code'].isin(cols))
+                                if cols is not None and "*" not in cols:
+                                    filter_mask = filter_mask & (
+                                        oc.data["column_code"].isin(cols)
+                                    )
 
                                 filtered_data = oc.data[filter_mask]
 
@@ -132,22 +140,32 @@ def generate_complete_ast(
                                 # we want only the specific entries.
 
                                 # Remove rows where column_code is NULL if there are non-NULL column_code entries
-                                if filtered_data['column_code'].notna().any():
-                                    filtered_data = filtered_data[filtered_data['column_code'].notna()]
+                                if filtered_data["column_code"].notna().any():
+                                    filtered_data = filtered_data[
+                                        filtered_data["column_code"].notna()
+                                    ]
 
                                 # Remove rows where row_code is NULL if there are non-NULL row_code entries
-                                if filtered_data['row_code'].notna().any():
-                                    filtered_data = filtered_data[filtered_data['row_code'].notna()]
+                                if filtered_data["row_code"].notna().any():
+                                    filtered_data = filtered_data[
+                                        filtered_data["row_code"].notna()
+                                    ]
 
                                 # Remove rows where sheet_code is NULL if there are non-NULL sheet_code entries
-                                if filtered_data['sheet_code'].notna().any():
-                                    filtered_data = filtered_data[filtered_data['sheet_code'].notna()]
+                                if filtered_data["sheet_code"].notna().any():
+                                    filtered_data = filtered_data[
+                                        filtered_data["sheet_code"].notna()
+                                    ]
 
                                 # IMPORTANT: After filtering, remove any remaining duplicates
                                 # based on (row_code, column_code, sheet_code) combination
                                 filtered_data = filtered_data.drop_duplicates(
-                                    subset=['row_code', 'column_code', 'sheet_code'],
-                                    keep='first'
+                                    subset=[
+                                        "row_code",
+                                        "column_code",
+                                        "sheet_code",
+                                    ],
+                                    keep="first",
                                 )
 
                                 # Set the data attribute on the VarID node
@@ -155,14 +173,24 @@ def generate_complete_ast(
                                 node.data = filtered_data
 
                         # Recursively apply to child nodes
-                        for attr_name in ['children', 'left', 'right', 'operand', 'operands', 'expression', 'condition', 'then_expr', 'else_expr']:
+                        for attr_name in [
+                            "children",
+                            "left",
+                            "right",
+                            "operand",
+                            "operands",
+                            "expression",
+                            "condition",
+                            "then_expr",
+                            "else_expr",
+                        ]:
                             if hasattr(node, attr_name):
                                 attr_value = getattr(node, attr_name)
                                 if isinstance(attr_value, list):
                                     for item in attr_value:
-                                        if hasattr(item, '__class__'):
+                                        if hasattr(item, "__class__"):
                                             apply_data_to_varids(item)
-                                elif attr_value and hasattr(attr_value, '__class__'):
+                                elif attr_value and hasattr(attr_value, "__class__"):
                                     apply_data_to_varids(attr_value)
 
                 # Apply data to all VarID nodes in the AST
@@ -172,12 +200,12 @@ def generate_complete_ast(
                 # Silently continue if data population fails
                 pass
 
-        if hasattr(api, 'AST') and api.AST is not None:
+        if hasattr(api, "AST") and api.AST is not None:
             # Extract components exactly like batch_validator does
             def extract_components(ast_obj):
-                if hasattr(ast_obj, 'children') and len(ast_obj.children) > 0:
+                if hasattr(ast_obj, "children") and len(ast_obj.children) > 0:
                     child = ast_obj.children[0]
-                    if hasattr(child, 'expression'):
+                    if hasattr(child, "expression"):
                         return child.expression, child.partial_selection
                     else:
                         return child, None
@@ -196,39 +224,39 @@ def generate_complete_ast(
             context_dict = None
             if context:
                 context_dict = {
-                    'table': getattr(context, 'table', None),
-                    'rows': getattr(context, 'rows', None),
-                    'columns': getattr(context, 'cols', None),
-                    'sheets': getattr(context, 'sheets', None),
-                    'default': getattr(context, 'default', None),
-                    'interval': getattr(context, 'interval', None)
+                    "table": getattr(context, "table", None),
+                    "rows": getattr(context, "rows", None),
+                    "columns": getattr(context, "cols", None),
+                    "sheets": getattr(context, "sheets", None),
+                    "default": getattr(context, "default", None),
+                    "interval": getattr(context, "interval", None),
                 }
 
             return {
-                'success': True,
-                'ast': ast_dict,
-                'context': context_dict,
-                'error': None,
-                'data_populated': data_populated,
-                'semantic_result': semantic_result
+                "success": True,
+                "ast": ast_dict,
+                "context": context_dict,
+                "error": None,
+                "data_populated": data_populated,
+                "semantic_result": semantic_result,
             }
 
         else:
             return {
-                'success': False,
-                'ast': None,
-                'context': None,
-                'error': 'Semantic validation did not generate AST',
-                'data_populated': False
+                "success": False,
+                "ast": None,
+                "context": None,
+                "error": "Semantic validation did not generate AST",
+                "data_populated": False,
            }
 
    except Exception as e:
        return {
-            'success': False,
-            'ast': None,
-            'context': None,
-            'error': f'API error: {str(e)}',
-            'data_populated': False
+            "success": False,
+            "ast": None,
+            "context": None,
+            "error": f"API error: {str(e)}",
+            "data_populated": False,
        }
 
 
@@ -237,7 +265,7 @@ def _check_data_fields_populated(ast_dict):
     if not isinstance(ast_dict, dict):
         return False
 
-    if ast_dict.get('class_name') == 'VarID' and 'data' in ast_dict:
+    if ast_dict.get("class_name") == "VarID" and "data" in ast_dict:
         return True
 
     # Recursively check nested structures
@@ -257,7 +285,7 @@ def generate_complete_batch(
     expressions: list,
     database_path: str = None,
     connection_url: str = None,
-    release_id: Optional[int] = None
+    release_id: Optional[int] = None,
 ):
     """
     Generate complete ASTs for multiple expressions.
@@ -277,7 +305,7 @@ def generate_complete_batch(
         result = generate_complete_ast(
             expr, database_path, connection_url, release_id=release_id
         )
-        result['batch_index'] = i
+        result["batch_index"] = i
         results.append(result)
     return results
 
@@ -287,7 +315,7 @@ def parse_with_data_fields(
     expression: str,
     database_path: str = None,
     connection_url: str = None,
-    release_id: Optional[int] = None
+    release_id: Optional[int] = None,
 ):
     """
     Simple function to parse expression and get AST with data fields.
@@ -305,7 +333,7 @@ def parse_with_data_fields(
     result = generate_complete_ast(
         expression, database_path, connection_url, release_id=release_id
     )
-    return result['ast'] if result['success'] else None
 
 
 # ============================================================================
@@ -353,15 +381,15 @@ def generate_enriched_ast(
         expression, database_path, connection_url, release_id=release_id
     )
 
-    if not complete_result['success']:
+    if not complete_result["success"]:
         return {
-            'success': False,
-            'enriched_ast': None,
-            'error': f"Failed to generate complete AST: {complete_result['error']}"
+            "success": False,
+            "enriched_ast": None,
+            "error": f"Failed to generate complete AST: {complete_result['error']}",
        }
 
-    complete_ast = complete_result['ast']
-    context = complete_result.get('context') or table_context
+    complete_ast = complete_result["ast"]
+    context = complete_result.get("context") or table_context
 
     # Enrich with framework structure
     enriched_ast = enrich_ast_with_metadata(
@@ -372,20 +400,16 @@ def generate_enriched_ast(
         connection_url=connection_url,
         dpm_version=dpm_version,
         operation_code=operation_code,
-        precondition=precondition
+        precondition=precondition,
     )
 
-    return {
-        'success': True,
-        'enriched_ast': enriched_ast,
-        'error': None
-    }
+    return {"success": True, "enriched_ast": enriched_ast, "error": None}
 
 except Exception as e:
     return {
-        'success': False,
-        'enriched_ast': None,
-        'error': f'Enrichment error: {str(e)}'
+        "success": False,
+        "enriched_ast": None,
+        "error": f"Enrichment error: {str(e)}",
     }
 
 
@@ -417,8 +441,8 @@ def enrich_ast_with_metadata(
     Returns:
         dict: Engine-ready AST with framework structure
     """
-    from py_dpm.db_utils import get_engine, get_session
-    from py_dpm.models import TableVersion, Release
+    from py_dpm.dpm.utils import get_engine, get_session
+    from py_dpm.dpm.models import TableVersion, Release
     import copy
 
     # Initialize database connection
@@ -440,13 +464,10 @@ def enrich_ast_with_metadata(
         "module_version": "1.0.0",
         "framework_code": "default",
         "dpm_release": {
-            "release": release_info['release'],
-            "publication_date": release_info['publication_date']
+            "release": release_info["release"],
+            "publication_date": release_info["publication_date"],
         },
-        "dates": {
-            "from": "2001-01-01",
-            "to": None
-        }
+        "dates": {"from": "2001-01-01", "to": None},
     }
 
     # Add coordinates to AST data entries
@@ -461,7 +482,7 @@ def enrich_ast_with_metadata(
             "root_operator_id": 24,  # Default for now
             "ast": ast_with_coords,
             "from_submission_date": current_date,
-            "severity": "Error"
+            "severity": "Error",
         }
     }
 
@@ -473,27 +494,24 @@ def enrich_ast_with_metadata(
 
     # Build tables with their specific variables
     for table_code, table_variables in variables_by_table.items():
-        tables[table_code] = {
-            "variables": table_variables,
-            "open_keys": {}
-        }
+        tables[table_code] = {"variables": table_variables, "open_keys": {}}
 
     # Build preconditions
     preconditions = {}
     precondition_variables = {}
 
-    if precondition or (context and 'table' in context):
+    if precondition or (context and "table" in context):
         preconditions, precondition_variables = _build_preconditions(
             precondition=precondition,
             context=context,
             operation_code=operation_code,
-            engine=engine
+            engine=engine,
        )
 
     # Build dependency information
     dependency_info = {
         "intra_instance_validations": [operation_code],
-        "cross_instance_dependencies": []
+        "cross_instance_dependencies": [],
     }
 
     # Build dependency modules
@@ -511,7 +529,7 @@ def enrich_ast_with_metadata(
             "preconditions": preconditions,
             "precondition_variables": precondition_variables,
             "dependency_information": dependency_info,
-            "dependency_modules": dependency_modules
+            "dependency_modules": dependency_modules,
         }
     }
 
@@ -527,7 +545,7 @@ def _get_release_info(dpm_version: Optional[str], engine) -> Dict[str, Any]:
     Returns:
         dict: {'release': str, 'publication_date': str}
     """
-    from py_dpm.models import Release
+    from py_dpm.dpm.models import Release
     from sqlalchemy.orm import sessionmaker
 
     Session = sessionmaker(bind=engine)
@@ -537,39 +555,44 @@ def _get_release_info(dpm_version: Optional[str], engine) -> Dict[str, Any]:
         if dpm_version:
             # Query for specific version
             version_float = float(dpm_version)
-            release = session.query(Release).filter(
-                Release.code == str(version_float)
-            ).first()
+            release = (
+                session.query(Release)
+                .filter(Release.code == str(version_float))
+                .first()
+            )
 
             if release:
                 return {
-                    'release': str(release.code) if release.code else dpm_version,
-                    'publication_date': release.date.strftime("%Y-%m-%d") if release.date else "2001-01-01"
+                    "release": str(release.code) if release.code else dpm_version,
+                    "publication_date": (
+                        release.date.strftime("%Y-%m-%d")
+                        if release.date
+                        else "2001-01-01"
+                    ),
                 }
 
         # Fallback: get latest released version
-        release = session.query(Release).filter(
-            Release.status == 'released'
-        ).order_by(Release.code.desc()).first()
+        release = (
+            session.query(Release)
+            .filter(Release.status == "released")
+            .order_by(Release.code.desc())
+            .first()
+        )
 
         if release:
             return {
-                'release': str(release.code) if release.code else "4.1",
-                'publication_date': release.date.strftime("%Y-%m-%d") if release.date else "2001-01-01"
+                "release": str(release.code) if release.code else "4.1",
+                "publication_date": (
+                    release.date.strftime("%Y-%m-%d") if release.date else "2001-01-01"
+                ),
            }
 
        # Final fallback
-        return {
-            'release': "4.1",
-            'publication_date': "2001-01-01"
-        }
+        return {"release": "4.1", "publication_date": "2001-01-01"}
 
     except Exception:
         # Fallback on any error
-        return {
-            'release': "4.1",
-            'publication_date': "2001-01-01"
-        }
+        return {"release": "4.1", "publication_date": "2001-01-01"}
     finally:
         session.close()
 
@@ -585,7 +608,7 @@ def _get_table_info(table_code: str, engine) -> Optional[Dict[str, Any]]:
     Returns:
         dict: {'table_vid': int, 'code': str} or None if not found
     """
-    from py_dpm.models import TableVersion
+    from py_dpm.dpm.models import TableVersion
     from sqlalchemy.orm import sessionmaker
     import re
 
@@ -594,41 +617,37 @@ def _get_table_info(table_code: str, engine) -> Optional[Dict[str, Any]]:
 
     try:
         # Try exact match first
-        table = session.query(TableVersion).filter(
-            TableVersion.code == table_code
-        ).first()
+        table = (
+            session.query(TableVersion).filter(TableVersion.code == table_code).first()
+        )
 
         if table:
-            return {
-                'table_vid': table.tablevid,
-                'code': table.code
-            }
+            return {"table_vid": table.tablevid, "code": table.code}
 
         # Handle precondition parser format: F_25_01 -> F_25.01
-        if re.match(r'^[A-Z]_\d+_\d+', table_code):
-            parts = table_code.split('_', 2)
+        if re.match(r"^[A-Z]_\d+_\d+", table_code):
+            parts = table_code.split("_", 2)
             if len(parts) >= 3:
                 table_code_with_dot = f"{parts[0]}_{parts[1]}.{parts[2]}"
-                table = session.query(TableVersion).filter(
-                    TableVersion.code == table_code_with_dot
-                ).first()
+                table = (
+                    session.query(TableVersion)
+                    .filter(TableVersion.code == table_code_with_dot)
+                    .first()
+                )
 
                 if table:
-                    return {
-                        'table_vid': table.tablevid,
-                        'code': table.code
-                    }
+                    return {"table_vid": table.tablevid, "code": table.code}
 
        # Try LIKE pattern as last resort (handles sub-tables like F_25.01.a)
-        table = session.query(TableVersion).filter(
-            TableVersion.code.like(f"{table_code}%")
-        ).order_by(TableVersion.code).first()
+        table = (
+            session.query(TableVersion)
+            .filter(TableVersion.code.like(f"{table_code}%"))
+            .order_by(TableVersion.code)
+            .first()
+        )
 
         if table:
-            return {
-                'table_vid': table.tablevid,
-                'code': table.code
-            }
+            return {"table_vid": table.tablevid, "code": table.code}
 
         return None
 
@@ -642,7 +661,7 @@ def _build_preconditions(
     precondition: Optional[str],
     context: Optional[Dict[str, Any]],
     operation_code: str,
-    engine
+    engine,
 ) -> tuple:
     """
     Build preconditions and precondition_variables sections.
@@ -666,30 +685,30 @@ def _build_preconditions(
 
     if precondition:
         # Extract variable code from precondition reference like {v_F_44_04}
-        match = re.match(r'\{v_([^}]+)\}', precondition)
+        match = re.match(r"\{v_([^}]+)\}", precondition)
         if match:
             table_code = match.group(1)
-    elif context and 'table' in context:
-        table_code = context['table']
+    elif context and "table" in context:
+        table_code = context["table"]
 
     if table_code:
         # Query database for actual variable ID and version
         table_info = _get_table_info(table_code, engine)
 
         if table_info:
-            precondition_var_id = table_info['table_vid']
-            version_id = table_info['table_vid']
+            precondition_var_id = table_info["table_vid"]
+            version_id = table_info["table_vid"]
             precondition_code = f"p_{precondition_var_id}"
 
             preconditions[precondition_code] = {
                 "ast": {
                     "class_name": "PreconditionItem",
                     "variable_id": precondition_var_id,
-                    "variable_code": table_code
+                    "variable_code": table_code,
                 },
                 "affected_operations": [operation_code],
                 "version_id": version_id,
-                "code": precondition_code
+                "code": precondition_code,
            }
 
            precondition_variables[str(precondition_var_id)] = "b"
@@ -713,17 +732,17 @@ def _extract_variables_from_ast(ast_dict: Dict[str, Any]) -> tuple:
     def extract_from_node(node):
         if isinstance(node, dict):
             # Check if this is a VarID node with data
-            if node.get('class_name') == 'VarID' and 'data' in node:
-                table = node.get('table')
+            if node.get("class_name") == "VarID" and "data" in node:
+                table = node.get("table")
                 if table:
                     if table not in variables_by_table:
                         variables_by_table[table] = {}
 
                     # Extract variable IDs and data types from AST data array
-                    for data_item in node['data']:
-                        if 'datapoint' in data_item:
-                            var_id = str(int(data_item['datapoint']))
-                            data_type = data_item.get('data_type', 'e')
+                    for data_item in node["data"]:
+                        if "datapoint" in data_item:
+                            var_id = str(int(data_item["datapoint"]))
+                            data_type = data_item.get("data_type", "e")
                             variables_by_table[table][var_id] = data_type
                             all_variables[var_id] = data_type
 
@@ -739,7 +758,9 @@ def _extract_variables_from_ast(ast_dict: Dict[str, Any]) -> tuple:
     return all_variables, variables_by_table
 
 
-def _add_coordinates_to_ast(ast_dict: Dict[str, Any], context: Optional[Dict[str, Any]]) -> Dict[str, Any]:
+def _add_coordinates_to_ast(
+    ast_dict: Dict[str, Any], context: Optional[Dict[str, Any]]
+) -> Dict[str, Any]:
     """
     Add x/y/z coordinates to data entries in AST.
 
@@ -755,16 +776,16 @@ def _add_coordinates_to_ast(ast_dict: Dict[str, Any], context: Optional[Dict[str
     def add_coords_to_node(node):
         if isinstance(node, dict):
             # Handle VarID nodes with data arrays
-            if node.get('class_name') == 'VarID' and 'data' in node:
+            if node.get("class_name") == "VarID" and "data" in node:
                 # Get column information from context
                 cols = []
-                if context and 'columns' in context and context['columns']:
-                    cols = context['columns']
+                if context and "columns" in context and context["columns"]:
+                    cols = context["columns"]
 
                 # Group data entries by row to assign coordinates correctly
                 entries_by_row = {}
-                for data_entry in node['data']:
-                    row_code = data_entry.get('row', '')
+                for data_entry in node["data"]:
+                    row_code = data_entry.get("row", "")
                     if row_code not in entries_by_row:
                         entries_by_row[row_code] = []
                     entries_by_row[row_code].append(data_entry)
@@ -773,7 +794,7 @@ def _add_coordinates_to_ast(ast_dict: Dict[str, Any], context: Optional[Dict[str
                 rows = list(entries_by_row.keys())
                 for x_index, row_code in enumerate(rows, 1):
                     for data_entry in entries_by_row[row_code]:
-                        column_code = data_entry.get('column', '')
+                        column_code = data_entry.get("column", "")
 
                         # Find y coordinate based on column position in context
                         y_index = 1  # default
@@ -781,16 +802,19 @@ def _add_coordinates_to_ast(ast_dict: Dict[str, Any], context: Optional[Dict[str
                             y_index = cols.index(column_code) + 1
                         elif cols:
                             # Fallback to order in data
-                            row_columns = [entry.get('column', '') for entry in entries_by_row[row_code]]
+                            row_columns = [
+                                entry.get("column", "")
+                                for entry in entries_by_row[row_code]
+                            ]
                             if column_code in row_columns:
                                 y_index = row_columns.index(column_code) + 1
 
                         # Always add y coordinate
-                        data_entry['y'] = y_index
+                        data_entry["y"] = y_index
 
                         # Add x coordinate only if there are multiple rows
                         if len(rows) > 1:
-                            data_entry['x'] = x_index
+                            data_entry["x"] = x_index
 
                         # TODO: Add z coordinate for sheets when needed
 
@@ -805,4 +829,4 @@ def _add_coordinates_to_ast(ast_dict: Dict[str, Any], context: Optional[Dict[str
     # Create a deep copy to avoid modifying the original
     result = copy.deepcopy(ast_dict)
     add_coords_to_node(result)
-    return result
+    return result
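For orientation, a minimal usage sketch of the refactored module as it reads in this diff. The module path follows rename 11 in the list above; the expression and database path are placeholders, and the keys checked are exactly the ones generate_complete_ast builds:

    from py_dpm.api.dpm_xl.complete_ast import generate_complete_ast

    result = generate_complete_ast(
        "{tF_25.01, r0010, c0010} > 0",  # placeholder DPM-XL expression
        database_path="dpm.db",          # placeholder SQLite database
    )
    if result["success"]:
        ast_dict = result["ast"]     # serialized AST; VarID nodes carry a "data" list
        context = result["context"]  # table / rows / columns / sheets / default / interval
    else:
        print(result["error"])

The data-population logic that the diff reformats in apply_data_to_varids hinges on one rule: a '*' wildcard in rows or cols means that axis is left unfiltered. A self-contained pandas illustration of that rule, with made-up data:

    import pandas as pd

    data = pd.DataFrame(
        {
            "table_code": ["F_25.01"] * 4,
            "row_code": ["0010", "0010", "0020", "0020"],
            "column_code": ["0010", "0020", "0010", "0020"],
        }
    )
    table, rows, cols = "F_25.01", ["0010"], ["*"]

    mask = data["table_code"] == table
    if rows is not None and "*" not in rows:
        mask = mask & data["row_code"].isin(rows)
    if cols is not None and "*" not in cols:  # '*' present, so no column filter
        mask = mask & data["column_code"].isin(cols)

    print(data[mask])  # row 0010 survives in both columns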