vtlengine 1.4.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vtlengine/API/_InternalApi.py +791 -0
- vtlengine/API/__init__.py +612 -0
- vtlengine/API/data/schema/external_routines_schema.json +34 -0
- vtlengine/API/data/schema/json_schema_2.1.json +116 -0
- vtlengine/API/data/schema/value_domain_schema.json +97 -0
- vtlengine/AST/ASTComment.py +57 -0
- vtlengine/AST/ASTConstructor.py +598 -0
- vtlengine/AST/ASTConstructorModules/Expr.py +1928 -0
- vtlengine/AST/ASTConstructorModules/ExprComponents.py +995 -0
- vtlengine/AST/ASTConstructorModules/Terminals.py +790 -0
- vtlengine/AST/ASTConstructorModules/__init__.py +50 -0
- vtlengine/AST/ASTDataExchange.py +10 -0
- vtlengine/AST/ASTEncoders.py +32 -0
- vtlengine/AST/ASTString.py +675 -0
- vtlengine/AST/ASTTemplate.py +558 -0
- vtlengine/AST/ASTVisitor.py +25 -0
- vtlengine/AST/DAG/__init__.py +479 -0
- vtlengine/AST/DAG/_words.py +10 -0
- vtlengine/AST/Grammar/Vtl.g4 +705 -0
- vtlengine/AST/Grammar/VtlTokens.g4 +409 -0
- vtlengine/AST/Grammar/__init__.py +0 -0
- vtlengine/AST/Grammar/lexer.py +2139 -0
- vtlengine/AST/Grammar/parser.py +16597 -0
- vtlengine/AST/Grammar/tokens.py +169 -0
- vtlengine/AST/VtlVisitor.py +824 -0
- vtlengine/AST/__init__.py +674 -0
- vtlengine/DataTypes/TimeHandling.py +562 -0
- vtlengine/DataTypes/__init__.py +863 -0
- vtlengine/DataTypes/_time_checking.py +135 -0
- vtlengine/Exceptions/__exception_file_generator.py +96 -0
- vtlengine/Exceptions/__init__.py +159 -0
- vtlengine/Exceptions/messages.py +1004 -0
- vtlengine/Interpreter/__init__.py +2048 -0
- vtlengine/Model/__init__.py +501 -0
- vtlengine/Operators/Aggregation.py +357 -0
- vtlengine/Operators/Analytic.py +455 -0
- vtlengine/Operators/Assignment.py +23 -0
- vtlengine/Operators/Boolean.py +106 -0
- vtlengine/Operators/CastOperator.py +451 -0
- vtlengine/Operators/Clause.py +366 -0
- vtlengine/Operators/Comparison.py +488 -0
- vtlengine/Operators/Conditional.py +495 -0
- vtlengine/Operators/General.py +191 -0
- vtlengine/Operators/HROperators.py +254 -0
- vtlengine/Operators/Join.py +447 -0
- vtlengine/Operators/Numeric.py +422 -0
- vtlengine/Operators/RoleSetter.py +77 -0
- vtlengine/Operators/Set.py +176 -0
- vtlengine/Operators/String.py +578 -0
- vtlengine/Operators/Time.py +1144 -0
- vtlengine/Operators/Validation.py +275 -0
- vtlengine/Operators/__init__.py +900 -0
- vtlengine/Utils/__Virtual_Assets.py +34 -0
- vtlengine/Utils/__init__.py +479 -0
- vtlengine/__extras_check.py +17 -0
- vtlengine/__init__.py +27 -0
- vtlengine/files/__init__.py +0 -0
- vtlengine/files/output/__init__.py +35 -0
- vtlengine/files/output/_time_period_representation.py +55 -0
- vtlengine/files/parser/__init__.py +240 -0
- vtlengine/files/parser/_rfc_dialect.py +22 -0
- vtlengine/py.typed +0 -0
- vtlengine-1.4.0rc2.dist-info/METADATA +89 -0
- vtlengine-1.4.0rc2.dist-info/RECORD +66 -0
- vtlengine-1.4.0rc2.dist-info/WHEEL +4 -0
- vtlengine-1.4.0rc2.dist-info/licenses/LICENSE.md +661 -0
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
from typing import Dict, Union
|
|
2
|
+
|
|
3
|
+
from antlr4.ParserRuleContext import ParserRuleContext
|
|
4
|
+
from antlr4.Token import CommonToken
|
|
5
|
+
|
|
6
|
+
from vtlengine.AST.Grammar.lexer import Lexer
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def extract_token_info(token: Union[CommonToken, ParserRuleContext]) -> Dict[str, int]:
    """
    Return the source-position information for a token or parser rule context.

    The resulting dictionary describes the span of text covered by the node,
    including all of its children:

    - ``column_start``: column where the token/context begins.
    - ``column_stop``: column just past the end of the token/context text.
    - ``line_start``: line where the token/context begins.
    - ``line_stop``: line where the token/context ends.

    Important Note: the keys match the attribute names of the AST classes, so
    the dict can be splatted directly into an AST node constructor.

    Args:
        token (Union[CommonToken, ParserRuleContext]): The token or
            ParserRuleContext to extract information from.

    Returns:
        Dict[str, int]: A dictionary containing the token information.
    """
    if isinstance(token, ParserRuleContext):
        start, stop = token.start, token.stop
        return {
            "column_start": start.column,
            "column_stop": stop.column + len(stop.text),
            "line_start": start.line,
            "line_stop": stop.line,
        }

    stop_line = token.line
    # Multi-line comments span several lines; count the embedded newlines to
    # find the real stop line (the ML_COMMENT grammar rule does not include a
    # trailing \n).
    if token.type == Lexer.ML_COMMENT:
        stop_line += token.text.count("\n")
    return {
        "column_start": token.column,
        "column_stop": token.column + len(token.text),
        "line_start": token.line,
        "line_stop": stop_line,
    }
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import json
|
|
2
|
+
|
|
3
|
+
from vtlengine import AST
|
|
4
|
+
from vtlengine.Model import Dataset
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder for AST objects.

    Serialization order:
    1. Objects exposing a ``toJSON`` method are serialized via that method.
    2. ``Dataset`` instances are collapsed to the string ``"dataset"`` —
       serializing them fully causes a circular reference error.
    3. Anything else falls back to its attribute dictionary.
    """

    def default(self, obj):
        if hasattr(obj, "toJSON"):
            return obj.toJSON()
        # Makes a circular reference error if we do not check for this
        elif isinstance(obj, Dataset):
            return "dataset"
        else:
            # Bug fix: was `json.__dict__`, which returns the json *module's*
            # namespace (same unserializable dict for every object). The
            # intent is to serialize the object's own attributes.
            return obj.__dict__
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class ComplexDecoder(json.JSONDecoder):
    """JSON decoder that rebuilds AST objects from their serialized form.

    Dictionaries carrying a ``"class_name"`` key are converted back into
    instances of the matching class from :mod:`vtlengine.AST`; all other
    dictionaries pass through unchanged.
    """

    @staticmethod
    def object_hook(dictionary):
        """Convert a decoded dict into an AST node when it names one.

        Args:
            dictionary: A dict produced by JSON decoding.

        Returns:
            An AST instance if the dict has a ``"class_name"`` key,
            otherwise the dict itself.

        Raises:
            ValueError: If ``class_name`` does not match any AST class.
            TypeError: If the remaining keys do not match the class's
                constructor signature.
        """
        # Guard clause: plain dicts pass through untouched.
        if "class_name" not in dictionary:
            return dictionary

        class_name = dictionary.pop("class_name")
        if not hasattr(AST, class_name):
            raise ValueError(f"Class {class_name} not found in AST")

        ast_class = getattr(AST, class_name)
        # Removed a useless `try/except TypeError as e: raise e` wrapper:
        # it re-raised the same exception and only cluttered the traceback.
        return ast_class(**dictionary)
|