vtlengine 1.0.4__py3-none-any.whl → 1.1__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release.



@@ -1,3 +1,4 @@
+import inspect
 import json
 from collections import Counter
 from dataclasses import dataclass
@@ -128,10 +129,14 @@ class Component:
         )
 
     def to_dict(self) -> Dict[str, Any]:
+        data_type = self.data_type
+        if not inspect.isclass(self.data_type):
+            data_type = self.data_type.__class__  # type: ignore[assignment]
         return {
             "name": self.name,
-            "data_type": DataTypes.SCALAR_TYPES_CLASS_REVERSE[self.data_type],
-            "role": self.role.value,
+            "data_type": DataTypes.SCALAR_TYPES_CLASS_REVERSE[data_type],
+            # Need to check here for NoneType as UDO argument has it
+            "role": self.role.value if self.role is not None else None,  # type: ignore[redundant-expr]
             "nullable": self.nullable,
         }
 
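The data_type normalization above exists because SCALAR_TYPES_CLASS_REVERSE is keyed by scalar-type classes, while data_type can arrive as an instance. A minimal standalone sketch of the pattern (stand-in names, not vtlengine's actual classes):

    import inspect

    class Integer:  # stand-in for a vtlengine scalar type
        pass

    SCALAR_TYPES_CLASS_REVERSE = {Integer: "Integer"}

    def type_name(data_type):
        # Normalize an instance back to its class before the keyed lookup.
        if not inspect.isclass(data_type):
            data_type = data_type.__class__
        return SCALAR_TYPES_CLASS_REVERSE[data_type]

    print(type_name(Integer))    # "Integer" (already a class)
    print(type_name(Integer()))  # "Integer" (instance normalized to its class)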
@@ -229,11 +234,11 @@ class Dataset:
                 self.data[comp.name] = self.data[comp.name].astype(str)
                 other.data[comp.name] = other.data[comp.name].astype(str)
                 self.data[comp.name] = self.data[comp.name].map(
-                    lambda x: str(TimePeriodHandler(x)) if x != "" else "",
+                    lambda x: str(TimePeriodHandler(str(x))) if x != "" else "",
                     na_action="ignore",
                 )
                 other.data[comp.name] = other.data[comp.name].map(
-                    lambda x: str(TimePeriodHandler(x)) if x != "" else "",
+                    lambda x: str(TimePeriodHandler(str(x))) if x != "" else "",
                     na_action="ignore",
                 )
             elif type_name in ["Integer", "Number"]:
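This hunk (and the Aggregation and Date_Add hunks below) wraps values in str(x) before handing them to the time parsers. The guard matters because an object-dtype pandas column can still hold non-string cells. A small illustration with a hypothetical stand-in parser:

    import pandas as pd

    def parse_period(value: str) -> str:
        return value.strip().upper()  # stand-in for TimePeriodHandler

    s = pd.Series(["2020-q1", 2021, None], dtype=object)

    # parse_period(2021) would raise AttributeError; coercing with str() first
    # succeeds, and na_action="ignore" skips the null cell entirely.
    print(s.map(lambda x: parse_period(str(x)), na_action="ignore"))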
@@ -78,7 +78,7 @@ class Aggregation(Operator.Unary):
             data[measure.name] = (
                 data[measure.name]
                 .astype(object)
-                .map(lambda x: TimePeriodHandler(x), na_action="ignore")
+                .map(lambda x: TimePeriodHandler(str(x)), na_action="ignore")
             )
         else:
             data[measure.name] = data[measure.name].map(
@@ -90,7 +90,7 @@ class Aggregation(Operator.Unary):
                 data[measure.name]
                 .astype(object)
                 .map(
-                    lambda x: TimeIntervalHandler.from_iso_format(x),
+                    lambda x: TimeIntervalHandler.from_iso_format(str(x)),
                     na_action="ignore",
                 )
             )
@@ -103,11 +103,13 @@ class Aggregation(Operator.Unary):
         elif measure.data_type == Duration:
             if mode == "input":
                 data[measure.name] = data[measure.name].map(
-                    lambda x: PERIOD_IND_MAPPING[x], na_action="ignore"
+                    lambda x: PERIOD_IND_MAPPING[x],  # type: ignore[index]
+                    na_action="ignore",
                 )
             else:
                 data[measure.name] = data[measure.name].map(
-                    lambda x: PERIOD_IND_MAPPING_REVERSE[x], na_action="ignore"
+                    lambda x: PERIOD_IND_MAPPING_REVERSE[x],  # type: ignore[index]
+                    na_action="ignore",
                 )
         elif measure.data_type == Boolean:
             if mode == "result":
@@ -208,7 +210,7 @@ class Aggregation(Operator.Unary):
                 e = f'"{e}"'
             if cls.type_to_check is not None and cls.op != COUNT:
                 functions += (
-                    f"{cls.py_op}(CAST({e} AS REAL)) AS {e}, "  # Count can only be one here
+                    f"{cls.py_op}(CAST({e} AS DOUBLE)) AS {e}, "  # Count can only be one here
                 )
             elif cls.op == COUNT:
                 functions += f"{cls.py_op}({e}) AS int_var, "
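The CAST target changes from REAL to DOUBLE here and in the Analytic hunk below. In DuckDB, REAL is a 32-bit float and DOUBLE a 64-bit one, so aggregating through REAL can lose precision. A quick check (requires the duckdb package):

    import duckdb

    # float32(0.1) and float64(0.1) round differently, so the two casts
    # compare unequal once REAL is promoted to DOUBLE for the comparison:
    print(duckdb.sql("SELECT CAST(0.1 AS REAL) = CAST(0.1 AS DOUBLE)").fetchone())
    # (False,)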
@@ -189,21 +189,26 @@ class Analytic(Operator.Unary):
         if window is not None:
             mode = "ROWS" if window.type_ == "data" else "RANGE"
             start_mode = (
-                window.start_mode
-                if window.start_mode != "current" and window.start != "CURRENT ROW"
+                window.start_mode.upper()
+                if (isinstance(window.start, int) and window.start != 0)
+                or (isinstance(window.start, str) and window.start == "unbounded")
                 else ""
             )
             stop_mode = (
-                window.stop_mode
-                if window.stop_mode != "current" and window.stop != "CURRENT ROW"
+                window.stop_mode.upper()
+                if (isinstance(window.stop, int) and window.stop != 0)
+                or (isinstance(window.stop, str) and window.stop == "unbounded")
                 else ""
             )
-            if isinstance(window.start, int) and window.start == -1:
-                window.start = "UNBOUNDED"
-
-            if stop_mode == "" and window.stop == 0:
-                window.stop = "CURRENT ROW"
-            window_str = f"{mode} BETWEEN {window.start} {start_mode} AND {window.stop} {stop_mode}"
+            start = (
+                "UNBOUNDED"
+                if window.start == "unbounded" or window.start == -1
+                else str(window.start)
+            )
+            stop = (
+                "CURRENT ROW" if window.stop == "current" or window.stop == 0 else str(window.stop)
+            )
+            window_str = f"{mode} BETWEEN {start} {start_mode} AND {stop} {stop_mode}"
 
         # Partitioning
         partition = "PARTITION BY " + ", ".join(partitioning) if len(partitioning) > 0 else ""
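The rewrite derives the frame bounds ("UNBOUNDED", "CURRENT ROW") from the window's start/stop tokens instead of mutating the window object in place. A self-contained sketch of the new logic, using a simplified stand-in for the windowing node (empty modes leave extra spaces in the clause, which SQL tolerates):

    from dataclasses import dataclass
    from typing import Union

    @dataclass
    class Window:  # stand-in, not vtlengine's class
        type_: str
        start: Union[int, str]
        stop: Union[int, str]
        start_mode: str
        stop_mode: str

    def frame(w: Window) -> str:
        mode = "ROWS" if w.type_ == "data" else "RANGE"
        start_mode = (
            w.start_mode.upper()
            if (isinstance(w.start, int) and w.start != 0)
            or (isinstance(w.start, str) and w.start == "unbounded")
            else ""
        )
        stop_mode = (
            w.stop_mode.upper()
            if (isinstance(w.stop, int) and w.stop != 0)
            or (isinstance(w.stop, str) and w.stop == "unbounded")
            else ""
        )
        start = "UNBOUNDED" if w.start in ("unbounded", -1) else str(w.start)
        stop = "CURRENT ROW" if w.stop in ("current", 0) else str(w.stop)
        return f"{mode} BETWEEN {start} {start_mode} AND {stop} {stop_mode}"

    print(frame(Window("data", "unbounded", 0, "preceding", "following")))
    # ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW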
@@ -224,7 +229,7 @@ class Analytic(Operator.Unary):
         if cls.op == RANK:
             measure_query = f"{cls.sql_op}()"
         elif cls.op == RATIO_TO_REPORT:
-            measure_query = f"CAST({measure} AS REAL) / SUM(CAST({measure} AS REAL))"
+            measure_query = f"CAST({measure} AS DOUBLE) / SUM(CAST({measure} AS DOUBLE))"
         elif cls.op in [LAG, LEAD]:
             measure_query = f"{cls.sql_op}({measure}, {','.join(map(str, params or []))})"
         else:
@@ -238,7 +238,7 @@ class Nvl(Binary):
         result = cls.validate(left, right)
 
         if isinstance(left, Scalar) and isinstance(result, Scalar):
-            if pd.isnull(left):  # type: ignore[call-overload]
+            if left.data_type is Null:
                 result.value = right.value
             else:
                 result.value = left.value
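The old guard called pd.isnull on the Scalar wrapper itself, which can never report null: pd.isnull returns False for any ordinary Python object. The release therefore inspects the scalar's declared data type instead. A tiny demonstration with a hypothetical wrapper:

    import pandas as pd

    class Scalar:  # hypothetical stand-in
        def __init__(self, value):
            self.value = value

    s = Scalar(None)
    print(pd.isnull(s))        # False -- the wrapper object is not NaN/None
    print(pd.isnull(s.value))  # True  -- the wrapped value is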
@@ -308,8 +308,23 @@ class Case(Operator):
     ) -> Union[Scalar, DataComponent, Dataset]:
         result = cls.validate(conditions, thenOps, elseOp)
         for condition in conditions:
-            if isinstance(condition, (DataComponent, Dataset)) and condition.data is not None:
+            if isinstance(condition, Dataset) and condition.data is not None:
                 condition.data.fillna(False, inplace=True)
+                condition_measure = condition.get_measures_names()[0]
+                if condition.data[condition_measure].dtype != bool:
+                    condition.data[condition_measure] = condition.data[condition_measure].astype(
+                        bool
+                    )
+            elif (
+                isinstance(
+                    condition,
+                    DataComponent,
+                )
+                and condition.data is not None
+            ):
+                condition.data.fillna(False, inplace=True)
+                if condition.data.dtype != bool:
+                    condition.data = condition.data.astype(bool)
             elif isinstance(condition, Scalar) and condition.value is None:
                 condition.value = False
 
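Presumably the new astype(bool) steps exist because fillna(False) alone leaves an object-dtype condition column rather than a true boolean mask. A sketch of the difference:

    import pandas as pd

    cond = pd.Series([True, None, False], dtype=object).fillna(False)
    print(cond.dtype)  # object -- still not a real boolean mask

    mask = cond.astype(bool)  # what the new code enforces
    print(mask.dtype)  # bool

    df = pd.DataFrame({"x": [1, 2, 3]})
    print(df.loc[mask, "x"])  # unambiguous boolean indexing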
@@ -366,7 +381,7 @@ class Case(Operator):
             ]
         )
 
-        result.data.loc[condition_mask_else, columns] = (
+        result.data.loc[condition_mask_else, columns] = (  # type: ignore[index, unused-ignore]
             elseOp.value
             if isinstance(elseOp, Scalar)
             else elseOp.data.loc[condition_mask_else, columns]
@@ -57,14 +57,17 @@ class Time(Operators.Operator):
     op = FLOW_TO_STOCK
 
     @classmethod
-    def _get_time_id(cls, operand: Dataset) -> Optional[str]:
+    def _get_time_id(cls, operand: Dataset) -> str:
         reference_id = None
+        identifiers = operand.get_identifiers()
+        if len(identifiers) == 0:
+            raise SemanticError("1-1-19-8", op=cls.op, comp_type="time dataset")
         for id in operand.get_identifiers():
             if id.data_type in cls.TIME_DATA_TYPES:
                 if reference_id is not None:
                     raise SemanticError("1-1-19-8", op=cls.op, comp_type="time dataset")
                 reference_id = id.name
-        return reference_id
+        return str(reference_id)
 
     @classmethod
     def sort_by_time(cls, operand: Dataset) -> Optional[pd.DataFrame]:
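_get_time_id now fails fast with a SemanticError instead of returning Optional[str]; that is what lets the Period_indicator and Time_Shift hunks below drop their None checks. The general pattern, as a small hedged sketch:

    from typing import List

    # Raising inside the lookup narrows the return type to plain str,
    # so callers need no Optional handling. (Hypothetical example.)
    def find_time_id(identifiers: List[str]) -> str:
        time_ids = [i for i in identifiers if i.startswith("time")]
        if len(time_ids) != 1:
            raise ValueError("exactly one time identifier expected")
        return time_ids[0]

    print(find_time_id(["time_period", "country"]))  # time_period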
@@ -182,7 +185,7 @@ class Period_indicator(Unary):
     def validate(cls, operand: Any) -> Any:
         if isinstance(operand, Dataset):
             time_id = cls._get_time_id(operand)
-            if time_id is None or operand.components[time_id].data_type != TimePeriod:
+            if operand.components[time_id].data_type != TimePeriod:
                 raise SemanticError("1-1-19-8", op=cls.op, comp_type="time period dataset")
             result_components = {
                 comp.name: comp
@@ -223,7 +226,7 @@ class Period_indicator(Unary):
             if (operand.data is not None)
             else pd.Series()
         )
-        period_series: Any = result.data[cls.time_id].map(cls._get_period)  # type: ignore[index]
+        period_series: Any = result.data[cls.time_id].map(cls._get_period)
         result.data["duration_var"] = period_series
         return result
 
@@ -544,9 +547,7 @@ class Time_Shift(Binary):
         shift_value = int(shift_value.value)
         cls.time_id = cls._get_time_id(result)
 
-        data_type: Any = (
-            result.components[cls.time_id].data_type if isinstance(cls.time_id, str) else None
-        )
+        data_type: Any = result.components[cls.time_id].data_type
 
         if data_type == Date:
             freq = cls.find_min_frequency(
@@ -966,7 +967,7 @@ class Date_Add(Parametrized):
         for measure in operand.get_measures():
             if measure.data_type in [Date, TimePeriod]:
                 result.data[measure.name] = result.data[measure.name].map(
-                    lambda x: cls.py_op(x, shift, period, measure.data_type == TimePeriod),
+                    lambda x: cls.py_op(str(x), shift, period, measure.data_type == TimePeriod),
                     na_action="ignore",
                 )
                 measure.data_type = Date
@@ -1,5 +1,7 @@
 from typing import Any, Dict
 
+from pysdmx.model.dataflow import Role
+
 from vtlengine.AST.Grammar.tokens import (
     ABS,
     AGGREGATE,
@@ -428,3 +430,50 @@ HA_UNARY_MAPPING = {
     PLUS: HRUnPlus,
     MINUS: HRUnMinus,
 }
+VTL_DTYPES_MAPPING = {
+    "String": "String",
+    "Alpha": "String",
+    "AlphaNumeric": "String",
+    "Numeric": "String",
+    "BigInteger": "Integer",
+    "Integer": "Integer",
+    "Long": "Integer",
+    "Short": "Integer",
+    "Decimal": "Number",
+    "Float": "Number",
+    "Double": "Number",
+    "Boolean": "Boolean",
+    "URI": "String",
+    "Count": "Integer",
+    "InclusiveValueRange": "Number",
+    "ExclusiveValueRange": "Number",
+    "Incremental": "Number",
+    "ObservationalTimePeriod": "Time_Period",
+    "StandardTimePeriod": "Time_Period",
+    "BasicTimePeriod": "Date",
+    "GregorianTimePeriod": "Date",
+    "GregorianYear": "Date",
+    "GregorianYearMonth": "Date",
+    "GregorianMonth": "Date",
+    "GregorianDay": "Date",
+    "ReportingTimePeriod": "Time_Period",
+    "ReportingYear": "Time_Period",
+    "ReportingSemester": "Time_Period",
+    "ReportingTrimester": "Time_Period",
+    "ReportingQuarter": "Time_Period",
+    "ReportingMonth": "Time_Period",
+    "ReportingWeek": "Time_Period",
+    "ReportingDay": "Time_Period",
+    "DateTime": "Date",
+    "TimeRange": "Time",
+    "Month": "String",
+    "MonthDay": "String",
+    "Day": "String",
+    "Time": "String",
+    "Duration": "Duration",
+}
+VTL_ROLE_MAPPING = {
+    Role.DIMENSION: "Identifier",
+    Role.MEASURE: "Measure",
+    Role.ATTRIBUTE: "Attribute",
+}
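These tables translate SDMX representation type names and pysdmx component roles into VTL types for the new SDMX entry points. A usage sketch, assuming the tables land in vtlengine.Utils (the HA_UNARY_MAPPING context of this hunk and the Utils/__init__.py entry in RECORD both point there):

    from pysdmx.model.dataflow import Role
    from vtlengine.Utils import VTL_DTYPES_MAPPING, VTL_ROLE_MAPPING

    print(VTL_DTYPES_MAPPING["GregorianDay"])  # Date
    print(VTL_ROLE_MAPPING[Role.DIMENSION])    # Identifier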
@@ -0,0 +1,17 @@
+import importlib.util
+
+EXTRAS_DOCS = "https://docs.vtlengine.meaningfuldata.eu/#installation"
+ERROR_MESSAGE = (
+    "The '{extra_name}' extra is required to run {extra_desc}. "
+    "Please install it using 'pip install vtlengine[{extra_name}]' or "
+    "install all extras with 'pip install vtlengine[all]'. "
+    f"Check the documentation at: {EXTRAS_DOCS}"
+)
+
+
+def __check_s3_extra() -> None:
+    package_loc = importlib.util.find_spec("s3fs")
+    if package_loc is None:
+        raise ImportError(
+            ERROR_MESSAGE.format(extra_name="s3", extra_desc="over csv files using S3 URIs")
+        ) from None
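This new module makes s3fs an optional extra: importlib.util.find_spec reports whether a module can be imported without actually importing it, so the check is cheap and side-effect free. The same guard, generalized as a hypothetical helper:

    import importlib.util

    def require(module: str, extra: str) -> None:
        # find_spec returns None when the module is absent.
        if importlib.util.find_spec(module) is None:
            raise ImportError(f"pip install vtlengine[{extra}] to enable this feature")

    require("csv", "demo")  # stdlib module, always present: no error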
vtlengine/__init__.py CHANGED
@@ -1,3 +1,3 @@
-from vtlengine.API import run, semantic_analysis
+from vtlengine.API import generate_sdmx, prettify, run, run_sdmx, semantic_analysis
 
-__all__ = ["semantic_analysis", "run"]
+__all__ = ["semantic_analysis", "run", "generate_sdmx", "run_sdmx", "prettify"]
@@ -3,6 +3,7 @@ from typing import Optional, Union
 
 import pandas as pd
 
+from vtlengine.__extras_check import __check_s3_extra
 from vtlengine.files.output._time_period_representation import (
     TimePeriodRepresentation,
     format_time_period_external_representation,
@@ -20,6 +21,7 @@ def save_datapoints(
     if time_period_representation is not None:
         format_time_period_external_representation(dataset, time_period_representation)
     if isinstance(output_path, str):
+        __check_s3_extra()
         if output_path.endswith("/"):
             s3_file_output = output_path + f"{dataset.name}.csv"
         else:
@@ -42,14 +42,9 @@ def _validate_csv_path(components: Dict[str, Component], csv_path: Path) -> None
         raise Exception(f"Path {csv_path} is not a file.")
     register_rfc()
     try:
-        with open(csv_path, "r") as f:
+        with open(csv_path, "r", errors="replace", encoding="utf-8") as f:
             reader = DictReader(f, dialect="rfc")
             csv_columns = reader.fieldnames
-
-    except UnicodeDecodeError as error:
-        # https://coderwall.com/p/stzy9w/raising-unicodeencodeerror-and-unicodedecodeerror-
-        # manually-for-testing-purposes
-        raise InputValidationException("0-1-2-5", file=csv_path.name) from error
     except InputValidationException as ie:
         raise InputValidationException("{}".format(str(ie))) from None
     except Exception as e:
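Instead of catching UnicodeDecodeError and re-raising it as validation error 0-1-2-5, the file is now opened with errors="replace", which substitutes undecodable bytes with U+FFFD and lets validation continue:

    # A latin-1 byte inside a file read as UTF-8:
    raw = b"Id,Me\n1,caf\xe9\n"
    print(raw.decode("utf-8", errors="replace"))
    # Id,Me
    # 1,caf\ufffd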
@@ -109,40 +104,18 @@ def _sanitize_pandas_columns(
     return data
 
 
-def _pandas_load_csv(components: Dict[str, Component], csv_path: Path) -> pd.DataFrame:
-    obj_dtypes = {comp_name: np.object_ for comp_name, comp in components.items()}
-
-    try:
-        data = pd.read_csv(
-            csv_path,
-            dtype=obj_dtypes,
-            engine="c",
-            keep_default_na=False,
-            na_values=[""],
-        )
-    except UnicodeDecodeError:
-        raise InputValidationException(code="0-1-2-5", file=csv_path.name)
-
-    return _sanitize_pandas_columns(components, csv_path, data)
+def _pandas_load_csv(components: Dict[str, Component], csv_path: Union[str, Path]) -> pd.DataFrame:
+    obj_dtypes = {comp_name: object for comp_name, comp in components.items()}
 
+    data = pd.read_csv(
+        csv_path,
+        dtype=obj_dtypes,
+        engine="c",
+        keep_default_na=False,
+        na_values=[""],
+        encoding_errors="replace",
+    )
 
-def _pandas_load_s3_csv(components: Dict[str, Component], csv_path: str) -> pd.DataFrame:
-    obj_dtypes = {comp_name: np.object_ for comp_name, comp in components.items()}
-
-    # start = time()
-    try:
-        data = pd.read_csv(
-            csv_path,
-            dtype=obj_dtypes,
-            engine="c",
-            keep_default_na=False,
-            na_values=[""],
-        )
-
-    except UnicodeDecodeError:
-        raise InputValidationException(code="0-1-2-5", file=csv_path)
-    except Exception as e:
-        raise InputValidationException(f"ERROR: {str(e)}, review file {str(csv_path)}")
     return _sanitize_pandas_columns(components, csv_path, data)
 
 
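The separate local and S3 loaders collapse into one function: pandas.read_csv accepts both filesystem paths and fsspec-style URLs (s3:// resolves through s3fs when the extra is installed), and its encoding_errors parameter (pandas >= 1.3) applies the same replace policy inside the reader, making the UnicodeDecodeError handlers redundant. A minimal check of that parameter:

    import io
    import pandas as pd

    raw = io.BytesIO(b"Id,Me\n1,caf\xe9\n")  # undecodable byte in UTF-8
    df = pd.read_csv(raw, dtype=object, keep_default_na=False,
                     na_values=[""], encoding_errors="replace")
    print(df)  # the bad byte arrives as the U+FFFD replacement character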
@@ -187,13 +160,13 @@ def _validate_pandas(
             )
         elif comp.data_type == Integer:
             data[comp_name] = data[comp_name].map(
-                lambda x: Integer.cast(float(x)), na_action="ignore"
+                lambda x: Integer.cast(float(str(x))), na_action="ignore"
             )
         elif comp.data_type == Number:
-            data[comp_name] = data[comp_name].map(lambda x: float(x), na_action="ignore")
+            data[comp_name] = data[comp_name].map(lambda x: float((str(x))), na_action="ignore")
         elif comp.data_type == Boolean:
             data[comp_name] = data[comp_name].map(
-                lambda x: _parse_boolean(x), na_action="ignore"
+                lambda x: _parse_boolean(str(x)), na_action="ignore"
             )
         elif comp.data_type == Duration:
             values_correct = (
@@ -209,7 +182,7 @@ def _validate_pandas(
             values_correct = (
                 data[comp_name]
                 .map(
-                    lambda x: x.replace(" ", "") in PERIOD_IND_MAPPING,
+                    lambda x: x.replace(" ", "") in PERIOD_IND_MAPPING,  # type: ignore[union-attr]
                     na_action="ignore",
                 )
                 .all()
@@ -224,7 +197,7 @@ def _validate_pandas(
             data[comp_name] = data[comp_name].map(
                 lambda x: str(x).replace('"', ""), na_action="ignore"
             )
-            data[comp_name] = data[comp_name].astype(np.object_, errors="raise")
+            data[comp_name] = data[comp_name].astype(object, errors="raise")
 
     except ValueError:
         str_comp = SCALAR_TYPES_CLASS_REVERSE[comp.data_type] if comp else "Null"
@@ -240,10 +213,9 @@ def load_datapoints(
 ) -> pd.DataFrame:
     if csv_path is None or (isinstance(csv_path, Path) and not csv_path.exists()):
         return pd.DataFrame(columns=list(components.keys()))
-    elif isinstance(csv_path, str):
-        data = _pandas_load_s3_csv(components, csv_path)
-    elif isinstance(csv_path, Path):
-        _validate_csv_path(components, csv_path)
+    elif isinstance(csv_path, (str, Path)):
+        if isinstance(csv_path, Path):
+            _validate_csv_path(components, csv_path)
         data = _pandas_load_csv(components, csv_path)
     else:
         raise Exception("Invalid csv_path type")
@@ -19,4 +19,4 @@ class RFCDialect(csv.Dialect):
 
 def register_rfc() -> None:
     """Register the RFC dialect."""
-    csv.register_dialect("rfc", RFCDialect)  # type: ignore[arg-type]
+    csv.register_dialect("rfc", RFCDialect)
@@ -1,32 +1,35 @@
 Metadata-Version: 2.3
 Name: vtlengine
-Version: 1.0.4
+Version: 1.1
 Summary: Run and Validate VTL Scripts
 License: AGPL-3.0
 Keywords: vtl,sdmx,vtlengine,Validation and Transformation Language
 Author: MeaningfulData
 Author-email: info@meaningfuldata.eu
-Requires-Python: >=3.9,<4.0
+Maintainer: Francisco Javier Hernandez del Caño
+Maintainer-email: javier.hernandez@meaningfuldata.eu
+Requires-Python: >=3.9
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Information Technology
 Classifier: Intended Audience :: Science/Research
-Classifier: License :: OSI Approved :: GNU Affero General Public License v3
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
 Classifier: Typing :: Typed
-Requires-Dist: antlr4-python3-runtime (==4.9.2)
-Requires-Dist: duckdb (>=1.1,<2.0)
-Requires-Dist: jsonschema (>=4.23.0,<5.0.0)
-Requires-Dist: networkx (>=2.8.8,<3.0.0)
-Requires-Dist: numexpr (>=2.9.0,<3.0.0)
-Requires-Dist: pandas (>=2.1.4,<3.0.0)
-Requires-Dist: s3fs (>=2025.2.0,<2026.0.0)
-Requires-Dist: sqlglot (>=22.2.0,<23.0.0)
+Provides-Extra: all
+Provides-Extra: s3
+Requires-Dist: antlr4-python3-runtime (>=4.9.2,<4.10)
+Requires-Dist: duckdb (>=1.1,<1.2)
+Requires-Dist: fsspec (>=2022.11.0,<2023.0) ; extra == "all"
+Requires-Dist: fsspec (>=2022.11.0,<2023.0) ; extra == "s3"
+Requires-Dist: jsonschema (>=3.2.0,<5.0)
+Requires-Dist: networkx (>=2.8,<3.0)
+Requires-Dist: numpy (>=1.23.2,<2) ; python_version < "3.13"
+Requires-Dist: numpy (>=2.1.0) ; python_version >= "3.13"
+Requires-Dist: pandas (>=2.1.4,<2.2) ; python_version < "3.13"
+Requires-Dist: pandas (>=2.2,<3.0) ; python_version >= "3.13"
+Requires-Dist: pysdmx[xml] (>=1.3.0,<2.0)
+Requires-Dist: s3fs (>=2022.11.0,<2023.0) ; extra == "all"
+Requires-Dist: s3fs (>=2022.11.0,<2023.0) ; extra == "s3"
+Requires-Dist: sqlglot (>=22.2.0,<23.0)
 Project-URL: Authors, https://github.com/Meaningful-Data/vtlengine/graphs/contributors
 Project-URL: Documentation, https://docs.vtlengine.meaningfuldata.eu
 Project-URL: IssueTracker, https://github.com/Meaningful-Data/vtlengine/issues
@@ -1,16 +1,18 @@
-vtlengine/API/_InternalApi.py,sha256=E3WbH-AfFAp0Dgk7TAx6T5FZb0EzHgVEwtVxTjcfTlA,16058
-vtlengine/API/__init__.py,sha256=2IDUvvSJdbkL5It8JulhmPCgkEnNbw52_VbbWm_aRp0,11061
+vtlengine/API/_InternalApi.py,sha256=uSPW-Lny_zSVrhHfkz6FbBEaxQXNySTGPI2LF8wKTHY,22761
+vtlengine/API/__init__.py,sha256=IiJZWSlHpUWq73Qv1_V-Tirim-ZnpF3xexFtW1Psyx8,17866
 vtlengine/API/data/schema/json_schema_2.1.json,sha256=v3-C0Xnq8qScJSPAtLgb3rjKMrd3nz-bIxgZdTSEUiU,4336
-vtlengine/AST/ASTConstructor.py,sha256=DdE0B6CyPt1RYb3he6L0tL-KhZ1UyHRxQisGC1GuKx8,19692
-vtlengine/AST/ASTConstructorModules/Expr.py,sha256=C4-toJm3bDt53_YSooeCJiqyUhK1ykgpiQTZjyZYsLE,65344
-vtlengine/AST/ASTConstructorModules/ExprComponents.py,sha256=6zUFY5a9KxzdOoSvC8TMyziYmfVW2bMbwYBI9r779Yg,36461
-vtlengine/AST/ASTConstructorModules/Terminals.py,sha256=141hfXUm5Xh53KD_W5VK5LwRMf8FYWLZ0PFR4Io2Dyo,25272
-vtlengine/AST/ASTConstructorModules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+vtlengine/AST/ASTComment.py,sha256=bAJW7aaqBXU2LqMtRvL_XOttdl1AFZufa15vmQdvNlY,1667
+vtlengine/AST/ASTConstructor.py,sha256=X55I98BKG1ItyGIDObF9ALVfCcWnU-0wwCWJsiPILkg,21488
+vtlengine/AST/ASTConstructorModules/Expr.py,sha256=aCL3uuQF0BJIels6rTckL8FAAykzImYb3AESs7umFcY,70066
+vtlengine/AST/ASTConstructorModules/ExprComponents.py,sha256=2Ft4e5w2NtbfaqSNW8I9qSpG9iUaPIfdug7yYWo2gqE,38553
+vtlengine/AST/ASTConstructorModules/Terminals.py,sha256=7zWDx_SFcbnL35G7Y0qZwl-lLEsfqReyzBX0UxwTCOk,27054
+vtlengine/AST/ASTConstructorModules/__init__.py,sha256=J6g6NhJD8j0Ek1YmpethxRiFdjhLxUTM0mc3NHRFLlM,1879
 vtlengine/AST/ASTDataExchange.py,sha256=kPSz21DGbEv-2bZowObseqf2d2_iQj1VnrqWuD9ZwtA,140
-vtlengine/AST/ASTEncoders.py,sha256=HZfG-Oo2u2l16tloEsHLAyNoLbkBC30X3H7drLq44rA,773
-vtlengine/AST/ASTTemplate.py,sha256=3dd9fkWNJywDn279tUuZGcOCPbyMiGiqnBwqAB1FWrU,12357
+vtlengine/AST/ASTEncoders.py,sha256=-Ar6a0GqMdJZK4CtZ1pUpIeGv57oSdN5qy3-aF0Zt9c,948
+vtlengine/AST/ASTString.py,sha256=y_xWVm2OHgsFs9As1-0PjEnmftjaoRsZJHZNtsFcKXc,25297
+vtlengine/AST/ASTTemplate.py,sha256=qUkz0AE1ay3gFrCidzhJAqxRnZR8nj98DOKAW2rXoso,12961
 vtlengine/AST/ASTVisitor.py,sha256=3QQTudBpbR4pPQdH7y07EgwuzhoGzNQ59qox8R-E3fM,500
-vtlengine/AST/DAG/__init__.py,sha256=DyFF3ZQW3f8RvtQ3X1I52BGk3CY8j51fCE5eEEwx6Dc,14618
+vtlengine/AST/DAG/__init__.py,sha256=ViL1vfLOCU28Yx8cOMt8aIvguSrzYYTb9qPhAwoExwY,15074
 vtlengine/AST/DAG/_words.py,sha256=lEuBQ_w-KoKGna-x3gFGfbX1KP4Ez5EgdomH2LOeodk,170
 vtlengine/AST/Grammar/Vtl.g4,sha256=86bBWjQLCHZSuB5iLIk0JZRgMyMg0n7xbU8qzot2cIE,26313
 vtlengine/AST/Grammar/VtlTokens.g4,sha256=SwDR_59U25APqslczFcvTUiPoH7bC6kGaH2GkJ3kYzA,9972
@@ -19,21 +21,21 @@ vtlengine/AST/Grammar/lexer.py,sha256=ncoPevKkUpGyYx5mVKcKjocVhFoKSdu-5NSQDPY2V3
 vtlengine/AST/Grammar/parser.py,sha256=ISi5OWmPbLitMp-8fg-wa1-475TfKZWK98jXjyOLi-8,634355
 vtlengine/AST/Grammar/tokens.py,sha256=YF7tO0nF2zYC-VaBAJLyc6VitM72CvYfFQpoPDGCMzo,3139
 vtlengine/AST/VtlVisitor.py,sha256=NJfXJVP6wNmasJmPLlojFqm9R5VSamOAKg_w7BMrhac,35332
-vtlengine/AST/__init__.py,sha256=_-NLJ-GCX5mMmAUGPnY3NzErnFEgjg8LdoGfpdRQjrU,9809
+vtlengine/AST/__init__.py,sha256=JnPilognG2rT2gtpjD4OwKFX0O3ZqvV-ic8gJxRu7Xo,11672
 vtlengine/DataTypes/TimeHandling.py,sha256=CYnC0sb1qbRjTnCSsA3wgez7QftOzrXHxbuZXlY3O3Q,20151
 vtlengine/DataTypes/__init__.py,sha256=LYXrde68bYm7MLeMLmr4haeOTSE4Fnpq9G2Ewy7DiaU,23084
 vtlengine/Exceptions/__init__.py,sha256=rSSskV_qCBFzg_W67Q1QBAL7Lnq88D7yi2BDYo1hytw,4727
-vtlengine/Exceptions/messages.py,sha256=Flczp7SrsLfMHjXaSPhM3mx9ScsWiUvEvcdKR53teeA,18811
-vtlengine/Interpreter/__init__.py,sha256=MBI5ApmgHljAjmdSkFi-2II_ELGQ4xWNkYI2dPMzaSg,81375
-vtlengine/Model/__init__.py,sha256=DOxJA_MlFVq2s0Yqx8S4TwXulMP8v3MpIh1CgW19vYg,15553
-vtlengine/Operators/Aggregation.py,sha256=yS8_ZwMbQuIt-FJ_O_KQp-dYxF5bj2CktERuaGoMuAY,11798
-vtlengine/Operators/Analytic.py,sha256=x-koqCS4z6BeD89Q2fDzU-LnQyTuynp_aayFbvmXkpQ,12573
+vtlengine/Exceptions/messages.py,sha256=9Tzkm-Q4ZI7UFFmWfsiy2xI7hFKMrnPB-EmUfVgxuBo,19428
+vtlengine/Interpreter/__init__.py,sha256=yFXLi3Mr7EnOmdynf-BvFwDHOBsWVjRXSkNgdmhfJVc,83533
+vtlengine/Model/__init__.py,sha256=xWrwhdUOj8Y-5x38zP5XnmFPw8IkBVBBG2bPsUBGLA8,15869
+vtlengine/Operators/Aggregation.py,sha256=43bqjaMqGG9zzFkcs6JLfShb1ISupmyQnXOQQ-HQo9E,11906
+vtlengine/Operators/Analytic.py,sha256=GiVNwa02JNRaVcHEkqKlat9WSIgQ32OhpgOdYc9PlJo,12818
 vtlengine/Operators/Assignment.py,sha256=xyJgGPoFYbq6mzX06gz7Q7L8jXJxpUkgzdY3Lrne2hw,793
 vtlengine/Operators/Boolean.py,sha256=3U5lHkxW5d7QQdGDNxXeXqejlPfFrXKG8_TqknrC8Ls,2856
 vtlengine/Operators/CastOperator.py,sha256=mvWfNhJ1pEEk_ZQp-3unLoYJvJShUjUu_BOYQ6ByySI,16951
 vtlengine/Operators/Clause.py,sha256=_Sdt3qQUpphNRs4IQW5pSj9kagzwLluV9BRHMGNxqsI,15022
 vtlengine/Operators/Comparison.py,sha256=7G2UK1BDCDJR4jTXa-txJlAJEvzXEeYaDSA_2oxjgKY,17286
-vtlengine/Operators/Conditional.py,sha256=kjFQ90DS3lVIeYdsDlhxne_dOCtzTdvnWY8AOnR7ceM,19294
+vtlengine/Operators/Conditional.py,sha256=nxatC0tr8UvsOVcCMcBjK_U6hzjbd2uf3VG3YiCMUOo,19944
 vtlengine/Operators/General.py,sha256=q1fpqP4IYEwURXi8Eo-_j5AUktK0dvNosL9SgSe7a8w,6711
 vtlengine/Operators/HROperators.py,sha256=VVp5FcdbDXhU_VCfUA6t75bs51qx9fKJT4n15WM2vyM,8866
 vtlengine/Operators/Join.py,sha256=df2XG2tKmha_WUhHEYhgZIVc_2L8Wr45o0ISm-HOReA,18108
@@ -41,18 +43,19 @@ vtlengine/Operators/Numeric.py,sha256=icYTWzEsw6VQFLYc5Wucgr8961d8ZwTFx_wfZ8Wp9C
 vtlengine/Operators/RoleSetter.py,sha256=mHZIdcHC3wflj81ekLbioDG1f8yHZXYDQFymV-KnyXA,2274
 vtlengine/Operators/Set.py,sha256=f1uLeY4XZF0cWEwpXRB_CczgbXr6s33DYPuFt39HlEg,7084
 vtlengine/Operators/String.py,sha256=ghWtYl6oUEAAzynY1a9Hg4yqRA9Sa7uk2B6iF9uuSqQ,20230
-vtlengine/Operators/Time.py,sha256=9f2kQ6iAoA4YPvlfphJ_uQjM-ZuqjSnOs312ttWMhgg,42679
+vtlengine/Operators/Time.py,sha256=tpBZkuDldlcVnNWUNvrc8yhnG1knalZVFklGppbzw4k,42734
 vtlengine/Operators/Validation.py,sha256=ev3HyU7e1XbeAtUQ1y6zY3fzBwMqetDPhG3NNveAGOE,9988
 vtlengine/Operators/__init__.py,sha256=GN5eaAwmzfYKD7JJRIaRqdIJzflGc3UMvrOC9mlYNVo,37227
-vtlengine/Utils/__init__.py,sha256=ZobqGLc4rpMrsmniexTD4J-VokQt3qLrBGdFEDHHT1M,7571
-vtlengine/__init__.py,sha256=L9tGzRGQ8HMDS23sVWIbBvj41sXR89pf0ZMzEidIEMM,89
+vtlengine/Utils/__init__.py,sha256=zhGPJA8MjHmtEEwMS4CxEFYL0tk2L5F0YPn7bitdRzM,8954
+vtlengine/__extras_check.py,sha256=Wr-lxGZhXJZEacVV5cUkvKt7XM-mry0kYAe3VxNrVcY,614
+vtlengine/__init__.py,sha256=E31A6eDHTZsPNI5Cq-zv9-O87D5n8du-oZtVB6u3dVk,165
 vtlengine/files/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vtlengine/files/output/__init__.py,sha256=W0P5E4Gnikp2cVIVtxPZ4BQazfUW7MTlMNjq7ltg5I8,1169
+vtlengine/files/output/__init__.py,sha256=4tmf-p1Y1u5Ohrwt3clQA-FMGaijKI3HC_iwn3H9J8c,1250
 vtlengine/files/output/_time_period_representation.py,sha256=D5XCSXyEuX_aBzTvBV3sZxACcgwXz2Uu_YH3loMP8q0,1610
-vtlengine/files/parser/__init__.py,sha256=5emul7mwgAEq3A4_y6gyIl-PxoUV_oMxEtKlmkzNux4,9621
-vtlengine/files/parser/_rfc_dialect.py,sha256=0T8GshGA5z9ZgYStH7zz2ZwtdiGkj7B8jXcxsPkXfjs,488
+vtlengine/files/parser/__init__.py,sha256=JamEIWI0pFZxT0sKYE6Fii8H2JQcsFn4Nf3T0OLSm9g,8637
+vtlengine/files/parser/_rfc_dialect.py,sha256=Y8kAYBxH_t9AieN_tYg7QRh5A4DgvabKarx9Ko3QeCQ,462
 vtlengine/files/parser/_time_checking.py,sha256=UAC_Pv-eQJKrhgTguWb--xfqMMs6quyMeiAkGBt_vgI,4725
-vtlengine-1.0.4.dist-info/LICENSE.md,sha256=2xqHuoHohba7gpcZZKtOICRjzeKsQANXG8WoV9V35KM,33893
-vtlengine-1.0.4.dist-info/METADATA,sha256=6Mbusz1Tl8IKHID-GiuA0NEDg2uMu1ZXNf_X1lzhbHo,8816
-vtlengine-1.0.4.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-vtlengine-1.0.4.dist-info/RECORD,,
+vtlengine-1.1.dist-info/LICENSE.md,sha256=2xqHuoHohba7gpcZZKtOICRjzeKsQANXG8WoV9V35KM,33893
+vtlengine-1.1.dist-info/METADATA,sha256=0F0SO41WyyQfA4zhfkYmIaM0-6eSlShGKoDE7i2uebI,8971
+vtlengine-1.1.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+vtlengine-1.1.dist-info/RECORD,,
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 2.0.1
+Generator: poetry-core 2.1.3
 Root-Is-Purelib: true
 Tag: py3-none-any