pytrilogy 0.0.3.94__py3-none-any.whl → 0.0.3.96__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release.
This version of pytrilogy might be problematic.
- {pytrilogy-0.0.3.94.dist-info → pytrilogy-0.0.3.96.dist-info}/METADATA +184 -136
- {pytrilogy-0.0.3.94.dist-info → pytrilogy-0.0.3.96.dist-info}/RECORD +35 -30
- trilogy/__init__.py +1 -1
- trilogy/authoring/__init__.py +61 -43
- trilogy/core/enums.py +13 -0
- trilogy/core/env_processor.py +19 -10
- trilogy/core/environment_helpers.py +111 -0
- trilogy/core/exceptions.py +21 -1
- trilogy/core/functions.py +6 -1
- trilogy/core/graph_models.py +11 -37
- trilogy/core/internal.py +18 -0
- trilogy/core/models/core.py +3 -0
- trilogy/core/models/environment.py +28 -0
- trilogy/core/models/execute.py +7 -0
- trilogy/core/processing/node_generators/select_merge_node.py +2 -2
- trilogy/core/query_processor.py +2 -1
- trilogy/core/statements/author.py +18 -3
- trilogy/core/statements/common.py +0 -10
- trilogy/core/statements/execute.py +73 -16
- trilogy/core/validation/common.py +110 -0
- trilogy/core/validation/concept.py +125 -0
- trilogy/core/validation/datasource.py +194 -0
- trilogy/core/validation/environment.py +71 -0
- trilogy/dialect/base.py +48 -21
- trilogy/dialect/metadata.py +233 -0
- trilogy/dialect/sql_server.py +3 -1
- trilogy/engine.py +25 -7
- trilogy/executor.py +94 -162
- trilogy/parsing/parse_engine.py +34 -3
- trilogy/parsing/trilogy.lark +11 -5
- {pytrilogy-0.0.3.94.dist-info → pytrilogy-0.0.3.96.dist-info}/WHEEL +0 -0
- {pytrilogy-0.0.3.94.dist-info → pytrilogy-0.0.3.96.dist-info}/entry_points.txt +0 -0
- {pytrilogy-0.0.3.94.dist-info → pytrilogy-0.0.3.96.dist-info}/licenses/LICENSE.md +0 -0
- {pytrilogy-0.0.3.94.dist-info → pytrilogy-0.0.3.96.dist-info}/top_level.txt +0 -0
- /trilogy/{compiler.py → core/validation/__init__.py} +0 -0
trilogy/core/statements/execute.py
@@ -1,49 +1,106 @@
-from
+from dataclasses import dataclass, field
+from typing import List, Optional, Union

-from
-from
-
-from trilogy.core.models.author import ConceptRef
+from trilogy.core.enums import IOType, ValidationScope
+from trilogy.core.models.author import ConceptRef, HavingClause, WhereClause
 from trilogy.core.models.build import (
     BuildConcept,
     BuildDatasource,
     BuildOrderBy,
 )
-from trilogy.core.models.
+from trilogy.core.models.datasource import Address, Datasource
+from trilogy.core.models.environment import EnvironmentConceptDict
 from trilogy.core.models.execute import CTE, UnionCTE
-from trilogy.core.statements.common import CopyQueryMixin, PersistQueryMixin


-
+@dataclass
+class CopyQueryMixin:
+    target: str
+    target_type: IOType
+
+
+@dataclass
+class MaterializedDataset:
+    address: Address
+
+
+@dataclass
+class PersistQueryMixin:
+    output_to: MaterializedDataset
+    datasource: Datasource
+
+
+@dataclass
+class SelectTypeMixin:
+    where_clause: Union["WhereClause", None] = field(default=None)
+    having_clause: Union["HavingClause", None] = field(default=None)
+
+    @property
+    def output_components(self) -> List[ConceptRef]:
+        raise NotImplementedError
+
+
+@dataclass
+class ProcessedQuery:
     output_columns: List[ConceptRef]
     ctes: List[CTE | UnionCTE]
     base: CTE | UnionCTE
-    hidden_columns: set[str] =
+    hidden_columns: set[str] = field(default_factory=set)
     limit: Optional[int] = None
     order_by: Optional[BuildOrderBy] = None
-    local_concepts:
-    EnvironmentConceptDict
-
+    local_concepts: EnvironmentConceptDict = field(
+        default_factory=EnvironmentConceptDict
+    )
+    locally_derived: set[str] = field(default_factory=set)


+@dataclass
 class ProcessedQueryPersist(ProcessedQuery, PersistQueryMixin):
     pass


+@dataclass
 class ProcessedCopyStatement(ProcessedQuery, CopyQueryMixin):
     pass


-
+@dataclass
+class ProcessedRawSQLStatement:
     text: str


-
+@dataclass
+class ProcessedValidateStatement:
+    scope: ValidationScope
+    targets: Optional[List[str]]
+
+
+@dataclass
+class ProcessedStaticValueOutput:
     values: List[dict]


-
+@dataclass
+class ProcessedShowStatement:
     output_columns: List[ConceptRef]
     output_values: List[
-        Union[
+        Union[
+            BuildConcept,
+            BuildDatasource,
+            ProcessedQuery,
+            ProcessedQueryPersist,
+            ProcessedCopyStatement,
+            ProcessedValidateStatement,
+            ProcessedStaticValueOutput,
+        ]
     ]
+
+
+PROCESSED_STATEMENT_TYPES = (
+    ProcessedCopyStatement
+    | ProcessedQuery
+    | ProcessedRawSQLStatement
+    | ProcessedQueryPersist
+    | ProcessedShowStatement
+    | ProcessedValidateStatement
+)
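The practical effect of this hunk is that the processed statement classes are now plain dataclasses defined in one module, validate statements gain a first-class processed form, and the new PROCESSED_STATEMENT_TYPES union enumerates everything downstream code may be handed. A minimal sketch of dispatching on these types; the describe function itself is illustrative, not part of pytrilogy:

# Illustrative only: branch on the processed statement classes added above.
from trilogy.core.statements.execute import (
    ProcessedQuery,
    ProcessedRawSQLStatement,
    ProcessedValidateStatement,
)

def describe(statement) -> str:
    # ProcessedValidateStatement carries the validation scope and optional targets
    if isinstance(statement, ProcessedValidateStatement):
        return f"validate scope={statement.scope} targets={statement.targets}"
    # Raw SQL passes through as text
    if isinstance(statement, ProcessedRawSQLStatement):
        return statement.text
    # Persist/copy statements subclass ProcessedQuery, so check the base class last
    if isinstance(statement, ProcessedQuery):
        return f"query with {len(statement.ctes)} CTEs"
    raise TypeError(f"unsupported statement type: {type(statement)}")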
trilogy/core/validation/common.py (new file)
@@ -0,0 +1,110 @@
+from dataclasses import dataclass
+from enum import Enum
+
+from trilogy import Environment
+from trilogy.authoring import ConceptRef
+from trilogy.core.exceptions import ModelValidationError
+from trilogy.core.models.build import (
+    BuildComparison,
+    BuildConcept,
+    BuildConditional,
+    BuildDatasource,
+)
+from trilogy.core.models.environment import EnvironmentConceptDict
+from trilogy.core.models.execute import (
+    CTE,
+    QueryDatasource,
+)
+from trilogy.core.statements.execute import ProcessedQuery
+
+
+class ExpectationType(Enum):
+    LOGICAL = "logical"
+    ROWCOUNT = "rowcount"
+    DATA_TYPE_LIST = "data_type_list"
+
+
+@dataclass
+class ValidationTest:
+    check_type: ExpectationType
+    raw_query: ProcessedQuery | None = None
+    generated_query: str | None = None
+    expected: str | None = None
+    result: ModelValidationError | None = None
+    ran: bool = True
+
+
+class ValidationType(Enum):
+    DATASOURCES = "datasources"
+    CONCEPTS = "concepts"
+
+
+def easy_query(
+    concepts: list[BuildConcept],
+    datasource: BuildDatasource,
+    env: Environment,
+    condition: BuildConditional | BuildComparison | None = None,
+    limit: int = 100,
+):
+    """
+    Build basic datasource specific queries.
+    """
+    datasource_outputs = {c.address: c for c in datasource.concepts}
+    first_qds_concepts = datasource.concepts + concepts
+    root_qds = QueryDatasource(
+        input_concepts=first_qds_concepts,
+        output_concepts=concepts,
+        datasources=[datasource],
+        joins=[],
+        source_map={
+            concept.address: (
+                set([datasource]) if concept.address in datasource_outputs else set()
+            )
+            # include all base datasource conepts for convenience
+            for concept in first_qds_concepts
+        },
+        grain=datasource.grain,
+    )
+    cte = CTE(
+        name=f"datasource_{datasource.name}_base",
+        source=root_qds,
+        output_columns=concepts,
+        source_map={
+            concept.address: (
+                [datasource.safe_identifier]
+                if concept.address in datasource_outputs
+                else []
+            )
+            for concept in first_qds_concepts
+        },
+        grain=datasource.grain,
+        group_to_grain=True,
+        base_alias_override=datasource.safe_identifier,
+    )
+
+    filter_cte = CTE(
+        name=f"datasource_{datasource.name}_filter",
+        source=QueryDatasource(
+            datasources=[root_qds],
+            input_concepts=cte.output_columns,
+            output_concepts=cte.output_columns,
+            joins=[],
+            source_map={concept.address: (set([root_qds])) for concept in concepts},
+            grain=cte.grain,
+        ),
+        parent_ctes=[cte],
+        output_columns=cte.output_columns,
+        source_map={
+            concept.address: [cte.identifier] for concept in cte.output_columns
+        },
+        grain=cte.grain,
+        condition=condition,
+        limit=limit,
+    )
+
+    return ProcessedQuery(
+        output_columns=[ConceptRef(address=concept.address) for concept in concepts],
+        ctes=[cte, filter_cte],
+        base=cte,
+        local_concepts=EnvironmentConceptDict(**{}),
+    )
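easy_query builds a two-CTE probe (a per-datasource base plus an optional filter) and returns it as an ordinary ProcessedQuery, so the validators can hand it to the normal SQL generator; ValidationTest is the record each check produces. A small sketch of summarizing those records; the summarize helper is hypothetical and not part of the package:

# Hypothetical reporting helper over the ValidationTest dataclass defined above.
from trilogy.core.validation.common import ValidationTest

def summarize(results: list[ValidationTest]) -> list[str]:
    lines = []
    for test in results:
        # ran=False means the check was only generated, not executed
        status = "pending" if not test.ran else ("failed" if test.result else "passed")
        lines.append(f"{status}: {test.check_type.value}, expected {test.expected}")
        if test.result:
            lines.append(f"  -> {test.result}")
    return lines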
trilogy/core/validation/concept.py (new file)
@@ -0,0 +1,125 @@
+from trilogy import Environment, Executor
+from trilogy.core.enums import Derivation, Purpose
+from trilogy.core.exceptions import (
+    ConceptModelValidationError,
+    DatasourceModelValidationError,
+)
+from trilogy.core.models.build import (
+    BuildConcept,
+)
+from trilogy.core.models.build_environment import BuildEnvironment
+from trilogy.core.validation.common import ExpectationType, ValidationTest, easy_query
+
+
+def validate_property_concept(
+    concept: BuildConcept, exec: Executor | None = None
+) -> list[ValidationTest]:
+    return []
+
+
+def validate_key_concept(
+    concept: BuildConcept,
+    env: Environment,
+    build_env: BuildEnvironment,
+    exec: Executor | None = None,
+):
+    results: list[ValidationTest] = []
+    seen: dict[str, int] = {}
+    for datasource in build_env.datasources.values():
+        if concept.address in [c.address for c in datasource.concepts]:
+            assignment = [
+                x for x in datasource.columns if x.concept.address == concept.address
+            ][0]
+            # if it's not a partial, skip it
+            if not assignment.is_complete:
+                continue
+            type_query = easy_query(
+                concepts=[
+                    # build_env.concepts[concept.address],
+                    build_env.concepts[f"grain_check_{concept.safe_address}"],
+                ],
+                datasource=datasource,
+                env=env,
+                limit=1,
+            )
+            if exec:
+                type_sql = exec.generate_sql(type_query)[-1]
+
+                rows = exec.execute_raw_sql(type_sql).fetchall()
+                seen[datasource.name] = rows[0][0] if rows else 0
+            else:
+                results.append(
+                    ValidationTest(
+                        raw_query=type_query,
+                        check_type=ExpectationType.ROWCOUNT,
+                        expected=f"equal_max_{concept.safe_address}",
+                        result=None,
+                        ran=False,
+                    )
+                )
+
+    if not exec:
+        return results
+    max_seen: int = max([v for v in seen.values() if v is not None], default=0)
+    for datasource in build_env.datasources.values():
+        if concept.address in [c.address for c in datasource.concepts]:
+            assignment = [
+                x for x in datasource.columns if x.concept.address == concept.address
+            ][0]
+            err = None
+            datasource_count: int = seen.get(datasource.name, 0)
+            if datasource_count < max_seen and assignment.is_complete:
+                err = DatasourceModelValidationError(
+                    f"Key concept {concept.address} is missing values in datasource {datasource.name} (max cardinality in data {max_seen}, datasource has {seen[datasource.name]} values) but is not marked as partial."
+                )
+            results.append(
+                ValidationTest(
+                    check_type=ExpectationType.ROWCOUNT,
+                    expected=str(max_seen),
+                    result=err,
+                    ran=True,
+                )
+            )
+
+    return results
+
+
+def validate_datasources(
+    concept: BuildConcept, build_env: BuildEnvironment
+) -> list[ValidationTest]:
+    if concept.lineage:
+        return []
+    for datasource in build_env.datasources.values():
+        if concept.address in [c.address for c in datasource.concepts]:
+            return []
+    if not concept.derivation == Derivation.ROOT:
+        return []
+    if concept.name.startswith("__") or (
+        concept.namespace and concept.namespace.startswith("__")
+    ):
+        return []
+    return [
+        ValidationTest(
+            check_type=ExpectationType.LOGICAL,
+            expected=None,
+            result=ConceptModelValidationError(
+                f"Concept {concept.address} is a root concept but has no datasources bound"
+            ),
+            ran=True,
+        )
+    ]
+
+
+def validate_concept(
+    concept: BuildConcept,
+    env: Environment,
+    build_env: BuildEnvironment,
+    exec: Executor | None = None,
+) -> list[ValidationTest]:
+    base: list[ValidationTest] = []
+    base += validate_datasources(concept, build_env)
+    if concept.purpose == Purpose.PROPERTY:
+        base += validate_property_concept(concept)
+    elif concept.purpose == Purpose.KEY:
+        base += validate_key_concept(concept, env, build_env, exec)
+    return base
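validate_key_concept compares per-datasource distinct counts of a key against the maximum observed across all datasources, flagging non-partial bindings that are missing values, while validate_datasources flags root concepts with no datasource bound at all. A hedged sketch of using the latter on its own, assuming env is an already-populated trilogy Environment; materialize_for_select mirrors its use in the environment.py hunk further below:

# Sketch: list root concepts that no datasource binds, via validate_datasources above.
from trilogy.core.validation.concept import validate_datasources

def unbound_root_concepts(env) -> list[str]:
    build_env = env.materialize_for_select()
    return [
        concept.address
        for concept in build_env.concepts.values()
        # a non-empty result means the root concept has no datasource bound
        if validate_datasources(concept, build_env)
    ]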
trilogy/core/validation/datasource.py (new file)
@@ -0,0 +1,194 @@
+from datetime import date, datetime
+from decimal import Decimal
+from typing import Any
+
+from trilogy import Environment, Executor
+from trilogy.authoring import (
+    ArrayType,
+    DataType,
+    MapType,
+    NumericType,
+    StructType,
+    TraitDataType,
+)
+from trilogy.core.enums import ComparisonOperator
+from trilogy.core.exceptions import DatasourceModelValidationError
+from trilogy.core.models.build import (
+    BuildComparison,
+    BuildDatasource,
+)
+from trilogy.core.models.build_environment import BuildEnvironment
+from trilogy.core.validation.common import ExpectationType, ValidationTest, easy_query
+from trilogy.utility import unique
+
+
+def type_check(
+    input: Any,
+    expected_type: (
+        DataType | ArrayType | StructType | MapType | NumericType | TraitDataType
+    ),
+    nullable: bool = True,
+) -> bool:
+    if input is None and nullable:
+        return True
+    target_type = expected_type
+    while isinstance(target_type, TraitDataType):
+        return type_check(input, target_type.data_type, nullable)
+    if target_type == DataType.STRING:
+        return isinstance(input, str)
+    if target_type == DataType.INTEGER:
+        return isinstance(input, int)
+    if target_type == DataType.FLOAT or isinstance(target_type, NumericType):
+        return (
+            isinstance(input, float)
+            or isinstance(input, int)
+            or isinstance(input, Decimal)
+        )
+    if target_type == DataType.BOOL:
+        return isinstance(input, bool)
+    if target_type == DataType.DATE:
+        return isinstance(input, date)
+    if target_type == DataType.DATETIME:
+        return isinstance(input, datetime)
+    if target_type == DataType.ARRAY or isinstance(target_type, ArrayType):
+        return isinstance(input, list)
+    if target_type == DataType.MAP or isinstance(target_type, MapType):
+        return isinstance(input, dict)
+    if target_type == DataType.STRUCT or isinstance(target_type, StructType):
+        return isinstance(input, dict)
+    return False
+
+
+def validate_datasource(
+    datasource: BuildDatasource,
+    env: Environment,
+    build_env: BuildEnvironment,
+    exec: Executor | None = None,
+) -> list[ValidationTest]:
+    results: list[ValidationTest] = []
+    # we might have merged concepts, where both will map out to the same
+    unique_outputs = unique(
+        [build_env.concepts[col.concept.address] for col in datasource.columns],
+        "address",
+    )
+    type_query = easy_query(
+        concepts=unique_outputs,
+        datasource=datasource,
+        env=env,
+        limit=100,
+    )
+
+    rows = []
+    if exec:
+        type_sql = exec.generate_sql(type_query)[-1]
+        try:
+            rows = exec.execute_raw_sql(type_sql).fetchall()
+        except Exception as e:
+            results.append(
+                ValidationTest(
+                    raw_query=type_query,
+                    generated_query=type_sql,
+                    check_type=ExpectationType.LOGICAL,
+                    expected="valid_sql",
+                    result=DatasourceModelValidationError(
+                        f"Datasource {datasource.name} failed validation. Error executing type query {type_sql}: {e}"
+                    ),
+                    ran=True,
+                )
+            )
+            return results
+    else:
+
+        results.append(
+            ValidationTest(
+                raw_query=type_query,
+                check_type=ExpectationType.LOGICAL,
+                expected="datatype_match",
+                result=None,
+                ran=False,
+            )
+        )
+        return results
+    failures: list[
+        tuple[
+            str,
+            Any,
+            DataType | ArrayType | StructType | MapType | NumericType | TraitDataType,
+            bool,
+        ]
+    ] = []
+    cols_with_error = set()
+    for row in rows:
+        for col in datasource.columns:
+            actual_address = build_env.concepts[col.concept.address].safe_address
+            if actual_address in cols_with_error:
+                continue
+            rval = row[actual_address]
+            passed = type_check(rval, col.concept.datatype, col.is_nullable)
+            if not passed:
+                failures.append(
+                    (
+                        col.concept.address,
+                        rval,
+                        col.concept.datatype,
+                        col.is_nullable,
+                    )
+                )
+                cols_with_error.add(actual_address)
+
+    def format_failure(failure):
+        return f"Concept {failure[0]} value '{failure[1]}' does not conform to expected type {str(failure[2])} (nullable={failure[3]})"
+
+    if failures:
+        results.append(
+            ValidationTest(
+                check_type=ExpectationType.LOGICAL,
+                expected="datatype_match",
+                ran=True,
+                result=DatasourceModelValidationError(
+                    f"Datasource {datasource.name} failed validation. Found rows that do not conform to types: {[format_failure(failure) for failure in failures]}",
+                ),
+            )
+        )
+
+    query = easy_query(
+        concepts=[build_env.concepts[name] for name in datasource.grain.components]
+        + [build_env.concepts["grain_check"]],
+        datasource=datasource,
+        env=exec.environment,
+        condition=BuildComparison(
+            left=build_env.concepts["grain_check"],
+            right=1,
+            operator=ComparisonOperator.GT,
+        ),
+    )
+    if not exec:
+        results.append(
+            ValidationTest(
+                raw_query=query,
+                check_type=ExpectationType.ROWCOUNT,
+                expected="0",
+                result=None,
+                ran=False,
+            )
+        )

+    else:
+        sql = exec.generate_sql(query)[-1]
+
+        rows = exec.execute_raw_sql(sql).fetchmany(10)
+        if rows:
+            results.append(
+                ValidationTest(
+                    raw_query=query,
+                    generated_query=sql,
+                    check_type=ExpectationType.ROWCOUNT,
+                    expected="0",
+                    result=DatasourceModelValidationError(
+                        f"Datasource {datasource.name} failed validation. Found rows that do not conform to grain: {rows}"
+                    ),
+                    ran=True,
+                )
+            )
+
+    return results
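type_check is a plain value-level predicate, so it can be exercised in isolation; a few illustrative assertions follow, with values chosen here for demonstration rather than taken from the package tests:

# Illustrative checks against the type_check helper defined above.
from datetime import date
from trilogy.authoring import DataType
from trilogy.core.validation.datasource import type_check

assert type_check("abc", DataType.STRING)
assert type_check(None, DataType.STRING)                      # nullable by default
assert not type_check(None, DataType.STRING, nullable=False)
assert type_check(3, DataType.FLOAT)                          # ints are accepted for FLOAT
assert type_check(date(2024, 1, 1), DataType.DATE)
assert not type_check("2024-01-01", DataType.DATE)            # strings are not dates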
trilogy/core/validation/environment.py (new file)
@@ -0,0 +1,71 @@
+from trilogy import Environment, Executor
+from trilogy.authoring import DataType, Function
+from trilogy.core.enums import FunctionType, Purpose, ValidationScope
+from trilogy.core.exceptions import (
+    ModelValidationError,
+)
+from trilogy.core.validation.common import ValidationTest
+from trilogy.core.validation.concept import validate_concept
+from trilogy.core.validation.datasource import validate_datasource
+from trilogy.parsing.common import function_to_concept
+
+
+def validate_environment(
+    env: Environment,
+    scope: ValidationScope = ValidationScope.ALL,
+    targets: list[str] | None = None,
+    exec: Executor | None = None,
+) -> list[ValidationTest]:
+    # avoid mutating the environment for validation
+    generate_only = exec is None
+    env = env.duplicate()
+    grain_check = function_to_concept(
+        parent=Function(
+            operator=FunctionType.SUM,
+            arguments=[1],
+            output_datatype=DataType.INTEGER,
+            output_purpose=Purpose.METRIC,
+        ),
+        name="grain_check",
+        environment=env,
+    )
+    env.add_concept(grain_check)
+    new_concepts = []
+    for concept in env.concepts.values():
+        concept_grain_check = function_to_concept(
+            parent=Function(
+                operator=FunctionType.COUNT_DISTINCT,
+                arguments=[concept.reference],
+                output_datatype=DataType.INTEGER,
+                output_purpose=Purpose.METRIC,
+            ),
+            name=f"grain_check_{concept.safe_address}",
+            environment=env,
+        )
+        new_concepts.append(concept_grain_check)
+    for concept in new_concepts:
+        env.add_concept(concept)
+    build_env = env.materialize_for_select()
+    results: list[ValidationTest] = []
+    if scope == ValidationScope.ALL or scope == ValidationScope.DATASOURCES:
+        for datasource in build_env.datasources.values():
+            if targets and datasource.name not in targets:
+                continue
+            results += validate_datasource(datasource, env, build_env, exec)
+    if scope == ValidationScope.ALL or scope == ValidationScope.CONCEPTS:
+
+        for bconcept in build_env.concepts.values():
+            if targets and bconcept.address not in targets:
+                continue
+            results += validate_concept(bconcept, env, build_env, exec)
+
+    # raise a nicely formatted union of all exceptions
+    exceptions: list[ModelValidationError] = [e.result for e in results if e.result]
+    if exceptions:
+        if not generate_only:
+            messages = "\n".join([str(e) for e in exceptions])
+            raise ModelValidationError(
+                f"Environment validation failed with the following errors:\n{messages}",
+                children=exceptions,
+            )
+    return results
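validate_environment ties the new pieces together: it duplicates the environment, injects grain_check and per-concept grain_check_<name> metric concepts, then runs the datasource and concept checks; presumably this is what the new ProcessedValidateStatement ultimately drives. A hedged usage sketch, assuming env is an already-populated trilogy Environment: passing an Executor via exec= executes the probe queries and raises ModelValidationError on failure, while exec=None only generates the checks.

# Sketch: run validation in generate-only mode and inspect the pending checks.
from trilogy.core.enums import ValidationScope
from trilogy.core.validation.environment import validate_environment

def pending_checks(env, targets: list[str] | None = None):
    results = validate_environment(
        env, scope=ValidationScope.ALL, targets=targets, exec=None
    )
    # ran=False marks checks that were generated but not executed
    return [test for test in results if not test.ran]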