cfn-check 0.2.2__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cfn-check might be problematic.
- cfn_check/cli/validate.py +5 -9
- cfn_check/evaluation/evaluator.py +85 -0
- cfn_check/evaluation/parsing/__init__.py +1 -0
- cfn_check/evaluation/parsing/query_parser.py +145 -0
- cfn_check/evaluation/parsing/token.py +269 -0
- cfn_check/evaluation/parsing/token_type.py +14 -0
- cfn_check/evaluation/validate.py +56 -47
- cfn_check-0.3.1.dist-info/METADATA +541 -0
- {cfn_check-0.2.2.dist-info → cfn_check-0.3.1.dist-info}/RECORD +14 -11
- example/rules.py +65 -6
- cfn_check/evaluation/check.py +0 -20
- cfn_check/evaluation/search.py +0 -137
- cfn_check-0.2.2.dist-info/METADATA +0 -247
- {cfn_check-0.2.2.dist-info → cfn_check-0.3.1.dist-info}/WHEEL +0 -0
- {cfn_check-0.2.2.dist-info → cfn_check-0.3.1.dist-info}/entry_points.txt +0 -0
- {cfn_check-0.2.2.dist-info → cfn_check-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {cfn_check-0.2.2.dist-info → cfn_check-0.3.1.dist-info}/top_level.txt +0 -0
cfn_check/cli/validate.py
CHANGED
@@ -5,7 +5,7 @@ from cocoa.cli import CLI, ImportType
 
 from cfn_check.cli.utils.attributes import bind
 from cfn_check.cli.utils.files import load_templates
-from cfn_check.evaluation.validate import
+from cfn_check.evaluation.validate import ValidationSet
 from cfn_check.logging.models import InfoLog
 from cfn_check.collection.collection import Collection
 from cfn_check.validation.validator import Validator
@@ -58,7 +58,7 @@ async def validate(
         file_pattern=file_pattern,
     )
 
-
+    validation_set = ValidationSet([
         bind(
             rule,
             validation,
@@ -66,16 +66,12 @@ async def validate(
         for rule in rules.data.values()
         for _, validation in inspect.getmembers(rule)
         if isinstance(validation, Validator)
-    ]
+    ])
 
-    if validation_error :=
-        templates,
-        validations,
-    ):
+    if validation_error := validation_set.validate(templates):
         raise validation_error
 
-    checks_passed = len(validations)
     templates_evaluated = len(templates)
 
-    await logger.log(InfoLog(message=f'✅ {
+    await logger.log(InfoLog(message=f'✅ {validation_set.count} validations met for {templates_evaluated} templates'))
 
cfn_check/evaluation/evaluator.py
ADDED
@@ -0,0 +1,85 @@
+from collections import deque
+from typing import Deque
+
+from cfn_check.shared.types import (
+    Data,
+    Items,
+    YamlObject,
+)
+
+from .parsing import QueryParser
+
+class Evaluator:
+
+    def __init__(self):
+        self._query_parser = QueryParser()
+
+    def match(
+        self,
+        resources: YamlObject,
+        path: str,
+    ):
+        items: Items = deque()
+        items.append(resources)
+
+        segments = path.split("::")[::-1]
+        # Queries can be multi-segment,
+        # so we effectively perform per-segment
+        # repeated DFS searches, returning the matches
+        # for each segment
+
+        composite_keys: list[str] = []
+
+        while len(segments):
+            query = segments.pop()
+            items, keys = self._match_with_query(items, query)
+
+            if len(composite_keys) == 0:
+                composite_keys.extend(keys)
+
+            else:
+                updated_keys: list[str] = []
+                for composite_key in composite_keys:
+                    while len(keys):
+                        key = keys.pop()
+
+                        updated_keys.append(f'{composite_key}.{key}')
+
+                composite_keys = updated_keys
+
+        assert len(composite_keys) == len(items), f'❌ {len(items)} returned for {len(composite_keys)} keys. Are you sure you used a range ([*]) selector?'
+
+        results: list[tuple[str, Data]] = []
+        for idx, item in enumerate(list(items)):
+            results.append((
+                composite_keys[idx],
+                item,
+            ))
+
+        return results
+
+    def _match_with_query(
+        self,
+        items: Items,
+        query: str,
+    ) -> tuple[Items, Deque[str]]:
+
+        found: Items = deque()
+        keys: Deque[str] = deque()
+
+        tokens = self._query_parser.parse(query)
+
+        while len(items):
+            node = items.pop()
+
+            for token in tokens:
+                matched_keys, matches = token.match(node)
+
+                if matched_keys and matches:
+                    keys.extend(matched_keys)
+                    found.extend(matches)
+
+                elif matched_keys is None and matches:
+                    items.extend(matches)
+
+        return found, keys
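The Evaluator resolves a "::"-delimited query against an already-parsed template: each segment is parsed into tokens, matched over the current set of nodes, and the per-segment keys are folded into dotted composite paths that are returned alongside the matched values. A minimal sketch of that behaviour, using a hand-written dict in place of a loaded template (import path taken from the file listing above; result ordering follows the internal deque and is not guaranteed):

from cfn_check.evaluation.evaluator import Evaluator

# Illustrative stand-in for a parsed CloudFormation template, not a real file.
template = {
    "Resources": {
        "Queue": {"Type": "AWS::SQS::Queue"},
        "Topic": {"Type": "AWS::SNS::Topic"},
    }
}

evaluator = Evaluator()

# "Resources" and "Type" are KEY segments; "*" fans out over every resource.
results = evaluator.match(template, "Resources::*::Type")

# Two ('Resources.*.Type', <Type string>) pairs come back, one per resource.
for composite_key, value in results:
    print(composite_key, value)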
cfn_check/evaluation/parsing/__init__.py
ADDED
@@ -0,0 +1 @@
+from .query_parser import QueryParser as QueryParser
cfn_check/evaluation/parsing/query_parser.py
ADDED
@@ -0,0 +1,145 @@
+import re
+import sys
+from .token import Token
+from .token_type import TokenType
+
+
+class QueryParser:
+
+    def __init__(self):
+        self.numbers_pattern = re.compile(r'\d+')
+
+    def parse(
+        self,
+        query: str,
+    ):
+
+        tokens: list[Token] = []
+
+        if query.startswith('[') and query.endswith(']'):
+            tokens.extend(
+                self._parse_range_selector_token(query),
+            )
+
+        elif query.startswith('(') and query.endswith(')'):
+            tokens.append(
+                Token(
+                    re.compile(query),
+                    TokenType.PATTERN,
+                )
+            )
+
+        elif query == "*":
+            tokens.append(
+                Token(
+                    query,
+                    TokenType.WILDCARD,
+                )
+            )
+
+        else:
+            tokens.append(
+                Token(
+                    query,
+                    TokenType.KEY,
+                )
+            )
+
+        return tokens
+
+    def _parse_range_selector_token(
+        self,
+        query: str,
+    ):
+        segments = [
+            segment
+            for segment in query[1:-1].split(',')
+            if len(segment) > 0
+        ]
+        tokens: list[Token] = []
+
+        if len(segments) < 1:
+            tokens.append(
+                Token(
+                    None,
+                    TokenType.UNBOUND_RANGE,
+                ),
+            )
+
+        else:
+            tokens.extend([
+                token
+                for segment in segments
+                for token in self._parse_selector_segment(segment)
+            ])
+
+        return tokens
+
+    def _parse_selector_segment(self, segment: str):
+
+        if segment.startswith('[') and segment.endswith(']'):
+            tokens = self._parse_range_selector_token(segment)
+
+            return [
+                Token(
+                    segment,
+                    TokenType.NESTED_RANGE,
+                    nested=tokens,
+                ),
+            ]
+
+        elif segment.startswith('(') and segment.endswith(')'):
+            return [
+                Token(
+                    re.compile(segment),
+                    TokenType.PATTERN_RANGE,
+                )
+            ]
+
+        elif segment == '*':
+            return [
+                Token(
+                    segment,
+                    TokenType.WILDCARD_RANGE,
+                )
+            ]
+
+        elif '-' in segment:
+            return [
+                self._parse_bound_range(
+                    segment.split('-', maxsplit=1)
+                )
+            ]
+
+        elif match := self.numbers_pattern.match(segment):
+            return [
+                Token(
+                    int(match.group(0)),
+                    TokenType.INDEX,
+                )
+            ]
+
+        else:
+            return [
+                Token(
+                    segment,
+                    TokenType.KEY_RANGE
+                )
+            ]
+
+    def _parse_bound_range(self, segment: tuple[str, ...]):
+
+        start, stop = segment
+        if not self.numbers_pattern.match(start):
+            start = 0
+
+        if not self.numbers_pattern.match(stop):
+            stop = str(sys.maxsize)
+
+        return Token(
+            (
+                int(start),
+                int(stop),
+            ),
+            TokenType.BOUND_RANGE,
+        )
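In short, parse() maps a single query segment to tokens: plain text becomes a KEY, "*" a WILDCARD, a parenthesised segment a compiled-regex PATTERN, and a bracketed segment one range token per comma-separated entry ("[]" unbound, "[2]" an index, "[0-3]" a bound range, "[*]" a wildcard range). A small sketch of those mappings (import paths as in the file listing above):

from cfn_check.evaluation.parsing import QueryParser
from cfn_check.evaluation.parsing.token_type import TokenType

parser = QueryParser()

# Plain segments, wildcards, and regex patterns.
assert parser.parse("Resources")[0].selector_type is TokenType.KEY
assert parser.parse("*")[0].selector_type is TokenType.WILDCARD
assert parser.parse("(^AWS::)")[0].selector_type is TokenType.PATTERN

# Bracketed range selectors.
index_token = parser.parse("[2]")[0]
assert index_token.selector_type is TokenType.INDEX and index_token.selector == 2

bound_token = parser.parse("[0-3]")[0]
assert bound_token.selector_type is TokenType.BOUND_RANGE and bound_token.selector == (0, 3)

assert parser.parse("[]")[0].selector_type is TokenType.UNBOUND_RANGE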
cfn_check/evaluation/parsing/token.py
ADDED
@@ -0,0 +1,269 @@
+from __future__ import annotations
+import re
+import sys
+from collections import deque
+from typing import Deque
+from cfn_check.shared.types import Data, Items
+from .token_type import TokenType
+
+
+class Token:
+
+    def __init__(
+        self,
+        selector: tuple[int, int] | int | re.Pattern | str,
+        selector_type: TokenType,
+        nested: list[Token] | None = None
+    ):
+        self.selector = selector
+        self.selector_type = selector_type
+        self._nested = nested
+
+    def match(
+        self,
+        node: Data,
+    ):
+        if isinstance(node, dict) and self.selector_type not in [
+            TokenType.WILDCARD,
+        ]:
+            return None, list(node.items())
+
+        elif isinstance(node, list) and self.selector_type not in [
+            TokenType.BOUND_RANGE,
+            TokenType.INDEX,
+            TokenType.PATTERN_RANGE,
+            TokenType.UNBOUND_RANGE,
+            TokenType.KEY_RANGE,
+            TokenType.WILDCARD,
+            TokenType.WILDCARD_RANGE,
+            TokenType.NESTED_RANGE,
+        ]:
+            return None, node
+
+        match self.selector_type:
+
+            case TokenType.BOUND_RANGE:
+                return self._match_bound_range(node)
+
+            case TokenType.INDEX:
+                return self._match_index(node)
+
+            case TokenType.KEY:
+                return self._match_key(node)
+
+            case TokenType.KEY_RANGE:
+                return self._match_key_range(node)
+
+            case TokenType.NESTED_RANGE:
+                return self._match_nested_range(node)
+
+            case TokenType.PATTERN:
+                return self._match_pattern(node)
+
+            case TokenType.PATTERN_RANGE:
+                return self._match_pattern_range(node)
+
+            case TokenType.UNBOUND_RANGE:
+                return self._match_unbound_range(node)
+
+            case TokenType.WILDCARD:
+                return self._match_wildcard(node)
+
+            case TokenType.WILDCARD_RANGE:
+                return self._match_wildcard_range(node)
+
+            case _:
+                return None, None
+
+    def _match_bound_range(
+        self,
+        node: Data,
+    ):
+        if not isinstance(node, list) or not isinstance(self.selector, tuple):
+            return None, None
+
+        start, stop = self.selector
+
+        if stop == sys.maxsize:
+            stop = len(node)
+
+        return [f'{start}-{stop}'], [node[start:stop]]
+
+    def _match_index(
+        self,
+        node: Data,
+    ):
+        if (
+            isinstance(node, list)
+        ) and (
+            isinstance(self.selector, int)
+        ) and self.selector < len(node):
+            return [str(self.selector)], [node[self.selector]]
+
+        return None, None
+
+    def _match_key(
+        self,
+        node: Data,
+    ):
+
+        if not isinstance(node, tuple) or len(node) < 2:
+            return None, None
+
+        key, value = node
+
+        if key == self.selector:
+            return [key], [value]
+
+        return None, None
+
+    def _match_pattern(
+        self,
+        node: Data,
+    ):
+
+        if not isinstance(node, tuple) or len(node) < 2:
+            return None, None
+
+        elif not isinstance(self.selector, re.Pattern):
+            return None, None
+
+        key, value = node
+
+        if self.selector.match(key):
+            return [key], [value]
+
+        return None, None
+
+    def _match_pattern_range(
+        self,
+        node: Data,
+    ):
+        if not isinstance(node, list) or not isinstance(self.selector, re.Pattern):
+            return None, None
+
+        matches = [
+            (idx, item)
+            for idx, item in enumerate(node)
+            if self.selector.match(item) or (
+                isinstance(item, dict, list)
+                and any([
+                    self.selector.match(val)
+                    for val in item
+                ])
+            )
+        ]
+
+        return (
+            [str(idx) for idx in matches],
+            [item for item in matches]
+        )
+
+    def _match_unbound_range(
+        self,
+        node: Data,
+    ):
+        if not isinstance(node, list):
+            return None, None
+
+        return (
+            ['[]'],
+            [node],
+        )
+
+    def _match_key_range(
+        self,
+        node: Data,
+    ):
+        if not isinstance(node, list):
+            return None, None
+
+        matches = [
+            (
+                str(idx),
+                value
+            ) for idx, value in enumerate(node) if (
+                str(value) == self.selector
+            ) or (
+                isinstance(value, dict, list)
+                and value in self.selector
+            )
+        ]
+
+        return (
+            [str(idx) for idx in matches],
+            [item for item in matches]
+        )
+
+    def _match_nested_range(
+        self,
+        node: Data
+    ):
+        if not isinstance(node, list):
+            return None, None
+
+        keys: list[str] = []
+        found: list[Data] = []
+
+        for item in node:
+            if isinstance(item, list):
+                nested_keys, nested_found = self._match_nested(item)
+                keys.extend([
+                    f'[[{key}]]'
+                    for key in nested_keys
+                ])
+                found.extend(nested_found)
+
+        return (
+            keys,
+            found,
+        )
+
+    def _match_nested(
+        self,
+        node: Data,
+    ):
+        found: Items = deque()
+        keys: Deque[str] = deque()
+
+        for token in self._nested:
+            matched_keys, matches = token.match(node)
+
+            if matched_keys and matches:
+                keys.extend(matched_keys)
+                found.extend(matches)
+
+        return keys, found
+
+    def _match_wildcard(
+        self,
+        node: Data
+    ):
+        if not self.selector == '*':
+            return None, None
+
+        if isinstance(node, dict):
+            return ['*'], node.values()
+
+        elif isinstance(node, list):
+            return (
+                ['*' for _ in node],
+                node,
+            )
+
+        return ['*'], [node]
+
+    def _match_wildcard_range(
+        self,
+        node: Data
+    ):
+        if not self.selector == '*' or not (
+            isinstance(node, list)
+        ):
+            return None, None
+
+        return (
+            ['[*]' for _ in node],
+            node,
+        )
+
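A Token reports its result as a (keys, matches) pair: a populated pair is a hit, (None, None) is a miss, and (None, children) hands a container's children back to the Evaluator for further descent. A brief sketch against hand-built nodes (import paths as in the file listing above):

import re

from cfn_check.evaluation.parsing.token import Token
from cfn_check.evaluation.parsing.token_type import TokenType

key_token = Token("Type", TokenType.KEY)

# A (key, value) pair with a matching key is a hit; a non-matching key is a miss.
assert key_token.match(("Type", "AWS::SQS::Queue")) == (["Type"], ["AWS::SQS::Queue"])
assert key_token.match(("Name", "queue")) == (None, None)

# A dict is not matched directly; its items are returned for further descent.
keys, children = key_token.match({"Type": "AWS::SQS::Queue"})
assert keys is None and children == [("Type", "AWS::SQS::Queue")]

# PATTERN tokens match keys by regex instead of equality.
pattern_token = Token(re.compile(r"Queue"), TokenType.PATTERN)
assert pattern_token.match(("QueueName", "demo")) == (["QueueName"], ["demo"])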
cfn_check/evaluation/parsing/token_type.py
ADDED
@@ -0,0 +1,14 @@
+from enum import Enum
+
+
+class TokenType(Enum):
+    BOUND_RANGE = "BOUND_RANGE"
+    INDEX = "INDEX"
+    KEY = "KEY"
+    KEY_RANGE = "KEY_RANGE"
+    NESTED_RANGE = "NESTED"
+    PATTERN = "GLOB"
+    PATTERN_RANGE = "GLOB_RANGE"
+    UNBOUND_RANGE = "UNBOUND_RANGE"
+    WILDCARD = "WILDCARD"
+    WILDCARD_RANGE = "WILDCARD_RANGE"
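Note that a few member values do not mirror their names (NESTED_RANGE is stored as "NESTED", and the pattern variants as "GLOB"/"GLOB_RANGE"); the matching code compares enum members rather than these strings, so the difference only shows up when values are serialised or looked up by value:

from cfn_check.evaluation.parsing.token_type import TokenType

assert TokenType.PATTERN.value == "GLOB"
assert TokenType.NESTED_RANGE.value == "NESTED"
assert TokenType("GLOB_RANGE") is TokenType.PATTERN_RANGE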
cfn_check/evaluation/validate.py
CHANGED
@@ -1,51 +1,60 @@
 from pydantic import ValidationError
 
 from cfn_check.validation.validator import Validator
-from .check import check
 from .errors import assemble_validation_error
-from .
… (old lines 7-51, all removed, are not rendered in this view)
+from .evaluator import Evaluator
+
+class ValidationSet:
+
+    def __init__(
+        self,
+        validators: list[Validator],
+    ):
+        self._evaluator = Evaluator()
+        self._validators = validators
+
+    @property
+    def count(self):
+        return len(self._validators)
+
+    def validate(
+        self,
+        templates: list[str],
+    ):
+        errors: list[Exception | ValidationError] = []
+
+        for template in templates:
+            for validator in self._validators:
+                if errs := self._match_validator(
+                    validator,
+                    template,
+                ):
+                    errors.extend([
+                        (
+                            validator,
+                            err
+                        ) for err in errs
+                    ])
+
+        if validation_error := assemble_validation_error(errors):
+            return validation_error
+
+    def _match_validator(
+        self,
+        validator: Validator,
+        template: str,
+    ):
+        found = self._evaluator.match(template, validator.query)
+
+        assert len(found) > 0, f"❌ No results matching results for query {validator.query}"
+
+        errors: list[Exception | ValidationError] = []
+
+
+        for matched in found:
+            if err := validator(matched):
+                errors.append(err)
+
+        if len(errors) > 0:
+            return errors
+
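ValidationSet needs only two things from each validator: a query attribute for the Evaluator and a call that returns an error (or something falsy) per matched result. The sketch below therefore uses a hand-rolled stand-in rather than the real Validator class (whose construction lives in cfn_check.validation and example/rules.py, outside this diff), and a dict in place of a loaded template; error assembly is left to assemble_validation_error exactly as in the CLI:

from cfn_check.evaluation.validate import ValidationSet


class TypeIsString:
    # Stand-in for a Validator: exposes .query and is callable on each match.
    query = "Resources::*::Type"

    def __call__(self, matched):
        # Evaluator.match yields (composite_key, value) pairs.
        _, value = matched
        if not isinstance(value, str):
            return AssertionError(f"expected a string Type, got {value!r}")


templates = [
    {"Resources": {"Queue": {"Type": "AWS::SQS::Queue"}}},
]

validation_set = ValidationSet([TypeIsString()])

# Mirrors the CLI: a truthy return value is the assembled validation error.
if validation_error := validation_set.validate(templates):
    raise validation_error

print(f'✅ {validation_set.count} validations met for {len(templates)} templates')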