tellaro-query-language 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tellaro_query_language-0.1.0.dist-info/LICENSE +21 -0
- tellaro_query_language-0.1.0.dist-info/METADATA +401 -0
- tellaro_query_language-0.1.0.dist-info/RECORD +56 -0
- tellaro_query_language-0.1.0.dist-info/WHEEL +4 -0
- tellaro_query_language-0.1.0.dist-info/entry_points.txt +7 -0
- tql/__init__.py +47 -0
- tql/analyzer.py +385 -0
- tql/cache/__init__.py +7 -0
- tql/cache/base.py +25 -0
- tql/cache/memory.py +63 -0
- tql/cache/redis.py +68 -0
- tql/core.py +929 -0
- tql/core_components/README.md +92 -0
- tql/core_components/__init__.py +20 -0
- tql/core_components/file_operations.py +113 -0
- tql/core_components/opensearch_operations.py +869 -0
- tql/core_components/stats_operations.py +200 -0
- tql/core_components/validation_operations.py +599 -0
- tql/evaluator.py +379 -0
- tql/evaluator_components/README.md +131 -0
- tql/evaluator_components/__init__.py +17 -0
- tql/evaluator_components/field_access.py +176 -0
- tql/evaluator_components/special_expressions.py +296 -0
- tql/evaluator_components/value_comparison.py +315 -0
- tql/exceptions.py +160 -0
- tql/geoip_normalizer.py +233 -0
- tql/mutator_analyzer.py +830 -0
- tql/mutators/__init__.py +222 -0
- tql/mutators/base.py +78 -0
- tql/mutators/dns.py +316 -0
- tql/mutators/encoding.py +218 -0
- tql/mutators/geo.py +363 -0
- tql/mutators/list.py +212 -0
- tql/mutators/network.py +163 -0
- tql/mutators/security.py +225 -0
- tql/mutators/string.py +165 -0
- tql/opensearch.py +78 -0
- tql/opensearch_components/README.md +130 -0
- tql/opensearch_components/__init__.py +17 -0
- tql/opensearch_components/field_mapping.py +399 -0
- tql/opensearch_components/lucene_converter.py +305 -0
- tql/opensearch_components/query_converter.py +775 -0
- tql/opensearch_mappings.py +309 -0
- tql/opensearch_stats.py +451 -0
- tql/parser.py +1363 -0
- tql/parser_components/README.md +72 -0
- tql/parser_components/__init__.py +20 -0
- tql/parser_components/ast_builder.py +162 -0
- tql/parser_components/error_analyzer.py +101 -0
- tql/parser_components/field_extractor.py +112 -0
- tql/parser_components/grammar.py +473 -0
- tql/post_processor.py +737 -0
- tql/scripts.py +124 -0
- tql/stats_evaluator.py +444 -0
- tql/stats_transformer.py +184 -0
- tql/validators.py +110 -0
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
# Parser Components
|
|
2
|
+
|
|
3
|
+
This package contains the modular components that make up the TQL parser.
|
|
4
|
+
|
|
5
|
+
## Overview
|
|
6
|
+
|
|
7
|
+
The parser components package splits the TQL parser functionality into focused, maintainable modules:
|
|
8
|
+
|
|
9
|
+
### Components
|
|
10
|
+
|
|
11
|
+
#### `grammar.py` - Grammar Definitions
|
|
12
|
+
Contains all pyparsing grammar definitions for TQL syntax, including:
|
|
13
|
+
- Basic tokens (identifiers, strings, numbers)
|
|
14
|
+
- Operators (comparison, logical, collection)
|
|
15
|
+
- Field specifications with type hints and mutators
|
|
16
|
+
- Value specifications with mutators
|
|
17
|
+
- Special expressions (geo, nslookup)
|
|
18
|
+
- Statistics expressions
|
|
19
|
+
- Complete TQL expression grammar
|
|
20
|
+
|
|
21
|
+
#### `ast_builder.py` - AST Construction
|
|
22
|
+
Handles building Abstract Syntax Tree nodes from parsed tokens:
|
|
23
|
+
- `extract_field_info()` - Extracts field name, type hints, and mutators
|
|
24
|
+
- `extract_value_info()` - Extracts values and value mutators
|
|
25
|
+
- Processes complex nested structures
|
|
26
|
+
- Handles mutator parameter parsing
|
|
27
|
+
|
|
28
|
+
#### `error_analyzer.py` - Error Analysis
|
|
29
|
+
Provides detailed error analysis for parse failures:
|
|
30
|
+
- `analyze_parse_error()` - Main error analysis entry point
|
|
31
|
+
- Generates helpful error messages with context
|
|
32
|
+
- Suggests corrections for common mistakes
|
|
33
|
+
- Shows error location in the original query
|
|
34
|
+
|
|
35
|
+
#### `field_extractor.py` - Field Extraction
|
|
36
|
+
Extracts field references from parsed AST:
|
|
37
|
+
- `extract_fields()` - Recursively finds all field references
|
|
38
|
+
- Handles all node types including special expressions
|
|
39
|
+
- Returns unique sorted list of fields
|
|
40
|
+
- Used for validation and analysis
|
|
41
|
+
|
|
42
|
+
## Usage
|
|
43
|
+
|
|
44
|
+
These components are used internally by the main `TQLParser` class. They should not be imported directly in application code.
|
|
45
|
+
|
|
46
|
+
```python
|
|
47
|
+
# Don't do this:
|
|
48
|
+
from tql.parser_components.grammar import TQLGrammar
|
|
49
|
+
|
|
50
|
+
# Do this instead:
|
|
51
|
+
from tql import TQL
|
|
52
|
+
tql = TQL()
|
|
53
|
+
ast = tql.parse("field = 'value'")
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
## Architecture
|
|
57
|
+
|
|
58
|
+
The parser follows a modular architecture:
|
|
59
|
+
|
|
60
|
+
```
|
|
61
|
+
TQLParser (main class)
|
|
62
|
+
├── TQLGrammar (grammar definitions)
|
|
63
|
+
├── ASTBuilder (AST construction)
|
|
64
|
+
├── ErrorAnalyzer (error handling)
|
|
65
|
+
└── FieldExtractor (field analysis)
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
This separation allows for:
|
|
69
|
+
- Easier testing of individual components
|
|
70
|
+
- Better code organization
|
|
71
|
+
- Clearer separation of concerns
|
|
72
|
+
- Easier maintenance and updates
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
"""TQL Parser package.
|
|
2
|
+
|
|
3
|
+
This package organizes the TQL parser into logical modules:
|
|
4
|
+
- grammar: Grammar definitions using pyparsing
|
|
5
|
+
- ast_builder: AST building utilities
|
|
6
|
+
- error_analyzer: Error analysis and helpful feedback
|
|
7
|
+
- field_extractor: Field extraction from AST
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from .ast_builder import ASTBuilder
|
|
11
|
+
from .error_analyzer import ErrorAnalyzer
|
|
12
|
+
from .field_extractor import FieldExtractor
|
|
13
|
+
from .grammar import TQLGrammar
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
"TQLGrammar",
|
|
17
|
+
"ASTBuilder",
|
|
18
|
+
"ErrorAnalyzer",
|
|
19
|
+
"FieldExtractor",
|
|
20
|
+
]
|
|
@@ -0,0 +1,162 @@
|
|
|
1
|
+
"""AST building utilities for TQL parser."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List, Tuple, Union
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class ASTBuilder:
    """Builds Abstract Syntax Tree nodes from parsed TQL expressions.

    The parser hands this class raw pyparsing token lists; the two public
    methods normalize them into (field, type_hint, mutators) and
    (value, mutators) tuples used by the rest of the AST.
    """

    # Type-hint keywords that may follow a field name (e.g. ``field:int``).
    # Matched case-insensitively; stored lowercased.
    _TYPE_HINTS = frozenset(
        ["number", "int", "float", "decimal", "date", "array", "bool", "boolean", "geo", "object", "string"]
    )

    @staticmethod
    def _extract_mutators(items: List[Any]) -> List[Dict[str, Any]]:
        """Parse mutator specs from a token list.

        Each mutator token is ``[name]`` or ``[name, params]``. Non-list
        items and empty lists are ignored. A ``params`` entry is kept only
        if every retained parameter is a ``[key, value]`` pair (length-2
        list); a mutator with no valid params gets no ``"params"`` key.

        Args:
            items: Tokens following the field name / value.

        Returns:
            List of ``{"name": ..., ["params": ...]}`` dicts, in order.
        """
        mutators: List[Dict[str, Any]] = []
        for item in items:
            if not isinstance(item, list) or len(item) < 1:
                continue
            mutator_dict: Dict[str, Any] = {"name": item[0]}
            if len(item) > 1 and isinstance(item[1], list):
                params = [p for p in item[1] if isinstance(p, list) and len(p) == 2]
                if params:
                    mutator_dict["params"] = params
            mutators.append(mutator_dict)
        return mutators

    def extract_field_info(self, field_spec: Any) -> Tuple[str, Union[str, None], List[Dict[str, Any]]]:
        """Extract field name, optional type hint, and mutators from field specification.

        Args:
            field_spec: Field specification that may include type hint and mutators.
                A bare string is a plain field name; a list is
                ``[name, hint?, mutator*]`` in any trailing order.

        Returns:
            Tuple of (field_name, type_hint or None, list of mutators).
        """
        if not isinstance(field_spec, list):
            # Just a field name as a string.
            return field_spec, None, []

        field_name = field_spec[0]
        type_hint = None
        mutator_items: List[Any] = []
        for item in field_spec[1:]:
            if isinstance(item, str) and item.lower() in self._TYPE_HINTS:
                # A later hint overwrites an earlier one (matches parser output order).
                type_hint = item.lower()
            elif isinstance(item, list):
                mutator_items.append(item)
        return field_name, type_hint, self._extract_mutators(mutator_items)

    def extract_value_info(self, value_spec: Any) -> Tuple[Any, List[Dict[str, Any]]]:
        """Extract value and optional mutators from value specification.

        Args:
            value_spec: Value specification that may include mutators.
                A non-list is a plain scalar value. A single-element list
                wraps either a scalar or a list literal. A longer list is
                either a list literal (no mutator-shaped tail elements) or
                ``[value, mutator*]``.

        Returns:
            Tuple of (value, list of mutators).
        """
        if not isinstance(value_spec, list):
            # Plain scalar value, no mutators.
            return value_spec, []
        if len(value_spec) == 0:
            return value_spec, []
        if len(value_spec) == 1:
            # Single wrapped element (scalar or list literal): unwrap it.
            return value_spec[0], []

        # A mutator-shaped tail element is a non-empty list whose first
        # element is a string (the mutator name).
        tail = value_spec[1:]
        has_mutators = any(
            isinstance(item, list) and len(item) >= 1 and isinstance(item[0], str) for item in tail
        )
        if not has_mutators:
            # No mutators anywhere: the whole list is a list literal.
            return value_spec, []

        # First element is the value (scalar or list literal); the rest
        # are mutator specifications.
        return value_spec[0], self._extract_mutators(tail)
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"""Error analysis utilities for TQL parser."""
|
|
2
|
+
|
|
3
|
+
from typing import List, Tuple
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class ErrorAnalyzer:
    """Analyzes parse errors to provide helpful feedback."""

    @staticmethod
    def analyze_parse_error(query: str, position: int, error_str: str) -> Tuple[str, List[str]]:  # noqa: C901
        """Analyze parse error to provide helpful feedback.

        Applies a series of heuristics, most specific first: invalid ``==``
        operator, query trailing off after an operator, missing operator
        between tokens, unterminated string literal, missing value after an
        operator, then a generic fallback.

        Args:
            query: The original query string
            position: Character position where error occurred
            error_str: The original error string from pyparsing

        Returns:
            Tuple of (error message, list of suggestions)
        """
        # '==' is never valid TQL; report it regardless of the reported position.
        double_eq = query.find("==")
        if double_eq != -1:
            return (
                f"Invalid operator '==' at position {double_eq}. Use '=' for equality",
                [query.replace("==", "=")],
            )

        # Query that trails off immediately after an operator.
        trimmed = query.rstrip()
        # Longer operators first so '>=' is reported before '='.
        trailing_ops = (">=", "<=", "!=", "contains", "startswith", "endswith", "=", ">", "<")
        if trimmed.endswith(trailing_ops):
            for op in trailing_ops:
                if trimmed.endswith(op):
                    return f"Expected value after operator '{op}'", [f'Examples: field {op} "value"']

        if 0 <= position < len(query):
            # Split the query at the error position to inspect both sides.
            prefix = query[:position].strip()
            suffix = query[position:].strip()

            # Missing operator after a field: text on both sides but no
            # operator visible in the last few characters before the error.
            known_ops = ["=", "!=", ">", "<", ">=", "<=", "in", "contains", "exists"]
            window = prefix[-10:]
            if prefix and suffix and not any(op in window for op in known_ops):
                words = prefix.split()
                last_word = words[-1] if words else ""
                hints = [
                    f'{last_word} = "{suffix.split()[0]}"' if suffix else f"{last_word} exists",
                ]
                if suffix:
                    hints.append(f'{last_word} contains "{suffix.split()[0]}"')
                return f"Expected operator after field '{last_word}'", hints

            # Unterminated string literal (odd number of quote characters).
            for quote in ('"', "'"):
                if query.count(quote) % 2 != 0:
                    open_pos = query.rfind(quote, 0, position)
                    if open_pos >= 0:
                        return f"Unterminated string literal starting at position {open_pos}", []

            # Operator present but no value follows it before the error position.
            words = prefix.split()
            value_ops = ["=", "!=", ">", "<", ">=", "<=", "contains", "startswith", "endswith"]
            if words and words[-1] in value_ops:
                subject = words[-2] if len(words) > 1 else "field"
                return (
                    f"Expected value after operator '{words[-1]}'",
                    [f'Examples: {subject} {words[-1]} "value"'],
                )

        # Fallback: nothing specific matched, list every valid operator.
        all_operators = [
            "=",
            "!=",
            ">",
            "<",
            ">=",
            "<=",
            "contains",
            "startswith",
            "endswith",
            "in",
            "not in",
            "between",
            "not between",
            "cidr",
            "not cidr",
            "exists",
            "not exists",
            "regexp",
            "not regexp",
        ]
        return "Invalid syntax", [f"Valid operators: {', '.join(all_operators)}"]
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
"""Field extraction utilities for TQL parser."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List, Set
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class FieldExtractor:
    """Extracts field references from TQL AST."""

    @staticmethod
    def extract_fields(ast: Dict[str, Any]) -> List[str]:
        """Extract all unique field references from a TQL AST.

        Args:
            ast: The parsed AST

        Returns:
            Sorted list of unique field names referenced in the query
        """
        found: Set[str] = set()
        FieldExtractor._collect_fields_from_node(ast, found)
        return sorted(found)

    @staticmethod
    def _collect_fields_from_node(node: Dict[str, Any], fields: Set[str]) -> None:  # noqa: C901
        """Recursively collect field names from an AST node.

        Args:
            node: The AST node to extract fields from
            fields: Set to collect unique field names
        """
        if not isinstance(node, dict):
            return

        kind = node.get("type")

        if kind == "comparison":
            if "field" in node:
                value = node["field"]
                # A list-valued field indicates a malformed query; record nothing.
                if not isinstance(value, list):
                    fields.add(value)

        elif kind == "collection_op":
            # Collection operation (ANY, ALL).
            if "field" in node:
                fields.add(node["field"])

        elif kind == "logical_op":
            # Logical operation (AND, OR): descend into both branches.
            for branch in ("left", "right"):
                if branch in node:
                    FieldExtractor._collect_fields_from_node(node[branch], fields)

        elif kind == "unary_op":
            # Unary operation (NOT): descend into the operand.
            if "operand" in node:
                FieldExtractor._collect_fields_from_node(node["operand"], fields)

        elif kind in ("geo_expr", "nslookup_expr"):
            # Lookup expressions: record the looked-up field and descend
            # into any nested conditions.
            if "field" in node:
                fields.add(node["field"])
            if "conditions" in node:
                FieldExtractor._collect_fields_from_node(node["conditions"], fields)

        elif kind == "query_with_stats":
            # Filter part plus the stats part.
            if "filter" in node:
                FieldExtractor._collect_fields_from_node(node["filter"], fields)
            if "stats" in node:
                FieldExtractor._collect_fields_from_stats(node["stats"], fields)

        elif kind == "stats_expr":
            FieldExtractor._collect_fields_from_stats(node, fields)

    @staticmethod
    def _collect_fields_from_stats(stats_node: Dict[str, Any], fields: Set[str]) -> None:
        """Collect field names from stats expressions.

        Args:
            stats_node: Stats AST node
            fields: Set to collect unique field names
        """
        # Aggregation targets, excluding the wildcard pseudo-field.
        for agg in stats_node.get("aggregations", []):
            if "field" in agg and agg["field"] != "*":
                fields.add(agg["field"])

        # Group-by fields.
        if "group_by" in stats_node:
            fields.update(stats_node["group_by"])
|