flowquery 1.0.27 → 1.0.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/dist/flowquery.min.js +1 -1
  2. package/dist/graph/relationship.d.ts.map +1 -1
  3. package/dist/graph/relationship.js +5 -1
  4. package/dist/graph/relationship.js.map +1 -1
  5. package/dist/parsing/base_parser.d.ts +1 -1
  6. package/dist/parsing/base_parser.d.ts.map +1 -1
  7. package/dist/parsing/base_parser.js.map +1 -1
  8. package/dist/parsing/expressions/operator.d.ts +38 -1
  9. package/dist/parsing/expressions/operator.d.ts.map +1 -1
  10. package/dist/parsing/expressions/operator.js +156 -4
  11. package/dist/parsing/expressions/operator.js.map +1 -1
  12. package/dist/parsing/functions/count.d.ts +21 -0
  13. package/dist/parsing/functions/count.d.ts.map +1 -0
  14. package/dist/parsing/functions/count.js +70 -0
  15. package/dist/parsing/functions/count.js.map +1 -0
  16. package/dist/parsing/functions/function_factory.d.ts +2 -0
  17. package/dist/parsing/functions/function_factory.d.ts.map +1 -1
  18. package/dist/parsing/functions/function_factory.js +2 -0
  19. package/dist/parsing/functions/function_factory.js.map +1 -1
  20. package/dist/parsing/functions/string_distance.d.ts +7 -0
  21. package/dist/parsing/functions/string_distance.d.ts.map +1 -0
  22. package/dist/parsing/functions/string_distance.js +84 -0
  23. package/dist/parsing/functions/string_distance.js.map +1 -0
  24. package/dist/parsing/parser.d.ts +6 -0
  25. package/dist/parsing/parser.d.ts.map +1 -1
  26. package/dist/parsing/parser.js +123 -13
  27. package/dist/parsing/parser.js.map +1 -1
  28. package/dist/tokenization/keyword.d.ts +4 -1
  29. package/dist/tokenization/keyword.d.ts.map +1 -1
  30. package/dist/tokenization/keyword.js +3 -0
  31. package/dist/tokenization/keyword.js.map +1 -1
  32. package/dist/tokenization/token.d.ts +6 -0
  33. package/dist/tokenization/token.d.ts.map +1 -1
  34. package/dist/tokenization/token.js +18 -0
  35. package/dist/tokenization/token.js.map +1 -1
  36. package/docs/flowquery.min.js +1 -1
  37. package/flowquery-py/pyproject.toml +1 -1
  38. package/flowquery-py/src/graph/relationship.py +5 -1
  39. package/flowquery-py/src/parsing/expressions/__init__.py +4 -0
  40. package/flowquery-py/src/parsing/expressions/operator.py +102 -0
  41. package/flowquery-py/src/parsing/functions/__init__.py +4 -0
  42. package/flowquery-py/src/parsing/functions/count.py +79 -0
  43. package/flowquery-py/src/parsing/functions/string_distance.py +88 -0
  44. package/flowquery-py/src/parsing/parser.py +123 -16
  45. package/flowquery-py/src/tokenization/keyword.py +3 -0
  46. package/flowquery-py/src/tokenization/token.py +21 -0
  47. package/flowquery-py/tests/compute/test_runner.py +504 -1
  48. package/flowquery-py/tests/parsing/test_expression.py +200 -1
  49. package/flowquery-py/tests/parsing/test_parser.py +203 -0
  50. package/flowquery-vscode/flowQueryEngine/flowquery.min.js +1 -1
  51. package/package.json +1 -1
  52. package/src/graph/relationship.ts +4 -1
  53. package/src/parsing/base_parser.ts +1 -1
  54. package/src/parsing/expressions/operator.ts +155 -3
  55. package/src/parsing/functions/count.ts +54 -0
  56. package/src/parsing/functions/function_factory.ts +2 -0
  57. package/src/parsing/functions/string_distance.ts +80 -0
  58. package/src/parsing/parser.ts +134 -12
  59. package/src/tokenization/keyword.ts +3 -0
  60. package/src/tokenization/token.ts +24 -0
  61. package/tests/compute/runner.test.ts +462 -0
  62. package/tests/parsing/expression.test.ts +279 -16
  63. package/tests/parsing/parser.test.ts +200 -0
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "flowquery"
3
- version = "1.0.17"
3
+ version = "1.0.19"
4
4
  description = "A declarative query language for data processing pipelines"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.10"
@@ -167,7 +167,7 @@ class Relationship(ASTNode):
167
167
  follow_id = 'left_id' if is_left else 'right_id'
168
168
  while self._data and find_match(left_id, hop):
169
169
  data = self._data.current(hop)
170
- if data and self._hops and hop >= self._hops.min:
170
+ if data and self._hops and hop + 1 >= self._hops.min:
171
171
  self.set_value(self)
172
172
  if not self._matches_properties(hop):
173
173
  continue
@@ -178,6 +178,10 @@ class Relationship(ASTNode):
178
178
  if self._hops and hop + 1 < self._hops.max:
179
179
  await self.find(data[follow_id], hop + 1)
180
180
  self._matches.pop()
181
+ elif data and self._hops:
182
+ # Below minimum hops: traverse the edge without yielding a match
183
+ if follow_id in data:
184
+ await self.find(data[follow_id], hop + 1)
181
185
 
182
186
  # Restore original source node
183
187
  self._source = original
@@ -13,6 +13,7 @@ from .operator import (
13
13
  Equals,
14
14
  GreaterThan,
15
15
  GreaterThanOrEqual,
16
+ In,
16
17
  Is,
17
18
  LessThan,
18
19
  LessThanOrEqual,
@@ -20,6 +21,7 @@ from .operator import (
20
21
  Multiply,
21
22
  Not,
22
23
  NotEquals,
24
+ NotIn,
23
25
  Operator,
24
26
  Or,
25
27
  Power,
@@ -54,4 +56,6 @@ __all__ = [
54
56
  "Or",
55
57
  "Not",
56
58
  "Is",
59
+ "In",
60
+ "NotIn",
57
61
  ]
@@ -167,3 +167,105 @@ class Is(Operator):
167
167
 
168
168
  def value(self) -> int:
169
169
  return 1 if self.lhs.value() == self.rhs.value() else 0
170
+
171
+
172
+ class IsNot(Operator):
173
+ def __init__(self) -> None:
174
+ super().__init__(-1, True)
175
+
176
+ def value(self) -> int:
177
+ return 1 if self.lhs.value() != self.rhs.value() else 0
178
+
179
+
180
+ class In(Operator):
181
+ def __init__(self) -> None:
182
+ super().__init__(-1, True)
183
+
184
+ def value(self) -> int:
185
+ lst = self.rhs.value()
186
+ if not isinstance(lst, list):
187
+ raise ValueError("Right operand of IN must be a list")
188
+ return 1 if self.lhs.value() in lst else 0
189
+
190
+
191
+ class NotIn(Operator):
192
+ def __init__(self) -> None:
193
+ super().__init__(-1, True)
194
+
195
+ def value(self) -> int:
196
+ lst = self.rhs.value()
197
+ if not isinstance(lst, list):
198
+ raise ValueError("Right operand of NOT IN must be a list")
199
+ return 0 if self.lhs.value() in lst else 1
200
+
201
+
202
+ class Contains(Operator):
203
+ def __init__(self) -> None:
204
+ super().__init__(0, True)
205
+
206
+ def value(self) -> int:
207
+ s = self.lhs.value()
208
+ search = self.rhs.value()
209
+ if not isinstance(s, str) or not isinstance(search, str):
210
+ raise ValueError("CONTAINS requires string operands")
211
+ return 1 if search in s else 0
212
+
213
+
214
+ class NotContains(Operator):
215
+ def __init__(self) -> None:
216
+ super().__init__(0, True)
217
+
218
+ def value(self) -> int:
219
+ s = self.lhs.value()
220
+ search = self.rhs.value()
221
+ if not isinstance(s, str) or not isinstance(search, str):
222
+ raise ValueError("NOT CONTAINS requires string operands")
223
+ return 0 if search in s else 1
224
+
225
+
226
+ class StartsWith(Operator):
227
+ def __init__(self) -> None:
228
+ super().__init__(0, True)
229
+
230
+ def value(self) -> int:
231
+ s = self.lhs.value()
232
+ search = self.rhs.value()
233
+ if not isinstance(s, str) or not isinstance(search, str):
234
+ raise ValueError("STARTS WITH requires string operands")
235
+ return 1 if s.startswith(search) else 0
236
+
237
+
238
+ class NotStartsWith(Operator):
239
+ def __init__(self) -> None:
240
+ super().__init__(0, True)
241
+
242
+ def value(self) -> int:
243
+ s = self.lhs.value()
244
+ search = self.rhs.value()
245
+ if not isinstance(s, str) or not isinstance(search, str):
246
+ raise ValueError("NOT STARTS WITH requires string operands")
247
+ return 0 if s.startswith(search) else 1
248
+
249
+
250
+ class EndsWith(Operator):
251
+ def __init__(self) -> None:
252
+ super().__init__(0, True)
253
+
254
+ def value(self) -> int:
255
+ s = self.lhs.value()
256
+ search = self.rhs.value()
257
+ if not isinstance(s, str) or not isinstance(search, str):
258
+ raise ValueError("ENDS WITH requires string operands")
259
+ return 1 if s.endswith(search) else 0
260
+
261
+
262
+ class NotEndsWith(Operator):
263
+ def __init__(self) -> None:
264
+ super().__init__(0, True)
265
+
266
+ def value(self) -> int:
267
+ s = self.lhs.value()
268
+ search = self.rhs.value()
269
+ if not isinstance(s, str) or not isinstance(search, str):
270
+ raise ValueError("NOT ENDS WITH requires string operands")
271
+ return 0 if s.endswith(search) else 1
@@ -4,6 +4,7 @@ from .aggregate_function import AggregateFunction
4
4
  from .async_function import AsyncFunction
5
5
  from .avg import Avg
6
6
  from .collect import Collect
7
+ from .count import Count
7
8
  from .function import Function
8
9
  from .function_factory import FunctionFactory
9
10
  from .function_metadata import (
@@ -30,6 +31,7 @@ from .round_ import Round
30
31
  from .schema import Schema
31
32
  from .size import Size
32
33
  from .split import Split
34
+ from .string_distance import StringDistance
33
35
  from .stringify import Stringify
34
36
 
35
37
  # Built-in functions
@@ -60,6 +62,7 @@ __all__ = [
60
62
  "Sum",
61
63
  "Avg",
62
64
  "Collect",
65
+ "Count",
63
66
  "Join",
64
67
  "Keys",
65
68
  "Rand",
@@ -68,6 +71,7 @@ __all__ = [
68
71
  "Round",
69
72
  "Size",
70
73
  "Split",
74
+ "StringDistance",
71
75
  "Stringify",
72
76
  "ToJson",
73
77
  "Type",
@@ -0,0 +1,79 @@
1
+ """Count aggregate function."""
2
+
3
+ import json
4
+ from typing import Any, Union
5
+
6
+ from .aggregate_function import AggregateFunction
7
+ from .function_metadata import FunctionDef
8
+ from .reducer_element import ReducerElement
9
+
10
+
11
+ class CountReducerElement(ReducerElement):
12
+ """Reducer element for Count aggregate function."""
13
+
14
+ def __init__(self) -> None:
15
+ self._value: int = 0
16
+
17
+ @property
18
+ def value(self) -> Any:
19
+ return self._value
20
+
21
+ @value.setter
22
+ def value(self, val: Any) -> None:
23
+ self._value += 1
24
+
25
+
26
+ class DistinctCountReducerElement(ReducerElement):
27
+ """Reducer element for Count aggregate function with DISTINCT."""
28
+
29
+ def __init__(self) -> None:
30
+ self._seen: set[Any] = set()
31
+
32
+ @property
33
+ def value(self) -> Any:
34
+ return len(self._seen)
35
+
36
+ @value.setter
37
+ def value(self, val: Any) -> None:
38
+ key: str = json.dumps(val, sort_keys=True, default=str)
39
+ self._seen.add(key)
40
+
41
+
42
+ @FunctionDef({
43
+ "description": "Counts the number of values across grouped rows",
44
+ "category": "aggregate",
45
+ "parameters": [
46
+ {"name": "value", "description": "Value to count", "type": "any"}
47
+ ],
48
+ "output": {"description": "Number of values", "type": "number", "example": 3},
49
+ "examples": [
50
+ "WITH [1, 2, 3] AS nums UNWIND nums AS n RETURN count(n)",
51
+ "WITH [1, 2, 2, 3] AS nums UNWIND nums AS n RETURN count(distinct n)"
52
+ ]
53
+ })
54
+ class Count(AggregateFunction):
55
+ """Count aggregate function.
56
+
57
+ Counts the number of values across grouped rows.
58
+ Supports DISTINCT to count only unique values.
59
+ """
60
+
61
+ def __init__(self) -> None:
62
+ super().__init__("count")
63
+ self._expected_parameter_count = 1
64
+ self._supports_distinct = True
65
+ self._distinct: bool = False
66
+
67
+ def reduce(self, element: Union[CountReducerElement, DistinctCountReducerElement]) -> None:
68
+ element.value = self.first_child().value()
69
+
70
+ def element(self) -> Union[CountReducerElement, DistinctCountReducerElement]:
71
+ return DistinctCountReducerElement() if self._distinct else CountReducerElement()
72
+
73
+ @property
74
+ def distinct(self) -> bool:
75
+ return self._distinct
76
+
77
+ @distinct.setter
78
+ def distinct(self, val: bool) -> None:
79
+ self._distinct = val
@@ -0,0 +1,88 @@
1
+ """String distance function using Levenshtein distance."""
2
+
3
+ from .function import Function
4
+ from .function_metadata import FunctionDef
5
+
6
+
7
+ def _levenshtein_distance(a: str, b: str) -> float:
8
+ """Compute the normalized Levenshtein distance between two strings.
9
+
10
+ The Levenshtein distance is the minimum number of single-character edits
11
+ (insertions, deletions, or substitutions) required to change one string
12
+ into the other. The result is normalized to [0, 1] by dividing by the
13
+ length of the longer string.
14
+
15
+ Args:
16
+ a: First string
17
+ b: Second string
18
+
19
+ Returns:
20
+ The normalized Levenshtein distance (0 = identical, 1 = completely different)
21
+ """
22
+ m = len(a)
23
+ n = len(b)
24
+
25
+ # Both empty strings are identical
26
+ if m == 0 and n == 0:
27
+ return 0.0
28
+
29
+ # Create a matrix of size (m+1) x (n+1)
30
+ dp = [[0] * (n + 1) for _ in range(m + 1)]
31
+
32
+ # Base cases: transforming empty string to/from a prefix
33
+ for i in range(m + 1):
34
+ dp[i][0] = i
35
+ for j in range(n + 1):
36
+ dp[0][j] = j
37
+
38
+ # Fill in the rest of the matrix
39
+ for i in range(1, m + 1):
40
+ for j in range(1, n + 1):
41
+ cost = 0 if a[i - 1] == b[j - 1] else 1
42
+ dp[i][j] = min(
43
+ dp[i - 1][j] + 1, # deletion
44
+ dp[i][j - 1] + 1, # insertion
45
+ dp[i - 1][j - 1] + cost # substitution
46
+ )
47
+
48
+ # Normalize by the length of the longer string
49
+ return dp[m][n] / max(m, n)
50
+
51
+
52
+ @FunctionDef({
53
+ "description": (
54
+ "Computes the normalized Levenshtein distance between two strings. "
55
+ "Returns a value in [0, 1] where 0 means identical and 1 means completely different."
56
+ ),
57
+ "category": "scalar",
58
+ "parameters": [
59
+ {"name": "string1", "description": "First string", "type": "string"},
60
+ {"name": "string2", "description": "Second string", "type": "string"}
61
+ ],
62
+ "output": {
63
+ "description": "Normalized Levenshtein distance (0 = identical, 1 = completely different)",
64
+ "type": "number",
65
+ "example": 0.43,
66
+ },
67
+ "examples": [
68
+ "RETURN string_distance('kitten', 'sitting')",
69
+ "WITH 'hello' AS a, 'hallo' AS b RETURN string_distance(a, b)"
70
+ ]
71
+ })
72
+ class StringDistance(Function):
73
+ """String distance function.
74
+
75
+ Computes the normalized Levenshtein distance between two strings.
76
+ Returns a value in [0, 1] where 0 means identical and 1 means completely different.
77
+ """
78
+
79
+ def __init__(self) -> None:
80
+ super().__init__("string_distance")
81
+ self._expected_parameter_count = 2
82
+
83
+ def value(self) -> float:
84
+ str1 = self.get_children()[0].value()
85
+ str2 = self.get_children()[1].value()
86
+ if not isinstance(str1, str) or not isinstance(str2, str):
87
+ raise ValueError("Invalid arguments for string_distance function: both arguments must be strings")
88
+ return _levenshtein_distance(str1, str2)
@@ -29,7 +29,19 @@ from .data_structures.range_lookup import RangeLookup
29
29
  from .expressions.expression import Expression
30
30
  from .expressions.f_string import FString
31
31
  from .expressions.identifier import Identifier
32
- from .expressions.operator import Not
32
+ from .expressions.operator import (
33
+ Contains,
34
+ EndsWith,
35
+ In,
36
+ Is,
37
+ IsNot,
38
+ Not,
39
+ NotContains,
40
+ NotEndsWith,
41
+ NotIn,
42
+ NotStartsWith,
43
+ StartsWith,
44
+ )
33
45
  from .expressions.reference import Reference
34
46
  from .expressions.string import String
35
47
  from .functions.aggregate_function import AggregateFunction
@@ -155,10 +167,15 @@ class Parser(BaseParser):
155
167
  return None
156
168
  self.set_next_token()
157
169
  self._expect_and_skip_whitespace_and_comments()
170
+ distinct = False
171
+ if self.token.is_distinct():
172
+ distinct = True
173
+ self.set_next_token()
174
+ self._expect_and_skip_whitespace_and_comments()
158
175
  expressions = list(self._parse_expressions(AliasOption.REQUIRED))
159
176
  if len(expressions) == 0:
160
177
  raise ValueError("Expected expression")
161
- if any(expr.has_reducers() for expr in expressions):
178
+ if distinct or any(expr.has_reducers() for expr in expressions):
162
179
  return AggregatedWith(expressions) # type: ignore[return-value]
163
180
  return With(expressions)
164
181
 
@@ -190,10 +207,15 @@ class Parser(BaseParser):
190
207
  return None
191
208
  self.set_next_token()
192
209
  self._expect_and_skip_whitespace_and_comments()
210
+ distinct = False
211
+ if self.token.is_distinct():
212
+ distinct = True
213
+ self.set_next_token()
214
+ self._expect_and_skip_whitespace_and_comments()
193
215
  expressions = list(self._parse_expressions(AliasOption.OPTIONAL))
194
216
  if len(expressions) == 0:
195
217
  raise ValueError("Expected expression")
196
- if any(expr.has_reducers() for expr in expressions):
218
+ if distinct or any(expr.has_reducers() for expr in expressions):
197
219
  return AggregatedReturn(expressions)
198
220
  self._returns += 1
199
221
  return Return(expressions)
@@ -469,10 +491,7 @@ class Parser(BaseParser):
469
491
  node = Node()
470
492
  node.label = label
471
493
  node.properties = dict(self._parse_properties())
472
- if label is not None and identifier is not None:
473
- node.identifier = identifier
474
- self._variables[identifier] = node
475
- elif identifier is not None:
494
+ if identifier is not None and identifier in self._variables:
476
495
  reference = self._variables.get(identifier)
477
496
  # Resolve through Expression -> Reference -> Node (e.g., after WITH)
478
497
  ref_child = reference.first_child() if isinstance(reference, Expression) else None
@@ -483,6 +502,9 @@ class Parser(BaseParser):
483
502
  if reference is None or not isinstance(reference, Node):
484
503
  raise ValueError(f"Undefined node reference: {identifier}")
485
504
  node = NodeReference(node, reference)
505
+ elif identifier is not None:
506
+ node.identifier = identifier
507
+ self._variables[identifier] = node
486
508
  if not self.token.is_right_parenthesis():
487
509
  raise ValueError("Expected closing parenthesis for node definition")
488
510
  self.set_next_token()
@@ -525,21 +547,20 @@ class Parser(BaseParser):
525
547
  relationship = Relationship()
526
548
  relationship.direction = direction
527
549
  relationship.properties = properties
528
- if rel_type is not None and variable is not None:
529
- relationship.identifier = variable
530
- self._variables[variable] = relationship
531
- elif variable is not None:
550
+ if variable is not None and variable in self._variables:
532
551
  reference = self._variables.get(variable)
533
552
  # Resolve through Expression -> Reference -> Relationship (e.g., after WITH)
534
- if isinstance(reference, Expression) and isinstance(
535
- reference.first_child(), Reference
536
- ):
537
- inner = reference.first_child().referred
553
+ first = reference.first_child() if isinstance(reference, Expression) else None
554
+ if isinstance(first, Reference):
555
+ inner = first.referred
538
556
  if isinstance(inner, Relationship):
539
557
  reference = inner
540
558
  if reference is None or not isinstance(reference, Relationship):
541
559
  raise ValueError(f"Undefined relationship reference: {variable}")
542
560
  relationship = RelationshipReference(relationship, reference)
561
+ elif variable is not None:
562
+ relationship.identifier = variable
563
+ self._variables[variable] = relationship
543
564
  if hops is not None:
544
565
  relationship.hops = hops
545
566
  relationship.type = rel_type
@@ -732,7 +753,23 @@ class Parser(BaseParser):
732
753
  break
733
754
  self._skip_whitespace_and_comments()
734
755
  if self.token.is_operator():
735
- expression.add_node(self.token.node)
756
+ if self.token.is_is():
757
+ expression.add_node(self._parse_is_operator())
758
+ else:
759
+ expression.add_node(self.token.node)
760
+ elif self.token.is_in():
761
+ expression.add_node(self._parse_in_operator())
762
+ elif self.token.is_contains():
763
+ expression.add_node(self._parse_contains_operator())
764
+ elif self.token.is_starts():
765
+ expression.add_node(self._parse_starts_with_operator())
766
+ elif self.token.is_ends():
767
+ expression.add_node(self._parse_ends_with_operator())
768
+ elif self.token.is_not():
769
+ not_op = self._parse_not_operator()
770
+ if not_op is None:
771
+ break
772
+ expression.add_node(not_op)
736
773
  else:
737
774
  break
738
775
  self.set_next_token()
@@ -742,6 +779,76 @@ class Parser(BaseParser):
742
779
  return expression
743
780
  return None
744
781
 
782
    def _parse_is_operator(self) -> ASTNode:
        """Parse IS or IS NOT operator.

        Returns IsNot when the token after IS is NOT (the outer expression
        loop's set_next_token then advances past NOT); otherwise rewinds to
        IS and returns Is.
        """
        # Current token is IS. Look ahead for NOT to produce IS NOT.
        saved_index = self._token_index
        self.set_next_token()
        self._skip_whitespace_and_comments()
        if self.token.is_not():
            return IsNot()
        # Not IS NOT — restore position to IS so the outer loop's set_next_token advances past it.
        self._token_index = saved_index
        return Is()
793
+
794
    def _parse_in_operator(self) -> In:
        """Parse IN operator.

        IN is a single token, so no look-ahead or position change is needed;
        the outer loop's set_next_token advances past it.
        """
        return In()
798
+
799
    def _parse_contains_operator(self) -> Contains:
        """Parse CONTAINS operator (single token, no look-ahead needed)."""
        return Contains()
802
+
803
    def _parse_starts_with_operator(self) -> StartsWith:
        """Parse STARTS WITH operator.

        Raises:
            ValueError: if the token after STARTS is not WITH.
        """
        # Current token is STARTS. Look ahead for WITH.
        saved_index = self._token_index
        self.set_next_token()
        self._skip_whitespace_and_comments()
        if self.token.is_with():
            return StartsWith()
        # Rewind before raising so the parser position still points at STARTS.
        self._token_index = saved_index
        raise ValueError("Expected WITH after STARTS")
813
+
814
    def _parse_ends_with_operator(self) -> EndsWith:
        """Parse ENDS WITH operator.

        Raises:
            ValueError: if the token after ENDS is not WITH.
        """
        # Current token is ENDS. Look ahead for WITH.
        saved_index = self._token_index
        self.set_next_token()
        self._skip_whitespace_and_comments()
        if self.token.is_with():
            return EndsWith()
        # Rewind before raising so the parser position still points at ENDS.
        self._token_index = saved_index
        raise ValueError("Expected WITH after ENDS")
824
+
825
    def _parse_not_operator(self) -> NotIn | NotContains | NotStartsWith | NotEndsWith | None:
        """Parse NOT IN, NOT CONTAINS, NOT STARTS WITH, or NOT ENDS WITH operator.

        Returns None (after rewinding to NOT) when NOT is not followed by a
        recognized operator keyword, so the caller can stop expression parsing.
        """
        saved_index = self._token_index
        self.set_next_token()
        self._skip_whitespace_and_comments()
        if self.token.is_in():
            return NotIn()
        if self.token.is_contains():
            return NotContains()
        if self.token.is_starts():
            # NOT STARTS must be followed by WITH; otherwise rewind fully.
            self.set_next_token()
            self._skip_whitespace_and_comments()
            if self.token.is_with():
                return NotStartsWith()
            self._token_index = saved_index
            return None
        if self.token.is_ends():
            # NOT ENDS must be followed by WITH; otherwise rewind fully.
            self.set_next_token()
            self._skip_whitespace_and_comments()
            if self.token.is_with():
                return NotEndsWith()
            self._token_index = saved_index
            return None
        # Not a recognized NOT operator — restore position and let the outer loop break.
        self._token_index = saved_index
        return None
851
+
745
852
  def _parse_lookup(self, node: ASTNode) -> ASTNode:
746
853
  variable = node
747
854
  lookup: Lookup | RangeLookup | None = None
@@ -46,3 +46,6 @@ class Keyword(Enum):
46
46
  END = "END"
47
47
  NULL = "NULL"
48
48
  IN = "IN"
49
+ CONTAINS = "CONTAINS"
50
+ STARTS = "STARTS"
51
+ ENDS = "ENDS"
@@ -567,6 +567,27 @@ class Token:
567
567
  def is_in(self) -> bool:
568
568
  return self._type == TokenType.KEYWORD and self._value == Keyword.IN.value
569
569
 
570
    @staticmethod
    def CONTAINS() -> Token:
        """Factory for the CONTAINS keyword token."""
        return Token(TokenType.KEYWORD, Keyword.CONTAINS.value)
573
+
574
+ def is_contains(self) -> bool:
575
+ return self._type == TokenType.KEYWORD and self._value == Keyword.CONTAINS.value
576
+
577
    @staticmethod
    def STARTS() -> Token:
        """Factory for the STARTS keyword token."""
        return Token(TokenType.KEYWORD, Keyword.STARTS.value)
580
+
581
+ def is_starts(self) -> bool:
582
+ return self._type == TokenType.KEYWORD and self._value == Keyword.STARTS.value
583
+
584
    @staticmethod
    def ENDS() -> Token:
        """Factory for the ENDS keyword token."""
        return Token(TokenType.KEYWORD, Keyword.ENDS.value)
587
+
588
+ def is_ends(self) -> bool:
589
+ return self._type == TokenType.KEYWORD and self._value == Keyword.ENDS.value
590
+
570
591
  @staticmethod
571
592
  def PIPE() -> Token:
572
593
  return Token(TokenType.KEYWORD, Operator.PIPE.value)