flowquery 1.0.18 → 1.0.21

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in the public registry.
Files changed (158)
  1. package/.gitattributes +3 -0
  2. package/.github/workflows/python-publish.yml +56 -4
  3. package/.github/workflows/release.yml +26 -19
  4. package/.husky/pre-commit +26 -0
  5. package/README.md +37 -32
  6. package/dist/flowquery.min.js +1 -1
  7. package/dist/graph/data.d.ts +5 -4
  8. package/dist/graph/data.d.ts.map +1 -1
  9. package/dist/graph/data.js +38 -20
  10. package/dist/graph/data.js.map +1 -1
  11. package/dist/graph/node.d.ts +2 -0
  12. package/dist/graph/node.d.ts.map +1 -1
  13. package/dist/graph/node.js +23 -0
  14. package/dist/graph/node.js.map +1 -1
  15. package/dist/graph/node_data.js +1 -1
  16. package/dist/graph/node_data.js.map +1 -1
  17. package/dist/graph/pattern.d.ts.map +1 -1
  18. package/dist/graph/pattern.js +11 -4
  19. package/dist/graph/pattern.js.map +1 -1
  20. package/dist/graph/relationship.d.ts +6 -1
  21. package/dist/graph/relationship.d.ts.map +1 -1
  22. package/dist/graph/relationship.js +43 -5
  23. package/dist/graph/relationship.js.map +1 -1
  24. package/dist/graph/relationship_data.d.ts +2 -0
  25. package/dist/graph/relationship_data.d.ts.map +1 -1
  26. package/dist/graph/relationship_data.js +8 -1
  27. package/dist/graph/relationship_data.js.map +1 -1
  28. package/dist/graph/relationship_match_collector.js +2 -2
  29. package/dist/graph/relationship_match_collector.js.map +1 -1
  30. package/dist/graph/relationship_reference.d.ts.map +1 -1
  31. package/dist/graph/relationship_reference.js +2 -1
  32. package/dist/graph/relationship_reference.js.map +1 -1
  33. package/dist/index.d.ts +1 -1
  34. package/dist/index.js +1 -1
  35. package/dist/parsing/parser.d.ts +6 -0
  36. package/dist/parsing/parser.d.ts.map +1 -1
  37. package/dist/parsing/parser.js +139 -72
  38. package/dist/parsing/parser.js.map +1 -1
  39. package/docs/flowquery.min.js +1 -1
  40. package/flowquery-py/misc/data/test.json +10 -0
  41. package/flowquery-py/misc/data/users.json +242 -0
  42. package/flowquery-py/notebooks/TestFlowQuery.ipynb +440 -0
  43. package/flowquery-py/pyproject.toml +48 -2
  44. package/flowquery-py/src/__init__.py +7 -5
  45. package/flowquery-py/src/compute/runner.py +14 -10
  46. package/flowquery-py/src/extensibility.py +8 -8
  47. package/flowquery-py/src/graph/__init__.py +7 -7
  48. package/flowquery-py/src/graph/data.py +38 -20
  49. package/flowquery-py/src/graph/database.py +10 -20
  50. package/flowquery-py/src/graph/node.py +50 -19
  51. package/flowquery-py/src/graph/node_data.py +1 -1
  52. package/flowquery-py/src/graph/node_reference.py +10 -11
  53. package/flowquery-py/src/graph/pattern.py +27 -37
  54. package/flowquery-py/src/graph/pattern_expression.py +13 -11
  55. package/flowquery-py/src/graph/patterns.py +2 -2
  56. package/flowquery-py/src/graph/physical_node.py +4 -3
  57. package/flowquery-py/src/graph/physical_relationship.py +5 -5
  58. package/flowquery-py/src/graph/relationship.py +62 -14
  59. package/flowquery-py/src/graph/relationship_data.py +7 -2
  60. package/flowquery-py/src/graph/relationship_match_collector.py +15 -10
  61. package/flowquery-py/src/graph/relationship_reference.py +4 -4
  62. package/flowquery-py/src/io/command_line.py +13 -14
  63. package/flowquery-py/src/parsing/__init__.py +2 -2
  64. package/flowquery-py/src/parsing/alias_option.py +1 -1
  65. package/flowquery-py/src/parsing/ast_node.py +21 -20
  66. package/flowquery-py/src/parsing/base_parser.py +7 -7
  67. package/flowquery-py/src/parsing/components/__init__.py +3 -3
  68. package/flowquery-py/src/parsing/components/from_.py +3 -1
  69. package/flowquery-py/src/parsing/components/headers.py +2 -2
  70. package/flowquery-py/src/parsing/components/null.py +2 -2
  71. package/flowquery-py/src/parsing/context.py +7 -7
  72. package/flowquery-py/src/parsing/data_structures/associative_array.py +7 -7
  73. package/flowquery-py/src/parsing/data_structures/json_array.py +3 -3
  74. package/flowquery-py/src/parsing/data_structures/key_value_pair.py +4 -4
  75. package/flowquery-py/src/parsing/data_structures/lookup.py +2 -2
  76. package/flowquery-py/src/parsing/data_structures/range_lookup.py +2 -2
  77. package/flowquery-py/src/parsing/expressions/__init__.py +16 -16
  78. package/flowquery-py/src/parsing/expressions/expression.py +16 -13
  79. package/flowquery-py/src/parsing/expressions/expression_map.py +9 -9
  80. package/flowquery-py/src/parsing/expressions/f_string.py +3 -3
  81. package/flowquery-py/src/parsing/expressions/identifier.py +4 -3
  82. package/flowquery-py/src/parsing/expressions/number.py +3 -3
  83. package/flowquery-py/src/parsing/expressions/operator.py +16 -16
  84. package/flowquery-py/src/parsing/expressions/reference.py +3 -3
  85. package/flowquery-py/src/parsing/expressions/string.py +2 -2
  86. package/flowquery-py/src/parsing/functions/__init__.py +17 -17
  87. package/flowquery-py/src/parsing/functions/aggregate_function.py +8 -8
  88. package/flowquery-py/src/parsing/functions/async_function.py +12 -9
  89. package/flowquery-py/src/parsing/functions/avg.py +4 -4
  90. package/flowquery-py/src/parsing/functions/collect.py +6 -6
  91. package/flowquery-py/src/parsing/functions/function.py +6 -6
  92. package/flowquery-py/src/parsing/functions/function_factory.py +31 -34
  93. package/flowquery-py/src/parsing/functions/function_metadata.py +10 -11
  94. package/flowquery-py/src/parsing/functions/functions.py +14 -6
  95. package/flowquery-py/src/parsing/functions/join.py +3 -3
  96. package/flowquery-py/src/parsing/functions/keys.py +3 -3
  97. package/flowquery-py/src/parsing/functions/predicate_function.py +8 -7
  98. package/flowquery-py/src/parsing/functions/predicate_sum.py +12 -7
  99. package/flowquery-py/src/parsing/functions/rand.py +2 -2
  100. package/flowquery-py/src/parsing/functions/range_.py +9 -4
  101. package/flowquery-py/src/parsing/functions/replace.py +2 -2
  102. package/flowquery-py/src/parsing/functions/round_.py +2 -2
  103. package/flowquery-py/src/parsing/functions/size.py +2 -2
  104. package/flowquery-py/src/parsing/functions/split.py +9 -4
  105. package/flowquery-py/src/parsing/functions/stringify.py +3 -3
  106. package/flowquery-py/src/parsing/functions/sum.py +4 -4
  107. package/flowquery-py/src/parsing/functions/to_json.py +2 -2
  108. package/flowquery-py/src/parsing/functions/type_.py +3 -3
  109. package/flowquery-py/src/parsing/functions/value_holder.py +1 -1
  110. package/flowquery-py/src/parsing/logic/__init__.py +2 -2
  111. package/flowquery-py/src/parsing/logic/case.py +0 -1
  112. package/flowquery-py/src/parsing/logic/when.py +3 -1
  113. package/flowquery-py/src/parsing/operations/__init__.py +10 -10
  114. package/flowquery-py/src/parsing/operations/aggregated_return.py +3 -5
  115. package/flowquery-py/src/parsing/operations/aggregated_with.py +4 -4
  116. package/flowquery-py/src/parsing/operations/call.py +6 -7
  117. package/flowquery-py/src/parsing/operations/create_node.py +5 -4
  118. package/flowquery-py/src/parsing/operations/create_relationship.py +5 -4
  119. package/flowquery-py/src/parsing/operations/group_by.py +18 -16
  120. package/flowquery-py/src/parsing/operations/load.py +21 -19
  121. package/flowquery-py/src/parsing/operations/match.py +8 -7
  122. package/flowquery-py/src/parsing/operations/operation.py +3 -3
  123. package/flowquery-py/src/parsing/operations/projection.py +6 -6
  124. package/flowquery-py/src/parsing/operations/return_op.py +9 -5
  125. package/flowquery-py/src/parsing/operations/unwind.py +3 -2
  126. package/flowquery-py/src/parsing/operations/where.py +9 -7
  127. package/flowquery-py/src/parsing/operations/with_op.py +2 -2
  128. package/flowquery-py/src/parsing/parser.py +178 -114
  129. package/flowquery-py/src/parsing/token_to_node.py +2 -2
  130. package/flowquery-py/src/tokenization/__init__.py +4 -4
  131. package/flowquery-py/src/tokenization/keyword.py +1 -1
  132. package/flowquery-py/src/tokenization/operator.py +1 -1
  133. package/flowquery-py/src/tokenization/string_walker.py +4 -4
  134. package/flowquery-py/src/tokenization/symbol.py +1 -1
  135. package/flowquery-py/src/tokenization/token.py +11 -11
  136. package/flowquery-py/src/tokenization/token_mapper.py +10 -9
  137. package/flowquery-py/src/tokenization/token_type.py +1 -1
  138. package/flowquery-py/src/tokenization/tokenizer.py +19 -19
  139. package/flowquery-py/src/tokenization/trie.py +18 -17
  140. package/flowquery-py/src/utils/__init__.py +1 -1
  141. package/flowquery-py/src/utils/object_utils.py +3 -3
  142. package/flowquery-py/src/utils/string_utils.py +12 -12
  143. package/flowquery-py/tests/compute/test_runner.py +214 -7
  144. package/flowquery-py/tests/parsing/test_parser.py +41 -0
  145. package/flowquery-vscode/flowQueryEngine/flowquery.min.js +1 -1
  146. package/package.json +1 -1
  147. package/src/graph/data.ts +38 -20
  148. package/src/graph/node.ts +23 -0
  149. package/src/graph/node_data.ts +1 -1
  150. package/src/graph/pattern.ts +13 -4
  151. package/src/graph/relationship.ts +45 -5
  152. package/src/graph/relationship_data.ts +8 -1
  153. package/src/graph/relationship_match_collector.ts +1 -1
  154. package/src/graph/relationship_reference.ts +2 -1
  155. package/src/index.ts +5 -5
  156. package/src/parsing/parser.ts +139 -71
  157. package/tests/compute/runner.test.ts +249 -79
  158. package/tests/parsing/parser.test.ts +32 -0

package/flowquery-py/src/tokenization/token.py
@@ -1,25 +1,24 @@
 """Represents a single token in the FlowQuery language."""
 
 from __future__ import annotations
-from typing import TYPE_CHECKING, Optional, Any
 
-from .token_type import TokenType
+from typing import Optional
+
+from ..parsing.ast_node import ASTNode
+from ..utils.string_utils import StringUtils
 from .keyword import Keyword
 from .operator import Operator
 from .symbol import Symbol
-from ..utils.string_utils import StringUtils
-
-if TYPE_CHECKING:
-    from ..parsing.ast_node import ASTNode
+from .token_type import TokenType
 
 
 class Token:
     """Represents a single token in the FlowQuery language.
-
+
     Tokens are the atomic units of lexical analysis, produced by the tokenizer
     and consumed by the parser. Each token has a type (keyword, operator, identifier, etc.)
     and an optional value.
-
+
     Example:
         with_token = Token.WITH()
        ident_token = Token.IDENTIFIER("myVar")
@@ -28,7 +27,7 @@ class Token:
 
     def __init__(self, type_: TokenType, value: Optional[str] = None):
         """Creates a new Token instance.
-
+
         Args:
             type_: The type of the token
             value: The optional value associated with the token
@@ -41,10 +40,10 @@ class Token:
 
     def equals(self, other: Token) -> bool:
         """Checks if this token equals another token.
-
+
         Args:
             other: The token to compare against
-
+
         Returns:
             True if tokens are equal, False otherwise
         """
@@ -82,6 +81,7 @@ class Token:
 
     @property
     def node(self) -> ASTNode:
+        # Import at runtime to avoid circular dependency
         from ..parsing.token_to_node import TokenToNode
         return TokenToNode.convert(self)
 
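
The substantive change in token.py is import hygiene: ASTNode is promoted from a TYPE_CHECKING guard to a real top-level import, while TokenToNode stays a function-local import, now with a comment explaining why. A minimal two-module sketch of that deferred-import pattern (module and function names here are illustrative, not flowquery's):

    # tokens_demo.py (illustrative)
    class Token:
        @property
        def node(self):
            # Imported at call time: nodes_demo imports this module at load,
            # so a top-level import here would complete the cycle and fail.
            from nodes_demo import convert
            return convert(self)

    # nodes_demo.py (illustrative)
    from tokens_demo import Token  # fine: tokens_demo has no load-time import of nodes_demo

    def convert(token: Token) -> dict:
        return {"kind": "node", "token": token}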

package/flowquery-py/src/tokenization/token_mapper.py
@@ -1,6 +1,7 @@
 """Maps string values to tokens using a Trie for efficient lookup."""
 
-from typing import Optional
+from enum import Enum
+from typing import Optional, Type
 
 from .token import Token
 from .trie import Trie
@@ -8,24 +9,24 @@ from .trie import Trie
 
 class TokenMapper:
     """Maps string values to tokens using a Trie for efficient lookup.
-
+
     Takes an enum of keywords, operators, or symbols and builds a trie
     for fast token matching during tokenization.
-
+
     Example:
         mapper = TokenMapper(Keyword)
         token = mapper.map("WITH")
     """
 
-    def __init__(self, enum_class):
+    def __init__(self, enum_class: Type[Enum]) -> None:
         """Creates a TokenMapper from an enum of token values.
-
+
         Args:
             enum_class: An enum class containing token values
         """
         self._trie = Trie()
         self._enum = enum_class
-
+
         for member in enum_class:
             token = Token.method(member.name)
             if token is not None and token.value is not None:
@@ -33,10 +34,10 @@ class TokenMapper:
 
     def map(self, value: str) -> Optional[Token]:
         """Maps a string value to its corresponding token.
-
+
         Args:
             value: The string value to map
-
+
         Returns:
             The matched token, or None if no match found
         """
@@ -45,7 +46,7 @@ class TokenMapper:
 
     @property
     def last_found(self) -> Optional[str]:
         """Gets the last matched string from the most recent map operation.
-
+
         Returns:
             The last found string, or None if no match
         """

package/flowquery-py/src/tokenization/token_type.py
@@ -5,7 +5,7 @@ from enum import Enum
 
 class TokenType(Enum):
     """Enumeration of all token types in FlowQuery."""
-
+
     KEYWORD = "KEYWORD"
     BOOLEAN = "BOOLEAN"
     OPERATOR = "OPERATOR"

package/flowquery-py/src/tokenization/tokenizer.py
@@ -1,6 +1,6 @@
 """Tokenizes FlowQuery input strings into a sequence of tokens."""
 
-from typing import List, Optional, Iterator, Callable
+from typing import Callable, Iterator, List, Optional
 
 from ..utils.string_utils import StringUtils
 from .keyword import Keyword
@@ -13,11 +13,11 @@ from .token_mapper import TokenMapper
 
 class Tokenizer:
     """Tokenizes FlowQuery input strings into a sequence of tokens.
-
+
     The tokenizer performs lexical analysis, breaking down the input text into
     meaningful tokens such as keywords, identifiers, operators, strings, numbers,
     and symbols. It handles comments, whitespace, and f-strings.
-
+
     Example:
         tokenizer = Tokenizer("WITH x = 1 RETURN x")
         tokens = tokenizer.tokenize()
@@ -25,7 +25,7 @@ class Tokenizer:
 
     def __init__(self, input_: str):
         """Creates a new Tokenizer instance for the given input.
-
+
         Args:
             input_: The FlowQuery input string to tokenize
         """
@@ -36,16 +36,16 @@ class Tokenizer:
 
     def tokenize(self) -> List[Token]:
         """Tokenizes the input string into an array of tokens.
-
+
         Returns:
             An array of Token objects representing the tokenized input
-
+
         Raises:
             ValueError: If an unrecognized token is encountered
         """
         tokens: List[Token] = []
         last: Optional[Token] = None
-
+
         while not self._walker.is_at_end:
             tokens.extend(self._f_string())
             last = self._get_last_non_whitespace_or_non_comment_token(tokens) or last
             token = self._get_next_token()
             if token is None:
                 raise ValueError(f"Unrecognized token at position {self._walker.position}")
             token.position = self._walker.position
             tokens.append(token)
-
+
         return tokens
 
     def _get_last_non_whitespace_or_non_comment_token(self, tokens: List[Token]) -> Optional[Token]:
@@ -97,9 +97,9 @@ class Tokenizer:
     def _identifier(self) -> Optional[Token]:
         start_position = self._walker.position
         if self._walker.check_for_under_score() or self._walker.check_for_letter():
-            while (not self._walker.is_at_end and
-                   (self._walker.check_for_letter() or
-                    self._walker.check_for_digit() or
+            while (not self._walker.is_at_end and
+                   (self._walker.check_for_letter() or
+                    self._walker.check_for_digit() or
                     self._walker.check_for_under_score())):
                 pass
             return Token.IDENTIFIER(self._walker.get_string(start_position))
@@ -110,7 +110,7 @@ class Tokenizer:
         quote_char = self._walker.check_for_quote()
         if quote_char is None:
             return None
-
+
         while not self._walker.is_at_end:
             if self._walker.escaped(quote_char):
                 self._walker.move_next()
@@ -122,32 +122,32 @@ class Tokenizer:
                     return Token.BACKTICK_STRING(value, quote_char)
                 return Token.STRING(value, quote_char)
             self._walker.move_next()
-
+
         raise ValueError(f"Unterminated string at position {start_position}")
 
     def _f_string(self) -> Iterator[Token]:
         if not self._walker.check_for_f_string_start():
             return
-
+
         self._walker.move_next()  # skip the f
         position = self._walker.position
         quote_char = self._walker.check_for_quote()
         if quote_char is None:
             return
-
+
         while not self._walker.is_at_end:
             if self._walker.escaped(quote_char) or self._walker.escaped_brace():
                 self._walker.move_next()
                 self._walker.move_next()
                 continue
-
+
             if self._walker.opening_brace():
                 yield Token.F_STRING(self._walker.get_string(position), quote_char)
                 position = self._walker.position
                 yield Token.OPENING_BRACE()
                 self._walker.move_next()  # skip the opening brace
                 position = self._walker.position
-
+
                 while not self._walker.is_at_end and not self._walker.closing_brace():
                     token = self._get_next_token()
                     if token is not None:
@@ -159,11 +159,11 @@ class Tokenizer:
                     self._walker.move_next()  # skip the closing brace
                     position = self._walker.position
                     break
-
+
             if self._walker.check_for_string(quote_char):
                 yield Token.F_STRING(self._walker.get_string(position), quote_char)
                 return
-
+
             self._walker.move_next()
 
     def _whitespace(self) -> Optional[Token]:
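
The docstring's example, expanded into a hedged end-to-end sketch (the import path follows this diff's source tree; value and position are the token attributes visible in this diff):

    from src.tokenization.tokenizer import Tokenizer

    tokens = Tokenizer("WITH x = 1 RETURN x").tokenize()
    for token in tokens:
        print(token.position, token.value)  # position is assigned during tokenize()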

package/flowquery-py/src/tokenization/trie.py
@@ -1,6 +1,7 @@
 """Trie (prefix tree) data structure for efficient keyword and operator lookup."""
 
 from __future__ import annotations
+
 from typing import TYPE_CHECKING, Optional
 
 if TYPE_CHECKING:
@@ -9,12 +10,12 @@ if TYPE_CHECKING:
 
 class TrieNode:
     """Represents a node in a Trie data structure.
-
+
     Each node can have children nodes (one per character) and may contain a token
     if the path to this node represents a complete word.
     """
 
-    def __init__(self):
+    def __init__(self) -> None:
         self._children: dict[str, TrieNode] = {}
         self._token: Optional[Token] = None
 
@@ -43,59 +44,59 @@ class TrieNode:
 
 class Trie:
     """Trie (prefix tree) data structure for efficient keyword and operator lookup.
-
+
     Used during tokenization to quickly match input strings against known keywords
     and operators. Supports case-insensitive matching and tracks the longest match found.
-
+
     Example:
         trie = Trie()
         trie.insert(Token.WITH)
         found = trie.find("WITH")
     """
 
-    def __init__(self):
+    def __init__(self) -> None:
         self._root = TrieNode()
         self._max_length = 0
         self._last_found: Optional[str] = None
 
     def insert(self, token: Token) -> None:
         """Inserts a token into the trie.
-
+
         Args:
             token: The token to insert
-
+
         Raises:
             ValueError: If the token value is None or empty
         """
         if token.value is None or len(token.value) == 0:
             raise ValueError("Token value cannot be null or empty")
-
+
         current_node = self._root
         for char in token.value:
             current_node = current_node.map(char.lower())
-
+
         if len(token.value) > self._max_length:
             self._max_length = len(token.value)
-
+
         current_node.token = token
 
     def find(self, value: str) -> Optional[Token]:
         """Finds a token by searching for the longest matching prefix in the trie.
-
+
         Args:
             value: The string value to search for
-
+
         Returns:
             The token if found, None otherwise
         """
         if len(value) == 0:
             return None
-
+
         index = 0
         current: Optional[TrieNode] = None
         found: Optional[Token] = None
         self._last_found = None
-
+
         while True:
             next_node = (current or self._root).retrieve(value[index].lower())
             if next_node is None:
@@ -107,17 +108,17 @@ class Trie:
             index += 1
             if index >= len(value) or index > self._max_length:
                 break
-
+
         if current is not None and current.is_end_of_word():
             found = current.token
             self._last_found = value[:index]
-
+
         return found
 
     @property
     def last_found(self) -> Optional[str]:
         """Gets the last matched string from the most recent find operation.
-
+
         Returns:
             The last found string, or None if no match was found
         """

package/flowquery-py/src/utils/__init__.py
@@ -1,6 +1,6 @@
 """Utils module for FlowQuery."""
 
-from .string_utils import StringUtils
 from .object_utils import ObjectUtils
+from .string_utils import StringUtils
 
 __all__ = ["StringUtils", "ObjectUtils"]

package/flowquery-py/src/utils/object_utils.py
@@ -7,13 +7,13 @@ class ObjectUtils:
     """Utility class for object-related operations."""
 
     @staticmethod
-    def is_instance_of_any(obj: Any, classes: List[Type]) -> bool:
+    def is_instance_of_any(obj: Any, classes: List[Type[Any]]) -> bool:
         """Checks if an object is an instance of any of the provided classes.
-
+
         Args:
             obj: The object to check
             classes: Array of class constructors to test against
-
+
         Returns:
             True if the object is an instance of any class, False otherwise
         """

package/flowquery-py/src/utils/string_utils.py
@@ -3,11 +3,11 @@
 
 class StringUtils:
     """Utility class for string manipulation and validation.
-
+
     Provides methods for handling quoted strings, comments, escape sequences,
     and identifier validation.
     """
-
+
     quotes = ['"', "'", '`']
     letters = 'abcdefghijklmnopqrstuvwxyz'
     digits = '0123456789'
@@ -17,10 +17,10 @@ class StringUtils:
     @staticmethod
     def unquote(s: str) -> str:
         """Removes surrounding quotes from a string.
-
+
         Args:
             s: The string to unquote
-
+
         Returns:
             The unquoted string
         """
@@ -41,10 +41,10 @@ class StringUtils:
     @staticmethod
     def uncomment(s: str) -> str:
         """Removes comment markers from a string.
-
+
         Args:
             s: The comment string
-
+
         Returns:
             The string without comment markers
         """
@@ -59,11 +59,11 @@ class StringUtils:
     @staticmethod
     def remove_escaped_quotes(s: str, quote_char: str) -> str:
         """Removes escape sequences before quotes in a string.
-
+
         Args:
             s: The string to process
             quote_char: The quote character that was escaped
-
+
         Returns:
             The string with escape sequences removed
         """
@@ -79,10 +79,10 @@ class StringUtils:
     @staticmethod
     def remove_escaped_braces(s: str) -> str:
         """Removes escaped braces ({{ and }}) from f-strings.
-
+
         Args:
             s: The string to process
-
+
         Returns:
             The string with escaped braces resolved
         """
@@ -98,10 +98,10 @@ class StringUtils:
     @staticmethod
     def can_be_identifier(s: str) -> bool:
         """Checks if a string is a valid identifier.
-
+
         Args:
             s: The string to validate
-
+
         Returns:
             True if the string can be used as an identifier, false otherwise
         """