lionagi 0.2.11 → 0.3.1 (py3-none-any.whl)

Files changed (153)
  1. lionagi/core/action/function_calling.py +13 -6
  2. lionagi/core/action/tool.py +10 -9
  3. lionagi/core/action/tool_manager.py +18 -9
  4. lionagi/core/agent/README.md +1 -1
  5. lionagi/core/agent/base_agent.py +5 -2
  6. lionagi/core/agent/eval/README.md +1 -1
  7. lionagi/core/collections/README.md +1 -1
  8. lionagi/core/collections/_logger.py +16 -6
  9. lionagi/core/collections/abc/README.md +1 -1
  10. lionagi/core/collections/abc/component.py +35 -11
  11. lionagi/core/collections/abc/concepts.py +5 -3
  12. lionagi/core/collections/abc/exceptions.py +3 -1
  13. lionagi/core/collections/flow.py +16 -5
  14. lionagi/core/collections/model.py +34 -8
  15. lionagi/core/collections/pile.py +65 -28
  16. lionagi/core/collections/progression.py +1 -2
  17. lionagi/core/collections/util.py +11 -2
  18. lionagi/core/director/README.md +1 -1
  19. lionagi/core/engine/branch_engine.py +35 -10
  20. lionagi/core/engine/instruction_map_engine.py +14 -5
  21. lionagi/core/engine/sandbox_.py +3 -1
  22. lionagi/core/engine/script_engine.py +6 -2
  23. lionagi/core/executor/base_executor.py +10 -3
  24. lionagi/core/executor/graph_executor.py +12 -4
  25. lionagi/core/executor/neo4j_executor.py +18 -6
  26. lionagi/core/generic/edge.py +7 -2
  27. lionagi/core/generic/graph.py +23 -7
  28. lionagi/core/generic/node.py +14 -5
  29. lionagi/core/generic/tree_node.py +5 -1
  30. lionagi/core/mail/mail_manager.py +3 -1
  31. lionagi/core/mail/package.py +3 -1
  32. lionagi/core/message/action_request.py +9 -2
  33. lionagi/core/message/action_response.py +9 -3
  34. lionagi/core/message/instruction.py +8 -2
  35. lionagi/core/message/util.py +15 -5
  36. lionagi/core/report/base.py +12 -7
  37. lionagi/core/report/form.py +7 -4
  38. lionagi/core/report/report.py +10 -3
  39. lionagi/core/report/util.py +3 -1
  40. lionagi/core/rule/action.py +4 -1
  41. lionagi/core/rule/base.py +17 -6
  42. lionagi/core/rule/rulebook.py +8 -4
  43. lionagi/core/rule/string.py +3 -1
  44. lionagi/core/session/branch.py +15 -4
  45. lionagi/core/session/directive_mixin.py +11 -3
  46. lionagi/core/session/session.py +6 -2
  47. lionagi/core/unit/parallel_unit.py +9 -3
  48. lionagi/core/unit/template/action.py +1 -1
  49. lionagi/core/unit/template/predict.py +3 -1
  50. lionagi/core/unit/template/select.py +5 -3
  51. lionagi/core/unit/unit.py +38 -4
  52. lionagi/core/unit/unit_form.py +13 -15
  53. lionagi/core/unit/unit_mixin.py +45 -27
  54. lionagi/core/unit/util.py +7 -3
  55. lionagi/core/validator/validator.py +28 -15
  56. lionagi/core/work/work_edge.py +7 -3
  57. lionagi/core/work/work_task.py +11 -5
  58. lionagi/core/work/worker.py +20 -5
  59. lionagi/core/work/worker_engine.py +6 -2
  60. lionagi/core/work/worklog.py +3 -1
  61. lionagi/experimental/compressor/llm_compressor.py +20 -5
  62. lionagi/experimental/directive/README.md +1 -1
  63. lionagi/experimental/directive/parser/base_parser.py +41 -14
  64. lionagi/experimental/directive/parser/base_syntax.txt +23 -23
  65. lionagi/experimental/directive/template/base_template.py +14 -6
  66. lionagi/experimental/directive/tokenizer.py +3 -1
  67. lionagi/experimental/evaluator/README.md +1 -1
  68. lionagi/experimental/evaluator/ast_evaluator.py +6 -2
  69. lionagi/experimental/evaluator/base_evaluator.py +27 -16
  70. lionagi/integrations/bridge/autogen_/autogen_.py +7 -3
  71. lionagi/integrations/bridge/langchain_/documents.py +13 -10
  72. lionagi/integrations/bridge/llamaindex_/llama_pack.py +36 -12
  73. lionagi/integrations/bridge/llamaindex_/node_parser.py +8 -3
  74. lionagi/integrations/bridge/llamaindex_/reader.py +3 -1
  75. lionagi/integrations/bridge/llamaindex_/textnode.py +9 -3
  76. lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +7 -1
  77. lionagi/integrations/bridge/transformers_/install_.py +3 -1
  78. lionagi/integrations/chunker/chunk.py +5 -2
  79. lionagi/integrations/loader/load.py +7 -3
  80. lionagi/integrations/loader/load_util.py +35 -16
  81. lionagi/integrations/provider/oai.py +13 -4
  82. lionagi/integrations/provider/openrouter.py +13 -4
  83. lionagi/integrations/provider/services.py +3 -1
  84. lionagi/integrations/provider/transformers.py +5 -3
  85. lionagi/integrations/storage/neo4j.py +23 -7
  86. lionagi/integrations/storage/storage_util.py +23 -7
  87. lionagi/integrations/storage/structure_excel.py +7 -2
  88. lionagi/integrations/storage/to_csv.py +8 -2
  89. lionagi/integrations/storage/to_excel.py +11 -3
  90. lionagi/libs/ln_api.py +41 -19
  91. lionagi/libs/ln_context.py +4 -4
  92. lionagi/libs/ln_convert.py +35 -14
  93. lionagi/libs/ln_dataframe.py +9 -3
  94. lionagi/libs/ln_func_call.py +53 -18
  95. lionagi/libs/ln_image.py +9 -5
  96. lionagi/libs/ln_knowledge_graph.py +21 -7
  97. lionagi/libs/ln_nested.py +57 -16
  98. lionagi/libs/ln_parse.py +45 -15
  99. lionagi/libs/ln_queue.py +8 -3
  100. lionagi/libs/ln_tokenize.py +19 -6
  101. lionagi/libs/ln_validate.py +14 -3
  102. lionagi/libs/sys_util.py +44 -12
  103. lionagi/lions/coder/coder.py +24 -8
  104. lionagi/lions/coder/util.py +6 -2
  105. lionagi/lions/researcher/data_source/google_.py +12 -4
  106. lionagi/lions/researcher/data_source/wiki_.py +3 -1
  107. lionagi/version.py +1 -1
  108. {lionagi-0.2.11.dist-info → lionagi-0.3.1.dist-info}/METADATA +6 -7
  109. lionagi-0.3.1.dist-info/RECORD +226 -0
  110. lionagi/tests/__init__.py +0 -0
  111. lionagi/tests/api/__init__.py +0 -0
  112. lionagi/tests/api/aws/__init__.py +0 -0
  113. lionagi/tests/api/aws/conftest.py +0 -25
  114. lionagi/tests/api/aws/test_aws_s3.py +0 -6
  115. lionagi/tests/integrations/__init__.py +0 -0
  116. lionagi/tests/libs/__init__.py +0 -0
  117. lionagi/tests/libs/test_api.py +0 -48
  118. lionagi/tests/libs/test_convert.py +0 -89
  119. lionagi/tests/libs/test_field_validators.py +0 -354
  120. lionagi/tests/libs/test_func_call.py +0 -701
  121. lionagi/tests/libs/test_nested.py +0 -382
  122. lionagi/tests/libs/test_parse.py +0 -171
  123. lionagi/tests/libs/test_queue.py +0 -68
  124. lionagi/tests/libs/test_sys_util.py +0 -222
  125. lionagi/tests/test_core/__init__.py +0 -0
  126. lionagi/tests/test_core/collections/__init__.py +0 -0
  127. lionagi/tests/test_core/collections/test_component.py +0 -208
  128. lionagi/tests/test_core/collections/test_exchange.py +0 -139
  129. lionagi/tests/test_core/collections/test_flow.py +0 -146
  130. lionagi/tests/test_core/collections/test_pile.py +0 -172
  131. lionagi/tests/test_core/collections/test_progression.py +0 -130
  132. lionagi/tests/test_core/generic/__init__.py +0 -0
  133. lionagi/tests/test_core/generic/test_edge.py +0 -69
  134. lionagi/tests/test_core/generic/test_graph.py +0 -97
  135. lionagi/tests/test_core/generic/test_node.py +0 -107
  136. lionagi/tests/test_core/generic/test_structure.py +0 -194
  137. lionagi/tests/test_core/generic/test_tree_node.py +0 -74
  138. lionagi/tests/test_core/graph/__init__.py +0 -0
  139. lionagi/tests/test_core/graph/test_graph.py +0 -71
  140. lionagi/tests/test_core/graph/test_tree.py +0 -76
  141. lionagi/tests/test_core/mail/__init__.py +0 -0
  142. lionagi/tests/test_core/mail/test_mail.py +0 -98
  143. lionagi/tests/test_core/test_branch.py +0 -116
  144. lionagi/tests/test_core/test_form.py +0 -47
  145. lionagi/tests/test_core/test_report.py +0 -106
  146. lionagi/tests/test_core/test_structure/__init__.py +0 -0
  147. lionagi/tests/test_core/test_structure/test_base_structure.py +0 -198
  148. lionagi/tests/test_core/test_structure/test_graph.py +0 -55
  149. lionagi/tests/test_core/test_structure/test_tree.py +0 -49
  150. lionagi/tests/test_core/test_validator.py +0 -112
  151. lionagi-0.2.11.dist-info/RECORD +0 -267
  152. {lionagi-0.2.11.dist-info → lionagi-0.3.1.dist-info}/LICENSE +0 -0
  153. {lionagi-0.2.11.dist-info → lionagi-0.3.1.dist-info}/WHEEL +0 -0
lionagi/core/work/worklog.py
@@ -25,7 +25,9 @@ class WorkLog(Progressable):
             Defaults to 1.
         """
         self.pile = (
-            workpile if workpile and isinstance(workpile, Pile) else pile({}, Work)
+            workpile
+            if workpile and isinstance(workpile, Pile)
+            else pile({}, Work)
         )
         self.pending = progression(workpile) if workpile else progression()
         self.queue = WorkQueue(capacity=capacity, refresh_time=refresh_time)
lionagi/experimental/compressor/llm_compressor.py
@@ -101,7 +101,12 @@ class LLMCompressor(TokenCompressor):
         return a(text, **kwargs)
 
     async def rank_by_pplex(
-        self, items: list, initial_text=None, cumulative=False, n_samples=None, **kwargs
+        self,
+        items: list,
+        initial_text=None,
+        cumulative=False,
+        n_samples=None,
+        **kwargs,
     ):
         """
         rank a list of items according to their perplexity
@@ -180,17 +185,23 @@ class LLMCompressor(TokenCompressor):
 
         if rank_by == "perplexity":
             ranked_items = await self.rank_by_pplex(
-                items=items, initial_text=initial_text, cumulative=cumulative, **kwargs
+                items=items,
+                initial_text=initial_text,
+                cumulative=cumulative,
+                **kwargs,
             )
 
-            prompt_tokens = sum([i[1]["num_prompt_tokens"] for i in ranked_items])
+            prompt_tokens = sum(
+                [i[1]["num_prompt_tokens"] for i in ranked_items]
+            )
 
             num_completion_tokens = sum(
                 [i[1]["num_completion_tokens"] for i in ranked_items]
             )
 
             price = (
-                prompt_tokens * 0.5 / 1000000 + num_completion_tokens * 1.5 / 1000000
+                prompt_tokens * 0.5 / 1000000
+                + num_completion_tokens * 1.5 / 1000000
             )
 
             selected_items = self.select_by_pplex(
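Note: the price expression above is only rewrapped; the 0.5 and 1.5 factors read as hardcoded per-million-token rates. A quick worked example with hypothetical token counts, just to make the arithmetic concrete:

prompt_tokens = 120_000          # hypothetical count
num_completion_tokens = 8_000    # hypothetical count

price = (
    prompt_tokens * 0.5 / 1000000
    + num_completion_tokens * 1.5 / 1000000
)
print(round(price, 3))  # 0.06 + 0.012 = 0.072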
@@ -227,7 +238,11 @@ class LLMCompressor(TokenCompressor):
             raise ValueError(f"Ranking method {rank_by} is not supported")
 
     def select_by_pplex(
-        self, ranked_items, target_compression_ratio, original_length, min_pplex=None
+        self,
+        ranked_items,
+        target_compression_ratio,
+        original_length,
+        min_pplex=None,
     ):
         min_pplex = min_pplex or 0
 
lionagi/experimental/directive/README.md (content unchanged apart from whitespace)
@@ -1 +1 @@
-TODO
+TODO
lionagi/experimental/directive/parser/base_parser.py
@@ -21,10 +21,10 @@ class BaseDirectiveParser:
         BaseToken(KEYWORD, IF)
     """
 
-    def __init__(self, tokens: List[BaseToken]):
+    def __init__(self, tokens: list[BaseToken]):
         self.tokens = tokens
         self.current_token_index = -1
-        self.current_token: Optional[BaseToken] = None
+        self.current_token: BaseToken | None = None
         self.next_token()
 
     def next_token(self) -> None:
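Note: this hunk, like several below in base_template.py, base_evaluator.py, autogen_.py, and documents.py, swaps typing.List/Dict/Optional/Union for built-in generics and | unions (PEP 585 / PEP 604, i.e. Python 3.9+/3.10+). The annotations are equivalent; a minimal standalone sketch:

from typing import Dict, List, Optional  # pre-3.9 spellings


def old_style(
    tokens: List[str], extra: Optional[Dict[str, int]] = None
) -> Optional[str]:
    return tokens[0] if tokens else None


def new_style(
    tokens: list[str], extra: dict[str, int] | None = None
) -> str | None:
    # Identical runtime behavior; only the annotation syntax changes.
    return tokens[0] if tokens else None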
@@ -50,13 +50,15 @@ class BaseDirectiveParser:
         else:
             return None
 
-    def skip_until(self, token_types: List[str]) -> None:
+    def skip_until(self, token_types: list[str]) -> None:
         """Skips tokens until a token of the specified type is found.
 
         Args:
             token_types (List[str]): A list of token types to stop skipping.
         """
-        while self.current_token and self.current_token.type not in token_types:
+        while (
+            self.current_token and self.current_token.type not in token_types
+        ):
             self.next_token()
 
     def mark(self) -> int:
@@ -108,10 +110,15 @@ class BaseDirectiveParser:
         """
         block = []
         # Parse the block until 'ELSE', 'ENDIF', ensuring not to include semicolons as part of the block
-        while self.current_token and self.current_token.value not in ("ENDIF", "ELSE"):
+        while self.current_token and self.current_token.value not in (
+            "ENDIF",
+            "ELSE",
+        ):
             if self.current_token.value == "DO":
                 self.next_token()  # Move past 'DO' to get to the action
-                block.append(self.current_token.value)  # Add the action to the block
+                block.append(
+                    self.current_token.value
+                )  # Add the action to the block
                 self.next_token()  # Move to the next token, which could be a semicolon or the next action
                 if self.current_token.value == ";":
                     self.next_token()  # Move past the semicolon
@@ -126,11 +133,16 @@ class BaseDirectiveParser:
         Raises:
             SyntaxError: If the IF statement is not properly formed.
         """
-        if self.current_token.type != "KEYWORD" or self.current_token.value != "IF":
+        if (
+            self.current_token.type != "KEYWORD"
+            or self.current_token.value != "IF"
+        ):
             raise SyntaxError("Expected IF statement")
         self.next_token()  # Skip 'IF'
 
-        condition = self.parse_expression()  # Now properly ends after the semicolon
+        condition = (
+            self.parse_expression()
+        )  # Now properly ends after the semicolon
 
         true_block = []
         if self.current_token.value == "DO":
@@ -156,7 +168,10 @@ class BaseDirectiveParser:
         Raises:
             SyntaxError: If the FOR statement is not properly formed.
         """
-        if self.current_token.type != "KEYWORD" or self.current_token.value != "FOR":
+        if (
+            self.current_token.type != "KEYWORD"
+            or self.current_token.value != "FOR"
+        ):
             raise SyntaxError("Expected FOR statement")
         self.next_token()  # Skip 'FOR'
 
@@ -167,7 +182,10 @@ class BaseDirectiveParser:
         self.next_token()  # Move past the iterator variable
 
         # Expect and skip 'IN' keyword
-        if self.current_token.type != "KEYWORD" or self.current_token.value != "IN":
+        if (
+            self.current_token.type != "KEYWORD"
+            or self.current_token.value != "IN"
+        ):
             raise SyntaxError("Expected 'IN' after iterator variable")
         self.next_token()  # Move past 'IN'
 
@@ -194,7 +212,9 @@ class BaseDirectiveParser:
         if self.current_token and self.current_token.value == "DO":
             self.next_token()
 
-        while self.current_token and self.current_token.value not in ("ENDFOR",):
+        while self.current_token and self.current_token.value not in (
+            "ENDFOR",
+        ):
             if self.current_token.value == ";":
                 # If a semicolon is encountered, skip it and move to the next token
                 self.next_token()
@@ -217,11 +237,16 @@ class BaseDirectiveParser:
         Raises:
             SyntaxError: If the TRY statement is not properly formed.
         """
-        if self.current_token.type != "KEYWORD" or self.current_token.value != "TRY":
+        if (
+            self.current_token.type != "KEYWORD"
+            or self.current_token.value != "TRY"
+        ):
             raise SyntaxError("Expected TRY statement")
         self.next_token()  # Skip 'TRY'
 
-        try_block = self.parse_try_block("EXCEPT")  # Parse the try block until 'EXCEPT'
+        try_block = self.parse_try_block(
+            "EXCEPT"
+        )  # Parse the try block until 'EXCEPT'
 
         # Now expecting 'EXCEPT' keyword
         if not (self.current_token and self.current_token.value == "EXCEPT"):
@@ -256,7 +281,9 @@ class BaseDirectiveParser:
                 self.next_token()  # Move past the semicolon
                 continue  # Skip adding ';' to the block
             else:
-                block.append(self.current_token.value)  # Add the action to the block
+                block.append(
+                    self.current_token.value
+                )  # Add the action to the block
                 self.next_token()
 
         return block
lionagi/experimental/directive/parser/base_syntax.txt (the +23/-23 pairs in this file differ only in trailing whitespace)
@@ -24,11 +24,11 @@ ELIF
 ELSE
 
 # loop
-FOR
+FOR
 IN
 
 # EXCEPTION
-TRY
+TRY
 EXCEPT
 
 """
@@ -54,33 +54,33 @@ tools 1,2,3
 
 def main(): <-> compose
 
-If condition1 && condition2 || !condition3;
+If condition1 && condition2 || !condition3;
 DO tool1(param1, param2);
 ENDIF;
-
+
 IF must follow condition, (condition parsing logic);
 DO ....;
 ENDIF;
-
-
-
-
-THEN;
+
+
+
+
+THEN;
 DO tool2(param1, param2);
-
+
 THEN;
-
-TRY; DO ACTION_C;
-EXCEPT; DO ACTION_D;
-ENDEXCEPT;
-ENDTRY;
+
+TRY; DO ACTION_C;
+EXCEPT; DO ACTION_D;
+ENDEXCEPT;
+ENDTRY;
 
 RETURN <-> run
 
 
 
 
-COMPOSE; - indicate the beginning of a script-like block to execute
+COMPOSE; - indicate the beginning of a script-like block to execute
 
 THEN; - indicate continuation of a script-like block, another statement to execute
 
@@ -91,7 +91,7 @@ XX; ; END XX;
 
 
 
-TRY; DO ACTION_A; END TRY;
+TRY; DO ACTION_A; END TRY;
 
 
 
@@ -108,15 +108,15 @@ THEN TRY; DO ACTION_C ENDTRY;
 
 THEN IF EXCEPT; IF CONDITION_C; DO ACTION_D; ENDEXCEPT
 
-;
+;
 THEN FOR ITEM IN COLLECTION; DO ACTION_E(input_.param1, input_.param2); ENDFOR
 RUN;
 
 
 
-DO ACTION B;
+DO ACTION B;
 
-IF
+IF
 THEN
 
 GROUP
@@ -150,7 +150,7 @@ FOR input_ IN collections DO action2(input_.param1, input_.param2); ENDFOR; END;
 
 example 2:
 BEGIN; IF condition1; TRY DO action1(param1, param2); EXCEPT DO action2(param3, param4);
-ENDTRY; ELIF condition2; DO action2(param1, param2); ELSE DO action3(param1, param2);
+ENDTRY; ELIF condition2; DO action2(param1, param2); ELSE DO action3(param1, param2);
 ENDIF; END;
 
 """
@@ -180,7 +180,7 @@ CHAT / REACT
 DO 1,2
 session = Li.Session(..., tools=tools)
 await alcall(session.branches, func_1)
-
+
 
 THEN DO 3,4
 session.chat.py(3,4)
@@ -197,4 +197,4 @@ run 1,2,3,4,5
 
 1->2, or 1->3, or 1->4, THEN....
 
-"""
+"""
lionagi/experimental/directive/template/base_template.py
@@ -11,9 +11,11 @@ class DirectiveTemplate:
         self.template_str = template_str
         self.evaluator = BaseEvaluator()
 
-    def _render_conditionals(self, context: Dict[str, Any]) -> str:
+    def _render_conditionals(self, context: dict[str, Any]) -> str:
         """Processes conditional statements with improved logic and support for 'else'."""
-        pattern = re.compile(r"\{if (.*?)\}(.*?)\{else\}(.*?)\{endif\}", re.DOTALL)
+        pattern = re.compile(
+            r"\{if (.*?)\}(.*?)\{else\}(.*?)\{endif\}", re.DOTALL
+        )
 
         def evaluate_condition(match):
             condition, if_text, else_text = match.groups()
@@ -24,9 +26,11 @@ class DirectiveTemplate:
 
         return pattern.sub(evaluate_condition, self.template_str)
 
-    def _render_loops(self, template: str, context: Dict[str, Any]) -> str:
+    def _render_loops(self, template: str, context: dict[str, Any]) -> str:
         """Processes loop statements within the template."""
-        loop_pattern = re.compile(r"\{for (\w+) in (\w+)\}(.*?)\{endfor\}", re.DOTALL)
+        loop_pattern = re.compile(
+            r"\{for (\w+) in (\w+)\}(.*?)\{endfor\}", re.DOTALL
+        )
 
         def render_loop(match):
             iterator_var, collection_name, loop_body = match.groups()
@@ -46,7 +50,9 @@ class DirectiveTemplate:
 
         return loop_pattern.sub(render_loop, template)
 
-    def fill(self, template_str: str = "", context: Dict[str, Any] = {}) -> str:
+    def fill(
+        self, template_str: str = "", context: dict[str, Any] = {}
+    ) -> str:
         """Fills the template with values from context after processing conditionals and loops."""
         if not template_str:  # Use the instance's template if not provided
             template_str = self.template_str
@@ -54,7 +60,9 @@ class DirectiveTemplate:
         # First, process conditionals with 'else'
         template_with_conditionals = self._render_conditionals(template_str)
         # Then, process loops
-        template_with_loops = self._render_loops(template_with_conditionals, context)
+        template_with_loops = self._render_loops(
+            template_with_conditionals, context
+        )
         # Finally, substitute the placeholders with context values
         try:
             return template_with_loops.format(**context)
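Note: across the base_template.py hunks the conditional and loop regexes are only rewrapped, not changed. For reference, this is what those two patterns capture, using a made-up template string (not from the diff):

import re

# The two patterns from the hunks above, reproduced verbatim.
cond_pattern = re.compile(r"\{if (.*?)\}(.*?)\{else\}(.*?)\{endif\}", re.DOTALL)
loop_pattern = re.compile(r"\{for (\w+) in (\w+)\}(.*?)\{endfor\}", re.DOTALL)

# Hypothetical template text, just to show what the groups capture.
template = "{if score > 80}PASS{else}FAIL{endif} {for item in notes}{item}; {endfor}"

print(cond_pattern.findall(template))  # [('score > 80', 'PASS', 'FAIL')]
print(loop_pattern.findall(template))  # [('item', 'notes', '{item}; ')]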
lionagi/experimental/directive/tokenizer.py
@@ -44,7 +44,9 @@ class BaseTokenizer:
                 position = match.end()  # Move past the matched token
                 break
             if not match:  # No match found, unrecognized token
-                raise SyntaxError(f"Unexpected character: {self.script[position]}")
+                raise SyntaxError(
+                    f"Unexpected character: {self.script[position]}"
+                )
                 # break
 
     def get_tokens(self):
lionagi/experimental/evaluator/README.md (content unchanged apart from whitespace)
@@ -1 +1 @@
-TODO
+TODO
lionagi/experimental/evaluator/ast_evaluator.py
@@ -26,7 +26,9 @@ class ASTEvaluator:
             tree = ast.parse(expression, mode="eval")
             return self._evaluate_node(tree.body, context)
         except Exception as e:
-            raise ValueError(f"Failed to evaluate expression: {expression}. Error: {e}")
+            raise ValueError(
+                f"Failed to evaluate expression: {expression}. Error: {e}"
+            )
 
     def _evaluate_node(self, node, context):
         if isinstance(node, ast.Compare):
@@ -104,7 +106,9 @@ class ASTEvaluationEngine:
                 value_expr = ast.unparse(stmt.value)
                 value = self._evaluate_expression(value_expr)
                 self._assign_variable(var_name, value)
-            elif isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Call):
+            elif isinstance(stmt, ast.Expr) and isinstance(
+                stmt.value, ast.Call
+            ):
                 func_name = stmt.value.func.id
                 arg_expr = ast.unparse(stmt.value.args[0])
                 arg = self._evaluate_expression(arg_expr)
lionagi/experimental/evaluator/base_evaluator.py
@@ -1,6 +1,7 @@
 import ast
 import operator
-from typing import Any, Callable, Dict, Tuple
+from collections.abc import Callable
+from typing import Any, Dict, Tuple
 
 from lionagi.libs.ln_convert import to_dict
 
@@ -16,7 +17,7 @@ class BaseEvaluator:
 
     def __init__(self) -> None:
         """Initializes the evaluator with supported operators and an empty cache."""
-        self.allowed_operators: Dict[type, Any] = {
+        self.allowed_operators: dict[type, Any] = {
             ast.Add: operator.add,
             ast.Sub: operator.sub,
             ast.Mult: operator.mul,
@@ -34,9 +35,9 @@ class BaseEvaluator:
             ast.Not: operator.not_,
             ast.USub: operator.neg,
         }
-        self.cache: Dict[Tuple[str, Tuple], Any] = {}
+        self.cache: dict[tuple[str, tuple], Any] = {}
 
-    def evaluate(self, expression: str, context: Dict[str, Any]) -> Any:
+    def evaluate(self, expression: str, context: dict[str, Any]) -> Any:
         """
         Evaluates a given expression string using the provided context.
 
@@ -60,9 +61,11 @@ class BaseEvaluator:
                 self.cache[cache_key] = result
                 return result
         except Exception as e:
-            raise ValueError(f"Failed to evaluate expression: {expression}. Error: {e}")
+            raise ValueError(
+                f"Failed to evaluate expression: {expression}. Error: {e}"
+            )
 
-    def _evaluate_node(self, node: ast.AST, context: Dict[str, Any]) -> Any:
+    def _evaluate_node(self, node: ast.AST, context: dict[str, Any]) -> Any:
         """Recursively evaluates an AST node."""
         if isinstance(node, ast.BinOp):
             left = self._evaluate_node(node.left, context)
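Note: the base_evaluator.py hunks only touch annotations and line wrapping; the evaluator still walks the AST and applies only the operators whitelisted in allowed_operators. A rough usage sketch against the evaluate signature shown above (the module path is inferred from the file list, and variable lookup from the context dict is assumed rather than shown in these hunks):

# Module path inferred from the "Files changed" list; treat as an assumption.
from lionagi.experimental.evaluator.base_evaluator import BaseEvaluator

evaluator = BaseEvaluator()
# Sticks to operators visible in the hunks (ast.Add, ast.Mult).
print(evaluator.evaluate("x * 2 + 1", {"x": 6}))  # expected: 13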
@@ -87,7 +90,9 @@ class BaseEvaluator:
                     break
                 left = right
         elif isinstance(node, ast.BoolOp):
-            values = [self._evaluate_node(value, context) for value in node.values]
+            values = [
+                self._evaluate_node(value, context) for value in node.values
+            ]
             if isinstance(node.op, ast.And):
                 result = all(values)
             elif isinstance(node.op, ast.Or):
@@ -104,12 +109,14 @@ class BaseEvaluator:
         if custom_node_class not in self.allowed_operators:
             self.allowed_operators[custom_node_class] = operation_func
         else:
-            raise ValueError(f"Custom operator '{operator_name}' is already defined.")
+            raise ValueError(
+                f"Custom operator '{operator_name}' is already defined."
+            )
 
     def evaluate_file(self, file_path, context, format="line"):
         """Evaluates expressions from a file."""
         if format == "line":
-            with open(file_path, "r") as file:
+            with open(file_path) as file:
                 last_result = None
                 for line in file:
                     line = line.strip()
@@ -117,7 +124,7 @@ class BaseEvaluator:
                     last_result = self.evaluate(line, context)
                 return last_result
         elif format == "json":
-            with open(file_path, "r") as file:
+            with open(file_path) as file:
                 data = to_dict(file)
                 last_result = None
                 for expression in data:
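Note: dropping the explicit "r" in the two open() calls above is behavior-preserving, since "r" is the default mode; it matches the pyupgrade-style cleanups elsewhere in this release (the exact tooling isn't stated in the diff). Trivial check:

from pathlib import Path

Path("expressions.txt").write_text("1 + 1\n")

# open(path) and open(path, "r") are the same call: mode defaults to "r".
with open("expressions.txt") as a, open("expressions.txt", "r") as b:
    assert a.read() == b.read()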
@@ -153,8 +160,8 @@ class BaseEvaluator:
 
 class BaseEvaluationEngine:
     def __init__(self) -> None:
-        self.variables: Dict[str, Any] = {}
-        self.functions: Dict[str, Callable] = {
+        self.variables: dict[str, Any] = {}
+        self.functions: dict[str, Callable] = {
             "print": print,
         }
 
@@ -181,16 +188,20 @@ class BaseEvaluationEngine:
             elif isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Call):
                 func_name = stmt.value.func.id
                 args = [
-                    self._evaluate_expression(ast.unparse(arg)) for arg in stmt.value.args
+                    self._evaluate_expression(ast.unparse(arg))
+                    for arg in stmt.value.args
                 ]
                 self._execute_function(func_name, *args)
             elif isinstance(stmt, ast.For):
                 iter_var = stmt.target.id
-                if isinstance(stmt.iter, ast.Call) and stmt.iter.func.id == "range":
-                    start, end = [
+                if (
+                    isinstance(stmt.iter, ast.Call)
+                    and stmt.iter.func.id == "range"
+                ):
+                    start, end = (
                         self._evaluate_expression(ast.unparse(arg))
                         for arg in stmt.iter.args
-                    ]
+                    )
                     for i in range(start, end):
                         self.variables[iter_var] = i
                         for body_stmt in stmt.body:
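Note: the last change in this hunk replaces a list comprehension with a generator expression in the start, end unpacking; for a two-element unpack the result is identical, it just skips building the intermediate list. A standalone sketch (int() stands in for _evaluate_expression here):

import ast

stmt = ast.parse("for i in range(2, 7): pass").body[0]

# Unpacking works the same from a generator expression as from a list.
start, end = (int(ast.unparse(arg)) for arg in stmt.iter.args)
assert (start, end) == (2, 7)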
lionagi/integrations/bridge/autogen_/autogen_.py
@@ -18,14 +18,16 @@ def get_ipython_user_proxy():
             super().__init__(name, **kwargs)
             self._ipython = get_ipython()
 
-        def generate_init_message(self, *args, **kwargs) -> Union[str, Dict]:
+        def generate_init_message(self, *args, **kwargs) -> str | dict:
             return (
                 super().generate_init_message(*args, **kwargs)
                 + """If you suggest code, the code will be executed in IPython."""
             )
 
         def run_code(self, code, **kwargs):
-            result = self._ipython.run_cell("%%capture --no-display cap\n" + code)
+            result = self._ipython.run_cell(
+                "%%capture --no-display cap\n" + code
+            )
             log = self._ipython.ev("cap.stdout")
             log += self._ipython.ev("cap.stderr")
             if result.result is not None:
@@ -60,7 +62,9 @@ def get_autogen_coder(
         SysUtil.check_import("autogen", pip_name="pyautogen")
 
         import autogen
-        from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
+        from autogen.agentchat.contrib.gpt_assistant_agent import (
+            GPTAssistantAgent,
+        )
     except Exception as e:
         raise ImportError(f"Please install autogen. {e}")
 
lionagi/integrations/bridge/langchain_/documents.py
@@ -1,4 +1,5 @@
-from typing import Any, Callable, Dict, List, TypeVar, Union
+from collections.abc import Callable
+from typing import Any, Dict, List, TypeVar, Union
 
 from lionfuncs import check_import
 
@@ -39,9 +40,9 @@ def to_langchain_document(datanode: T, **kwargs: Any) -> Any:
 
 
 def langchain_loader(
-    loader: Union[str, Callable],
-    loader_args: List[Any] = [],
-    loader_kwargs: Dict[str, Any] = {},
+    loader: str | Callable,
+    loader_args: list[Any] = [],
+    loader_kwargs: dict[str, Any] = {},
 ) -> Any:
     """
     Initializes and uses a specified loader to load data within the Langchain ecosystem.
@@ -67,7 +68,9 @@ def langchain_loader(
     """
 
     document_loaders = check_import(
-        "langchain_community", module_name="document_loaders", pip_name="langchain"
+        "langchain_community",
+        module_name="document_loaders",
+        pip_name="langchain",
     )
 
     try:
@@ -87,11 +90,11 @@ def langchain_loader(
 
 
 def langchain_text_splitter(
-    data: Union[str, List],
-    splitter: Union[str, Callable],
-    splitter_args: List[Any] = None,
-    splitter_kwargs: Dict[str, Any] = None,
-) -> List[str]:
+    data: str | list,
+    splitter: str | Callable,
+    splitter_args: list[Any] = None,
+    splitter_kwargs: dict[str, Any] = None,
+) -> list[str]:
     """
     Splits text or a list of texts using a specified Langchain text splitter.
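Note: the new langchain_text_splitter signature keeps None defaults that the list[Any] / dict[str, Any] annotations don't admit, and langchain_loader above keeps mutable [] / {} defaults; this diff changes neither. For comparison, the usual sentinel pattern looks like this (a sketch, not lionagi's code):

from collections.abc import Callable
from typing import Any


def split_text(
    data: str | list,
    splitter: str | Callable,
    splitter_args: list[Any] | None = None,
    splitter_kwargs: dict[str, Any] | None = None,
) -> list[str]:
    # Fresh containers per call avoid the shared-mutable-default pitfall.
    splitter_args = splitter_args or []
    splitter_kwargs = splitter_kwargs or {}
    return []  # splitting logic omitted in this sketch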