lionagi 0.2.11__py3-none-any.whl → 0.3.0__py3-none-any.whl
- lionagi/core/action/function_calling.py +13 -6
- lionagi/core/action/tool.py +10 -9
- lionagi/core/action/tool_manager.py +18 -9
- lionagi/core/agent/README.md +1 -1
- lionagi/core/agent/base_agent.py +5 -2
- lionagi/core/agent/eval/README.md +1 -1
- lionagi/core/collections/README.md +1 -1
- lionagi/core/collections/_logger.py +16 -6
- lionagi/core/collections/abc/README.md +1 -1
- lionagi/core/collections/abc/component.py +35 -11
- lionagi/core/collections/abc/concepts.py +5 -3
- lionagi/core/collections/abc/exceptions.py +3 -1
- lionagi/core/collections/flow.py +16 -5
- lionagi/core/collections/model.py +34 -8
- lionagi/core/collections/pile.py +65 -28
- lionagi/core/collections/progression.py +1 -2
- lionagi/core/collections/util.py +11 -2
- lionagi/core/director/README.md +1 -1
- lionagi/core/engine/branch_engine.py +35 -10
- lionagi/core/engine/instruction_map_engine.py +14 -5
- lionagi/core/engine/sandbox_.py +3 -1
- lionagi/core/engine/script_engine.py +6 -2
- lionagi/core/executor/base_executor.py +10 -3
- lionagi/core/executor/graph_executor.py +12 -4
- lionagi/core/executor/neo4j_executor.py +18 -6
- lionagi/core/generic/edge.py +7 -2
- lionagi/core/generic/graph.py +23 -7
- lionagi/core/generic/node.py +14 -5
- lionagi/core/generic/tree_node.py +5 -1
- lionagi/core/mail/mail_manager.py +3 -1
- lionagi/core/mail/package.py +3 -1
- lionagi/core/message/action_request.py +9 -2
- lionagi/core/message/action_response.py +9 -3
- lionagi/core/message/instruction.py +8 -2
- lionagi/core/message/util.py +15 -5
- lionagi/core/report/base.py +12 -7
- lionagi/core/report/form.py +7 -4
- lionagi/core/report/report.py +10 -3
- lionagi/core/report/util.py +3 -1
- lionagi/core/rule/action.py +4 -1
- lionagi/core/rule/base.py +17 -6
- lionagi/core/rule/rulebook.py +8 -4
- lionagi/core/rule/string.py +3 -1
- lionagi/core/session/branch.py +15 -4
- lionagi/core/session/session.py +6 -2
- lionagi/core/unit/parallel_unit.py +9 -3
- lionagi/core/unit/template/action.py +1 -1
- lionagi/core/unit/template/predict.py +3 -1
- lionagi/core/unit/template/select.py +5 -3
- lionagi/core/unit/unit.py +4 -2
- lionagi/core/unit/unit_form.py +13 -15
- lionagi/core/unit/unit_mixin.py +45 -27
- lionagi/core/unit/util.py +7 -3
- lionagi/core/validator/validator.py +28 -15
- lionagi/core/work/work_edge.py +7 -3
- lionagi/core/work/work_task.py +11 -5
- lionagi/core/work/worker.py +20 -5
- lionagi/core/work/worker_engine.py +6 -2
- lionagi/core/work/worklog.py +3 -1
- lionagi/experimental/compressor/llm_compressor.py +20 -5
- lionagi/experimental/directive/README.md +1 -1
- lionagi/experimental/directive/parser/base_parser.py +41 -14
- lionagi/experimental/directive/parser/base_syntax.txt +23 -23
- lionagi/experimental/directive/template/base_template.py +14 -6
- lionagi/experimental/directive/tokenizer.py +3 -1
- lionagi/experimental/evaluator/README.md +1 -1
- lionagi/experimental/evaluator/ast_evaluator.py +6 -2
- lionagi/experimental/evaluator/base_evaluator.py +27 -16
- lionagi/integrations/bridge/autogen_/autogen_.py +7 -3
- lionagi/integrations/bridge/langchain_/documents.py +13 -10
- lionagi/integrations/bridge/llamaindex_/llama_pack.py +36 -12
- lionagi/integrations/bridge/llamaindex_/node_parser.py +8 -3
- lionagi/integrations/bridge/llamaindex_/reader.py +3 -1
- lionagi/integrations/bridge/llamaindex_/textnode.py +9 -3
- lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +7 -1
- lionagi/integrations/bridge/transformers_/install_.py +3 -1
- lionagi/integrations/chunker/chunk.py +5 -2
- lionagi/integrations/loader/load.py +7 -3
- lionagi/integrations/loader/load_util.py +35 -16
- lionagi/integrations/provider/oai.py +13 -4
- lionagi/integrations/provider/openrouter.py +13 -4
- lionagi/integrations/provider/services.py +3 -1
- lionagi/integrations/provider/transformers.py +5 -3
- lionagi/integrations/storage/neo4j.py +23 -7
- lionagi/integrations/storage/storage_util.py +23 -7
- lionagi/integrations/storage/structure_excel.py +7 -2
- lionagi/integrations/storage/to_csv.py +8 -2
- lionagi/integrations/storage/to_excel.py +11 -3
- lionagi/libs/ln_api.py +41 -19
- lionagi/libs/ln_context.py +4 -4
- lionagi/libs/ln_convert.py +35 -14
- lionagi/libs/ln_dataframe.py +9 -3
- lionagi/libs/ln_func_call.py +53 -18
- lionagi/libs/ln_image.py +9 -5
- lionagi/libs/ln_knowledge_graph.py +21 -7
- lionagi/libs/ln_nested.py +57 -16
- lionagi/libs/ln_parse.py +45 -15
- lionagi/libs/ln_queue.py +8 -3
- lionagi/libs/ln_tokenize.py +19 -6
- lionagi/libs/ln_validate.py +14 -3
- lionagi/libs/sys_util.py +44 -12
- lionagi/lions/coder/coder.py +24 -8
- lionagi/lions/coder/util.py +6 -2
- lionagi/lions/researcher/data_source/google_.py +12 -4
- lionagi/lions/researcher/data_source/wiki_.py +3 -1
- lionagi/version.py +1 -1
- {lionagi-0.2.11.dist-info → lionagi-0.3.0.dist-info}/METADATA +6 -7
- lionagi-0.3.0.dist-info/RECORD +226 -0
- lionagi/tests/__init__.py +0 -0
- lionagi/tests/api/__init__.py +0 -0
- lionagi/tests/api/aws/__init__.py +0 -0
- lionagi/tests/api/aws/conftest.py +0 -25
- lionagi/tests/api/aws/test_aws_s3.py +0 -6
- lionagi/tests/integrations/__init__.py +0 -0
- lionagi/tests/libs/__init__.py +0 -0
- lionagi/tests/libs/test_api.py +0 -48
- lionagi/tests/libs/test_convert.py +0 -89
- lionagi/tests/libs/test_field_validators.py +0 -354
- lionagi/tests/libs/test_func_call.py +0 -701
- lionagi/tests/libs/test_nested.py +0 -382
- lionagi/tests/libs/test_parse.py +0 -171
- lionagi/tests/libs/test_queue.py +0 -68
- lionagi/tests/libs/test_sys_util.py +0 -222
- lionagi/tests/test_core/__init__.py +0 -0
- lionagi/tests/test_core/collections/__init__.py +0 -0
- lionagi/tests/test_core/collections/test_component.py +0 -208
- lionagi/tests/test_core/collections/test_exchange.py +0 -139
- lionagi/tests/test_core/collections/test_flow.py +0 -146
- lionagi/tests/test_core/collections/test_pile.py +0 -172
- lionagi/tests/test_core/collections/test_progression.py +0 -130
- lionagi/tests/test_core/generic/__init__.py +0 -0
- lionagi/tests/test_core/generic/test_edge.py +0 -69
- lionagi/tests/test_core/generic/test_graph.py +0 -97
- lionagi/tests/test_core/generic/test_node.py +0 -107
- lionagi/tests/test_core/generic/test_structure.py +0 -194
- lionagi/tests/test_core/generic/test_tree_node.py +0 -74
- lionagi/tests/test_core/graph/__init__.py +0 -0
- lionagi/tests/test_core/graph/test_graph.py +0 -71
- lionagi/tests/test_core/graph/test_tree.py +0 -76
- lionagi/tests/test_core/mail/__init__.py +0 -0
- lionagi/tests/test_core/mail/test_mail.py +0 -98
- lionagi/tests/test_core/test_branch.py +0 -116
- lionagi/tests/test_core/test_form.py +0 -47
- lionagi/tests/test_core/test_report.py +0 -106
- lionagi/tests/test_core/test_structure/__init__.py +0 -0
- lionagi/tests/test_core/test_structure/test_base_structure.py +0 -198
- lionagi/tests/test_core/test_structure/test_graph.py +0 -55
- lionagi/tests/test_core/test_structure/test_tree.py +0 -49
- lionagi/tests/test_core/test_validator.py +0 -112
- lionagi-0.2.11.dist-info/RECORD +0 -267
- {lionagi-0.2.11.dist-info → lionagi-0.3.0.dist-info}/LICENSE +0 -0
- {lionagi-0.2.11.dist-info → lionagi-0.3.0.dist-info}/WHEEL +0 -0
lionagi/experimental/directive/parser/base_parser.py

@@ -21,10 +21,10 @@ class BaseDirectiveParser:
         BaseToken(KEYWORD, IF)
     """
 
-    def __init__(self, tokens:
+    def __init__(self, tokens: list[BaseToken]):
         self.tokens = tokens
         self.current_token_index = -1
-        self.current_token:
+        self.current_token: BaseToken | None = None
         self.next_token()
 
     def next_token(self) -> None:
@@ -50,13 +50,15 @@ class BaseDirectiveParser:
         else:
             return None
 
-    def skip_until(self, token_types:
+    def skip_until(self, token_types: list[str]) -> None:
         """Skips tokens until a token of the specified type is found.
 
        Args:
            token_types (List[str]): A list of token types to stop skipping.
        """
-        while
+        while (
+            self.current_token and self.current_token.type not in token_types
+        ):
            self.next_token()
 
    def mark(self) -> int:
@@ -108,10 +110,15 @@ class BaseDirectiveParser:
        """
        block = []
        # Parse the block until 'ELSE', 'ENDIF', ensuring not to include semicolons as part of the block
-        while self.current_token and self.current_token.value not in (
+        while self.current_token and self.current_token.value not in (
+            "ENDIF",
+            "ELSE",
+        ):
            if self.current_token.value == "DO":
                self.next_token()  # Move past 'DO' to get to the action
-                block.append(
+                block.append(
+                    self.current_token.value
+                )  # Add the action to the block
            self.next_token()  # Move to the next token, which could be a semicolon or the next action
            if self.current_token.value == ";":
                self.next_token()  # Move past the semicolon
@@ -126,11 +133,16 @@ class BaseDirectiveParser:
        Raises:
            SyntaxError: If the IF statement is not properly formed.
        """
-        if
+        if (
+            self.current_token.type != "KEYWORD"
+            or self.current_token.value != "IF"
+        ):
            raise SyntaxError("Expected IF statement")
        self.next_token()  # Skip 'IF'
 
-        condition =
+        condition = (
+            self.parse_expression()
+        )  # Now properly ends after the semicolon
 
        true_block = []
        if self.current_token.value == "DO":
@@ -156,7 +168,10 @@ class BaseDirectiveParser:
        Raises:
            SyntaxError: If the FOR statement is not properly formed.
        """
-        if
+        if (
+            self.current_token.type != "KEYWORD"
+            or self.current_token.value != "FOR"
+        ):
            raise SyntaxError("Expected FOR statement")
        self.next_token()  # Skip 'FOR'
 
@@ -167,7 +182,10 @@ class BaseDirectiveParser:
        self.next_token()  # Move past the iterator variable
 
        # Expect and skip 'IN' keyword
-        if
+        if (
+            self.current_token.type != "KEYWORD"
+            or self.current_token.value != "IN"
+        ):
            raise SyntaxError("Expected 'IN' after iterator variable")
        self.next_token()  # Move past 'IN'
 
@@ -194,7 +212,9 @@ class BaseDirectiveParser:
        if self.current_token and self.current_token.value == "DO":
            self.next_token()
 
-        while self.current_token and self.current_token.value not in (
+        while self.current_token and self.current_token.value not in (
+            "ENDFOR",
+        ):
            if self.current_token.value == ";":
                # If a semicolon is encountered, skip it and move to the next token
                self.next_token()
@@ -217,11 +237,16 @@ class BaseDirectiveParser:
        Raises:
            SyntaxError: If the TRY statement is not properly formed.
        """
-        if
+        if (
+            self.current_token.type != "KEYWORD"
+            or self.current_token.value != "TRY"
+        ):
            raise SyntaxError("Expected TRY statement")
        self.next_token()  # Skip 'TRY'
 
-        try_block = self.parse_try_block(
+        try_block = self.parse_try_block(
+            "EXCEPT"
+        )  # Parse the try block until 'EXCEPT'
 
        # Now expecting 'EXCEPT' keyword
        if not (self.current_token and self.current_token.value == "EXCEPT"):
@@ -256,7 +281,9 @@ class BaseDirectiveParser:
                self.next_token()  # Move past the semicolon
                continue  # Skip adding ';' to the block
            else:
-                block.append(
+                block.append(
+                    self.current_token.value
+                )  # Add the action to the block
            self.next_token()
 
        return block
lionagi/experimental/directive/parser/base_syntax.txt
(the removed and added lines below differ only in whitespace)

@@ -24,11 +24,11 @@ ELIF
 ELSE
 
 # loop
-FOR
+FOR
 IN
 
 # EXCEPTION
-TRY
+TRY
 EXCEPT
 
 """
@@ -54,33 +54,33 @@ tools 1,2,3
 
 def main(): <-> compose
 
-If condition1 && condition2 || !condition3;
+If condition1 && condition2 || !condition3;
 DO tool1(param1, param2);
 ENDIF;
-
+
 IF must follow condition, (condition parsing logic);
 DO ....;
 ENDIF;
-
-
-
-
-THEN;
+
+
+
+
+THEN;
 DO tool2(param1, param2);
-
+
 THEN;
-
-TRY; DO ACTION_C;
-EXCEPT; DO ACTION_D;
-ENDEXCEPT;
-ENDTRY;
+
+TRY; DO ACTION_C;
+EXCEPT; DO ACTION_D;
+ENDEXCEPT;
+ENDTRY;
 
 RETURN <-> run
 
 
 
 
-COMPOSE; - indicate the beginning of a script-like block to execute
+COMPOSE; - indicate the beginning of a script-like block to execute
 
 THEN; - indicate continuation of a script-like block, another statement to execute
 
@@ -91,7 +91,7 @@ XX; ; END XX;
 
 
 
-TRY; DO ACTION_A; END TRY;
+TRY; DO ACTION_A; END TRY;
 
 
 
@@ -108,15 +108,15 @@ THEN TRY; DO ACTION_C ENDTRY;
 
 THEN IF EXCEPT; IF CONDITION_C; DO ACTION_D; ENDEXCEPT
 
-;
+;
 THEN FOR ITEM IN COLLECTION; DO ACTION_E(input_.param1, input_.param2); ENDFOR
 RUN;
 
 
 
-DO ACTION B;
+DO ACTION B;
 
-IF
+IF
 THEN
 
 GROUP
@@ -150,7 +150,7 @@ FOR input_ IN collections DO action2(input_.param1, input_.param2); ENDFOR; END;
 
 example 2:
 BEGIN; IF condition1; TRY DO action1(param1, param2); EXCEPT DO action2(param3, param4);
-ENDTRY; ELIF condition2; DO action2(param1, param2); ELSE DO action3(param1, param2);
+ENDTRY; ELIF condition2; DO action2(param1, param2); ELSE DO action3(param1, param2);
 ENDIF; END;
 
 """
@@ -180,7 +180,7 @@ CHAT / REACT
 DO 1,2
 session = Li.Session(..., tools=tools)
 await alcall(session.branches, func_1)
-
+
 
 THEN DO 3,4
 session.chat.py(3,4)
@@ -197,4 +197,4 @@ run 1,2,3,4,5
 
 1->2, or 1->3, or 1->4, THEN....
 
-"""
+"""
lionagi/experimental/directive/template/base_template.py

@@ -11,9 +11,11 @@ class DirectiveTemplate:
        self.template_str = template_str
        self.evaluator = BaseEvaluator()
 
-    def _render_conditionals(self, context:
+    def _render_conditionals(self, context: dict[str, Any]) -> str:
        """Processes conditional statements with improved logic and support for 'else'."""
-        pattern = re.compile(
+        pattern = re.compile(
+            r"\{if (.*?)\}(.*?)\{else\}(.*?)\{endif\}", re.DOTALL
+        )
 
        def evaluate_condition(match):
            condition, if_text, else_text = match.groups()
@@ -24,9 +26,11 @@ class DirectiveTemplate:
 
        return pattern.sub(evaluate_condition, self.template_str)
 
-    def _render_loops(self, template: str, context:
+    def _render_loops(self, template: str, context: dict[str, Any]) -> str:
        """Processes loop statements within the template."""
-        loop_pattern = re.compile(
+        loop_pattern = re.compile(
+            r"\{for (\w+) in (\w+)\}(.*?)\{endfor\}", re.DOTALL
+        )
 
        def render_loop(match):
            iterator_var, collection_name, loop_body = match.groups()
@@ -46,7 +50,9 @@ class DirectiveTemplate:
 
        return loop_pattern.sub(render_loop, template)
 
-    def fill(
+    def fill(
+        self, template_str: str = "", context: dict[str, Any] = {}
+    ) -> str:
        """Fills the template with values from context after processing conditionals and loops."""
        if not template_str:  # Use the instance's template if not provided
            template_str = self.template_str
@@ -54,7 +60,9 @@ class DirectiveTemplate:
        # First, process conditionals with 'else'
        template_with_conditionals = self._render_conditionals(template_str)
        # Then, process loops
-        template_with_loops = self._render_loops(
+        template_with_loops = self._render_loops(
+            template_with_conditionals, context
+        )
        # Finally, substitute the placeholders with context values
        try:
            return template_with_loops.format(**context)
lionagi/experimental/directive/tokenizer.py

@@ -44,7 +44,9 @@ class BaseTokenizer:
                position = match.end()  # Move past the matched token
                break
        if not match:  # No match found, unrecognized token
-            raise SyntaxError(
+            raise SyntaxError(
+                f"Unexpected character: {self.script[position]}"
+            )
            # break
 
    def get_tokens(self):
lionagi/experimental/evaluator/README.md
(whitespace-only change)

@@ -1 +1 @@
-TODO
+TODO
lionagi/experimental/evaluator/ast_evaluator.py

@@ -26,7 +26,9 @@ class ASTEvaluator:
            tree = ast.parse(expression, mode="eval")
            return self._evaluate_node(tree.body, context)
        except Exception as e:
-            raise ValueError(
+            raise ValueError(
+                f"Failed to evaluate expression: {expression}. Error: {e}"
+            )
 
    def _evaluate_node(self, node, context):
        if isinstance(node, ast.Compare):
@@ -104,7 +106,9 @@ class ASTEvaluationEngine:
            value_expr = ast.unparse(stmt.value)
            value = self._evaluate_expression(value_expr)
            self._assign_variable(var_name, value)
-        elif isinstance(stmt, ast.Expr) and isinstance(
+        elif isinstance(stmt, ast.Expr) and isinstance(
+            stmt.value, ast.Call
+        ):
            func_name = stmt.value.func.id
            arg_expr = ast.unparse(stmt.value.args[0])
            arg = self._evaluate_expression(arg_expr)
lionagi/experimental/evaluator/base_evaluator.py

@@ -1,6 +1,7 @@
 import ast
 import operator
-from
+from collections.abc import Callable
+from typing import Any, Dict, Tuple
 
 from lionagi.libs.ln_convert import to_dict
 
@@ -16,7 +17,7 @@ class BaseEvaluator:
 
    def __init__(self) -> None:
        """Initializes the evaluator with supported operators and an empty cache."""
-        self.allowed_operators:
+        self.allowed_operators: dict[type, Any] = {
            ast.Add: operator.add,
            ast.Sub: operator.sub,
            ast.Mult: operator.mul,
@@ -34,9 +35,9 @@ class BaseEvaluator:
            ast.Not: operator.not_,
            ast.USub: operator.neg,
        }
-        self.cache:
+        self.cache: dict[tuple[str, tuple], Any] = {}
 
-    def evaluate(self, expression: str, context:
+    def evaluate(self, expression: str, context: dict[str, Any]) -> Any:
        """
        Evaluates a given expression string using the provided context.
 
@@ -60,9 +61,11 @@ class BaseEvaluator:
            self.cache[cache_key] = result
            return result
        except Exception as e:
-            raise ValueError(
+            raise ValueError(
+                f"Failed to evaluate expression: {expression}. Error: {e}"
+            )
 
-    def _evaluate_node(self, node: ast.AST, context:
+    def _evaluate_node(self, node: ast.AST, context: dict[str, Any]) -> Any:
        """Recursively evaluates an AST node."""
        if isinstance(node, ast.BinOp):
            left = self._evaluate_node(node.left, context)
@@ -87,7 +90,9 @@ class BaseEvaluator:
                    break
                left = right
        elif isinstance(node, ast.BoolOp):
-            values = [
+            values = [
+                self._evaluate_node(value, context) for value in node.values
+            ]
            if isinstance(node.op, ast.And):
                result = all(values)
            elif isinstance(node.op, ast.Or):
@@ -104,12 +109,14 @@ class BaseEvaluator:
        if custom_node_class not in self.allowed_operators:
            self.allowed_operators[custom_node_class] = operation_func
        else:
-            raise ValueError(
+            raise ValueError(
+                f"Custom operator '{operator_name}' is already defined."
+            )
 
    def evaluate_file(self, file_path, context, format="line"):
        """Evaluates expressions from a file."""
        if format == "line":
-            with open(file_path
+            with open(file_path) as file:
                last_result = None
                for line in file:
                    line = line.strip()
@@ -117,7 +124,7 @@ class BaseEvaluator:
                    last_result = self.evaluate(line, context)
                return last_result
        elif format == "json":
-            with open(file_path
+            with open(file_path) as file:
                data = to_dict(file)
                last_result = None
                for expression in data:
@@ -153,8 +160,8 @@ class BaseEvaluator:
 
 class BaseEvaluationEngine:
    def __init__(self) -> None:
-        self.variables:
-        self.functions:
+        self.variables: dict[str, Any] = {}
+        self.functions: dict[str, Callable] = {
            "print": print,
        }
 
@@ -181,16 +188,20 @@ class BaseEvaluationEngine:
        elif isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Call):
            func_name = stmt.value.func.id
            args = [
-                self._evaluate_expression(ast.unparse(arg))
+                self._evaluate_expression(ast.unparse(arg))
+                for arg in stmt.value.args
            ]
            self._execute_function(func_name, *args)
        elif isinstance(stmt, ast.For):
            iter_var = stmt.target.id
-            if
-
+            if (
+                isinstance(stmt.iter, ast.Call)
+                and stmt.iter.func.id == "range"
+            ):
+                start, end = (
                    self._evaluate_expression(ast.unparse(arg))
                    for arg in stmt.iter.args
-
+                )
            for i in range(start, end):
                self.variables[iter_var] = i
                for body_stmt in stmt.body:
lionagi/integrations/bridge/autogen_/autogen_.py

@@ -18,14 +18,16 @@ def get_ipython_user_proxy():
            super().__init__(name, **kwargs)
            self._ipython = get_ipython()
 
-        def generate_init_message(self, *args, **kwargs) ->
+        def generate_init_message(self, *args, **kwargs) -> str | dict:
            return (
                super().generate_init_message(*args, **kwargs)
                + """If you suggest code, the code will be executed in IPython."""
            )
 
        def run_code(self, code, **kwargs):
-            result = self._ipython.run_cell(
+            result = self._ipython.run_cell(
+                "%%capture --no-display cap\n" + code
+            )
            log = self._ipython.ev("cap.stdout")
            log += self._ipython.ev("cap.stderr")
            if result.result is not None:
@@ -60,7 +62,9 @@ def get_autogen_coder(
        SysUtil.check_import("autogen", pip_name="pyautogen")
 
        import autogen
-        from autogen.agentchat.contrib.gpt_assistant_agent import
+        from autogen.agentchat.contrib.gpt_assistant_agent import (
+            GPTAssistantAgent,
+        )
    except Exception as e:
        raise ImportError(f"Please install autogen. {e}")
 
lionagi/integrations/bridge/langchain_/documents.py

@@ -1,4 +1,5 @@
-from
+from collections.abc import Callable
+from typing import Any, Dict, List, TypeVar, Union
 
 from lionfuncs import check_import
 
@@ -39,9 +40,9 @@ def to_langchain_document(datanode: T, **kwargs: Any) -> Any:
 
 
 def langchain_loader(
-    loader:
-    loader_args:
-    loader_kwargs:
+    loader: str | Callable,
+    loader_args: list[Any] = [],
+    loader_kwargs: dict[str, Any] = {},
 ) -> Any:
    """
    Initializes and uses a specified loader to load data within the Langchain ecosystem.
@@ -67,7 +68,9 @@ def langchain_loader(
    """
 
    document_loaders = check_import(
-        "langchain_community",
+        "langchain_community",
+        module_name="document_loaders",
+        pip_name="langchain",
    )
 
    try:
@@ -87,11 +90,11 @@ def langchain_loader(
 
 
 def langchain_text_splitter(
-    data:
-    splitter:
-    splitter_args:
-    splitter_kwargs:
-) ->
+    data: str | list,
+    splitter: str | Callable,
+    splitter_args: list[Any] = None,
+    splitter_kwargs: dict[str, Any] = None,
+) -> list[str]:
    """
    Splits text or a list of texts using a specified Langchain text splitter.
 
lionagi/integrations/bridge/llamaindex_/llama_pack.py

@@ -20,7 +20,9 @@ class LlamaPack:
        return pack(*args, **kwargs)
 
    @staticmethod
-    def stock_market_pack(
+    def stock_market_pack(
+        pack_path="./stock_market_data_pack", args=[], **kwargs
+    ):
        name_ = "StockMarketDataQueryEnginePack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
@@ -32,7 +34,9 @@ class LlamaPack:
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def rag_evaluator_pack(
+    def rag_evaluator_pack(
+        pack_path="./rag_evaluator_pack", args=[], **kwargs
+    ):
        name_ = "RagEvaluatorPack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
@@ -49,17 +53,23 @@ class LlamaPack:
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def resume_screener_pack(
+    def resume_screener_pack(
+        pack_path="./resume_screener_pack", args=[], **kwargs
+    ):
        name_ = "ResumeScreenerPack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def ragatouille_retriever_pack(
+    def ragatouille_retriever_pack(
+        pack_path="./ragatouille_pack", args=[], **kwargs
+    ):
        name_ = "RAGatouilleRetrieverPack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def chain_of_table_pack(
+    def chain_of_table_pack(
+        pack_path="./chain_of_table_pack", args=[], **kwargs
+    ):
        name_ = "ChainOfTablePack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
@@ -152,7 +162,9 @@ class LlamaPack:
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def llama_guard_moderator_pack(
+    def llama_guard_moderator_pack(
+        pack_path="./llamaguard_pack", args=[], **kwargs
+    ):
        name_ = "LlamaGuardModeratorPack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
@@ -178,7 +190,9 @@ class LlamaPack:
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def multi_tenancy_rag_pack(
+    def multi_tenancy_rag_pack(
+        pack_path="./multitenancy_rag_pack", args=[], **kwargs
+    ):
        name_ = "MultiTenancyRAGPack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
@@ -188,12 +202,16 @@ class LlamaPack:
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def snowflake_query_engine_pack(
+    def snowflake_query_engine_pack(
+        pack_path="./snowflake_pack", args=[], **kwargs
+    ):
        name_ = "SnowflakeQueryEnginePack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def agent_search_retriever_pack(
+    def agent_search_retriever_pack(
+        pack_path="./agent_search_pack", args=[], **kwargs
+    ):
        name_ = "AgentSearchRetrieverPack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
@@ -203,12 +221,16 @@ class LlamaPack:
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def chroma_autoretrieval_pack(
+    def chroma_autoretrieval_pack(
+        pack_path="./chroma_pack", args=[], **kwargs
+    ):
        name_ = "ChromaAutoretrievalPack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def arize_phoenix_query_engine_pack(
+    def arize_phoenix_query_engine_pack(
+        pack_path="./arize_pack", args=[], **kwargs
+    ):
        name_ = "ArizePhoenixQueryEnginePack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
@@ -227,6 +249,8 @@ class LlamaPack:
        return LlamaPack.build(name_, pack_path, args, **kwargs)
 
    @staticmethod
-    def weaviate_retry_engine_pack(
+    def weaviate_retry_engine_pack(
+        pack_path="./weaviate_pack", args=[], **kwargs
+    ):
        name_ = "WeaviateRetryEnginePack"
        return LlamaPack.build(name_, pack_path, args, **kwargs)
lionagi/integrations/bridge/llamaindex_/node_parser.py

@@ -33,7 +33,9 @@ def get_llama_index_node_parser(node_parser: Any):
        pip_name="llama-index",
    )
 
-    if not isinstance(node_parser, str) and not issubclass(
+    if not isinstance(node_parser, str) and not issubclass(
+        node_parser, NodeParser
+    ):
        raise TypeError("node_parser must be a string or NodeParser.")
 
    if isinstance(node_parser, str):
@@ -41,11 +43,14 @@ def get_llama_index_node_parser(node_parser: Any):
        SysUtil.check_import("tree_sitter_languages")
 
        try:
-            node_module = import_module(
+            node_module = import_module(
+                "llama_index.core", module_name="node_parser"
+            )
            return getattr(node_module, node_parser)
        except Exception as e:
            raise AttributeError(
-                f"llama_index_core has no such attribute:"
+                f"llama_index_core has no such attribute:"
+                f" {node_parser}, Error: {e}"
            ) from e
 
    elif isinstance(node_parser, NodeParser):