lionagi 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. lionagi/core/agent/base_agent.py +2 -3
  2. lionagi/core/branch/base.py +1 -1
  3. lionagi/core/branch/branch.py +2 -1
  4. lionagi/core/branch/flow_mixin.py +1 -1
  5. lionagi/core/branch/util.py +1 -1
  6. lionagi/core/execute/base_executor.py +1 -4
  7. lionagi/core/execute/branch_executor.py +66 -3
  8. lionagi/core/execute/instruction_map_executor.py +48 -0
  9. lionagi/core/execute/neo4j_executor.py +381 -0
  10. lionagi/core/execute/structure_executor.py +99 -3
  11. lionagi/core/flow/monoflow/ReAct.py +18 -18
  12. lionagi/core/flow/monoflow/chat_mixin.py +1 -1
  13. lionagi/core/flow/monoflow/followup.py +11 -12
  14. lionagi/core/flow/polyflow/__init__.py +1 -1
  15. lionagi/core/generic/component.py +0 -2
  16. lionagi/core/generic/condition.py +1 -1
  17. lionagi/core/generic/edge.py +52 -0
  18. lionagi/core/mail/mail_manager.py +3 -2
  19. lionagi/core/session/session.py +1 -1
  20. lionagi/experimental/__init__.py +0 -0
  21. lionagi/experimental/directive/__init__.py +0 -0
  22. lionagi/experimental/directive/evaluator/__init__.py +0 -0
  23. lionagi/experimental/directive/evaluator/ast_evaluator.py +115 -0
  24. lionagi/experimental/directive/evaluator/base_evaluator.py +202 -0
  25. lionagi/experimental/directive/evaluator/sandbox_.py +14 -0
  26. lionagi/experimental/directive/evaluator/script_engine.py +83 -0
  27. lionagi/experimental/directive/parser/__init__.py +0 -0
  28. lionagi/experimental/directive/parser/base_parser.py +215 -0
  29. lionagi/experimental/directive/schema.py +36 -0
  30. lionagi/experimental/directive/template_/__init__.py +0 -0
  31. lionagi/experimental/directive/template_/base_template.py +63 -0
  32. lionagi/experimental/tool/__init__.py +0 -0
  33. lionagi/experimental/tool/function_calling.py +43 -0
  34. lionagi/experimental/tool/manual.py +66 -0
  35. lionagi/experimental/tool/schema.py +59 -0
  36. lionagi/experimental/tool/tool_manager.py +138 -0
  37. lionagi/experimental/tool/util.py +16 -0
  38. lionagi/experimental/work/__init__.py +0 -0
  39. lionagi/experimental/work/_logger.py +25 -0
  40. lionagi/experimental/work/exchange.py +0 -0
  41. lionagi/experimental/work/schema.py +30 -0
  42. lionagi/experimental/work/tests.py +72 -0
  43. lionagi/experimental/work/util.py +0 -0
  44. lionagi/experimental/work/work_function.py +89 -0
  45. lionagi/experimental/work/worker.py +12 -0
  46. lionagi/integrations/bridge/autogen_/__init__.py +0 -0
  47. lionagi/integrations/bridge/autogen_/autogen_.py +124 -0
  48. lionagi/integrations/bridge/llamaindex_/get_index.py +294 -0
  49. lionagi/integrations/bridge/llamaindex_/llama_pack.py +227 -0
  50. lionagi/integrations/bridge/transformers_/__init__.py +0 -0
  51. lionagi/integrations/bridge/transformers_/install_.py +36 -0
  52. lionagi/integrations/config/oai_configs.py +1 -1
  53. lionagi/integrations/config/ollama_configs.py +1 -1
  54. lionagi/integrations/config/openrouter_configs.py +1 -1
  55. lionagi/integrations/storage/__init__.py +3 -0
  56. lionagi/integrations/storage/neo4j.py +673 -0
  57. lionagi/integrations/storage/storage_util.py +289 -0
  58. lionagi/integrations/storage/to_csv.py +63 -0
  59. lionagi/integrations/storage/to_excel.py +67 -0
  60. lionagi/libs/ln_knowledge_graph.py +405 -0
  61. lionagi/libs/ln_queue.py +101 -0
  62. lionagi/libs/ln_tokenizer.py +57 -0
  63. lionagi/libs/sys_util.py +1 -1
  64. lionagi/lions/__init__.py +0 -0
  65. lionagi/lions/coder/__init__.py +0 -0
  66. lionagi/lions/coder/add_feature.py +20 -0
  67. lionagi/lions/coder/base_prompts.py +22 -0
  68. lionagi/lions/coder/coder.py +121 -0
  69. lionagi/lions/coder/util.py +91 -0
  70. lionagi/lions/researcher/__init__.py +0 -0
  71. lionagi/lions/researcher/data_source/__init__.py +0 -0
  72. lionagi/lions/researcher/data_source/finhub_.py +191 -0
  73. lionagi/lions/researcher/data_source/google_.py +199 -0
  74. lionagi/lions/researcher/data_source/wiki_.py +96 -0
  75. lionagi/lions/researcher/data_source/yfinance_.py +21 -0
  76. lionagi/tests/libs/test_queue.py +67 -0
  77. lionagi/tests/test_core/test_branch.py +0 -1
  78. lionagi/version.py +1 -1
  79. {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/METADATA +1 -1
  80. {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/RECORD +83 -29
  81. {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/LICENSE +0 -0
  82. {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/WHEEL +0 -0
  83. {lionagi-0.1.0.dist-info → lionagi-0.1.1.dist-info}/top_level.txt +0 -0
lionagi/experimental/directive/evaluator/base_evaluator.py
@@ -0,0 +1,202 @@
+ import ast
+ import operator
+ from typing import Any, Dict, Tuple, Callable
+
+ from lionagi.libs.ln_convert import to_dict
+
+
+ class BaseEvaluator:
+     """
+     A class to evaluate mathematical and boolean expressions from strings using Python's AST.
+
+     Attributes:
+         allowed_operators (Dict[type, Any]): A dictionary mapping AST node types to their corresponding Python operator functions.
+         cache (Dict[Tuple[str, Tuple], Any]): A dictionary used to cache the results of evaluated expressions and sub-expressions.
+     """
+
+     def __init__(self) -> None:
+         """Initializes the evaluator with supported operators and an empty cache."""
+         self.allowed_operators: Dict[type, Any] = {
+             ast.Add: operator.add,
+             ast.Sub: operator.sub,
+             ast.Mult: operator.mul,
+             ast.Div: operator.truediv,
+             ast.Pow: operator.pow,
+             ast.Mod: operator.mod,
+             ast.Eq: operator.eq,
+             ast.NotEq: operator.ne,
+             ast.Lt: operator.lt,
+             ast.LtE: operator.le,
+             ast.Gt: operator.gt,
+             ast.GtE: operator.ge,
+             ast.And: lambda x, y: x and y,
+             ast.Or: lambda x, y: x or y,
+             ast.Not: operator.not_,
+             ast.USub: operator.neg,
+         }
+         self.cache: Dict[Tuple[str, Tuple], Any] = {}
+
+     def evaluate(self, expression: str, context: Dict[str, Any]) -> Any:
+         """
+         Evaluates a given expression string using the provided context.
+
+         Args:
+             expression (str): The expression to evaluate.
+             context (Dict[str, Any]): A dictionary mapping variable names to their values.
+
+         Returns:
+             Any: The result of the evaluated expression.
+
+         Raises:
+             ValueError: If the expression cannot be evaluated.
+         """
+         cache_key = (expression, tuple(sorted(context.items())))
+         if cache_key in self.cache:
+             return self.cache[cache_key]
+
+         try:
+             tree = ast.parse(expression, mode="eval")
+             result = self._evaluate_node(tree.body, context)
+             self.cache[cache_key] = result
+             return result
+         except Exception as e:
+             raise ValueError(f"Failed to evaluate expression: {expression}. Error: {e}")
+
+     def _evaluate_node(self, node: ast.AST, context: Dict[str, Any]) -> Any:
+         """Recursively evaluates an AST node."""
+         if isinstance(node, ast.BinOp):
+             left = self._evaluate_node(node.left, context)
+             op_func = self.allowed_operators[type(node.op)]
+             right = self._evaluate_node(node.right, context)
+             result = op_func(left, right)
+         elif isinstance(node, ast.UnaryOp):
+             operand = self._evaluate_node(node.operand, context)
+             result = self.allowed_operators[type(node.op)](operand)
+         elif isinstance(node, ast.Name):
+             result = context.get(node.id, None)
+         elif isinstance(node, ast.Constant):
+             result = node.value
+         elif isinstance(node, ast.Compare):
+             left = self._evaluate_node(node.left, context)
+             result = True
+             for operation, comparator in zip(node.ops, node.comparators):
+                 op_func = self.allowed_operators[type(operation)]
+                 right = self._evaluate_node(comparator, context)
+                 result = result and op_func(left, right)
+                 if not result:
+                     break
+                 left = right
+         elif isinstance(node, ast.BoolOp):
+             values = [self._evaluate_node(value, context) for value in node.values]
+             if isinstance(node.op, ast.And):
+                 result = all(values)
+             elif isinstance(node.op, ast.Or):
+                 result = any(values)
+             else:
+                 raise ValueError("Unsupported boolean operation.")
+         else:
+             raise ValueError("Unsupported operation in condition.")
+         return result
+
+     def add_custom_operator(self, operator_name, operation_func):
+         """Adds a custom operator to the evaluator."""
+         custom_node_class = type(operator_name, (ast.AST,), {})
+         if custom_node_class not in self.allowed_operators:
+             self.allowed_operators[custom_node_class] = operation_func
+         else:
+             raise ValueError(f"Custom operator '{operator_name}' is already defined.")
+
+     def evaluate_file(self, file_path, context, format="line"):
+         """Evaluates expressions from a file."""
+         if format == "line":
+             with open(file_path, "r") as file:
+                 last_result = None
+                 for line in file:
+                     line = line.strip()
+                     if line:
+                         last_result = self.evaluate(line, context)
+                 return last_result
+         elif format == "json":
+             with open(file_path, "r") as file:
+                 data = to_dict(file)
+                 last_result = None
+                 for expression in data:
+                     last_result = self.evaluate(expression, context)
+                 return last_result
+         else:
+             raise ValueError(f"Unsupported file format: {format}")
+
+     def validate_expression(self, expression):
+         """Validates the given expression."""
+         try:
+             tree = ast.parse(expression, mode="eval")
+             self._validate_node(tree.body)
+             return True, "Expression is valid."
+         except Exception as e:
+             return False, f"Invalid expression: {str(e)}"
+
+     def _validate_node(self, node):
+         """Validates an AST node."""
+         if isinstance(
+             node, (ast.BinOp, ast.Compare, ast.BoolOp, ast.Name, ast.Constant)
+         ):
+             if (
+                 isinstance(node, ast.BinOp)
+                 and type(node.op) not in self.allowed_operators
+             ):
+                 raise ValueError(
+                     f"Operation {type(node.op).__name__} is not supported."
+                 )
+         else:
+             raise ValueError("Unsupported node type in expression.")
+
+
+ class BaseEvaluationEngine:
+     def __init__(self) -> None:
+         self.variables: Dict[str, Any] = {}
+         self.functions: Dict[str, Callable] = {
+             "print": print,
+         }
+
+     def _evaluate_expression(self, expression: str) -> Any:
+         try:
+             return eval(expression, {}, self.variables)
+         except NameError as e:
+             raise ValueError(f"Undefined variable. {e}")
+
+     def _assign_variable(self, var_name: str, value: Any) -> None:
+         self.variables[var_name] = value
+
+     def _execute_function(self, func_name: str, *args: Any) -> None:
+         if func_name in self.functions:
+             self.functions[func_name](*args)
+         else:
+             raise ValueError(f"Function {func_name} not defined.")
+
+     def _execute_statement(self, stmt: ast.AST) -> None:
+         if isinstance(stmt, ast.Assign):
+             var_name = stmt.targets[0].id
+             value = self._evaluate_expression(ast.unparse(stmt.value))
+             self._assign_variable(var_name, value)
+         elif isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Call):
+             func_name = stmt.value.func.id
+             args = [
+                 self._evaluate_expression(ast.unparse(arg)) for arg in stmt.value.args
+             ]
+             self._execute_function(func_name, *args)
+         elif isinstance(stmt, ast.For):
+             iter_var = stmt.target.id
+             if isinstance(stmt.iter, ast.Call) and stmt.iter.func.id == "range":
+                 start, end = [
+                     self._evaluate_expression(ast.unparse(arg))
+                     for arg in stmt.iter.args
+                 ]
+                 for i in range(start, end):
+                     self.variables[iter_var] = i
+                     for body_stmt in stmt.body:
+                         self._execute_statement(body_stmt)
+
+     def execute(self, script: str) -> None:
+         tree = ast.parse(script)
+         for stmt in tree.body:
+             self._execute_statement(stmt)
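
A minimal usage sketch of the new BaseEvaluator (not part of the diff; the import path follows the file list above, and the context values are illustrative):

from lionagi.experimental.directive.evaluator.base_evaluator import BaseEvaluator

evaluator = BaseEvaluator()
context = {"x": 12, "y": 3}

# Comparison and boolean expressions are resolved against the context dict.
print(evaluator.evaluate("x > 10 and y < 5", context))   # True
# A repeated call with the same expression and context is served from the cache.
print(evaluator.evaluate("x > 10 and y < 5", context))   # True

# validate_expression reports syntax or operator problems without raising.
ok, message = evaluator.validate_expression("x >")
print(ok, message)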
lionagi/experimental/directive/evaluator/sandbox_.py
@@ -0,0 +1,14 @@
+ # filename: enhanced_script_engine.py
+ import ast
+
+
+ class SandboxTransformer(ast.NodeTransformer):
+     """AST transformer to enforce restrictions in sandbox mode."""
+
+     def visit_Import(self, node):
+         raise RuntimeError("Import statements are not allowed in sandbox mode.")
+
+     def visit_Exec(self, node):
+         raise RuntimeError("Exec statements are not allowed in sandbox mode.")
+
+     # Add other visit methods for disallowed operations or nodes
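
A small sketch (not part of the diff) of how SandboxTransformer behaves on its own; ScriptEngine.execute_sandboxed in the next file wires it up the same way:

import ast
from lionagi.experimental.directive.evaluator.sandbox_ import SandboxTransformer

try:
    # visit() walks the parsed tree; visit_Import fires on the import node and aborts.
    SandboxTransformer().visit(ast.parse("import os"))
except RuntimeError as exc:
    print(exc)  # Import statements are not allowed in sandbox mode.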
lionagi/experimental/directive/evaluator/script_engine.py
@@ -0,0 +1,83 @@
+ import ast
+ from functools import lru_cache
+ from lionagi.libs import AsyncUtil
+ from .base_evaluator import BaseEvaluator
+ from .sandbox_ import SandboxTransformer
+
+
+ class ScriptEngine:
+     def __init__(self):
+         self.variables = {}
+         self.safe_evaluator = BaseEvaluator()
+         self.functions = {
+             "processData": self.process_data,
+         }
+
+     def process_data(self, data):
+         return data * 2
+
+     def _evaluate_expression(self, expression):
+         return self.safe_evaluator.evaluate(expression, self.variables)
+
+     def _assign_variable(self, var_name, value):
+         self.variables[var_name] = value
+
+     def _execute_function(self, func_name, arg):
+         if func_name in self.functions:
+             return self.functions[func_name](arg)
+         else:
+             raise ValueError(f"Function {func_name} not defined.")
+
+     def execute(self, script):
+         tree = ast.parse(script)
+         for stmt in tree.body:
+             if isinstance(stmt, ast.Assign):
+                 var_name = stmt.targets[0].id
+                 value = self._evaluate_expression(ast.unparse(stmt.value))
+                 self._assign_variable(var_name, value)
+             elif isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Call):
+                 func_name = stmt.value.func.id
+                 arg = self._evaluate_expression(ast.unparse(stmt.value.args[0]))
+                 result = self._execute_function(func_name, arg)
+                 # For demonstration, manually update 'x' to simulate expected behavior
+                 if func_name == "processData":
+                     self._assign_variable("x", result)
+
+     def add_hook(self, event_name, callback):
+         """Subscribe a callback function to a specific event."""
+         self.hooks[event_name].append(callback)
+
+     def trigger_hooks(self, event_name, *args, **kwargs):
+         """Trigger all callbacks attached to a specific event."""
+         for callback in self.hooks[event_name]:
+             callback(*args, **kwargs)
+
+     async def process_data(self, data):
+         # Example asynchronous function
+         return data * 2
+
+     @lru_cache(maxsize=128)
+     def evaluate_expression(self, expression):
+         return self.safe_evaluator.evaluate(expression, self.variables)
+
+     async def _execute_function_async(self, func_name, arg):
+         if func_name in self.functions:
+             func = self.functions[func_name]
+             if AsyncUtil.is_coroutine_func(func):
+                 return await func(arg)
+             else:
+                 return func(arg)
+         else:
+             raise ValueError(f"Function {func_name} not defined.")
+
+     def execute_sandboxed(self, script):
+         # Parse and sanitize the script
+         tree = ast.parse(script, mode="exec")
+         sanitized_tree = SandboxTransformer().visit(tree)
+         ast.fix_missing_locations(sanitized_tree)
+
+         # Compile the sanitized AST
+         code = compile(sanitized_tree, "<sandbox>", "exec")
+
+         # Execute the code in a restricted namespace
+         exec(code, {"__builtins__": None}, self.variables)
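
A short usage sketch for ScriptEngine (not part of the diff). It sticks to assignments and the sandboxed path, which only depend on the code shown above:

from lionagi.experimental.directive.evaluator.script_engine import ScriptEngine

engine = ScriptEngine()
engine.execute("x = 5; y = x * 2")     # assignments are evaluated via BaseEvaluator
print(engine.variables)                # {'x': 5, 'y': 10}

# execute_sandboxed rejects imports via SandboxTransformer and strips builtins.
engine.execute_sandboxed("z = x + y")
print(engine.variables["z"])           # 15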
lionagi/experimental/directive/parser/__init__.py (file without changes)
lionagi/experimental/directive/parser/base_parser.py
@@ -0,0 +1,215 @@
+ from typing import List, Optional
+
+ from lionagi.libs.ln_tokenizer import BaseToken
+ from ..schema import IfNode, TryNode, ForNode
+
+
+ class BaseDirectiveParser:
+     """A base parser with lookahead, error recovery, and backtracking support.
+
+     Attributes:
+         tokens (List[BaseToken]): A list of tokens to be parsed.
+         current_token_index (int): The index of the current token in the tokens list.
+         current_token (Optional[BaseToken]): The current token being processed.
+
+     Examples:
+         >>> tokenizer = BaseTokenizer("IF x > 10 THEN DO something ENDIF")
+         >>> tokens = tokenizer.get_tokens()
+         >>> parser = BaseParser(tokens)
+         >>> print(parser.current_token)
+         BaseToken(KEYWORD, IF)
+     """
+
+     def __init__(self, tokens: List[BaseToken]):
+         self.tokens = tokens
+         self.current_token_index = -1
+         self.current_token: Optional[BaseToken] = None
+         self.next_token()
+
+     def next_token(self) -> None:
+         """Advances to the next token in the list."""
+         self.current_token_index += 1
+         if self.current_token_index < len(self.tokens):
+             self.current_token = self.tokens[self.current_token_index]
+         else:
+             self.current_token = None
+
+     def peek_next_token(self, offset: int = 1) -> BaseToken | None:
+         """Peeks at the next token without consuming it.
+
+         Args:
+             offset (int): The number of tokens to look ahead.
+
+         Returns:
+             Optional[BaseToken]: The token at the specified lookahead offset, or None if end of list.
+         """
+         peek_index = self.current_token_index + offset
+         if peek_index < len(self.tokens):
+             return self.tokens[peek_index]
+         else:
+             return None
+
+     def skip_until(self, token_types: List[str]) -> None:
+         """Skips tokens until a token of the specified type is found.
+
+         Args:
+             token_types (List[str]): A list of token types to stop skipping.
+         """
+         while self.current_token and self.current_token.type not in token_types:
+             self.next_token()
+
+     def mark(self) -> int:
+         """Marks the current position in the token list for potential backtracking.
+
+         Returns:
+             int: The current token index.
+         """
+         return self.current_token_index
+
+     def reset_to_mark(self, mark: int) -> None:
+         """Resets the parser to a previously marked position.
+
+         Args:
+             mark (int): The token index to reset to.
+         """
+         self.current_token_index = mark - 1
+         self.next_token()
+
+     def skip_semicolon(self):
+         if self.current_token and self.current_token.value == ";":
+             self.next_token()
+
+     def parse_expression(self):
+         expr = ""
+         while self.current_token and self.current_token.value != ";":
+             expr += self.current_token.value + " "
+             self.next_token()
+         # Expecting a semicolon at the end of the condition
+         if self.current_token.value != ";":
+             raise SyntaxError("Expected ';' at the end of the condition")
+         self.next_token()  # Move past the semicolon to the next part of the statement
+         return expr.strip()
+
+     def parse_if_block(self):
+         block = []
+         # Parse the block until 'ELSE', 'ENDIF', ensuring not to include semicolons as part of the block
+         while self.current_token and self.current_token.value not in ("ENDIF", "ELSE"):
+             if self.current_token.value == "DO":
+                 self.next_token()  # Move past 'DO' to get to the action
+             block.append(self.current_token.value)  # Add the action to the block
+             self.next_token()  # Move to the next token, which could be a semicolon or the next action
+             if self.current_token.value == ";":
+                 self.next_token()  # Move past the semicolon
+         return block
+
+     def parse_if_statement(self):
+         if self.current_token.type != "KEYWORD" or self.current_token.value != "IF":
+             raise SyntaxError("Expected IF statement")
+         self.next_token()  # Skip 'IF'
+
+         condition = self.parse_expression()  # Now properly ends after the semicolon
+
+         true_block = []
+         if self.current_token.value == "DO":
+             true_block = self.parse_if_block()  # Parse true block after 'DO'
+
+         false_block = None
+         if self.current_token and self.current_token.value == "ELSE":
+             self.next_token()  # Skip 'ELSE', expect 'DO' next for the false block
+             self.skip_semicolon()
+             if self.current_token.value != "DO":
+                 raise SyntaxError("Expected 'DO' after 'ELSE'")
+             self.next_token()  # Skip 'DO'
+             false_block = self.parse_if_block()  # Parse false block
+
+         return IfNode(condition, true_block, false_block)
+
+     def parse_for_statement(self):
+         if self.current_token.type != "KEYWORD" or self.current_token.value != "FOR":
+             raise SyntaxError("Expected FOR statement")
+         self.next_token()  # Skip 'FOR'
+
+         # Parse the iterator variable
+         if self.current_token.type != "IDENTIFIER":
+             raise SyntaxError("Expected iterator variable after FOR")
+         iterator = self.current_token.value
+         self.next_token()  # Move past the iterator variable
+
+         # Expect and skip 'IN' keyword
+         if self.current_token.type != "KEYWORD" or self.current_token.value != "IN":
+             raise SyntaxError("Expected 'IN' after iterator variable")
+         self.next_token()  # Move past 'IN'
+
+         # Parse the collection
+         if self.current_token.type not in ["IDENTIFIER", "LITERAL"]:
+             raise SyntaxError("Expected collection after 'IN'")
+         collection = self.current_token.value
+         self.next_token()  # Move past the collection
+
+         # Now, parse the block of statements to execute
+         true_block = self.parse_for_block()
+
+         # Construct and return a ForNode
+         return ForNode(iterator, collection, true_block)
+
+     def parse_for_block(self):
+         block = []
+         # Skip initial 'DO' if present
+         if self.current_token and self.current_token.value == "DO":
+             self.next_token()
+
+         while self.current_token and self.current_token.value not in ("ENDFOR",):
+             if self.current_token.value == ";":
+                 # If a semicolon is encountered, skip it and move to the next token
+                 self.next_token()
+                 continue
+             # Add the current token to the block unless it's a 'DO' or ';'
+             if self.current_token.value != "DO":
+                 block.append(self.current_token.value)
+             self.next_token()
+
+         # The loop exits when 'ENDFOR' is encountered; move past it for subsequent parsing
+         self.next_token()  # Skip 'ENDFOR'
+         return block
+
+     def parse_try_statement(self):
+         if self.current_token.type != "KEYWORD" or self.current_token.value != "TRY":
+             raise SyntaxError("Expected TRY statement")
+         self.next_token()  # Skip 'TRY'
+
+         try_block = self.parse_try_block("EXCEPT")  # Parse the try block until 'EXCEPT'
+
+         # Now expecting 'EXCEPT' keyword
+         if not (self.current_token and self.current_token.value == "EXCEPT"):
+             raise SyntaxError("Expected 'EXCEPT' after try block")
+         self.next_token()  # Move past 'EXCEPT'
+
+         except_block = self.parse_try_block(
+             "ENDTRY"
+         )  # Parse the except block until 'ENDTRY'
+
+         # Ensure we are correctly positioned after 'ENDTRY'
+         if self.current_token and self.current_token.value != "ENDTRY":
+             raise SyntaxError("Expected 'ENDTRY' at the end of except block")
+         self.next_token()  # Move past 'ENDTRY' for subsequent parsing
+
+         return TryNode(try_block, except_block)
+
+     def parse_try_block(self, stop_keyword):
+         block = []
+         while self.current_token and self.current_token.value != stop_keyword:
+             if self.current_token.value == "DO":
+                 self.next_token()  # Move past 'DO' to get to the action
+             elif self.current_token.value == ";":
+                 self.next_token()  # Move past the semicolon
+                 continue  # Skip adding ';' to the block
+             else:
+                 block.append(self.current_token.value)  # Add the action to the block
+             self.next_token()
+
+         return block
+
+
+ # "IF condition1 && condition2; DO action2; ELSE; DO action3; ENDIF;"
+ # "FOR input_ IN collections; DO action(input_); ENDFOR;"
+ # "TRY; DO action(); EXCEPT; DO action(input_); ENDTRY;"
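
A usage sketch mirroring the class docstring (not part of the diff). It assumes the BaseTokenizer referenced in that docstring ships in lionagi/libs/ln_tokenizer.py alongside BaseToken, which this diff does not show, and the printed values depend on how the tokenizer splits the directive:

from lionagi.libs.ln_tokenizer import BaseTokenizer  # assumed to exist per the docstring above
from lionagi.experimental.directive.parser.base_parser import BaseDirectiveParser

tokens = BaseTokenizer(
    "IF condition1 && condition2; DO action2; ELSE; DO action3; ENDIF;"
).get_tokens()
parser = BaseDirectiveParser(tokens)

if_node = parser.parse_if_statement()
print(if_node.condition)    # e.g. "condition1 && condition2"
print(if_node.true_block)   # e.g. ["action2"]
print(if_node.false_block)  # e.g. ["action3"]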
lionagi/experimental/directive/schema.py
@@ -0,0 +1,36 @@
+ class Node:
+     """Base class for all nodes in the abstract syntax tree (AST)."""
+
+     pass
+
+
+ class IfNode(Node):
+     """Represents an 'IF' statement in the AST."""
+
+     def __init__(self, condition, true_block, false_block=None):
+         self.condition = condition
+         self.true_block = true_block
+         self.false_block = false_block
+
+
+ class ForNode(Node):
+     """Represents a 'FOR' loop in the AST."""
+
+     def __init__(self, iterator, collection, block):
+         self.iterator = iterator
+         self.collection = collection
+         self.block = block
+
+
+ class TryNode(Node):
+     """Represents a 'TRY-EXCEPT' block in the AST."""
+
+     def __init__(self, try_block, except_block):
+         self.try_block = try_block
+         self.except_block = except_block
+
+
+ class ActionNode(Node):
+
+     def __init__(self, action) -> None:
+         self.action = action
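
These node classes are plain containers for what the parser above returns. For instance (not part of the diff), the directive "IF condition1 && condition2; DO action2; ELSE; DO action3; ENDIF;" would come back roughly as:

from lionagi.experimental.directive.schema import IfNode

node = IfNode("condition1 && condition2", ["action2"], ["action3"])
print(node.condition, node.true_block, node.false_block)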
lionagi/experimental/directive/template_/__init__.py (file without changes)
lionagi/experimental/directive/template_/base_template.py
@@ -0,0 +1,63 @@
+ from typing import Any, Dict
+ import re
+
+ from ..evaluator.base_evaluator import BaseEvaluator
+
+
+ class BaseDirectiveTemplate:
+     """Enhanced base template class for processing templates with conditionals and loops."""
+
+     def __init__(self, template_str: str):
+         self.template_str = template_str
+         self.evaluator = BaseEvaluator()
+
+     def _render_conditionals(self, context: Dict[str, Any]) -> str:
+         """Processes conditional statements with improved logic and support for 'else'."""
+         pattern = re.compile(r"\{if (.*?)\}(.*?)\{else\}(.*?)\{endif\}", re.DOTALL)
+
+         def evaluate_condition(match):
+             condition, if_text, else_text = match.groups()
+             if self.evaluator.evaluate(condition, context):
+                 return if_text
+             else:
+                 return else_text
+
+         return pattern.sub(evaluate_condition, self.template_str)
+
+     def _render_loops(self, template: str, context: Dict[str, Any]) -> str:
+         """Processes loop statements within the template."""
+         loop_pattern = re.compile(r"\{for (\w+) in (\w+)\}(.*?)\{endfor\}", re.DOTALL)
+
+         def render_loop(match):
+             iterator_var, collection_name, loop_body = match.groups()
+             collection = context.get(collection_name, [])
+             if not isinstance(collection, (list, range)):
+                 raise ValueError(
+                     f"Expected list or range for '{collection_name}', got {type(collection).__name__}."
+                 )
+
+             loop_result = ""
+             for item in collection:
+                 loop_context = context.copy()
+                 loop_context[iterator_var] = item
+                 loop_result += self.fill(loop_body, loop_context)
+
+             return loop_result
+
+         return loop_pattern.sub(render_loop, template)
+
+     def fill(self, template_str: str = "", context: Dict[str, Any] = {}) -> str:
+         """Fills the template with values from context after processing conditionals and loops."""
+         if not template_str:  # Use the instance's template if not provided
+             template_str = self.template_str
+
+         # First, process conditionals with 'else'
+         template_with_conditionals = self._render_conditionals(template_str)
+         # Then, process loops
+         template_with_loops = self._render_loops(template_with_conditionals, context)
+         # Finally, substitute the placeholders with context values
+         try:
+             return template_with_loops.format(**context)
+         except KeyError as e:
+             print(f"Missing key in context: {e}")
+             return template_with_loops
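
A minimal sketch (not part of the diff) of filling a template; it sticks to plain placeholder substitution, which relies only on the fill() path shown above:

from lionagi.experimental.directive.template_.base_template import BaseDirectiveTemplate

template = BaseDirectiveTemplate("Task {task_id}: status={status}")
print(template.fill(context={"task_id": 7, "status": "done"}))   # Task 7: status=done

# A missing key is reported and the unformatted template is returned instead.
print(template.fill(context={"task_id": 7}))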
lionagi/experimental/tool/__init__.py (file without changes)
lionagi/experimental/tool/function_calling.py
@@ -0,0 +1,43 @@
+ from typing import Any, Callable
+ from pydantic import BaseModel, Field, field_serializer
+ from functools import singledispatchmethod
+ from lionagi.libs import convert
+
+
+ class FunctionCalling(BaseModel):
+     func: Any = Field(..., alias="function")
+     kwargs: Any = Field({}, alias="arguments")
+
+     @field_serializer("func")
+     def serialize_func(self, func: Callable):
+         return func.__name__
+
+     @property
+     def func_name(self):
+         return self.func.__name__
+
+     @classmethod
+     @singledispatchmethod
+     def create(cls, func_call: Any):
+         raise TypeError(f"Unsupported type {type(func_call)}")
+
+     @create.register
+     def _(cls, func_call: tuple):
+         if len(func_call) == 2:
+             return cls(func=func_call[0], kwargs=func_call[1])
+         else:
+             raise ValueError(f"Invalid tuple length {len(func_call)}")
+
+     @create.register
+     def _(cls, func_call: dict):
+         return cls(**func_call)
+
+     @create.register
+     def _(cls, func_call: str):
+         try:
+             return cls(**convert.to_dict(func_call))
+         except Exception as e:
+             raise ValueError(f"Invalid string {func_call}") from e
+
+     def __str__(self):
+         return f"{self.func_name}({self.kwargs})"