quantalogic 0.61.2__py3-none-any.whl → 0.80__py3-none-any.whl
This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- quantalogic/agent.py +0 -1
- quantalogic/codeact/TODO.md +14 -0
- quantalogic/codeact/agent.py +400 -421
- quantalogic/codeact/cli.py +42 -224
- quantalogic/codeact/cli_commands/__init__.py +0 -0
- quantalogic/codeact/cli_commands/create_toolbox.py +45 -0
- quantalogic/codeact/cli_commands/install_toolbox.py +20 -0
- quantalogic/codeact/cli_commands/list_executor.py +15 -0
- quantalogic/codeact/cli_commands/list_reasoners.py +15 -0
- quantalogic/codeact/cli_commands/list_toolboxes.py +47 -0
- quantalogic/codeact/cli_commands/task.py +215 -0
- quantalogic/codeact/cli_commands/tool_info.py +24 -0
- quantalogic/codeact/cli_commands/uninstall_toolbox.py +43 -0
- quantalogic/codeact/config.yaml +21 -0
- quantalogic/codeact/constants.py +1 -1
- quantalogic/codeact/events.py +12 -5
- quantalogic/codeact/examples/README.md +342 -0
- quantalogic/codeact/examples/agent_sample.yaml +29 -0
- quantalogic/codeact/executor.py +186 -0
- quantalogic/codeact/history_manager.py +94 -0
- quantalogic/codeact/llm_util.py +3 -22
- quantalogic/codeact/plugin_manager.py +92 -0
- quantalogic/codeact/prompts/generate_action.j2 +65 -14
- quantalogic/codeact/prompts/generate_program.j2 +32 -19
- quantalogic/codeact/react_agent.py +318 -0
- quantalogic/codeact/reasoner.py +185 -0
- quantalogic/codeact/templates/toolbox/README.md.j2 +10 -0
- quantalogic/codeact/templates/toolbox/pyproject.toml.j2 +16 -0
- quantalogic/codeact/templates/toolbox/tools.py.j2 +6 -0
- quantalogic/codeact/templates.py +7 -0
- quantalogic/codeact/tools_manager.py +242 -119
- quantalogic/codeact/utils.py +16 -89
- quantalogic/codeact/xml_utils.py +126 -0
- quantalogic/flow/flow.py +151 -41
- quantalogic/flow/flow_extractor.py +61 -1
- quantalogic/flow/flow_generator.py +34 -6
- quantalogic/flow/flow_manager.py +64 -25
- quantalogic/flow/flow_manager_schema.py +32 -0
- quantalogic/tools/action_gen.py +1 -1
- quantalogic/tools/action_gen_safe.py +340 -0
- quantalogic/tools/tool.py +531 -109
- quantalogic/tools/write_file_tool.py +7 -8
- {quantalogic-0.61.2.dist-info → quantalogic-0.80.dist-info}/METADATA +3 -2
- {quantalogic-0.61.2.dist-info → quantalogic-0.80.dist-info}/RECORD +47 -42
- {quantalogic-0.61.2.dist-info → quantalogic-0.80.dist-info}/WHEEL +1 -1
- quantalogic-0.80.dist-info/entry_points.txt +3 -0
- quantalogic/python_interpreter/__init__.py +0 -23
- quantalogic/python_interpreter/assignment_visitors.py +0 -63
- quantalogic/python_interpreter/base_visitors.py +0 -20
- quantalogic/python_interpreter/class_visitors.py +0 -22
- quantalogic/python_interpreter/comprehension_visitors.py +0 -172
- quantalogic/python_interpreter/context_visitors.py +0 -59
- quantalogic/python_interpreter/control_flow_visitors.py +0 -88
- quantalogic/python_interpreter/exception_visitors.py +0 -109
- quantalogic/python_interpreter/exceptions.py +0 -39
- quantalogic/python_interpreter/execution.py +0 -202
- quantalogic/python_interpreter/function_utils.py +0 -386
- quantalogic/python_interpreter/function_visitors.py +0 -209
- quantalogic/python_interpreter/import_visitors.py +0 -28
- quantalogic/python_interpreter/interpreter_core.py +0 -358
- quantalogic/python_interpreter/literal_visitors.py +0 -74
- quantalogic/python_interpreter/misc_visitors.py +0 -148
- quantalogic/python_interpreter/operator_visitors.py +0 -108
- quantalogic/python_interpreter/scope.py +0 -10
- quantalogic/python_interpreter/visit_handlers.py +0 -110
- quantalogic-0.61.2.dist-info/entry_points.txt +0 -6
- {quantalogic-0.61.2.dist-info → quantalogic-0.80.dist-info}/LICENSE +0 -0
quantalogic/flow/flow_manager_schema.py
CHANGED
@@ -223,16 +223,48 @@ class TransitionDefinition(BaseModel):
     )
 
 
+class LoopDefinition(BaseModel):
+    """Definition of a loop within the workflow."""
+    nodes: List[str] = Field(..., description="List of node names in the loop.")
+    condition: str = Field(..., description="Python expression using 'ctx' for the loop condition.")
+    exit_node: str = Field(..., description="Node to transition to when the loop ends.")
+
+
 class WorkflowStructure(BaseModel):
     """Structure defining the workflow's execution flow."""
     start: Optional[str] = Field(None, description="Name of the starting node.")
     transitions: List[TransitionDefinition] = Field(
         default_factory=list, description="List of transitions between nodes."
     )
+    loops: List[LoopDefinition] = Field(
+        default_factory=list, description="List of loop definitions (optional, for explicit loop support)."
+    )
     convergence_nodes: List[str] = Field(
         default_factory=list, description="List of nodes where branches converge."
     )
 
+    @model_validator(mode="before")
+    @classmethod
+    def check_loop_nodes(cls, data: Any) -> Any:
+        """Ensure all nodes in loops exist in the workflow.
+
+        Args:
+            data: Raw data to validate.
+
+        Returns:
+            Validated data.
+
+        Raises:
+            ValueError: If loop nodes are not defined.
+        """
+        loops = data.get("loops", [])
+        nodes = set(data.get("nodes", {}).keys())
+        for loop in loops:
+            for node in loop["nodes"] + [loop["exit_node"]]:
+                if node not in nodes:
+                    raise ValueError(f"Loop node '{node}' not defined in nodes")
+        return data
+
 
 class WorkflowDefinition(BaseModel):
     """Top-level definition of the workflow."""
quantalogic/tools/action_gen.py
CHANGED
@@ -8,8 +8,8 @@ from typing import Callable, Dict, List
 import litellm
 import typer
 from loguru import logger
+from quantalogic_pythonbox.python_interpreter import execute_async
 
-from quantalogic.python_interpreter import execute_async
 from quantalogic.tools.tool import Tool, ToolArgument
 
 # Configure loguru to log to a file with rotation, matching original
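Taken together with the removal of the quantalogic/python_interpreter/* modules listed above, this import swap suggests the sandboxed interpreter now lives in the external quantalogic_pythonbox package rather than inside quantalogic itself. Code that used the bundled interpreter would presumably migrate like this (a sketch, not part of the diff):

# 0.61.2: interpreter bundled with the quantalogic package
# from quantalogic.python_interpreter import execute_async

# 0.80: interpreter provided by the external quantalogic_pythonbox package
from quantalogic_pythonbox.python_interpreter import execute_async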
quantalogic/tools/action_gen_safe.py
ADDED
@@ -0,0 +1,340 @@
+import ast
+import asyncio
+from functools import partial
+from typing import Callable, Dict, List
+
+import litellm
+import typer
+from loguru import logger
+
+from quantalogic.tools.tool import Tool, ToolArgument
+from quantalogic.utils.python_interpreter import ASTInterpreter
+
+# Configure loguru to log to a file with rotation
+logger.add("action_gen_safe.log", rotation="10 MB", level="DEBUG")
+
+# Initialize Typer app
+app = typer.Typer()
+
+# Define tool classes with logging in async_execute
+class AddTool(Tool):
+    def __init__(self):
+        super().__init__(
+            name="add_tool",
+            description="Adds two numbers and returns the sum.",
+            arguments=[
+                ToolArgument(name="a", arg_type="int", description="First number", required=True),
+                ToolArgument(name="b", arg_type="int", description="Second number", required=True)
+            ],
+            return_type="int"
+        )
+
+    async def async_execute(self, **kwargs) -> str:
+        logger.info(f"Adding {kwargs['a']} and {kwargs['b']}")
+        return str(int(kwargs["a"]) + int(kwargs["b"]))
+
+class MultiplyTool(Tool):
+    def __init__(self):
+        super().__init__(
+            name="multiply_tool",
+            description="Multiplies two numbers and returns the product.",
+            arguments=[
+                ToolArgument(name="x", arg_type="int", description="First number", required=True),
+                ToolArgument(name="y", arg_type="int", description="Second number", required=True)
+            ],
+            return_type="int"
+        )
+
+    async def async_execute(self, **kwargs) -> str:
+        logger.info(f"Multiplying {kwargs['x']} and {kwargs['y']}")
+        return str(int(kwargs["x"]) * int(kwargs["y"]))
+
+class ConcatTool(Tool):
+    def __init__(self):
+        super().__init__(
+            name="concat_tool",
+            description="Concatenates two strings.",
+            arguments=[
+                ToolArgument(name="s1", arg_type="string", description="First string", required=True),
+                ToolArgument(name="s2", arg_type="string", description="Second string", required=True)
+            ],
+            return_type="string"
+        )
+
+    async def async_execute(self, **kwargs) -> str:
+        logger.info(f"Concatenating '{kwargs['s1']}' and '{kwargs['s2']}'")
+        return kwargs["s1"] + kwargs["s2"]
+
+class AgentTool(Tool):
+    def __init__(self, model: str = "gemini/gemini-2.0-flash"):
+        super().__init__(
+            name="agent_tool",
+            description="Generates text using a language model based on a system prompt and user prompt.",
+            arguments=[
+                ToolArgument(name="system_prompt", arg_type="string", description="System prompt to guide the model's behavior", required=True),
+                ToolArgument(name="prompt", arg_type="string", description="User prompt to generate a response for", required=True),
+                ToolArgument(name="temperature", arg_type="float", description="Temperature for generation (0 to 1)", required=True)
+            ],
+            return_type="string"
+        )
+        self.model = model
+
+    async def async_execute(self, **kwargs) -> str:
+        system_prompt = kwargs["system_prompt"]
+        prompt = kwargs["prompt"]
+        temperature = float(kwargs["temperature"])
+
+        # Validate temperature
+        if not 0 <= temperature <= 1:
+            logger.error(f"Temperature {temperature} is out of range (0-1)")
+            raise ValueError("Temperature must be between 0 and 1")
+
+        logger.info(f"Generating text with model {self.model}, temperature {temperature}")
+        try:
+            response = await litellm.acompletion(
+                model=self.model,
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": prompt}
+                ],
+                temperature=temperature,
+                max_tokens=1000  # Reasonable default for text generation
+            )
+            generated_text = response.choices[0].message.content.strip()
+            logger.debug(f"Generated text: {generated_text}")
+            return generated_text
+        except Exception as e:
+            logger.error(f"Failed to generate text with {self.model}: {str(e)}")
+            raise RuntimeError(f"Text generation failed: {str(e)}")
+
+# Asynchronous function to generate the program
+async def generate_program(task_description: str, tools: List[Tool], model: str, max_tokens: int) -> str:
+    """
+    Asynchronously generate a Python program that solves a given task using a list of tools.
+
+    Args:
+        task_description (str): A description of the task to be solved.
+        tools (List[Tool]): A list of Tool objects available for use.
+        model (str): The litellm model to use for code generation.
+        max_tokens (int): Maximum number of tokens for the generated response.
+
+    Returns:
+        str: A string containing a complete Python program.
+    """
+    logger.debug(f"Generating program for task: {task_description}")
+    # Collect tool docstrings
+    tool_docstrings = "\n\n".join([tool.to_docstring() for tool in tools])
+
+    # Construct the prompt for litellm
+    prompt = f"""
+You are a Python code generator. Your task is to create a Python program that solves the following task:
+"{task_description}"
+
+You have access to the following pre-defined async tool functions, as defined with their signatures and descriptions:
+
+{tool_docstrings}
+
+Instructions:
+1. Generate a Python program as a single string.
+2. Include only the import for asyncio (import asyncio).
+3. Define an async function named main() that solves the task.
+4. Use the pre-defined tool functions (e.g., add_tool, multiply_tool, concat_tool) directly by calling them with await and the appropriate arguments as specified in their descriptions.
+5. Do not redefine the tool functions within the program; assume they are already available in the namespace.
+6. Return the program as a string enclosed in triple quotes (\"\"\"program\"\"\")).
+7. Do not include asyncio.run(main()) or any code outside the main() function definition.
+8. Do not include explanatory text outside the program string.
+9. Express all string variables as multiline strings (\"\"\"\nstring\n\"\"\"), always start a string at the beginning of a line.
+10. Always print the result at the end of the program.
+11. Never call the main() function.
+12. Never use not defined variables, modules, or functions.
+
+Example task: "Add 5 and 7 and print the result"
+Example output:
+\"\"\"import asyncio
+async def main():
+    result = await add_tool(a=5, b=7)
+    print(result)
+\"\"\"
+"""
+
+    logger.debug(f"Prompt sent to litellm:\n{prompt}")
+
+    try:
+        # Call litellm asynchronously to generate the program
+        logger.debug(f"Calling litellm with model {model}")
+        response = await litellm.acompletion(
+            model=model,
+            messages=[
+                {"role": "system", "content": "You are a Python code generator."},
+                {"role": "user", "content": prompt}
+            ],
+            max_tokens=max_tokens,
+            temperature=0.3
+        )
+        generated_code = response.choices[0].message.content.strip()
+        logger.debug("Code generation successful")
+    except Exception as e:
+        logger.error(f"Failed to generate code: {str(e)}")
+        raise typer.BadParameter(f"Failed to generate code with model '{model}': {str(e)}")
+
+    # Clean up the output
+    if generated_code.startswith('"""') and generated_code.endswith('"""'):
+        generated_code = generated_code[3:-3]
+    elif generated_code.startswith("```python") and generated_code.endswith("```"):
+        generated_code = generated_code[9:-3].strip()
+
+    return generated_code
+
+# Async core logic for generate
+async def generate_core(task: str, model: str, max_tokens: int):
+    logger.info(f"Starting generate command for task: {task}")
+    # Input validation
+    if not task.strip():
+        logger.error("Task description is empty")
+        raise typer.BadParameter("Task description cannot be empty")
+    if max_tokens <= 0:
+        logger.error("max-tokens must be positive")
+        raise typer.BadParameter("max-tokens must be a positive integer")
+
+    # Initialize tools
+    tools = [
+        AddTool(),
+        MultiplyTool(),
+        ConcatTool(),
+        AgentTool(model=model)  # Include AgentTool with the specified model
+    ]
+
+    # Generate the program
+    try:
+        program = await generate_program(task, tools, model, max_tokens)
+    except Exception as e:
+        logger.error(f"Failed to generate program: {str(e)}")
+        typer.echo(typer.style(f"Error: {str(e)}", fg=typer.colors.RED))
+        raise typer.Exit(code=1)
+
+    logger.debug(f"Generated program:\n{program}")
+    # Output the generated program
+    typer.echo(typer.style("Generated Python Program:", fg=typer.colors.GREEN, bold=True))
+    typer.echo(program)
+
+    # Attempt to execute the program using the safe interpreter
+    typer.echo("\n" + typer.style("Executing the program safely:", fg=typer.colors.GREEN, bold=True))
+    try:
+        # Create instances of tools
+        add_tool_instance = AddTool()
+        multiply_tool_instance = MultiplyTool()
+        concat_tool_instance = ConcatTool()
+        agent_tool_instance = AgentTool(model=model)
+
+        # Define the allowed modules
+        allowed_modules = ["asyncio"]
+
+        # Create a namespace with our tools
+        namespace = {
+            "add_tool": add_tool_instance.async_execute,
+            "multiply_tool": multiply_tool_instance.async_execute,
+            "concat_tool": concat_tool_instance.async_execute,
+            "agent_tool": agent_tool_instance.async_execute,
+        }
+
+        # Parse the program to AST
+        program_ast = ast.parse(program)
+
+        # Extract imports first to ensure they're allowed
+        for node in program_ast.body:
+            if isinstance(node, ast.Import):
+                for name in node.names:
+                    if name.name not in allowed_modules:
+                        raise ValueError(f"Import not allowed: {name.name}")
+            elif isinstance(node, ast.ImportFrom):
+                if node.module not in allowed_modules:
+                    raise ValueError(f"Import from not allowed: {node.module}")
+
+        # Find the main function in the AST
+        main_func_node = None
+        for node in program_ast.body:
+            if isinstance(node, ast.AsyncFunctionDef) and node.name == "main":
+                main_func_node = node
+                break
+
+        if not main_func_node:
+            logger.warning("No async main() function found in generated code")
+            typer.echo(typer.style("Warning: No async main() function found", fg=typer.colors.YELLOW))
+            return
+
+        # Execute the code safely to define functions in the namespace
+        # Use a custom ASTInterpreter directly instead of calling interpret_code
+        interpreter = ASTInterpreter(allowed_modules=allowed_modules, source=program)
+
+        # Add tools to the interpreter's environment
+        interpreter.env_stack[0].update({
+            "add_tool": add_tool_instance.async_execute,
+            "multiply_tool": multiply_tool_instance.async_execute,
+            "concat_tool": concat_tool_instance.async_execute,
+            "agent_tool": agent_tool_instance.async_execute,
+        })
+
+        # First, execute all non-main code (imports and other top-level code)
+        for node in program_ast.body:
+            if not isinstance(node, ast.AsyncFunctionDef) or node.name != "main":
+                await interpreter.visit(node, wrap_exceptions=True)
+
+        # Then execute the main function definition
+        for node in program_ast.body:
+            if isinstance(node, ast.AsyncFunctionDef) and node.name == "main":
+                await interpreter.visit(node, wrap_exceptions=True)
+
+        # Get the main function from the environment
+        main_func = interpreter.get_variable("main")
+
+        # Execute the main function in the current event loop
+        logger.debug("Executing main() function")
+        await main_func()
+
+        logger.info("Program executed successfully")
+        typer.echo(typer.style("Execution completed successfully", fg=typer.colors.GREEN))
+
+    except SyntaxError as e:
+        logger.error(f"Syntax error in generated code: {e}")
+        typer.echo(typer.style(f"Syntax error: {e}", fg=typer.colors.RED))
+    except Exception as e:
+        logger.error(f"Execution error: {e}")
+        typer.echo(typer.style(f"Execution failed: {e}", fg=typer.colors.RED))
+
+# Synchronous callback to invoke async generate_core
+@app.callback(invoke_without_command=True)
+def generate(
+    task: str = typer.Argument(
+        ...,
+        help="The task description to generate a program for (e.g., 'Add 5 and 7 and print the result')"
+    ),
+    model: str = typer.Option(
+        "gemini/gemini-2.0-flash",
+        "--model",
+        "-m",
+        help="The litellm model to use for generation (e.g., 'gpt-3.5-turbo', 'gpt-4')"
+    ),
+    max_tokens: int = typer.Option(
+        4000,
+        "--max-tokens",
+        "-t",
+        help="Maximum number of tokens for the generated response (default: 4000)"
+    )
+):
+    """
+    Asynchronously generate a Python program based on a task description using specified tools and model.
+    Executes the program in a controlled, safe environment.
+
+    Examples:
+        $ python action_gen_safe.py "Add 5 and 7 and print the result"
+        $ python action_gen_safe.py "Concatenate 'Hello' and 'World' and print it" --model gpt-4 --max-tokens 5000
+    """
+    asyncio.run(generate_core(task, model, max_tokens))
+
+# Entry point to start the app
+def main():
+    logger.debug("Starting script execution")
+    app()
+
+if __name__ == "__main__":
+    main()
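Besides the Typer CLI entry point, the new module's core coroutine can be driven directly; a minimal usage sketch (assumes the package is installed and a litellm-compatible API key is configured for the chosen model; the task string is illustrative):

import asyncio

from quantalogic.tools.action_gen_safe import generate_core

# Generate a program for the task with the default Gemini model, print it,
# then run it through the restricted ASTInterpreter (only `asyncio` importable).
asyncio.run(generate_core(
    task="Multiply 6 and 7 and print the result",
    model="gemini/gemini-2.0-flash",
    max_tokens=4000,
))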