quantalogic 0.2.8__py3-none-any.whl → 0.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
quantalogic/main.py CHANGED
@@ -2,6 +2,7 @@
2
2
  """Main module for the QuantaLogic agent."""
3
3
 
4
4
  # Standard library imports
5
+ import random
5
6
  import sys
6
7
  from typing import Optional
7
8
 
@@ -9,6 +10,9 @@ from typing import Optional
9
10
  import click
10
11
  from loguru import logger
11
12
 
13
+ from quantalogic.utils.check_version import check_if_is_latest_version
14
+ from quantalogic.version import get_version
15
+
12
16
  # Configure logger
13
17
  logger.remove() # Remove default logger
14
18
 
@@ -33,34 +37,63 @@ from quantalogic.version import get_version # noqa: E402
33
37
  AGENT_MODES = ["code", "basic", "interpreter", "full", "code-basic"]
34
38
 
35
39
 
36
- def create_agent_for_mode(mode: str, model_name: str) -> Agent:
40
+ def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | None) -> Agent:
37
41
  """Create an agent based on the specified mode."""
38
42
  logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
39
43
  if mode == "code":
40
44
  logger.debug("Creating code agent without basic mode")
41
- return create_coding_agent(model_name, basic=False)
45
+ return create_coding_agent(model_name, vision_model_name, basic=False)
42
46
  if mode == "code-basic":
43
- return create_coding_agent(model_name, basic=True)
47
+ return create_coding_agent(model_name, vision_model_name, basic=True)
44
48
  elif mode == "basic":
45
- return create_orchestrator_agent(model_name)
49
+ return create_orchestrator_agent(model_name, vision_model_name)
46
50
  elif mode == "full":
47
- return create_full_agent(model_name)
51
+ return create_full_agent(model_name, vision_model_name)
48
52
  elif mode == "interpreter":
49
- return create_interpreter_agent(model_name)
53
+ return create_interpreter_agent(model_name, vision_model_name)
50
54
  else:
51
55
  raise ValueError(f"Unknown agent mode: {mode}")
52
56
 
57
+ def check_new_version():
58
+ # Randomly check for updates (1 in 10 chance)
59
+ if random.randint(1, 10) == 1:
60
+ try:
61
+ current_version = get_version()
62
+ has_new_version, latest_version = check_if_is_latest_version()
63
+
64
+ if has_new_version:
65
+ console = Console()
66
+ console.print(
67
+ Panel.fit(
68
+ f"[yellow]⚠️ Update Available![/yellow]\n\n"
69
+ f"Current version: [bold]{current_version}[/bold]\n"
70
+ f"Latest version: [bold]{latest_version}[/bold]\n\n"
71
+ "To update, run:\n"
72
+ "[bold]pip install --upgrade quantalogic[/bold]\n"
73
+ "or if using pipx:\n"
74
+ "[bold]pipx upgrade quantalogic[/bold]",
75
+ title="[bold]Update Available[/bold]",
76
+ border_style="yellow",
77
+ )
78
+ )
79
+ except Exception:
80
+ return
53
81
 
54
82
  def configure_logger(log_level: str) -> None:
55
83
  """Configure the logger with the specified log level and format."""
56
84
  logger.remove()
57
- logger.add(sys.stderr, level=log_level.upper(), format="{time} | {level} | {message}")
58
- logger.info(f"Log level set to: {log_level}")
85
+ logger.add(
86
+ sys.stderr,
87
+ level=log_level.upper(),
88
+ format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{process}</cyan> | <magenta>{file}:{line}</magenta> | {message}",
89
+ )
90
+ logger.debug(f"Log level set to: {log_level}")
59
91
 
60
92
 
61
93
  def set_litellm_verbose(verbose_mode: bool) -> None:
62
94
  """Set the verbosity of the litellm library."""
63
95
  import litellm
96
+
64
97
  litellm.set_verbose = verbose_mode
65
98
 
66
99
 
@@ -87,7 +120,7 @@ def get_task_from_file(file_path: str) -> str:
87
120
  raise Exception(f"Unexpected error reading file: {e}")
88
121
 
89
122
 
90
- def display_welcome_message(console: Console, model_name: str) -> None:
123
+ def display_welcome_message(console: Console, model_name: str, vision_model_name: str | None) -> None:
91
124
  """Display the welcome message and instructions."""
92
125
  version = get_version()
93
126
  console.print(
@@ -97,9 +130,10 @@ def display_welcome_message(console: Console, model_name: str) -> None:
97
130
  "1. [bold]Describe your task[/bold]: Tell the AI what you need help with.\n"
98
131
  "2. [bold]Submit your task[/bold]: Press [bold]Enter[/bold] twice to send your request.\n\n"
99
132
  "3. [bold]Exit the app[/bold]: Leave the input blank and press [bold]Enter[/bold] twice to close the assistant.\n\n"
100
- f"[yellow]ℹ️ System Info:[/yellow]\n\n"
101
- f"- Version: {get_version()}\n"
102
- f"- Model: {model_name}\n\n"
133
+ f"[yellow] 🤖 System Info:[/yellow]\n\n"
134
+ "\n"
135
+ f"- Model: {model_name}\n"
136
+ f"- Vision Model: {vision_model_name}\n\n"
103
137
  "[bold magenta]💡 Pro Tips:[/bold magenta]\n\n"
104
138
  "- Be as specific as possible in your task description to get the best results!\n"
105
139
  "- Use clear and concise language when describing your task\n"
@@ -118,18 +152,38 @@ def display_welcome_message(console: Console, model_name: str) -> None:
118
152
  default=MODEL_NAME,
119
153
  help='Specify the model to use (litellm format, e.g. "openrouter/deepseek-chat").',
120
154
  )
121
- @click.option("--log", type=click.Choice(["info", "debug", "warning"]), default="info", help="Set logging level (info/debug/warning).")
155
+ @click.option(
156
+ "--log",
157
+ type=click.Choice(["info", "debug", "warning"]),
158
+ default="info",
159
+ help="Set logging level (info/debug/warning).",
160
+ )
122
161
  @click.option("--verbose", is_flag=True, help="Enable verbose output.")
123
162
  @click.option("--mode", type=click.Choice(AGENT_MODES), default="code", help="Agent mode (code/search/full).")
163
+ @click.option(
164
+ "--vision-model-name",
165
+ default=None,
166
+ help='Specify the vision model to use (litellm format, e.g. "openrouter/openai/gpt-4o-mini").',
167
+ )
124
168
  @click.pass_context
125
- def cli(ctx: click.Context, version: bool, model_name: str, verbose: bool, mode: str, log: str) -> None:
169
+ def cli(
170
+ ctx: click.Context,
171
+ version: bool,
172
+ model_name: str,
173
+ verbose: bool,
174
+ mode: str,
175
+ log: str,
176
+ vision_model_name: str | None,
177
+ ) -> None:
126
178
  """QuantaLogic AI Assistant - A powerful AI tool for various tasks."""
127
179
  if version:
128
180
  console = Console()
129
181
  console.print(f"QuantaLogic version: {get_version()}")
130
182
  sys.exit(0)
131
183
  if ctx.invoked_subcommand is None:
132
- ctx.invoke(task, model_name=model_name, verbose=verbose, mode=mode, log=log)
184
+ ctx.invoke(
185
+ task, model_name=model_name, verbose=verbose, mode=mode, log=log, vision_model_name=vision_model_name
186
+ )
133
187
 
134
188
 
135
189
  @cli.command()
@@ -141,24 +195,45 @@ def cli(ctx: click.Context, version: bool, model_name: str, verbose: bool, mode:
141
195
  )
142
196
  @click.option("--verbose", is_flag=True, help="Enable verbose output.")
143
197
  @click.option("--mode", type=click.Choice(AGENT_MODES), default="code", help="Agent mode (code/search/full).")
144
- @click.option("--log", type=click.Choice(["info", "debug", "warning"]), default="info", help="Set logging level (info/debug/warning).")
198
+ @click.option(
199
+ "--log",
200
+ type=click.Choice(["info", "debug", "warning"]),
201
+ default="info",
202
+ help="Set logging level (info/debug/warning).",
203
+ )
204
+ @click.option(
205
+ "--vision-model-name",
206
+ default=None,
207
+ help='Specify the vision model to use (litellm format, e.g. "openrouter/openai/gpt-4o-mini").',
208
+ )
145
209
  @click.argument("task", required=False)
146
- def task(file: Optional[str], model_name: str, verbose: bool, mode: str, log: str, task: Optional[str]) -> None:
210
+ def task(
211
+ file: Optional[str],
212
+ model_name: str,
213
+ verbose: bool,
214
+ mode: str,
215
+ log: str,
216
+ vision_model_name: str | None,
217
+ task: Optional[str],
218
+ ) -> None:
147
219
  """Execute a task with the QuantaLogic AI Assistant."""
148
220
  console = Console()
149
221
  switch_verbose(verbose, log)
150
222
 
223
+
151
224
  try:
152
225
  if file:
153
226
  task_content = get_task_from_file(file)
154
227
  else:
155
228
  if task:
229
+ check_new_version()
156
230
  task_content = task
157
231
  else:
158
- display_welcome_message(console, model_name)
159
- logger.info("Waiting for user input...")
232
+ display_welcome_message(console, model_name, vision_model_name)
233
+ check_new_version()
234
+ logger.debug("Waiting for user input...")
160
235
  task_content = get_multiline_input(console).strip()
161
- logger.info(f"User input received. Task content: {task_content}")
236
+ logger.debug(f"User input received. Task content: {task_content}")
162
237
  if not task_content:
163
238
  logger.info("No task provided. Exiting...")
164
239
  console.print("[yellow]No task provided. Exiting...[/yellow]")
@@ -172,12 +247,20 @@ def task(file: Optional[str], model_name: str, verbose: bool, mode: str, log: st
172
247
  border_style="blue",
173
248
  )
174
249
  )
175
- if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
176
- console.print("[yellow]Task submission cancelled. Exiting...[/yellow]")
177
- sys.exit(0)
250
+ if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
251
+ console.print("[yellow]Task submission cancelled. Exiting...[/yellow]")
252
+ sys.exit(0)
253
+
254
+ console.print(
255
+ Panel.fit(
256
+ "[green]✓ Task successfully submitted! Processing...[/green]",
257
+ title="[bold]Status[/bold]",
258
+ border_style="green",
259
+ )
260
+ )
178
261
 
179
262
  logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
180
- agent = create_agent_for_mode(mode, model_name)
263
+ agent = create_agent_for_mode(mode, model_name, vision_model_name=vision_model_name)
181
264
  logger.debug(f"Created agent for mode: {mode} with model: {model_name}")
182
265
 
183
266
  events = [
@@ -215,7 +298,7 @@ def task(file: Optional[str], model_name: str, verbose: bool, mode: str, log: st
215
298
 
216
299
 
217
300
  def main():
218
- """Entry point for the quantalogic CLI."""
301
+ """Main Entry point"""
219
302
  cli()
220
303
 
221
304
 
@@ -1,12 +1,14 @@
1
1
  """Print events with rich formatting."""
2
2
 
3
+ from typing import Any
4
+
3
5
  from rich import box
4
6
  from rich.console import Console
5
7
  from rich.panel import Panel
6
8
  from rich.tree import Tree
7
9
 
8
10
 
9
- def console_print_events(event: str, data: dict[str, any] = None):
11
+ def console_print_events(event: str, data: dict[str, Any] | None = None):
10
12
  """Print events with rich formatting.
11
13
 
12
14
  Args:
quantalogic/prompts.py CHANGED
@@ -14,11 +14,10 @@ Tasks will be presented within XML tags:
14
14
  ### Response Protocol
15
15
  Every response must contain exactly two XML blocks:
16
16
 
17
- Be very concise and very precise in the <thinking> block
18
-
19
17
  1. Analysis Block:
20
18
  ```xml
21
19
  <thinking>
20
+ <!-- You must follow this precise format, be very concise and very precise -->
22
21
  <task_analysis_if_no_history>
23
22
  Only if no conversation history:
24
23
  * Rewrite the <task> and its context with your own words in detailed, clear, and specific manner.
@@ -51,20 +50,24 @@ Be very concise and very precise in the <thinking> block
51
50
  </result>
52
51
  </last_observation>
53
52
  <progess_analysis>
53
+ <!-- if there is a conversation history -->
54
54
  * Detail each step failed and completed so far.
55
55
  * Identify and evaluate any blockers or challenges to the progress of global task.
56
+ * Identify repetitions: if repeated steps, take a step back and rethink your approach.
56
57
  * Provide potential solutions, and if needed, suggest reevaluating the approach and the plan.
57
58
  </progess_analysis>
58
59
  <variables>
60
+ <!-- if there is a conversation history -->
59
61
  * List all variable names and concisely describe their current values.
60
62
  </variables>
61
63
  <next_steps>
62
64
  * Outline immediate actions required.
63
65
  * Justify tool selection and parameter choices.
64
- * Think about variable interpolation to minimize generation of tokens.
65
- * Consider alternatives if previous attempts were unsuccessful.
66
+ * Prefer variable interpolation if possible, to minimize generation of tokens.
67
+ * Consider alternatives, take a step back if previous attempts were unsuccessful to review the plan.
66
68
  </next_steps>
67
69
  <taskpad>
70
+ <!-- optional -->
68
71
  <note>Use this to record notes about intermediate steps.</note>
69
72
  </taskpad>
70
73
  </thinking>
@@ -73,25 +76,15 @@ Be very concise and very precise in the <thinking> block
73
76
  2. Action Block:
74
77
  ```xml
75
78
  <tool_name>
76
- <parameter1>value1</parameter1>
79
+ <!-- tool_name is the name of the tool from available tools -->
80
+ <parameter1>
81
+ <!-- Use variable interpolation to pass context to minimize generation of tokens, example: <content>$var1$</content> -->
82
+ value1
83
+ </parameter1>
77
84
  <parameter2>value2</parameter2>
78
85
  </tool_name>
79
86
  ```
80
87
 
81
- ### Tool Usage Guidelines
82
- 1. Before Repeating a Tool Call:
83
- - Review previous results in detail.
84
- - State why a repeat is needed.
85
- - Adjust parameters if necessary.
86
- - Consider whether other tools are more appropriate.
87
- - Use variable interpolation to pass context to minimize generation of tokens, example: <toolname>$var1$<</toolname>
88
-
89
- 2. When Tool Calls Fail:
90
- - Examine the error message carefully.
91
- - Adjust parameters if needed.
92
- - Consider alternative tools.
93
- - Break down complex processes into smaller steps if necessary.
94
-
95
88
  ### Available Tools
96
89
  {tools}
97
90
 
@@ -47,7 +47,7 @@ VALIDATION_TIMEOUT = 30.0 # seconds
47
47
 
48
48
  def handle_sigterm(signum, frame):
49
49
  """Handle SIGTERM signal."""
50
- logger.info("Received SIGTERM signal")
50
+ logger.debug("Received SIGTERM signal")
51
51
  raise SystemExit(0)
52
52
 
53
53
 
@@ -74,7 +74,7 @@ class ServerState:
74
74
  async def initiate_shutdown(self, force: bool = False):
75
75
  """Initiate the shutdown process."""
76
76
  if not self.is_shutting_down or force:
77
- logger.info("Initiating server shutdown...")
77
+ logger.debug("Initiating server shutdown...")
78
78
  self.is_shutting_down = True
79
79
  self.force_exit = force
80
80
  self.shutdown_initiated.set()
@@ -88,7 +88,7 @@ class ServerState:
88
88
  """Handle interrupt signal."""
89
89
  self.interrupt_count += 1
90
90
  if self.interrupt_count == 1:
91
- logger.info("Graceful shutdown initiated (press Ctrl+C again to force)")
91
+ logger.debug("Graceful shutdown initiated (press Ctrl+C again to force)")
92
92
  asyncio.create_task(self.initiate_shutdown(force=False))
93
93
  else:
94
94
  logger.warning("Forced shutdown initiated...")
@@ -277,7 +277,7 @@ class AgentState:
277
277
  # Override ask_for_user_validation with SSE-based method
278
278
  self.agent.ask_for_user_validation = self.sse_ask_for_user_validation
279
279
 
280
- logger.info(f"Agent initialized with model: {model_name}")
280
+ logger.debug(f"Agent initialized with model: {model_name}")
281
281
  except Exception as e:
282
282
  logger.error(f"Failed to initialize agent: {e}", exc_info=True)
283
283
  raise
@@ -316,7 +316,7 @@ class AgentState:
316
316
  console_print_events(event_type, data)
317
317
 
318
318
  # Log event details
319
- logger.info(f"Agent Event: {event_type}")
319
+ logger.debug(f"Agent Event: {event_type}")
320
320
  logger.debug(f"Event Data: {data}")
321
321
 
322
322
  # Broadcast to clients
@@ -334,7 +334,7 @@ class AgentState:
334
334
  async def cleanup(self):
335
335
  """Clean up resources during shutdown."""
336
336
  try:
337
- logger.info("Cleaning up resources...")
337
+ logger.debug("Cleaning up resources...")
338
338
  if server_state.force_exit:
339
339
  logger.warning("Forced cleanup - skipping graceful shutdown")
340
340
  return
@@ -349,7 +349,7 @@ class AgentState:
349
349
  self.validation_responses.clear()
350
350
  # Clear agent
351
351
  self.agent = None
352
- logger.info("Cleanup completed")
352
+ logger.debug("Cleanup completed")
353
353
  except TimeoutError:
354
354
  logger.warning(f"Cleanup timed out after {SHUTDOWN_TIMEOUT} seconds")
355
355
  except Exception as e:
@@ -429,7 +429,7 @@ class AgentState:
429
429
  with self.queue_lock:
430
430
  if task_id in self.task_queues:
431
431
  del self.task_queues[task_id]
432
- logger.info(f"Removed event queue for task_id: {task_id}")
432
+ logger.debug(f"Removed event queue for task_id: {task_id}")
433
433
 
434
434
 
435
435
  # Initialize global states
@@ -448,11 +448,11 @@ async def lifespan(app: FastAPI):
448
448
  loop.add_signal_handler(sig, lambda s=sig: asyncio.create_task(handle_shutdown(s)))
449
449
  yield
450
450
  finally:
451
- logger.info("Shutting down server gracefully...")
451
+ logger.debug("Shutting down server gracefully...")
452
452
  await server_state.initiate_shutdown()
453
453
  await agent_state.cleanup()
454
454
  server_state.shutdown_complete.set()
455
- logger.info("Server shutdown complete")
455
+ logger.debug("Server shutdown complete")
456
456
 
457
457
 
458
458
  async def handle_shutdown(sig):
@@ -527,7 +527,7 @@ async def event_stream(request: Request, task_id: Optional[str] = None) -> Strea
527
527
  async def event_generator() -> AsyncGenerator[str, None]:
528
528
  # Ensure unique client-task combination
529
529
  client_id = agent_state.add_client(task_id)
530
- logger.info(f"Client {client_id} subscribed to {'task_id: ' + task_id if task_id else 'all events'}")
530
+ logger.debug(f"Client {client_id} subscribed to {'task_id: ' + task_id if task_id else 'all events'}")
531
531
 
532
532
  try:
533
533
  while not server_state.is_shutting_down:
@@ -557,7 +557,7 @@ async def event_stream(request: Request, task_id: Optional[str] = None) -> Strea
557
557
  finally:
558
558
  # Clean up the client's event queue
559
559
  agent_state.remove_client(client_id, task_id)
560
- logger.info(f"Client {client_id} {'unsubscribed from task_id: ' + task_id if task_id else 'disconnected'}")
560
+ logger.debug(f"Client {client_id} {'unsubscribed from task_id: ' + task_id if task_id else 'disconnected'}")
561
561
 
562
562
  return StreamingResponse(
563
563
  event_generator(),
@@ -629,5 +629,5 @@ if __name__ == "__main__":
629
629
  try:
630
630
  server.run()
631
631
  except KeyboardInterrupt:
632
- logger.info("Received keyboard interrupt")
632
+ logger.debug("Received keyboard interrupt")
633
633
  sys.exit(1)
@@ -35,7 +35,7 @@ class ServerState:
35
35
  async def initiate_shutdown(self, force: bool = False):
36
36
  """Initiate the shutdown process."""
37
37
  if not self.is_shutting_down or force:
38
- logger.info("Initiating server shutdown...")
38
+ logger.debug("Initiating server shutdown...")
39
39
  self.is_shutting_down = True
40
40
  self.force_exit = force
41
41
  self.shutdown_initiated.set()
@@ -48,7 +48,7 @@ class ServerState:
48
48
  """Handle interrupt signal."""
49
49
  self.interrupt_count += 1
50
50
  if self.interrupt_count == 1:
51
- logger.info("Graceful shutdown initiated (press Ctrl+C again to force)")
51
+ logger.debug("Graceful shutdown initiated (press Ctrl+C again to force)")
52
52
  asyncio.create_task(self.initiate_shutdown(force=False))
53
53
  else:
54
54
  logger.warning("Forced shutdown initiated...")
@@ -95,7 +95,7 @@ class AgentState:
95
95
  if task_id not in self.agents:
96
96
  self.agents[task_id] = self.create_agent_for_task(task_id)
97
97
 
98
- logger.info(f"New client connected: {client_id} for task: {task_id}")
98
+ logger.debug(f"New client connected: {client_id} for task: {task_id}")
99
99
  return client_id
100
100
 
101
101
  def create_agent_for_task(self, task_id: str) -> Any:
@@ -109,7 +109,7 @@ class AgentState:
109
109
  """
110
110
  # Placeholder for agent creation logic
111
111
  agent = ... # Replace with actual agent creation logic
112
- logger.info(f"Agent created for task: {task_id}")
112
+ logger.debug(f"Agent created for task: {task_id}")
113
113
  return agent
114
114
 
115
115
  def get_agent_for_task(self, task_id: str) -> Optional[Any]:
@@ -128,7 +128,7 @@ class AgentState:
128
128
  with self.queue_lock:
129
129
  if client_id in self.event_queues:
130
130
  del self.event_queues[client_id]
131
- logger.info(f"Client disconnected: {client_id}")
131
+ logger.debug(f"Client disconnected: {client_id}")
132
132
 
133
133
  def _format_data_for_client(self, data: Dict[str, Any]) -> Dict[str, Any]:
134
134
  """Format data for client consumption."""
@@ -18,18 +18,18 @@ class ToolManager(BaseModel):
18
18
 
19
19
  def add(self, tool: Tool):
20
20
  """Add a tool to the tool dictionary."""
21
- logger.info(f"Adding tool: {tool.name} to tool dictionary")
21
+ logger.debug(f"Adding tool: {tool.name} to tool dictionary")
22
22
  self.tools[tool.name] = tool
23
23
 
24
24
  def add_list(self, tools: list[Tool]):
25
25
  """Add a list of tools to the tool dictionary."""
26
- logger.info(f"Adding {len(tools)} tools to tool dictionary")
26
+ logger.debug(f"Adding {len(tools)} tools to tool dictionary")
27
27
  for tool in tools:
28
28
  self.add(tool)
29
29
 
30
30
  def remove(self, tool_name: str) -> bool:
31
31
  """Remove a tool from the tool dictionary."""
32
- logger.info(f"Removing tool: {tool_name} from tool dictionary")
32
+ logger.debug(f"Removing tool: {tool_name} from tool dictionary")
33
33
  del self.tools[tool_name]
34
34
  return True
35
35
 
@@ -45,7 +45,7 @@ class ToolManager(BaseModel):
45
45
 
46
46
  def execute(self, tool_name: str, **kwargs) -> str:
47
47
  """Execute a tool from the tool dictionary."""
48
- logger.info(f"Executing tool: {tool_name} with arguments: {kwargs}")
48
+ logger.debug(f"Executing tool: {tool_name} with arguments: {kwargs}")
49
49
  try:
50
50
  result = self.tools[tool_name].execute(**kwargs)
51
51
  logger.debug(f"Tool {tool_name} execution completed successfully")
@@ -8,6 +8,7 @@ from .execute_bash_command_tool import ExecuteBashCommandTool
8
8
  from .input_question_tool import InputQuestionTool
9
9
  from .list_directory_tool import ListDirectoryTool
10
10
  from .llm_tool import LLMTool
11
+ from .llm_vision_tool import LLMVisionTool
11
12
  from .markitdown_tool import MarkitdownTool
12
13
  from .nodejs_tool import NodeJsTool
13
14
  from .python_tool import PythonTool
@@ -30,6 +31,7 @@ __all__ = [
30
31
  "InputQuestionTool",
31
32
  "ListDirectoryTool",
32
33
  "LLMTool",
34
+ "LLMVisionTool",
33
35
  "ExecuteBashCommandTool",
34
36
  "PythonTool",
35
37
  "ElixirTool",
@@ -49,7 +49,7 @@ class AgentTool(Tool):
49
49
  )
50
50
 
51
51
  @model_validator(mode="before")
52
- def validate_agent(cls, values: dict[str, Any]) -> dict[str, Any]:
52
+ def validate_agent(cls, values: dict[str, Any]) -> dict[str, Any]: # noqa: N805
53
53
  """Validate the provided agent and its role."""
54
54
  agent = values.get("agent")
55
55
  # Lazy import to avoid circular dependency
@@ -122,7 +122,7 @@ end
122
122
 
123
123
  # Execute the main function
124
124
  ElixirScript.main()
125
- """ % script.strip()
125
+ """ % script.strip() # noqa: UP031
126
126
 
127
127
  with open(script_path, "w", encoding="utf-8") as f:
128
128
  f.write(wrapped_script.strip())
@@ -17,11 +17,11 @@ class LLMTool(Tool):
17
17
  description: str = Field(
18
18
  default=(
19
19
  "Generates answers to questions using a specified language model. "
20
- "Note: This tool operates in isolation and does not have access to: "
20
+ "Note: This tool operates in total isolation and does not have access to: "
21
21
  " - Memory: All context must be explicitly provided in the prompt. "
22
22
  " - File system."
23
23
  " - Variables: Any required variables should be interpolated into the prompt (e.g., $var1$). "
24
- " - Other tools: It cannot invoke or interact with other tools. "
24
+ " - No access to Tools, URL, file, or other external resources. "
25
25
  "Ensure all necessary information is included directly in your prompt."
26
26
  )
27
27
  )
@@ -54,6 +54,7 @@ class LLMTool(Tool):
54
54
 
55
55
  model_name: str = Field(..., description="The name of the language model to use")
56
56
  generative_model: GenerativeModel | None = Field(default=None)
57
+ system_prompt: str | None = Field(default=None)
57
58
 
58
59
  def model_post_init(self, __context):
59
60
  """Initialize the generative model after model initialization."""
@@ -61,7 +62,10 @@ class LLMTool(Tool):
61
62
  self.generative_model = GenerativeModel(model=self.model_name)
62
63
  logging.debug(f"Initialized LLMTool with model: {self.model_name}")
63
64
 
64
- def execute(self, system_prompt: str, prompt: str, temperature: str = "0.7") -> str:
65
+
66
+ def execute(
67
+ self, system_prompt: str | None = None, prompt: str | None = None, temperature: str | None = None
68
+ ) -> str:
65
69
  """Execute the tool to generate an answer based on the provided question.
66
70
 
67
71
  Args:
@@ -84,9 +88,11 @@ class LLMTool(Tool):
84
88
  logging.error(f"Invalid temperature value: {temperature}")
85
89
  raise ValueError(f"Invalid temperature value: {temperature}") from ve
86
90
 
91
+ used_system_prompt = self.system_prompt if self.system_prompt else system_prompt
92
+
87
93
  # Prepare the messages history
88
94
  messages_history = [
89
- Message(role="system", content=system_prompt),
95
+ Message(role="system", content=used_system_prompt),
90
96
  Message(role="user", content=prompt),
91
97
  ]
92
98
 
@@ -111,9 +117,12 @@ class LLMTool(Tool):
111
117
 
112
118
  if __name__ == "__main__":
113
119
  # Example usage of LLMTool
114
- tool = LLMTool(model_name="gpt-4o-mini")
120
+ tool = LLMTool(model_name="openrouter/openai/gpt-4o-mini")
115
121
  system_prompt = 'Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the context, say "I don\'t know".'
116
122
  question = "What is the meaning of life?"
117
123
  temperature = "0.7"
118
124
  answer = tool.execute(system_prompt=system_prompt, prompt=question, temperature=temperature)
119
125
  print(answer)
126
+ pirate = LLMTool(model_name="openrouter/openai/gpt-4o-mini", system_prompt="You are a pirate.")
127
+ pirate_answer = pirate.execute(system_prompt=system_prompt, prompt=question, temperature=temperature)
128
+ print(pirate_answer)