quantalogic 0.2.10__py3-none-any.whl → 0.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/agent.py +5 -7
- quantalogic/agent_config.py +1 -1
- quantalogic/coding_agent.py +2 -2
- quantalogic/generative_model.py +14 -15
- quantalogic/main.py +51 -9
- quantalogic/print_event.py +3 -1
- quantalogic/search_agent.py +41 -0
- quantalogic/server/agent_server.py +13 -13
- quantalogic/server/state.py +5 -5
- quantalogic/tool_manager.py +4 -4
- quantalogic/tools/__init__.py +4 -0
- quantalogic/tools/agent_tool.py +1 -1
- quantalogic/tools/elixir_tool.py +1 -1
- quantalogic/tools/llm_tool.py +12 -3
- quantalogic/tools/llm_vision_tool.py +4 -17
- quantalogic/tools/nodejs_tool.py +2 -2
- quantalogic/tools/python_tool.py +2 -2
- quantalogic/tools/replace_in_file_tool.py +3 -3
- quantalogic/tools/ripgrep_tool.py +3 -3
- quantalogic/tools/serpapi_search_tool.py +169 -0
- quantalogic/tools/tool.py +3 -3
- quantalogic/tools/unified_diff_tool.py +1 -0
- quantalogic/tools/wikipedia_search_tool.py +169 -0
- quantalogic/tools/write_file_tool.py +1 -0
- quantalogic/utils/check_version.py +37 -0
- quantalogic/utils/download_http_file.py +4 -4
- quantalogic/utils/read_http_text_content.py +2 -2
- quantalogic/version.py +3 -2
- {quantalogic-0.2.10.dist-info → quantalogic-0.2.13.dist-info}/METADATA +61 -8
- {quantalogic-0.2.10.dist-info → quantalogic-0.2.13.dist-info}/RECORD +33 -29
- {quantalogic-0.2.10.dist-info → quantalogic-0.2.13.dist-info}/LICENSE +0 -0
- {quantalogic-0.2.10.dist-info → quantalogic-0.2.13.dist-info}/WHEEL +0 -0
- {quantalogic-0.2.10.dist-info → quantalogic-0.2.13.dist-info}/entry_points.txt +0 -0
quantalogic/agent.py
CHANGED
@@ -1,7 +1,5 @@
 """Enhanced QuantaLogic agent implementing the ReAct framework."""
 
-import os
-import sys
 from collections.abc import Callable
 from datetime import datetime
 from typing import Any
@@ -583,7 +581,9 @@ class Agent(BaseModel):
             "\n"
             "Available variables:\n"
             "\n"
-            f"{', '.join(self.variable_store.keys())}\n"
+            f"{', '.join(self.variable_store.keys())}\n"
+            if len(self.variable_store.keys()) > 0
+            else "None\n"
         )
         return prompt_use_variables
 
@@ -630,10 +630,10 @@ class Agent(BaseModel):
 
     def _generate_task_summary(self, content: str) -> str:
         """Generate a concise summary of the given content using the generative model.
-
+
         Args:
             content (str): The content to summarize
-
+
         Returns:
             str: Generated summary
         """
@@ -670,5 +670,3 @@ class Agent(BaseModel):
            "session_add_message",
            {"role": "assistant", "content": assistant_content},
        )
-
-
quantalogic/agent_config.py
CHANGED
quantalogic/coding_agent.py
CHANGED
@@ -18,7 +18,7 @@ from quantalogic.utils import get_coding_environment
 from quantalogic.utils.get_quantalogic_rules_content import get_quantalogic_rules_file_content
 
 
-def create_coding_agent(model_name: str,vision_model_name: str | None = None, basic: bool = False) -> Agent:
+def create_coding_agent(model_name: str, vision_model_name: str | None = None, basic: bool = False) -> Agent:
     """Creates and configures a coding agent with a comprehensive set of tools.
 
     Args:
@@ -69,7 +69,7 @@ def create_coding_agent(model_name: str,vision_model_name: str | None = None, ba
         LLMTool(
             model_name=model_name,
             system_prompt="You are a software expert, your role is to answer coding questions.",
-            name="coding_consultant", # Handles implementation-level coding questions
+            name="coding_consultant",  # Handles implementation-level coding questions
         )
     )
     tools.append(
quantalogic/generative_model.py
CHANGED
@@ -107,7 +107,9 @@ class GenerativeModel:
         )
 
     # Retry on specific retriable exceptions
-    def generate_with_history(
+    def generate_with_history(
+        self, messages_history: list[Message], prompt: str, image_url: str | None = None
+    ) -> ResponseStats:
         """Generate a response with conversation history and optional image.
 
         Generates a response based on previous conversation messages,
@@ -128,20 +130,17 @@ class GenerativeModel:
             Exception: For other unexpected errors.
         """
         messages = [{"role": msg.role, "content": str(msg.content)} for msg in messages_history]
-
+
         if image_url:
-            messages.append(
-
-
-
-                    "type": "image_url",
-
-
-
-                }
-            ]
-            })
+            messages.append(
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": str(prompt)},
+                        {"type": "image_url", "image_url": {"url": image_url}},
+                    ],
+                }
+            )
         else:
             messages.append({"role": "user", "content": str(prompt)})
 
@@ -249,7 +248,7 @@ class GenerativeModel:
             logger.debug(f"Model info retrieved: {model_info.keys()}")
         else:
             logger.debug("No model info available")
-
+
         return model_info
 
     def get_model_max_input_tokens(self) -> int:
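Note: the reworked `generate_with_history` now appends a multimodal user message when `image_url` is provided. A minimal sketch of the payload shape it builds (illustrative only; the prompt text and URL below are made up):

```python
# Shape of the message appended by generate_with_history when an image URL is supplied.
prompt = "Describe this image."                  # hypothetical prompt
image_url = "https://example.com/photo.jpg"      # hypothetical URL

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": str(prompt)},
        {"type": "image_url", "image_url": {"url": image_url}},
    ],
}
```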
quantalogic/main.py
CHANGED
@@ -2,6 +2,7 @@
 """Main module for the QuantaLogic agent."""
 
 # Standard library imports
+import random
 import sys
 from typing import Optional
 
@@ -9,6 +10,9 @@ from typing import Optional
 import click
 from loguru import logger
 
+from quantalogic.utils.check_version import check_if_is_latest_version
+from quantalogic.version import get_version
+
 # Configure logger
 logger.remove()  # Remove default logger
 
@@ -28,9 +32,10 @@ from quantalogic.agent_config import (  # noqa: E402
 )
 from quantalogic.interactive_text_editor import get_multiline_input  # noqa: E402
 from quantalogic.print_event import console_print_events  # noqa: E402
-from quantalogic.version import get_version  # noqa: E402
 
-
+from quantalogic.search_agent import create_search_agent
+
+AGENT_MODES = ["code", "basic", "interpreter", "full", "code-basic","search"]
 
 
 def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | None) -> Agent:
@@ -47,9 +52,35 @@ def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | N
         return create_full_agent(model_name, vision_model_name)
     elif mode == "interpreter":
         return create_interpreter_agent(model_name, vision_model_name)
+    elif mode == "search":
+        return create_search_agent(model_name)
     else:
         raise ValueError(f"Unknown agent mode: {mode}")
 
+def check_new_version():
+    # Randomly check for updates (1 in 10 chance)
+    if random.randint(1, 10) == 1:
+        try:
+            current_version = get_version()
+            has_new_version, latest_version = check_if_is_latest_version()
+
+            if has_new_version:
+                console = Console()
+                console.print(
+                    Panel.fit(
+                        f"[yellow]⚠️ Update Available![/yellow]\n\n"
+                        f"Current version: [bold]{current_version}[/bold]\n"
+                        f"Latest version: [bold]{latest_version}[/bold]\n\n"
+                        "To update, run:\n"
+                        "[bold]pip install --upgrade quantalogic[/bold]\n"
+                        "or if using pipx:\n"
+                        "[bold]pipx upgrade quantalogic[/bold]",
+                        title="[bold]Update Available[/bold]",
+                        border_style="yellow",
+                    )
+                )
+        except Exception:
+            return
 
 def configure_logger(log_level: str) -> None:
     """Configure the logger with the specified log level and format."""
@@ -59,7 +90,7 @@ def configure_logger(log_level: str) -> None:
         level=log_level.upper(),
         format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{process}</cyan> | <magenta>{file}:{line}</magenta> | {message}",
     )
-    logger.
+    logger.debug(f"Log level set to: {log_level}")
 
 
 def set_litellm_verbose(verbose_mode: bool) -> None:
@@ -192,17 +223,20 @@ def task(
     console = Console()
     switch_verbose(verbose, log)
 
+
     try:
         if file:
             task_content = get_task_from_file(file)
         else:
             if task:
+                check_new_version()
                 task_content = task
             else:
                 display_welcome_message(console, model_name, vision_model_name)
-
+                check_new_version()
+                logger.debug("Waiting for user input...")
                 task_content = get_multiline_input(console).strip()
-                logger.
+                logger.debug(f"User input received. Task content: {task_content}")
                 if not task_content:
                     logger.info("No task provided. Exiting...")
                     console.print("[yellow]No task provided. Exiting...[/yellow]")
@@ -216,9 +250,17 @@ def task(
                 border_style="blue",
             )
         )
-
-
-
+        if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
+            console.print("[yellow]Task submission cancelled. Exiting...[/yellow]")
+            sys.exit(0)
+
+        console.print(
+            Panel.fit(
+                "[green]✓ Task successfully submitted! Processing...[/green]",
+                title="[bold]Status[/bold]",
+                border_style="green",
+            )
+        )
 
         logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
         agent = create_agent_for_mode(mode, model_name, vision_model_name=vision_model_name)
@@ -259,7 +301,7 @@ def task(
 
 
 def main():
-    """Entry point
+    """Main Entry point"""
     cli()
 
 
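Note: `check_new_version` only runs on roughly 1 in 10 invocations and swallows any failure. A minimal sketch of how the underlying helpers appear to be consumed, assuming `check_if_is_latest_version()` returns a `(has_new_version, latest_version)` tuple as the diff suggests:

```python
# Sketch only: an unconditional variant of the update check added in main.py.
from quantalogic.utils.check_version import check_if_is_latest_version
from quantalogic.version import get_version

def warn_if_outdated() -> None:
    """Print a plain hint when a newer quantalogic release is available."""
    try:
        has_new_version, latest_version = check_if_is_latest_version()
        if has_new_version:
            print(f"quantalogic {get_version()} installed; {latest_version} available.")
            print("Upgrade with: pip install --upgrade quantalogic")
    except Exception:
        pass  # mirror the diff: never let the version check break the CLI
```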
quantalogic/print_event.py
CHANGED
@@ -1,12 +1,14 @@
 """Print events with rich formatting."""
 
+from typing import Any
+
 from rich import box
 from rich.console import Console
 from rich.panel import Panel
 from rich.tree import Tree
 
 
-def console_print_events(event: str, data: dict[str,
+def console_print_events(event: str, data: dict[str, Any] | None = None):
     """Print events with rich formatting.
 
     Args:
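Note: `console_print_events` now accepts an optional `data` payload. A short usage sketch (the event names below are hypothetical):

```python
from quantalogic.print_event import console_print_events

console_print_events("task_started", {"task": "summarize README"})  # with event data
console_print_events("heartbeat")  # data now defaults to None
```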
quantalogic/search_agent.py
ADDED
@@ -0,0 +1,41 @@
+from quantalogic.agent import Agent
+from quantalogic.tools import InputQuestionTool, SerpApiSearchTool, TaskCompleteTool, WikipediaSearchTool, ReadFileBlockTool,ReadFileTool, MarkitdownTool, RipgrepTool
+
+
+def create_search_agent(model_name: str) -> Agent:
+    """Creates and configures a search agent with web and knowledge search tools.
+
+    Args:
+        model_name (str): Name of the language model to use for the agent's core capabilities
+
+    Returns:
+        Agent: A fully configured search agent instance with:
+            - Web search capabilities (SerpAPI)
+            - Knowledge search capabilities (Wikipedia)
+            - Basic interaction tools
+    """
+    specific_expertise = (
+        "Search expert focused on web and knowledge search operations."
+        "Specializes in finding and summarizing information from various sources."
+    )
+
+    tools = [
+        # Search tools
+        SerpApiSearchTool(),  # Web search capabilities
+        WikipediaSearchTool(),  # Knowledge search capabilities
+        # Basic interaction tools
+        TaskCompleteTool(),  # Marks task completion
+        InputQuestionTool(),  # Handles user queries
+        # LLM tools
+        ReadFileBlockTool(),  # Reads specific file sections
+        ReadFileTool(),  # Reads entire file
+        MarkitdownTool(),  # Converts markdown to text
+        # Code search tools
+        RipgrepTool(),  # Code search capabilities
+    ]
+
+    return Agent(
+        model_name=model_name,
+        tools=tools,
+        specific_expertise=specific_expertise,
+    )
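Note: the new `search` CLI mode (wired up in main.py above) builds this agent. A minimal usage sketch; the model name is just an example, and `SerpApiSearchTool` presumably needs SerpAPI credentials configured in the environment (not shown in this diff):

```python
from quantalogic.search_agent import create_search_agent

# Same construction the CLI performs for the new "search" mode.
agent = create_search_agent("openrouter/openai/gpt-4o-mini")
```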
quantalogic/server/agent_server.py
CHANGED
@@ -47,7 +47,7 @@ VALIDATION_TIMEOUT = 30.0  # seconds
 
 def handle_sigterm(signum, frame):
     """Handle SIGTERM signal."""
-    logger.
+    logger.debug("Received SIGTERM signal")
     raise SystemExit(0)
 
 
@@ -74,7 +74,7 @@ class ServerState:
     async def initiate_shutdown(self, force: bool = False):
         """Initiate the shutdown process."""
         if not self.is_shutting_down or force:
-            logger.
+            logger.debug("Initiating server shutdown...")
             self.is_shutting_down = True
             self.force_exit = force
             self.shutdown_initiated.set()
@@ -88,7 +88,7 @@ class ServerState:
         """Handle interrupt signal."""
         self.interrupt_count += 1
         if self.interrupt_count == 1:
-            logger.
+            logger.debug("Graceful shutdown initiated (press Ctrl+C again to force)")
             asyncio.create_task(self.initiate_shutdown(force=False))
         else:
             logger.warning("Forced shutdown initiated...")
@@ -277,7 +277,7 @@ class AgentState:
            # Override ask_for_user_validation with SSE-based method
            self.agent.ask_for_user_validation = self.sse_ask_for_user_validation
 
-            logger.
+            logger.debug(f"Agent initialized with model: {model_name}")
        except Exception as e:
            logger.error(f"Failed to initialize agent: {e}", exc_info=True)
            raise
@@ -316,7 +316,7 @@ class AgentState:
            console_print_events(event_type, data)
 
            # Log event details
-            logger.
+            logger.debug(f"Agent Event: {event_type}")
            logger.debug(f"Event Data: {data}")
 
            # Broadcast to clients
@@ -334,7 +334,7 @@ class AgentState:
    async def cleanup(self):
        """Clean up resources during shutdown."""
        try:
-            logger.
+            logger.debug("Cleaning up resources...")
            if server_state.force_exit:
                logger.warning("Forced cleanup - skipping graceful shutdown")
                return
@@ -349,7 +349,7 @@ class AgentState:
            self.validation_responses.clear()
            # Clear agent
            self.agent = None
-            logger.
+            logger.debug("Cleanup completed")
        except TimeoutError:
            logger.warning(f"Cleanup timed out after {SHUTDOWN_TIMEOUT} seconds")
        except Exception as e:
@@ -429,7 +429,7 @@ class AgentState:
        with self.queue_lock:
            if task_id in self.task_queues:
                del self.task_queues[task_id]
-                logger.
+                logger.debug(f"Removed event queue for task_id: {task_id}")
 
 
 # Initialize global states
@@ -448,11 +448,11 @@ async def lifespan(app: FastAPI):
            loop.add_signal_handler(sig, lambda s=sig: asyncio.create_task(handle_shutdown(s)))
        yield
    finally:
-        logger.
+        logger.debug("Shutting down server gracefully...")
        await server_state.initiate_shutdown()
        await agent_state.cleanup()
        server_state.shutdown_complete.set()
-        logger.
+        logger.debug("Server shutdown complete")
 
 
 async def handle_shutdown(sig):
@@ -527,7 +527,7 @@ async def event_stream(request: Request, task_id: Optional[str] = None) -> Strea
    async def event_generator() -> AsyncGenerator[str, None]:
        # Ensure unique client-task combination
        client_id = agent_state.add_client(task_id)
-        logger.
+        logger.debug(f"Client {client_id} subscribed to {'task_id: ' + task_id if task_id else 'all events'}")
 
        try:
            while not server_state.is_shutting_down:
@@ -557,7 +557,7 @@ async def event_stream(request: Request, task_id: Optional[str] = None) -> Strea
    finally:
        # Clean up the client's event queue
        agent_state.remove_client(client_id, task_id)
-        logger.
+        logger.debug(f"Client {client_id} {'unsubscribed from task_id: ' + task_id if task_id else 'disconnected'}")
 
    return StreamingResponse(
        event_generator(),
@@ -629,5 +629,5 @@ if __name__ == "__main__":
    try:
        server.run()
    except KeyboardInterrupt:
-        logger.
+        logger.debug("Received keyboard interrupt")
        sys.exit(1)
quantalogic/server/state.py
CHANGED
@@ -35,7 +35,7 @@ class ServerState:
     async def initiate_shutdown(self, force: bool = False):
         """Initiate the shutdown process."""
         if not self.is_shutting_down or force:
-            logger.
+            logger.debug("Initiating server shutdown...")
             self.is_shutting_down = True
             self.force_exit = force
             self.shutdown_initiated.set()
@@ -48,7 +48,7 @@ class ServerState:
         """Handle interrupt signal."""
         self.interrupt_count += 1
         if self.interrupt_count == 1:
-            logger.
+            logger.debug("Graceful shutdown initiated (press Ctrl+C again to force)")
             asyncio.create_task(self.initiate_shutdown(force=False))
         else:
             logger.warning("Forced shutdown initiated...")
@@ -95,7 +95,7 @@ class AgentState:
         if task_id not in self.agents:
             self.agents[task_id] = self.create_agent_for_task(task_id)
 
-        logger.
+        logger.debug(f"New client connected: {client_id} for task: {task_id}")
         return client_id
 
     def create_agent_for_task(self, task_id: str) -> Any:
@@ -109,7 +109,7 @@ class AgentState:
         """
         # Placeholder for agent creation logic
         agent = ...  # Replace with actual agent creation logic
-        logger.
+        logger.debug(f"Agent created for task: {task_id}")
         return agent
 
     def get_agent_for_task(self, task_id: str) -> Optional[Any]:
@@ -128,7 +128,7 @@ class AgentState:
         with self.queue_lock:
             if client_id in self.event_queues:
                 del self.event_queues[client_id]
-                logger.
+                logger.debug(f"Client disconnected: {client_id}")
 
     def _format_data_for_client(self, data: Dict[str, Any]) -> Dict[str, Any]:
         """Format data for client consumption."""
quantalogic/tool_manager.py
CHANGED
@@ -18,18 +18,18 @@ class ToolManager(BaseModel):
 
     def add(self, tool: Tool):
         """Add a tool to the tool dictionary."""
-        logger.
+        logger.debug(f"Adding tool: {tool.name} to tool dictionary")
         self.tools[tool.name] = tool
 
     def add_list(self, tools: list[Tool]):
         """Add a list of tools to the tool dictionary."""
-        logger.
+        logger.debug(f"Adding {len(tools)} tools to tool dictionary")
         for tool in tools:
             self.add(tool)
 
     def remove(self, tool_name: str) -> bool:
         """Remove a tool from the tool dictionary."""
-        logger.
+        logger.debug(f"Removing tool: {tool_name} from tool dictionary")
         del self.tools[tool_name]
         return True
 
@@ -45,7 +45,7 @@ class ToolManager(BaseModel):
 
     def execute(self, tool_name: str, **kwargs) -> str:
         """Execute a tool from the tool dictionary."""
-        logger.
+        logger.debug(f"Executing tool: {tool_name} with arguments: {kwargs}")
         try:
             result = self.tools[tool_name].execute(**kwargs)
             logger.debug(f"Tool {tool_name} execution completed successfully")
quantalogic/tools/__init__.py
CHANGED
@@ -17,12 +17,16 @@ from .read_file_tool import ReadFileTool
 from .replace_in_file_tool import ReplaceInFileTool
 from .ripgrep_tool import RipgrepTool
 from .search_definition_names import SearchDefinitionNames
+from .serpapi_search_tool import SerpApiSearchTool
 from .task_complete_tool import TaskCompleteTool
 from .tool import Tool, ToolArgument
 from .unified_diff_tool import UnifiedDiffTool
+from .wikipedia_search_tool import WikipediaSearchTool
 from .write_file_tool import WriteFileTool
 
 __all__ = [
+    "WikipediaSearchTool",
+    "SerpApiSearchTool",
     "Tool",
     "ToolArgument",
     "TaskCompleteTool",
quantalogic/tools/agent_tool.py
CHANGED
@@ -49,7 +49,7 @@ class AgentTool(Tool):
     )
 
     @model_validator(mode="before")
-    def validate_agent(cls, values: dict[str, Any]) -> dict[str, Any]:
+    def validate_agent(cls, values: dict[str, Any]) -> dict[str, Any]:  # noqa: N805
         """Validate the provided agent and its role."""
         agent = values.get("agent")
         # Lazy import to avoid circular dependency
quantalogic/tools/elixir_tool.py
CHANGED
quantalogic/tools/llm_tool.py
CHANGED
@@ -54,6 +54,7 @@ class LLMTool(Tool):
 
     model_name: str = Field(..., description="The name of the language model to use")
     generative_model: GenerativeModel | None = Field(default=None)
+    system_prompt: str | None = Field(default=None)
 
     def model_post_init(self, __context):
         """Initialize the generative model after model initialization."""
@@ -61,7 +62,10 @@ class LLMTool(Tool):
             self.generative_model = GenerativeModel(model=self.model_name)
         logging.debug(f"Initialized LLMTool with model: {self.model_name}")
 
-
+
+    def execute(
+        self, system_prompt: str | None = None, prompt: str | None = None, temperature: str | None = None
+    ) -> str:
         """Execute the tool to generate an answer based on the provided question.
 
         Args:
@@ -84,9 +88,11 @@ class LLMTool(Tool):
             logging.error(f"Invalid temperature value: {temperature}")
             raise ValueError(f"Invalid temperature value: {temperature}") from ve
 
+        used_system_prompt = self.system_prompt if self.system_prompt else system_prompt
+
         # Prepare the messages history
         messages_history = [
-            Message(role="system", content=
+            Message(role="system", content=used_system_prompt),
             Message(role="user", content=prompt),
         ]
 
@@ -111,9 +117,12 @@ class LLMTool(Tool):
 
 if __name__ == "__main__":
     # Example usage of LLMTool
-    tool = LLMTool(model_name="gpt-4o-mini")
+    tool = LLMTool(model_name="openrouter/openai/gpt-4o-mini")
     system_prompt = 'Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the context, say "I don\'t know".'
     question = "What is the meaning of life?"
     temperature = "0.7"
     answer = tool.execute(system_prompt=system_prompt, prompt=question, temperature=temperature)
     print(answer)
+    pirate = LLMTool(model_name="openrouter/openai/gpt-4o-mini", system_prompt="You are a pirate.")
+    pirate_answer = pirate.execute(system_prompt=system_prompt, prompt=question, temperature=temperature)
+    print(pirate_answer)
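Note: per the new `used_system_prompt` logic above, a `system_prompt` set on the LLMTool constructor takes precedence over the one passed to `execute()`. A small sketch mirroring the `__main__` example in the diff:

```python
from quantalogic.tools.llm_tool import LLMTool

pirate = LLMTool(model_name="openrouter/openai/gpt-4o-mini", system_prompt="You are a pirate.")
# The constructor-level system prompt wins over the per-call one below.
answer = pirate.execute(system_prompt="You are a historian.", prompt="Who was Blackbeard?", temperature="0.7")
print(answer)
```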
quantalogic/tools/llm_vision_tool.py
CHANGED
@@ -8,7 +8,7 @@ from pydantic import ConfigDict, Field
 from quantalogic.generative_model import GenerativeModel, Message
 from quantalogic.tools.tool import Tool, ToolArgument
 
-#DEFAULT_MODEL_NAME = "ollama/llama3.2-vision"
+# DEFAULT_MODEL_NAME = "ollama/llama3.2-vision"
 DEFAULT_MODEL_NAME = "openrouter/openai/gpt-4o-mini"
 
 
@@ -67,13 +67,7 @@ class LLMVisionTool(Tool):
             self.generative_model = GenerativeModel(model=self.model_name)
         logging.debug(f"Initialized LLMVisionTool with model: {self.model_name}")
 
-    def execute(
-        self,
-        system_prompt: str,
-        prompt: str,
-        image_url: str,
-        temperature: str = "0.7"
-    ) -> str:
+    def execute(self, system_prompt: str, prompt: str, image_url: str, temperature: str = "0.7") -> str:
         """Execute the tool to analyze an image and generate a response.
 
         Args:
@@ -112,9 +106,7 @@ class LLMVisionTool(Tool):
 
         try:
             response_stats = self.generative_model.generate_with_history(
-                messages_history=messages_history,
-                prompt=prompt,
-                image_url=image_url
+                messages_history=messages_history, prompt=prompt, image_url=image_url
             )
             response = response_stats.response.strip()
             logging.info(f"Generated response: {response}")
@@ -131,10 +123,5 @@ if __name__ == "__main__":
     question = "What is shown in this image? Describe it with details."
     image_url = "https://fastly.picsum.photos/id/767/200/300.jpg?hmac=j5YA1cRw-jS6fK3Mx2ooPwl2_TS3RSyLmFmiM9TqLC4"
     temperature = "0.7"
-    answer = tool.execute(
-        system_prompt=system_prompt,
-        prompt=question,
-        image_url=image_url,
-        temperature=temperature
-    )
+    answer = tool.execute(system_prompt=system_prompt, prompt=question, image_url=image_url, temperature=temperature)
     print(answer)
quantalogic/tools/nodejs_tool.py
CHANGED
@@ -342,14 +342,14 @@ class NodeJsTool(Tool):
             RuntimeError: If pulling the Docker image fails.
         """
         try:
-            logger.
+            logger.debug(f"Pulling Docker image: {docker_image}")
             subprocess.run(
                 ["docker", "pull", docker_image],
                 check=True,
                 capture_output=True,
                 text=True,
             )
-            logger.
+            logger.debug(f"Successfully pulled Docker image '{docker_image}'.")
         except subprocess.CalledProcessError as e:
             error_msg = f"Failed to pull Docker image '{docker_image}': {e.stderr.strip()}"
             logger.error(error_msg)
quantalogic/tools/python_tool.py
CHANGED
@@ -266,7 +266,7 @@ class PythonTool(Tool):
                 capture_output=True,
                 text=True,
             )
-            logger.
+            logger.debug(f"Successfully pulled Docker image '{docker_image}'.")
         except subprocess.CalledProcessError as e:
             error_msg = f"Failed to pull Docker image '{docker_image}': {e.stderr.strip()}"
             logger.error(error_msg)
@@ -365,7 +365,7 @@ class PythonTool(Tool):
             docker_run_cmd += ["bash", "-c", venv_and_run]
             logger.debug("Added script execution command to Docker run command.")
 
-            logger.
+            logger.debug(f"Executing Docker command: {' '.join(docker_run_cmd)}")
             try:
                 result = subprocess.run(
                     docker_run_cmd,