quantalogic 0.2.24__py3-none-any.whl → 0.2.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/.DS_Store +0 -0
- quantalogic/agent.py +18 -7
- quantalogic/agent_factory.py +106 -0
- quantalogic/generative_model.py +43 -15
- quantalogic/get_model_info.py +14 -0
- quantalogic/interactive_text_editor.py +4 -0
- quantalogic/main.py +25 -295
- quantalogic/task_file_reader.py +38 -0
- quantalogic/task_runner.py +284 -0
- quantalogic/tools/llm_tool.py +0 -1
- quantalogic/version_check.py +41 -0
- quantalogic/welcome_message.py +86 -0
- {quantalogic-0.2.24.dist-info → quantalogic-0.2.26.dist-info}/METADATA +1 -1
- {quantalogic-0.2.24.dist-info → quantalogic-0.2.26.dist-info}/RECORD +17 -10
- {quantalogic-0.2.24.dist-info → quantalogic-0.2.26.dist-info}/LICENSE +0 -0
- {quantalogic-0.2.24.dist-info → quantalogic-0.2.26.dist-info}/WHEEL +0 -0
- {quantalogic-0.2.24.dist-info → quantalogic-0.2.26.dist-info}/entry_points.txt +0 -0
quantalogic/.DS_Store
ADDED
Binary file
|
quantalogic/agent.py
CHANGED
@@ -140,7 +140,11 @@ class Agent(BaseModel):
|
|
140
140
|
logger.error(f"Failed to initialize agent: {str(e)}")
|
141
141
|
raise
|
142
142
|
|
143
|
-
def
|
143
|
+
def clear_memory(self):
|
144
|
+
"""Clear the memory and reset the session."""
|
145
|
+
self._reset_session(clear_memory=True)
|
146
|
+
|
147
|
+
def solve_task(self, task: str, max_iterations: int = 30, streaming: bool = False, clear_memory: bool = True) -> str:
|
144
148
|
"""Solve the given task using the ReAct framework.
|
145
149
|
|
146
150
|
Args:
|
@@ -148,18 +152,23 @@ class Agent(BaseModel):
|
|
148
152
|
max_iterations (int, optional): Maximum number of iterations to attempt solving the task.
|
149
153
|
Defaults to 30 to prevent infinite loops and ensure timely task completion.
|
150
154
|
streaming (bool, optional): Whether to use streaming mode for generating responses.
|
155
|
+
clear_memory (bool, optional): Whether to clear the memory before solving the task.
|
151
156
|
|
152
157
|
Returns:
|
153
158
|
str: The final response after task completion.
|
154
159
|
"""
|
155
160
|
logger.debug(f"Solving task... {task}")
|
156
|
-
self._reset_session(task_to_solve=task, max_iterations=max_iterations)
|
161
|
+
self._reset_session(task_to_solve=task, max_iterations=max_iterations,clear_memory=clear_memory)
|
157
162
|
|
158
163
|
# Generate task summary
|
159
164
|
self.task_to_solve_summary = self._generate_task_summary(task)
|
160
165
|
|
161
166
|
# Add system prompt to memory
|
162
|
-
|
167
|
+
# Check if system prompt is already in memory
|
168
|
+
# if not add it
|
169
|
+
# The system message is always the first message in memory
|
170
|
+
if not self.memory.memory or self.memory.memory[0].role != "system":
|
171
|
+
self.memory.add(Message(role="system", content=self.config.system_prompt))
|
163
172
|
|
164
173
|
self._emit_event(
|
165
174
|
"session_start",
|
@@ -263,13 +272,15 @@ class Agent(BaseModel):
|
|
263
272
|
|
264
273
|
return answer
|
265
274
|
|
266
|
-
def _reset_session(self, task_to_solve: str = "", max_iterations: int = 30):
|
275
|
+
def _reset_session(self, task_to_solve: str = "", max_iterations: int = 30,clear_memory: bool = True):
|
267
276
|
"""Reset the agent's session."""
|
268
277
|
logger.debug("Resetting session...")
|
269
278
|
self.task_to_solve = task_to_solve
|
270
|
-
|
271
|
-
|
272
|
-
|
279
|
+
if clear_memory:
|
280
|
+
logger.debug("Clearing memory...")
|
281
|
+
self.memory.reset()
|
282
|
+
self.variable_store.reset()
|
283
|
+
self.total_tokens = 0
|
273
284
|
self.current_iteration = 0
|
274
285
|
self.max_output_tokens = self.model.get_model_max_output_tokens() or DEFAULT_MAX_OUTPUT_TOKENS
|
275
286
|
self.max_input_tokens = self.model.get_model_max_input_tokens() or DEFAULT_MAX_INPUT_TOKENS
|
@@ -0,0 +1,106 @@
|
|
1
|
+
"""Agent factory module for creating different types of agents."""
|
2
|
+
|
3
|
+
from typing import Optional
|
4
|
+
|
5
|
+
from loguru import logger
|
6
|
+
|
7
|
+
from quantalogic.agent import Agent
|
8
|
+
from quantalogic.agent_config import (
|
9
|
+
create_basic_agent,
|
10
|
+
create_full_agent,
|
11
|
+
create_interpreter_agent,
|
12
|
+
)
|
13
|
+
from quantalogic.coding_agent import create_coding_agent
|
14
|
+
from quantalogic.search_agent import create_search_agent
|
15
|
+
|
16
|
+
|
17
|
+
def create_agent_for_mode(
|
18
|
+
mode: str,
|
19
|
+
model_name: str,
|
20
|
+
vision_model_name: Optional[str],
|
21
|
+
no_stream: bool = False,
|
22
|
+
compact_every_n_iteration: Optional[int] = None,
|
23
|
+
max_tokens_working_memory: Optional[int] = None
|
24
|
+
) -> Agent:
|
25
|
+
"""Create an agent based on the specified mode.
|
26
|
+
|
27
|
+
Args:
|
28
|
+
mode: The mode of operation for the agent
|
29
|
+
model_name: The name of the language model to use
|
30
|
+
vision_model_name: Optional name of the vision model
|
31
|
+
no_stream: Whether to disable streaming mode
|
32
|
+
compact_every_n_iteration: Optional number of iterations before compacting memory
|
33
|
+
max_tokens_working_memory: Optional maximum tokens for working memory
|
34
|
+
|
35
|
+
Returns:
|
36
|
+
Agent: The created agent instance
|
37
|
+
|
38
|
+
Raises:
|
39
|
+
ValueError: If an unknown agent mode is specified
|
40
|
+
"""
|
41
|
+
logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
|
42
|
+
logger.debug(f"Using vision model: {vision_model_name}")
|
43
|
+
logger.debug(f"Using no_stream: {no_stream}")
|
44
|
+
logger.debug(f"Using compact_every_n_iteration: {compact_every_n_iteration}")
|
45
|
+
logger.debug(f"Using max_tokens_working_memory: {max_tokens_working_memory}")
|
46
|
+
|
47
|
+
if mode == "code":
|
48
|
+
logger.debug("Creating code agent without basic mode")
|
49
|
+
return create_coding_agent(
|
50
|
+
model_name,
|
51
|
+
vision_model_name,
|
52
|
+
basic=False,
|
53
|
+
no_stream=no_stream,
|
54
|
+
compact_every_n_iteration=compact_every_n_iteration,
|
55
|
+
max_tokens_working_memory=max_tokens_working_memory
|
56
|
+
)
|
57
|
+
if mode == "code-basic":
|
58
|
+
return create_coding_agent(
|
59
|
+
model_name,
|
60
|
+
vision_model_name,
|
61
|
+
basic=True,
|
62
|
+
no_stream=no_stream,
|
63
|
+
compact_every_n_iteration=compact_every_n_iteration,
|
64
|
+
max_tokens_working_memory=max_tokens_working_memory
|
65
|
+
)
|
66
|
+
elif mode == "basic":
|
67
|
+
return create_basic_agent(
|
68
|
+
model_name,
|
69
|
+
vision_model_name,
|
70
|
+
no_stream=no_stream,
|
71
|
+
compact_every_n_iteration=compact_every_n_iteration,
|
72
|
+
max_tokens_working_memory=max_tokens_working_memory
|
73
|
+
)
|
74
|
+
elif mode == "full":
|
75
|
+
return create_full_agent(
|
76
|
+
model_name,
|
77
|
+
vision_model_name,
|
78
|
+
no_stream=no_stream,
|
79
|
+
compact_every_n_iteration=compact_every_n_iteration,
|
80
|
+
max_tokens_working_memory=max_tokens_working_memory
|
81
|
+
)
|
82
|
+
elif mode == "interpreter":
|
83
|
+
return create_interpreter_agent(
|
84
|
+
model_name,
|
85
|
+
vision_model_name,
|
86
|
+
no_stream=no_stream,
|
87
|
+
compact_every_n_iteration=compact_every_n_iteration,
|
88
|
+
max_tokens_working_memory=max_tokens_working_memory
|
89
|
+
)
|
90
|
+
elif mode == "search":
|
91
|
+
return create_search_agent(
|
92
|
+
model_name,
|
93
|
+
no_stream=no_stream,
|
94
|
+
compact_every_n_iteration=compact_every_n_iteration,
|
95
|
+
max_tokens_working_memory=max_tokens_working_memory
|
96
|
+
)
|
97
|
+
if mode == "search-full":
|
98
|
+
return create_search_agent(
|
99
|
+
model_name,
|
100
|
+
mode_full=True,
|
101
|
+
no_stream=no_stream,
|
102
|
+
compact_every_n_iteration=compact_every_n_iteration,
|
103
|
+
max_tokens_working_memory=max_tokens_working_memory
|
104
|
+
)
|
105
|
+
else:
|
106
|
+
raise ValueError(f"Unknown agent mode: {mode}")
|
quantalogic/generative_model.py
CHANGED
@@ -1,16 +1,17 @@
|
|
1
1
|
"""Generative model module for AI-powered text generation."""
|
2
2
|
|
3
3
|
import functools
|
4
|
-
from typing import Dict, Any, Optional, List
|
5
4
|
from datetime import datetime
|
5
|
+
from typing import Any, Dict, List
|
6
6
|
|
7
7
|
import litellm
|
8
8
|
import openai
|
9
|
-
from litellm import completion, exceptions, get_max_tokens, get_model_info,
|
9
|
+
from litellm import completion, exceptions, get_max_tokens, get_model_info, image_generation, token_counter
|
10
10
|
from loguru import logger
|
11
11
|
from pydantic import BaseModel, Field, field_validator
|
12
12
|
|
13
13
|
from quantalogic.event_emitter import EventEmitter # Importing the EventEmitter class
|
14
|
+
from quantalogic.get_model_info import get_max_input_tokens, get_max_output_tokens, model_info
|
14
15
|
|
15
16
|
MIN_RETRIES = 1
|
16
17
|
|
@@ -265,15 +266,23 @@ class GenerativeModel:
|
|
265
266
|
def _get_model_info_impl(self, model_name: str) -> dict:
|
266
267
|
"""Get information about the model with prefix fallback logic."""
|
267
268
|
original_model = model_name
|
268
|
-
|
269
|
+
tried_models = [model_name]
|
270
|
+
|
269
271
|
while True:
|
270
272
|
try:
|
271
273
|
logger.debug(f"Attempting to retrieve model info for: {model_name}")
|
272
|
-
model_info
|
273
|
-
if model_info:
|
274
|
-
logger.debug(f"Found model info for {model_name}
|
275
|
-
return model_info
|
276
|
-
|
274
|
+
# Try direct lookup from model_info dictionary first
|
275
|
+
if model_name in model_info:
|
276
|
+
logger.debug(f"Found model info for {model_name} in model_info")
|
277
|
+
return model_info[model_name]
|
278
|
+
|
279
|
+
# Try get_model_info as fallback
|
280
|
+
info = get_model_info(model_name)
|
281
|
+
if info:
|
282
|
+
logger.debug(f"Found model info for {model_name} via get_model_info")
|
283
|
+
return info
|
284
|
+
except Exception as e:
|
285
|
+
logger.debug(f"Failed to get model info for {model_name}: {str(e)}")
|
277
286
|
pass
|
278
287
|
|
279
288
|
# Try removing one prefix level
|
@@ -281,8 +290,9 @@ class GenerativeModel:
|
|
281
290
|
if len(parts) <= 1:
|
282
291
|
break
|
283
292
|
model_name = "/".join(parts[1:])
|
293
|
+
tried_models.append(model_name)
|
284
294
|
|
285
|
-
error_msg = f"Could not find model info for {original_model} after trying: {
|
295
|
+
error_msg = f"Could not find model info for {original_model} after trying: {' → '.join(tried_models)}"
|
286
296
|
logger.error(error_msg)
|
287
297
|
raise ValueError(error_msg)
|
288
298
|
|
@@ -292,12 +302,23 @@ class GenerativeModel:
|
|
292
302
|
model_name = self.model
|
293
303
|
return self._get_model_info_cached(model_name)
|
294
304
|
|
295
|
-
def get_model_max_input_tokens(self) -> int:
|
305
|
+
def get_model_max_input_tokens(self) -> int | None:
|
296
306
|
"""Get the maximum number of input tokens for the model."""
|
297
307
|
try:
|
308
|
+
# First try direct lookup
|
309
|
+
max_tokens = get_max_input_tokens(self.model)
|
310
|
+
if max_tokens is not None:
|
311
|
+
return max_tokens
|
312
|
+
|
313
|
+
# If not found, try getting from model info
|
298
314
|
model_info = self.get_model_info()
|
299
|
-
|
300
|
-
|
315
|
+
if model_info:
|
316
|
+
return model_info.get("max_input_tokens")
|
317
|
+
|
318
|
+
# If still not found, log warning and return default
|
319
|
+
logger.warning(f"No max input tokens found for {self.model}. Using default.")
|
320
|
+
return 8192 # A reasonable default for many models
|
321
|
+
|
301
322
|
except Exception as e:
|
302
323
|
logger.error(f"Error getting max input tokens for {self.model}: {e}")
|
303
324
|
return None
|
@@ -305,13 +326,20 @@ class GenerativeModel:
|
|
305
326
|
def get_model_max_output_tokens(self) -> int | None:
|
306
327
|
"""Get the maximum number of output tokens for the model."""
|
307
328
|
try:
|
329
|
+
# First try direct lookup
|
330
|
+
max_tokens = get_max_output_tokens(self.model)
|
331
|
+
if max_tokens is not None:
|
332
|
+
return max_tokens
|
333
|
+
|
334
|
+
# If not found, try getting from model info
|
308
335
|
model_info = self.get_model_info()
|
309
336
|
if model_info:
|
310
337
|
return model_info.get("max_output_tokens")
|
311
|
-
|
312
|
-
#
|
338
|
+
|
339
|
+
# If still not found, log warning and return default
|
313
340
|
logger.warning(f"No max output tokens found for {self.model}. Using default.")
|
314
|
-
return 4096 # A reasonable default for many
|
341
|
+
return 4096 # A reasonable default for many models
|
342
|
+
|
315
343
|
except Exception as e:
|
316
344
|
logger.error(f"Error getting max output tokens for {self.model}: {e}")
|
317
345
|
return None
|
@@ -0,0 +1,14 @@
|
|
1
|
+
model_info = {
|
2
|
+
"deepseek-reasoner": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
|
3
|
+
"openrouter/deepseek/deepseek-r1": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
|
4
|
+
}
|
5
|
+
|
6
|
+
|
7
|
+
def get_max_output_tokens(model_name: str) -> int | None:
|
8
|
+
"""Get the maximum output tokens for a given model name."""
|
9
|
+
return model_info.get(model_name, {}).get("max_output_tokens", None)
|
10
|
+
|
11
|
+
|
12
|
+
def get_max_input_tokens(model_name: str) -> int | None:
|
13
|
+
"""Get the maximum input tokens for a given model name."""
|
14
|
+
return model_info.get(model_name, {}).get("max_input_tokens", None)
|
@@ -171,6 +171,10 @@ def get_multiline_input(console: Console) -> str:
|
|
171
171
|
prompt_text = f"{line_number:>3}: "
|
172
172
|
line = session.prompt(prompt_text, rprompt="Press Enter twice to submit")
|
173
173
|
|
174
|
+
# Handle commands with single return
|
175
|
+
if line.strip().startswith('/'):
|
176
|
+
return line.strip()
|
177
|
+
|
174
178
|
if line.strip() == "":
|
175
179
|
blank_lines += 1
|
176
180
|
if blank_lines == 2:
|
quantalogic/main.py
CHANGED
@@ -2,211 +2,36 @@
|
|
2
2
|
"""Main module for the QuantaLogic agent."""
|
3
3
|
|
4
4
|
# Standard library imports
|
5
|
-
import random
|
6
5
|
import sys
|
7
6
|
from typing import Optional
|
8
7
|
|
9
8
|
# Third-party imports
|
10
9
|
import click
|
11
|
-
import requests
|
12
10
|
from loguru import logger
|
13
11
|
|
14
|
-
from quantalogic.console_print_events import console_print_events
|
15
|
-
from quantalogic.utils.check_version import check_if_is_latest_version
|
16
12
|
from quantalogic.version import get_version
|
17
13
|
|
18
14
|
# Configure logger
|
19
|
-
logger.remove()
|
20
|
-
|
21
|
-
from threading import Lock # noqa: E402
|
15
|
+
logger.remove()
|
22
16
|
|
23
17
|
from rich.console import Console # noqa: E402
|
24
18
|
from rich.panel import Panel # noqa: E402
|
25
|
-
from rich.prompt import Confirm # noqa: E402
|
26
|
-
|
27
|
-
from quantalogic.agent import Agent # noqa: E402
|
28
19
|
|
29
20
|
# Local application imports
|
30
21
|
from quantalogic.agent_config import ( # noqa: E402
|
31
22
|
MODEL_NAME,
|
32
|
-
create_basic_agent,
|
33
|
-
create_full_agent,
|
34
|
-
create_interpreter_agent,
|
35
23
|
)
|
36
|
-
from quantalogic.
|
37
|
-
from quantalogic.interactive_text_editor import get_multiline_input # noqa: E402
|
38
|
-
from quantalogic.search_agent import create_search_agent # noqa: E402
|
24
|
+
from quantalogic.task_runner import task_runner # noqa: E402
|
39
25
|
|
40
26
|
AGENT_MODES = ["code", "basic", "interpreter", "full", "code-basic", "search", "search-full"]
|
41
27
|
|
42
28
|
|
43
|
-
def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | None, no_stream: bool = False, compact_every_n_iteration: int | None = None, max_tokens_working_memory: int | None = None) -> Agent:
|
44
|
-
"""Create an agent based on the specified mode."""
|
45
|
-
logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
|
46
|
-
logger.debug(f"Using vision model: {vision_model_name}")
|
47
|
-
logger.debug(f"Using no_stream: {no_stream}")
|
48
|
-
logger.debug(f"Using compact_every_n_iteration: {compact_every_n_iteration}")
|
49
|
-
logger.debug(f"Using max_tokens_working_memory: {max_tokens_working_memory}")
|
50
|
-
if mode == "code":
|
51
|
-
logger.debug("Creating code agent without basic mode")
|
52
|
-
return create_coding_agent(model_name, vision_model_name, basic=False, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
|
53
|
-
if mode == "code-basic":
|
54
|
-
return create_coding_agent(model_name, vision_model_name, basic=True, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
|
55
|
-
elif mode == "basic":
|
56
|
-
return create_basic_agent(model_name, vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
|
57
|
-
elif mode == "full":
|
58
|
-
return create_full_agent(model_name, vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
|
59
|
-
elif mode == "interpreter":
|
60
|
-
return create_interpreter_agent(model_name, vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
|
61
|
-
elif mode == "search":
|
62
|
-
return create_search_agent(model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
|
63
|
-
if mode == "search-full":
|
64
|
-
return create_search_agent(model_name, mode_full=True, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
|
65
|
-
else:
|
66
|
-
raise ValueError(f"Unknown agent mode: {mode}")
|
67
|
-
|
68
|
-
|
69
|
-
def check_new_version():
|
70
|
-
# Randomly check for updates (1 in 10 chance)
|
71
|
-
if random.randint(1, 10) == 1:
|
72
|
-
try:
|
73
|
-
current_version = get_version()
|
74
|
-
has_new_version, latest_version = check_if_is_latest_version()
|
75
|
-
|
76
|
-
if has_new_version:
|
77
|
-
console = Console()
|
78
|
-
console.print(
|
79
|
-
Panel.fit(
|
80
|
-
f"[yellow]⚠️ Update Available![/yellow]\n\n"
|
81
|
-
f"Current version: [bold]{current_version}[/bold]\n"
|
82
|
-
f"Latest version: [bold]{latest_version}[/bold]\n\n"
|
83
|
-
"To update, run:\n"
|
84
|
-
"[bold]pip install --upgrade quantalogic[/bold]\n"
|
85
|
-
"or if using pipx:\n"
|
86
|
-
"[bold]pipx upgrade quantalogic[/bold]",
|
87
|
-
title="[bold]Update Available[/bold]",
|
88
|
-
border_style="yellow",
|
89
|
-
)
|
90
|
-
)
|
91
|
-
except Exception:
|
92
|
-
return
|
93
|
-
|
94
|
-
|
95
|
-
def configure_logger(log_level: str) -> None:
|
96
|
-
"""Configure the logger with the specified log level and format."""
|
97
|
-
logger.remove()
|
98
|
-
logger.add(
|
99
|
-
sys.stderr,
|
100
|
-
level=log_level.upper(),
|
101
|
-
format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{process}</cyan> | <magenta>{file}:{line}</magenta> | {message}",
|
102
|
-
)
|
103
|
-
logger.debug(f"Log level set to: {log_level}")
|
104
|
-
|
105
|
-
|
106
|
-
def set_litellm_verbose(verbose_mode: bool) -> None:
|
107
|
-
"""Set the verbosity of the litellm library."""
|
108
|
-
import litellm
|
109
|
-
|
110
|
-
litellm.set_verbose = verbose_mode
|
111
|
-
|
112
|
-
|
113
|
-
def switch_verbose(verbose_mode: bool, log_level: str = "info") -> None:
|
114
|
-
"""Switch verbose mode and configure logger and litellm verbosity."""
|
115
|
-
if log_level == "debug":
|
116
|
-
configure_logger("DEBUG")
|
117
|
-
else:
|
118
|
-
configure_logger(log_level)
|
119
|
-
|
120
|
-
set_litellm_verbose(verbose_mode)
|
121
|
-
|
122
|
-
|
123
|
-
def get_task_from_file(source: str):
|
124
|
-
"""Get task content from specified file path or URL."""
|
125
|
-
try:
|
126
|
-
# Check if source is a URL
|
127
|
-
if source.startswith(('http://', 'https://')):
|
128
|
-
import requests
|
129
|
-
response = requests.get(source, timeout=10)
|
130
|
-
response.raise_for_status() # Raise an exception for bad status codes
|
131
|
-
return response.text.strip()
|
132
|
-
|
133
|
-
# If not a URL, treat as a local file path
|
134
|
-
with open(source, encoding="utf-8") as f:
|
135
|
-
return f.read().strip()
|
136
|
-
except FileNotFoundError:
|
137
|
-
raise FileNotFoundError(f"Error: File '{source}' not found.")
|
138
|
-
except PermissionError:
|
139
|
-
raise PermissionError(f"Error: Permission denied when reading '{source}'.")
|
140
|
-
except requests.exceptions.RequestException as e:
|
141
|
-
raise Exception(f"Error retrieving URL content: {e}")
|
142
|
-
except Exception as e:
|
143
|
-
raise Exception(f"Unexpected error: {e}")
|
144
|
-
|
145
|
-
|
146
|
-
# Spinner control
|
147
|
-
spinner_lock = Lock()
|
148
|
-
current_spinner = None
|
149
|
-
|
150
|
-
def start_spinner(console: Console) -> None:
|
151
|
-
"""Start the thinking spinner."""
|
152
|
-
global current_spinner
|
153
|
-
with spinner_lock:
|
154
|
-
if current_spinner is None:
|
155
|
-
current_spinner = console.status("[yellow]Thinking...", spinner="dots")
|
156
|
-
current_spinner.start()
|
157
|
-
|
158
|
-
def stop_spinner(console: Console) -> None:
|
159
|
-
"""Stop the thinking spinner."""
|
160
|
-
global current_spinner
|
161
|
-
with spinner_lock:
|
162
|
-
if current_spinner is not None:
|
163
|
-
current_spinner.stop()
|
164
|
-
current_spinner = None
|
165
|
-
|
166
|
-
|
167
|
-
def display_welcome_message(
|
168
|
-
console: Console,
|
169
|
-
model_name: str,
|
170
|
-
vision_model_name: str | None,
|
171
|
-
max_iterations: int = 50,
|
172
|
-
compact_every_n_iteration: int | None = None,
|
173
|
-
max_tokens_working_memory: int | None = None,
|
174
|
-
mode: str = "basic"
|
175
|
-
) -> None:
|
176
|
-
"""Display the welcome message and instructions."""
|
177
|
-
version = get_version()
|
178
|
-
console.print(
|
179
|
-
Panel.fit(
|
180
|
-
f"[bold cyan]🌟 Welcome to QuantaLogic AI Assistant v{version} ! 🌟[/bold cyan]\n\n"
|
181
|
-
"[green]🎯 How to Use:[/green]\n\n"
|
182
|
-
"1. [bold]Describe your task[/bold]: Tell the AI what you need help with.\n"
|
183
|
-
"2. [bold]Submit your task[/bold]: Press [bold]Enter[/bold] twice to send your request.\n\n"
|
184
|
-
"3. [bold]Exit the app[/bold]: Leave the input blank and press [bold]Enter[/bold] twice to close the assistant.\n\n"
|
185
|
-
f"[yellow] 🤖 System Info:[/yellow]\n\n"
|
186
|
-
"\n"
|
187
|
-
f"- Model: {model_name}\n"
|
188
|
-
f"- Vision Model: {vision_model_name}\n"
|
189
|
-
f"- Mode: {mode}\n"
|
190
|
-
f"- Max Iterations: {max_iterations}\n"
|
191
|
-
f"- Memory Compact Frequency: {compact_every_n_iteration or 'Default (Max Iterations)'}\n"
|
192
|
-
f"- Max Working Memory Tokens: {max_tokens_working_memory or 'Default'}\n\n"
|
193
|
-
"[bold magenta]💡 Pro Tips:[/bold magenta]\n\n"
|
194
|
-
"- Be as specific as possible in your task description to get the best results!\n"
|
195
|
-
"- Use clear and concise language when describing your task\n"
|
196
|
-
"- For coding tasks, include relevant context and requirements\n"
|
197
|
-
"- The coding agent mode can handle complex tasks - don't hesitate to ask challenging questions!",
|
198
|
-
title="[bold]Instructions[/bold]",
|
199
|
-
border_style="blue",
|
200
|
-
)
|
201
|
-
)
|
202
|
-
|
203
|
-
|
204
29
|
@click.group(invoke_without_command=True)
|
205
30
|
@click.option(
|
206
31
|
"--compact-every-n-iteration",
|
207
32
|
type=int,
|
208
33
|
default=None,
|
209
|
-
help="Set the frequency of memory compaction for the agent (default: max_iterations)."
|
34
|
+
help="Set the frequency of memory compaction for the agent (default: max_iterations).",
|
210
35
|
)
|
211
36
|
@click.option("--version", is_flag=True, help="Show version information.")
|
212
37
|
@click.option(
|
@@ -237,7 +62,7 @@ def display_welcome_message(
|
|
237
62
|
"--max-tokens-working-memory",
|
238
63
|
type=int,
|
239
64
|
default=None,
|
240
|
-
help="Set the maximum number of tokens allowed in the working memory."
|
65
|
+
help="Set the maximum number of tokens allowed in the working memory.",
|
241
66
|
)
|
242
67
|
@click.pass_context
|
243
68
|
def cli(
|
@@ -255,8 +80,12 @@ def cli(
|
|
255
80
|
"""QuantaLogic AI Assistant - A powerful AI tool for various tasks."""
|
256
81
|
if version:
|
257
82
|
console = Console()
|
258
|
-
|
259
|
-
|
83
|
+
current_version = get_version()
|
84
|
+
console.print(
|
85
|
+
Panel(f"QuantaLogic Version: [bold green]{current_version}[/bold green]", title="Version Information")
|
86
|
+
)
|
87
|
+
ctx.exit()
|
88
|
+
|
260
89
|
if ctx.invoked_subcommand is None:
|
261
90
|
ctx.invoke(
|
262
91
|
task,
|
@@ -301,13 +130,13 @@ def cli(
|
|
301
130
|
"--compact-every-n-iteration",
|
302
131
|
type=int,
|
303
132
|
default=None,
|
304
|
-
help="Set the frequency of memory compaction for the agent (default: max_iterations)."
|
133
|
+
help="Set the frequency of memory compaction for the agent (default: max_iterations).",
|
305
134
|
)
|
306
135
|
@click.option(
|
307
136
|
"--max-tokens-working-memory",
|
308
137
|
type=int,
|
309
138
|
default=None,
|
310
|
-
help="Set the maximum number of tokens allowed in the working memory."
|
139
|
+
help="Set the maximum number of tokens allowed in the working memory.",
|
311
140
|
)
|
312
141
|
@click.option(
|
313
142
|
"--no-stream",
|
@@ -328,122 +157,23 @@ def task(
|
|
328
157
|
max_tokens_working_memory: int | None,
|
329
158
|
no_stream: bool,
|
330
159
|
) -> None:
|
331
|
-
"""Execute a task with the QuantaLogic AI Assistant."""
|
332
160
|
console = Console()
|
333
|
-
switch_verbose(verbose, log)
|
334
161
|
|
335
162
|
try:
|
336
|
-
|
337
|
-
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
|
344
|
-
|
345
|
-
|
346
|
-
|
347
|
-
|
348
|
-
|
349
|
-
max_tokens_working_memory=max_tokens_working_memory,
|
350
|
-
mode=mode
|
351
|
-
)
|
352
|
-
check_new_version()
|
353
|
-
logger.debug("Waiting for user input...")
|
354
|
-
task_content = get_multiline_input(console).strip()
|
355
|
-
logger.debug(f"User input received. Task content: {task_content}")
|
356
|
-
if not task_content:
|
357
|
-
logger.info("No task provided. Exiting...")
|
358
|
-
console.print("[yellow]No task provided. Exiting...[/yellow]")
|
359
|
-
sys.exit(2)
|
360
|
-
|
361
|
-
console.print(
|
362
|
-
Panel.fit(
|
363
|
-
f"[bold]Task to be submitted:[/bold]\n{task_content}",
|
364
|
-
title="[bold]Task Preview[/bold]",
|
365
|
-
border_style="blue",
|
366
|
-
)
|
367
|
-
)
|
368
|
-
if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
|
369
|
-
console.print("[yellow]Task submission cancelled. Exiting...[/yellow]")
|
370
|
-
sys.exit(0)
|
371
|
-
|
372
|
-
console.print(
|
373
|
-
Panel.fit(
|
374
|
-
"[green]✓ Task successfully submitted! Processing...[/green]",
|
375
|
-
title="[bold]Status[/bold]",
|
376
|
-
border_style="green",
|
377
|
-
)
|
378
|
-
)
|
379
|
-
|
380
|
-
logger.debug(
|
381
|
-
f"Creating agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
|
382
|
-
)
|
383
|
-
agent = create_agent_for_mode(mode, model_name, vision_model_name=vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
|
384
|
-
logger.debug(
|
385
|
-
f"Created agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
|
386
|
-
)
|
387
|
-
|
388
|
-
events = [
|
389
|
-
"task_start",
|
390
|
-
"task_think_start",
|
391
|
-
"task_think_end",
|
392
|
-
"task_complete",
|
393
|
-
"tool_execution_start",
|
394
|
-
"tool_execution_end",
|
395
|
-
"error_max_iterations_reached",
|
396
|
-
"memory_full",
|
397
|
-
"memory_compacted",
|
398
|
-
"memory_summary",
|
399
|
-
]
|
400
|
-
# Add spinner control to event handlers
|
401
|
-
def handle_task_think_start(*args, **kwargs):
|
402
|
-
start_spinner(console)
|
403
|
-
|
404
|
-
def handle_task_think_end(*args, **kwargs):
|
405
|
-
stop_spinner(console)
|
406
|
-
|
407
|
-
def handle_stream_chunk(event: str, data: str) -> None:
|
408
|
-
if current_spinner:
|
409
|
-
stop_spinner(console)
|
410
|
-
if data is not None:
|
411
|
-
console.print(data, end="", markup=False)
|
412
|
-
|
413
|
-
agent.event_emitter.on(
|
414
|
-
event=events,
|
415
|
-
listener=console_print_events,
|
416
|
-
)
|
417
|
-
|
418
|
-
agent.event_emitter.on(
|
419
|
-
event="task_think_start",
|
420
|
-
listener=handle_task_think_start,
|
421
|
-
)
|
422
|
-
|
423
|
-
agent.event_emitter.on(
|
424
|
-
event="task_think_end",
|
425
|
-
listener=handle_task_think_end,
|
426
|
-
)
|
427
|
-
|
428
|
-
agent.event_emitter.on(
|
429
|
-
event="stream_chunk",
|
430
|
-
listener=handle_stream_chunk,
|
431
|
-
)
|
432
|
-
|
433
|
-
logger.debug("Registered event handlers for agent events with events: {events}")
|
434
|
-
|
435
|
-
logger.debug(f"Solving task with agent: {task_content}")
|
436
|
-
if max_iterations < 1:
|
437
|
-
raise ValueError("max_iterations must be greater than 0")
|
438
|
-
result = agent.solve_task(task=task_content, max_iterations=max_iterations, streaming=not no_stream)
|
439
|
-
logger.debug(f"Task solved with result: {result} using {max_iterations} iterations")
|
440
|
-
|
441
|
-
console.print(
|
442
|
-
Panel.fit(
|
443
|
-
f"[bold]Task Result:[/bold]\n{result}", title="[bold]Execution Output[/bold]", border_style="green"
|
444
|
-
)
|
163
|
+
task_runner(
|
164
|
+
console,
|
165
|
+
file,
|
166
|
+
model_name,
|
167
|
+
verbose,
|
168
|
+
mode,
|
169
|
+
log,
|
170
|
+
vision_model_name,
|
171
|
+
task,
|
172
|
+
max_iterations,
|
173
|
+
compact_every_n_iteration,
|
174
|
+
max_tokens_working_memory,
|
175
|
+
no_stream,
|
445
176
|
)
|
446
|
-
|
447
177
|
except Exception as e:
|
448
178
|
console.print(f"[red]{str(e)}[/red]")
|
449
179
|
logger.error(f"Error in task execution: {e}", exc_info=True)
|
@@ -0,0 +1,38 @@
|
|
1
|
+
"""Module for reading task content from files or URLs."""
|
2
|
+
|
3
|
+
import requests
|
4
|
+
|
5
|
+
|
6
|
+
def get_task_from_file(source: str) -> str:
|
7
|
+
"""Get task content from specified file path or URL.
|
8
|
+
|
9
|
+
Args:
|
10
|
+
source (str): File path or URL to read task content from
|
11
|
+
|
12
|
+
Returns:
|
13
|
+
str: Stripped task content from the file or URL
|
14
|
+
|
15
|
+
Raises:
|
16
|
+
FileNotFoundError: If the local file does not exist
|
17
|
+
PermissionError: If there are permission issues reading the file
|
18
|
+
requests.exceptions.RequestException: If there are issues retrieving URL content
|
19
|
+
Exception: For any other unexpected errors
|
20
|
+
"""
|
21
|
+
try:
|
22
|
+
# Check if source is a URL
|
23
|
+
if source.startswith(('http://', 'https://')):
|
24
|
+
response = requests.get(source, timeout=10)
|
25
|
+
response.raise_for_status() # Raise an exception for bad status codes
|
26
|
+
return response.text.strip()
|
27
|
+
|
28
|
+
# If not a URL, treat as a local file path
|
29
|
+
with open(source, encoding="utf-8") as f:
|
30
|
+
return f.read().strip()
|
31
|
+
except FileNotFoundError:
|
32
|
+
raise FileNotFoundError(f"Error: File '{source}' not found.")
|
33
|
+
except PermissionError:
|
34
|
+
raise PermissionError(f"Error: Permission denied when reading '{source}'.")
|
35
|
+
except requests.exceptions.RequestException as e:
|
36
|
+
raise Exception(f"Error retrieving URL content: {e}")
|
37
|
+
except Exception as e:
|
38
|
+
raise Exception(f"Unexpected error: {e}")
|
@@ -0,0 +1,284 @@
|
|
1
|
+
"""Task runner module for executing tasks with the QuantaLogic AI Assistant."""
|
2
|
+
|
3
|
+
import sys
|
4
|
+
from threading import Lock
|
5
|
+
from typing import Optional
|
6
|
+
|
7
|
+
from loguru import logger
|
8
|
+
from rich.console import Console
|
9
|
+
from rich.panel import Panel
|
10
|
+
from rich.prompt import Confirm
|
11
|
+
|
12
|
+
from quantalogic.agent_factory import create_agent_for_mode
|
13
|
+
from quantalogic.console_print_events import console_print_events
|
14
|
+
from quantalogic.interactive_text_editor import get_multiline_input
|
15
|
+
from quantalogic.task_file_reader import get_task_from_file
|
16
|
+
from quantalogic.version_check import check_new_version, get_version
|
17
|
+
from quantalogic.welcome_message import display_welcome_message
|
18
|
+
|
19
|
+
# Spinner control
spinner_lock = Lock()  # serializes access to current_spinner (handlers may fire from other threads)
current_spinner = None  # the active console.status(...) spinner, or None when idle
|
22
|
+
|
23
|
+
|
24
|
+
def configure_logger(log_level: str) -> None:
    """Configure the loguru logger with the specified log level and format.

    Args:
        log_level: Desired minimum level, case-insensitive (e.g. "info", "DEBUG").
    """
    # Remove previously-installed sinks (including loguru's default stderr
    # handler) first — otherwise each call adds another sink and every
    # message is emitted multiple times.
    logger.remove()
    logger.add(
        sys.stderr,
        level=log_level.upper(),
        format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{process}</cyan> | <magenta>{file}:{line}</magenta> | {message}",
    )
    logger.debug(f"Log level set to: {log_level}")
|
32
|
+
|
33
|
+
|
34
|
+
def set_litellm_verbose(verbose_mode: bool) -> None:
    """Enable or disable litellm's global verbose logging flag."""
    # Imported lazily so litellm is only loaded when verbosity is toggled.
    import litellm

    setattr(litellm, "set_verbose", verbose_mode)
|
39
|
+
|
40
|
+
|
41
|
+
def switch_verbose(verbose_mode: bool, log_level: str = "info") -> None:
    """Apply the requested log level and litellm verbosity in one step."""
    # configure_logger upper-cases the level itself, so "debug" maps to the
    # same sink configuration as the explicit "DEBUG" constant.
    effective_level = "DEBUG" if log_level == "debug" else log_level
    configure_logger(effective_level)

    set_litellm_verbose(verbose_mode)
|
49
|
+
|
50
|
+
|
51
|
+
def start_spinner(console: Console) -> None:
    """Start the 'Thinking...' spinner unless one is already running."""
    global current_spinner
    with spinner_lock:
        if current_spinner is not None:
            return  # a spinner is already active; don't stack another
        spinner = console.status("[yellow]Thinking...", spinner="dots")
        spinner.start()
        current_spinner = spinner
|
58
|
+
|
59
|
+
|
60
|
+
def stop_spinner(console: Console) -> None:
    """Stop the active spinner, if any, and clear the shared reference."""
    global current_spinner
    with spinner_lock:
        if current_spinner is None:
            return  # nothing to stop
        current_spinner.stop()
        current_spinner = None
|
67
|
+
|
68
|
+
|
69
|
+
def interactive_task_runner(
    agent,
    console: Console,
    max_iterations: int,
    no_stream: bool,
) -> None:
    """Run tasks interactively, asking the user if they want to continue after each task.

    Loops forever reading multiline input; an empty input exits. Input starting
    with '/' is treated as a command (currently only '/clear', which wipes the
    agent's memory). Anything else is previewed and, on confirmation, solved.

    Args:
        agent: The agent instance to use for solving tasks
        console: Rich console instance for output
        max_iterations: Maximum number of iterations per task
        no_stream: Disable streaming output
    """
    while True:
        logger.debug("Waiting for user input...")
        task_content = get_multiline_input(console).strip()

        # Empty input is the exit signal for the interactive session.
        if not task_content:
            logger.info("No task provided. Exiting...")
            console.print("[yellow]No task provided. Exiting...[/yellow]")
            break

        # Handle commands with single return
        if task_content.startswith('/'):
            command = task_content.lower()
            if command == '/clear':
                logger.info("Clearing agent memory...")
                console.print("[yellow]Clearing agent memory...[/yellow]")
                agent.clear_memory()
                console.print("[green]Memory cleared successfully![/green]")
                continue
            else:
                console.print(f"[red]Unknown command: {command}[/red]")
                continue

        # For non-commands, ask for confirmation
        console.print(
            Panel.fit(
                f"[bold]Task to be submitted:[/bold]\n{task_content}",
                title="[bold]Task Preview[/bold]",
                border_style="blue",
            )
        )

        # Cancelled tasks offer a second prompt so the user can exit cleanly.
        if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
            console.print("[yellow]Task submission cancelled.[/yellow]")
            if not Confirm.ask("[bold]Would you like to ask another question?[/bold]"):
                break
            continue

        console.print(
            Panel.fit(
                "[green]✓ Task successfully submitted! Processing...[/green]",
                title="[bold]Status[/bold]",
                border_style="green",
            )
        )

        logger.debug(f"Solving task with agent: {task_content}")
        # clear_memory=False keeps conversation context across tasks in the session.
        result = agent.solve_task(task=task_content, max_iterations=max_iterations, streaming=not no_stream,clear_memory=False)
        logger.debug(f"Task solved with result: {result} using {max_iterations} iterations")

        console.print(
            Panel.fit(
                f"[bold]Task Result:[/bold]\n{result}",
                title="[bold]Execution Output[/bold]",
                border_style="green"
            )
        )

        if not Confirm.ask("[bold]Would you like to ask another question?[/bold]"):
            break
|
142
|
+
|
143
|
+
|
144
|
+
def _execute_single_task(agent, console: Console, task_content: str, max_iterations: int, no_stream: bool) -> None:
    """Solve one task with the agent and render the result panel.

    Args:
        agent: Agent instance used to solve the task.
        console: Rich console for output.
        task_content: Task description to solve.
        max_iterations: Maximum solver iterations; must be >= 1.
        no_stream: When True, disable token streaming.

    Raises:
        ValueError: If max_iterations is less than 1.
    """
    if max_iterations < 1:
        raise ValueError("max_iterations must be greater than 0")
    logger.debug(f"Solving task with agent: {task_content}")
    result = agent.solve_task(task=task_content, max_iterations=max_iterations, streaming=not no_stream)
    logger.debug(f"Task solved with result: {result} using {max_iterations} iterations")

    console.print(
        Panel.fit(
            f"[bold]Task Result:[/bold]\n{result}",
            title="[bold]Execution Output[/bold]",
            border_style="green"
        )
    )


def task_runner(
    console: Console,
    file: Optional[str],
    model_name: str,
    verbose: bool,
    mode: str,
    log: str,
    vision_model_name: str | None,
    task: Optional[str],
    max_iterations: int,
    compact_every_n_iteration: int | None,
    max_tokens_working_memory: int | None,
    no_stream: bool,
) -> None:
    """Execute a task with the QuantaLogic AI Assistant.

    Runs a single task when `file` or `task` is provided; otherwise enters
    interactive mode with event-driven console output.

    Args:
        console: Rich console instance for output
        file: Optional path to task file
        model_name: Name of the model to use
        verbose: Enable verbose logging
        mode: Operation mode
        log: Log level
        vision_model_name: Optional vision model name
        task: Optional task string
        max_iterations: Maximum number of iterations
        compact_every_n_iteration: Optional number of iterations before memory compaction
        max_tokens_working_memory: Optional maximum tokens for working memory
        no_stream: Disable streaming output
    """
    switch_verbose(verbose, log)

    # Create the agent instance with the specified configuration
    agent = create_agent_for_mode(
        mode=mode,
        model_name=model_name,
        vision_model_name=vision_model_name,
        compact_every_n_iteration=compact_every_n_iteration,
        max_tokens_working_memory=max_tokens_working_memory
    )

    if file:
        # Execute single task read from a file (or URL) and return.
        _execute_single_task(agent, console, get_task_from_file(file), max_iterations, no_stream)
        return

    if task:
        # Execute single task given on the command line.
        check_new_version()
        _execute_single_task(agent, console, task, max_iterations, no_stream)
        return

    # Interactive mode
    display_welcome_message(
        console=console,
        model_name=model_name,
        version=get_version(),
        vision_model_name=vision_model_name,
        max_iterations=max_iterations,
        compact_every_n_iteration=compact_every_n_iteration,
        max_tokens_working_memory=max_tokens_working_memory,
        mode=mode,
    )
    check_new_version()
    logger.debug(
        f"Created agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
    )

    # Events forwarded to the generic console event printer.
    events = [
        "task_start",
        "task_think_start",
        "task_think_end",
        "task_complete",
        "tool_execution_start",
        "tool_execution_end",
        "error_max_iterations_reached",
        "memory_full",
        "memory_compacted",
        "memory_summary",
    ]

    # Add spinner control to event handlers
    def handle_task_think_start(*args, **kwargs):
        start_spinner(console)

    def handle_task_think_end(*args, **kwargs):
        stop_spinner(console)

    def handle_stream_chunk(event: str, data: str) -> None:
        # Stop the spinner before printing streamed tokens so output isn't garbled.
        if current_spinner:
            stop_spinner(console)
        if data is not None:
            console.print(data, end="", markup=False)

    agent.event_emitter.on(
        event=events,
        listener=console_print_events,
    )

    agent.event_emitter.on(
        event="task_think_start",
        listener=handle_task_think_start,
    )

    agent.event_emitter.on(
        event="task_think_end",
        listener=handle_task_think_end,
    )

    agent.event_emitter.on(
        event="stream_chunk",
        listener=handle_stream_chunk,
    )

    # Fixed: original lacked the f-prefix and logged the literal "{events}".
    logger.debug(f"Registered event handlers for agent events with events: {events}")

    interactive_task_runner(agent, console, max_iterations, no_stream)
|
quantalogic/tools/llm_tool.py
CHANGED
@@ -0,0 +1,41 @@
|
|
1
|
+
"""Module for checking and displaying version updates."""
|
2
|
+
|
3
|
+
import random
|
4
|
+
|
5
|
+
from rich.console import Console
|
6
|
+
from rich.panel import Panel
|
7
|
+
|
8
|
+
from quantalogic.utils.check_version import check_if_is_latest_version
|
9
|
+
from quantalogic.version import get_version
|
10
|
+
|
11
|
+
|
12
|
+
def check_new_version() -> None:
    """Randomly check for updates and display a notification if a new version is available.

    Runs with a 1-in-10 probability per call. When it runs, the installed
    version is compared against the latest published one, and an update panel
    with installation instructions is printed if a newer release exists.
    """
    # Only perform the (network-backed) check occasionally.
    if random.randint(1, 10) != 1:
        return
    try:
        current_version = get_version()
        has_new_version, latest_version = check_if_is_latest_version()
        if not has_new_version:
            return
        Console().print(
            Panel.fit(
                f"[yellow]⚠️ Update Available![/yellow]\n\n"
                f"Current version: [bold]{current_version}[/bold]\n"
                f"Latest version: [bold]{latest_version}[/bold]\n\n"
                "To update, run:\n"
                "[bold]pip install --upgrade quantalogic[/bold]\n"
                "or if using pipx:\n"
                "[bold]pipx upgrade quantalogic[/bold]",
                title="[bold]Update Available[/bold]",
                border_style="yellow",
            )
        )
    except Exception:
        # Version checking is best-effort; never let it break the CLI.
        return
|
@@ -0,0 +1,86 @@
|
|
1
|
+
"""Module for displaying welcome messages and instructions."""
|
2
|
+
|
3
|
+
from rich.console import Console
|
4
|
+
from rich.panel import Panel
|
5
|
+
|
6
|
+
|
7
|
+
def create_config_table(
|
8
|
+
mode: str,
|
9
|
+
model_name: str,
|
10
|
+
vision_model_name: str | None,
|
11
|
+
max_iterations: int,
|
12
|
+
compact_every_n_iteration: int | None,
|
13
|
+
max_tokens_working_memory: int | None,
|
14
|
+
) -> str:
|
15
|
+
"""Create a formatted string representation of the configuration table."""
|
16
|
+
return (
|
17
|
+
f"• Mode [cyan]{mode}[/cyan]\n"
|
18
|
+
f"• Language Model [cyan]{model_name}[/cyan]\n"
|
19
|
+
f"• Vision Model {vision_model_name or '[dim]Not Configured[/dim]'}\n"
|
20
|
+
f"• Max Iterations {max_iterations}\n"
|
21
|
+
f"• Memory Compaction {compact_every_n_iteration or '[dim]Default[/dim]'}\n"
|
22
|
+
f"• Max Memory Tokens {max_tokens_working_memory or '[dim]Default[/dim]'}"
|
23
|
+
)
|
24
|
+
|
25
|
+
|
26
|
+
def create_tips_section() -> str:
    """Return the formatted 'Pro Tips' section shown in the welcome panel."""
    tips = [
        "💡 [bold cyan]Be specific[/bold cyan] in your task descriptions",
        "💭 Use [bold cyan]clear and concise[/bold cyan] language",
        "✨ Include [bold cyan]relevant context[/bold cyan] for coding tasks",
        "🚀 Don't hesitate to ask [bold cyan]challenging questions[/bold cyan]!",
    ]
    return "\n".join(tips)
|
34
|
+
|
35
|
+
|
36
|
+
def display_welcome_message(
    console: Console,
    model_name: str,
    version: str,
    vision_model_name: str | None = None,
    max_iterations: int = 50,
    compact_every_n_iteration: int | None = None,
    max_tokens_working_memory: int | None = None,
    mode: str = "basic",
) -> None:
    """Display a welcome message and instructions for the QuantaLogic AI Assistant.

    Args:
        console: Rich Console instance for rendering output
        model_name: Name of the language model being used
        version: Version of the QuantaLogic AI Assistant
        vision_model_name: Optional name of the vision model
        max_iterations: Maximum number of iterations for task solving
        compact_every_n_iteration: Frequency of memory compaction
        max_tokens_working_memory: Maximum tokens allowed in working memory
        mode: Current agent mode of operation
    """
    # Assemble the panel body: title line, configuration table, then tips.
    body = (
        "\n[bold violet]Welcome to QuantaLogic AI Assistant[/bold violet] "
        f"[bold blue]v{version}[/bold blue]\n\n"
        "[bold]System Configuration[/bold]\n"
        f"{create_config_table(mode, model_name, vision_model_name, max_iterations, compact_every_n_iteration, max_tokens_working_memory)}\n\n"
        "[bold magenta]Pro Tips[/bold magenta]\n"
        f"{create_tips_section()}\n"
    )

    console.print(
        Panel(
            body,
            border_style="blue",
            title="[bold]🤖 QuantaLogic[/bold]",
            subtitle="[bold cyan]Ready to assist you[/bold cyan]",
            padding=(1, 2),
        )
    )
|
@@ -1,14 +1,17 @@
|
|
1
|
+
quantalogic/.DS_Store,sha256=eca-id755LHcqhOPlLg84Op_uvWhoJGFTR_i_r8G2K8,6148
|
1
2
|
quantalogic/__init__.py,sha256=kX0c_xmD9OslWnAE92YHMGuD7xZcTo8ZOF_5R64HKps,784
|
2
|
-
quantalogic/agent.py,sha256=
|
3
|
+
quantalogic/agent.py,sha256=r6MbnVaBv9tYNk8WR7YJAjXlEGgko8GSYitWzr7uNMY,31841
|
3
4
|
quantalogic/agent_config.py,sha256=9sjDnCPlAqVM45oguB_D509WSCaXZmuaVUtLcOvDlPg,7572
|
5
|
+
quantalogic/agent_factory.py,sha256=ODVGuGtugSzmSdP6jiWlT8WyC5onANc6BIs83FC90Bg,3782
|
4
6
|
quantalogic/coding_agent.py,sha256=UJ0fdKKA8XSB2py0NW4-e-orozo78ZAprXWuortYBiA,4935
|
5
7
|
quantalogic/console_print_events.py,sha256=KB-DGi52As8M96eUs1N_vgNqKIFtqv_H8NTOd3TLTgQ,2163
|
6
8
|
quantalogic/console_print_token.py,sha256=qSU-3kmoZk4T5-1ybrEBi8tIXDPcz7eyWKhGh3E8uIg,395
|
7
9
|
quantalogic/docs_cli.py,sha256=3giVbUpespB9ZdTSJ955A3BhcOaBl5Lwsn1AVy9XAeY,1663
|
8
10
|
quantalogic/event_emitter.py,sha256=jqot2g4JRXc88K6PW837Oqxbf7shZfO-xdPaUWmzupk,7901
|
9
|
-
quantalogic/generative_model.py,sha256=
|
10
|
-
quantalogic/
|
11
|
-
quantalogic/
|
11
|
+
quantalogic/generative_model.py,sha256=az_kqWdEBQRROvvYZu6d68JQ4nzDbQG23gADeHLMypc,16761
|
12
|
+
quantalogic/get_model_info.py,sha256=YCBZ8qynlq_iLUc--xBrQxacFZL9RHZPv5cdVwjukcw,602
|
13
|
+
quantalogic/interactive_text_editor.py,sha256=_pNPnUG3Y3_YX0R9-kx0vcaUWU0AAC350jpJ5UjrTuE,6986
|
14
|
+
quantalogic/main.py,sha256=YYP0DSnzlLpbyQPlQxbq-ZKIoA5ezv0kjy5LsQlT6bI,5227
|
12
15
|
quantalogic/memory.py,sha256=zbtRuM05jaS2lJll-92dt5JfYVLERnF_m_9xqp2x-k0,6304
|
13
16
|
quantalogic/model_names.py,sha256=UZlz25zG9B2dpfwdw_e1Gw5qFsKQ7iME9FJh9Ts4u6s,938
|
14
17
|
quantalogic/prompts.py,sha256=CW4CRgW1hTpXeWdeJNbPaRPUeUm-xKuGHJrT8mOtvkw,3602
|
@@ -21,6 +24,8 @@ quantalogic/server/state.py,sha256=TwtL0BTp_LT-fynF1IR4k8WVXuxXWtSv3NgWG9fuUME,7
|
|
21
24
|
quantalogic/server/static/js/event_visualizer.js,sha256=eFkkWyNZw3zOZlF18kxbfsWql8a2C13qBFEOAPzrj88,19646
|
22
25
|
quantalogic/server/static/js/quantalogic.js,sha256=x7TrlZGR1Y0WLK2DWl1xY847BhEWMPnL0Ua7KtOldUc,22311
|
23
26
|
quantalogic/server/templates/index.html,sha256=nDnXJoQEm1vXbhXtgaYk0G5VXj0wwzE6KrqEDhHFpj4,7773
|
27
|
+
quantalogic/task_file_reader.py,sha256=AMIJoeVY9Hhu0dBJ-C5EyaOFsXLkhn2oBhVs-WTnnLk,1460
|
28
|
+
quantalogic/task_runner.py,sha256=FtxfZs2dxdsSZoiW92K3dpfegFe0dyKx9ZP5CCyEAzo,9965
|
24
29
|
quantalogic/tool_manager.py,sha256=JAC5E5kLfYzYJx0QRIWbG14q1hlkOcwJFBG7HE8twpU,2425
|
25
30
|
quantalogic/tools/__init__.py,sha256=GcYjE1r6aNQ_JZ8uwk0yaCCCMBz6zrD_PjkRtZiUhSk,1923
|
26
31
|
quantalogic/tools/agent_tool.py,sha256=MXCXxWHRch7VK4UWhtRP1jeI8Np9Ne2CUGo8vm1oZiM,3064
|
@@ -43,7 +48,7 @@ quantalogic/tools/language_handlers/rust_handler.py,sha256=t_AqKVa3KVk6SVkq_UjUU
|
|
43
48
|
quantalogic/tools/language_handlers/scala_handler.py,sha256=wr-cWOIFOc0UYwODmEtT6rV63Qf1NyNB_BLo23GLrvk,1281
|
44
49
|
quantalogic/tools/language_handlers/typescript_handler.py,sha256=L4vuJMYxKO3_83dQhdwZ9fogauIV7rwoicRT0xLGfkQ,1738
|
45
50
|
quantalogic/tools/list_directory_tool.py,sha256=8Hy38DelSh-mRqS_uDLpeBYoHLtEy5ji77xI-TJu3Ms,4176
|
46
|
-
quantalogic/tools/llm_tool.py,sha256=
|
51
|
+
quantalogic/tools/llm_tool.py,sha256=CFTvr-RTFiuGWlOLtvw4zv93s_CLUHuHfNmvK6QpQiQ,7014
|
47
52
|
quantalogic/tools/llm_vision_tool.py,sha256=eVDIrANxxZCHxYp9xaAN8hLdFhlYm7bUu2tX9-1xUbI,5496
|
48
53
|
quantalogic/tools/markitdown_tool.py,sha256=lpbJBLx43_x2DjiZAV1HSidkHeqkkV0KvgeLG2fphK4,4339
|
49
54
|
quantalogic/tools/nodejs_tool.py,sha256=zdnE0VFj_5786uR2L0o-SKR0Gk8L-U7rdj7xGHJYIq0,19905
|
@@ -71,10 +76,12 @@ quantalogic/utils/git_ls.py,sha256=_k6QIQtc0aM1bsG340jBp4VrdevbcH8Pg2CV4r9oHok,5
|
|
71
76
|
quantalogic/utils/read_file.py,sha256=tSRVHk8dIP4nNLL89v5kRki4hOTjVyjbmuEb2zwvwCY,2077
|
72
77
|
quantalogic/utils/read_http_text_content.py,sha256=n3IayT5KcqctIVVF2gOQQAMf3Ow6eenlVgfXTpLcQbw,4410
|
73
78
|
quantalogic/version.py,sha256=ea_cRutaQk5_lwlLbUUvPFuOT7Of7-gAsDl7wdveS-g,107
|
79
|
+
quantalogic/version_check.py,sha256=cttR1lR3OienGLl7NrK1Te1fhDkqSjCci7HC1vFUTSY,1627
|
80
|
+
quantalogic/welcome_message.py,sha256=IXMhem8h7srzNUwvw8G_lmEkHU8PFfote021E_BXmVk,3039
|
74
81
|
quantalogic/xml_parser.py,sha256=uMLQNHTRCg116FwcjRoquZmSwVtE4LEH-6V2E3RD-dA,11466
|
75
82
|
quantalogic/xml_tool_parser.py,sha256=Vz4LEgDbelJynD1siLOVkJ3gLlfHsUk65_gCwbYJyGc,3784
|
76
|
-
quantalogic-0.2.
|
77
|
-
quantalogic-0.2.
|
78
|
-
quantalogic-0.2.
|
79
|
-
quantalogic-0.2.
|
80
|
-
quantalogic-0.2.
|
83
|
+
quantalogic-0.2.26.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
84
|
+
quantalogic-0.2.26.dist-info/METADATA,sha256=jb0PIHKkAGlfW5x3GPQvcbzJNbG-79nxJm-t8zCN7hA,20216
|
85
|
+
quantalogic-0.2.26.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
|
86
|
+
quantalogic-0.2.26.dist-info/entry_points.txt,sha256=h74O_Q3qBRCrDR99qvwB4BpBGzASPUIjCfxHq6Qnups,183
|
87
|
+
quantalogic-0.2.26.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|