quantalogic 0.2.23__py3-none-any.whl → 0.2.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
quantalogic/agent.py CHANGED
@@ -140,7 +140,11 @@ class Agent(BaseModel):
140
140
  logger.error(f"Failed to initialize agent: {str(e)}")
141
141
  raise
142
142
 
143
- def solve_task(self, task: str, max_iterations: int = 30, streaming: bool = False) -> str:
143
+ def clear_memory(self):
144
+ """Clear the memory and reset the session."""
145
+ self._reset_session(clear_memory=True)
146
+
147
+ def solve_task(self, task: str, max_iterations: int = 30, streaming: bool = False, clear_memory: bool = True) -> str:
144
148
  """Solve the given task using the ReAct framework.
145
149
 
146
150
  Args:
@@ -148,12 +152,13 @@ class Agent(BaseModel):
148
152
  max_iterations (int, optional): Maximum number of iterations to attempt solving the task.
149
153
  Defaults to 30 to prevent infinite loops and ensure timely task completion.
150
154
  streaming (bool, optional): Whether to use streaming mode for generating responses.
155
+ clear_memory (bool, optional): Whether to clear the memory before solving the task.
151
156
 
152
157
  Returns:
153
158
  str: The final response after task completion.
154
159
  """
155
160
  logger.debug(f"Solving task... {task}")
156
- self._reset_session(task_to_solve=task, max_iterations=max_iterations)
161
+ self._reset_session(task_to_solve=task, max_iterations=max_iterations,clear_memory=clear_memory)
157
162
 
158
163
  # Generate task summary
159
164
  self.task_to_solve_summary = self._generate_task_summary(task)
@@ -263,13 +268,15 @@ class Agent(BaseModel):
263
268
 
264
269
  return answer
265
270
 
266
- def _reset_session(self, task_to_solve: str = "", max_iterations: int = 30):
271
+ def _reset_session(self, task_to_solve: str = "", max_iterations: int = 30,clear_memory: bool = True):
267
272
  """Reset the agent's session."""
268
273
  logger.debug("Resetting session...")
269
274
  self.task_to_solve = task_to_solve
270
- self.memory.reset()
271
- self.variable_store.reset()
272
- self.total_tokens = 0
275
+ if clear_memory:
276
+ logger.debug("Clearing memory...")
277
+ self.memory.reset()
278
+ self.variable_store.reset()
279
+ self.total_tokens = 0
273
280
  self.current_iteration = 0
274
281
  self.max_output_tokens = self.model.get_model_max_output_tokens() or DEFAULT_MAX_OUTPUT_TOKENS
275
282
  self.max_input_tokens = self.model.get_model_max_input_tokens() or DEFAULT_MAX_INPUT_TOKENS
@@ -0,0 +1,106 @@
1
+ """Agent factory module for creating different types of agents."""
2
+
3
+ from typing import Optional
4
+
5
+ from loguru import logger
6
+
7
+ from quantalogic.agent import Agent
8
+ from quantalogic.agent_config import (
9
+ create_basic_agent,
10
+ create_full_agent,
11
+ create_interpreter_agent,
12
+ )
13
+ from quantalogic.coding_agent import create_coding_agent
14
+ from quantalogic.search_agent import create_search_agent
15
+
16
+
17
+ def create_agent_for_mode(
18
+ mode: str,
19
+ model_name: str,
20
+ vision_model_name: Optional[str],
21
+ no_stream: bool = False,
22
+ compact_every_n_iteration: Optional[int] = None,
23
+ max_tokens_working_memory: Optional[int] = None
24
+ ) -> Agent:
25
+ """Create an agent based on the specified mode.
26
+
27
+ Args:
28
+ mode: The mode of operation for the agent
29
+ model_name: The name of the language model to use
30
+ vision_model_name: Optional name of the vision model
31
+ no_stream: Whether to disable streaming mode
32
+ compact_every_n_iteration: Optional number of iterations before compacting memory
33
+ max_tokens_working_memory: Optional maximum tokens for working memory
34
+
35
+ Returns:
36
+ Agent: The created agent instance
37
+
38
+ Raises:
39
+ ValueError: If an unknown agent mode is specified
40
+ """
41
+ logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
42
+ logger.debug(f"Using vision model: {vision_model_name}")
43
+ logger.debug(f"Using no_stream: {no_stream}")
44
+ logger.debug(f"Using compact_every_n_iteration: {compact_every_n_iteration}")
45
+ logger.debug(f"Using max_tokens_working_memory: {max_tokens_working_memory}")
46
+
47
+ if mode == "code":
48
+ logger.debug("Creating code agent without basic mode")
49
+ return create_coding_agent(
50
+ model_name,
51
+ vision_model_name,
52
+ basic=False,
53
+ no_stream=no_stream,
54
+ compact_every_n_iteration=compact_every_n_iteration,
55
+ max_tokens_working_memory=max_tokens_working_memory
56
+ )
57
+ if mode == "code-basic":
58
+ return create_coding_agent(
59
+ model_name,
60
+ vision_model_name,
61
+ basic=True,
62
+ no_stream=no_stream,
63
+ compact_every_n_iteration=compact_every_n_iteration,
64
+ max_tokens_working_memory=max_tokens_working_memory
65
+ )
66
+ elif mode == "basic":
67
+ return create_basic_agent(
68
+ model_name,
69
+ vision_model_name,
70
+ no_stream=no_stream,
71
+ compact_every_n_iteration=compact_every_n_iteration,
72
+ max_tokens_working_memory=max_tokens_working_memory
73
+ )
74
+ elif mode == "full":
75
+ return create_full_agent(
76
+ model_name,
77
+ vision_model_name,
78
+ no_stream=no_stream,
79
+ compact_every_n_iteration=compact_every_n_iteration,
80
+ max_tokens_working_memory=max_tokens_working_memory
81
+ )
82
+ elif mode == "interpreter":
83
+ return create_interpreter_agent(
84
+ model_name,
85
+ vision_model_name,
86
+ no_stream=no_stream,
87
+ compact_every_n_iteration=compact_every_n_iteration,
88
+ max_tokens_working_memory=max_tokens_working_memory
89
+ )
90
+ elif mode == "search":
91
+ return create_search_agent(
92
+ model_name,
93
+ no_stream=no_stream,
94
+ compact_every_n_iteration=compact_every_n_iteration,
95
+ max_tokens_working_memory=max_tokens_working_memory
96
+ )
97
+ if mode == "search-full":
98
+ return create_search_agent(
99
+ model_name,
100
+ mode_full=True,
101
+ no_stream=no_stream,
102
+ compact_every_n_iteration=compact_every_n_iteration,
103
+ max_tokens_working_memory=max_tokens_working_memory
104
+ )
105
+ else:
106
+ raise ValueError(f"Unknown agent mode: {mode}")
@@ -1,16 +1,17 @@
1
1
  """Generative model module for AI-powered text generation."""
2
2
 
3
3
  import functools
4
- from typing import Dict, Any, Optional, List
5
4
  from datetime import datetime
5
+ from typing import Any, Dict, List
6
6
 
7
7
  import litellm
8
8
  import openai
9
- from litellm import completion, exceptions, get_max_tokens, get_model_info, token_counter, image_generation
9
+ from litellm import completion, exceptions, get_max_tokens, get_model_info, image_generation, token_counter
10
10
  from loguru import logger
11
11
  from pydantic import BaseModel, Field, field_validator
12
12
 
13
13
  from quantalogic.event_emitter import EventEmitter # Importing the EventEmitter class
14
+ from quantalogic.get_model_info import get_max_input_tokens, get_max_output_tokens, model_info
14
15
 
15
16
  MIN_RETRIES = 1
16
17
 
@@ -265,15 +266,23 @@ class GenerativeModel:
265
266
  def _get_model_info_impl(self, model_name: str) -> dict:
266
267
  """Get information about the model with prefix fallback logic."""
267
268
  original_model = model_name
268
-
269
+ tried_models = [model_name]
270
+
269
271
  while True:
270
272
  try:
271
273
  logger.debug(f"Attempting to retrieve model info for: {model_name}")
272
- model_info = get_model_info(model_name)
273
- if model_info:
274
- logger.debug(f"Found model info for {model_name}: {model_info}")
275
- return model_info
276
- except Exception:
274
+ # Try direct lookup from model_info dictionary first
275
+ if model_name in model_info:
276
+ logger.debug(f"Found model info for {model_name} in model_info")
277
+ return model_info[model_name]
278
+
279
+ # Try get_model_info as fallback
280
+ info = get_model_info(model_name)
281
+ if info:
282
+ logger.debug(f"Found model info for {model_name} via get_model_info")
283
+ return info
284
+ except Exception as e:
285
+ logger.debug(f"Failed to get model info for {model_name}: {str(e)}")
277
286
  pass
278
287
 
279
288
  # Try removing one prefix level
@@ -281,8 +290,9 @@ class GenerativeModel:
281
290
  if len(parts) <= 1:
282
291
  break
283
292
  model_name = "/".join(parts[1:])
293
+ tried_models.append(model_name)
284
294
 
285
- error_msg = f"Could not find model info for {original_model} after trying: {self.model}{model_name}"
295
+ error_msg = f"Could not find model info for {original_model} after trying: {''.join(tried_models)}"
286
296
  logger.error(error_msg)
287
297
  raise ValueError(error_msg)
288
298
 
@@ -292,12 +302,23 @@ class GenerativeModel:
292
302
  model_name = self.model
293
303
  return self._get_model_info_cached(model_name)
294
304
 
295
- def get_model_max_input_tokens(self) -> int:
305
+ def get_model_max_input_tokens(self) -> int | None:
296
306
  """Get the maximum number of input tokens for the model."""
297
307
  try:
308
+ # First try direct lookup
309
+ max_tokens = get_max_input_tokens(self.model)
310
+ if max_tokens is not None:
311
+ return max_tokens
312
+
313
+ # If not found, try getting from model info
298
314
  model_info = self.get_model_info()
299
- max_tokens = model_info.get("max_input_tokens") if model_info else None
300
- return max_tokens
315
+ if model_info:
316
+ return model_info.get("max_input_tokens")
317
+
318
+ # If still not found, log warning and return default
319
+ logger.warning(f"No max input tokens found for {self.model}. Using default.")
320
+ return 8192 # A reasonable default for many models
321
+
301
322
  except Exception as e:
302
323
  logger.error(f"Error getting max input tokens for {self.model}: {e}")
303
324
  return None
@@ -305,13 +326,20 @@ class GenerativeModel:
305
326
  def get_model_max_output_tokens(self) -> int | None:
306
327
  """Get the maximum number of output tokens for the model."""
307
328
  try:
329
+ # First try direct lookup
330
+ max_tokens = get_max_output_tokens(self.model)
331
+ if max_tokens is not None:
332
+ return max_tokens
333
+
334
+ # If not found, try getting from model info
308
335
  model_info = self.get_model_info()
309
336
  if model_info:
310
337
  return model_info.get("max_output_tokens")
311
-
312
- # Fallback for unmapped models
338
+
339
+ # If still not found, log warning and return default
313
340
  logger.warning(f"No max output tokens found for {self.model}. Using default.")
314
- return 4096 # A reasonable default for many chat models
341
+ return 4096 # A reasonable default for many models
342
+
315
343
  except Exception as e:
316
344
  logger.error(f"Error getting max output tokens for {self.model}: {e}")
317
345
  return None
@@ -0,0 +1,14 @@
1
+ model_info = {
2
+ "deepseek-reasoner": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
3
+ "openrouter/deepseek/deepseek-r1": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
4
+ }
5
+
6
+
7
+ def get_max_output_tokens(model_name: str) -> int | None:
8
+ """Get the maximum output tokens for a given model name."""
9
+ return model_info.get(model_name, {}).get("max_output_tokens", None)
10
+
11
+
12
+ def get_max_input_tokens(model_name: str) -> int | None:
13
+ """Get the maximum input tokens for a given model name."""
14
+ return model_info.get(model_name, {}).get("max_input_tokens", None)
@@ -171,6 +171,10 @@ def get_multiline_input(console: Console) -> str:
171
171
  prompt_text = f"{line_number:>3}: "
172
172
  line = session.prompt(prompt_text, rprompt="Press Enter twice to submit")
173
173
 
174
+ # Handle commands with single return
175
+ if line.strip().startswith('/'):
176
+ return line.strip()
177
+
174
178
  if line.strip() == "":
175
179
  blank_lines += 1
176
180
  if blank_lines == 2:
quantalogic/main.py CHANGED
@@ -2,7 +2,6 @@
2
2
  """Main module for the QuantaLogic agent."""
3
3
 
4
4
  # Standard library imports
5
- import random
6
5
  import sys
7
6
  from typing import Optional
8
7
 
@@ -10,192 +9,29 @@ from typing import Optional
10
9
  import click
11
10
  from loguru import logger
12
11
 
13
- from quantalogic.console_print_events import console_print_events
14
- from quantalogic.utils.check_version import check_if_is_latest_version
15
12
  from quantalogic.version import get_version
16
13
 
17
14
  # Configure logger
18
- logger.remove() # Remove default logger
19
-
20
- from threading import Lock # noqa: E402
15
+ logger.remove()
21
16
 
22
17
  from rich.console import Console # noqa: E402
23
18
  from rich.panel import Panel # noqa: E402
24
- from rich.prompt import Confirm # noqa: E402
25
-
26
- from quantalogic.agent import Agent # noqa: E402
27
19
 
28
20
  # Local application imports
29
21
  from quantalogic.agent_config import ( # noqa: E402
30
22
  MODEL_NAME,
31
- create_basic_agent,
32
- create_full_agent,
33
- create_interpreter_agent,
34
23
  )
35
- from quantalogic.coding_agent import create_coding_agent # noqa: E402
36
- from quantalogic.interactive_text_editor import get_multiline_input # noqa: E402
37
- from quantalogic.search_agent import create_search_agent # noqa: E402
24
+ from quantalogic.task_runner import task_runner # noqa: E402
38
25
 
39
26
  AGENT_MODES = ["code", "basic", "interpreter", "full", "code-basic", "search", "search-full"]
40
27
 
41
28
 
42
- def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | None, no_stream: bool = False, compact_every_n_iteration: int | None = None, max_tokens_working_memory: int | None = None) -> Agent:
43
- """Create an agent based on the specified mode."""
44
- logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
45
- logger.debug(f"Using vision model: {vision_model_name}")
46
- logger.debug(f"Using no_stream: {no_stream}")
47
- logger.debug(f"Using compact_every_n_iteration: {compact_every_n_iteration}")
48
- logger.debug(f"Using max_tokens_working_memory: {max_tokens_working_memory}")
49
- if mode == "code":
50
- logger.debug("Creating code agent without basic mode")
51
- return create_coding_agent(model_name, vision_model_name, basic=False, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
52
- if mode == "code-basic":
53
- return create_coding_agent(model_name, vision_model_name, basic=True, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
54
- elif mode == "basic":
55
- return create_basic_agent(model_name, vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
56
- elif mode == "full":
57
- return create_full_agent(model_name, vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
58
- elif mode == "interpreter":
59
- return create_interpreter_agent(model_name, vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
60
- elif mode == "search":
61
- return create_search_agent(model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
62
- if mode == "search-full":
63
- return create_search_agent(model_name, mode_full=True, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
64
- else:
65
- raise ValueError(f"Unknown agent mode: {mode}")
66
-
67
-
68
- def check_new_version():
69
- # Randomly check for updates (1 in 10 chance)
70
- if random.randint(1, 10) == 1:
71
- try:
72
- current_version = get_version()
73
- has_new_version, latest_version = check_if_is_latest_version()
74
-
75
- if has_new_version:
76
- console = Console()
77
- console.print(
78
- Panel.fit(
79
- f"[yellow]⚠️ Update Available![/yellow]\n\n"
80
- f"Current version: [bold]{current_version}[/bold]\n"
81
- f"Latest version: [bold]{latest_version}[/bold]\n\n"
82
- "To update, run:\n"
83
- "[bold]pip install --upgrade quantalogic[/bold]\n"
84
- "or if using pipx:\n"
85
- "[bold]pipx upgrade quantalogic[/bold]",
86
- title="[bold]Update Available[/bold]",
87
- border_style="yellow",
88
- )
89
- )
90
- except Exception:
91
- return
92
-
93
-
94
- def configure_logger(log_level: str) -> None:
95
- """Configure the logger with the specified log level and format."""
96
- logger.remove()
97
- logger.add(
98
- sys.stderr,
99
- level=log_level.upper(),
100
- format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{process}</cyan> | <magenta>{file}:{line}</magenta> | {message}",
101
- )
102
- logger.debug(f"Log level set to: {log_level}")
103
-
104
-
105
- def set_litellm_verbose(verbose_mode: bool) -> None:
106
- """Set the verbosity of the litellm library."""
107
- import litellm
108
-
109
- litellm.set_verbose = verbose_mode
110
-
111
-
112
- def switch_verbose(verbose_mode: bool, log_level: str = "info") -> None:
113
- """Switch verbose mode and configure logger and litellm verbosity."""
114
- if log_level == "debug":
115
- configure_logger("DEBUG")
116
- else:
117
- configure_logger(log_level)
118
-
119
- set_litellm_verbose(verbose_mode)
120
-
121
-
122
- def get_task_from_file(file_path: str) -> str:
123
- """Get task content from specified file."""
124
- try:
125
- with open(file_path, encoding="utf-8") as f:
126
- return f.read().strip()
127
- except FileNotFoundError:
128
- raise FileNotFoundError(f"Error: File '{file_path}' not found.")
129
- except PermissionError:
130
- raise PermissionError(f"Error: Permission denied when reading '{file_path}'.")
131
- except Exception as e:
132
- raise Exception(f"Unexpected error reading file: {e}")
133
-
134
-
135
- # Spinner control
136
- spinner_lock = Lock()
137
- current_spinner = None
138
-
139
- def start_spinner(console: Console) -> None:
140
- """Start the thinking spinner."""
141
- global current_spinner
142
- with spinner_lock:
143
- if current_spinner is None:
144
- current_spinner = console.status("[yellow]Thinking...", spinner="dots")
145
- current_spinner.start()
146
-
147
- def stop_spinner(console: Console) -> None:
148
- """Stop the thinking spinner."""
149
- global current_spinner
150
- with spinner_lock:
151
- if current_spinner is not None:
152
- current_spinner.stop()
153
- current_spinner = None
154
-
155
-
156
- def display_welcome_message(
157
- console: Console,
158
- model_name: str,
159
- vision_model_name: str | None,
160
- max_iterations: int = 50,
161
- compact_every_n_iteration: int | None = None,
162
- max_tokens_working_memory: int | None = None,
163
- mode: str = "basic"
164
- ) -> None:
165
- """Display the welcome message and instructions."""
166
- version = get_version()
167
- console.print(
168
- Panel.fit(
169
- f"[bold cyan]🌟 Welcome to QuantaLogic AI Assistant v{version} ! 🌟[/bold cyan]\n\n"
170
- "[green]🎯 How to Use:[/green]\n\n"
171
- "1. [bold]Describe your task[/bold]: Tell the AI what you need help with.\n"
172
- "2. [bold]Submit your task[/bold]: Press [bold]Enter[/bold] twice to send your request.\n\n"
173
- "3. [bold]Exit the app[/bold]: Leave the input blank and press [bold]Enter[/bold] twice to close the assistant.\n\n"
174
- f"[yellow] 🤖 System Info:[/yellow]\n\n"
175
- "\n"
176
- f"- Model: {model_name}\n"
177
- f"- Vision Model: {vision_model_name}\n"
178
- f"- Mode: {mode}\n"
179
- f"- Max Iterations: {max_iterations}\n"
180
- f"- Memory Compact Frequency: {compact_every_n_iteration or 'Default (Max Iterations)'}\n"
181
- f"- Max Working Memory Tokens: {max_tokens_working_memory or 'Default'}\n\n"
182
- "[bold magenta]💡 Pro Tips:[/bold magenta]\n\n"
183
- "- Be as specific as possible in your task description to get the best results!\n"
184
- "- Use clear and concise language when describing your task\n"
185
- "- For coding tasks, include relevant context and requirements\n"
186
- "- The coding agent mode can handle complex tasks - don't hesitate to ask challenging questions!",
187
- title="[bold]Instructions[/bold]",
188
- border_style="blue",
189
- )
190
- )
191
-
192
-
193
29
  @click.group(invoke_without_command=True)
194
30
  @click.option(
195
31
  "--compact-every-n-iteration",
196
32
  type=int,
197
33
  default=None,
198
- help="Set the frequency of memory compaction for the agent (default: max_iterations)."
34
+ help="Set the frequency of memory compaction for the agent (default: max_iterations).",
199
35
  )
200
36
  @click.option("--version", is_flag=True, help="Show version information.")
201
37
  @click.option(
@@ -226,7 +62,7 @@ def display_welcome_message(
226
62
  "--max-tokens-working-memory",
227
63
  type=int,
228
64
  default=None,
229
- help="Set the maximum number of tokens allowed in the working memory."
65
+ help="Set the maximum number of tokens allowed in the working memory.",
230
66
  )
231
67
  @click.pass_context
232
68
  def cli(
@@ -244,8 +80,12 @@ def cli(
244
80
  """QuantaLogic AI Assistant - A powerful AI tool for various tasks."""
245
81
  if version:
246
82
  console = Console()
247
- console.print(f"QuantaLogic version: {get_version()}")
248
- sys.exit(0)
83
+ current_version = get_version()
84
+ console.print(
85
+ Panel(f"QuantaLogic Version: [bold green]{current_version}[/bold green]", title="Version Information")
86
+ )
87
+ ctx.exit()
88
+
249
89
  if ctx.invoked_subcommand is None:
250
90
  ctx.invoke(
251
91
  task,
@@ -261,7 +101,7 @@ def cli(
261
101
 
262
102
 
263
103
  @cli.command()
264
- @click.option("--file", type=click.Path(exists=True), help="Path to task file.")
104
+ @click.option("--file", type=str, help="Path to task file or URL.")
265
105
  @click.option(
266
106
  "--model-name",
267
107
  default=MODEL_NAME,
@@ -290,13 +130,13 @@ def cli(
290
130
  "--compact-every-n-iteration",
291
131
  type=int,
292
132
  default=None,
293
- help="Set the frequency of memory compaction for the agent (default: max_iterations)."
133
+ help="Set the frequency of memory compaction for the agent (default: max_iterations).",
294
134
  )
295
135
  @click.option(
296
136
  "--max-tokens-working-memory",
297
137
  type=int,
298
138
  default=None,
299
- help="Set the maximum number of tokens allowed in the working memory."
139
+ help="Set the maximum number of tokens allowed in the working memory.",
300
140
  )
301
141
  @click.option(
302
142
  "--no-stream",
@@ -317,122 +157,23 @@ def task(
317
157
  max_tokens_working_memory: int | None,
318
158
  no_stream: bool,
319
159
  ) -> None:
320
- """Execute a task with the QuantaLogic AI Assistant."""
321
160
  console = Console()
322
- switch_verbose(verbose, log)
323
161
 
324
162
  try:
325
- if file:
326
- task_content = get_task_from_file(file)
327
- else:
328
- if task:
329
- check_new_version()
330
- task_content = task
331
- else:
332
- display_welcome_message(
333
- console,
334
- model_name,
335
- vision_model_name,
336
- max_iterations=max_iterations,
337
- compact_every_n_iteration=compact_every_n_iteration,
338
- max_tokens_working_memory=max_tokens_working_memory,
339
- mode=mode
340
- )
341
- check_new_version()
342
- logger.debug("Waiting for user input...")
343
- task_content = get_multiline_input(console).strip()
344
- logger.debug(f"User input received. Task content: {task_content}")
345
- if not task_content:
346
- logger.info("No task provided. Exiting...")
347
- console.print("[yellow]No task provided. Exiting...[/yellow]")
348
- sys.exit(2)
349
-
350
- console.print(
351
- Panel.fit(
352
- f"[bold]Task to be submitted:[/bold]\n{task_content}",
353
- title="[bold]Task Preview[/bold]",
354
- border_style="blue",
355
- )
356
- )
357
- if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
358
- console.print("[yellow]Task submission cancelled. Exiting...[/yellow]")
359
- sys.exit(0)
360
-
361
- console.print(
362
- Panel.fit(
363
- "[green]✓ Task successfully submitted! Processing...[/green]",
364
- title="[bold]Status[/bold]",
365
- border_style="green",
366
- )
367
- )
368
-
369
- logger.debug(
370
- f"Creating agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
371
- )
372
- agent = create_agent_for_mode(mode, model_name, vision_model_name=vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
373
- logger.debug(
374
- f"Created agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
375
- )
376
-
377
- events = [
378
- "task_start",
379
- "task_think_start",
380
- "task_think_end",
381
- "task_complete",
382
- "tool_execution_start",
383
- "tool_execution_end",
384
- "error_max_iterations_reached",
385
- "memory_full",
386
- "memory_compacted",
387
- "memory_summary",
388
- ]
389
- # Add spinner control to event handlers
390
- def handle_task_think_start(*args, **kwargs):
391
- start_spinner(console)
392
-
393
- def handle_task_think_end(*args, **kwargs):
394
- stop_spinner(console)
395
-
396
- def handle_stream_chunk(event: str, data: str) -> None:
397
- if current_spinner:
398
- stop_spinner(console)
399
- if data is not None:
400
- console.print(data, end="", markup=False)
401
-
402
- agent.event_emitter.on(
403
- event=events,
404
- listener=console_print_events,
405
- )
406
-
407
- agent.event_emitter.on(
408
- event="task_think_start",
409
- listener=handle_task_think_start,
410
- )
411
-
412
- agent.event_emitter.on(
413
- event="task_think_end",
414
- listener=handle_task_think_end,
415
- )
416
-
417
- agent.event_emitter.on(
418
- event="stream_chunk",
419
- listener=handle_stream_chunk,
420
- )
421
-
422
- logger.debug("Registered event handlers for agent events with events: {events}")
423
-
424
- logger.debug(f"Solving task with agent: {task_content}")
425
- if max_iterations < 1:
426
- raise ValueError("max_iterations must be greater than 0")
427
- result = agent.solve_task(task=task_content, max_iterations=max_iterations, streaming=not no_stream)
428
- logger.debug(f"Task solved with result: {result} using {max_iterations} iterations")
429
-
430
- console.print(
431
- Panel.fit(
432
- f"[bold]Task Result:[/bold]\n{result}", title="[bold]Execution Output[/bold]", border_style="green"
433
- )
163
+ task_runner(
164
+ console,
165
+ file,
166
+ model_name,
167
+ verbose,
168
+ mode,
169
+ log,
170
+ vision_model_name,
171
+ task,
172
+ max_iterations,
173
+ compact_every_n_iteration,
174
+ max_tokens_working_memory,
175
+ no_stream,
434
176
  )
435
-
436
177
  except Exception as e:
437
178
  console.print(f"[red]{str(e)}[/red]")
438
179
  logger.error(f"Error in task execution: {e}", exc_info=True)
@@ -0,0 +1,38 @@
1
+ """Module for reading task content from files or URLs."""
2
+
3
+ import requests
4
+
5
+
6
+ def get_task_from_file(source: str) -> str:
7
+ """Get task content from specified file path or URL.
8
+
9
+ Args:
10
+ source (str): File path or URL to read task content from
11
+
12
+ Returns:
13
+ str: Stripped task content from the file or URL
14
+
15
+ Raises:
16
+ FileNotFoundError: If the local file does not exist
17
+ PermissionError: If there are permission issues reading the file
18
+ requests.exceptions.RequestException: If there are issues retrieving URL content
19
+ Exception: For any other unexpected errors
20
+ """
21
+ try:
22
+ # Check if source is a URL
23
+ if source.startswith(('http://', 'https://')):
24
+ response = requests.get(source, timeout=10)
25
+ response.raise_for_status() # Raise an exception for bad status codes
26
+ return response.text.strip()
27
+
28
+ # If not a URL, treat as a local file path
29
+ with open(source, encoding="utf-8") as f:
30
+ return f.read().strip()
31
+ except FileNotFoundError:
32
+ raise FileNotFoundError(f"Error: File '{source}' not found.")
33
+ except PermissionError:
34
+ raise PermissionError(f"Error: Permission denied when reading '{source}'.")
35
+ except requests.exceptions.RequestException as e:
36
+ raise Exception(f"Error retrieving URL content: {e}")
37
+ except Exception as e:
38
+ raise Exception(f"Unexpected error: {e}")
@@ -0,0 +1,284 @@
1
+ """Task runner module for executing tasks with the QuantaLogic AI Assistant."""
2
+
3
+ import sys
4
+ from threading import Lock
5
+ from typing import Optional
6
+
7
+ from loguru import logger
8
+ from rich.console import Console
9
+ from rich.panel import Panel
10
+ from rich.prompt import Confirm
11
+
12
+ from quantalogic.agent_factory import create_agent_for_mode
13
+ from quantalogic.console_print_events import console_print_events
14
+ from quantalogic.interactive_text_editor import get_multiline_input
15
+ from quantalogic.task_file_reader import get_task_from_file
16
+ from quantalogic.version_check import check_new_version, get_version
17
+ from quantalogic.welcome_message import display_welcome_message
18
+
19
# Spinner control
# spinner_lock guards current_spinner so concurrent start/stop calls cannot
# race. NOTE(review): the Lock suggests event handlers may fire from a
# non-main thread — confirm against the event emitter's threading model.
spinner_lock = Lock()
# The single active "Thinking..." status spinner, or None when idle.
current_spinner = None
22
+
23
+
24
def configure_logger(log_level: str) -> None:
    """Configure the logger with the specified log level and format.

    Args:
        log_level: Logging level name (case-insensitive), e.g. "info" or "DEBUG".
    """
    # Remove previously configured sinks, including loguru's default stderr
    # sink; otherwise every message is emitted twice and the default sink
    # (which logs at DEBUG) ignores the requested level.
    logger.remove()
    logger.add(
        sys.stderr,
        level=log_level.upper(),
        format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{process}</cyan> | <magenta>{file}:{line}</magenta> | {message}",
    )
    logger.debug(f"Log level set to: {log_level}")
32
+
33
+
34
def set_litellm_verbose(verbose_mode: bool) -> None:
    """Toggle verbose output in the litellm library.

    Args:
        verbose_mode: True enables litellm's verbose logging, False disables it.
    """
    # Deferred import keeps the heavy litellm package out of module load time.
    import litellm

    litellm.set_verbose = verbose_mode
39
+
40
+
41
def switch_verbose(verbose_mode: bool, log_level: str = "info") -> None:
    """Apply logger configuration and litellm verbosity in a single call.

    Args:
        verbose_mode: Whether litellm should produce verbose output.
        log_level: Logging level name; "debug" is normalized to "DEBUG".
    """
    effective_level = "DEBUG" if log_level == "debug" else log_level
    configure_logger(effective_level)
    set_litellm_verbose(verbose_mode)
49
+
50
+
51
def start_spinner(console: Console) -> None:
    """Start the "Thinking..." status spinner unless one is already running."""
    global current_spinner
    with spinner_lock:
        if current_spinner is not None:
            return
        current_spinner = console.status("[yellow]Thinking...", spinner="dots")
        current_spinner.start()
58
+
59
+
60
def stop_spinner(console: Console) -> None:
    """Stop and discard the active spinner, if any."""
    global current_spinner
    with spinner_lock:
        if current_spinner is None:
            return
        current_spinner.stop()
        current_spinner = None
67
+
68
+
69
def _run_command(agent, console: Console, command: str) -> None:
    """Execute a single slash command (currently only '/clear') against the agent."""
    if command == '/clear':
        logger.info("Clearing agent memory...")
        console.print("[yellow]Clearing agent memory...[/yellow]")
        agent.clear_memory()
        console.print("[green]Memory cleared successfully![/green]")
    else:
        console.print(f"[red]Unknown command: {command}[/red]")


def interactive_task_runner(
    agent,
    console: Console,
    max_iterations: int,
    no_stream: bool,
) -> None:
    """Run tasks interactively, asking the user if they want to continue after each task.

    Args:
        agent: The agent instance to use for solving tasks
        console: Rich console instance for output
        max_iterations: Maximum number of iterations per task
        no_stream: Disable streaming output
    """
    while True:
        logger.debug("Waiting for user input...")
        task_content = get_multiline_input(console).strip()

        # Empty input ends the session.
        if not task_content:
            logger.info("No task provided. Exiting...")
            console.print("[yellow]No task provided. Exiting...[/yellow]")
            break

        # Slash commands are dispatched without confirmation.
        if task_content.startswith('/'):
            _run_command(agent, console, task_content.lower())
            continue

        # For non-commands, show a preview and ask for confirmation.
        console.print(
            Panel.fit(
                f"[bold]Task to be submitted:[/bold]\n{task_content}",
                title="[bold]Task Preview[/bold]",
                border_style="blue",
            )
        )

        if not Confirm.ask("[bold]Are you sure you want to submit this task?[/bold]"):
            console.print("[yellow]Task submission cancelled.[/yellow]")
            if not Confirm.ask("[bold]Would you like to ask another question?[/bold]"):
                break
            continue

        console.print(
            Panel.fit(
                "[green]✓ Task successfully submitted! Processing...[/green]",
                title="[bold]Status[/bold]",
                border_style="green",
            )
        )

        logger.debug(f"Solving task with agent: {task_content}")
        # clear_memory=False keeps conversational context across tasks in
        # the same interactive session.
        result = agent.solve_task(
            task=task_content,
            max_iterations=max_iterations,
            streaming=not no_stream,
            clear_memory=False,
        )
        logger.debug(f"Task solved with result: {result} using {max_iterations} iterations")

        console.print(
            Panel.fit(
                f"[bold]Task Result:[/bold]\n{result}",
                title="[bold]Execution Output[/bold]",
                border_style="green"
            )
        )

        if not Confirm.ask("[bold]Would you like to ask another question?[/bold]"):
            break
142
+
143
+
144
def _execute_single_task(
    agent,
    console: Console,
    task_content: str,
    max_iterations: int,
    no_stream: bool,
) -> None:
    """Validate the iteration budget, run one task on the agent, and print the result."""
    logger.debug(f"Solving task with agent: {task_content}")
    if max_iterations < 1:
        raise ValueError("max_iterations must be greater than 0")
    result = agent.solve_task(task=task_content, max_iterations=max_iterations, streaming=not no_stream)
    logger.debug(f"Task solved with result: {result} using {max_iterations} iterations")

    console.print(
        Panel.fit(
            f"[bold]Task Result:[/bold]\n{result}",
            title="[bold]Execution Output[/bold]",
            border_style="green"
        )
    )


def task_runner(
    console: Console,
    file: Optional[str],
    model_name: str,
    verbose: bool,
    mode: str,
    log: str,
    vision_model_name: str | None,
    task: Optional[str],
    max_iterations: int,
    compact_every_n_iteration: int | None,
    max_tokens_working_memory: int | None,
    no_stream: bool,
) -> None:
    """Execute a task with the QuantaLogic AI Assistant.

    Args:
        console: Rich console instance for output
        file: Optional path to task file
        model_name: Name of the model to use
        verbose: Enable verbose logging
        mode: Operation mode
        log: Log level
        vision_model_name: Optional vision model name
        task: Optional task string
        max_iterations: Maximum number of iterations
        compact_every_n_iteration: Optional number of iterations before memory compaction
        max_tokens_working_memory: Optional maximum tokens for working memory
        no_stream: Disable streaming output
    """
    switch_verbose(verbose, log)

    # Create the agent instance with the specified configuration
    agent = create_agent_for_mode(
        mode=mode,
        model_name=model_name,
        vision_model_name=vision_model_name,
        compact_every_n_iteration=compact_every_n_iteration,
        max_tokens_working_memory=max_tokens_working_memory
    )

    if file:
        # Execute a single task read from the given file (or URL).
        _execute_single_task(agent, console, get_task_from_file(file), max_iterations, no_stream)
    elif task:
        # Execute a single task passed directly on the command line.
        check_new_version()
        _execute_single_task(agent, console, task, max_iterations, no_stream)
    else:
        # Interactive mode
        display_welcome_message(
            console=console,
            model_name=model_name,
            version=get_version(),
            vision_model_name=vision_model_name,
            max_iterations=max_iterations,
            compact_every_n_iteration=compact_every_n_iteration,
            max_tokens_working_memory=max_tokens_working_memory,
            mode=mode,
        )
        check_new_version()
        logger.debug(
            f"Created agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
        )

        events = [
            "task_start",
            "task_think_start",
            "task_think_end",
            "task_complete",
            "tool_execution_start",
            "tool_execution_end",
            "error_max_iterations_reached",
            "memory_full",
            "memory_compacted",
            "memory_summary",
        ]

        # Spinner runs while the agent is thinking and stops when it finishes.
        def handle_task_think_start(*args, **kwargs):
            start_spinner(console)

        def handle_task_think_end(*args, **kwargs):
            stop_spinner(console)

        def handle_stream_chunk(event: str, data: str) -> None:
            # A streamed chunk means output is arriving: drop the spinner first.
            if current_spinner:
                stop_spinner(console)
            if data is not None:
                console.print(data, end="", markup=False)

        agent.event_emitter.on(
            event=events,
            listener=console_print_events,
        )

        agent.event_emitter.on(
            event="task_think_start",
            listener=handle_task_think_start,
        )

        agent.event_emitter.on(
            event="task_think_end",
            listener=handle_task_think_end,
        )

        agent.event_emitter.on(
            event="stream_chunk",
            listener=handle_stream_chunk,
        )

        # Fix: was a plain string, so "{events}" was logged literally instead
        # of the interpolated list.
        logger.debug(f"Registered event handlers for agent events with events: {events}")

        interactive_task_runner(agent, console, max_iterations, no_stream)
@@ -0,0 +1,41 @@
1
+ """Module for checking and displaying version updates."""
2
+
3
+ import random
4
+
5
+ from rich.console import Console
6
+ from rich.panel import Panel
7
+
8
+ from quantalogic.utils.check_version import check_if_is_latest_version
9
+ from quantalogic.version import get_version
10
+
11
+
12
def check_new_version() -> None:
    """Randomly check for updates and display a notification if a new version is available.

    The check runs on roughly 1 in 10 invocations. When it runs, it compares
    the installed version against the latest published one and prints an
    update panel with installation instructions if the package is outdated.
    All failures are swallowed: version checking is strictly best-effort.
    """
    # Skip the (network-backed) check ~90% of the time.
    if random.randint(1, 10) != 1:
        return
    try:
        current_version = get_version()
        has_new_version, latest_version = check_if_is_latest_version()
        if not has_new_version:
            return
        console = Console()
        console.print(
            Panel.fit(
                f"[yellow]⚠️ Update Available![/yellow]\n\n"
                f"Current version: [bold]{current_version}[/bold]\n"
                f"Latest version: [bold]{latest_version}[/bold]\n\n"
                "To update, run:\n"
                "[bold]pip install --upgrade quantalogic[/bold]\n"
                "or if using pipx:\n"
                "[bold]pipx upgrade quantalogic[/bold]",
                title="[bold]Update Available[/bold]",
                border_style="yellow",
            )
        )
    except Exception:
        # Never let a failed update check break the CLI.
        return
@@ -0,0 +1,86 @@
1
+ """Module for displaying welcome messages and instructions."""
2
+
3
+ from rich.console import Console
4
+ from rich.panel import Panel
5
+
6
+
7
+ def create_config_table(
8
+ mode: str,
9
+ model_name: str,
10
+ vision_model_name: str | None,
11
+ max_iterations: int,
12
+ compact_every_n_iteration: int | None,
13
+ max_tokens_working_memory: int | None,
14
+ ) -> str:
15
+ """Create a formatted string representation of the configuration table."""
16
+ return (
17
+ f"• Mode [cyan]{mode}[/cyan]\n"
18
+ f"• Language Model [cyan]{model_name}[/cyan]\n"
19
+ f"• Vision Model {vision_model_name or '[dim]Not Configured[/dim]'}\n"
20
+ f"• Max Iterations {max_iterations}\n"
21
+ f"• Memory Compaction {compact_every_n_iteration or '[dim]Default[/dim]'}\n"
22
+ f"• Max Memory Tokens {max_tokens_working_memory or '[dim]Default[/dim]'}"
23
+ )
24
+
25
+
26
def create_tips_section() -> str:
    """Return the formatted 'Pro Tips' block shown in the welcome panel."""
    tips = [
        "💡 [bold cyan]Be specific[/bold cyan] in your task descriptions",
        "💭 Use [bold cyan]clear and concise[/bold cyan] language",
        "✨ Include [bold cyan]relevant context[/bold cyan] for coding tasks",
        "🚀 Don't hesitate to ask [bold cyan]challenging questions[/bold cyan]!",
    ]
    return "\n".join(tips)
34
+
35
+
36
def display_welcome_message(
    console: Console,
    model_name: str,
    version: str,
    vision_model_name: str | None = None,
    max_iterations: int = 50,
    compact_every_n_iteration: int | None = None,
    max_tokens_working_memory: int | None = None,
    mode: str = "basic",
) -> None:
    """Display a welcome message and instructions for the QuantaLogic AI Assistant.

    Args:
        console: Rich Console instance for rendering output
        model_name: Name of the language model being used
        version: Version of the QuantaLogic AI Assistant
        vision_model_name: Optional name of the vision model
        max_iterations: Maximum number of iterations for task solving
        compact_every_n_iteration: Frequency of memory compaction
        max_tokens_working_memory: Maximum tokens allowed in working memory
        mode: Current agent mode of operation
    """
    config_section = create_config_table(
        mode,
        model_name,
        vision_model_name,
        max_iterations,
        compact_every_n_iteration,
        max_tokens_working_memory,
    )

    # Assemble the banner body: title, configuration summary, then tips.
    welcome_content = (
        "\n[bold violet]Welcome to QuantaLogic AI Assistant[/bold violet] "
        f"[bold blue]v{version}[/bold blue]\n\n"
        "[bold]System Configuration[/bold]\n"
        f"{config_section}\n\n"
        "[bold magenta]Pro Tips[/bold magenta]\n"
        f"{create_tips_section()}\n"
    )

    console.print(
        Panel(
            welcome_content,
            border_style="blue",
            title="[bold]🤖 QuantaLogic[/bold]",
            subtitle="[bold cyan]Ready to assist you[/bold cyan]",
            padding=(1, 2),
        )
    )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: quantalogic
3
- Version: 0.2.23
3
+ Version: 0.2.25
4
4
  Summary: QuantaLogic ReAct Agents
5
5
  Author: Raphaël MANSUY
6
6
  Author-email: raphael.mansuy@gmail.com
@@ -32,6 +32,7 @@ Requires-Dist: pathspec (>=0.12.1,<0.13.0)
32
32
  Requires-Dist: prompt-toolkit (>=3.0.48,<4.0.0)
33
33
  Requires-Dist: pydantic (>=2.10.4,<3.0.0)
34
34
  Requires-Dist: pymdown-extensions (>=10.3.1,<11.0.0)
35
+ Requires-Dist: requests (>=2.32.3,<3.0.0)
35
36
  Requires-Dist: rich (>=13.9.4,<14.0.0)
36
37
  Requires-Dist: serpapi (>=0.1.5,<0.2.0)
37
38
  Requires-Dist: tenacity (>=9.0.0,<10.0.0)
@@ -1,14 +1,16 @@
1
1
  quantalogic/__init__.py,sha256=kX0c_xmD9OslWnAE92YHMGuD7xZcTo8ZOF_5R64HKps,784
2
- quantalogic/agent.py,sha256=4Z3ImHbLs3wThspoSYR-mR_zSExMf1nwcLGxZjTQLXY,31226
2
+ quantalogic/agent.py,sha256=JJQkUcUtwkXHMk2Fk3_PG-R1ZI_axz0v6L0FcEztDJM,31615
3
3
  quantalogic/agent_config.py,sha256=9sjDnCPlAqVM45oguB_D509WSCaXZmuaVUtLcOvDlPg,7572
4
+ quantalogic/agent_factory.py,sha256=ODVGuGtugSzmSdP6jiWlT8WyC5onANc6BIs83FC90Bg,3782
4
5
  quantalogic/coding_agent.py,sha256=UJ0fdKKA8XSB2py0NW4-e-orozo78ZAprXWuortYBiA,4935
5
6
  quantalogic/console_print_events.py,sha256=KB-DGi52As8M96eUs1N_vgNqKIFtqv_H8NTOd3TLTgQ,2163
6
7
  quantalogic/console_print_token.py,sha256=qSU-3kmoZk4T5-1ybrEBi8tIXDPcz7eyWKhGh3E8uIg,395
7
8
  quantalogic/docs_cli.py,sha256=3giVbUpespB9ZdTSJ955A3BhcOaBl5Lwsn1AVy9XAeY,1663
8
9
  quantalogic/event_emitter.py,sha256=jqot2g4JRXc88K6PW837Oqxbf7shZfO-xdPaUWmzupk,7901
9
- quantalogic/generative_model.py,sha256=vv7fvkZz3GRvtuhYGeq64bJUhxyjmhbz-G5mDbsvCiE,15451
10
- quantalogic/interactive_text_editor.py,sha256=kYeTA2qej5kxtPvAUHy_Dr2MhrGQAyenLFpW9mU9Rmw,6855
11
- quantalogic/main.py,sha256=9jymO-5P0lFlMVTRRaZde1jO9aeZtwRipH2qQbS91BQ,16777
10
+ quantalogic/generative_model.py,sha256=az_kqWdEBQRROvvYZu6d68JQ4nzDbQG23gADeHLMypc,16761
11
+ quantalogic/get_model_info.py,sha256=YCBZ8qynlq_iLUc--xBrQxacFZL9RHZPv5cdVwjukcw,602
12
+ quantalogic/interactive_text_editor.py,sha256=_pNPnUG3Y3_YX0R9-kx0vcaUWU0AAC350jpJ5UjrTuE,6986
13
+ quantalogic/main.py,sha256=YYP0DSnzlLpbyQPlQxbq-ZKIoA5ezv0kjy5LsQlT6bI,5227
12
14
  quantalogic/memory.py,sha256=zbtRuM05jaS2lJll-92dt5JfYVLERnF_m_9xqp2x-k0,6304
13
15
  quantalogic/model_names.py,sha256=UZlz25zG9B2dpfwdw_e1Gw5qFsKQ7iME9FJh9Ts4u6s,938
14
16
  quantalogic/prompts.py,sha256=CW4CRgW1hTpXeWdeJNbPaRPUeUm-xKuGHJrT8mOtvkw,3602
@@ -21,6 +23,8 @@ quantalogic/server/state.py,sha256=TwtL0BTp_LT-fynF1IR4k8WVXuxXWtSv3NgWG9fuUME,7
21
23
  quantalogic/server/static/js/event_visualizer.js,sha256=eFkkWyNZw3zOZlF18kxbfsWql8a2C13qBFEOAPzrj88,19646
22
24
  quantalogic/server/static/js/quantalogic.js,sha256=x7TrlZGR1Y0WLK2DWl1xY847BhEWMPnL0Ua7KtOldUc,22311
23
25
  quantalogic/server/templates/index.html,sha256=nDnXJoQEm1vXbhXtgaYk0G5VXj0wwzE6KrqEDhHFpj4,7773
26
+ quantalogic/task_file_reader.py,sha256=AMIJoeVY9Hhu0dBJ-C5EyaOFsXLkhn2oBhVs-WTnnLk,1460
27
+ quantalogic/task_runner.py,sha256=FtxfZs2dxdsSZoiW92K3dpfegFe0dyKx9ZP5CCyEAzo,9965
24
28
  quantalogic/tool_manager.py,sha256=JAC5E5kLfYzYJx0QRIWbG14q1hlkOcwJFBG7HE8twpU,2425
25
29
  quantalogic/tools/__init__.py,sha256=GcYjE1r6aNQ_JZ8uwk0yaCCCMBz6zrD_PjkRtZiUhSk,1923
26
30
  quantalogic/tools/agent_tool.py,sha256=MXCXxWHRch7VK4UWhtRP1jeI8Np9Ne2CUGo8vm1oZiM,3064
@@ -71,10 +75,12 @@ quantalogic/utils/git_ls.py,sha256=_k6QIQtc0aM1bsG340jBp4VrdevbcH8Pg2CV4r9oHok,5
71
75
  quantalogic/utils/read_file.py,sha256=tSRVHk8dIP4nNLL89v5kRki4hOTjVyjbmuEb2zwvwCY,2077
72
76
  quantalogic/utils/read_http_text_content.py,sha256=n3IayT5KcqctIVVF2gOQQAMf3Ow6eenlVgfXTpLcQbw,4410
73
77
  quantalogic/version.py,sha256=ea_cRutaQk5_lwlLbUUvPFuOT7Of7-gAsDl7wdveS-g,107
78
+ quantalogic/version_check.py,sha256=cttR1lR3OienGLl7NrK1Te1fhDkqSjCci7HC1vFUTSY,1627
79
+ quantalogic/welcome_message.py,sha256=IXMhem8h7srzNUwvw8G_lmEkHU8PFfote021E_BXmVk,3039
74
80
  quantalogic/xml_parser.py,sha256=uMLQNHTRCg116FwcjRoquZmSwVtE4LEH-6V2E3RD-dA,11466
75
81
  quantalogic/xml_tool_parser.py,sha256=Vz4LEgDbelJynD1siLOVkJ3gLlfHsUk65_gCwbYJyGc,3784
76
- quantalogic-0.2.23.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
77
- quantalogic-0.2.23.dist-info/METADATA,sha256=RL78oJwIbWXJuTTEgt6blHokOqiDpX6G3SdCy3i-ZFk,20174
78
- quantalogic-0.2.23.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
79
- quantalogic-0.2.23.dist-info/entry_points.txt,sha256=h74O_Q3qBRCrDR99qvwB4BpBGzASPUIjCfxHq6Qnups,183
80
- quantalogic-0.2.23.dist-info/RECORD,,
82
+ quantalogic-0.2.25.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
83
+ quantalogic-0.2.25.dist-info/METADATA,sha256=MwnMh_F2PiVY2lYXAStsCFpf0Slcs67RAFtbu1jbFwk,20216
84
+ quantalogic-0.2.25.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
85
+ quantalogic-0.2.25.dist-info/entry_points.txt,sha256=h74O_Q3qBRCrDR99qvwB4BpBGzASPUIjCfxHq6Qnups,183
86
+ quantalogic-0.2.25.dist-info/RECORD,,