quantalogic 0.2.23__tar.gz → 0.2.25__tar.gz

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (86)
  1. {quantalogic-0.2.23 → quantalogic-0.2.25}/PKG-INFO +2 -1
  2. {quantalogic-0.2.23 → quantalogic-0.2.25}/pyproject.toml +2 -1
  3. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/agent.py +13 -6
  4. quantalogic-0.2.25/quantalogic/agent_factory.py +106 -0
  5. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/generative_model.py +43 -15
  6. quantalogic-0.2.25/quantalogic/get_model_info.py +14 -0
  7. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/interactive_text_editor.py +4 -0
  8. quantalogic-0.2.25/quantalogic/main.py +189 -0
  9. quantalogic-0.2.25/quantalogic/task_file_reader.py +38 -0
  10. quantalogic-0.2.25/quantalogic/task_runner.py +284 -0
  11. quantalogic-0.2.25/quantalogic/version_check.py +41 -0
  12. quantalogic-0.2.25/quantalogic/welcome_message.py +86 -0
  13. quantalogic-0.2.23/quantalogic/main.py +0 -448
  14. {quantalogic-0.2.23 → quantalogic-0.2.25}/LICENSE +0 -0
  15. {quantalogic-0.2.23 → quantalogic-0.2.25}/README.md +0 -0
  16. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/__init__.py +0 -0
  17. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/agent_config.py +0 -0
  18. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/coding_agent.py +0 -0
  19. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/console_print_events.py +0 -0
  20. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/console_print_token.py +0 -0
  21. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/docs_cli.py +0 -0
  22. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/event_emitter.py +0 -0
  23. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/memory.py +0 -0
  24. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/model_names.py +0 -0
  25. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/prompts.py +0 -0
  26. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/search_agent.py +0 -0
  27. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/server/__init__.py +0 -0
  28. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/server/agent_server.py +0 -0
  29. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/server/models.py +0 -0
  30. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/server/routes.py +0 -0
  31. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/server/state.py +0 -0
  32. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/server/static/js/event_visualizer.js +0 -0
  33. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/server/static/js/quantalogic.js +0 -0
  34. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/server/templates/index.html +0 -0
  35. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tool_manager.py +0 -0
  36. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/__init__.py +0 -0
  37. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/agent_tool.py +0 -0
  38. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/dalle_e.py +0 -0
  39. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/download_http_file_tool.py +0 -0
  40. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/duckduckgo_search_tool.py +0 -0
  41. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/edit_whole_content_tool.py +0 -0
  42. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/elixir_tool.py +0 -0
  43. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/execute_bash_command_tool.py +0 -0
  44. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/input_question_tool.py +0 -0
  45. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/jinja_tool.py +0 -0
  46. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/__init__.py +0 -0
  47. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/c_handler.py +0 -0
  48. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/cpp_handler.py +0 -0
  49. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/go_handler.py +0 -0
  50. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/java_handler.py +0 -0
  51. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/javascript_handler.py +0 -0
  52. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/python_handler.py +0 -0
  53. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/rust_handler.py +0 -0
  54. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/scala_handler.py +0 -0
  55. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/language_handlers/typescript_handler.py +0 -0
  56. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/list_directory_tool.py +0 -0
  57. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/llm_tool.py +0 -0
  58. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/llm_vision_tool.py +0 -0
  59. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/markitdown_tool.py +0 -0
  60. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/nodejs_tool.py +0 -0
  61. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/python_tool.py +0 -0
  62. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/read_file_block_tool.py +0 -0
  63. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/read_file_tool.py +0 -0
  64. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/read_html_tool.py +0 -0
  65. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/replace_in_file_tool.py +0 -0
  66. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/ripgrep_tool.py +0 -0
  67. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/search_definition_names.py +0 -0
  68. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/serpapi_search_tool.py +0 -0
  69. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/task_complete_tool.py +0 -0
  70. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/tool.py +0 -0
  71. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/unified_diff_tool.py +0 -0
  72. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/wikipedia_search_tool.py +0 -0
  73. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/tools/write_file_tool.py +0 -0
  74. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/__init__.py +0 -0
  75. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/ask_user_validation.py +0 -0
  76. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/check_version.py +0 -0
  77. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/download_http_file.py +0 -0
  78. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/get_coding_environment.py +0 -0
  79. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/get_environment.py +0 -0
  80. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/get_quantalogic_rules_content.py +0 -0
  81. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/git_ls.py +0 -0
  82. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/read_file.py +0 -0
  83. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/utils/read_http_text_content.py +0 -0
  84. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/version.py +0 -0
  85. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/xml_parser.py +0 -0
  86. {quantalogic-0.2.23 → quantalogic-0.2.25}/quantalogic/xml_tool_parser.py +0 -0
--- quantalogic-0.2.23/PKG-INFO
+++ quantalogic-0.2.25/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: quantalogic
-Version: 0.2.23
+Version: 0.2.25
 Summary: QuantaLogic ReAct Agents
 Author: Raphaël MANSUY
 Author-email: raphael.mansuy@gmail.com
@@ -32,6 +32,7 @@ Requires-Dist: pathspec (>=0.12.1,<0.13.0)
 Requires-Dist: prompt-toolkit (>=3.0.48,<4.0.0)
 Requires-Dist: pydantic (>=2.10.4,<3.0.0)
 Requires-Dist: pymdown-extensions (>=10.3.1,<11.0.0)
+Requires-Dist: requests (>=2.32.3,<3.0.0)
 Requires-Dist: rich (>=13.9.4,<14.0.0)
 Requires-Dist: serpapi (>=0.1.5,<0.2.0)
 Requires-Dist: tenacity (>=9.0.0,<10.0.0)
--- quantalogic-0.2.23/pyproject.toml
+++ quantalogic-0.2.25/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "quantalogic"
-version = "0.2.23"
+version = "0.2.25"
 description = "QuantaLogic ReAct Agents"
 authors = ["Raphaël MANSUY <raphael.mansuy@gmail.com>"]
 readme = "README.md"
@@ -49,6 +49,7 @@ llmlingua = "^0.2.2"
 jinja2 = "^3.1.5"
 beautifulsoup4 = "^4.12.3"
 markdownify = "^0.14.1"
+requests = "^2.32.3"
 
 [tool.poetry.scripts]
 quantalogic = "quantalogic.main:cli"
--- quantalogic-0.2.23/quantalogic/agent.py
+++ quantalogic-0.2.25/quantalogic/agent.py
@@ -140,7 +140,11 @@ class Agent(BaseModel):
             logger.error(f"Failed to initialize agent: {str(e)}")
             raise
 
-    def solve_task(self, task: str, max_iterations: int = 30, streaming: bool = False) -> str:
+    def clear_memory(self):
+        """Clear the memory and reset the session."""
+        self._reset_session(clear_memory=True)
+
+    def solve_task(self, task: str, max_iterations: int = 30, streaming: bool = False, clear_memory: bool = True) -> str:
         """Solve the given task using the ReAct framework.
 
         Args:
@@ -148,12 +152,13 @@ class Agent(BaseModel):
             max_iterations (int, optional): Maximum number of iterations to attempt solving the task.
                 Defaults to 30 to prevent infinite loops and ensure timely task completion.
             streaming (bool, optional): Whether to use streaming mode for generating responses.
+            clear_memory (bool, optional): Whether to clear the memory before solving the task.
 
         Returns:
             str: The final response after task completion.
         """
         logger.debug(f"Solving task... {task}")
-        self._reset_session(task_to_solve=task, max_iterations=max_iterations)
+        self._reset_session(task_to_solve=task, max_iterations=max_iterations, clear_memory=clear_memory)
 
         # Generate task summary
         self.task_to_solve_summary = self._generate_task_summary(task)
@@ -263,13 +268,15 @@ class Agent(BaseModel):
 
         return answer
 
-    def _reset_session(self, task_to_solve: str = "", max_iterations: int = 30):
+    def _reset_session(self, task_to_solve: str = "", max_iterations: int = 30, clear_memory: bool = True):
         """Reset the agent's session."""
         logger.debug("Resetting session...")
         self.task_to_solve = task_to_solve
-        self.memory.reset()
-        self.variable_store.reset()
-        self.total_tokens = 0
+        if clear_memory:
+            logger.debug("Clearing memory...")
+            self.memory.reset()
+            self.variable_store.reset()
+            self.total_tokens = 0
         self.current_iteration = 0
         self.max_output_tokens = self.model.get_model_max_output_tokens() or DEFAULT_MAX_OUTPUT_TOKENS
         self.max_input_tokens = self.model.get_model_max_input_tokens() or DEFAULT_MAX_INPUT_TOKENS
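
The agent.py changes above add a public clear_memory() method and a clear_memory flag on solve_task(), so callers can decide whether working memory survives between tasks. An illustrative sketch (the task text and model id are made up; the API calls are the ones shown in this diff):

    from quantalogic.agent_factory import create_agent_for_mode  # new in 0.2.25, shown in the next file below

    agent = create_agent_for_mode(mode="basic", model_name="openrouter/deepseek/deepseek-chat", vision_model_name=None)

    first = agent.solve_task("Summarize the project README", max_iterations=10)
    # Keep the previous exchange in working memory for a follow-up question.
    follow_up = agent.solve_task("Now list the open questions you noticed", clear_memory=False)
    # Explicitly wipe memory between unrelated sessions.
    agent.clear_memory()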
--- /dev/null
+++ quantalogic-0.2.25/quantalogic/agent_factory.py
@@ -0,0 +1,106 @@
+"""Agent factory module for creating different types of agents."""
+
+from typing import Optional
+
+from loguru import logger
+
+from quantalogic.agent import Agent
+from quantalogic.agent_config import (
+    create_basic_agent,
+    create_full_agent,
+    create_interpreter_agent,
+)
+from quantalogic.coding_agent import create_coding_agent
+from quantalogic.search_agent import create_search_agent
+
+
+def create_agent_for_mode(
+    mode: str,
+    model_name: str,
+    vision_model_name: Optional[str],
+    no_stream: bool = False,
+    compact_every_n_iteration: Optional[int] = None,
+    max_tokens_working_memory: Optional[int] = None
+) -> Agent:
+    """Create an agent based on the specified mode.
+
+    Args:
+        mode: The mode of operation for the agent
+        model_name: The name of the language model to use
+        vision_model_name: Optional name of the vision model
+        no_stream: Whether to disable streaming mode
+        compact_every_n_iteration: Optional number of iterations before compacting memory
+        max_tokens_working_memory: Optional maximum tokens for working memory
+
+    Returns:
+        Agent: The created agent instance
+
+    Raises:
+        ValueError: If an unknown agent mode is specified
+    """
+    logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
+    logger.debug(f"Using vision model: {vision_model_name}")
+    logger.debug(f"Using no_stream: {no_stream}")
+    logger.debug(f"Using compact_every_n_iteration: {compact_every_n_iteration}")
+    logger.debug(f"Using max_tokens_working_memory: {max_tokens_working_memory}")
+
+    if mode == "code":
+        logger.debug("Creating code agent without basic mode")
+        return create_coding_agent(
+            model_name,
+            vision_model_name,
+            basic=False,
+            no_stream=no_stream,
+            compact_every_n_iteration=compact_every_n_iteration,
+            max_tokens_working_memory=max_tokens_working_memory
+        )
+    if mode == "code-basic":
+        return create_coding_agent(
+            model_name,
+            vision_model_name,
+            basic=True,
+            no_stream=no_stream,
+            compact_every_n_iteration=compact_every_n_iteration,
+            max_tokens_working_memory=max_tokens_working_memory
+        )
+    elif mode == "basic":
+        return create_basic_agent(
+            model_name,
+            vision_model_name,
+            no_stream=no_stream,
+            compact_every_n_iteration=compact_every_n_iteration,
+            max_tokens_working_memory=max_tokens_working_memory
+        )
+    elif mode == "full":
+        return create_full_agent(
+            model_name,
+            vision_model_name,
+            no_stream=no_stream,
+            compact_every_n_iteration=compact_every_n_iteration,
+            max_tokens_working_memory=max_tokens_working_memory
+        )
+    elif mode == "interpreter":
+        return create_interpreter_agent(
+            model_name,
+            vision_model_name,
+            no_stream=no_stream,
+            compact_every_n_iteration=compact_every_n_iteration,
+            max_tokens_working_memory=max_tokens_working_memory
+        )
+    elif mode == "search":
+        return create_search_agent(
+            model_name,
+            no_stream=no_stream,
+            compact_every_n_iteration=compact_every_n_iteration,
+            max_tokens_working_memory=max_tokens_working_memory
+        )
+    if mode == "search-full":
+        return create_search_agent(
+            model_name,
+            mode_full=True,
+            no_stream=no_stream,
+            compact_every_n_iteration=compact_every_n_iteration,
+            max_tokens_working_memory=max_tokens_working_memory
+        )
+    else:
+        raise ValueError(f"Unknown agent mode: {mode}")
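
A short sketch of how the new factory might be called; the mode names and keyword arguments come from the code above, while the model id and limits are illustrative:

    from quantalogic.agent_factory import create_agent_for_mode

    agent = create_agent_for_mode(
        mode="code",
        model_name="openrouter/deepseek/deepseek-chat",  # any litellm-style model id
        vision_model_name=None,
        no_stream=True,
        compact_every_n_iteration=5,       # compact working memory every 5 iterations
        max_tokens_working_memory=32000,   # cap the working-memory token budget
    )
    result = agent.solve_task("Add type hints to quantalogic/xml_parser.py", max_iterations=20)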
--- quantalogic-0.2.23/quantalogic/generative_model.py
+++ quantalogic-0.2.25/quantalogic/generative_model.py
@@ -1,16 +1,17 @@
 """Generative model module for AI-powered text generation."""
 
 import functools
-from typing import Dict, Any, Optional, List
 from datetime import datetime
+from typing import Any, Dict, List
 
 import litellm
 import openai
-from litellm import completion, exceptions, get_max_tokens, get_model_info, token_counter, image_generation
+from litellm import completion, exceptions, get_max_tokens, get_model_info, image_generation, token_counter
 from loguru import logger
 from pydantic import BaseModel, Field, field_validator
 
 from quantalogic.event_emitter import EventEmitter  # Importing the EventEmitter class
+from quantalogic.get_model_info import get_max_input_tokens, get_max_output_tokens, model_info
 
 MIN_RETRIES = 1
 
@@ -265,15 +266,23 @@
     def _get_model_info_impl(self, model_name: str) -> dict:
         """Get information about the model with prefix fallback logic."""
         original_model = model_name
-
+        tried_models = [model_name]
+
         while True:
             try:
                 logger.debug(f"Attempting to retrieve model info for: {model_name}")
-                model_info = get_model_info(model_name)
-                if model_info:
-                    logger.debug(f"Found model info for {model_name}: {model_info}")
-                    return model_info
-            except Exception:
+                # Try direct lookup from model_info dictionary first
+                if model_name in model_info:
+                    logger.debug(f"Found model info for {model_name} in model_info")
+                    return model_info[model_name]
+
+                # Try get_model_info as fallback
+                info = get_model_info(model_name)
+                if info:
+                    logger.debug(f"Found model info for {model_name} via get_model_info")
+                    return info
+            except Exception as e:
+                logger.debug(f"Failed to get model info for {model_name}: {str(e)}")
                 pass
 
             # Try removing one prefix level
@@ -281,8 +290,9 @@
             if len(parts) <= 1:
                 break
             model_name = "/".join(parts[1:])
+            tried_models.append(model_name)
 
-        error_msg = f"Could not find model info for {original_model} after trying: {self.model}{model_name}"
+        error_msg = f"Could not find model info for {original_model} after trying: {''.join(tried_models)}"
         logger.error(error_msg)
         raise ValueError(error_msg)
 
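
The lookup above now consults the package's own model_info table before litellm's get_model_info, and it records every name it tries while stripping provider prefixes so the final error message lists them all. The fallback idea in isolation, as a simplified sketch rather than the package code:

    # Simplified illustration of the prefix-fallback lookup; `table` stands in for model_info.
    def lookup_with_prefix_fallback(model_name: str, table: dict) -> dict:
        tried = [model_name]
        while True:
            if model_name in table:
                return table[model_name]
            parts = model_name.split("/")
            if len(parts) <= 1:
                break
            # Drop one prefix level: "openrouter/deepseek/deepseek-r1" -> "deepseek/deepseek-r1" -> "deepseek-r1"
            model_name = "/".join(parts[1:])
            tried.append(model_name)
        raise ValueError(f"Could not find model info after trying: {', '.join(tried)}")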
@@ -292,12 +302,23 @@
         model_name = self.model
         return self._get_model_info_cached(model_name)
 
-    def get_model_max_input_tokens(self) -> int:
+    def get_model_max_input_tokens(self) -> int | None:
         """Get the maximum number of input tokens for the model."""
         try:
+            # First try direct lookup
+            max_tokens = get_max_input_tokens(self.model)
+            if max_tokens is not None:
+                return max_tokens
+
+            # If not found, try getting from model info
             model_info = self.get_model_info()
-            max_tokens = model_info.get("max_input_tokens") if model_info else None
-            return max_tokens
+            if model_info:
+                return model_info.get("max_input_tokens")
+
+            # If still not found, log warning and return default
+            logger.warning(f"No max input tokens found for {self.model}. Using default.")
+            return 8192  # A reasonable default for many models
+
         except Exception as e:
             logger.error(f"Error getting max input tokens for {self.model}: {e}")
             return None
@@ -305,13 +326,20 @@
     def get_model_max_output_tokens(self) -> int | None:
         """Get the maximum number of output tokens for the model."""
         try:
+            # First try direct lookup
+            max_tokens = get_max_output_tokens(self.model)
+            if max_tokens is not None:
+                return max_tokens
+
+            # If not found, try getting from model info
             model_info = self.get_model_info()
             if model_info:
                 return model_info.get("max_output_tokens")
-
-            # Fallback for unmapped models
+
+            # If still not found, log warning and return default
             logger.warning(f"No max output tokens found for {self.model}. Using default.")
-            return 4096  # A reasonable default for many chat models
+            return 4096  # A reasonable default for many models
+
         except Exception as e:
             logger.error(f"Error getting max output tokens for {self.model}: {e}")
             return None
--- /dev/null
+++ quantalogic-0.2.25/quantalogic/get_model_info.py
@@ -0,0 +1,14 @@
+model_info = {
+    "deepseek-reasoner": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
+    "openrouter/deepseek/deepseek-r1": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
+}
+
+
+def get_max_output_tokens(model_name: str) -> int | None:
+    """Get the maximum output tokens for a given model name."""
+    return model_info.get(model_name, {}).get("max_output_tokens", None)
+
+
+def get_max_input_tokens(model_name: str) -> int | None:
+    """Get the maximum input tokens for a given model name."""
+    return model_info.get(model_name, {}).get("max_input_tokens", None)
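
The new module is just a dictionary plus two lookup helpers, so the values it serves can be checked directly:

    from quantalogic.get_model_info import get_max_input_tokens, get_max_output_tokens

    print(get_max_output_tokens("deepseek-reasoner"))               # 8192 (8 * 1024)
    print(get_max_input_tokens("openrouter/deepseek/deepseek-r1"))  # 131072 (1024 * 128)
    print(get_max_output_tokens("some-unlisted-model"))             # None -> caller falls back to litellm or a default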
--- quantalogic-0.2.23/quantalogic/interactive_text_editor.py
+++ quantalogic-0.2.25/quantalogic/interactive_text_editor.py
@@ -171,6 +171,10 @@ def get_multiline_input(console: Console) -> str:
             prompt_text = f"{line_number:>3}: "
             line = session.prompt(prompt_text, rprompt="Press Enter twice to submit")
 
+            # Handle commands with single return
+            if line.strip().startswith('/'):
+                return line.strip()
+
             if line.strip() == "":
                 blank_lines += 1
                 if blank_lines == 2:
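
With this change, a line that starts with "/" is submitted on a single Enter instead of waiting for two blank lines. The check itself is just a prefix test, sketched here outside the editor (the command text is illustrative):

    # The same test the editor now applies to each input line.
    def is_command(line: str) -> bool:
        return line.strip().startswith("/")

    assert is_command("  /some-command ")
    assert not is_command("ordinary task text")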
--- /dev/null
+++ quantalogic-0.2.25/quantalogic/main.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+"""Main module for the QuantaLogic agent."""
+
+# Standard library imports
+import sys
+from typing import Optional
+
+# Third-party imports
+import click
+from loguru import logger
+
+from quantalogic.version import get_version
+
+# Configure logger
+logger.remove()
+
+from rich.console import Console  # noqa: E402
+from rich.panel import Panel  # noqa: E402
+
+# Local application imports
+from quantalogic.agent_config import (  # noqa: E402
+    MODEL_NAME,
+)
+from quantalogic.task_runner import task_runner  # noqa: E402
+
+AGENT_MODES = ["code", "basic", "interpreter", "full", "code-basic", "search", "search-full"]
+
+
+@click.group(invoke_without_command=True)
+@click.option(
+    "--compact-every-n-iteration",
+    type=int,
+    default=None,
+    help="Set the frequency of memory compaction for the agent (default: max_iterations).",
+)
+@click.option("--version", is_flag=True, help="Show version information.")
+@click.option(
+    "--model-name",
+    default=MODEL_NAME,
+    help='Specify the model to use (litellm format, e.g. "openrouter/deepseek/deepseek-chat").',
+)
+@click.option(
+    "--log",
+    type=click.Choice(["info", "debug", "warning"]),
+    default="info",
+    help="Set logging level (info/debug/warning).",
+)
+@click.option("--verbose", is_flag=True, help="Enable verbose output.")
+@click.option("--mode", type=click.Choice(AGENT_MODES), default="basic", help="Agent mode (code/search/full).")
+@click.option(
+    "--vision-model-name",
+    default=None,
+    help='Specify the vision model to use (litellm format, e.g. "openrouter/A/gpt-4o-mini").',
+)
+@click.option(
+    "--max-iterations",
+    type=int,
+    default=30,
+    help="Maximum number of iterations for task solving (default: 30).",
+)
+@click.option(
+    "--max-tokens-working-memory",
+    type=int,
+    default=None,
+    help="Set the maximum number of tokens allowed in the working memory.",
+)
+@click.pass_context
+def cli(
+    ctx: click.Context,
+    version: bool,
+    model_name: str,
+    verbose: bool,
+    mode: str,
+    log: str,
+    vision_model_name: str | None,
+    max_iterations: int,
+    compact_every_n_iteration: int | None,
+    max_tokens_working_memory: int | None,
+) -> None:
+    """QuantaLogic AI Assistant - A powerful AI tool for various tasks."""
+    if version:
+        console = Console()
+        current_version = get_version()
+        console.print(
+            Panel(f"QuantaLogic Version: [bold green]{current_version}[/bold green]", title="Version Information")
+        )
+        ctx.exit()
+
+    if ctx.invoked_subcommand is None:
+        ctx.invoke(
+            task,
+            model_name=model_name,
+            verbose=verbose,
+            mode=mode,
+            log=log,
+            vision_model_name=vision_model_name,
+            max_iterations=max_iterations,
+            compact_every_n_iteration=compact_every_n_iteration,
+            max_tokens_working_memory=max_tokens_working_memory,
+        )
+
+
+@cli.command()
+@click.option("--file", type=str, help="Path to task file or URL.")
+@click.option(
+    "--model-name",
+    default=MODEL_NAME,
+    help='Specify the model to use (litellm format, e.g. "openrouter/deepseek/deepseek-chat").',
+)
+@click.option("--verbose", is_flag=True, help="Enable verbose output.")
+@click.option("--mode", type=click.Choice(AGENT_MODES), default="basic", help="Agent mode (code/search/full).")
+@click.option(
+    "--log",
+    type=click.Choice(["info", "debug", "warning"]),
+    default="info",
+    help="Set logging level (info/debug/warning).",
+)
+@click.option(
+    "--vision-model-name",
+    default=None,
+    help='Specify the vision model to use (litellm format, e.g. "openrouter/openai/gpt-4o-mini").',
+)
+@click.option(
+    "--max-iterations",
+    type=int,
+    default=30,
+    help="Maximum number of iterations for task solving (default: 30).",
+)
+@click.option(
+    "--compact-every-n-iteration",
+    type=int,
+    default=None,
+    help="Set the frequency of memory compaction for the agent (default: max_iterations).",
+)
+@click.option(
+    "--max-tokens-working-memory",
+    type=int,
+    default=None,
+    help="Set the maximum number of tokens allowed in the working memory.",
+)
+@click.option(
+    "--no-stream",
+    is_flag=True,
+    help="Disable streaming output (default: streaming enabled).",
+)
+@click.argument("task", required=False)
+def task(
+    file: Optional[str],
+    model_name: str,
+    verbose: bool,
+    mode: str,
+    log: str,
+    vision_model_name: str | None,
+    task: Optional[str],
+    max_iterations: int,
+    compact_every_n_iteration: int | None,
+    max_tokens_working_memory: int | None,
+    no_stream: bool,
+) -> None:
+    console = Console()
+
+    try:
+        task_runner(
+            console,
+            file,
+            model_name,
+            verbose,
+            mode,
+            log,
+            vision_model_name,
+            task,
+            max_iterations,
+            compact_every_n_iteration,
+            max_tokens_working_memory,
+            no_stream,
+        )
+    except Exception as e:
+        console.print(f"[red]{str(e)}[/red]")
+        logger.error(f"Error in task execution: {e}", exc_info=True)
+        sys.exit(1)
+
+
+def main():
+    """Main Entry point"""
+    cli()
+
+
+if __name__ == "__main__":
+    main()
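
Since the Click group and its task subcommand are new in this release, a quick in-process check with Click's standard CliRunner shows how the pieces wire together; the task text is illustrative and running it would actually invoke the configured model:

    from click.testing import CliRunner

    from quantalogic.main import cli

    runner = CliRunner()
    print(runner.invoke(cli, ["--version"]).output)

    result = runner.invoke(
        cli,
        ["task", "--mode", "code", "--no-stream", "--max-iterations", "5", "Write a fizzbuzz function"],
    )
    print(result.exit_code)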
--- /dev/null
+++ quantalogic-0.2.25/quantalogic/task_file_reader.py
@@ -0,0 +1,38 @@
+"""Module for reading task content from files or URLs."""
+
+import requests
+
+
+def get_task_from_file(source: str) -> str:
+    """Get task content from specified file path or URL.
+
+    Args:
+        source (str): File path or URL to read task content from
+
+    Returns:
+        str: Stripped task content from the file or URL
+
+    Raises:
+        FileNotFoundError: If the local file does not exist
+        PermissionError: If there are permission issues reading the file
+        requests.exceptions.RequestException: If there are issues retrieving URL content
+        Exception: For any other unexpected errors
+    """
+    try:
+        # Check if source is a URL
+        if source.startswith(('http://', 'https://')):
+            response = requests.get(source, timeout=10)
+            response.raise_for_status()  # Raise an exception for bad status codes
+            return response.text.strip()
+
+        # If not a URL, treat as a local file path
+        with open(source, encoding="utf-8") as f:
+            return f.read().strip()
+    except FileNotFoundError:
+        raise FileNotFoundError(f"Error: File '{source}' not found.")
+    except PermissionError:
+        raise PermissionError(f"Error: Permission denied when reading '{source}'.")
+    except requests.exceptions.RequestException as e:
+        raise Exception(f"Error retrieving URL content: {e}")
+    except Exception as e:
+        raise Exception(f"Unexpected error: {e}")
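
get_task_from_file accepts either a local path or an http(s) URL (fetched via requests with a 10-second timeout, which is why requests was added to pyproject.toml). The paths below are illustrative:

    from quantalogic.task_file_reader import get_task_from_file

    local_task = get_task_from_file("tasks/refactor.md")                    # read from disk, stripped
    remote_task = get_task_from_file("https://example.com/tasks/demo.txt")  # fetched over HTTP(S)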