quantalogic 0.2.17__tar.gz → 0.2.19__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. {quantalogic-0.2.17 → quantalogic-0.2.19}/PKG-INFO +55 -9
  2. {quantalogic-0.2.17 → quantalogic-0.2.19}/README.md +54 -8
  3. {quantalogic-0.2.17 → quantalogic-0.2.19}/pyproject.toml +1 -1
  4. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/agent.py +39 -8
  5. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/agent_config.py +48 -5
  6. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/coding_agent.py +12 -1
  7. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/generative_model.py +6 -0
  8. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/main.py +58 -14
  9. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/prompts.py +2 -2
  10. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/search_agent.py +12 -1
  11. {quantalogic-0.2.17 → quantalogic-0.2.19}/LICENSE +0 -0
  12. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/__init__.py +0 -0
  13. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/console_print_events.py +0 -0
  14. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/console_print_token.py +0 -0
  15. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/docs_cli.py +0 -0
  16. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/event_emitter.py +0 -0
  17. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/interactive_text_editor.py +0 -0
  18. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/memory.py +0 -0
  19. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/model_names.py +0 -0
  20. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/server/__init__.py +0 -0
  21. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/server/agent_server.py +0 -0
  22. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/server/models.py +0 -0
  23. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/server/routes.py +0 -0
  24. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/server/state.py +0 -0
  25. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/server/static/js/event_visualizer.js +0 -0
  26. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/server/static/js/quantalogic.js +0 -0
  27. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/server/templates/index.html +0 -0
  28. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tool_manager.py +0 -0
  29. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/__init__.py +0 -0
  30. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/agent_tool.py +0 -0
  31. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/download_http_file_tool.py +0 -0
  32. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/duckduckgo_search_tool.py +0 -0
  33. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/edit_whole_content_tool.py +0 -0
  34. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/elixir_tool.py +0 -0
  35. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/execute_bash_command_tool.py +0 -0
  36. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/input_question_tool.py +0 -0
  37. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/__init__.py +0 -0
  38. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/c_handler.py +0 -0
  39. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/cpp_handler.py +0 -0
  40. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/go_handler.py +0 -0
  41. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/java_handler.py +0 -0
  42. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/javascript_handler.py +0 -0
  43. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/python_handler.py +0 -0
  44. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/rust_handler.py +0 -0
  45. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/scala_handler.py +0 -0
  46. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/language_handlers/typescript_handler.py +0 -0
  47. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/list_directory_tool.py +0 -0
  48. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/llm_tool.py +0 -0
  49. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/llm_vision_tool.py +0 -0
  50. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/markitdown_tool.py +0 -0
  51. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/nodejs_tool.py +0 -0
  52. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/python_tool.py +0 -0
  53. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/read_file_block_tool.py +0 -0
  54. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/read_file_tool.py +0 -0
  55. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/replace_in_file_tool.py +0 -0
  56. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/ripgrep_tool.py +0 -0
  57. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/search_definition_names.py +0 -0
  58. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/serpapi_search_tool.py +0 -0
  59. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/task_complete_tool.py +0 -0
  60. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/tool.py +0 -0
  61. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/unified_diff_tool.py +0 -0
  62. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/wikipedia_search_tool.py +0 -0
  63. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/tools/write_file_tool.py +0 -0
  64. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/__init__.py +0 -0
  65. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/ask_user_validation.py +0 -0
  66. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/check_version.py +0 -0
  67. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/download_http_file.py +0 -0
  68. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/get_coding_environment.py +0 -0
  69. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/get_environment.py +0 -0
  70. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/get_quantalogic_rules_content.py +0 -0
  71. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/git_ls.py +0 -0
  72. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/read_file.py +0 -0
  73. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/utils/read_http_text_content.py +0 -0
  74. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/version.py +0 -0
  75. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/xml_parser.py +0 -0
  76. {quantalogic-0.2.17 → quantalogic-0.2.19}/quantalogic/xml_tool_parser.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: quantalogic
3
- Version: 0.2.17
3
+ Version: 0.2.19
4
4
  Summary: QuantaLogic ReAct Agents
5
5
  Author: Raphaël MANSUY
6
6
  Author-email: raphael.mansuy@gmail.com
@@ -53,6 +53,7 @@ Description-Content-Type: text/markdown
53
53
  [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
54
54
  [![Python](https://img.shields.io/badge/Python-3.12+-blue.svg)](https://www.python.org/downloads/)
55
55
  [![Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://quantalogic.github.io/quantalogic/)
56
+ [HowTo Guide](./docs/howto/howto.md)
56
57
 
57
58
  QuantaLogic is a ReAct (Reasoning & Action) framework for building advanced AI agents.
58
59
 
@@ -62,6 +63,7 @@ The `cli` version includes coding capabilities comparable to Aider.
62
63
 
63
64
  [📖 Documentation](https://quantalogic.github.io/quantalogic/)
64
65
 
66
+ ![Video](./examples/generated_tutorials/python/quantalogic_8s.gif)
65
67
 
66
68
  ## Why QuantaLogic?
67
69
 
@@ -81,6 +83,8 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
81
83
 
82
84
  ## 📋 Table of Contents
83
85
 
86
+ - [Release Notes](#release-notes)
87
+
84
88
  - [Installation](#-installation)
85
89
  - [Quick Start](#-quickstart)
86
90
  - [Key Components](#-key-components)
@@ -93,6 +97,12 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
93
97
  - [License](#-license)
94
98
  - [Documentation Development](#-documentation-development)
95
99
 
100
+ ## Release Notes
101
+
102
+ See our [Release Notes](RELEASE_NOTES.MD) for detailed version history and changes.
103
+
104
+ [TODO List](TODO.md)
105
+
96
106
  ## 📦 Installation
97
107
 
98
108
  ### Prerequisites
@@ -134,20 +144,18 @@ The QuantaLogic CLI provides powerful command-line capabilities:
134
144
 
135
145
  ```bash
136
146
  Usage: quantalogic [OPTIONS] COMMAND [ARGS]...
137
-
138
147
  QuantaLogic AI Assistant - A powerful AI tool for various tasks.
139
148
 
140
149
  Options:
141
150
  --version Show version information.
142
- --model-name TEXT Specify the text model to use (litellm format,
143
- e.g. "openrouter/deepseek/deepseek-chat").
144
- --vision-model-name TEXT Specify the vision model to use (litellm format,
145
- e.g. "openrouter/openai/gpt-4o-mini").
146
- --log [info|debug|warning] Set logging level (info/debug/warning).
151
+ --model-name TEXT Specify the model (litellm format, e.g., "openrouter/deepseek/deepseek-chat").
152
+ --log [info|debug|warning] Set logging level.
147
153
  --verbose Enable verbose output.
154
+ --mode [code|basic|interpreter|full|code-basic|search|search-full] Agent mode.
155
+ --vision-model-name TEXT Specify the vision model (litellm format, e.g., "openrouter/openai/gpt-4o-mini").
148
156
  --max-iterations INTEGER Maximum iterations for task solving (default: 30).
149
- --mode [code|basic|interpreter|full|code-basic|search|search-full]
150
- Agent mode (code/search/full).
157
+ --max-tokens-working-memory INTEGER Maximum tokens to keep in working memory (default: 4000).
158
+ --compact-every-n-iteration INTEGER Compact memory every N iterations (default: 5).
151
159
  --help Show this message and exit.
152
160
 
153
161
  Commands:
@@ -157,6 +165,38 @@ Commands:
157
165
  ### Commands
158
166
  task Execute a task with the QuantaLogic AI Assistant
159
167
 
168
+ **Usage:** `quantalogic task [OPTIONS] [TASK]`
169
+ **Description:** Execute a task with the QuantaLogic AI Assistant.
170
+ **Options:**
171
+ - `--file PATH`: Path to task file.
172
+ - `--model-name TEXT`: Specify the model (litellm format, e.g., `openrouter/deepseek/deepseek-chat`).
173
+ - `--verbose`: Enable verbose output.
174
+ - `--mode [code|basic|interpreter|full|code-basic|search|search-full]`: Agent mode.
175
+ - `--log [info|debug|warning]`: Set logging level.
176
+ - `--vision-model-name TEXT`: Specify the vision model (litellm format).
177
+ - `--max-iterations INTEGER`: Maximum iterations for task solving (default: 30).
178
+ - `--max-tokens-working-memory INTEGER`: Maximum tokens to keep in working memory (default: 4000).
179
+ - `--compact-every-n-iteration INTEGER`: Compact memory every N iterations (default: 5).
180
+ - `--no-stream`: Disable streaming output (default: enabled).
181
+ - `--help`: Show this message and exit.
182
+
183
+ **Detailed Parameter Descriptions:**
184
+
185
+ - **--model-name**: Specifies the LLM model to use (e.g., "openrouter/deepseek/deepseek-chat")
186
+ - **--mode**: Selects agent capabilities:
187
+ - *code*: Coding-focused with basic capabilities
188
+ - *basic*: General-purpose without coding tools
189
+ - *interpreter*: Interactive code execution
190
+ - *full*: All capabilities enabled
191
+ - *code-basic*: Coding with basic reasoning
192
+ - *search*: Web search integration
193
+ - **--log**: Controls logging verbosity (info, debug, warning)
194
+ - **--vision-model-name**: Specifies vision model for image processing
195
+ - **--max-iterations**: Limits task-solving attempts (default: 30)
196
+ - **--max-tokens-working-memory**: Controls memory usage (default: None)
197
+ - **--compact-every-n-iteration**: Memory optimization frequency (default: None)
198
+ - **--no-stream**: Disables real-time output streaming
199
+
160
200
 
161
201
 
162
202
  ### Detailed Usage
@@ -232,6 +272,12 @@ print(result)
232
272
 
233
273
  ## 📖 Examples
234
274
 
275
+ Watch how QuantaLogic can generate complete tutorials from simple prompts:
276
+
277
+ [![Tutorial Generation Demo](./examples/generated_tutorials/python/quantalogic_long.mp4)](./examples/generated_tutorials/python/quantalogic_long.mp4)
278
+
279
+ Example prompt: [04-write-a-tutorial.md](./examples/tasks/04-write-a-tutorial.md)
280
+
235
281
  Here are some practical examples to help you get started:
236
282
 
237
283
  Here is the markdown table based on the provided directory listing:
@@ -3,6 +3,7 @@
3
3
  [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
4
4
  [![Python](https://img.shields.io/badge/Python-3.12+-blue.svg)](https://www.python.org/downloads/)
5
5
  [![Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://quantalogic.github.io/quantalogic/)
6
+ [HowTo Guide](./docs/howto/howto.md)
6
7
 
7
8
  QuantaLogic is a ReAct (Reasoning & Action) framework for building advanced AI agents.
8
9
 
@@ -12,6 +13,7 @@ The `cli` version includes coding capabilities comparable to Aider.
12
13
 
13
14
  [📖 Documentation](https://quantalogic.github.io/quantalogic/)
14
15
 
16
+ ![Video](./examples/generated_tutorials/python/quantalogic_8s.gif)
15
17
 
16
18
  ## Why QuantaLogic?
17
19
 
@@ -31,6 +33,8 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
31
33
 
32
34
  ## 📋 Table of Contents
33
35
 
36
+ - [Release Notes](#release-notes)
37
+
34
38
  - [Installation](#-installation)
35
39
  - [Quick Start](#-quickstart)
36
40
  - [Key Components](#-key-components)
@@ -43,6 +47,12 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
43
47
  - [License](#-license)
44
48
  - [Documentation Development](#-documentation-development)
45
49
 
50
+ ## Release Notes
51
+
52
+ See our [Release Notes](RELEASE_NOTES.MD) for detailed version history and changes.
53
+
54
+ [TODO List](TODO.md)
55
+
46
56
  ## 📦 Installation
47
57
 
48
58
  ### Prerequisites
@@ -84,20 +94,18 @@ The QuantaLogic CLI provides powerful command-line capabilities:
84
94
 
85
95
  ```bash
86
96
  Usage: quantalogic [OPTIONS] COMMAND [ARGS]...
87
-
88
97
  QuantaLogic AI Assistant - A powerful AI tool for various tasks.
89
98
 
90
99
  Options:
91
100
  --version Show version information.
92
- --model-name TEXT Specify the text model to use (litellm format,
93
- e.g. "openrouter/deepseek/deepseek-chat").
94
- --vision-model-name TEXT Specify the vision model to use (litellm format,
95
- e.g. "openrouter/openai/gpt-4o-mini").
96
- --log [info|debug|warning] Set logging level (info/debug/warning).
101
+ --model-name TEXT Specify the model (litellm format, e.g., "openrouter/deepseek/deepseek-chat").
102
+ --log [info|debug|warning] Set logging level.
97
103
  --verbose Enable verbose output.
104
+ --mode [code|basic|interpreter|full|code-basic|search|search-full] Agent mode.
105
+ --vision-model-name TEXT Specify the vision model (litellm format, e.g., "openrouter/openai/gpt-4o-mini").
98
106
  --max-iterations INTEGER Maximum iterations for task solving (default: 30).
99
- --mode [code|basic|interpreter|full|code-basic|search|search-full]
100
- Agent mode (code/search/full).
107
+ --max-tokens-working-memory INTEGER Maximum tokens to keep in working memory (default: 4000).
108
+ --compact-every-n-iteration INTEGER Compact memory every N iterations (default: 5).
101
109
  --help Show this message and exit.
102
110
 
103
111
  Commands:
@@ -107,6 +115,38 @@ Commands:
107
115
  ### Commands
108
116
  task Execute a task with the QuantaLogic AI Assistant
109
117
 
118
+ **Usage:** `quantalogic task [OPTIONS] [TASK]`
119
+ **Description:** Execute a task with the QuantaLogic AI Assistant.
120
+ **Options:**
121
+ - `--file PATH`: Path to task file.
122
+ - `--model-name TEXT`: Specify the model (litellm format, e.g., `openrouter/deepseek/deepseek-chat`).
123
+ - `--verbose`: Enable verbose output.
124
+ - `--mode [code|basic|interpreter|full|code-basic|search|search-full]`: Agent mode.
125
+ - `--log [info|debug|warning]`: Set logging level.
126
+ - `--vision-model-name TEXT`: Specify the vision model (litellm format).
127
+ - `--max-iterations INTEGER`: Maximum iterations for task solving (default: 30).
128
+ - `--max-tokens-working-memory INTEGER`: Maximum tokens to keep in working memory (default: 4000).
129
+ - `--compact-every-n-iteration INTEGER`: Compact memory every N iterations (default: 5).
130
+ - `--no-stream`: Disable streaming output (default: enabled).
131
+ - `--help`: Show this message and exit.
132
+
133
+ **Detailed Parameter Descriptions:**
134
+
135
+ - **--model-name**: Specifies the LLM model to use (e.g., "openrouter/deepseek/deepseek-chat")
136
+ - **--mode**: Selects agent capabilities:
137
+ - *code*: Coding-focused with basic capabilities
138
+ - *basic*: General-purpose without coding tools
139
+ - *interpreter*: Interactive code execution
140
+ - *full*: All capabilities enabled
141
+ - *code-basic*: Coding with basic reasoning
142
+ - *search*: Web search integration
143
+ - **--log**: Controls logging verbosity (info, debug, warning)
144
+ - **--vision-model-name**: Specifies vision model for image processing
145
+ - **--max-iterations**: Limits task-solving attempts (default: 30)
146
+ - **--max-tokens-working-memory**: Controls memory usage (default: None)
147
+ - **--compact-every-n-iteration**: Memory optimization frequency (default: None)
148
+ - **--no-stream**: Disables real-time output streaming
149
+
110
150
 
111
151
 
112
152
  ### Detailed Usage
@@ -182,6 +222,12 @@ print(result)
182
222
 
183
223
  ## 📖 Examples
184
224
 
225
+ Watch how QuantaLogic can generate complete tutorials from simple prompts:
226
+
227
+ [![Tutorial Generation Demo](./examples/generated_tutorials/python/quantalogic_long.mp4)](./examples/generated_tutorials/python/quantalogic_long.mp4)
228
+
229
+ Example prompt: [04-write-a-tutorial.md](./examples/tasks/04-write-a-tutorial.md)
230
+
185
231
  Here are some practical examples to help you get started:
186
232
 
187
233
  Here is the markdown table based on the provided directory listing:
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "quantalogic"
3
- version = "0.2.17"
3
+ version = "0.2.19"
4
4
  description = "QuantaLogic ReAct Agents"
5
5
  authors = ["Raphaël MANSUY <raphael.mansuy@gmail.com>"]
6
6
  readme = "README.md"
@@ -71,6 +71,8 @@ class Agent(BaseModel):
71
71
  max_output_tokens: int = DEFAULT_MAX_OUTPUT_TOKENS
72
72
  max_iterations: int = 30
73
73
  system_prompt: str = ""
74
+ compact_every_n_iterations: int | None = None # Add this to the class attributes
75
+ max_tokens_working_memory: int | None = None # Add max_tokens_working_memory attribute
74
76
 
75
77
  def __init__(
76
78
  self,
@@ -81,6 +83,8 @@ class Agent(BaseModel):
81
83
  task_to_solve: str = "",
82
84
  specific_expertise: str = "General AI assistant with coding and problem-solving capabilities",
83
85
  get_environment: Callable[[], str] = get_environment,
86
+ compact_every_n_iterations: int | None = None, # New parameter
87
+ max_tokens_working_memory: int | None = None, # New parameter to set max working memory tokens
84
88
  ):
85
89
  """Initialize the agent with model, memory, tools, and configurations."""
86
90
  try:
@@ -121,6 +125,15 @@ class Agent(BaseModel):
121
125
  specific_expertise=specific_expertise,
122
126
  event_emitter=event_emitter,
123
127
  )
128
+
129
+ # Set the new compact_every_n_iterations parameter
130
+ self.compact_every_n_iterations = compact_every_n_iterations or self.max_iterations
131
+ logger.debug(f"Memory will be compacted every {self.compact_every_n_iterations} iterations")
132
+
133
+ # Set the max_tokens_working_memory parameter
134
+ self.max_tokens_working_memory = max_tokens_working_memory
135
+ logger.debug(f"Max tokens for working memory set to: {self.max_tokens_working_memory}")
136
+
124
137
  logger.debug("Agent initialized successfully.")
125
138
  except Exception as e:
126
139
  logger.error(f"Failed to initialize agent: {str(e)}")
@@ -265,9 +278,31 @@ class Agent(BaseModel):
265
278
  self.total_tokens = self.model.token_counter_with_history(message_history, prompt)
266
279
 
267
280
  def _compact_memory_if_needed(self, current_prompt: str = ""):
268
- """Compacts the memory if it exceeds the maximum occupancy."""
281
+ """Compacts the memory if it exceeds the maximum occupancy or token limit."""
269
282
  ratio_occupied = self._calculate_context_occupancy()
270
- if ratio_occupied >= MAX_OCCUPANCY:
283
+
284
+ # Compact memory if any of these conditions are met:
285
+ # 1. Memory occupancy exceeds MAX_OCCUPANCY, or
286
+ # 2. Current iteration is a multiple of compact_every_n_iterations, or
287
+ # 3. Working memory exceeds max_tokens_working_memory (if set)
288
+ should_compact_by_occupancy = ratio_occupied >= MAX_OCCUPANCY
289
+ should_compact_by_iteration = (
290
+ self.compact_every_n_iterations is not None and
291
+ self.current_iteration > 0 and
292
+ self.current_iteration % self.compact_every_n_iterations == 0
293
+ )
294
+ should_compact_by_token_limit = (
295
+ self.max_tokens_working_memory is not None and
296
+ self.total_tokens > self.max_tokens_working_memory
297
+ )
298
+
299
+ if should_compact_by_occupancy or should_compact_by_iteration or should_compact_by_token_limit:
300
+ if should_compact_by_occupancy:
301
+ logger.debug(f"Memory compaction triggered: Occupancy {ratio_occupied}% exceeds {MAX_OCCUPANCY}%")
302
+
303
+ if should_compact_by_iteration:
304
+ logger.debug(f"Memory compaction triggered: Iteration {self.current_iteration} is a multiple of {self.compact_every_n_iterations}")
305
+
271
306
  self._emit_event("memory_full")
272
307
  self.memory.compact()
273
308
  self.total_tokens = self.model.token_counter_with_history(self.memory.memory, current_prompt)
@@ -387,7 +422,7 @@ class Agent(BaseModel):
387
422
  answer=None,
388
423
  )
389
424
 
390
- def _handle_repeated_tool_call(self, tool_name: str, arguments_with_values: dict) -> ObserveResponseResult:
425
+ def _handle_repeated_tool_call(self, tool_name: str, arguments_with_values: dict) -> (str,str):
391
426
  """Handle the case where a tool call is repeated."""
392
427
  repeat_count = self.last_tool_call.get("count", 0)
393
428
  error_message = (
@@ -401,11 +436,7 @@ class Agent(BaseModel):
401
436
  "3. Use a different tool or modify the arguments\n"
402
437
  "4. Ensure you're making progress towards the goal"
403
438
  )
404
- return ObserveResponseResult(
405
- next_prompt=error_message,
406
- executed_tool="",
407
- answer=None,
408
- )
439
+ return tool_name, error_message
409
440
 
410
441
  def _handle_tool_execution_failure(self, response: str) -> ObserveResponseResult:
411
442
  """Handle the case where tool execution fails."""
@@ -32,13 +32,21 @@ from quantalogic.tools import (
32
32
  MODEL_NAME = "deepseek/deepseek-chat"
33
33
 
34
34
 
35
- def create_agent(model_name: str, vision_model_name: str | None, no_stream: bool = False) -> Agent:
35
+ def create_agent(
36
+ model_name: str,
37
+ vision_model_name: str | None,
38
+ no_stream: bool = False,
39
+ compact_every_n_iteration: int | None = None,
40
+ max_tokens_working_memory: int | None = None
41
+ ) -> Agent:
36
42
  """Create an agent with the specified model and tools.
37
43
 
38
44
  Args:
39
45
  model_name (str): Name of the model to use
40
46
  vision_model_name (str | None): Name of the vision model to use
41
47
  no_stream (bool, optional): If True, the agent will not stream results.
48
+ compact_every_n_iteration (int | None, optional): Frequency of memory compaction.
49
+ max_tokens_working_memory (int | None, optional): Maximum tokens for working memory.
42
50
 
43
51
  Returns:
44
52
  Agent: An agent with the specified model and tools
@@ -66,16 +74,26 @@ def create_agent(model_name: str, vision_model_name: str | None, no_stream: bool
66
74
  return Agent(
67
75
  model_name=model_name,
68
76
  tools=tools,
77
+ compact_every_n_iterations=compact_every_n_iteration,
78
+ max_tokens_working_memory=max_tokens_working_memory,
69
79
  )
70
80
 
71
81
 
72
- def create_interpreter_agent(model_name: str, vision_model_name: str | None, no_stream: bool = False) -> Agent:
82
+ def create_interpreter_agent(
83
+ model_name: str,
84
+ vision_model_name: str | None,
85
+ no_stream: bool = False,
86
+ compact_every_n_iteration: int | None = None,
87
+ max_tokens_working_memory: int | None = None
88
+ ) -> Agent:
73
89
  """Create an interpreter agent with the specified model and tools.
74
90
 
75
91
  Args:
76
92
  model_name (str): Name of the model to use
77
93
  vision_model_name (str | None): Name of the vision model to use
78
94
  no_stream (bool, optional): If True, the agent will not stream results.
95
+ compact_every_n_iteration (int | None, optional): Frequency of memory compaction.
96
+ max_tokens_working_memory (int | None, optional): Maximum tokens for working memory.
79
97
 
80
98
  Returns:
81
99
  Agent: An interpreter agent with the specified model and tools
@@ -98,16 +116,29 @@ def create_interpreter_agent(model_name: str, vision_model_name: str | None, no_
98
116
  LLMTool(model_name=model_name, on_token=console_print_token if not no_stream else None),
99
117
  DownloadHttpFileTool(),
100
118
  ]
101
- return Agent(model_name=model_name, tools=tools)
119
+ return Agent(
120
+ model_name=model_name,
121
+ tools=tools,
122
+ compact_every_n_iterations=compact_every_n_iteration,
123
+ max_tokens_working_memory=max_tokens_working_memory,
124
+ )
102
125
 
103
126
 
104
- def create_full_agent(model_name: str, vision_model_name: str | None, no_stream: bool = False) -> Agent:
127
+ def create_full_agent(
128
+ model_name: str,
129
+ vision_model_name: str | None,
130
+ no_stream: bool = False,
131
+ compact_every_n_iteration: int | None = None,
132
+ max_tokens_working_memory: int | None = None
133
+ ) -> Agent:
105
134
  """Create an agent with the specified model and many tools.
106
135
 
107
136
  Args:
108
137
  model_name (str): Name of the model to use
109
138
  vision_model_name (str | None): Name of the vision model to use
110
139
  no_stream (bool, optional): If True, the agent will not stream results.
140
+ compact_every_n_iteration (int | None, optional): Frequency of memory compaction.
141
+ max_tokens_working_memory (int | None, optional): Maximum tokens for working memory.
111
142
 
112
143
  Returns:
113
144
  Agent: An agent with the specified model and tools
@@ -140,16 +171,26 @@ def create_full_agent(model_name: str, vision_model_name: str | None, no_stream:
140
171
  return Agent(
141
172
  model_name=model_name,
142
173
  tools=tools,
174
+ compact_every_n_iterations=compact_every_n_iteration,
175
+ max_tokens_working_memory=max_tokens_working_memory,
143
176
  )
144
177
 
145
178
 
146
- def create_orchestrator_agent(model_name: str, vision_model_name: str | None = None, no_stream: bool = False) -> Agent:
179
+ def create_orchestrator_agent(
180
+ model_name: str,
181
+ vision_model_name: str | None = None,
182
+ no_stream: bool = False,
183
+ compact_every_n_iteration: int | None = None,
184
+ max_tokens_working_memory: int | None = None
185
+ ) -> Agent:
147
186
  """Create an agent with the specified model and tools.
148
187
 
149
188
  Args:
150
189
  model_name (str): Name of the model to use
151
190
  vision_model_name (str | None): Name of the vision model to use
152
191
  no_stream (bool, optional): If True, the agent will not stream results.
192
+ compact_every_n_iteration (int | None, optional): Frequency of memory compaction.
193
+ max_tokens_working_memory (int | None, optional): Maximum tokens for working memory.
153
194
 
154
195
  Returns:
155
196
  Agent: An agent with the specified model and tools
@@ -175,4 +216,6 @@ def create_orchestrator_agent(model_name: str, vision_model_name: str | None = N
175
216
  return Agent(
176
217
  model_name=model_name,
177
218
  tools=tools,
219
+ compact_every_n_iterations=compact_every_n_iteration,
220
+ max_tokens_working_memory=max_tokens_working_memory,
178
221
  )
@@ -20,7 +20,14 @@ from quantalogic.utils import get_coding_environment
20
20
  from quantalogic.utils.get_quantalogic_rules_content import get_quantalogic_rules_file_content
21
21
 
22
22
 
23
- def create_coding_agent(model_name: str, vision_model_name: str | None = None, basic: bool = False,no_stream: bool = False) -> Agent:
23
+ def create_coding_agent(
24
+ model_name: str,
25
+ vision_model_name: str | None = None,
26
+ basic: bool = False,
27
+ no_stream: bool = False,
28
+ compact_every_n_iteration: int | None = None,
29
+ max_tokens_working_memory: int | None = None
30
+ ) -> Agent:
24
31
  """Creates and configures a coding agent with a comprehensive set of tools.
25
32
 
26
33
  Args:
@@ -28,6 +35,8 @@ def create_coding_agent(model_name: str, vision_model_name: str | None = None, b
28
35
  vision_model_name (str | None): Name of the vision model to use for the agent's core capabilities
29
36
  basic (bool, optional): If True, the agent will be configured with a basic set of tools.
30
37
  no_stream (bool, optional): If True, the agent will not stream results.
38
+ compact_every_n_iteration (int | None, optional): Frequency of memory compaction.
39
+ max_tokens_working_memory (int | None, optional): Maximum tokens for working memory.
31
40
 
32
41
  Returns:
33
42
  Agent: A fully configured coding agent instance with:
@@ -91,4 +100,6 @@ def create_coding_agent(model_name: str, vision_model_name: str | None = None, b
91
100
  tools=tools,
92
101
  specific_expertise=specific_expertise,
93
102
  get_environment=get_coding_environment,
103
+ compact_every_n_iterations=compact_every_n_iteration,
104
+ max_tokens_working_memory=max_tokens_working_memory,
94
105
  )
@@ -2,6 +2,7 @@
2
2
 
3
3
  import functools
4
4
 
5
+ import litellm
5
6
  import openai
6
7
  from litellm import completion, exceptions, get_max_tokens, get_model_info, token_counter
7
8
  from loguru import logger
@@ -12,6 +13,11 @@ from quantalogic.event_emitter import EventEmitter # Importing the EventEmitter
12
13
  MIN_RETRIES = 1
13
14
 
14
15
 
16
+
17
+ litellm.suppress_debug_info = True # Very important to suppress prints don't remove
18
+
19
+
20
+
15
21
  # Define the Message class for conversation handling
16
22
  class Message(BaseModel):
17
23
  """Represents a message in a conversation with a specific role and content."""
@@ -11,7 +11,6 @@ import click
11
11
  from loguru import logger
12
12
 
13
13
  from quantalogic.console_print_events import console_print_events
14
- from quantalogic.console_print_token import console_print_token
15
14
  from quantalogic.utils.check_version import check_if_is_latest_version
16
15
  from quantalogic.version import get_version
17
16
 
@@ -23,7 +22,6 @@ from threading import Lock # noqa: E402
23
22
  from rich.console import Console # noqa: E402
24
23
  from rich.panel import Panel # noqa: E402
25
24
  from rich.prompt import Confirm # noqa: E402
26
- from rich.spinner import Spinner # noqa: E402
27
25
 
28
26
  from quantalogic.agent import Agent # noqa: E402
29
27
 
@@ -41,26 +39,28 @@ from quantalogic.search_agent import create_search_agent # noqa: E402
41
39
  AGENT_MODES = ["code", "basic", "interpreter", "full", "code-basic", "search", "search-full"]
42
40
 
43
41
 
44
- def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | None, no_stream: bool = False) -> Agent:
42
+ def create_agent_for_mode(mode: str, model_name: str, vision_model_name: str | None, no_stream: bool = False, compact_every_n_iteration: int | None = None, max_tokens_working_memory: int | None = None) -> Agent:
45
43
  """Create an agent based on the specified mode."""
46
44
  logger.debug(f"Creating agent for mode: {mode} with model: {model_name}")
47
45
  logger.debug(f"Using vision model: {vision_model_name}")
48
46
  logger.debug(f"Using no_stream: {no_stream}")
47
+ logger.debug(f"Using compact_every_n_iteration: {compact_every_n_iteration}")
48
+ logger.debug(f"Using max_tokens_working_memory: {max_tokens_working_memory}")
49
49
  if mode == "code":
50
50
  logger.debug("Creating code agent without basic mode")
51
- return create_coding_agent(model_name, vision_model_name, basic=False, no_stream=no_stream)
51
+ return create_coding_agent(model_name, vision_model_name, basic=False, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
52
52
  if mode == "code-basic":
53
- return create_coding_agent(model_name, vision_model_name, basic=True, no_stream=no_stream)
53
+ return create_coding_agent(model_name, vision_model_name, basic=True, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
54
54
  elif mode == "basic":
55
- return create_orchestrator_agent(model_name, vision_model_name, no_stream=no_stream)
55
+ return create_orchestrator_agent(model_name, vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
56
56
  elif mode == "full":
57
- return create_full_agent(model_name, vision_model_name, no_stream=no_stream)
57
+ return create_full_agent(model_name, vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
58
58
  elif mode == "interpreter":
59
- return create_interpreter_agent(model_name, vision_model_name, no_stream=no_stream)
59
+ return create_interpreter_agent(model_name, vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
60
60
  elif mode == "search":
61
- return create_search_agent(model_name, no_stream=no_stream)
61
+ return create_search_agent(model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
62
62
  if mode == "search-full":
63
- return create_search_agent(model_name, mode_full=True, no_stream=no_stream)
63
+ return create_search_agent(model_name, mode_full=True, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
64
64
  else:
65
65
  raise ValueError(f"Unknown agent mode: {mode}")
66
66
 
@@ -154,7 +154,12 @@ def stop_spinner(console: Console) -> None:
154
154
 
155
155
 
156
156
  def display_welcome_message(
157
- console: Console, model_name: str, vision_model_name: str | None, max_iterations: int = 50
157
+ console: Console,
158
+ model_name: str,
159
+ vision_model_name: str | None,
160
+ max_iterations: int = 50,
161
+ compact_every_n_iteration: int | None = None,
162
+ max_tokens_working_memory: int | None = None
158
163
  ) -> None:
159
164
  """Display the welcome message and instructions."""
160
165
  version = get_version()
@@ -169,7 +174,9 @@ def display_welcome_message(
169
174
  "\n"
170
175
  f"- Model: {model_name}\n"
171
176
  f"- Vision Model: {vision_model_name}\n"
172
- f"- Max Iterations: {max_iterations}\n\n"
177
+ f"- Max Iterations: {max_iterations}\n"
178
+ f"- Memory Compact Frequency: {compact_every_n_iteration or 'Default (Max Iterations)'}\n"
179
+ f"- Max Working Memory Tokens: {max_tokens_working_memory or 'Default'}\n\n"
173
180
  "[bold magenta]💡 Pro Tips:[/bold magenta]\n\n"
174
181
  "- Be as specific as possible in your task description to get the best results!\n"
175
182
  "- Use clear and concise language when describing your task\n"
@@ -182,6 +189,12 @@ def display_welcome_message(
182
189
 
183
190
 
184
191
  @click.group(invoke_without_command=True)
192
+ @click.option(
193
+ "--compact-every-n-iteration",
194
+ type=int,
195
+ default=None,
196
+ help="Set the frequency of memory compaction for the agent (default: max_iterations)."
197
+ )
185
198
  @click.option("--version", is_flag=True, help="Show version information.")
186
199
  @click.option(
187
200
  "--model-name",
@@ -207,6 +220,12 @@ def display_welcome_message(
207
220
  default=30,
208
221
  help="Maximum number of iterations for task solving (default: 30).",
209
222
  )
223
+ @click.option(
224
+ "--max-tokens-working-memory",
225
+ type=int,
226
+ default=None,
227
+ help="Set the maximum number of tokens allowed in the working memory."
228
+ )
210
229
  @click.pass_context
211
230
  def cli(
212
231
  ctx: click.Context,
@@ -217,6 +236,8 @@ def cli(
217
236
  log: str,
218
237
  vision_model_name: str | None,
219
238
  max_iterations: int,
239
+ compact_every_n_iteration: int | None,
240
+ max_tokens_working_memory: int | None,
220
241
  ) -> None:
221
242
  """QuantaLogic AI Assistant - A powerful AI tool for various tasks."""
222
243
  if version:
@@ -232,6 +253,8 @@ def cli(
232
253
  log=log,
233
254
  vision_model_name=vision_model_name,
234
255
  max_iterations=max_iterations,
256
+ compact_every_n_iteration=compact_every_n_iteration,
257
+ max_tokens_working_memory=max_tokens_working_memory,
235
258
  )
236
259
 
237
260
 
@@ -261,6 +284,18 @@ def cli(
261
284
  default=30,
262
285
  help="Maximum number of iterations for task solving (default: 30).",
263
286
  )
287
+ @click.option(
288
+ "--compact-every-n-iteration",
289
+ type=int,
290
+ default=None,
291
+ help="Set the frequency of memory compaction for the agent (default: max_iterations)."
292
+ )
293
+ @click.option(
294
+ "--max-tokens-working-memory",
295
+ type=int,
296
+ default=None,
297
+ help="Set the maximum number of tokens allowed in the working memory."
298
+ )
264
299
  @click.option(
265
300
  "--no-stream",
266
301
  is_flag=True,
@@ -276,6 +311,8 @@ def task(
276
311
  vision_model_name: str | None,
277
312
  task: Optional[str],
278
313
  max_iterations: int,
314
+ compact_every_n_iteration: int | None,
315
+ max_tokens_working_memory: int | None,
279
316
  no_stream: bool,
280
317
  ) -> None:
281
318
  """Execute a task with the QuantaLogic AI Assistant."""
@@ -290,7 +327,14 @@ def task(
290
327
  check_new_version()
291
328
  task_content = task
292
329
  else:
293
- display_welcome_message(console, model_name, vision_model_name, max_iterations=max_iterations)
330
+ display_welcome_message(
331
+ console,
332
+ model_name,
333
+ vision_model_name,
334
+ max_iterations=max_iterations,
335
+ compact_every_n_iteration=compact_every_n_iteration,
336
+ max_tokens_working_memory=max_tokens_working_memory
337
+ )
294
338
  check_new_version()
295
339
  logger.debug("Waiting for user input...")
296
340
  task_content = get_multiline_input(console).strip()
@@ -322,7 +366,7 @@ def task(
322
366
  logger.debug(
323
367
  f"Creating agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
324
368
  )
325
- agent = create_agent_for_mode(mode, model_name, vision_model_name=vision_model_name, no_stream=no_stream)
369
+ agent = create_agent_for_mode(mode, model_name, vision_model_name=vision_model_name, no_stream=no_stream, compact_every_n_iteration=compact_every_n_iteration, max_tokens_working_memory=max_tokens_working_memory)
326
370
  logger.debug(
327
371
  f"Created agent for mode: {mode} with model: {model_name}, vision model: {vision_model_name}, no_stream: {no_stream}"
328
372
  )
@@ -17,7 +17,7 @@ Every response must contain exactly two XML blocks:
17
17
  1. Analysis Block:
18
18
  ```xml
19
19
  <thinking>
20
- <!-- You must follow this precise format, be very concise and very precise -->
20
+ <!-- Must follow this precise format, concise, dense, use abreviations, emojis, unicode characters to make it denser -->
21
21
  <task_analysis_if_no_history>
22
22
  Only if no conversation history:
23
23
  * Rewrite the <task> and its context with your own words in detailed, clear, and specific manner.
@@ -51,7 +51,7 @@ Every response must contain exactly two XML blocks:
51
51
  </last_observation>
52
52
  <progess_analysis>
53
53
  <!-- if there is a conversation history -->
54
- * Detail each step failed and completed so far.
54
+ * Detail each step failed and completed so far, be concise.
55
55
  * Identify and evaluate any blockers or challenges to the progress of global task.
56
56
  * Identify repetitions: if repeated steps, take a step back and rethink your approach.
57
57
  * Provide potential solutions, and if needed, suggest reevaluating the approach and the plan.
@@ -12,12 +12,21 @@ from quantalogic.tools import (
12
12
  )
13
13
 
14
14
 
15
- def create_search_agent(model_name: str, mode_full: bool = False) -> Agent:
15
+ def create_search_agent(
16
+ model_name: str,
17
+ mode_full: bool = False,
18
+ no_stream: bool = False,
19
+ compact_every_n_iteration: int | None = None,
20
+ max_tokens_working_memory: int | None = None
21
+ ) -> Agent:
16
22
  """Creates and configures a search agent with web, knowledge, and privacy-focused search tools.
17
23
 
18
24
  Args:
19
25
  model_name (str): Name of the language model to use for the agent's core capabilities
20
26
  mode_full (bool, optional): If True, the agent will be configured with a full set of tools.
27
+ no_stream (bool, optional): If True, the agent will not stream results.
28
+ compact_every_n_iteration (int | None, optional): Frequency of memory compaction.
29
+ max_tokens_working_memory (int | None, optional): Maximum tokens for working memory.
21
30
 
22
31
  Returns:
23
32
  Agent: A fully configured search agent instance with:
@@ -57,4 +66,6 @@ def create_search_agent(model_name: str, mode_full: bool = False) -> Agent:
57
66
  model_name=model_name,
58
67
  tools=tools,
59
68
  specific_expertise=specific_expertise,
69
+ compact_every_n_iterations=compact_every_n_iteration,
70
+ max_tokens_working_memory=max_tokens_working_memory,
60
71
  )
File without changes