quantalogic 0.30.1__tar.gz → 0.30.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. {quantalogic-0.30.1 → quantalogic-0.30.4}/PKG-INFO +56 -62
  2. {quantalogic-0.30.1 → quantalogic-0.30.4}/README.md +55 -61
  3. {quantalogic-0.30.1 → quantalogic-0.30.4}/pyproject.toml +1 -1
  4. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/agent.py +19 -30
  5. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/get_model_info.py +2 -0
  6. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tool_manager.py +42 -0
  7. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/search_definition_names.py +11 -2
  8. {quantalogic-0.30.1 → quantalogic-0.30.4}/LICENSE +0 -0
  9. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/__init__.py +0 -0
  10. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/agent_config.py +0 -0
  11. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/agent_factory.py +0 -0
  12. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/coding_agent.py +0 -0
  13. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/console_print_events.py +0 -0
  14. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/console_print_token.py +0 -0
  15. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/docs_cli.py +0 -0
  16. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/event_emitter.py +0 -0
  17. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/generative_model.py +0 -0
  18. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/interactive_text_editor.py +0 -0
  19. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/main.py +0 -0
  20. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/memory.py +0 -0
  21. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/model_names.py +0 -0
  22. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/prompts.py +0 -0
  23. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/search_agent.py +0 -0
  24. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/server/__init__.py +0 -0
  25. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/server/agent_server.py +0 -0
  26. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/server/models.py +0 -0
  27. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/server/routes.py +0 -0
  28. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/server/state.py +0 -0
  29. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/server/static/js/event_visualizer.js +0 -0
  30. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/server/static/js/quantalogic.js +0 -0
  31. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/server/templates/index.html +0 -0
  32. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/task_file_reader.py +0 -0
  33. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/task_runner.py +0 -0
  34. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/__init__.py +0 -0
  35. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/agent_tool.py +0 -0
  36. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/dalle_e.py +0 -0
  37. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/download_http_file_tool.py +0 -0
  38. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/duckduckgo_search_tool.py +0 -0
  39. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/edit_whole_content_tool.py +0 -0
  40. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/elixir_tool.py +0 -0
  41. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/execute_bash_command_tool.py +0 -0
  42. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/generate_database_report_tool.py +0 -0
  43. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/grep_app_tool.py +0 -0
  44. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/input_question_tool.py +0 -0
  45. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/jinja_tool.py +0 -0
  46. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/__init__.py +0 -0
  47. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/c_handler.py +0 -0
  48. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/cpp_handler.py +0 -0
  49. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/go_handler.py +0 -0
  50. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/java_handler.py +0 -0
  51. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/javascript_handler.py +0 -0
  52. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/python_handler.py +0 -0
  53. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/rust_handler.py +0 -0
  54. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/scala_handler.py +0 -0
  55. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/language_handlers/typescript_handler.py +0 -0
  56. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/list_directory_tool.py +0 -0
  57. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/llm_tool.py +0 -0
  58. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/llm_vision_tool.py +0 -0
  59. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/markitdown_tool.py +0 -0
  60. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/nodejs_tool.py +0 -0
  61. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/python_tool.py +0 -0
  62. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/read_file_block_tool.py +0 -0
  63. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/read_file_tool.py +0 -0
  64. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/read_html_tool.py +0 -0
  65. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/replace_in_file_tool.py +0 -0
  66. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/ripgrep_tool.py +0 -0
  67. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/serpapi_search_tool.py +0 -0
  68. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/sql_query_tool.py +0 -0
  69. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/task_complete_tool.py +0 -0
  70. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/tool.py +0 -0
  71. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/unified_diff_tool.py +0 -0
  72. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/utils/__init__.py +0 -0
  73. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/utils/create_sample_database.py +0 -0
  74. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/utils/generate_database_report.py +0 -0
  75. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/wikipedia_search_tool.py +0 -0
  76. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/write_file_tool.py +0 -0
  77. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/__init__.py +0 -0
  78. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/ask_user_validation.py +0 -0
  79. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/check_version.py +0 -0
  80. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/download_http_file.py +0 -0
  81. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/get_coding_environment.py +0 -0
  82. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/get_environment.py +0 -0
  83. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/get_quantalogic_rules_content.py +0 -0
  84. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/git_ls.py +0 -0
  85. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/read_file.py +0 -0
  86. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/utils/read_http_text_content.py +0 -0
  87. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/version.py +0 -0
  88. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/version_check.py +0 -0
  89. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/welcome_message.py +0 -0
  90. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/xml_parser.py +0 -0
  91. {quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/xml_tool_parser.py +0 -0
{quantalogic-0.30.1 → quantalogic-0.30.4}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: quantalogic
- Version: 0.30.1
+ Version: 0.30.4
  Summary: QuantaLogic ReAct Agents
  Author: Raphaël MANSUY
  Author-email: raphael.mansuy@gmail.com
@@ -94,12 +94,10 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
  - **Memory Management**: Intelligent context handling and optimization
  - **Enterprise Ready**: Comprehensive logging, error handling, and validation system

- ## Environment Configuration
-
- To configure the environment API key for Quantalogic using LiteLLM, set the required environment variable for your chosen provider (e.g., `OPENAI_API_KEY` for OpenAI, `ANTHROPIC_API_KEY` for Anthropic, or `DEEPSEEK_API_KEY` for DeepSeek) and any optional variables like `OPENAI_API_BASE` or `OPENROUTER_REFERRER`. Use a `.env` file or a secrets manager to securely store these keys, and load them in your code using `python-dotenv`. For advanced configurations, refer to the [LiteLLM documentation](https://docs.litellm.ai/docs/).

  ## 📋 Table of Contents

+ - [Usage](#usage)
  - [Release Notes](#release-notes)

  - [Installation](#-installation)
@@ -114,12 +112,60 @@ To configure the environment API key for Quantalogic using LiteLLM, set the requ
  - [License](#-license)
  - [Documentation Development](#-documentation-development)

+ ## Usage
+
+ **Usage:** `quantalogic [OPTIONS] COMMAND [ARGS]...`
+ **Environment Variables:** Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, and `DEEPSEEK_API_KEY` for API integration.
+
+ **Options:**
+ - `--model-name TEXT`: Specify the model (litellm format, e.g., "openrouter/deepseek/deepseek-chat")
+ - `--log [info|debug|warning]`: Set logging level
+ - `--mode [code|basic|interpreter|full|code-basic|search|search-full]`: Agent mode
+ - `--vision-model-name TEXT`: Specify vision model (litellm format)
+ - `--max-tokens-working-memory INTEGER`: Maximum tokens in working memory (default: 4000)
+ - `--max-iterations INTEGER`: Maximum task iterations (default: 30)
+ - `--compact-every-n-iteration INTEGER`: Compact memory every N iterations (default: 5)
+ - `--no-stream`: Disable streaming output (default: enabled)
+ - `--help`: Show help message
+
+ **Commands:**
+ - `task`: Execute a task with the QuantaLogic AI Assistant
+ - `--file PATH`: Path to task file
+ - `--model-name TEXT`: Specify model
+ - `--verbose`: Enable verbose output
+ - `--mode`: Select agent capabilities
+ - `--log`: Set logging level
+ - `--vision-model-name`: Specify vision model
+ - `--max-iterations`: Maximum task iterations
+ - `--max-tokens-working-memory`: Memory limit
+ - `--compact-every-n-iteration`: Memory optimization
+ - `--no-stream`: Disable streaming
+
  ## Release Notes

  See our [Release Notes](RELEASE_NOTES.MD) for detailed version history and changes.

  [TODO List](TODO.md)

+ ## Environment Configuration
+
+ ### Supported Models
+
+ | Model Name | API Key Environment Variable | Description |
+ |------------|------------------------------|-------------|
+ | openai/gpt-4o-mini | OPENAI_API_KEY | OpenAI's compact version of GPT-4, optimized for efficiency and cost-effectiveness while maintaining strong performance. |
+ | openai/gpt-4o | OPENAI_API_KEY | OpenAI's flagship model offering state-of-the-art performance across various tasks with enhanced reasoning capabilities. |
+ | anthropic/claude-3.5-sonnet | ANTHROPIC_API_KEY | Claude 3.5 Sonnet model from Anthropic, balancing performance and speed with strong reasoning capabilities. |
+ | deepseek/deepseek-chat | DEEPSEEK_API_KEY | DeepSeek's conversational model optimized for chat-based interactions and general-purpose tasks. |
+ | deepseek/deepseek-reasoner | DEEPSEEK_API_KEY | DeepSeek's specialized model for complex reasoning tasks and problem-solving. |
+ | openrouter/deepseek/deepseek-r1 | OPENROUTER_API_KEY | DeepSeek R1 model available through OpenRouter, optimized for research and development tasks. |
+ | openrouter/openai/gpt-4o | OPENROUTER_API_KEY | OpenAI's GPT-4o model accessible through OpenRouter platform. |
+ | openrouter/mistralai/mistral-large-2411 | OPENROUTER_API_KEY | Mistral's large model optimized for complex reasoning tasks, available through OpenRouter with enhanced multilingual capabilities. |
+ | mistral/mistral-large-2407 | MISTRAL_API_KEY | Mistral's high-performance model designed for enterprise-grade applications, offering advanced reasoning and multilingual support. |
+
+ To configure the environment API key for Quantalogic using LiteLLM, set the required environment variable for your chosen provider and any optional variables like `OPENAI_API_BASE` or `OPENROUTER_REFERRER`. Use a `.env` file or a secrets manager to securely store these keys, and load them in your code using `python-dotenv`. For advanced configurations, refer to the [LiteLLM documentation](https://docs.litellm.ai/docs/).
+
+
  ## 📦 Installation

  ### Prerequisites
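The Environment Configuration text added above relies on a `.env` file plus `python-dotenv` to expose provider keys. A minimal sketch of that loading pattern, assuming a `.env` file that defines one of the variables from the table (here `DEEPSEEK_API_KEY`); this is an illustration, not part of the package:

```python
# Minimal sketch of the .env loading pattern described above.
# Assumption: a .env file next to this script contains a line such as
#   DEEPSEEK_API_KEY=sk-...
import os

from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv()  # read .env and populate os.environ

if not os.getenv("DEEPSEEK_API_KEY"):
    raise RuntimeError("DEEPSEEK_API_KEY is not set; add it to .env or a secrets manager")

# LiteLLM-backed calls made by quantalogic pick the key up from the environment.
```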
@@ -155,64 +201,7 @@ pipx install quantalogic

  ### Basic Usage

- ## 📖 CLI Reference
-
- The QuantaLogic CLI provides powerful command-line capabilities:
-
- ```bash
- Usage: quantalogic [OPTIONS] COMMAND [ARGS]...
- QuantaLogic AI Assistant - A powerful AI tool for various tasks.
-
- Options:
- --version Show version information.
- --model-name TEXT Specify the model (litellm format, e.g., "openrouter/deepseek/deepseek-chat").
- --log [info|debug|warning] Set logging level.
- --verbose Enable verbose output.
- --mode [code|basic|interpreter|full|code-basic|search|search-full] Agent mode.
- --vision-model-name TEXT Specify the vision model (litellm format, e.g., "openrouter/A/gpt-4o-mini").
- --max-iterations INTEGER Maximum iterations for task solving (default: 30).
- --max-tokens-working-memory INTEGER Maximum tokens to keep in working memory (default: 4000).
- --compact-every-n-iteration INTEGER Compact memory every N iterations (default: 5).
- --help Show this message and exit.
-
- Commands:
- task Execute a task with the QuantaLogic AI Assistant.
- ```

- ### Commands
- task Execute a task with the QuantaLogic AI Assistant
-
- **Usage:** `quantalogic task [OPTIONS] [TASK]`
- **Description:** Execute a task with the QuantaLogic AI Assistant.
- **Options:**
- - `--file PATH`: Path to task file.
- - `--model-name TEXT`: Specify the model (litellm format, e.g., `openrouter/deepseek/deepseek-chat`).
- - `--verbose`: Enable verbose output.
- - `--mode [code|basic|interpreter|full|code-basic|search|search-full]`: Agent mode.
- - `--log [info|debug|warning]`: Set logging level.
- - `--vision-model-name TEXT`: Specify the vision model (litellm format).
- - `--max-iterations INTEGER`: Maximum iterations for task solving (default: 30).
- - `--max-tokens-working-memory INTEGER`: Maximum tokens to keep in working memory (default: 4000).
- - `--compact-every-n-iteration INTEGER`: Compact memory every N iterations (default: 5).
- - `--no-stream`: Disable streaming output (default: enabled).
- - `--help`: Show this message and exit.
-
- **Detailed Parameter Descriptions:**
-
- - **--model-name**: Specifies the LLM model to use (e.g., "openrouter/deepseek/deepseek-chat")
- - **--mode**: Selects agent capabilities:
- - *code*: Coding-focused with basic capabilities
- - *basic*: General-purpose without coding tools
- - *interpreter*: Interactive code execution
- - *full*: All capabilities enabled
- - *code-basic*: Coding with basic reasoning
- - *search*: Web search integration
- - **--log**: Controls logging verbosity (info, debug, warning)
- - **--vision-model-name**: Specifies vision model for image processing
- - **--max-iterations**: Limits task-solving attempts (default: 30)
- - **--max-tokens-working-memory**: Controls memory usage (default: None)
- - **--compact-every-n-iteration**: Memory optimization frequency (default: None)
- - **--no-stream**: Disables real-time output streaming



@@ -297,7 +286,6 @@ Example prompt: [04-write-a-tutorial.md](./examples/tasks/04-write-a-tutorial.md

  Here are some practical examples to help you get started:

- Here is the markdown table based on the provided directory listing:

  | Example | Description | File |
  |---------|-------------|------|
@@ -306,6 +294,12 @@ Here is the markdown table based on the provided directory listing:
  | Agent with Interpreter | An example of an agent that includes an interpreter. | [examples/03-agent-with-interpreter.py](examples/03-agent-with-interpreter.py) |
  | Agent Summary Task | An example of an agent performing a summary task. | [examples/04-agent-summary-task.py](examples/04-agent-summary-task.py) |
  | Code Example | A general code example. | [examples/05-code.py](examples/05-code.py) |
+ | Code Screen Example | An example demonstrating code execution with screen output. | [examples/06-code-screen.py](examples/06-code-screen.py) |
+ | Write Tutorial | An example of generating a tutorial using the agent. | [examples/07-write-tutorial.py](examples/07-write-tutorial.py) |
+ | PRD Writer | An example of generating a Product Requirements Document (PRD). | [examples/08-prd-writer.py](examples/08-prd-writer.py) |
+ | SQL Query | An example of executing SQL queries using the agent. | [examples/09-sql-query.py](examples/09-sql-query.py) |
+ | Finance Agent | An example of a finance-focused agent. | [examples/10-finance-agent.py](examples/10-finance-agent.py) |
+ | Textual Agent Interface | An example of a textual user interface for the agent. | [examples/11-textual-agent-interface.py](examples/11-textual-agent-interface.py) |


  ## 🔨 Key Components
{quantalogic-0.30.1 → quantalogic-0.30.4}/README.md

@@ -36,12 +36,10 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
  - **Memory Management**: Intelligent context handling and optimization
  - **Enterprise Ready**: Comprehensive logging, error handling, and validation system

- ## Environment Configuration
-
- To configure the environment API key for Quantalogic using LiteLLM, set the required environment variable for your chosen provider (e.g., `OPENAI_API_KEY` for OpenAI, `ANTHROPIC_API_KEY` for Anthropic, or `DEEPSEEK_API_KEY` for DeepSeek) and any optional variables like `OPENAI_API_BASE` or `OPENROUTER_REFERRER`. Use a `.env` file or a secrets manager to securely store these keys, and load them in your code using `python-dotenv`. For advanced configurations, refer to the [LiteLLM documentation](https://docs.litellm.ai/docs/).

  ## 📋 Table of Contents

+ - [Usage](#usage)
  - [Release Notes](#release-notes)

  - [Installation](#-installation)
@@ -56,12 +54,60 @@ To configure the environment API key for Quantalogic using LiteLLM, set the requ
  - [License](#-license)
  - [Documentation Development](#-documentation-development)

+ ## Usage
+
+ **Usage:** `quantalogic [OPTIONS] COMMAND [ARGS]...`
+ **Environment Variables:** Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, and `DEEPSEEK_API_KEY` for API integration.
+
+ **Options:**
+ - `--model-name TEXT`: Specify the model (litellm format, e.g., "openrouter/deepseek/deepseek-chat")
+ - `--log [info|debug|warning]`: Set logging level
+ - `--mode [code|basic|interpreter|full|code-basic|search|search-full]`: Agent mode
+ - `--vision-model-name TEXT`: Specify vision model (litellm format)
+ - `--max-tokens-working-memory INTEGER`: Maximum tokens in working memory (default: 4000)
+ - `--max-iterations INTEGER`: Maximum task iterations (default: 30)
+ - `--compact-every-n-iteration INTEGER`: Compact memory every N iterations (default: 5)
+ - `--no-stream`: Disable streaming output (default: enabled)
+ - `--help`: Show help message
+
+ **Commands:**
+ - `task`: Execute a task with the QuantaLogic AI Assistant
+ - `--file PATH`: Path to task file
+ - `--model-name TEXT`: Specify model
+ - `--verbose`: Enable verbose output
+ - `--mode`: Select agent capabilities
+ - `--log`: Set logging level
+ - `--vision-model-name`: Specify vision model
+ - `--max-iterations`: Maximum task iterations
+ - `--max-tokens-working-memory`: Memory limit
+ - `--compact-every-n-iteration`: Memory optimization
+ - `--no-stream`: Disable streaming
+
  ## Release Notes

  See our [Release Notes](RELEASE_NOTES.MD) for detailed version history and changes.

  [TODO List](TODO.md)

+ ## Environment Configuration
+
+ ### Supported Models
+
+ | Model Name | API Key Environment Variable | Description |
+ |------------|------------------------------|-------------|
+ | openai/gpt-4o-mini | OPENAI_API_KEY | OpenAI's compact version of GPT-4, optimized for efficiency and cost-effectiveness while maintaining strong performance. |
+ | openai/gpt-4o | OPENAI_API_KEY | OpenAI's flagship model offering state-of-the-art performance across various tasks with enhanced reasoning capabilities. |
+ | anthropic/claude-3.5-sonnet | ANTHROPIC_API_KEY | Claude 3.5 Sonnet model from Anthropic, balancing performance and speed with strong reasoning capabilities. |
+ | deepseek/deepseek-chat | DEEPSEEK_API_KEY | DeepSeek's conversational model optimized for chat-based interactions and general-purpose tasks. |
+ | deepseek/deepseek-reasoner | DEEPSEEK_API_KEY | DeepSeek's specialized model for complex reasoning tasks and problem-solving. |
+ | openrouter/deepseek/deepseek-r1 | OPENROUTER_API_KEY | DeepSeek R1 model available through OpenRouter, optimized for research and development tasks. |
+ | openrouter/openai/gpt-4o | OPENROUTER_API_KEY | OpenAI's GPT-4o model accessible through OpenRouter platform. |
+ | openrouter/mistralai/mistral-large-2411 | OPENROUTER_API_KEY | Mistral's large model optimized for complex reasoning tasks, available through OpenRouter with enhanced multilingual capabilities. |
+ | mistral/mistral-large-2407 | MISTRAL_API_KEY | Mistral's high-performance model designed for enterprise-grade applications, offering advanced reasoning and multilingual support. |
+
+ To configure the environment API key for Quantalogic using LiteLLM, set the required environment variable for your chosen provider and any optional variables like `OPENAI_API_BASE` or `OPENROUTER_REFERRER`. Use a `.env` file or a secrets manager to securely store these keys, and load them in your code using `python-dotenv`. For advanced configurations, refer to the [LiteLLM documentation](https://docs.litellm.ai/docs/).
+
+
  ## 📦 Installation

  ### Prerequisites
@@ -97,64 +143,7 @@ pipx install quantalogic

  ### Basic Usage

- ## 📖 CLI Reference
-
- The QuantaLogic CLI provides powerful command-line capabilities:
-
- ```bash
- Usage: quantalogic [OPTIONS] COMMAND [ARGS]...
- QuantaLogic AI Assistant - A powerful AI tool for various tasks.
-
- Options:
- --version Show version information.
- --model-name TEXT Specify the model (litellm format, e.g., "openrouter/deepseek/deepseek-chat").
- --log [info|debug|warning] Set logging level.
- --verbose Enable verbose output.
- --mode [code|basic|interpreter|full|code-basic|search|search-full] Agent mode.
- --vision-model-name TEXT Specify the vision model (litellm format, e.g., "openrouter/A/gpt-4o-mini").
- --max-iterations INTEGER Maximum iterations for task solving (default: 30).
- --max-tokens-working-memory INTEGER Maximum tokens to keep in working memory (default: 4000).
- --compact-every-n-iteration INTEGER Compact memory every N iterations (default: 5).
- --help Show this message and exit.
-
- Commands:
- task Execute a task with the QuantaLogic AI Assistant.
- ```

- ### Commands
- task Execute a task with the QuantaLogic AI Assistant
-
- **Usage:** `quantalogic task [OPTIONS] [TASK]`
- **Description:** Execute a task with the QuantaLogic AI Assistant.
- **Options:**
- - `--file PATH`: Path to task file.
- - `--model-name TEXT`: Specify the model (litellm format, e.g., `openrouter/deepseek/deepseek-chat`).
- - `--verbose`: Enable verbose output.
- - `--mode [code|basic|interpreter|full|code-basic|search|search-full]`: Agent mode.
- - `--log [info|debug|warning]`: Set logging level.
- - `--vision-model-name TEXT`: Specify the vision model (litellm format).
- - `--max-iterations INTEGER`: Maximum iterations for task solving (default: 30).
- - `--max-tokens-working-memory INTEGER`: Maximum tokens to keep in working memory (default: 4000).
- - `--compact-every-n-iteration INTEGER`: Compact memory every N iterations (default: 5).
- - `--no-stream`: Disable streaming output (default: enabled).
- - `--help`: Show this message and exit.
-
- **Detailed Parameter Descriptions:**
-
- - **--model-name**: Specifies the LLM model to use (e.g., "openrouter/deepseek/deepseek-chat")
- - **--mode**: Selects agent capabilities:
- - *code*: Coding-focused with basic capabilities
- - *basic*: General-purpose without coding tools
- - *interpreter*: Interactive code execution
- - *full*: All capabilities enabled
- - *code-basic*: Coding with basic reasoning
- - *search*: Web search integration
- - **--log**: Controls logging verbosity (info, debug, warning)
- - **--vision-model-name**: Specifies vision model for image processing
- - **--max-iterations**: Limits task-solving attempts (default: 30)
- - **--max-tokens-working-memory**: Controls memory usage (default: None)
- - **--compact-every-n-iteration**: Memory optimization frequency (default: None)
- - **--no-stream**: Disables real-time output streaming



@@ -239,7 +228,6 @@ Example prompt: [04-write-a-tutorial.md](./examples/tasks/04-write-a-tutorial.md

  Here are some practical examples to help you get started:

- Here is the markdown table based on the provided directory listing:

  | Example | Description | File |
  |---------|-------------|------|
@@ -248,6 +236,12 @@ Here is the markdown table based on the provided directory listing:
  | Agent with Interpreter | An example of an agent that includes an interpreter. | [examples/03-agent-with-interpreter.py](examples/03-agent-with-interpreter.py) |
  | Agent Summary Task | An example of an agent performing a summary task. | [examples/04-agent-summary-task.py](examples/04-agent-summary-task.py) |
  | Code Example | A general code example. | [examples/05-code.py](examples/05-code.py) |
+ | Code Screen Example | An example demonstrating code execution with screen output. | [examples/06-code-screen.py](examples/06-code-screen.py) |
+ | Write Tutorial | An example of generating a tutorial using the agent. | [examples/07-write-tutorial.py](examples/07-write-tutorial.py) |
+ | PRD Writer | An example of generating a Product Requirements Document (PRD). | [examples/08-prd-writer.py](examples/08-prd-writer.py) |
+ | SQL Query | An example of executing SQL queries using the agent. | [examples/09-sql-query.py](examples/09-sql-query.py) |
+ | Finance Agent | An example of a finance-focused agent. | [examples/10-finance-agent.py](examples/10-finance-agent.py) |
+ | Textual Agent Interface | An example of a textual user interface for the agent. | [examples/11-textual-agent-interface.py](examples/11-textual-agent-interface.py) |


  ## 🔨 Key Components
{quantalogic-0.30.1 → quantalogic-0.30.4}/pyproject.toml

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "quantalogic"
- version = "0.30.1"
+ version = "0.30.4"
  description = "QuantaLogic ReAct Agents"
  authors = ["Raphaël MANSUY <raphael.mansuy@gmail.com>"]
  readme = "README.md"
{quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/agent.py

@@ -467,27 +467,6 @@ class Agent(BaseModel):
  answer=None,
  )

- def _format_observation_response(self, response: str, variable_name: str, iteration: int) -> str:
- """Format the observation response with the given response, variable name, and iteration."""
- response_display = response
- if len(response) > MAX_RESPONSE_LENGTH:
- response_display = response[:MAX_RESPONSE_LENGTH]
- response_display += (
- f"... content was truncated. Full content available by interpolation in variable {variable_name}"
- )
-
- formatted_response = (
- "\n"
- f"--- Observations for iteration {iteration} / max {self.max_iterations} ---\n"
- "\n"
- f"\n --- Tool execution result stored in variable ${variable_name}$ --- \n"
- "\n"
- f"<{variable_name}>\n{response_display}\n</{variable_name}>\n" + "\n"
- "\n"
- "--- Tools --- \n"
- )
- return formatted_response
-
  def _format_observation_response(self, response: str, variable_name: str, iteration: int) -> str:
  """Format the observation response with the given response, variable name, and iteration."""
  response_display = response
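The hunk above removes a duplicated definition of `_format_observation_response`. In Python, when a class body defines the same method name twice, the later definition silently replaces the earlier one at class creation time, so only one copy was ever in effect; a minimal standalone illustration (the class and values below are made up):

```python
# Why the duplicate method could be dropped safely: in a class body, a later
# def with the same name overwrites the earlier one.
class Demo:
    def greet(self) -> str:  # first definition
        return "first"

    def greet(self) -> str:  # redefinition: replaces the one above
        return "second"


print(Demo().greet())  # prints "second" -- only the last definition survives
```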
@@ -552,11 +531,11 @@ class Agent(BaseModel):
  )

  question_validation: str = (
- "Do you permit the execution of this tool?"
- f"Tool: {tool_name}"
- f"Arguments: {arguments_with_values}"
+ "Do you permit the execution of this tool?\n"
+ f"Tool: {tool_name}\n"
+ f"Arguments: {arguments_with_values}\n"
  "Yes or No"
- ).join("\n")
+ )
  permission_granted = self.ask_for_user_validation(question_validation)

  self._emit_event(
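The change above fixes a misuse of `str.join`: in the old code, the parenthesized string was used as the separator and `"\n"` as the iterable, so the validation prompt collapsed to a single newline and the user never saw the question. A small reproduction sketch with placeholder values:

```python
# Reproduction of the bug fixed above; tool_name and arguments are placeholders.
tool_name = "write_file_tool"
arguments_with_values = {"file_path": "out.txt"}

broken = (
    "Do you permit the execution of this tool?"
    f"Tool: {tool_name}"
    f"Arguments: {arguments_with_values}"
    "Yes or No"
).join("\n")
# str.join uses the long string as the *separator* and iterates over "\n",
# which has a single character, so no separator is ever inserted.
assert broken == "\n"

fixed = (
    "Do you permit the execution of this tool?\n"
    f"Tool: {tool_name}\n"
    f"Arguments: {arguments_with_values}\n"
    "Yes or No"
)
print(fixed)
```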
@@ -579,21 +558,29 @@ class Agent(BaseModel):
  arguments_with_values_interpolated = {
  key: self._interpolate_variables(value) for key, value in arguments_with_values.items()
  }
+
+ # Convert arguments to correct types
+ try:
+ converted_args = self.tools._convert_kwargs_types(tool_name, **arguments_with_values_interpolated)
+ except ValueError as e:
+ logger.error(f"Type conversion failed: {str(e)}")
+ return "", f"Error: Type conversion failed for tool '{tool_name}': {str(e)}"
+
  # test if tool need variables in context
  if tool.need_variables:
  # Inject variables into the tool if needed
- arguments_with_values_interpolated["variables"] = self.variable_store
+ converted_args["variables"] = self.variable_store
  if tool.need_caller_context_memory:
  # Inject caller context into the tool if needed
- arguments_with_values_interpolated["caller_context_memory"] = self.memory.memory
+ converted_args["caller_context_memory"] = self.memory.memory

  # Add injectable variables
  injectable_properties = tool.get_injectable_properties_in_execution()
  for key, value in injectable_properties.items():
- arguments_with_values_interpolated[key] = value
+ converted_args[key] = value

  # Call tool execute with named arguments
- response = tool.execute(**arguments_with_values_interpolated)
+ response = tool.execute(**converted_args)
  executed_tool = tool.name
  except Exception as e:
  response = f"Error executing tool: {tool_name}: {str(e)}\n"
@@ -702,7 +689,9 @@ class Agent(BaseModel):
  # Remove the last assistant / user message
  user_message = memory_copy.pop()
  assistant_message = memory_copy.pop()
- summary = self.model.generate_with_history(messages_history=memory_copy, prompt=prompt_summary)
+ summary = self.model.generate_with_history(
+ messages_history=memory_copy, prompt=prompt_summary
+ )
  # Remove user message
  memory_copy.pop()
  # Replace by summary
@@ -1,6 +1,8 @@
1
1
  model_info = {
2
2
  "deepseek-reasoner": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
3
3
  "openrouter/deepseek/deepseek-r1": {"max_output_tokens": 8 * 1024, "max_input_tokens": 1024 * 128},
4
+ "openrouter/mistralai/mistral-large-2411": {"max_output_tokens": 128 * 1024, "max_input_tokens": 1024 * 128},
5
+ "mistralai/mistral-large-2411": {"max_output_tokens": 128 * 1024, "max_input_tokens": 1024 * 128},
4
6
  }
5
7
 
6
8
 
{quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tool_manager.py

@@ -66,3 +66,45 @@ class ToolManager(BaseModel):
  markdown += "\n"
  index += 1
  return markdown
+
+ def _convert_kwargs_types(self, tool_name: str, **kwargs) -> dict:
+ """Convert kwargs values to their expected types based on tool definition.
+
+ Args:
+ tool_name: Name of the tool to get argument types from
+ **kwargs: Input arguments to convert
+
+ Returns:
+ Dictionary of converted arguments
+
+ Raises:
+ ValueError: If type conversion fails for a required argument
+ """
+ tool = self.tools[tool_name]
+ converted = {}
+
+ for arg in tool.arguments:
+ if arg.name in kwargs:
+ try:
+ if arg.arg_type == "int":
+ converted[arg.name] = int(kwargs[arg.name])
+ elif arg.arg_type == "float":
+ converted[arg.name] = float(kwargs[arg.name])
+ elif arg.arg_type == "boolean":
+ val = str(kwargs[arg.name]).lower()
+ converted[arg.name] = val in ("true", "1", "yes", "y")
+ else: # string
+ converted[arg.name] = str(kwargs[arg.name])
+ except (ValueError, TypeError) as e:
+ if arg.required:
+ raise ValueError(
+ f"Failed to convert required argument '{arg.name}' to {arg.arg_type}: {str(e)}"
+ )
+ logger.warning(
+ f"Failed to convert optional argument '{arg.name}' to {arg.arg_type}: {str(e)}"
+ )
+ converted[arg.name] = kwargs[arg.name] # keep original value
+ elif arg.required and arg.default is None:
+ raise ValueError(f"Missing required argument: {arg.name}")
+
+ return converted
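A standalone restatement of the coercion rules implemented in the new `_convert_kwargs_types` above, not the library API itself; the argument names and declared types here are hypothetical, the point being that string values produced by an LLM tool call are cast according to each tool argument's declared `arg_type`:

```python
# Standalone sketch of the conversion behaviour added above (argument names are made up).
def coerce(value, arg_type: str):
    """Mirror the arg_type handling in ToolManager._convert_kwargs_types."""
    if arg_type == "int":
        return int(value)
    if arg_type == "float":
        return float(value)
    if arg_type == "boolean":
        return str(value).lower() in ("true", "1", "yes", "y")
    return str(value)  # default: treat as string


raw_args = {"page": ("2", "int"), "recursive": ("yes", "boolean"), "pattern": ("*.py", "string")}
converted = {name: coerce(value, arg_type) for name, (value, arg_type) in raw_args.items()}
print(converted)  # {'page': 2, 'recursive': True, 'pattern': '*.py'}
```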
{quantalogic-0.30.1 → quantalogic-0.30.4}/quantalogic/tools/search_definition_names.py

@@ -103,7 +103,13 @@ class SearchDefinitionNames(Tool):
  raise ValueError(f"Unsupported language: {language_name}")

  def execute(
- self, directory_path: str, language_name: str, file_pattern: str = "*", output_format: str = "text", page: int = 1, page_size: int = 10
+ self,
+ directory_path: str,
+ language_name: str,
+ file_pattern: str = "*",
+ output_format: str = "text",
+ page: int = 1,
+ page_size: int = 10,
  ) -> Union[str, Dict]:
  """Searches for definition names in a directory using Tree-sitter.

@@ -119,6 +125,9 @@
  Union[str, Dict]: The search results in the specified format.
  """
  try:
+ page = int(page)
+ page_size = int(page_size)
+
  # Validate pagination parameters
  if page < 1:
  raise ValueError("Page number must be a positive integer.")
@@ -175,7 +184,7 @@
  output_format,
  page,
  page_size,
- len(results)
+ len(results),
  )

  except Exception as e: