quantalogic 0.2.16__py3-none-any.whl → 0.2.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,9 +1,11 @@
1
1
  """LLM Tool for generating answers to questions using a language model."""
2
2
 
3
- import logging
3
+ from typing import Callable
4
4
 
5
+ from loguru import logger
5
6
  from pydantic import ConfigDict, Field
6
7
 
8
+ from quantalogic.console_print_token import console_print_token
7
9
  from quantalogic.generative_model import GenerativeModel, Message
8
10
  from quantalogic.tools.tool import Tool, ToolArgument
9
11
 
@@ -53,15 +55,42 @@ class LLMTool(Tool):
53
55
  )
54
56
 
55
57
  model_name: str = Field(..., description="The name of the language model to use")
56
- generative_model: GenerativeModel | None = Field(default=None)
57
58
  system_prompt: str | None = Field(default=None)
59
+ on_token: Callable | None = Field(default=None, exclude=True)
60
+ generative_model: GenerativeModel | None = Field(default=None, exclude=True)
61
+
62
+ def __init__(
63
+ self,
64
+ model_name: str,
65
+ system_prompt: str | None = None,
66
+ on_token: Callable | None = None,
67
+ name: str = "llm_tool",
68
+ generative_model: GenerativeModel | None = None,
69
+ ):
70
+ # Use dict to pass validated data to parent constructor
71
+ super().__init__(
72
+ **{
73
+ "model_name": model_name,
74
+ "system_prompt": system_prompt,
75
+ "on_token": on_token,
76
+ "name": name,
77
+ "generative_model": generative_model,
78
+ }
79
+ )
80
+
81
+ # Initialize the generative model
82
+ self.model_post_init(None)
58
83
 
59
84
  def model_post_init(self, __context):
60
85
  """Initialize the generative model after model initialization."""
61
86
  if self.generative_model is None:
62
87
  self.generative_model = GenerativeModel(model=self.model_name)
63
- logging.debug(f"Initialized LLMTool with model: {self.model_name}")
88
+ logger.debug(f"Initialized LLMTool with model: {self.model_name}")
64
89
 
90
+ # Only set up event listener if on_token is provided
91
+ if self.on_token is not None:
92
+ logger.debug(f"Setting up event listener for LLMTool with model: {self.model_name}")
93
+ self.generative_model.event_emitter.on("stream_chunk", self.on_token)
65
94
 
66
95
  def execute(
67
96
  self, system_prompt: str | None = None, prompt: str | None = None, temperature: str | None = None
@@ -85,7 +114,7 @@ class LLMTool(Tool):
85
114
  if not (0.0 <= temp <= 1.0):
86
115
  raise ValueError("Temperature must be between 0 and 1.")
87
116
  except ValueError as ve:
88
- logging.error(f"Invalid temperature value: {temperature}")
117
+ logger.error(f"Invalid temperature value: {temperature}")
89
118
  raise ValueError(f"Invalid temperature value: {temperature}") from ve
90
119
 
91
120
  used_system_prompt = self.system_prompt if self.system_prompt else system_prompt
@@ -96,20 +125,29 @@ class LLMTool(Tool):
96
125
  Message(role="user", content=prompt),
97
126
  ]
98
127
 
128
+ is_streaming = self.on_token is not None
129
+
99
130
  # Set the model's temperature
100
131
  if self.generative_model:
101
132
  self.generative_model.temperature = temp
102
133
 
103
134
  # Generate the response using the generative model
104
135
  try:
105
- response_stats = self.generative_model.generate_with_history(
106
- messages_history=messages_history, prompt=""
136
+ result = self.generative_model.generate_with_history(
137
+ messages_history=messages_history, prompt=prompt, streaming=is_streaming
107
138
  )
108
- response = response_stats.response.strip()
109
- logging.info(f"Generated response: {response}")
139
+
140
+ if is_streaming:
141
+ response = ""
142
+ for chunk in result:
143
+ response += chunk
144
+ else:
145
+ response = result.response
146
+
147
+ logger.debug(f"Generated response: {response}")
110
148
  return response
111
149
  except Exception as e:
112
- logging.error(f"Error generating response: {e}")
150
+ logger.error(f"Error generating response: {e}")
113
151
  raise Exception(f"Error generating response: {e}") from e
114
152
  else:
115
153
  raise ValueError("Generative model not initialized")
@@ -123,6 +161,9 @@ if __name__ == "__main__":
123
161
  temperature = "0.7"
124
162
  answer = tool.execute(system_prompt=system_prompt, prompt=question, temperature=temperature)
125
163
  print(answer)
126
- pirate = LLMTool(model_name="openrouter/openai/gpt-4o-mini", system_prompt="You are a pirate.")
164
+ pirate = LLMTool(
165
+ model_name="openrouter/openai/gpt-4o-mini", system_prompt="You are a pirate.", on_token=console_print_token
166
+ )
127
167
  pirate_answer = pirate.execute(system_prompt=system_prompt, prompt=question, temperature=temperature)
128
- print(pirate_answer)
168
+ print("\n")
169
+ print(f"Answer: {pirate_answer}")
@@ -1,8 +1,8 @@
1
1
  """LLM Vision Tool for analyzing images using a language model."""
2
2
 
3
- import logging
4
3
  from typing import Optional
5
4
 
5
+ from loguru import logger
6
6
  from pydantic import ConfigDict, Field
7
7
 
8
8
  from quantalogic.generative_model import GenerativeModel, Message
@@ -65,7 +65,12 @@ class LLMVisionTool(Tool):
65
65
  """Initialize the generative model after model initialization."""
66
66
  if self.generative_model is None:
67
67
  self.generative_model = GenerativeModel(model=self.model_name)
68
- logging.debug(f"Initialized LLMVisionTool with model: {self.model_name}")
68
+ logger.debug(f"Initialized LLMVisionTool with model: {self.model_name}")
69
+
70
+ # Only set up event listener if on_token is provided
71
+ if self.on_token is not None:
72
+ logger.debug(f"Setting up event listener for LLMVisionTool with model: {self.model_name}")
73
+ self.generative_model.event_emitter.on("stream_chunk", self.on_token)
69
74
 
70
75
  def execute(self, system_prompt: str, prompt: str, image_url: str, temperature: str = "0.7") -> str:
71
76
  """Execute the tool to analyze an image and generate a response.
@@ -88,7 +93,7 @@ class LLMVisionTool(Tool):
88
93
  if not (0.0 <= temp <= 1.0):
89
94
  raise ValueError("Temperature must be between 0 and 1.")
90
95
  except ValueError as ve:
91
- logging.error(f"Invalid temperature value: {temperature}")
96
+ logger.error(f"Invalid temperature value: {temperature}")
92
97
  raise ValueError(f"Invalid temperature value: {temperature}") from ve
93
98
 
94
99
  if not image_url.startswith(("http://", "https://")):
@@ -105,14 +110,25 @@ class LLMVisionTool(Tool):
105
110
  self.generative_model.temperature = temp
106
111
 
107
112
  try:
113
+ is_streaming = self.on_token is not None
108
114
  response_stats = self.generative_model.generate_with_history(
109
- messages_history=messages_history, prompt=prompt, image_url=image_url
115
+ messages_history=messages_history,
116
+ prompt=prompt,
117
+ image_url=image_url,
118
+ streaming=is_streaming
110
119
  )
111
- response = response_stats.response.strip()
112
- logging.info(f"Generated response: {response}")
120
+
121
+ if is_streaming:
122
+ response = ""
123
+ for chunk in response_stats:
124
+ response += chunk
125
+ else:
126
+ response = response_stats.response.strip()
127
+
128
+ logger.info(f"Generated response: {response}")
113
129
  return response
114
130
  except Exception as e:
115
- logging.error(f"Error generating response: {e}")
131
+ logger.error(f"Error generating response: {e}")
116
132
  raise Exception(f"Error generating response: {e}") from e
117
133
 
118
134
 
quantalogic/xml_parser.py CHANGED
@@ -7,6 +7,7 @@ with support for handling malformed XML and CDATA sections.
7
7
  import html
8
8
  import re
9
9
  from collections import defaultdict
10
+ from functools import lru_cache
10
11
  from typing import Self
11
12
 
12
13
  from loguru import logger
@@ -51,15 +52,38 @@ class ToleranceXMLParser:
51
52
  edge cases such as incomplete tags and CDATA sections.
52
53
  """
53
54
 
55
+ # Default mappings for element name normalization
56
+ DEFAULT_NAME_MAP = {
57
+ "o": "output",
58
+ "i": "input",
59
+ "opt": "optional"
60
+ }
61
+
54
62
  def __init__(self: Self) -> None:
55
63
  """Initialize the parser with regex patterns for matching XML-like elements."""
56
- # Pattern for matching individual XML elements, including malformed tags
57
- # Modified to be more lenient with content and preserve exact formatting
58
- self.element_pattern = re.compile(r"<([^/>]+?)>(.*?)(?:</\1>|<\1>)", re.DOTALL)
64
+ # Pattern for matching individual XML elements with better whitespace handling
65
+ self.element_pattern = re.compile(
66
+ r"<\s*([^/>]+?)\s*>(.*?)(?:</\s*\1\s*>|<\s*\1\s*>)",
67
+ re.DOTALL
68
+ )
59
69
  # Pattern for matching CDATA sections
60
70
  self.cdata_pattern = re.compile(r"<!\[CDATA\[(.*?)]]>", re.DOTALL)
61
71
  logger.debug("Initialized ToleranceXMLParser with regex patterns")
62
72
 
73
+ def _validate_input(self, text: str) -> None:
74
+ """Validate input text before processing.
75
+
76
+ Args:
77
+ text: Input text to validate.
78
+
79
+ Raises:
80
+ ValueError: If input text is invalid.
81
+ """
82
+ if not text or not isinstance(text, str):
83
+ raise ValueError("Input text must be a non-empty string")
84
+ if len(text.strip()) == 0:
85
+ raise ValueError("Input text cannot be whitespace only")
86
+
63
87
  def _extract_and_remove_cdata(self: Self, content: str, preserve_cdata: bool = False) -> tuple[str, list[str]]:
64
88
  """Extract CDATA sections from content.
65
89
 
@@ -96,6 +120,7 @@ class ToleranceXMLParser:
96
120
  # Only unescape HTML entities, preserve everything else exactly as is
97
121
  return html.unescape(content)
98
122
 
123
+ @lru_cache(maxsize=128)
99
124
  def _map_element_name(self: Self, name: str) -> str:
100
125
  """Map element names to their canonical form.
101
126
 
@@ -105,9 +130,82 @@ class ToleranceXMLParser:
105
130
  Returns:
106
131
  Canonical element name.
107
132
  """
108
- # Map common element name variations
109
- name_map = {"o": "output", "i": "input", "opt": "optional"}
110
- return name_map.get(name.strip(), name.strip())
133
+ return self.DEFAULT_NAME_MAP.get(name.strip(), name.strip())
134
+
135
+ def _build_element_pattern(self, element_name: str) -> re.Pattern[str]:
136
+ """Build regex pattern for finding specific XML elements.
137
+
138
+ Args:
139
+ element_name: Name of the element to match.
140
+
141
+ Returns:
142
+ Compiled regex pattern for matching the element.
143
+ """
144
+ non_cdata = r"(?:(?!<!\[CDATA\[|]]>).)*?"
145
+ cdata_section = r"(?:<!\[CDATA\[.*?]]>)?"
146
+ content_pattern = f"({non_cdata}{cdata_section}{non_cdata})"
147
+ closing_pattern = "(?:</\1>|<\1>)"
148
+
149
+ return re.compile(
150
+ f"<{element_name}>{content_pattern}{closing_pattern}",
151
+ re.DOTALL
152
+ )
153
+
154
+ def _find_all_elements(self, text: str) -> list[tuple[str, str]]:
155
+ """Find all XML elements in text.
156
+
157
+ Args:
158
+ text: Input text to search.
159
+
160
+ Returns:
161
+ List of tuples containing element names and their content.
162
+ """
163
+ return [(match.group(1), match.group(2) or "")
164
+ for match in self.element_pattern.finditer(text)]
165
+
166
+ def _process_element_content(self, content: str, preserve_cdata: bool) -> str:
167
+ """Process content of a single element.
168
+
169
+ Args:
170
+ content: Raw element content.
171
+ preserve_cdata: Whether to preserve CDATA sections.
172
+
173
+ Returns:
174
+ Processed content string.
175
+ """
176
+ content, cdata_sections = self._extract_and_remove_cdata(content, preserve_cdata)
177
+ content = self._clean_content(content)
178
+
179
+ # If content is empty but we have CDATA sections and we're not preserving them
180
+ if not content.strip() and cdata_sections and not preserve_cdata:
181
+ return cdata_sections[0]
182
+ return content
183
+
184
+ def _process_elements(
185
+ self,
186
+ elements: list[tuple[str, str]],
187
+ preserve_cdata: bool
188
+ ) -> dict[str, str]:
189
+ """Process found elements and handle CDATA sections.
190
+
191
+ Args:
192
+ elements: List of element name and content tuples.
193
+ preserve_cdata: Whether to preserve CDATA sections.
194
+
195
+ Returns:
196
+ Dictionary mapping element names to their processed content.
197
+ """
198
+ result: dict[str, str] = defaultdict(str)
199
+ for name, content in elements:
200
+ name = self._map_element_name(name)
201
+ result[name] = self._process_element_content(content, preserve_cdata)
202
+
203
+ # Handle nested elements
204
+ nested_elements = self._find_all_elements(content)
205
+ nested_results = self._process_elements(nested_elements, preserve_cdata)
206
+ result.update(nested_results)
207
+
208
+ return dict(result)
111
209
 
112
210
  def _extract_element_content(self: Self, text: str, preserve_cdata: bool = False) -> dict[str, str]:
113
211
  """Extract content from nested XML elements.
@@ -119,35 +217,8 @@ class ToleranceXMLParser:
119
217
  Returns:
120
218
  Dictionary mapping element names to their content values.
121
219
  """
122
- elements: dict[str, str] = defaultdict(str)
123
-
124
- # Process each match
125
- for match in self.element_pattern.finditer(text):
126
- name = match.group(1)
127
- content = match.group(2) or ""
128
-
129
- # Map element name to canonical form
130
- name = self._map_element_name(name)
131
-
132
- # Extract and handle CDATA sections
133
- content, cdata_sections = self._extract_and_remove_cdata(content, preserve_cdata)
134
-
135
- # Clean and normalize content
136
- content = self._clean_content(content)
137
-
138
- # If the content is empty but we have CDATA sections and we're
139
- # not preserving them
140
- if not content.strip() and cdata_sections and not preserve_cdata:
141
- content = cdata_sections[0]
142
-
143
- # Store the element content
144
- elements[name] = content
145
-
146
- # Extract nested elements from the content
147
- nested_elements = self._extract_element_content(content, preserve_cdata)
148
- elements.update(nested_elements)
149
-
150
- return dict(elements) # Convert defaultdict to regular dict
220
+ elements = self._find_all_elements(text)
221
+ return self._process_elements(elements, preserve_cdata)
151
222
 
152
223
  def extract_elements(
153
224
  self: Self,
@@ -172,9 +243,7 @@ class ToleranceXMLParser:
172
243
  ValueError: If the input text is invalid or contains malformed XML.
173
244
  """
174
245
  try:
175
- if not text or not isinstance(text, str):
176
- raise ValueError("Input text must be a non-empty string")
177
-
246
+ self._validate_input(text)
178
247
  logger.debug(f"Extracting elements: {element_names or 'all'}")
179
248
 
180
249
  # Extract all elements and their content
@@ -206,18 +275,9 @@ class ToleranceXMLParser:
206
275
  ValueError: If the input text is invalid or contains malformed XML.
207
276
  """
208
277
  try:
209
- if not text or not isinstance(text, str):
210
- raise ValueError("Input text must be a non-empty string")
211
-
278
+ self._validate_input(text)
212
279
  elements: list[XMLElement] = []
213
- pattern = re.compile(
214
- f"<{element_name}>"
215
- r"((?:(?!<!\[CDATA\[|]]>).)*?"
216
- r"(?:<!\[CDATA\[.*?]]>)?"
217
- r"(?:(?!<!\[CDATA\[|]]>).)*?)"
218
- f"(?:</\1>|<\1>)",
219
- re.DOTALL,
220
- )
280
+ pattern = self._build_element_pattern(element_name)
221
281
 
222
282
  for match in pattern.finditer(text):
223
283
  content = match.group(1)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: quantalogic
3
- Version: 0.2.16
3
+ Version: 0.2.18
4
4
  Summary: QuantaLogic ReAct Agents
5
5
  Author: Raphaël MANSUY
6
6
  Author-email: raphael.mansuy@gmail.com
@@ -15,11 +15,20 @@ Requires-Dist: fastapi (>=0.115.6,<0.116.0)
15
15
  Requires-Dist: google-auth (>=2.20.0,<3.0.0)
16
16
  Requires-Dist: google-search-results (>=2.4.2,<3.0.0)
17
17
  Requires-Dist: litellm (>=1.56.4,<2.0.0)
18
+ Requires-Dist: llmlingua (>=0.2.2,<0.3.0)
18
19
  Requires-Dist: loguru (>=0.7.3,<0.8.0)
19
20
  Requires-Dist: markitdown (>=0.0.1a3,<0.0.2)
21
+ Requires-Dist: mkdocs-git-revision-date-localized-plugin (>=1.2.0,<2.0.0)
22
+ Requires-Dist: mkdocs-macros-plugin (>=1.0.4,<2.0.0)
23
+ Requires-Dist: mkdocs-material[imaging] (>=9.5.49,<10.0.0)
24
+ Requires-Dist: mkdocs-mermaid2-plugin (>=1.1.1,<2.0.0)
25
+ Requires-Dist: mkdocs-minify-plugin (>=0.7.1,<0.8.0)
26
+ Requires-Dist: mkdocstrings (>=0.24.0,<0.25.0)
27
+ Requires-Dist: mkdocstrings-python (>=1.7.0,<2.0.0)
20
28
  Requires-Dist: pathspec (>=0.12.1,<0.13.0)
21
29
  Requires-Dist: prompt-toolkit (>=3.0.48,<4.0.0)
22
30
  Requires-Dist: pydantic (>=2.10.4,<3.0.0)
31
+ Requires-Dist: pymdown-extensions (>=10.3.1,<11.0.0)
23
32
  Requires-Dist: rich (>=13.9.4,<14.0.0)
24
33
  Requires-Dist: serpapi (>=0.1.5,<0.2.0)
25
34
  Requires-Dist: tenacity (>=9.0.0,<10.0.0)
@@ -43,7 +52,7 @@ Description-Content-Type: text/markdown
43
52
 
44
53
  [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
45
54
  [![Python](https://img.shields.io/badge/Python-3.12+-blue.svg)](https://www.python.org/downloads/)
46
- [![Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg)]()
55
+ [![Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://quantalogic.github.io/quantalogic/)
47
56
 
48
57
  QuantaLogic is a ReAct (Reasoning & Action) framework for building advanced AI agents.
49
58
 
@@ -51,6 +60,8 @@ It seamlessly integrates large language models (LLMs) with a robust tool system,
51
60
 
52
61
  The `cli` version include coding capabilities comparable to Aider.
53
62
 
63
+ [📖 Documentation](https://quantalogic.github.io/quantalogic/)
64
+
54
65
 
55
66
  ## Why QuantaLogic?
56
67
 
@@ -70,6 +81,8 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
70
81
 
71
82
  ## 📋 Table of Contents
72
83
 
84
+ - [Release Notes](#release-notes)
85
+
73
86
  - [Installation](#-installation)
74
87
  - [Quick Start](#-quickstart)
75
88
  - [Key Components](#-key-components)
@@ -80,6 +93,13 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
80
93
  - [Development](#-development)
81
94
  - [Contributing](#-contributing)
82
95
  - [License](#-license)
96
+ - [Documentation Development](#-documentation-development)
97
+
98
+ ## Release Notes
99
+
100
+ See our [Release Notes](RELEASE_NOTES.MD) for detailed version history and changes.
101
+
102
+ [TODO List](TODO.md)
83
103
 
84
104
  ## 📦 Installation
85
105
 
@@ -122,20 +142,18 @@ The QuantaLogic CLI provides powerful command-line capabilities:
122
142
 
123
143
  ```bash
124
144
  Usage: quantalogic [OPTIONS] COMMAND [ARGS]...
125
-
126
145
  QuantaLogic AI Assistant - A powerful AI tool for various tasks.
127
146
 
128
147
  Options:
129
148
  --version Show version information.
130
- --model-name TEXT Specify the text model to use (litellm format,
131
- e.g. "openrouter/deepseek-chat").
132
- --vision-model-name TEXT Specify the vision model to use (litellm format,
133
- e.g. "openrouter/A/gpt-4o-mini").
134
- --log [info|debug|warning] Set logging level (info/debug/warning).
149
+ --model-name TEXT Specify the model (litellm format, e.g., "openrouter/deepseek/deepseek-chat").
150
+ --log [info|debug|warning] Set logging level.
135
151
  --verbose Enable verbose output.
152
+ --mode [code|basic|interpreter|full|code-basic|search|search-full] Agent mode.
153
+ --vision-model-name TEXT Specify the vision model (litellm format, e.g., "openrouter/A/gpt-4o-mini").
136
154
  --max-iterations INTEGER Maximum iterations for task solving (default: 30).
137
- --mode [code|basic|interpreter|full|code-basic|search|search-full]
138
- Agent mode (code/search/full).
155
+ --max-tokens-working-memory INTEGER Maximum tokens to keep in working memory (default: 4000).
156
+ --compact-every-n-iteration INTEGER Compact memory every N iterations (default: 5).
139
157
  --help Show this message and exit.
140
158
 
141
159
  Commands:
@@ -145,6 +163,38 @@ Commands:
145
163
  ### Commands
146
164
  task Execute a task with the QuantaLogic AI Assistant
147
165
 
166
+ **Usage:** `quantalogic task [OPTIONS] [TASK]`
167
+ **Description:** Execute a task with the QuantaLogic AI Assistant.
168
+ **Options:**
169
+ - `--file PATH`: Path to task file.
170
+ - `--model-name TEXT`: Specify the model (litellm format, e.g., `openrouter/deepseek/deepseek-chat`).
171
+ - `--verbose`: Enable verbose output.
172
+ - `--mode [code|basic|interpreter|full|code-basic|search|search-full]`: Agent mode.
173
+ - `--log [info|debug|warning]`: Set logging level.
174
+ - `--vision-model-name TEXT`: Specify the vision model (litellm format).
175
+ - `--max-iterations INTEGER`: Maximum iterations for task solving (default: 30).
176
+ - `--max-tokens-working-memory INTEGER`: Maximum tokens to keep in working memory (default: 4000).
177
+ - `--compact-every-n-iteration INTEGER`: Compact memory every N iterations (default: 5).
178
+ - `--no-stream`: Disable streaming output (default: enabled).
179
+ - `--help`: Show this message and exit.
180
+
181
+ **Detailed Parameter Descriptions:**
182
+
183
+ - **--model-name**: Specifies the LLM model to use (e.g., "openrouter/deepseek/deepseek-chat")
184
+ - **--mode**: Selects agent capabilities:
185
+ - *code*: Coding-focused with basic capabilities
186
+ - *basic*: General-purpose without coding tools
187
+ - *interpreter*: Interactive code execution
188
+ - *full*: All capabilities enabled
189
+ - *code-basic*: Coding with basic reasoning
190
+ - *search*: Web search integration
191
+ - **--log**: Controls logging verbosity (info, debug, warning)
192
+ - **--vision-model-name**: Specifies vision model for image processing
193
+ - **--max-iterations**: Limits task-solving attempts (default: 30)
194
+ - **--max-tokens-working-memory**: Controls memory usage (default: None)
195
+ - **--compact-every-n-iteration**: Memory optimization frequency (default: None)
196
+ - **--no-stream**: Disables real-time output streaming
197
+
148
198
 
149
199
 
150
200
  ### Detailed Usage
@@ -251,7 +301,7 @@ from quantalogic.tools import PythonTool, ReadFileTool
251
301
 
252
302
  # Create agent with specific tools
253
303
  agent = Agent(
254
- model_name="openrouter/deepseek-chat",
304
+ model_name="openrouter/deepseek/deepseek-chat",
255
305
  tools=[
256
306
  PythonTool(),
257
307
  ReadFileTool()
@@ -844,148 +894,7 @@ print(results)
844
894
  ```
845
895
  ```
846
896
 
847
- #### Creating Custom Tools
848
-
849
- ```python
850
- from quantalogic.tools import Tool, ToolArgument
851
-
852
- class DatabaseTool(Tool):
853
- name: str = "database_tool"
854
- description: str = "Execute database operations"
855
- need_validation: bool = True
856
-
857
- arguments: list[ToolArgument] = [
858
- ToolArgument(
859
- name="query",
860
- arg_type="string",
861
- description="SQL query to execute",
862
- required=True
863
- )
864
- ]
865
-
866
- def execute(self, query: str) -> str:
867
- # Tool implementation
868
- return "Query results"
869
897
  ```
870
-
871
-
872
- ## 🌐 Web Interface
873
-
874
- Features:
875
- - Real-time event visualization
876
- - Task submission and monitoring
877
- - Interactive validation dialogs
878
- - Model selection
879
- - Event filtering and search
880
-
881
- ### API Endpoints
882
-
883
- | Endpoint | Method | Description |
884
- | ------------------ | ------ | --------------- |
885
- | `/tasks` | POST | Submit tasks |
886
- | `/tasks/{task_id}` | GET | Task status |
887
- | `/events` | GET | SSE endpoint |
888
- | `/validate` | POST | Task validation |
889
-
890
-
891
- ## 📖 Examples
892
-
893
- ### Python Tool Integration Example
894
-
895
- ```python
896
- import os
897
-
898
- from quantalogic import Agent, console_print_events
899
- from quantalogic.tools import (
900
- PythonTool,
901
- )
902
-
903
- # Verify API key is set - required for authentication with DeepSeek's API
904
- # This check ensures the agent won't fail during runtime due to missing credentials
905
- if not os.environ.get("DEEPSEEK_API_KEY"):
906
- raise ValueError("DEEPSEEK_API_KEY environment variable is not set")
907
-
908
- # Initialize agent with DeepSeek model and Python tool
909
- agent = Agent(model_name="deepseek/deepseek-chat", tools=[PythonTool()])
910
-
911
- # Configure comprehensive event monitoring system
912
- # Tracks all agent activities including:
913
- # - Code execution steps
914
- # - Tool interactions
915
- # - Error conditions
916
- # Essential for debugging and performance optimization
917
- agent.event_emitter.on(
918
- "*",
919
- console_print_events,
920
- )
921
-
922
- # Execute a precision mathematics task demonstrating:
923
- # - High-precision calculations
924
- # - PythonTool integration
925
- # - Real-time monitoring capabilities
926
- result = agent.solve_task("1. Calculate PI with 10000 decimal places.")
927
- print(result)
928
- ```
929
-
930
- ### Agent with Event Monitoring
931
-
932
- ```python
933
- import os
934
-
935
- from quantalogic import Agent, console_print_events
936
- from quantalogic.tools import (
937
- LLMTool,
938
- )
939
-
940
- # Verify API key is set - required for authentication with DeepSeek's API
941
- # This check ensures the agent won't fail during runtime due to missing credentials
942
- if not os.environ.get("DEEPSEEK_API_KEY"):
943
- raise ValueError("DEEPSEEK_API_KEY environment variable is not set")
944
-
945
- # Initialize agent with DeepSeek model and LLM tool
946
- # The LLM tool serves dual purpose:
947
- # 1. As a reasoning engine for the agent's cognitive processes
948
- # 2. As a latent space explorer, enabling the agent to:
949
- # - Discover novel solution paths
950
- # - Generate creative combinations of concepts
951
- # - Explore alternative reasoning strategies
952
- # Using the same model ensures consistent behavior across both roles
953
- agent = Agent(model_name="deepseek/deepseek-chat", tools=[LLMTool(model_name="deepseek/deepseek-chat")])
954
-
955
- # Set up event monitoring to track agent's lifecycle
956
- # This helps in debugging and understanding the agent's behavior
957
- agent.event_emitter.on(
958
- [
959
- "task_complete",
960
- "task_think_start",
961
- "task_think_end",
962
- "tool_execution_start",
963
- "tool_execution_end",
964
- "error_max_iterations_reached",
965
- "memory_full",
966
- "memory_compacted",
967
- "memory_summary",
968
- ],
969
- console_print_events,
970
- )
971
-
972
- # Execute a multi-step task showcasing agent's capabilities
973
- # Demonstrates:
974
- # 1. Creative content generation
975
- # 2. Language translation
976
- # 3. Style adaptation
977
- # 4. Multi-step reasoning and execution
978
- result = agent.solve_task(
979
- "1. Write a poem in English about a dog. "
980
- "2. Translate the poem into French. "
981
- "3. Choose 2 French authors"
982
- "4. Rewrite the translated poem with the style of the chosen authors. "
983
- )
984
- print(result)
985
- ```
986
-
987
-
988
-
989
898
  ### Project Documentation
990
899
 
991
900
  ```python
@@ -993,7 +902,7 @@ from quantalogic import Agent
993
902
  from quantalogic.tools import MarkitdownTool, ReadFileTool
994
903
 
995
904
  agent = Agent(
996
- model_name="openrouter/deepseek-chat",
905
+ model_name="openrouter/deepseek/deepseek-chat",
997
906
  tools=[MarkitdownTool(), ReadFileTool()]
998
907
  )
999
908