quantalogic 0.35.0__py3-none-any.whl → 0.50.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. quantalogic/__init__.py +0 -4
  2. quantalogic/agent.py +603 -363
  3. quantalogic/agent_config.py +233 -46
  4. quantalogic/agent_factory.py +34 -22
  5. quantalogic/coding_agent.py +16 -14
  6. quantalogic/config.py +2 -1
  7. quantalogic/console_print_events.py +4 -8
  8. quantalogic/console_print_token.py +2 -2
  9. quantalogic/docs_cli.py +15 -10
  10. quantalogic/event_emitter.py +258 -83
  11. quantalogic/flow/__init__.py +23 -0
  12. quantalogic/flow/flow.py +595 -0
  13. quantalogic/flow/flow_extractor.py +672 -0
  14. quantalogic/flow/flow_generator.py +89 -0
  15. quantalogic/flow/flow_manager.py +407 -0
  16. quantalogic/flow/flow_manager_schema.py +169 -0
  17. quantalogic/flow/flow_yaml.md +419 -0
  18. quantalogic/generative_model.py +109 -77
  19. quantalogic/get_model_info.py +5 -5
  20. quantalogic/interactive_text_editor.py +100 -73
  21. quantalogic/main.py +17 -21
  22. quantalogic/model_info_list.py +3 -3
  23. quantalogic/model_info_litellm.py +14 -14
  24. quantalogic/prompts.py +2 -1
  25. quantalogic/{llm.py → quantlitellm.py} +29 -39
  26. quantalogic/search_agent.py +4 -4
  27. quantalogic/server/models.py +4 -1
  28. quantalogic/task_file_reader.py +5 -5
  29. quantalogic/task_runner.py +20 -20
  30. quantalogic/tool_manager.py +10 -21
  31. quantalogic/tools/__init__.py +98 -68
  32. quantalogic/tools/composio/composio.py +416 -0
  33. quantalogic/tools/{generate_database_report_tool.py → database/generate_database_report_tool.py} +4 -9
  34. quantalogic/tools/database/sql_query_tool_advanced.py +261 -0
  35. quantalogic/tools/document_tools/markdown_to_docx_tool.py +620 -0
  36. quantalogic/tools/document_tools/markdown_to_epub_tool.py +438 -0
  37. quantalogic/tools/document_tools/markdown_to_html_tool.py +362 -0
  38. quantalogic/tools/document_tools/markdown_to_ipynb_tool.py +319 -0
  39. quantalogic/tools/document_tools/markdown_to_latex_tool.py +420 -0
  40. quantalogic/tools/document_tools/markdown_to_pdf_tool.py +623 -0
  41. quantalogic/tools/document_tools/markdown_to_pptx_tool.py +319 -0
  42. quantalogic/tools/duckduckgo_search_tool.py +2 -4
  43. quantalogic/tools/finance/alpha_vantage_tool.py +440 -0
  44. quantalogic/tools/finance/ccxt_tool.py +373 -0
  45. quantalogic/tools/finance/finance_llm_tool.py +387 -0
  46. quantalogic/tools/finance/google_finance.py +192 -0
  47. quantalogic/tools/finance/market_intelligence_tool.py +520 -0
  48. quantalogic/tools/finance/technical_analysis_tool.py +491 -0
  49. quantalogic/tools/finance/tradingview_tool.py +336 -0
  50. quantalogic/tools/finance/yahoo_finance.py +236 -0
  51. quantalogic/tools/git/bitbucket_clone_repo_tool.py +181 -0
  52. quantalogic/tools/git/bitbucket_operations_tool.py +326 -0
  53. quantalogic/tools/git/clone_repo_tool.py +189 -0
  54. quantalogic/tools/git/git_operations_tool.py +532 -0
  55. quantalogic/tools/google_packages/google_news_tool.py +480 -0
  56. quantalogic/tools/grep_app_tool.py +123 -186
  57. quantalogic/tools/{dalle_e.py → image_generation/dalle_e.py} +37 -27
  58. quantalogic/tools/jinja_tool.py +6 -10
  59. quantalogic/tools/language_handlers/__init__.py +22 -9
  60. quantalogic/tools/list_directory_tool.py +131 -42
  61. quantalogic/tools/llm_tool.py +45 -15
  62. quantalogic/tools/llm_vision_tool.py +59 -7
  63. quantalogic/tools/markitdown_tool.py +17 -5
  64. quantalogic/tools/nasa_packages/models.py +47 -0
  65. quantalogic/tools/nasa_packages/nasa_apod_tool.py +232 -0
  66. quantalogic/tools/nasa_packages/nasa_neows_tool.py +147 -0
  67. quantalogic/tools/nasa_packages/services.py +82 -0
  68. quantalogic/tools/presentation_tools/presentation_llm_tool.py +396 -0
  69. quantalogic/tools/product_hunt/product_hunt_tool.py +258 -0
  70. quantalogic/tools/product_hunt/services.py +63 -0
  71. quantalogic/tools/rag_tool/__init__.py +48 -0
  72. quantalogic/tools/rag_tool/document_metadata.py +15 -0
  73. quantalogic/tools/rag_tool/query_response.py +20 -0
  74. quantalogic/tools/rag_tool/rag_tool.py +566 -0
  75. quantalogic/tools/rag_tool/rag_tool_beta.py +264 -0
  76. quantalogic/tools/read_html_tool.py +24 -38
  77. quantalogic/tools/replace_in_file_tool.py +10 -10
  78. quantalogic/tools/safe_python_interpreter_tool.py +10 -24
  79. quantalogic/tools/search_definition_names.py +2 -2
  80. quantalogic/tools/sequence_tool.py +14 -23
  81. quantalogic/tools/sql_query_tool.py +17 -19
  82. quantalogic/tools/tool.py +39 -15
  83. quantalogic/tools/unified_diff_tool.py +1 -1
  84. quantalogic/tools/utilities/csv_processor_tool.py +234 -0
  85. quantalogic/tools/utilities/download_file_tool.py +179 -0
  86. quantalogic/tools/utilities/mermaid_validator_tool.py +661 -0
  87. quantalogic/tools/utils/__init__.py +1 -4
  88. quantalogic/tools/utils/create_sample_database.py +24 -38
  89. quantalogic/tools/utils/generate_database_report.py +74 -82
  90. quantalogic/tools/wikipedia_search_tool.py +17 -21
  91. quantalogic/utils/ask_user_validation.py +1 -1
  92. quantalogic/utils/async_utils.py +35 -0
  93. quantalogic/utils/check_version.py +3 -5
  94. quantalogic/utils/get_all_models.py +2 -1
  95. quantalogic/utils/git_ls.py +21 -7
  96. quantalogic/utils/lm_studio_model_info.py +9 -7
  97. quantalogic/utils/python_interpreter.py +113 -43
  98. quantalogic/utils/xml_utility.py +178 -0
  99. quantalogic/version_check.py +1 -1
  100. quantalogic/welcome_message.py +7 -7
  101. quantalogic/xml_parser.py +0 -1
  102. {quantalogic-0.35.0.dist-info → quantalogic-0.50.0.dist-info}/METADATA +40 -1
  103. quantalogic-0.50.0.dist-info/RECORD +148 -0
  104. quantalogic-0.35.0.dist-info/RECORD +0 -102
  105. {quantalogic-0.35.0.dist-info → quantalogic-0.50.0.dist-info}/LICENSE +0 -0
  106. {quantalogic-0.35.0.dist-info → quantalogic-0.50.0.dist-info}/WHEEL +0 -0
  107. {quantalogic-0.35.0.dist-info → quantalogic-0.50.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,264 @@
+"""RAG (Retrieval Augmented Generation) Tool using LlamaIndex.
+
+This tool provides a flexible RAG implementation supporting multiple vector stores
+and embedding models, with configurable document processing options.
+"""
+
+import os
+from enum import Enum
+from typing import Any, List, Optional
+
+import chromadb
+from llama_index.core import (
+    SimpleDirectoryReader,
+    StorageContext,
+    VectorStoreIndex,
+    load_index_from_storage,
+)
+from llama_index.core.settings import Settings
+from llama_index.embeddings.bedrock import BedrockEmbedding
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.embeddings.instructor import InstructorEmbedding
+from llama_index.embeddings.openai import OpenAIEmbedding
+from llama_index.vector_stores.chroma import ChromaVectorStore
+from llama_index.vector_stores.faiss import FaissVectorStore
+from loguru import logger
+
+from quantalogic.tools.tool import Tool, ToolArgument
+
+
+class VectorStoreType(str, Enum):
+    """Supported vector store types."""
+    CHROMA = "chroma"
+    FAISS = "faiss"
+
+class EmbeddingType(str, Enum):
+    """Supported embedding model types."""
+    OPENAI = "openai"
+    HUGGINGFACE = "huggingface"
+    INSTRUCTOR = "instructor"
+    BEDROCK = "bedrock"
+
+class RagTool(Tool):
+    """Tool for performing RAG operations using LlamaIndex."""
+
+    name: str = "rag_tool"
+    description: str = (
+        "Retrieval Augmented Generation (RAG) tool for querying indexed documents "
+        "using vector stores and embedding models."
+    )
+    arguments: List[ToolArgument] = [
+        ToolArgument(
+            name="query",
+            arg_type="string",
+            description="Query string for searching the index",
+            required=True,
+            example="What is the main topic?",
+        ),
+    ]
+
+    def __init__(
+        self,
+        vector_store: str = "chroma",
+        embedding_model: str = "openai",
+        persist_dir: str = "./storage/rag",
+        document_paths: Optional[List[str]] = None,
+    ):
+        """Initialize the RAG tool with vector store, embedding model, and optional documents.
+
+        Args:
+            vector_store: Vector store type (chroma, faiss)
+            embedding_model: Embedding model type (openai, huggingface, instructor, bedrock)
+            persist_dir: Directory for persistence
+            document_paths: Optional list of paths to documents or directories to index
+        """
+        super().__init__()
+        self.persist_dir = os.path.abspath(persist_dir)
+        self.embed_model = self._setup_embedding_model(embedding_model)
+        self.vector_store = self._setup_vector_store(vector_store, self.persist_dir)
+
+        # Configure llama-index settings with the embedding model
+        Settings.embed_model = self.embed_model
+        self.storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
+
+        # Initialize index
+        self.index = None
+
+        # Check if we have documents to initialize with
+        if document_paths:
+            self._initialize_with_documents(document_paths)
+        else:
+            # Only try to load existing index if no documents were provided
+            index_exists = os.path.exists(os.path.join(self.persist_dir, "docstore.json"))
+            if index_exists:
+                try:
+                    self.index = load_index_from_storage(
+                        storage_context=self.storage_context,
+                    )
+                    logger.info(f"Loaded existing index from {self.persist_dir}")
+                except Exception as e:
+                    logger.error(f"Failed to load existing index: {str(e)}")
+                    self.index = None
+            else:
+                logger.warning("No existing index found and no documents provided")
+
+    def _initialize_with_documents(self, document_paths: List[str]) -> None:
+        """Initialize the index with the given documents.
+
+        Args:
+            document_paths: List of paths to documents or directories
+        """
+        try:
+            all_documents = []
+            for path in document_paths:
+                if not os.path.exists(path):
+                    logger.warning(f"Document path does not exist: {path}")
+                    continue
+
+                documents = SimpleDirectoryReader(
+                    input_files=[path] if os.path.isfile(path) else None,
+                    input_dir=path if os.path.isdir(path) else None,
+                ).load_data()
+                all_documents.extend(documents)
+
+            if all_documents:
+                self.index = VectorStoreIndex.from_documents(
+                    all_documents,
+                    storage_context=self.storage_context,
+                )
+                # Persist the index after creation
+                self.storage_context.persist(persist_dir=self.persist_dir)
+                logger.info(f"Created and persisted new index with {len(all_documents)} documents")
+            else:
+                logger.warning("No valid documents found in provided paths")
+
+        except Exception as e:
+            logger.error(f"Error initializing with documents: {str(e)}")
+            raise RuntimeError(f"Failed to initialize with documents: {str(e)}")
+
+    def _setup_embedding_model(self, model_type: str) -> Any:
+        """Set up the embedding model based on type.
+
+        Args:
+            model_type: Type of embedding model to use
+
+        Returns:
+            Configured embedding model instance
+        """
+        model_type = EmbeddingType(model_type.lower())
+        if model_type == EmbeddingType.OPENAI:
+            return OpenAIEmbedding()
+        elif model_type == EmbeddingType.HUGGINGFACE:
+            return HuggingFaceEmbedding()
+        elif model_type == EmbeddingType.INSTRUCTOR:
+            return InstructorEmbedding()
+        elif model_type == EmbeddingType.BEDROCK:
+            return BedrockEmbedding()
+        else:
+            raise ValueError(f"Unsupported embedding model type: {model_type}")
+
+    def _setup_vector_store(self, store_type: str, persist_dir: str) -> Any:
+        """Set up the vector store based on type.
+
+        Args:
+            store_type: Type of vector store to use
+            persist_dir: Directory for persistence
+
+        Returns:
+            Configured vector store instance
+        """
+        store_type = VectorStoreType(store_type.lower())
+
+        # Ensure the persist directory exists
+        os.makedirs(persist_dir, exist_ok=True)
+
+        if store_type == VectorStoreType.CHROMA:
+            # Use PersistentClient with explicit settings
+            chroma_persist_dir = os.path.join(persist_dir, "chroma")
+            os.makedirs(chroma_persist_dir, exist_ok=True)
+
+            chroma_client = chromadb.PersistentClient(
+                path=chroma_persist_dir,
+            )
+            collection = chroma_client.create_collection(
+                name="default_collection",
+                get_or_create=True
+            )
+            return ChromaVectorStore(
+                chroma_collection=collection,
+            )
+        elif store_type == VectorStoreType.FAISS:
+            return FaissVectorStore()
+        else:
+            raise ValueError(f"Unsupported vector store type: {store_type}")
+
+    def add_documents(self, document_path: str) -> bool:
+        """Add documents to the RAG system.
+
+        Args:
+            document_path: Path to document or directory of documents
+
+        Returns:
+            bool: True if documents were added successfully
+        """
+        try:
+            if not os.path.exists(document_path):
+                logger.error(f"Document path does not exist: {document_path}")
+                return False
+
+            documents = SimpleDirectoryReader(
+                input_files=[document_path] if os.path.isfile(document_path) else None,
+                input_dir=document_path if os.path.isdir(document_path) else None,
+            ).load_data()
+
+            # Create index with configured settings and storage context
+            self.index = VectorStoreIndex.from_documents(
+                documents,
+                storage_context=self.storage_context,
+            )
+
+            return True
+        except Exception as e:
+            logger.error(f"Error adding documents: {str(e)}")
+            return False
+
+    def execute(self, query: str) -> str:
+        """Execute a query against the indexed documents.
+
+        Args:
+            query: Query string for searching
+
+        Returns:
+            Query response
+
+        Raises:
+            ValueError: If no index is available
+        """
+        try:
+            if not self.index:
+                raise ValueError("No index available. Please add documents first using add_documents()")
+
+            # Query the index
+            query_engine = self.index.as_query_engine()
+            response = query_engine.query(query)
+            return str(response)
+
+        except Exception as e:
+            logger.error(f"Error in RAG query: {str(e)}")
+            raise RuntimeError(f"Query failed: {str(e)}")
+
+
+if __name__ == "__main__":
+    # Example usage
+    tool = RagTool(
+        vector_store="chroma",
+        embedding_model="openai",
+        persist_dir="./storage/rag",
+        document_paths=[
+            "./docs/file1.pdf",
+            "./docs/directory1"
+        ]
+    )
+
+    # Query
+    print(tool.execute("What is the main topic?"))
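
Note on the RagTool added above: the `__main__` block builds a fresh index from explicit document paths, while the `else` branch of `__init__` reloads a previously persisted index from `persist_dir`. A minimal sketch of that second path, assuming the optional llama-index and chromadb dependencies are installed; the import path is inferred from the file list above and the document path is illustrative:

from quantalogic.tools.rag_tool.rag_tool_beta import RagTool  # import path assumed, not confirmed

# With no document_paths, __init__ looks for ./storage/rag/docstore.json and,
# if it exists, reloads the persisted index via load_index_from_storage.
tool = RagTool(vector_store="chroma", embedding_model="openai", persist_dir="./storage/rag")

# add_documents() indexes a new file or directory into the same storage context.
if tool.add_documents("./docs/new_notes"):
    print(tool.execute("What do the new notes cover?"))
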
@@ -32,56 +32,48 @@ USER_AGENTS = [
     "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0",
     # Safari on macOS
     "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko)"
-    " Version/14.1.1 Safari/605.1.15"
+    " Version/14.1.1 Safari/605.1.15",
 ]
 
 # Additional headers to mimic real browser requests
 ADDITIONAL_HEADERS = {
-    "Accept": "text/html,application/xhtml+xml,application/xml;"
-    "q=0.9,image/webp,*/*;q=0.8",
+    "Accept": "text/html,application/xhtml+xml,application/xml;" "q=0.9,image/webp,*/*;q=0.8",
     "Accept-Language": "en-US,en;q=0.5",
     "Upgrade-Insecure-Requests": "1",
    "DNT": "1",  # Do Not Track
     "Connection": "keep-alive",
-    "Cache-Control": "max-age=0"
+    "Cache-Control": "max-age=0",
 }
 
+
 class ReadHTMLTool(Tool):
     """Tool for reading HTML content from files or URLs in specified line ranges."""
 
     class Arguments(BaseModel):
         source: str = Field(
-            ...,
-            description="The file path or URL to read HTML from",
-            example="https://example.com or ./example.html"
+            ..., description="The file path or URL to read HTML from", example="https://example.com or ./example.html"
         )
         convert: Optional[str] = Field(
             "text",
             description="Convert input to 'text' (Markdown) or 'html' no conversion. Default is 'text'",
-            example="'text' or 'html'"
+            example="'text' or 'html'",
         )
         line_start: Optional[int] = Field(
-            1,
-            description="The starting line number (1-based index). Default: 1",
-            ge=1,
-            example="1"
+            1, description="The starting line number (1-based index). Default: 1", ge=1, example="1"
        )
        line_end: Optional[int] = Field(
-            300,
-            description="The ending line number (1-based index). Default: 300",
-            ge=1,
-            example="300"
+            300, description="The ending line number (1-based index). Default: 300", ge=1, example="300"
        )
 
-        @field_validator('convert')
+        @field_validator("convert")
         def validate_convert(cls, v):
             if v not in ["text", "html"]:
                 raise ValueError("Convert must be either 'text' or 'html'")
             return v
 
-        @field_validator('line_end')
+        @field_validator("line_end")
         def validate_line_end(cls, v, values):
-            if 'line_start' in values and v < values['line_start']:
+            if "line_start" in values and v < values["line_start"]:
                 raise ValueError("line_end must be greater than or equal to line_start")
             return v
 
@@ -97,13 +89,13 @@ class ReadHTMLTool(Tool):
             arg_type="string",
             description="The file path or URL to read HTML from",
             required=True,
-            example="https://example.com or ./example.html"
+            example="https://example.com or ./example.html",
         ),
         ToolArgument(
             name="convert",
             arg_type="string",
             description="Convert input to 'text' (Markdown) or 'html'. Default is 'text'",
-            default='text',
+            default="text",
             required=False,
             example="'text' or 'html'",
         ),
@@ -113,7 +105,7 @@ class ReadHTMLTool(Tool):
             description="The starting line number (1-based index). Default: 1",
             required=False,
             example="1",
-            default="1"
+            default="1",
         ),
         ToolArgument(
             name="line_end",
@@ -121,8 +113,8 @@ class ReadHTMLTool(Tool):
             description="The ending line number (1-based index). Default: 300",
             required=False,
             example="300",
-            default="300"
-        )
+            default="300",
+        ),
     ]
 
     def validate_source(self, source: str) -> bool:
@@ -140,7 +132,7 @@ class ReadHTMLTool(Tool):
     def read_from_file(self, file_path: str) -> str:
         """Read HTML content from a file."""
         try:
-            with open(file_path, encoding='utf-8') as file:
+            with open(file_path, encoding="utf-8") as file:
                 return file.read()
         except Exception as e:
             logger.error(f"Error reading file: {e}")
@@ -157,12 +149,7 @@ class ReadHTMLTool(Tool):
             time.sleep(random.uniform(0.5, 2.0))
 
             # Use a timeout to prevent hanging
-            response = requests.get(
-                url,
-                headers=headers,
-                timeout=10,
-                allow_redirects=True
-            )
+            response = requests.get(url, headers=headers, timeout=10, allow_redirects=True)
             response.raise_for_status()
             return response.text
         except requests.RequestException as e:
@@ -172,7 +159,7 @@ class ReadHTMLTool(Tool):
     def parse_html(self, html_content: str) -> BeautifulSoup:
         """Parse HTML content using BeautifulSoup."""
         try:
-            return BeautifulSoup(html_content, 'html.parser')
+            return BeautifulSoup(html_content, "html.parser")
         except Exception as e:
             logger.error(f"Error parsing HTML: {e}")
             raise ValueError(f"Error parsing HTML: {e}")
@@ -207,7 +194,7 @@ class ReadHTMLTool(Tool):
         if convert_type == "html":
             # Ensure content is valid HTML
             try:
-                soup = BeautifulSoup(content, 'html.parser')
+                soup = BeautifulSoup(content, "html.parser")
                 return soup.prettify()
             except Exception as e:
                 logger.error(f"Error prettifying HTML: {e}")
@@ -215,8 +202,7 @@
 
         return content
 
-    def execute(self, source: str, convert: Optional[str] = 'text',
-                line_start: int = 1, line_end: int = 300) -> str:
+    def execute(self, source: str, convert: Optional[str] = "text", line_start: int = 1, line_end: int = 300) -> str:
         """Execute the tool to read and parse HTML content in specified line ranges."""
         logger.debug(f"Executing read_html_tool with source: {source}")
 
@@ -242,7 +228,7 @@
         adjusted_end_line = min(line_end, total_lines)
 
         # Step 5: Slice lines based on line_start and adjusted_end_line
-        sliced_lines = lines[line_start - 1: adjusted_end_line]
+        sliced_lines = lines[line_start - 1 : adjusted_end_line]
         sliced_content = "\n".join(sliced_lines)
 
         # Step 6: Calculate actual_end_line based on lines returned
@@ -268,7 +254,7 @@
             f"Is Last Block: {'Yes' if is_last_block else 'No'}",
             "==== Content ====",
             sliced_content,
-            "==== End of Block ===="
+            "==== End of Block ====",
         ]
 
         return "\n".join(result)
@@ -302,4 +288,4 @@ if __name__ == "__main__":
         else:
             print("No local test file found.")
     except Exception as e:
-        print(f"Local File Test Failed: {e}")
+        print(f"Local File Test Failed: {e}")
@@ -64,7 +64,7 @@ class ReplaceInFileTool(Tool):
         "Returns the updated content or an error."
         "⚠️ THIS TOOL MUST BE USED IN PRIORITY TO UPDATE AN EXISTING FILE."
     )
-    need_validation: bool = True
+    need_validation: bool = False
 
     SIMILARITY_THRESHOLD: float = 0.85
 
@@ -126,12 +126,12 @@ class ReplaceInFileTool(Tool):
 
     def normalize_whitespace(self, text: str) -> str:
         """Normalize leading whitespace by converting tabs to spaces."""
-        return '\n'.join([self._normalize_line(line) for line in text.split('\n')])
+        return "\n".join([self._normalize_line(line) for line in text.split("\n")])
 
     def _normalize_line(self, line: str) -> str:
         """Normalize leading whitespace in a single line."""
         leading_ws = len(line) - len(line.lstrip())
-        return line.replace('\t', ' ', leading_ws)  # Convert tabs to 4 spaces only in leading whitespace
+        return line.replace("\t", " ", leading_ws)  # Convert tabs to 4 spaces only in leading whitespace
 
     def parse_diff(self, diff: str) -> list[SearchReplaceBlock]:
         """Parses the diff string into a list of SearchReplaceBlock instances."""
@@ -279,25 +279,25 @@ class ReplaceInFileTool(Tool):
     def find_similar_match(self, search: str, content: str) -> Tuple[float, str]:
         """Finds the most similar substring in content compared to search with whitespace normalization."""
         norm_search = self.normalize_whitespace(search)
-        content_lines = content.split('\n')
+        content_lines = content.split("\n")
         norm_content = self.normalize_whitespace(content)
-        norm_content_lines = norm_content.split('\n')
+        norm_content_lines = norm_content.split("\n")
 
-        if len(norm_content_lines) < len(norm_search.split('\n')):
+        if len(norm_content_lines) < len(norm_search.split("\n")):
             return 0.0, ""
 
         max_similarity = 0.0
         best_match = ""
-        search_line_count = len(norm_search.split('\n'))
+        search_line_count = len(norm_search.split("\n"))
 
         for i in range(len(norm_content_lines) - search_line_count + 1):
-            candidate_norm = '\n'.join(norm_content_lines[i:i+search_line_count])
+            candidate_norm = "\n".join(norm_content_lines[i : i + search_line_count])
             similarity = difflib.SequenceMatcher(None, norm_search, candidate_norm).ratio()
 
             if similarity > max_similarity:
                 max_similarity = similarity
                 # Get original lines (non-normalized) for accurate replacement
-                best_match = '\n'.join(content_lines[i:i+search_line_count])
+                best_match = "\n".join(content_lines[i : i + search_line_count])
 
         return max_similarity, best_match
 
@@ -308,4 +308,4 @@ class ReplaceInFileTool(Tool):
 
 if __name__ == "__main__":
     tool = ReplaceInFileTool()
-    print(tool.to_markdown())
+    print(tool.to_markdown())
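
The `find_similar_match` hunks above are formatting-only, but the method they touch is a sliding-window fuzzy match: compare the search block against every same-length run of lines and keep the best difflib ratio. A simplified standalone sketch of that idea, without the tool's whitespace normalization; `best_line_block` is an illustrative name, and 0.85 mirrors the SIMILARITY_THRESHOLD above:

import difflib


def best_line_block(search: str, content: str) -> tuple[float, str]:
    """Return the most similar run of lines in content and its similarity to search."""
    search_lines = search.split("\n")
    content_lines = content.split("\n")
    if len(content_lines) < len(search_lines):
        return 0.0, ""

    window = len(search_lines)
    best_ratio, best_match = 0.0, ""
    for i in range(len(content_lines) - window + 1):
        candidate = "\n".join(content_lines[i : i + window])
        ratio = difflib.SequenceMatcher(None, search, candidate).ratio()
        if ratio > best_ratio:
            best_ratio, best_match = ratio, candidate
    return best_ratio, best_match


score, block = best_line_block("def f(x):\n    return x + 1", "# helper\ndef f(x):\n    return x+1\n\nprint(f(2))")
print(round(score, 2), repr(block))  # accept the match only when score exceeds the 0.85 threshold
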
@@ -29,17 +29,12 @@ class SafePythonInterpreterTool(Tool):
     A tool to safely execute Python code while only allowing a specific set
     of modules as defined in `allowed_modules`.
     """
+
     # Allowed modules must be provided during initialization.
-    allowed_modules: List[str] = Field(
-        ...,
-        description="List of Python module names allowed for code execution."
-    )
+    allowed_modules: List[str] = Field(..., description="List of Python module names allowed for code execution.")
     # Additional fields to support the Tool API.
     code: str | None = None  # Provided at runtime via kwargs.
-    time_limit: int = Field(
-        default=60,
-        description="Maximum execution time (in seconds) for running the Python code."
-    )
+    time_limit: int = Field(default=60, description="Maximum execution time (in seconds) for running the Python code.")
     # Define tool arguments so that they appear in the tool's markdown description.
     arguments: list[ToolArgument] = [
         ToolArgument(
@@ -59,7 +54,7 @@ def transform_array(x):
 array_input = np.array([1, 4, 9, 16, 25])
 result = transform_array(array_input)
 result
-""".strip()
+""".strip(),
         ),
         ToolArgument(
             name="time_limit",
@@ -67,8 +62,8 @@ result
             description="The execution timeout (in seconds).",
             required=False,
             default="60",
-            example="60"
-        )
+            example="60",
+        ),
     ]
     name: Literal["safe_python_interpreter"] = "safe_python_interpreter"
     description: str | None = None
@@ -114,18 +109,12 @@ result
         def run_interpreter() -> Any:
             logger.debug("Starting interpretation of code.")
             import ast  # new import for AST processing
+
             # Delegate to monkeypatched interpret_code if available.
             if interpret_code.__module__ != "quantalogic.utils.python_interpreter":
                 return interpret_code(code, self.allowed_modules)
             # Build safe globals with only allowed modules and minimal builtins.
-            safe_globals = {
-                "__builtins__": {
-                    "range": range,
-                    "len": len,
-                    "print": print,
-                    "__import__": __import__
-                }
-            }
+            safe_globals = {"__builtins__": {"range": range, "len": len, "print": print, "__import__": __import__}}
             for mod in self.allowed_modules:
                 safe_globals[mod] = __import__(mod)
             local_vars = {}
@@ -139,10 +128,7 @@ result
             tree = ast.parse(code)
             if tree.body and isinstance(tree.body[-1], ast.Expr):
                 last_expr = tree.body.pop()
-                assign = ast.Assign(
-                    targets=[ast.Name(id="_result", ctx=ast.Store())],
-                    value=last_expr.value
-                )
+                assign = ast.Assign(targets=[ast.Name(id="_result", ctx=ast.Store())], value=last_expr.value)
                 assign = ast.copy_location(assign, last_expr)
                 tree.body.append(assign)
             fixed_tree = ast.fix_missing_locations(tree)
@@ -210,4 +196,4 @@ result
         print("Interpreter Output:")
         print(output)
     except Exception as e:
-        print(f"An error occurred during interpretation: {e}")
+        print(f"An error occurred during interpretation: {e}")
@@ -69,7 +69,7 @@ class SearchDefinitionNames(Tool):
             description="The page number to retrieve (1-based index).",
             required=False,
             example="1",
-            default="1"
+            default="1",
         ),
         ToolArgument(
             name="page_size",
@@ -77,7 +77,7 @@ class SearchDefinitionNames(Tool):
             description="The number of results per page (default: 10).",
             required=False,
             example="10",
-            default="10"
+            default="10",
         ),
     ]
 
@@ -61,9 +61,9 @@ class SequenceTool(Tool):
 wraps the tool calls in a single <sequence> element or is the raw collection of tool calls.
 The tag for each tool call must be one of the authorized tool names.
 Authorized tools: {authorized_list}
-
+
 For each tool call, include the required parameters as sub-elements. The syntax is as follows:
-
+
 <sequence>
 <tool_name>
 <param1>value1</param1>
@@ -72,7 +72,7 @@ class SequenceTool(Tool):
 </tool_name>
 ...
 </sequence>
-
+
 Objective:
 Execute each tool call in order—even if some calls fail—and return an XML report that includes
 the order of execution, provided parameters, execution status (success/failure), and the output or
@@ -149,11 +149,7 @@ class SequenceTool(Tool):
 
         # Try to locate <sequence> elements in the input.
         all_elements = xml_parser._find_all_elements(sequence_xml)
-        sequence_elems = [
-            (tag, content)
-            for tag, content in all_elements
-            if tag.strip().lower() == "sequence"
-        ]
+        sequence_elems = [(tag, content) for tag, content in all_elements if tag.strip().lower() == "sequence"]
         if len(sequence_elems) > 1:
             raise ValueError("Input XML must contain exactly one <sequence> element.")
         elif len(sequence_elems) == 1:
@@ -225,20 +221,15 @@ if __name__ == "__main__":
         def __init__(self, **data: Any):
             data.setdefault("name", "write_file_tool")
             data.setdefault("description", "Writes content to a file at the specified path.")
-            data.setdefault("arguments", [
-                ToolArgument(
-                    name="file_path",
-                    arg_type="string",
-                    required=True,
-                    description="Path to the file"
-                ),
-                ToolArgument(
-                    name="content",
-                    arg_type="string",
-                    required=True,
-                    description="Content to write to the file"
-                )
-            ])
+            data.setdefault(
+                "arguments",
+                [
+                    ToolArgument(name="file_path", arg_type="string", required=True, description="Path to the file"),
+                    ToolArgument(
+                        name="content", arg_type="string", required=True, description="Content to write to the file"
+                    ),
+                ],
+            )
             super().__init__(**data)
 
         def execute(self, **kwargs) -> str:
@@ -282,4 +273,4 @@ And hope finds its own sweet way.
         result_xml = sequence_tool.execute(sequence=xml_sequence)
         print("SequenceTool result:\n", result_xml)
     except Exception as e:
-        print("SequenceTool error:\n", str(e))
+        print("SequenceTool error:\n", str(e))