quantalogic 0.2.25__py3-none-any.whl → 0.2.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/agent.py +5 -1
- quantalogic/coding_agent.py +3 -1
- quantalogic/tools/__init__.py +7 -1
- quantalogic/tools/generate_database_report_tool.py +52 -0
- quantalogic/tools/grep_app_tool.py +499 -0
- quantalogic/tools/llm_tool.py +0 -1
- quantalogic/tools/sql_query_tool.py +167 -0
- quantalogic/tools/utils/__init__.py +13 -0
- quantalogic/tools/utils/create_sample_database.py +124 -0
- quantalogic/tools/utils/generate_database_report.py +289 -0
- {quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/METADATA +6 -2
- {quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/RECORD +15 -9
- {quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/LICENSE +0 -0
- {quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/WHEEL +0 -0
- {quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/entry_points.txt +0 -0
quantalogic/agent.py
CHANGED
@@ -164,7 +164,11 @@ class Agent(BaseModel):
         self.task_to_solve_summary = self._generate_task_summary(task)

         # Add system prompt to memory
-        self.memory.add(Message(role="system", content=self.config.system_prompt))
+        # Check if system prompt is already in memory
+        # if not add it
+        # The system message is always the first message in memory
+        if not self.memory.memory or self.memory.memory[0].role != "system":
+            self.memory.add(Message(role="system", content=self.config.system_prompt))

         self._emit_event(
             "session_start",
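The guard makes system-prompt insertion idempotent: when the same agent memory survives across tasks, the system message is added once and stays first. A minimal sketch of the effect, with Memory and Message reduced to the attributes the diff relies on (not the full quantalogic classes):

```python
class Message:
    def __init__(self, role: str, content: str):
        self.role, self.content = role, content

class Memory:
    def __init__(self):
        self.memory = []

    def add(self, message: Message) -> None:
        self.memory.append(message)

memory = Memory()
for _ in range(3):  # e.g. solve_task called three times on one agent
    if not memory.memory or memory.memory[0].role != "system":
        memory.add(Message(role="system", content="You are an agent."))

# The system message is added exactly once and remains the first entry.
assert sum(m.role == "system" for m in memory.memory) == 1
```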
quantalogic/coding_agent.py
CHANGED
@@ -4,6 +4,7 @@ from quantalogic.tools import (
     DuckDuckGoSearchTool,
     EditWholeContentTool,
     ExecuteBashCommandTool,
+    GrepAppTool,
     InputQuestionTool,
     JinjaTool,
     ListDirectoryTool,
@@ -75,7 +76,8 @@ def create_coding_agent(
         InputQuestionTool(),
         DuckDuckGoSearchTool(),
         JinjaTool(),
-        ReadHTMLTool()
+        ReadHTMLTool(),
+        GrepAppTool()
     ]

     if vision_model_name:
quantalogic/tools/__init__.py
CHANGED
@@ -7,6 +7,8 @@ from .duckduckgo_search_tool import DuckDuckGoSearchTool
 from .edit_whole_content_tool import EditWholeContentTool
 from .elixir_tool import ElixirTool
 from .execute_bash_command_tool import ExecuteBashCommandTool
+from .generate_database_report_tool import GenerateDatabaseReportTool
+from .grep_app_tool import GrepAppTool
 from .input_question_tool import InputQuestionTool
 from .jinja_tool import JinjaTool
 from .list_directory_tool import ListDirectoryTool
@@ -22,6 +24,7 @@ from .replace_in_file_tool import ReplaceInFileTool
 from .ripgrep_tool import RipgrepTool
 from .search_definition_names import SearchDefinitionNames
 from .serpapi_search_tool import SerpApiSearchTool
+from .sql_query_tool import SQLQueryTool
 from .task_complete_tool import TaskCompleteTool
 from .tool import Tool, ToolArgument
 from .unified_diff_tool import UnifiedDiffTool
@@ -56,5 +59,8 @@ __all__ = [
     "EditWholeContentTool",
     "JinjaTool",
     "LLMImageGenerationTool",
-    "ReadHTMLTool"
+    "ReadHTMLTool",
+    "GrepAppTool",
+    "GenerateDatabaseReportTool",
+    'SQLQueryTool'
 ]
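After this change the three new tools are importable straight from the package root alongside the existing ones:

```python
from quantalogic.tools import GenerateDatabaseReportTool, GrepAppTool, SQLQueryTool
```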
quantalogic/tools/generate_database_report_tool.py
ADDED
@@ -0,0 +1,52 @@
+"""Tool for generating comprehensive database documentation reports."""
+
+from pydantic import Field, ValidationError
+
+from quantalogic.tools.tool import Tool
+from quantalogic.tools.utils.generate_database_report import generate_database_report
+
+
+class GenerateDatabaseReportTool(Tool):
+    """Tool for generating database documentation reports from a connection string."""
+
+    name: str = "generate_database_report_tool"
+    description: str = (
+        "Generates a comprehensive Markdown database documentation report with ER diagram. "
+    )
+    arguments: list = []  # No execution arguments - connection string is configured during tool setup
+    connection_string: str = Field(
+        ...,
+        description="SQLAlchemy-compatible database connection string (e.g., 'sqlite:///database.db')",
+        example="postgresql://user:password@localhost/mydatabase"
+    )
+
+    def execute(self) -> str:
+        """Generates a database documentation report using the configured connection string.
+
+        Returns:
+            str: Markdown-formatted database report
+
+        Raises:
+            ValueError: For invalid connection strings or database connection errors
+            RuntimeError: For errors during report generation
+        """
+        try:
+            return generate_database_report(self.connection_string)
+        except ValidationError as e:
+            raise ValueError(f"Invalid connection configuration: {e}") from e
+        except Exception as e:
+            raise RuntimeError(f"Database report generation failed: {e}") from e
+
+
+if __name__ == "__main__":
+
+    from quantalogic.tools.utils.create_sample_database import create_sample_database
+
+    # Create and document sample database
+    create_sample_database("sample.db")
+
+    # Example usage
+    tool = GenerateDatabaseReportTool(
+        connection_string="sqlite:///sample.db"
+    )
+    print(tool.execute())
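Because `arguments` is empty, the connection string is bound at construction time and `execute()` takes no per-call parameters. A sketch against another backend; the Postgres URL mirrors the `example` in the Field definition and is illustrative only:

```python
from quantalogic.tools import GenerateDatabaseReportTool

# Illustrative URL; any SQLAlchemy-compatible connection string works here.
tool = GenerateDatabaseReportTool(
    connection_string="postgresql://user:password@localhost/mydatabase"
)
report_markdown = tool.execute()  # no per-call arguments
```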
quantalogic/tools/grep_app_tool.py
ADDED
@@ -0,0 +1,499 @@
+# quantalogic/tools/grep_app_tool.py
+
+import random
+import sys
+import time
+from typing import Any, ClassVar, Dict, Optional, Union
+
+import requests
+from loguru import logger
+from pydantic import BaseModel, Field, ValidationError, model_validator
+
+from quantalogic.tools.tool import Tool, ToolArgument
+
+# Configurable User Agents
+USER_AGENTS = [
+    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
+    "Chrome/91.0.4472.124 Safari/537.36",
+    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) "
+    "Chrome/91.0.4472.124 Safari/537.36",
+    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
+    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0",
+    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) "
+    "Version/14.1.1 Safari/605.1.15"
+]
+
+class SearchError(Exception):
+    """Custom exception for search-related errors"""
+    pass
+
+class GrepAppArguments(BaseModel):
+    """Pydantic model for grep.app search arguments"""
+    search_query: str = Field(
+        ...,
+        description="GitHub Code search using simple keyword or regular expression",
+        example="code2prompt"
+    )
+    repository: Optional[str] = Field(
+        None,
+        description="Filter by repository (e.g. user/repo)",
+        example="quantalogic/quantalogic",
+    )
+    page: int = Field(
+        1,
+        description="Results page number",
+        ge=1
+    )
+    per_page: int = Field(
+        10,
+        description="Number of results per page",
+        ge=1,
+        le=100
+    )
+    regexp: bool = Field(
+        False,
+        description="Enable regular expression search"
+    )
+    case: bool = Field(
+        False,
+        description="Enable case-sensitive search"
+    )
+    words: bool = Field(
+        False,
+        description="Match whole words only"
+    )
+
+    @model_validator(mode='before')
+    @classmethod
+    def convert_types(cls, data: Dict[str, Any]) -> Dict[str, Any]:
+        """Convert input types before validation"""
+        # Convert string numbers to integers
+        for field in ['page', 'per_page']:
+            if field in data and isinstance(data[field], str):
+                try:
+                    data[field] = int(data[field])
+                except ValueError:
+                    raise ValueError(f"{field} must be a valid integer")
+
+        # Convert various string representations to booleans
+        for field in ['regexp', 'case', 'words']:
+            if field in data:
+                if isinstance(data[field], str):
+                    data[field] = data[field].lower() in ['true', '1', 'yes', 'on']
+
+        return data
+
+    @model_validator(mode='after')
+    def validate_search_query(self) -> 'GrepAppArguments':
+        """Validate search query is not empty and has reasonable length"""
+        if not self.search_query or not self.search_query.strip():
+            raise ValueError("Search query cannot be empty")
+        if len(self.search_query) > 500:  # Reasonable limit for search query
+            raise ValueError("Search query is too long (max 500 characters)")
+        return self
+
+class GrepAppTool(Tool):
+    """Tool for searching GitHub code via grep.app API"""
+
+    BASE_URL: ClassVar[str] = "https://grep.app/api/search"
+    TIMEOUT: ClassVar[int] = 10
+
+    def __init__(self):
+        super().__init__(
+            name="grep_app_tool",
+            description="Searches GitHub code using grep.app API. Returns code matches with metadata."
+        )
+        self.arguments = [
+            ToolArgument(
+                name="search_query",
+                arg_type="string",
+                description="Search query using grep.app syntax",
+                required=True
+            ),
+            ToolArgument(
+                name="repository",
+                arg_type="string",
+                description="Filter by repository",
+                required=False
+            ),
+            ToolArgument(
+                name="page",
+                arg_type="int",
+                description="Pagination page number",
+                default="1",
+                required=False
+            ),
+            ToolArgument(
+                name="per_page",
+                arg_type="int",
+                description="Results per page",
+                default="10",
+                required=False
+            ),
+            ToolArgument(
+                name="regexp",
+                arg_type="boolean",
+                description="Enable regular expression search",
+                default="False",
+                required=False
+            ),
+            ToolArgument(
+                name="case",
+                arg_type="boolean",
+                description="Enable case-sensitive search",
+                default="False",
+                required=False
+            ),
+            ToolArgument(
+                name="words",
+                arg_type="boolean",
+                description="Match whole words only",
+                default="False",
+                required=False
+            )
+        ]
+
+    def _build_headers(self) -> Dict[str, str]:
+        """Build request headers with random User-Agent"""
+        headers = {
+            "User-Agent": random.choice(USER_AGENTS),
+            "Accept": "application/json",
+            "Accept-Language": "en-US,en;q=0.5",
+            "DNT": "1"
+        }
+        logger.debug(f"Built headers: {headers}")
+        return headers
+
+    def _build_params(self, args: GrepAppArguments) -> Dict[str, Any]:
+        """Build request parameters from arguments"""
+        params = {
+            "q": args.search_query,
+            "page": args.page,
+            "per_page": args.per_page
+        }
+        if args.repository:
+            params["filter[repo][0]"] = args.repository
+        if args.regexp:
+            params["regexp"] = "true"
+        if args.case:
+            params["case"] = "true"
+        if args.words:
+            params["words"] = "true"
+        logger.debug(f"Built params: {params}")
+        return params
+
+    def _make_request(self, params: Dict[str, Any], headers: Dict[str, str]) -> Dict[str, Any]:
+        """Make the API request"""
+        logger.info("Making API request to grep.app")
+        response = requests.get(
+            self.BASE_URL,
+            params=params,
+            headers=headers,
+            timeout=self.TIMEOUT
+        )
+        logger.debug(f"API Response Status Code: {response.status_code}")
+        response.raise_for_status()
+        data = response.json()
+        if not isinstance(data, dict):
+            raise SearchError("Invalid response format from API")
+        logger.debug(f"API Response Data: {data}")
+        return data
+
+    def execute(self,
+                search_query: str,
+                repository: Optional[str] = None,
+                page: Union[int, str] = 1,
+                per_page: Union[int, str] = 10,
+                regexp: bool = False,
+                case: bool = False,
+                words: bool = False,
+                skip_delay: bool = False) -> str:
+        """Execute grep.app API search with pagination and return formatted results as a string"""
+        try:
+            # Validate and convert arguments
+            args = GrepAppArguments(
+                search_query=search_query,
+                repository=repository,
+                page=int(page),
+                per_page=int(per_page),
+                regexp=regexp,
+                case=case,
+                words=words
+            )
+
+            logger.info(f"Executing search: '{args.search_query}'")
+            logger.debug(f"Search parameters: {args.model_dump()}")
+
+            # Add random delay to mimic human behavior (unless skipped for testing)
+            if not skip_delay:
+                delay = random.uniform(0.5, 1.5)
+                logger.debug(f"Sleeping for {delay:.2f} seconds to mimic human behavior")
+                time.sleep(delay)
+
+            # Make API request
+            headers = self._build_headers()
+            params = self._build_params(args)
+            results = self._make_request(params, headers)
+
+            # Format and return results
+            return self._format_results(results)
+
+        except ValidationError as e:
+            logger.error(f"Validation error: {e}")
+            return self._format_error("Validation Error", str(e))
+        except requests.RequestException as e:
+            logger.error(f"API request failed: {e}")
+            return self._format_error(
+                "API Error",
+                str(e),
+                {"Request URL": getattr(e.response, 'url', 'N/A') if hasattr(e, 'response') else 'N/A'}
+            )
+        except SearchError as e:
+            logger.error(f"Search error: {e}")
+            return self._format_error("Search Error", str(e))
+        except Exception as e:
+            logger.error(f"Unexpected error: {e}")
+            return self._format_error("Unexpected Error", str(e))
+
+    def _format_results(self, data: Dict[str, Any]) -> str:
+        """Format API results into a structured Markdown string"""
+        query = data.get('query', '')
+        total_results = data.get('hits', {}).get('total', 0)
+        hits = data.get("hits", {}).get("hits", [])
+
+        output = [
+            "# 🔍 Search Results",
+            "",
+            f"**Query:** `{query if query else '<empty>'}` • **Found:** {total_results} matches",
+            ""
+        ]
+
+        if not hits:
+            output.append("> No matches found for your search query.")
+        else:
+            for idx, result in enumerate(hits, 1):
+                repo = result.get('repo', {}).get('raw', 'N/A')
+                file_path = result.get('path', {}).get('raw', 'N/A')
+                language = result.get('language', 'N/A').lower()
+                content = result.get("content", {})
+
+                # Extract the actual code and line info
+                snippet = content.get("snippet", "")
+                line_num = content.get("line", "")
+
+                # Clean up the snippet
+                import re
+                clean_snippet = re.sub(r'<[^>]+>', '', snippet)
+                clean_snippet = re.sub(r'&quot;', '"', clean_snippet)
+                clean_snippet = re.sub(r'&lt;', '<', clean_snippet)
+                clean_snippet = re.sub(r'&gt;', '>', clean_snippet)
+                clean_snippet = clean_snippet.strip()
+
+                # Split into lines and clean each line
+                raw_lines = clean_snippet.split('\n')
+                lines = []
+                current_line_num = int(line_num) if line_num else 1
+
+                # First pass: collect all lines and their content
+                for line in raw_lines:
+                    # Remove excess whitespace but preserve indentation
+                    stripped = line.rstrip()
+                    if not stripped:
+                        lines.append(('', current_line_num))
+                        current_line_num += 1
+                        continue
+
+                    # Remove duplicate indentation
+                    if stripped.startswith('    '):
+                        stripped = stripped[4:]
+
+                    # Handle URLs that might be split across lines
+                    if stripped.startswith(('prompt', '-working')):
+                        if lines and lines[-1][0].endswith('/'):
+                            # Combine with previous line
+                            prev_content, prev_num = lines.pop()
+                            lines.append((prev_content + stripped, prev_num))
+                            continue
+
+                    # Handle concatenated lines by looking for line numbers
+                    line_parts = re.split(r'(\d+)(?=\s*[^\d])', stripped)
+                    if len(line_parts) > 1:
+                        # Process each part that might be a new line
+                        for i in range(0, len(line_parts)-1, 2):
+                            prefix = line_parts[i].rstrip()
+                            if prefix:
+                                if not any(l[0] == prefix for l in lines):  # Avoid duplicates
+                                    lines.append((prefix, current_line_num))
+
+                            # Update line number if found
+                            try:
+                                current_line_num = int(line_parts[i+1])
+                            except ValueError:
+                                current_line_num += 1
+
+                            # Add the content after the line number
+                            if i+2 < len(line_parts):
+                                content = line_parts[i+2].lstrip()
+                                if content and not any(l[0] == content for l in lines):  # Avoid duplicates
+                                    lines.append((content, current_line_num))
+                    else:
+                        if not any(l[0] == stripped for l in lines):  # Avoid duplicates
+                            lines.append((stripped, current_line_num))
+                            current_line_num += 1
+
+                # Format line numbers and code
+                formatted_lines = []
+                max_line_width = len(str(max(line[1] for line in lines))) if lines else 3
+
+                # Second pass: format each line
+                for line_content, line_no in lines:
+                    if not line_content:  # Empty line
+                        formatted_lines.append('')
+                        continue
+
+                    # Special handling for markdown badges and links
+                    if '[![' in line_content or '[!' in line_content:
+                        badges = re.findall(r'(\[!\[.*?\]\(.*?\)\]\(.*?\))', line_content)
+                        if badges:
+                            for badge in badges:
+                                if not any(badge in l for l in formatted_lines):  # Avoid duplicates
+                                    formatted_lines.append(f"{str(line_no).rjust(max_line_width)} │ {badge}")
+                            continue
+
+                    # Add syntax highlighting for comments
+                    if line_content.lstrip().startswith(('// ', '# ', '/* ', '* ', '*/')):
+                        line_str = f"{str(line_no).rjust(max_line_width)} │ <dim>{line_content}</dim>"
+                        if not any(line_str in l for l in formatted_lines):  # Avoid duplicates
+                            formatted_lines.append(line_str)
+                    else:
+                        # Split line into indentation and content for better formatting
+                        indent = len(line_content) - len(line_content.lstrip())
+                        indentation = line_content[:indent]
+                        content = line_content[indent:]
+
+                        # Highlight strings and special syntax
+                        content = re.sub(r'(["\'])(.*?)\1', r'<str>\1\2\1</str>', content)
+                        content = re.sub(r'\b(function|const|let|var|import|export|class|interface|type|enum)\b',
+                                         r'<keyword>\1</keyword>', content)
+
+                        line_str = f"{str(line_no).rjust(max_line_width)} │ {indentation}{content}"
+                        if not any(line_str in l for l in formatted_lines):  # Avoid duplicates
+                            formatted_lines.append(line_str)
+
+                # Truncate if too long and add line count
+                if len(formatted_lines) > 5:
+                    remaining = len(formatted_lines) - 5
+                    formatted_lines = formatted_lines[:5]
+                    if remaining > 0:
+                        formatted_lines.append(f" ┆ {remaining} more line{'s' if remaining > 1 else ''}")
+
+                clean_snippet = '\n'.join(formatted_lines)
+
+                # Format the repository link to be clickable
+                if '/' in repo:
+                    repo_link = f"[`{repo}`](https://github.com/{repo})"
+                else:
+                    repo_link = f"`{repo}`"
+
+                # Determine the best language display and icon
+                lang_display = language if language != 'n/a' else ''
+                lang_icon = {
+                    'python': '🐍',
+                    'typescript': '📘',
+                    'javascript': '📒',
+                    'markdown': '📝',
+                    'toml': '⚙️',
+                    'yaml': '📋',
+                    'json': '📦',
+                    'shell': '🐚',
+                    'rust': '🦀',
+                    'go': '🔵',
+                    'java': '☕',
+                    'ruby': '💎',
+                }.get(lang_display, '📄')
+
+                # Format file path with language icon and line info
+                file_info = [f"{lang_icon} `{file_path}`"]
+                if line_num:
+                    file_info.append(f"Line {line_num}")
+
+                output.extend([
+                    f"### {repo_link}",
+                    " • ".join(file_info),
+                    "```",
+                    clean_snippet,
+                    "```",
+                    ""
+                ])
+
+        return "\n".join(filter(None, output))
+
+    def _format_error(self, error_type: str, message: str, additional_info: Dict[str, str] = None) -> str:
+        """Format error messages consistently using Markdown"""
+        output = [
+            f"## {error_type}",
+            f"**Message:** {message}"
+        ]
+
+        if additional_info:
+            output.append("**Additional Information:**")
+            for key, value in additional_info.items():
+                output.append(f"- **{key}:** {value}")
+
+        output.append(f"## End {error_type}")
+        return "\n\n".join(output)
+
+if __name__ == "__main__":
+    # Configure logger
+    logger.remove()  # Remove default handlers
+    logger.add(sys.stderr, level="INFO", format="<green>{time}</green> <level>{message}</level>")
+
+    logger.info("Starting GrepAppTool test cases")
+    tool = GrepAppTool()
+
+    test_cases = [
+        {
+            "name": "Python __init__ Methods Search",
+            "args": {
+                "search_query": "lang:python def __init__",
+                "per_page": 5,
+                "skip_delay": True  # Skip delay for testing
+            }
+        },
+        {
+            "name": "Logging Patterns Search",
+            "args": {
+                "search_query": "logger",
+                "per_page": 3,
+                "skip_delay": True
+            }
+        },
+        {
+            "name": "Repository-Specific Search",
+            "args": {
+                "search_query": "def",
+                "repository": "quantalogic/quantalogic",
+                "per_page": 5,
+                "words": True,
+                "skip_delay": True
+            }
+        },
+        {
+            "name": "Raphaël MANSUY",
+            "args": {
+                "search_query": "raphaelmansuy",
+                "per_page": 3,
+                "skip_delay": True
+            }
+        }
+    ]
+
+    for test in test_cases:
+        try:
+            logger.info(f"Running test: {test['name']}")
+            logger.info(f"Executing with arguments: {test['args']}")
+            result = tool.execute(**test['args'])
+            print(f"\n### Test: {test['name']}\n{result}\n")
+            time.sleep(1)  # Add a small delay between tests to avoid rate limiting
+        except Exception as e:
+            logger.error(f"{test['name']} Failed: {e}", exc_info=True)
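For reference, the raw request the tool assembles can be reproduced directly; the endpoint, parameter names, and response shape below are taken from `_build_params`, `_make_request`, and `_format_results` above:

```python
import requests

params = {
    "q": "def __init__",                           # search_query
    "page": 1,
    "per_page": 5,
    "filter[repo][0]": "quantalogic/quantalogic",  # optional repository filter
    "words": "true",                               # whole-word matching
}
resp = requests.get("https://grep.app/api/search", params=params, timeout=10)
resp.raise_for_status()
for hit in resp.json().get("hits", {}).get("hits", []):
    print(hit.get("repo", {}).get("raw"), hit.get("path", {}).get("raw"))
```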
quantalogic/tools/llm_tool.py
CHANGED

quantalogic/tools/sql_query_tool.py
ADDED
@@ -0,0 +1,167 @@
+"""Tool for executing SQL queries and returning paginated results in markdown format."""
+
+from typing import Any, Dict, List
+
+from pydantic import Field, ValidationError
+from sqlalchemy import create_engine, text
+from sqlalchemy.exc import SQLAlchemyError
+
+from quantalogic.tools.tool import Tool, ToolArgument
+
+
+class SQLQueryTool(Tool):
+    """Tool for executing SQL queries and returning paginated results in markdown format."""
+
+    name: str = "sql_query_tool"
+    description: str = (
+        "Executes a SQL query and returns results in markdown table format "
+        "with pagination support. Results are truncated based on start/end row numbers."
+    )
+    arguments: list = [
+        ToolArgument(
+            name="query",
+            arg_type="string",
+            description="The SQL query to execute",
+            required=True,
+            example="SELECT * FROM customers WHERE country = 'France'"
+        ),
+        ToolArgument(
+            name="start_row",
+            arg_type="int",
+            description="1-based starting row number for results",
+            required=True,
+            example="1"
+        ),
+        ToolArgument(
+            name="end_row",
+            arg_type="int",
+            description="1-based ending row number for results",
+            required=True,
+            example="100"
+        ),
+    ]
+    connection_string: str = Field(
+        ...,
+        description="SQLAlchemy-compatible database connection string",
+        example="postgresql://user:password@localhost/mydb"
+    )
+
+    def execute(self, query: str, start_row: Any, end_row: Any) -> str:
+        """
+        Executes a SQL query and returns formatted results.
+
+        Args:
+            query: SQL query to execute
+            start_row: 1-based starting row number (supports various numeric types)
+            end_row: 1-based ending row number (supports various numeric types)
+
+        Returns:
+            str: Markdown-formatted results with pagination metadata
+
+        Raises:
+            ValueError: For invalid parameters or query errors
+            RuntimeError: For database connection issues
+        """
+        try:
+            # Convert and validate row numbers
+            start = self._convert_row_number(start_row, "start_row")
+            end = self._convert_row_number(end_row, "end_row")
+
+            if start > end:
+                raise ValueError(f"start_row ({start}) must be <= end_row ({end})")
+
+            # Execute query
+            engine = create_engine(self.connection_string)
+            with engine.connect() as conn:
+                result = conn.execute(text(query))
+                columns: List[str] = result.keys()
+                all_rows: List[Dict] = [dict(row._mapping) for row in result]
+
+            # Apply pagination
+            total_rows = len(all_rows)
+            actual_start = max(1, start)
+            actual_end = min(end, total_rows)
+
+            if actual_start > total_rows:
+                return f"No results found (total rows: {total_rows})"
+
+            # Slice results (convert to 0-based index)
+            displayed_rows = all_rows[actual_start-1:actual_end]
+
+            # Format results
+            markdown = [
+                f"**Query Results:** `{actual_start}-{actual_end}` of `{total_rows}` rows",
+                self._format_table(columns, displayed_rows)
+            ]
+
+            # Add pagination notice
+            if actual_end < total_rows:
+                remaining = total_rows - actual_end
+                markdown.append(f"\n*Showing first {actual_end} rows - {remaining} more row{'s' if remaining > 1 else ''} available*")
+
+            return "\n".join(markdown)
+
+        except SQLAlchemyError as e:
+            raise ValueError(f"SQL Error: {str(e)}") from e
+        except ValidationError as e:
+            raise ValueError(f"Validation Error: {str(e)}") from e
+        except Exception as e:
+            raise RuntimeError(f"Database Error: {str(e)}") from e
+
+    def _convert_row_number(self, value: Any, field_name: str) -> int:
+        """Convert and validate row number input."""
+        try:
+            # Handle numeric strings and floats
+            if isinstance(value, str):
+                if "." in value:
+                    num = float(value)
+                else:
+                    num = int(value)
+            else:
+                num = value
+
+            converted = int(num)
+            if converted != num:  # Check if float had decimal part
+                raise ValueError("Decimal values are not allowed for row numbers")
+
+            if converted <= 0:
+                raise ValueError(f"{field_name} must be a positive integer")
+
+            return converted
+        except (ValueError, TypeError) as e:
+            raise ValueError(f"Invalid value for {field_name}: {repr(value)}") from e
+
+    def _format_table(self, columns: List[str], rows: List[Dict]) -> str:
+        """Format results as markdown table with truncation."""
+        if not rows:
+            return "No results found"
+
+        # Create header
+        header = "| " + " | ".join(columns) + " |"
+        separator = "| " + " | ".join(["---"] * len(columns)) + " |"
+
+        # Create rows with truncation
+        body = []
+        for row in rows:
+            values = []
+            for col in columns:
+                val = str(row.get(col, ""))
+                # Truncate long values
+                values.append(val[:50] + "..." if len(val) > 50 else val)
+            body.append("| " + " | ".join(values) + " |")
+
+        return "\n".join([header, separator] + body)
+
+
+
+if __name__ == "__main__":
+    from quantalogic.tools.utils.create_sample_database import create_sample_database
+
+    # Create and document sample database
+    create_sample_database("sample.db")
+    tool = SQLQueryTool(connection_string="sqlite:///sample.db")
+    print(tool.execute("select * from customers", 1, 10))
+    print(tool.execute("select * from customers", 11, 20))
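A minimal pagination sketch using the sample-database helper shipped in the same release; row numbers are 1-based per the docstring, and rows beyond `end_row` are summarized in a footer:

```python
from quantalogic.tools import SQLQueryTool
from quantalogic.tools.utils import create_sample_database

create_sample_database("sample.db")
tool = SQLQueryTool(connection_string="sqlite:///sample.db")

# Rows 1-5 of the 10 sample customers; a footer notes the 5 rows remaining.
print(tool.execute("SELECT id, name FROM customers", start_row=1, end_row=5))
```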
quantalogic/tools/utils/__init__.py
ADDED
@@ -0,0 +1,13 @@
+"""
+Utility functions and classes for quantalogic tools.
+
+This module provides common utility functions used across the quantalogic package.
+"""
+
+from .create_sample_database import create_sample_database
+from .generate_database_report import generate_database_report
+
+__all__ = [
+    'create_sample_database',
+    'generate_database_report'
+]
quantalogic/tools/utils/create_sample_database.py
ADDED
@@ -0,0 +1,124 @@
+import random
+from datetime import datetime, timedelta
+
+from faker import Faker
+from sqlalchemy import Column, Date, Float, ForeignKey, Integer, String, create_engine
+from sqlalchemy.orm import declarative_base, relationship, sessionmaker
+
+Base = declarative_base()
+fake = Faker()
+
+def create_sample_database(db_path: str) -> None:
+    """
+    Creates a sample SQLite database with 5 tables and 10 rows each.
+
+    Args:
+        db_path: Path to the SQLite database file (e.g., 'sample.db')
+    """
+    # Define database schema
+    class Customer(Base):
+        __tablename__ = 'customers'
+        id = Column(Integer, primary_key=True)
+        name = Column(String)
+        email = Column(String)
+        addresses = relationship("Address", back_populates="customer")
+        orders = relationship("Order", back_populates="customer")
+
+    class Address(Base):
+        __tablename__ = 'addresses'
+        id = Column(Integer, primary_key=True)
+        street = Column(String)
+        city = Column(String)
+        customer_id = Column(Integer, ForeignKey('customers.id'))
+        customer = relationship("Customer", back_populates="addresses")
+
+    class Product(Base):
+        __tablename__ = 'products'
+        id = Column(Integer, primary_key=True)
+        name = Column(String)
+        price = Column(Float)
+
+    class Order(Base):
+        __tablename__ = 'orders'
+        id = Column(Integer, primary_key=True)
+        order_date = Column(Date)
+        customer_id = Column(Integer, ForeignKey('customers.id'))
+        customer = relationship("Customer", back_populates="orders")
+        items = relationship("OrderItem", back_populates="order")
+
+    class OrderItem(Base):
+        __tablename__ = 'order_items'
+        id = Column(Integer, primary_key=True)
+        quantity = Column(Integer)
+        order_id = Column(Integer, ForeignKey('orders.id'))
+        product_id = Column(Integer, ForeignKey('products.id'))
+        order = relationship("Order", back_populates="items")
+        product = relationship("Product")
+
+    # Create database and tables
+    engine = create_engine(f'sqlite:///{db_path}')
+    Base.metadata.create_all(engine)
+    Session = sessionmaker(bind=engine)  # noqa: N806
+    session = Session()
+
+    # Generate sample data
+    try:
+        # Create 10 customers
+        customers = []
+        for _ in range(10):
+            customer = Customer(
+                name=fake.name(),
+                email=fake.email()
+            )
+            customers.append(customer)
+            session.add(customer)
+
+        session.commit()
+
+        # Create 10 addresses (1 per customer)
+        for customer in customers:
+            address = Address(
+                street=fake.street_address(),
+                city=fake.city(),
+                customer=customer
+            )
+            session.add(address)
+
+        # Create 10 products
+        products = []
+        for _ in range(10):
+            product = Product(
+                name=fake.word().capitalize(),
+                price=round(random.uniform(10, 1000), 2)
+            )
+            products.append(product)
+            session.add(product)
+
+        # Create 10 orders (1 per customer)
+        orders = []
+        start_date = datetime.now() - timedelta(days=365)
+        for customer in customers:
+            order = Order(
+                order_date=fake.date_between(start_date=start_date),
+                customer=customer
+            )
+            orders.append(order)
+            session.add(order)
+
+        # Create 10 order items (1 per order)
+        for order in orders:
+            order_item = OrderItem(
+                quantity=random.randint(1, 5),
+                order=order,
+                product=random.choice(products)
+            )
+            session.add(order_item)
+
+        session.commit()
+    finally:
+        session.close()
+
+# Example usage
+if __name__ == "__main__":
+    create_sample_database("sample.db")
+    print("Sample database created successfully!")
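A quick standard-library check of what the helper produces; per the docstring, each of the five tables receives ten rows:

```python
import sqlite3

from quantalogic.tools.utils import create_sample_database

create_sample_database("sample.db")
conn = sqlite3.connect("sample.db")
for table in ("customers", "addresses", "products", "orders", "order_items"):
    count = conn.execute(f"SELECT COUNT(*) FROM {table}").fetchone()[0]
    print(table, count)  # expect 10 rows per table
conn.close()
```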
quantalogic/tools/utils/generate_database_report.py
ADDED
@@ -0,0 +1,289 @@
+from datetime import UTC, datetime
+from typing import Dict, List
+
+import networkx as nx
+from sqlalchemy import create_engine, inspect, text
+from sqlalchemy.engine import Inspector
+
+
+def generate_database_report(connection_string: str) -> str:
+    """
+    Generates a comprehensive Markdown database documentation report with ER diagram.
+
+    Args:
+        connection_string: SQLAlchemy-compatible database connection string
+
+    Returns:
+        Markdown-formatted report as a string
+    """
+    # Setup database connection and inspection
+    engine = create_engine(connection_string)
+    inspector = inspect(engine)
+
+    # Collect database metadata
+    db_metadata = {
+        'name': engine.url.database,
+        'dialect': engine.dialect.name,
+        'tables': inspector.get_table_names()
+    }
+
+    # Initialize data structures
+    graph = nx.DiGraph()
+    table_metadata: Dict[str, dict] = {}
+    fk_relationships: List[dict] = []
+    sampled_ids: Dict[str, list] = {}
+    sample_data: Dict[str, list] = {}
+
+    # Collect schema metadata and relationships
+    for table in db_metadata['tables']:
+        columns = inspector.get_columns(table)
+        pk = inspector.get_pk_constraint(table).get('constrained_columns', [])
+        indexes = inspector.get_indexes(table)
+        fks = inspector.get_foreign_keys(table)
+
+        # Process foreign keys
+        for fk in fks:
+            process_foreign_key(table, fk, inspector, graph, fk_relationships)
+
+        table_metadata[table] = {
+            'columns': columns,
+            'primary_keys': pk,
+            'indexes': indexes,
+            'foreign_keys': fks
+        }
+
+    # Process tables in dependency order
+    sorted_tables = get_sorted_tables(graph, db_metadata['tables'])
+
+    # Collect sample data with parent-child relationships
+    collect_sample_data(engine, sorted_tables, table_metadata, sample_data, sampled_ids)
+
+    # Generate Markdown report
+    return generate_markdown_report(db_metadata, sorted_tables, table_metadata,
+                                    fk_relationships, sample_data)
+
+
+def process_foreign_key(
+    table: str,
+    fk: dict,
+    inspector: Inspector,
+    graph: nx.DiGraph,
+    fk_relationships: List[dict]
+) -> None:
+    """Process and record foreign key relationships with cardinality information."""
+    src_col = fk['constrained_columns'][0]
+    tgt_table = fk['referred_table']
+    tgt_col = fk['referred_columns'][0]
+
+    # Check uniqueness and nullability in source column
+    src_columns = inspector.get_columns(table)
+    src_col_meta = next(c for c in src_columns if c['name'] == src_col)
+    is_unique = src_col_meta.get('unique', False) or src_col in inspector.get_pk_constraint(table).get('constrained_columns', [])
+    is_nullable = src_col_meta['nullable']
+
+    fk_relationships.append({
+        'source_table': table,
+        'source_column': src_col,
+        'target_table': tgt_table,
+        'target_column': tgt_col,
+        'constraint_name': fk['name'],
+        'is_unique': is_unique,
+        'is_nullable': is_nullable
+    })
+    graph.add_edge(table, tgt_table)
+
+
+def get_sorted_tables(graph: nx.DiGraph, tables: List[str]) -> List[str]:
+    """Return tables sorted topologically with fallback to original order."""
+    try:
+        return list(nx.topological_sort(graph))
+    except nx.NetworkXUnfeasible:
+        return tables
+
+
+def collect_sample_data(
+    engine,
+    tables: List[str],
+    table_metadata: Dict[str, dict],
+    sample_data: Dict[str, list],
+    sampled_ids: Dict[str, list]
+) -> None:
+    """Collect sample data while maintaining referential integrity."""
+    for table in tables:
+        with engine.connect() as conn:
+            # Get parent samples
+            result = conn.execute(text(f"SELECT * FROM {table} LIMIT 5"))
+            samples = [dict(row._mapping) for row in result]
+            sample_data[table] = samples
+
+            # Store IDs for child sampling
+            if samples and table_metadata[table]['primary_keys']:
+                pk_col = table_metadata[table]['primary_keys'][0]
+                sampled_ids[table] = [row[pk_col] for row in samples]
+
+
+def generate_markdown_report(
+    db_metadata: dict,
+    tables: List[str],
+    table_metadata: Dict[str, dict],
+    fk_relationships: List[dict],
+    sample_data: Dict[str, list]
+) -> str:
+    """Generate the complete Markdown report."""
+    md = []
+
+    # Database Summary
+    md.append("# Database Documentation Report\n")
+    md.append(f"**Database Type**: {db_metadata['dialect'].capitalize()}\n")
+    md.append(f"**Database Name**: {db_metadata['name']}\n")
+    md.append(f"**Total Tables**: {len(db_metadata['tables'])}\n")
+    md.append(f"**Generated At**: {datetime.now(UTC).strftime('%Y-%m-%d %H:%M:%S UTC')}\n\n")
+
+    # ERD Section
+    md.append("## Entity Relationship Diagram\n")
+    md.append("```mermaid\nerDiagram\n")
+    generate_erd_section(md, tables, table_metadata, fk_relationships)
+    md.append("```\n\n")
+
+    # Schema Details
+    md.append("## Schema Details\n")
+    for table in tables:
+        meta = table_metadata[table]
+        md.append(f"### {table}\n")
+        generate_columns_section(md, meta)
+        generate_indexes_section(md, meta)
+
+    # Relationships
+    generate_relationships_section(md, fk_relationships)
+
+    # Cardinality Report
+    generate_cardinality_section(md, fk_relationships)
+
+    # Data Samples
+    md.append("## Data Samples\n")
+    for table in tables:
+        samples = sample_data[table]
+        md.append(f"### {table}\n")
+        generate_sample_table(md, samples)
+
+    return '\n'.join(md)
+
+
+def generate_erd_section(md: List[str], tables: List[str], table_metadata: Dict[str, dict], fk_relationships: List[dict]) -> None:
+    """Generate Mermaid ER diagram section."""
+    # Define tables with their columns
+    for table in tables:
+        table_upper = table.upper()
+        md.append(f"    {table_upper} {{\n")
+        for col in table_metadata[table]['columns']:
+            col_type = str(col['type']).split('(')[0].upper()  # Simplify type names
+            annotations = []
+            if col['name'] in table_metadata[table]['primary_keys']:
+                annotations.append("PK")
+            # Check if column is a foreign key
+            for fk in fk_relationships:
+                if fk['source_table'] == table and fk['source_column'] == col['name']:
+                    annotations.append("FK")
+                    break
+            annotation_str = " ".join(annotations)
+            md.append(f"        {col_type} {col['name']} {annotation_str}\n")
+        md.append("    }\n")
+
+    # Define relationships with cardinality
+    for fk in fk_relationships:
+        target_table = fk['target_table'].upper()
+        source_table = fk['source_table'].upper()
+        source_cardinality = get_source_cardinality(fk['is_unique'], fk['is_nullable'])
+        md.append(f"    {target_table} ||--{source_cardinality} {source_table} : \"{fk['constraint_name']}\"\n")
+
+
+def get_source_cardinality(is_unique: bool, is_nullable: bool) -> str:
+    """Determine Mermaid cardinality symbol for source side of relationship."""
+    if is_unique:
+        return "|o" if is_nullable else "||"
+    else:
+        return "o{" if is_nullable else "|{"
+
+
+def generate_relationships_section(md: List[str], fk_relationships: List[dict]) -> None:
+    """Generate foreign key relationships section."""
+    if fk_relationships:
+        md.append("## Relationships\n")
+        for fk in fk_relationships:
+            src = f"{fk['source_table']}.{fk['source_column']}"
+            tgt = f"{fk['target_table']}.{fk['target_column']}"
+            md.append(f"- `{src}` → `{tgt}` (Constraint: `{fk['constraint_name']}`)\n")
+        md.append("\n")
+
+
+def generate_cardinality_section(md: List[str], fk_relationships: List[dict]) -> None:
+    """Generate cardinality report section."""
+    cardinalities = {}
+    for fk in fk_relationships:
+        key = (fk['target_table'], fk['source_table'])
+        if key in cardinalities:
+            continue
+
+        if fk['is_unique']:
+            cardinality = "(1) → (1)"
+        else:
+            cardinality = "(1) → (N)"
+
+        cardinalities[key] = f"{fk['target_table']} {cardinality} {fk['source_table']}"
+
+    if cardinalities:
+        md.append("## Cardinality Report\n")
+        for entry in cardinalities.values():
+            md.append(f"- {entry}\n")
+        md.append("\n")
+
+
+def generate_columns_section(md: List[str], meta: dict) -> None:
+    """Generate columns table section."""
+    md.append("#### Columns\n")
+    md.append("| Column Name | Data Type | Nullable? | Primary Key? |\n")
+    md.append("|-------------|-----------|-----------|--------------|\n")
+    for col in meta['columns']:
+        pk = "Yes" if col['name'] in meta['primary_keys'] else "No"
+        md.append(f"| `{col['name']}` | {col['type']} | {'Yes' if col['nullable'] else 'No'} | {pk} |\n")
+    md.append("\n")
+
+
+def generate_indexes_section(md: List[str], meta: dict) -> None:
+    """Generate indexes section."""
+    if meta['indexes']:
+        md.append("#### Indexes\n")
+        for idx in meta['indexes']:
+            columns = ", ".join(idx['column_names'])
+            md.append(f"- `{idx['name']}` ({idx['type'] or 'INDEX'}) → {columns}\n")
+        md.append("\n")
+
+
+def generate_sample_table(md: List[str], samples: list) -> None:
+    """Generate sample data table section."""
+    if not samples:
+        md.append("No records found.\n\n")
+        return
+
+    headers = samples[0].keys()
+    md.append("| " + " | ".join(headers) + " |\n")
+    md.append("|" + "|".join(["---"] * len(headers)) + "|\n")
+
+    for row in samples:
+        values = []
+        for val in row.values():
+            if isinstance(val, str) and len(val) > 50:
+                values.append(f"{val[:47]}...")
+            else:
+                values.append(str(val))
+        md.append("| " + " | ".join(values) + " |\n")
+    md.append("\n")
+
+
+if __name__ == "__main__":
+    from quantalogic.tools.utils.create_sample_database import create_sample_database
+
+    # Create and document sample database
+    create_sample_database("sample.db")
+    report = generate_database_report("sqlite:///sample.db")
+    print(report)
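The Mermaid cardinality mapping is the least obvious step; a worked check of `get_source_cardinality` as defined above (the relationship line format comes from `generate_erd_section`):

```python
def get_source_cardinality(is_unique: bool, is_nullable: bool) -> str:
    if is_unique:
        return "|o" if is_nullable else "||"
    return "o{" if is_nullable else "|{"

# A plain FK column (not unique, nullable by default) is the many-side "o{",
# so a customers/orders link renders as: CUSTOMERS ||--o{ ORDERS : "<fk name>"
assert get_source_cardinality(False, True) == "o{"
assert get_source_cardinality(False, False) == "|{"  # mandatory many-side
assert get_source_cardinality(True, True) == "|o"    # optional one-to-one
assert get_source_cardinality(True, False) == "||"   # one-to-one
```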
{quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: quantalogic
-Version: 0.2.25
+Version: 0.2.27
 Summary: QuantaLogic ReAct Agents
 Author: Raphaël MANSUY
 Author-email: raphael.mansuy@gmail.com
@@ -12,6 +12,7 @@ Requires-Dist: beautifulsoup4 (>=4.12.3,<5.0.0)
 Requires-Dist: boto3 (>=1.35.86,<2.0.0)
 Requires-Dist: click (>=8.1.8,<9.0.0)
 Requires-Dist: duckduckgo-search (>=7.2.1,<8.0.0)
+Requires-Dist: faker (>=33.3.1,<34.0.0)
 Requires-Dist: fastapi (>=0.115.6,<0.116.0)
 Requires-Dist: google-auth (>=2.20.0,<3.0.0)
 Requires-Dist: google-search-results (>=2.4.2,<3.0.0)
@@ -28,13 +29,16 @@ Requires-Dist: mkdocs-mermaid2-plugin (>=1.1.1,<2.0.0)
 Requires-Dist: mkdocs-minify-plugin (>=0.7.1,<0.8.0)
 Requires-Dist: mkdocstrings (>=0.24.0,<0.25.0)
 Requires-Dist: mkdocstrings-python (>=1.7.0,<2.0.0)
+Requires-Dist: networkx (>=3.4.2,<4.0.0)
 Requires-Dist: pathspec (>=0.12.1,<0.13.0)
 Requires-Dist: prompt-toolkit (>=3.0.48,<4.0.0)
 Requires-Dist: pydantic (>=2.10.4,<3.0.0)
 Requires-Dist: pymdown-extensions (>=10.3.1,<11.0.0)
+Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
 Requires-Dist: rich (>=13.9.4,<14.0.0)
 Requires-Dist: serpapi (>=0.1.5,<0.2.0)
+Requires-Dist: sqlalchemy (>=2.0.37,<3.0.0)
 Requires-Dist: tenacity (>=9.0.0,<10.0.0)
 Requires-Dist: toml (>=0.10.2,<0.11.0)
 Requires-Dist: tree-sitter (>=0.23.2,<0.24.0)
@@ -82,7 +86,7 @@ We created [QuantaLogic](https://www.quantalogic.app) because we saw a significa
 ## 🌟 Highlights
 
 - **ReAct Framework**: Advanced implementation combining LLM reasoning with concrete actions
-- **Universal LLM Support**: Integration with OpenAI, Anthropic, LM Studio, Bedrock, Ollama, DeepSeek V3, via LiteLLM
+- **Universal LLM Support**: Integration with OpenAI, Anthropic, LM Studio, Bedrock, Ollama, DeepSeek V3, DeepSeek R1, via LiteLLM. Example usage: `quantalogic --model-name deepseek/deepseek-reasoner` or `quantalogic --model-name openrouter/deepseek/deepseek-r1`
 - **Secure Tool System**: Docker-based code execution and file manipulation tools
 - **Real-time Monitoring**: Web interface with SSE-based event visualization
 - **Memory Management**: Intelligent context handling and optimization
{quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
 quantalogic/__init__.py,sha256=kX0c_xmD9OslWnAE92YHMGuD7xZcTo8ZOF_5R64HKps,784
-quantalogic/agent.py,sha256=
+quantalogic/agent.py,sha256=r6MbnVaBv9tYNk8WR7YJAjXlEGgko8GSYitWzr7uNMY,31841
 quantalogic/agent_config.py,sha256=9sjDnCPlAqVM45oguB_D509WSCaXZmuaVUtLcOvDlPg,7572
 quantalogic/agent_factory.py,sha256=ODVGuGtugSzmSdP6jiWlT8WyC5onANc6BIs83FC90Bg,3782
-quantalogic/coding_agent.py,sha256=
+quantalogic/coding_agent.py,sha256=Z7ik6LUvLKDnaW9Ax1iZGC7p1WMnlYEUIlE5lkBP414,4975
 quantalogic/console_print_events.py,sha256=KB-DGi52As8M96eUs1N_vgNqKIFtqv_H8NTOd3TLTgQ,2163
 quantalogic/console_print_token.py,sha256=qSU-3kmoZk4T5-1ybrEBi8tIXDPcz7eyWKhGh3E8uIg,395
 quantalogic/docs_cli.py,sha256=3giVbUpespB9ZdTSJ955A3BhcOaBl5Lwsn1AVy9XAeY,1663
@@ -26,7 +26,7 @@ quantalogic/server/templates/index.html,sha256=nDnXJoQEm1vXbhXtgaYk0G5VXj0wwzE6K
 quantalogic/task_file_reader.py,sha256=AMIJoeVY9Hhu0dBJ-C5EyaOFsXLkhn2oBhVs-WTnnLk,1460
 quantalogic/task_runner.py,sha256=FtxfZs2dxdsSZoiW92K3dpfegFe0dyKx9ZP5CCyEAzo,9965
 quantalogic/tool_manager.py,sha256=JAC5E5kLfYzYJx0QRIWbG14q1hlkOcwJFBG7HE8twpU,2425
-quantalogic/tools/__init__.py,sha256=
+quantalogic/tools/__init__.py,sha256=pTirT5UBynuTkAzFYebu7ttGAMP3_A0idFvDp6lGZJQ,2146
 quantalogic/tools/agent_tool.py,sha256=MXCXxWHRch7VK4UWhtRP1jeI8Np9Ne2CUGo8vm1oZiM,3064
 quantalogic/tools/dalle_e.py,sha256=nur2kl6DKjaWWaHcmF_y9vS5bvty2fW8hQfdgf5KWfs,10948
 quantalogic/tools/download_http_file_tool.py,sha256=wTfanbXjIRi5-qrbluuLvNmDNhvmYAnlMVb3dO8C2ss,2210
@@ -34,6 +34,8 @@ quantalogic/tools/duckduckgo_search_tool.py,sha256=xVaEb_SUK5NL3lwMQXj1rGQYYvNT-
 quantalogic/tools/edit_whole_content_tool.py,sha256=nXmpAvojvqvAcqNMy1kUKZ1ocboky_ZcnCR4SNCSPgw,2360
 quantalogic/tools/elixir_tool.py,sha256=fzPPtAW-Koy9KB0r5k2zV1f1U0WphL-LXPPOBkeNkug,7652
 quantalogic/tools/execute_bash_command_tool.py,sha256=fnx-zSPpxR2EofaleygAorrR21gRs43jBWh7IBAoNKw,4131
+quantalogic/tools/generate_database_report_tool.py,sha256=QbZjtmegGEOEZAIa-CSeBo5O9dYBZTk_PWrumyFUg1Q,1890
+quantalogic/tools/grep_app_tool.py,sha256=BDxygwx7WCbqbiP2jmSRnIsoIUVYG5A4SKzId524ys4,19957
 quantalogic/tools/input_question_tool.py,sha256=UoTlNhdmdr-eyiVtVCG2qJe_R4bU_ag-DzstSdmYkvM,1848
 quantalogic/tools/jinja_tool.py,sha256=1bqkFia2GtfntIyTVg4tCiPP8S1dX43U7QsrBE1Ngps,2893
 quantalogic/tools/language_handlers/__init__.py,sha256=5GD6TYsMqRni0nwePp2KOjNQ04GnT5wihT6YAuvx43c,699
@@ -47,7 +49,7 @@ quantalogic/tools/language_handlers/rust_handler.py,sha256=t_AqKVa3KVk6SVkq_UjUU
 quantalogic/tools/language_handlers/scala_handler.py,sha256=wr-cWOIFOc0UYwODmEtT6rV63Qf1NyNB_BLo23GLrvk,1281
 quantalogic/tools/language_handlers/typescript_handler.py,sha256=L4vuJMYxKO3_83dQhdwZ9fogauIV7rwoicRT0xLGfkQ,1738
 quantalogic/tools/list_directory_tool.py,sha256=8Hy38DelSh-mRqS_uDLpeBYoHLtEy5ji77xI-TJu3Ms,4176
-quantalogic/tools/llm_tool.py,sha256=
+quantalogic/tools/llm_tool.py,sha256=CFTvr-RTFiuGWlOLtvw4zv93s_CLUHuHfNmvK6QpQiQ,7014
 quantalogic/tools/llm_vision_tool.py,sha256=eVDIrANxxZCHxYp9xaAN8hLdFhlYm7bUu2tX9-1xUbI,5496
 quantalogic/tools/markitdown_tool.py,sha256=lpbJBLx43_x2DjiZAV1HSidkHeqkkV0KvgeLG2fphK4,4339
 quantalogic/tools/nodejs_tool.py,sha256=zdnE0VFj_5786uR2L0o-SKR0Gk8L-U7rdj7xGHJYIq0,19905
@@ -59,9 +61,13 @@ quantalogic/tools/replace_in_file_tool.py,sha256=n63s09Y8RXOKGjxfWw0D6F6JpQ6ERSJ
 quantalogic/tools/ripgrep_tool.py,sha256=sRzHaWac9fa0cCGhECJN04jw_Ko0O3u45KDWzMIYcvY,14291
 quantalogic/tools/search_definition_names.py,sha256=Qj9ex226vHs8Jf-kydmTh7B_R8O5buIsJpQu3CvYw7k,18601
 quantalogic/tools/serpapi_search_tool.py,sha256=sX-Noch77kGP2XiwislPNFyy3_4TH6TwMK6C81L3q9Y,5316
+quantalogic/tools/sql_query_tool.py,sha256=oScXJeLevIiKj_2-byPtD1fQ2FltlzywazVI68aNidc,6068
 quantalogic/tools/task_complete_tool.py,sha256=L8tuyVoN07Q2hOsxx17JTW0C5Jd_N-C0i_0PtCUQUKU,929
 quantalogic/tools/tool.py,sha256=fdD-wwAOgfua2RRk1FHv_mlNBQ1FTzPO8vMIKiRirZM,9800
 quantalogic/tools/unified_diff_tool.py,sha256=wTKXIoBEPcC_EcQmpJZVi95vq0Ncvsw1Kyc7XqPO6dU,14147
+quantalogic/tools/utils/__init__.py,sha256=qLQaS1JvZt_Bfg5sTj-TUa47u2IatLaIwDZe0EtXELI,344
+quantalogic/tools/utils/create_sample_database.py,sha256=Aus9xRLGfQfsYnxsAkJ5CW-Za6fwKQeqm2mOXqgkMis,4018
+quantalogic/tools/utils/generate_database_report.py,sha256=0D-5fWOfpAh1jEcld5OTQP5x6XkJE5jpNY6FyHv1L2s,10345
 quantalogic/tools/wikipedia_search_tool.py,sha256=bdZ_0dYTxpEfU04tBFsatnLM5P9Z3kAZgKQEjsopJLA,5405
 quantalogic/tools/write_file_tool.py,sha256=_mx9_Zjg2oMAAVzlcHEKjZVZUxQVgbRfcoMKgWnoZcg,3764
 quantalogic/utils/__init__.py,sha256=Ltq7tzLuHCl9BpCvfRVA9Sjrtp1RJesrn7G980lbl_c,563
@@ -79,8 +85,8 @@ quantalogic/version_check.py,sha256=cttR1lR3OienGLl7NrK1Te1fhDkqSjCci7HC1vFUTSY,
 quantalogic/welcome_message.py,sha256=IXMhem8h7srzNUwvw8G_lmEkHU8PFfote021E_BXmVk,3039
 quantalogic/xml_parser.py,sha256=uMLQNHTRCg116FwcjRoquZmSwVtE4LEH-6V2E3RD-dA,11466
 quantalogic/xml_tool_parser.py,sha256=Vz4LEgDbelJynD1siLOVkJ3gLlfHsUk65_gCwbYJyGc,3784
-quantalogic-0.2.
-quantalogic-0.2.
-quantalogic-0.2.
-quantalogic-0.2.
-quantalogic-0.2.
+quantalogic-0.2.27.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+quantalogic-0.2.27.dist-info/METADATA,sha256=a2770Es_XksLjabRKUzbpKVp9BVrHaob7MdUVnGH7YE,20532
+quantalogic-0.2.27.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+quantalogic-0.2.27.dist-info/entry_points.txt,sha256=h74O_Q3qBRCrDR99qvwB4BpBGzASPUIjCfxHq6Qnups,183
+quantalogic-0.2.27.dist-info/RECORD,,
{quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/LICENSE: file without changes
{quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/WHEEL: file without changes
{quantalogic-0.2.25.dist-info → quantalogic-0.2.27.dist-info}/entry_points.txt: file without changes