open-db 1.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_server/__init__.py +0 -0
- mcp_server/__main__.py +34 -0
- mcp_server/client.py +363 -0
- mcp_server/models.py +140 -0
- mcp_server/server.py +370 -0
- open_db-1.3.0.dist-info/METADATA +455 -0
- open_db-1.3.0.dist-info/RECORD +60 -0
- open_db-1.3.0.dist-info/WHEEL +5 -0
- open_db-1.3.0.dist-info/entry_points.txt +2 -0
- open_db-1.3.0.dist-info/licenses/LICENSE +21 -0
- open_db-1.3.0.dist-info/top_level.txt +4 -0
- opendb/__init__.py +21 -0
- opendb/cli.py +230 -0
- opendb_core/__init__.py +0 -0
- opendb_core/config.py +51 -0
- opendb_core/database.py +46 -0
- opendb_core/main.py +96 -0
- opendb_core/middleware/__init__.py +0 -0
- opendb_core/middleware/auth.py +40 -0
- opendb_core/parsers/__init__.py +0 -0
- opendb_core/parsers/base.py +23 -0
- opendb_core/parsers/docx.py +166 -0
- opendb_core/parsers/image.py +56 -0
- opendb_core/parsers/pdf.py +174 -0
- opendb_core/parsers/pptx.py +116 -0
- opendb_core/parsers/registry.py +28 -0
- opendb_core/parsers/spreadsheet.py +278 -0
- opendb_core/parsers/text.py +91 -0
- opendb_core/routers/__init__.py +0 -0
- opendb_core/routers/files.py +107 -0
- opendb_core/routers/glob.py +68 -0
- opendb_core/routers/health.py +13 -0
- opendb_core/routers/index.py +83 -0
- opendb_core/routers/info.py +16 -0
- opendb_core/routers/memory.py +98 -0
- opendb_core/routers/read.py +75 -0
- opendb_core/routers/search.py +81 -0
- opendb_core/services/__init__.py +0 -0
- opendb_core/services/grep_service.py +156 -0
- opendb_core/services/index_service.py +179 -0
- opendb_core/services/ingest_service.py +295 -0
- opendb_core/services/memory_service.py +108 -0
- opendb_core/services/read_service.py +230 -0
- opendb_core/services/search_service.py +21 -0
- opendb_core/services/vision_service.py +147 -0
- opendb_core/services/watch_service.py +305 -0
- opendb_core/storage/__init__.py +130 -0
- opendb_core/storage/base.py +234 -0
- opendb_core/storage/postgres.py +893 -0
- opendb_core/storage/shared.py +165 -0
- opendb_core/storage/sqlite.py +994 -0
- opendb_core/utils/__init__.py +0 -0
- opendb_core/utils/hashing.py +11 -0
- opendb_core/utils/text.py +181 -0
- opendb_core/utils/tokenizer.py +108 -0
- opendb_core/workspace.py +286 -0
- opendb_integration/__init__.py +27 -0
- opendb_integration/client.py +500 -0
- opendb_integration/index.py +77 -0
- opendb_integration/tools.py +652 -0
mcp_server/__init__.py
ADDED
|
File without changes
|
mcp_server/__main__.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""Entry point: python -m mcp_server [--transport stdio|streamable_http] [--port 8200]"""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import argparse
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def main():
|
|
10
|
+
parser = argparse.ArgumentParser(description="OpenDB MCP Server")
|
|
11
|
+
parser.add_argument(
|
|
12
|
+
"--transport",
|
|
13
|
+
choices=["stdio", "streamable_http"],
|
|
14
|
+
default="stdio",
|
|
15
|
+
help="Transport type (default: stdio)",
|
|
16
|
+
)
|
|
17
|
+
parser.add_argument(
|
|
18
|
+
"--port",
|
|
19
|
+
type=int,
|
|
20
|
+
default=8200,
|
|
21
|
+
help="Port for streamable_http transport (default: 8200)",
|
|
22
|
+
)
|
|
23
|
+
args = parser.parse_args()
|
|
24
|
+
|
|
25
|
+
from mcp_server.server import mcp
|
|
26
|
+
|
|
27
|
+
if args.transport == "streamable_http":
|
|
28
|
+
mcp.run(transport="streamable_http", port=args.port)
|
|
29
|
+
else:
|
|
30
|
+
mcp.run()
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
if __name__ == "__main__":
|
|
34
|
+
main()
|
mcp_server/client.py
ADDED
|
@@ -0,0 +1,363 @@
|
|
|
1
|
+
"""HTTP client for OpenDB REST API."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
|
|
8
|
+
import httpx
|
|
9
|
+
|
|
10
|
+
OPENDB_URL = os.environ.get("OPENDB_URL", "http://localhost:8000")
|
|
11
|
+
|
|
12
|
+
_client: httpx.AsyncClient | None = None
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
async def get_client() -> httpx.AsyncClient:
    """Return the module-wide httpx client, creating one on first use.

    A replacement client is also built when the previous one was closed
    (e.g. after `close_client`).
    """
    global _client
    needs_new = _client is None or _client.is_closed
    if needs_new:
        _client = httpx.AsyncClient(base_url=OPENDB_URL, timeout=60.0)
    return _client
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
async def close_client() -> None:
    """Dispose of the shared httpx client, if one is currently open."""
    global _client
    client = _client
    if client is not None and not client.is_closed:
        await client.aclose()
        _client = None
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _handle_error(response: httpx.Response) -> str:
|
|
32
|
+
"""Format error response into a readable string."""
|
|
33
|
+
if response.status_code == 404:
|
|
34
|
+
try:
|
|
35
|
+
data = response.json()
|
|
36
|
+
return f"Error: {data.get('detail', 'Not found')}"
|
|
37
|
+
except Exception:
|
|
38
|
+
return "Error: Resource not found"
|
|
39
|
+
if response.status_code == 409:
|
|
40
|
+
try:
|
|
41
|
+
data = response.json()
|
|
42
|
+
candidates = data.get("candidates", [])
|
|
43
|
+
names = [c.get("filename", c.get("id", "?")) for c in candidates]
|
|
44
|
+
return f"Error: Ambiguous filename. Candidates: {', '.join(names)}"
|
|
45
|
+
except Exception:
|
|
46
|
+
return "Error: Ambiguous filename"
|
|
47
|
+
if response.status_code == 400:
|
|
48
|
+
try:
|
|
49
|
+
data = response.json()
|
|
50
|
+
return f"Error: {data.get('detail', 'Bad request')}"
|
|
51
|
+
except Exception:
|
|
52
|
+
return "Error: Bad request"
|
|
53
|
+
return f"Error: OpenDB returned status {response.status_code}"
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
async def read_file(
    filename: str,
    numbered: bool = False,
    pages: str | None = None,
    lines: str | None = None,
    grep: str | None = None,
    format: str | None = None,
) -> str:
    """Call GET /read/{filename} with optional query parameters.

    Args:
        filename: File path, filename, partial match, or UUID.
        numbered: Prefix returned lines with line numbers.
        pages: Page range ('1-3'), page number ('5'), or sheet name.
        lines: Line-range selector applied server-side.
        grep: Search expression applied within the file.
        format: 'json' for structured output; anything else returns text.

    Returns:
        The file content on success, or an "Error: ..." string on failure.
    """
    client = await get_client()
    params: dict[str, str] = {}
    if numbered:
        params["numbered"] = "true"
    if pages:
        params["pages"] = pages
    if lines:
        params["lines"] = lines
    if grep:
        params["grep"] = grep
    if format:
        params["format"] = format

    # BUG FIX: the request path previously did not interpolate `filename`,
    # so every call hit the same literal URL regardless of the argument.
    response = await client.get(f"/read/{filename}", params=params)

    if response.status_code != 200:
        return _handle_error(response)

    if format == "json":
        return json.dumps(response.json(), indent=2, ensure_ascii=False)

    return response.text
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
async def search(
    query: str,
    mode: str = "fts",
    path: str | None = None,
    glob: str | None = None,
    case_insensitive: bool = False,
    context: int = 0,
    limit: int = 20,
    offset: int = 0,
    filters: dict | None = None,
    max_results: int = 100,
) -> str:
    """Call POST /search and format the results as readable text.

    Args:
        query: Search query or regex pattern.
        mode: Search mode (e.g. 'fts' or 'grep'); sent to the server as-is.
        path: Directory restriction (grep mode).
        glob: File pattern filter (grep mode).
        case_insensitive: Case-insensitive matching.
        context: Context lines around each grep match.
        limit: Max results per page.
        offset: Pagination offset.
        filters: Extra server-side filters.
        max_results: Only sent when different from the server default (100).

    Returns:
        A formatted result listing, or an "Error: ..." string on failure.
    """
    client = await get_client()
    body: dict = {"query": query, "mode": mode, "limit": limit, "offset": offset}
    if path:
        body["path"] = path
    if glob:
        body["glob"] = glob
    if case_insensitive:
        body["case_insensitive"] = True
    if context:
        body["context"] = context
    if filters:
        body["filters"] = filters
    if max_results != 100:
        body["max_results"] = max_results

    response = await client.post("/search", json=body)

    if response.status_code != 200:
        return _handle_error(response)

    data = response.json()

    # The server may report an application-level error inside a 200 body.
    if data.get("error"):
        return f"Error: {data['error']}"

    results = data.get("results", [])
    total = data.get("total", 0)

    if not results:
        return f"No results found for '{query}'"

    # Split grep vs fts results (grep rows have 'file' key, fts rows have 'filename').
    grep_rows = [r for r in results if r.get("file")]
    fts_rows = [r for r in results if not r.get("file") and r.get("filename")]

    lines_out: list[str] = []

    if grep_rows:
        lines_out.append(f"Found {total} results:")
        lines_out.append("")
        for r in grep_rows:
            ctx_before = r.get("context_before", [])
            ctx_after = r.get("context_after", [])
            if ctx_before:
                # Context lines are numbered backwards from the match line.
                for j, cl in enumerate(ctx_before):
                    ln = r["line"] - len(ctx_before) + j
                    lines_out.append(f" {r['file']}:{ln}: {cl}")
            lines_out.append(f" {r['file']}:{r['line']}: {r['text']}")
            if ctx_after:
                for j, cl in enumerate(ctx_after):
                    ln = r["line"] + 1 + j
                    lines_out.append(f" {r['file']}:{ln}: {cl}")
            lines_out.append("")

    if fts_rows:
        # Group FTS results by filename, preserving the order of first appearance
        # (input is already sorted best-match first, so the first row per file is its best).
        groups: dict[str, list[dict]] = {}
        for r in fts_rows:
            groups.setdefault(r["filename"], []).append(r)

        lines_out.append(f"Found {total} matches across {len(groups)} files:")
        lines_out.append("")
        for filename, rows in groups.items():
            best = rows[0]
            score = best.get("relevance_score", 0)
            best_page = best.get("page_number", "?")
            section = best.get("section_title", "")
            highlight = best.get("highlight", "")
            updated = best.get("updated_at", "")
            loc = f"page {best_page}"
            if section:
                loc = f"{section} ({loc})"
            match_count = len(rows)
            score_part = f"score: {score}"
            if updated:
                score_part += f", updated: {updated}"
            # BUG FIX: the header previously printed a literal placeholder
            # instead of interpolating the grouped filename.
            header = (
                f" {filename} ({match_count} match{'es' if match_count != 1 else ''}, "
                f"best: {loc}) [{score_part}]"
            )
            lines_out.append(header)
            lines_out.append(f" {highlight}")
            if match_count > 1:
                other_pages = [str(r.get("page_number", "?")) for r in rows[1:]]
                # Cap list to keep the line compact
                shown = ", ".join(other_pages[:10])
                suffix = f" (+{len(other_pages) - 10} more)" if len(other_pages) > 10 else ""
                lines_out.append(f" also on pages: {shown}{suffix}")
            lines_out.append("")

    if data.get("truncated"):
        lines_out.append(f"... (truncated at {len(results)} results, {total} total)")

    return "\n".join(lines_out)
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
async def get_info() -> str:
    """Fetch GET /info and render the workspace summary as plain text.

    Sections: overall status counts, per-MIME-type counts, recently updated
    files, and (when present) memory statistics.
    """
    client = await get_client()
    response = await client.get("/info")

    if response.status_code != 200:
        return _handle_error(response)

    payload = response.json()
    status_counts = payload.get("by_status", {})
    type_rows = payload.get("by_type", [])
    recent_rows = payload.get("recent", [])

    summary = ", ".join(f"{s}: {c}" for s, c in sorted(status_counts.items()))
    out = [f"Workspace: {sum(status_counts.values())} files ({summary})"]

    if type_rows:
        out += ["", "By type:"]
        out.extend(f" {mime:<40} {count} files" for mime, count in type_rows)

    if recent_rows:
        out += ["", "Recently updated:"]
        out.extend(
            f" {row['filename']:<40} {row.get('updated_at', '?')}"
            for row in recent_rows
        )

    mem = payload.get("memory")
    if mem and mem.get("total", 0) > 0:
        out += ["", f"Memories: {mem['total']} total"]
        mem_counts = mem.get("by_type", {})
        if mem_counts:
            joined = ", ".join(f"{t}: {c}" for t, c in sorted(mem_counts.items()))
            out.append(f" {joined}")

    return "\n".join(out)
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
# ------------------------------------------------------------------
|
|
242
|
+
# Agent Memory
|
|
243
|
+
# ------------------------------------------------------------------
|
|
244
|
+
|
|
245
|
+
async def memory_store(
    content: str,
    memory_type: str = "semantic",
    tags: list[str] | None = None,
    metadata: dict | None = None,
) -> str:
    """Persist a memory via POST /memory and report the stored id and type."""
    client = await get_client()
    payload: dict = {"content": content, "memory_type": memory_type}
    # Optional fields are only sent when non-empty.
    for key, value in (("tags", tags), ("metadata", metadata)):
        if value:
            payload[key] = value

    response = await client.post("/memory", json=payload)
    if response.status_code != 200:
        return _handle_error(response)

    data = response.json()
    stored_id = data.get("memory_id", "?")
    stored_type = data.get("memory_type", "?")
    return f"Memory stored (id: {stored_id}, type: {stored_type})"
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
async def memory_recall(
    query: str,
    memory_type: str | None = None,
    tags: list[str] | None = None,
    limit: int = 10,
) -> str:
    """Search stored memories via POST /memory/recall and format the hits."""
    client = await get_client()
    payload: dict = {"query": query, "limit": limit}
    if memory_type:
        payload["memory_type"] = memory_type
    if tags:
        payload["tags"] = tags

    response = await client.post("/memory/recall", json=payload)
    if response.status_code != 200:
        return _handle_error(response)

    data = response.json()
    hits = data.get("results", [])
    if not hits:
        return f"No memories found for '{query}'"

    out: list[str] = [f"Found {data.get('total', 0)} memories:", ""]
    for hit in hits:
        line = (
            f" [{hit.get('memory_type', '?')}]"
            f" (score: {hit.get('score', 0)}, created: {hit.get('created_at', '?')})"
        )
        joined_tags = ", ".join(hit.get("tags", []))
        if joined_tags:
            line += f" tags: {joined_tags}"
        out.append(line)
        # Show highlight if available, otherwise truncate content
        out.append(f" {hit.get('highlight') or hit.get('content', '')[:150]}")
        out.append(f" id: {hit.get('memory_id', '?')}")
        out.append("")

    return "\n".join(out)
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
async def memory_forget(
    memory_id: str | None = None,
    query: str | None = None,
    memory_type: str | None = None,
) -> str:
    """Delete memories via POST /memory/forget (by id, query, and/or type)."""
    client = await get_client()
    # Build the request body from whichever selectors were supplied.
    payload: dict = {
        key: value
        for key, value in (
            ("memory_id", memory_id),
            ("query", query),
            ("memory_type", memory_type),
        )
        if value
    }

    response = await client.post("/memory/forget", json=payload)
    if response.status_code != 200:
        return _handle_error(response)

    data = response.json()
    return (
        f"Deleted {data.get('deleted', 0)} memory/memories "
        f"(by {data.get('by', '?')})"
    )
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
async def glob_files(pattern: str, path: str | None = None) -> str:
    """List workspace files matching a glob pattern via GET /glob."""
    client = await get_client()
    query: dict[str, str] = {"pattern": pattern}
    if path:
        query["path"] = path

    response = await client.get("/glob", params=query)
    if response.status_code != 200:
        return _handle_error(response)

    data = response.json()
    matches = data.get("files", [])
    if not matches:
        return f"No files found matching '{pattern}'"

    listing = "\n".join(matches)
    if data.get("truncated", False):
        listing += f"\n\n... ({data.get('count', 0)} shown, more results truncated)"
    return listing
|
mcp_server/models.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
"""Pydantic input models for OpenDB MCP tools."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel, ConfigDict, Field
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ReadInput(BaseModel):
    """Input for reading any file — code with line numbers, documents as plain text."""

    # Strip surrounding whitespace from string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    # Accepts a full path, bare filename, partial match, or UUID; the server
    # resolves ambiguity (an ambiguous name yields a 409 with candidates).
    filename: str = Field(
        ..., description="File path, filename, partial match, or UUID", min_length=1
    )
    # NOTE(review): this model exposes offset/limit while the HTTP helper in
    # mcp_server/client.py exposes a 'lines' range parameter instead —
    # confirm how these are mapped before relying on either.
    offset: int | None = Field(
        None, description="Start line number (1-based)", ge=1
    )
    limit: int | None = Field(
        None, description="Max lines to return", ge=1
    )
    pages: str | None = Field(
        None, description="Page range '1-3', page number '5', or sheet name 'Revenue'"
    )
    grep: str | None = Field(
        None, description="Search within file. Use + for AND: 'revenue+growth'"
    )
    format: str | None = Field(
        None, description="Set to 'json' for structured spreadsheet output with columns and rows"
    )
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class SearchInput(BaseModel):
    """Input for searching across code files (regex) and documents (full-text)."""

    # Strip surrounding whitespace from string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    query: str = Field(
        ..., description="Search query or regex pattern", min_length=1
    )
    # NOTE(review): this model defaults mode to "auto" while the HTTP helper
    # in mcp_server/client.py defaults to "fts" — confirm which default the
    # server actually sees.
    mode: str = Field(
        "auto",
        description="'grep' for regex code search, 'fts' for document full-text search, 'auto' to detect",
    )
    path: str | None = Field(
        None, description="Directory to search in (grep mode)"
    )
    glob: str | None = Field(
        None, description="File pattern filter e.g. '*.py', '*.{ts,tsx}' (grep mode)"
    )
    case_insensitive: bool = Field(
        False, description="Case insensitive search"
    )
    context: int = Field(
        0, description="Context lines before/after each match (grep mode)", ge=0, le=10
    )
    limit: int = Field(20, description="Max results", ge=1, le=100)
    offset: int = Field(0, description="Pagination offset", ge=0)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class GlobInput(BaseModel):
    """Input for finding files matching a glob pattern."""

    # Strip surrounding whitespace from string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    pattern: str = Field(
        ..., description="Glob pattern e.g. '**/*.py', 'src/**/*.ts'", min_length=1
    )
    # When omitted, the server presumably searches the workspace root —
    # TODO confirm against the /glob route.
    path: str | None = Field(
        None, description="Root directory to search in"
    )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class InfoInput(BaseModel):
    """Input for workspace info (no parameters needed)."""

    # No fields; extra="forbid" rejects any stray arguments a caller passes.
    model_config = ConfigDict(extra="forbid")
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
# ------------------------------------------------------------------
|
|
81
|
+
# Agent Memory
|
|
82
|
+
# ------------------------------------------------------------------
|
|
83
|
+
|
|
84
|
+
class MemoryStoreInput(BaseModel):
    """Input for storing a memory."""

    # Strip surrounding whitespace from string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    content: str = Field(
        ..., description="Memory content text", min_length=1, max_length=10000
    )
    memory_type: str = Field(
        "semantic",
        description="Type: 'episodic' (past events), 'semantic' (facts/knowledge), 'procedural' (workflows/rules)",
    )
    # NOTE(review): `pinned` is accepted here but the HTTP helper
    # memory_store() in mcp_server/client.py does not forward it — confirm
    # the flag reaches the server end-to-end.
    pinned: bool = Field(
        False, description="Pin this memory so it always surfaces first in recall results"
    )
    tags: list[str] = Field(
        default_factory=list, description="Tags for categorization"
    )
    metadata: dict = Field(
        default_factory=dict, description="Additional key-value metadata"
    )
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class MemoryRecallInput(BaseModel):
    """Input for recalling memories."""

    # Strip surrounding whitespace from string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    # An empty query is permitted — per the pinned_only description below,
    # pinned retrieval needs no search text.
    query: str = Field(
        "", description="Search query for memory recall"
    )
    memory_type: str | None = Field(
        None, description="Filter by type: episodic, semantic, procedural"
    )
    tags: list[str] | None = Field(
        None, description="Filter by tags"
    )
    limit: int = Field(10, description="Max results", ge=1, le=50)
    pinned_only: bool = Field(
        False, description="If true, return only pinned memories (no search needed)"
    )
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
class MemoryForgetInput(BaseModel):
    """Input for deleting memories."""

    # Strip surrounding whitespace from string fields; reject unknown keys.
    model_config = ConfigDict(str_strip_whitespace=True, extra="forbid")

    memory_id: str | None = Field(
        None, description="Specific memory ID to delete"
    )
    query: str | None = Field(
        None, description="Delete memories matching this search query"
    )
    # NOTE(review): no cross-field validation here — presumably the server
    # rejects a request providing neither memory_id nor query; confirm.
    memory_type: str | None = Field(
        None, description="Filter by type when deleting by query"
    )
|