python-slack-agents 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- python_slack_agents-0.5.0.dist-info/METADATA +318 -0
- python_slack_agents-0.5.0.dist-info/RECORD +54 -0
- python_slack_agents-0.5.0.dist-info/WHEEL +4 -0
- python_slack_agents-0.5.0.dist-info/entry_points.txt +2 -0
- python_slack_agents-0.5.0.dist-info/licenses/LICENSE +202 -0
- slack_agents/Dockerfile +25 -0
- slack_agents/__init__.py +25 -0
- slack_agents/access/__init__.py +0 -0
- slack_agents/access/allow_all.py +9 -0
- slack_agents/access/allow_list.py +19 -0
- slack_agents/access/base.py +20 -0
- slack_agents/agent_loop.py +208 -0
- slack_agents/cli/__init__.py +48 -0
- slack_agents/cli/build_docker.py +94 -0
- slack_agents/cli/export_conversations.py +84 -0
- slack_agents/cli/export_conversations_html.py +605 -0
- slack_agents/cli/export_usage.py +81 -0
- slack_agents/cli/export_usage_csv.py +151 -0
- slack_agents/cli/healthcheck.py +67 -0
- slack_agents/cli/run.py +16 -0
- slack_agents/config.py +113 -0
- slack_agents/conversations.py +273 -0
- slack_agents/files.py +59 -0
- slack_agents/llm/__init__.py +1 -0
- slack_agents/llm/anthropic.py +207 -0
- slack_agents/llm/base.py +82 -0
- slack_agents/llm/openai.py +283 -0
- slack_agents/main.py +55 -0
- slack_agents/observability.py +175 -0
- slack_agents/py.typed +0 -0
- slack_agents/scripts/__init__.py +0 -0
- slack_agents/scripts/download_fonts.py +39 -0
- slack_agents/slack/__init__.py +0 -0
- slack_agents/slack/actions.py +119 -0
- slack_agents/slack/agent.py +688 -0
- slack_agents/slack/canvases.py +225 -0
- slack_agents/slack/files.py +102 -0
- slack_agents/slack/format.py +55 -0
- slack_agents/slack/streaming.py +70 -0
- slack_agents/slack/streaming_formatter.py +182 -0
- slack_agents/slack/tool_blocks.py +97 -0
- slack_agents/storage/__init__.py +0 -0
- slack_agents/storage/base.py +304 -0
- slack_agents/storage/postgres.py +612 -0
- slack_agents/storage/postgres.sql +120 -0
- slack_agents/storage/sqlite.py +473 -0
- slack_agents/storage/sqlite.sql +73 -0
- slack_agents/tools/__init__.py +0 -0
- slack_agents/tools/base.py +140 -0
- slack_agents/tools/canvas.py +401 -0
- slack_agents/tools/file_exporter.py +582 -0
- slack_agents/tools/file_importer.py +363 -0
- slack_agents/tools/mcp_http.py +203 -0
- slack_agents/tools/user_context.py +239 -0
|
@@ -0,0 +1,363 @@
|
|
|
1
|
+
"""Built-in file input provider: document import (PDF, DOCX, XLSX, PPTX, text, images).
|
|
2
|
+
|
|
3
|
+
Exports a Provider class that subclasses BaseFileImporterProvider.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import base64
|
|
7
|
+
import io
|
|
8
|
+
import logging
|
|
9
|
+
|
|
10
|
+
from slack_agents import InputFile, UserConversationContext
|
|
11
|
+
from slack_agents.storage.base import BaseStorageProvider
|
|
12
|
+
from slack_agents.tools.base import BaseFileImporterProvider, ContentBlock, FileImportToolException
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
# Office Open XML MIME types for the supported document formats.
MIME_DOCX = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
MIME_XLSX = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
MIME_PPTX = "application/vnd.openxmlformats-officedocument.presentationml.presentation"

# Disclaimer prepended to text extractions that drop non-text content
# (used for PDF, DOCX, and PPTX output).
_TEXT_ONLY_NOTE = (
    "(Note: only text was extracted; images are shown as [IMAGE] placeholders"
    " with alt text when available. Charts and visual formatting are not included.)"
)
# Disclaimer prepended to XLSX extractions (cached cell values only —
# no live formulas, images, charts, or formatting).
_CELLS_ONLY_NOTE = (
    "(Note: only cell values were extracted — formulas appear as their"
    " last-calculated values (may be blank if never opened in Excel),"
    " and pivot tables appear as their cached display values."
    " Images, charts, and formatting are not included.)"
)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _table_to_md(rows: list[list[str]]) -> str:
|
|
33
|
+
if not rows:
|
|
34
|
+
return "(empty)"
|
|
35
|
+
max_cols = max(len(r) for r in rows)
|
|
36
|
+
norm = [r + [""] * (max_cols - len(r)) for r in rows]
|
|
37
|
+
for r in norm:
|
|
38
|
+
for i, cell in enumerate(r):
|
|
39
|
+
r[i] = cell.replace("|", "\\|").replace("\n", " ")
|
|
40
|
+
header = "| " + " | ".join(norm[0]) + " |"
|
|
41
|
+
sep = "| " + " | ".join("---" for _ in norm[0]) + " |"
|
|
42
|
+
body_lines = ["| " + " | ".join(row) + " |" for row in norm[1:]]
|
|
43
|
+
return "\n".join([header, sep] + body_lines)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _extract_pdf(file_bytes: bytes) -> str:
    """Convert a PDF (raw bytes) to Markdown, prefixed with the text-only note.

    Exceptions from pymupdf/pymupdf4llm propagate; callers wrap them in
    FileImportToolException.
    """
    import pymupdf
    import pymupdf4llm

    doc = pymupdf.open(stream=file_bytes, filetype="pdf")
    try:
        md_text = pymupdf4llm.to_markdown(doc)
    finally:
        # Always release the document, even when Markdown conversion raises —
        # the original code leaked the handle on error.
        doc.close()
    return f"{_TEXT_ONLY_NOTE}\n\n{md_text}"
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def _get_docx_image_alt(shape) -> str:
|
|
57
|
+
try:
|
|
58
|
+
el = shape._element
|
|
59
|
+
for child in el:
|
|
60
|
+
if child.tag.endswith("}docPr") or child.tag == "docPr":
|
|
61
|
+
return child.get("descr", "")
|
|
62
|
+
except Exception:
|
|
63
|
+
pass
|
|
64
|
+
return ""
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _docx_runs_to_md(runs) -> str:
|
|
68
|
+
parts = []
|
|
69
|
+
for run in runs:
|
|
70
|
+
text = run.text
|
|
71
|
+
if not text:
|
|
72
|
+
continue
|
|
73
|
+
bold = run.bold
|
|
74
|
+
italic = run.italic
|
|
75
|
+
if bold and italic:
|
|
76
|
+
text = f"***{text}***"
|
|
77
|
+
elif bold:
|
|
78
|
+
text = f"**{text}**"
|
|
79
|
+
elif italic:
|
|
80
|
+
text = f"*{text}*"
|
|
81
|
+
parts.append(text)
|
|
82
|
+
return "".join(parts)
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def _docx_paragraph_to_md(p) -> str:
    """Render one DOCX paragraph as a Markdown line.

    The paragraph style picks a prefix (heading hashes, list marker); the run
    text is formatted via _docx_runs_to_md. Blank paragraphs render as "".
    """
    style = (p.style.name or "").lower() if p.style else ""

    if style.startswith("heading"):
        # "Heading N" -> N hashes, capped at h6; malformed names fall back to h1.
        try:
            depth = int(style.split()[-1])
        except (ValueError, IndexError):
            depth = 1
        marker = "#" * min(depth, 6) + " "
    elif style == "title":
        marker = "# "
    elif style == "subtitle":
        marker = "## "
    elif style.startswith("list bullet"):
        marker = "- "
    elif style.startswith("list number"):
        marker = "1. "
    else:
        marker = ""

    body = _docx_runs_to_md(p.runs)
    return marker + body if body.strip() else ""
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def _extract_docx(file_bytes: bytes) -> str:
    """Convert a DOCX document (raw bytes) to Markdown.

    Walks the document body in order, rendering paragraphs via
    _docx_paragraph_to_md and tables via _table_to_md. Inline images are
    appended at the end as [IMAGE] placeholders (their original positions are
    not preserved). The result is prefixed with _TEXT_ONLY_NOTE.
    """
    from docx import Document
    from docx.oxml.ns import qn
    from docx.table import Table
    from docx.text.paragraph import Paragraph

    doc = Document(io.BytesIO(file_bytes))
    parts = []

    # Iterate the raw XML body so paragraphs and tables keep document order
    # (doc.paragraphs / doc.tables would lose the interleaving).
    for child in doc.element.body:
        if child.tag == qn("w:p"):
            p = Paragraph(child, doc)
            md = _docx_paragraph_to_md(p)
            if md:
                parts.append(md)
        elif child.tag == qn("w:tbl"):
            tbl = Table(child, doc)
            rows = []
            for row in tbl.rows:
                rows.append([cell.text for cell in row.cells])
            md = _table_to_md(rows)
            parts.append(md)

    # Collect alt text for inline images; positions are lost at this point.
    image_placeholders = []
    for shape in doc.inline_shapes:
        alt = _get_docx_image_alt(shape)
        image_placeholders.append(f"[IMAGE: {alt}]" if alt else "[IMAGE]")

    text = "\n\n".join(parts)
    if image_placeholders:
        text += "\n\n(Images found — original positions in document not preserved):\n" + "\n".join(
            image_placeholders
        )
    return f"{_TEXT_ONLY_NOTE}\n\n" + text
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def _extract_xlsx(file_bytes: bytes) -> str:
    """Convert an XLSX workbook (raw bytes) to Markdown, one section per sheet.

    Loads in read-only/data-only mode, so formulas appear as their cached
    values. The result is prefixed with _CELLS_ONLY_NOTE.
    """
    from openpyxl import load_workbook

    wb = load_workbook(io.BytesIO(file_bytes), read_only=True, data_only=True)
    try:
        sheets = []
        for sheet_name in wb.sheetnames:
            ws = wb[sheet_name]
            rows = [
                [str(c) if c is not None else "" for c in row]
                for row in ws.iter_rows(values_only=True)
            ]
            # _table_to_md already renders an empty sheet as "(empty)".
            sheets.append(f"## Sheet: {sheet_name}\n{_table_to_md(rows)}")
    finally:
        # Read-only workbooks hold the underlying archive open until closed;
        # the original code leaked it when extraction raised mid-loop.
        wb.close()
    return f"{_CELLS_ONLY_NOTE}\n\n" + "\n\n".join(sheets)
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def _get_pptx_shape_alt(shape) -> str:
|
|
167
|
+
try:
|
|
168
|
+
return shape._element.nvPicPr.cNvPr.get("descr", "")
|
|
169
|
+
except (AttributeError, KeyError):
|
|
170
|
+
return ""
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def _pptx_text_frame_to_md(text_frame) -> str:
|
|
174
|
+
lines = []
|
|
175
|
+
for para in text_frame.paragraphs:
|
|
176
|
+
parts = []
|
|
177
|
+
for run in para.runs:
|
|
178
|
+
text = run.text
|
|
179
|
+
if not text:
|
|
180
|
+
continue
|
|
181
|
+
bold = run.font.bold
|
|
182
|
+
italic = run.font.italic
|
|
183
|
+
if bold and italic:
|
|
184
|
+
text = f"***{text}***"
|
|
185
|
+
elif bold:
|
|
186
|
+
text = f"**{text}**"
|
|
187
|
+
elif italic:
|
|
188
|
+
text = f"*{text}*"
|
|
189
|
+
parts.append(text)
|
|
190
|
+
line = "".join(parts)
|
|
191
|
+
if not line.strip():
|
|
192
|
+
continue
|
|
193
|
+
level = para.level or 0
|
|
194
|
+
if level > 0:
|
|
195
|
+
line = " " * level + "- " + line
|
|
196
|
+
lines.append(line)
|
|
197
|
+
return "\n".join(lines)
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
def _extract_pptx(file_bytes: bytes) -> str:
    """Convert a PPTX deck (raw bytes) to Markdown, one "## Slide N" section each.

    Per slide: the title (if any) goes in the section header, pictures become
    [IMAGE] placeholders with alt text, tables are rendered via _table_to_md,
    other text frames via _pptx_text_frame_to_md, and speaker notes are
    appended as a "Notes:" line. The result is prefixed with _TEXT_ONLY_NOTE.
    """
    from pptx import Presentation
    from pptx.enum.shapes import MSO_SHAPE_TYPE

    prs = Presentation(io.BytesIO(file_bytes))
    slides = []
    for i, slide in enumerate(prs.slides, 1):
        title = ""
        if slide.shapes.title and slide.shapes.title.text.strip():
            title = slide.shapes.title.text.strip()

        body_parts = []
        for shape in slide.shapes:
            # The title placeholder is already rendered in the slide header.
            if shape == slide.shapes.title:
                continue
            if shape.shape_type == MSO_SHAPE_TYPE.PICTURE:
                alt = _get_pptx_shape_alt(shape)
                body_parts.append(f"[IMAGE: {alt}]" if alt else "[IMAGE]")
            elif shape.has_table:
                rows = []
                for row in shape.table.rows:
                    rows.append([cell.text for cell in row.cells])
                body_parts.append(_table_to_md(rows))
            elif shape.has_text_frame:
                text = _pptx_text_frame_to_md(shape.text_frame)
                if text:
                    body_parts.append(text)

        notes = ""
        if slide.has_notes_slide and slide.notes_slide.notes_text_frame:
            notes = slide.notes_slide.notes_text_frame.text.strip()

        header = f"## Slide {i}: {title}" if title else f"## Slide {i}"
        parts = [header]
        if body_parts:
            parts.append("\n".join(body_parts))
        if notes:
            parts.append(f"Notes: {notes}")
        slides.append("\n".join(parts))

    return f"{_TEXT_ONLY_NOTE}\n\n" + "\n\n".join(slides)
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
# ---------------------------------------------------------------------------
|
|
244
|
+
# Handler wrappers — return content block dicts
|
|
245
|
+
# ---------------------------------------------------------------------------
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
def _import_pdf(f: InputFile) -> ContentBlock:
    """Import a PDF file as a text content block; wraps extraction failures."""
    try:
        extracted = _extract_pdf(f["file_bytes"])
        return {"type": "text", "text": f"[File: {f['filename']}]\n\n{extracted}"}
    except Exception as exc:
        raise FileImportToolException(f"Failed to extract text from {f['filename']}") from exc
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def _import_docx(f: InputFile) -> ContentBlock:
    """Import a DOCX file as a text content block; wraps extraction failures."""
    try:
        extracted = _extract_docx(f["file_bytes"])
        return {"type": "text", "text": f"[File: {f['filename']}]\n\n{extracted}"}
    except Exception as exc:
        raise FileImportToolException(f"Failed to extract text from {f['filename']}") from exc
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
def _import_xlsx(f: InputFile) -> ContentBlock:
    """Import an XLSX file as a text content block; wraps extraction failures."""
    try:
        extracted = _extract_xlsx(f["file_bytes"])
        return {"type": "text", "text": f"[File: {f['filename']}]\n\n{extracted}"}
    except Exception as exc:
        raise FileImportToolException(f"Failed to extract text from {f['filename']}") from exc
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def _import_pptx(f: InputFile) -> ContentBlock:
    """Import a PPTX file as a text content block; wraps extraction failures."""
    try:
        extracted = _extract_pptx(f["file_bytes"])
        return {"type": "text", "text": f"[File: {f['filename']}]\n\n{extracted}"}
    except Exception as exc:
        raise FileImportToolException(f"Failed to extract text from {f['filename']}") from exc
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
def _import_text(f: InputFile) -> ContentBlock:
    """Import a plain-text-like file; undecodable bytes become U+FFFD."""
    try:
        decoded = f["file_bytes"].decode("utf-8", errors="replace")
        return {"type": "text", "text": f"[File: {f['filename']}]\n\n{decoded}"}
    except Exception as exc:
        raise FileImportToolException(f"Failed to extract text from {f['filename']}") from exc
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
def _import_image(f: InputFile) -> ContentBlock:
    """Import an image file as a base64-encoded image content block."""
    try:
        encoded = base64.standard_b64encode(f["file_bytes"]).decode("utf-8")
        return {
            "type": "image",
            "source": {
                "type": "base64",
                "media_type": f["mimetype"],
                "data": encoded,
            },
        }
    except Exception as exc:
        raise FileImportToolException(f"Failed to process image {f['filename']}") from exc
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
# Declarative registry of the built-in import handlers. Each entry is consumed
# by Provider below: "name" is the tool name, "mimes" the accepted MIME types,
# "max_size" the per-file byte limit (10 MB for every format), and "handler"
# the callable that converts an InputFile into a content block.
_HANDLER_MANIFEST = [
    {
        "name": "import_pdf",
        "mimes": {"application/pdf"},
        "max_size": 10_000_000,
        "handler": _import_pdf,
    },
    {
        "name": "import_docx",
        "mimes": {MIME_DOCX},
        "max_size": 10_000_000,
        "handler": _import_docx,
    },
    {
        "name": "import_xlsx",
        "mimes": {MIME_XLSX},
        "max_size": 10_000_000,
        "handler": _import_xlsx,
    },
    {
        "name": "import_pptx",
        "mimes": {MIME_PPTX},
        "max_size": 10_000_000,
        "handler": _import_pptx,
    },
    {
        "name": "import_text",
        "mimes": {"text/plain", "text/csv", "text/markdown"},
        "max_size": 10_000_000,
        "handler": _import_text,
    },
    {
        "name": "import_image",
        "mimes": {"image/png", "image/jpeg", "image/gif", "image/webp"},
        "max_size": 10_000_000,
        "handler": _import_image,
    },
]
|
|
341
|
+
|
|
342
|
+
|
|
343
|
+
class Provider(BaseFileImporterProvider):
    """Built-in document import handlers (PDF, DOCX, XLSX, PPTX, text, images)."""

    def __init__(self, allowed_functions: list[str], **kwargs):
        super().__init__(allowed_functions, **kwargs)
        # Tool-name -> handler callable, built once from the static manifest.
        self._dispatch = {entry["name"]: entry["handler"] for entry in _HANDLER_MANIFEST}

    def _get_all_tools(self) -> list[dict]:
        # Expose the full manifest; filtering by allowed_functions is presumably
        # applied by the base class (confirm in BaseFileImporterProvider).
        return _HANDLER_MANIFEST

    async def call_tool(
        self,
        name: str,
        arguments: dict,
        user_conversation_context: UserConversationContext,
        storage: BaseStorageProvider,
    ) -> ContentBlock:
        """Dispatch *arguments* (an InputFile dict) to the named import handler."""
        handler = self._dispatch.get(name)
        if handler is None:
            raise FileImportToolException(f"Unknown import handler: {name}")
        return handler(arguments)
|
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
"""MCP over HTTP/SSE tool provider."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import base64
|
|
5
|
+
import contextlib
|
|
6
|
+
import json
|
|
7
|
+
import logging
|
|
8
|
+
from urllib.parse import unquote, urlparse
|
|
9
|
+
|
|
10
|
+
import httpx
|
|
11
|
+
import mcp
|
|
12
|
+
from mcp.client.streamable_http import streamable_http_client
|
|
13
|
+
from mcp.types import BlobResourceContents, EmbeddedResource, ImageContent
|
|
14
|
+
|
|
15
|
+
from slack_agents import UserConversationContext
|
|
16
|
+
from slack_agents.llm import CHARS_PER_TOKEN
|
|
17
|
+
from slack_agents.storage.base import BaseStorageProvider
|
|
18
|
+
from slack_agents.tools.base import BaseToolProvider, ToolResult
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _uri_to_filename(uri: str) -> str:
|
|
24
|
+
"""Extract a filename from an MCP resource URI."""
|
|
25
|
+
parsed = urlparse(str(uri))
|
|
26
|
+
path = unquote(parsed.path)
|
|
27
|
+
name = path.rsplit("/", 1)[-1] if "/" in path else path
|
|
28
|
+
return name or "file"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class Provider(BaseToolProvider):
    """MCP over HTTP tool provider. Connects to a single MCP server."""

    # Backoff schedule (seconds) between connection attempts in initialize().
    DEFAULT_INIT_RETRIES = [5, 10, 30]

    def __init__(
        self,
        url: str,
        allowed_functions: list[str],
        headers: dict | None = None,
        init_retries: list[int | float] | None = None,
    ):
        super().__init__(allowed_functions)
        self._url = url
        self._headers = headers or {}
        self._init_retries = init_retries if init_retries is not None else self.DEFAULT_INIT_RETRIES
        # Tool name -> session that serves it (populated by initialize()).
        self._tool_map: dict[str, mcp.ClientSession] = {}
        # Raw tool definitions as discovered from the server (unfiltered).
        self._all_tools: list[dict] = []
        self._session: mcp.ClientSession | None = None
        # Owns the HTTP client, transport, and session lifetimes together.
        self._exit_stack: contextlib.AsyncExitStack | None = None

    def _get_all_tools(self) -> list[dict]:
        return self._all_tools

    async def _connect(self) -> None:
        """Establish connection to the MCP server."""
        http_client = httpx.AsyncClient(
            headers=self._headers,
            # Long read timeout: MCP tool calls can stream for minutes.
            timeout=httpx.Timeout(30.0, read=300.0),
            follow_redirects=True,
        )

        # The stack is assigned before entering contexts so that initialize()
        # can clean up partially-entered state when a step below fails.
        stack = contextlib.AsyncExitStack()
        self._exit_stack = stack

        await stack.enter_async_context(http_client)
        read_stream, write_stream, _get_session_id = await stack.enter_async_context(
            streamable_http_client(url=self._url, http_client=http_client)
        )

        session = mcp.ClientSession(read_stream, write_stream)
        await stack.enter_async_context(session)
        await session.initialize()
        self._session = session

    async def initialize(self) -> None:
        """Connect to the MCP server and discover tools, retrying on connection errors."""
        # One initial attempt plus one attempt per configured backoff entry.
        max_attempts = 1 + len(self._init_retries)
        for attempt in range(1, max_attempts + 1):
            try:
                await self._connect()
                break
            except (
                httpx.ConnectError,
                httpx.ConnectTimeout,
                OSError,
                # NOTE(review): CancelledError is treated here as a retryable
                # connect failure — presumably because the transport's task
                # group cancels itself on connection errors. Confirm this
                # cannot swallow an external cancellation of this task.
                asyncio.CancelledError,
            ) as exc:
                if attempt == max_attempts:
                    logger.error(
                        "MCP %s: failed after %d attempts: %s", self._url, max_attempts, exc
                    )
                    raise
                backoff = self._init_retries[attempt - 1]
                logger.warning(
                    "MCP %s: connection attempt %d/%d failed (%s), retrying in %gs",
                    self._url,
                    attempt,
                    max_attempts,
                    exc,
                    backoff,
                )
                # Clean up partial state before retrying
                if self._exit_stack:
                    with contextlib.suppress(BaseException):
                        await self._exit_stack.aclose()
                    self._exit_stack = None
                await asyncio.sleep(backoff)

        tools_result = await self._session.list_tools()

        # Accumulate a rough token estimate of the tool schemas for logging.
        server_tokens = 0
        for tool in tools_result.tools:
            tool_def = {
                "name": tool.name,
                "description": tool.description or "",
                "input_schema": tool.inputSchema or {"type": "object", "properties": {}},
            }
            # Rough estimate: JSON character count divided by CHARS_PER_TOKEN.
            tool_tokens = len(json.dumps(tool_def)) // CHARS_PER_TOKEN
            server_tokens += tool_tokens
            self._tool_map[tool.name] = self._session
            self._all_tools.append(tool_def)

        # Log filtered tools
        allowed = self.tools
        filtered_count = len(self._all_tools) - len(allowed)
        if filtered_count:
            logger.info(
                "MCP %s: %d tools loaded, %d filtered out, ~%d tokens",
                self._url,
                len(allowed),
                filtered_count,
                server_tokens,
            )
        else:
            logger.info(
                "MCP %s: %d tools loaded, ~%d tokens",
                self._url,
                len(allowed),
                server_tokens,
            )

    async def call_tool(
        self,
        tool_name: str,
        arguments: dict,
        user_conversation_context: UserConversationContext,
        storage: BaseStorageProvider,
    ) -> ToolResult:
        """Execute a tool call and return the result."""
        session = self._tool_map.get(tool_name)
        if not session:
            return {"content": f"Unknown tool: {tool_name}", "is_error": True, "files": []}

        logger.info("Calling MCP tool %s", tool_name)

        try:
            result = await session.call_tool(name=tool_name, arguments=arguments)
            text_parts = []
            files = []

            # Split the MCP content items into plain text vs. downloadable files.
            for content in result.content:
                if isinstance(content, EmbeddedResource) and isinstance(
                    content.resource, BlobResourceContents
                ):
                    # Binary resource: decode the blob and name it from its URI.
                    data = base64.b64decode(content.resource.blob)
                    filename = _uri_to_filename(content.resource.uri)
                    mime = content.resource.mimeType or "application/octet-stream"
                    files.append({"data": data, "filename": filename, "mimeType": mime})
                elif isinstance(content, ImageContent):
                    data = base64.b64decode(content.data)
                    # Derive a file extension from the MIME subtype; default png.
                    ext = content.mimeType.split("/")[-1] if content.mimeType else "png"
                    files.append(
                        {
                            "data": data,
                            "filename": f"image.{ext}",
                            "mimeType": content.mimeType,
                        }
                    )
                elif hasattr(content, "text"):
                    text_parts.append(content.text)
                else:
                    # Unknown content type — fall back to its string form.
                    text_parts.append(str(content))

            return {
                "content": "\n".join(text_parts) if text_parts else "(empty result)",
                "is_error": bool(result.isError),
                "files": files,
            }
        except Exception as e:
            # Tool failures are reported as error results, not raised, so one
            # bad call doesn't abort the agent loop.
            logger.exception("MCP tool call failed: %s", tool_name)
            return {"content": f"Tool execution error: {e}", "is_error": True, "files": []}

    async def close(self) -> None:
        # Tear down transport/session and reset discovery state so the
        # provider could be re-initialized.
        if self._exit_stack:
            try:
                await self._exit_stack.aclose()
            except Exception:
                logger.exception("Error closing MCP connection")
            self._exit_stack = None
        self._session = None
        self._tool_map.clear()
        self._all_tools.clear()
|