python-slack-agents 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- python_slack_agents-0.5.0.dist-info/METADATA +318 -0
- python_slack_agents-0.5.0.dist-info/RECORD +54 -0
- python_slack_agents-0.5.0.dist-info/WHEEL +4 -0
- python_slack_agents-0.5.0.dist-info/entry_points.txt +2 -0
- python_slack_agents-0.5.0.dist-info/licenses/LICENSE +202 -0
- slack_agents/Dockerfile +25 -0
- slack_agents/__init__.py +25 -0
- slack_agents/access/__init__.py +0 -0
- slack_agents/access/allow_all.py +9 -0
- slack_agents/access/allow_list.py +19 -0
- slack_agents/access/base.py +20 -0
- slack_agents/agent_loop.py +208 -0
- slack_agents/cli/__init__.py +48 -0
- slack_agents/cli/build_docker.py +94 -0
- slack_agents/cli/export_conversations.py +84 -0
- slack_agents/cli/export_conversations_html.py +605 -0
- slack_agents/cli/export_usage.py +81 -0
- slack_agents/cli/export_usage_csv.py +151 -0
- slack_agents/cli/healthcheck.py +67 -0
- slack_agents/cli/run.py +16 -0
- slack_agents/config.py +113 -0
- slack_agents/conversations.py +273 -0
- slack_agents/files.py +59 -0
- slack_agents/llm/__init__.py +1 -0
- slack_agents/llm/anthropic.py +207 -0
- slack_agents/llm/base.py +82 -0
- slack_agents/llm/openai.py +283 -0
- slack_agents/main.py +55 -0
- slack_agents/observability.py +175 -0
- slack_agents/py.typed +0 -0
- slack_agents/scripts/__init__.py +0 -0
- slack_agents/scripts/download_fonts.py +39 -0
- slack_agents/slack/__init__.py +0 -0
- slack_agents/slack/actions.py +119 -0
- slack_agents/slack/agent.py +688 -0
- slack_agents/slack/canvases.py +225 -0
- slack_agents/slack/files.py +102 -0
- slack_agents/slack/format.py +55 -0
- slack_agents/slack/streaming.py +70 -0
- slack_agents/slack/streaming_formatter.py +182 -0
- slack_agents/slack/tool_blocks.py +97 -0
- slack_agents/storage/__init__.py +0 -0
- slack_agents/storage/base.py +304 -0
- slack_agents/storage/postgres.py +612 -0
- slack_agents/storage/postgres.sql +120 -0
- slack_agents/storage/sqlite.py +473 -0
- slack_agents/storage/sqlite.sql +73 -0
- slack_agents/tools/__init__.py +0 -0
- slack_agents/tools/base.py +140 -0
- slack_agents/tools/canvas.py +401 -0
- slack_agents/tools/file_exporter.py +582 -0
- slack_agents/tools/file_importer.py +363 -0
- slack_agents/tools/mcp_http.py +203 -0
- slack_agents/tools/user_context.py +239 -0
|
@@ -0,0 +1,582 @@
|
|
|
1
|
+
"""Built-in tool: document generation (PDF, DOCX, XLSX, CSV, PPTX).
|
|
2
|
+
|
|
3
|
+
Exports a Provider class that subclasses BaseToolProvider.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import csv
|
|
7
|
+
import io
|
|
8
|
+
import logging
|
|
9
|
+
import re
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
from docx import Document
|
|
13
|
+
from fpdf import FPDF
|
|
14
|
+
from openpyxl import Workbook
|
|
15
|
+
|
|
16
|
+
from slack_agents import UserConversationContext
|
|
17
|
+
from slack_agents.storage.base import BaseStorageProvider
|
|
18
|
+
from slack_agents.tools.base import BaseToolProvider, ToolResult
|
|
19
|
+
|
|
20
|
+
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

# Bundled DejaVu TTF fonts used for Unicode-capable PDF output.
# The directory is four parents up from this file — presumably a
# top-level "fonts" directory populated by
# `python -m slack_agents.scripts.download_fonts` (see _make_pdf's
# fallback warning); TODO confirm against that script's target path.
_FONT_DIR = Path(__file__).resolve().parent.parent.parent.parent / "fonts"
_FONT_REGULAR = _FONT_DIR / "DejaVuSans.ttf"
_FONT_BOLD = _FONT_DIR / "DejaVuSans-Bold.ttf"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def _sanitize_filename(name: str, ext: str) -> str:
|
|
28
|
+
"""Sanitize a string for use as a filename."""
|
|
29
|
+
name = re.sub(r"[^\w\s-]", "", name).strip()
|
|
30
|
+
name = re.sub(r"\s+", "_", name)
|
|
31
|
+
return f"{name or 'document'}.{ext}"
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _parse_body_lines(body: str):
|
|
35
|
+
"""Parse markdown-ish body text into structured elements.
|
|
36
|
+
|
|
37
|
+
Yields (type, data) tuples:
|
|
38
|
+
("h1", "Heading text")
|
|
39
|
+
("h2", "Sub-heading text")
|
|
40
|
+
("h3", "Sub-sub-heading text")
|
|
41
|
+
("bullet", "List item text")
|
|
42
|
+
("numbered", "Numbered item text")
|
|
43
|
+
("table", [["cell", ...], ...])
|
|
44
|
+
("paragraph", "Normal text")
|
|
45
|
+
"""
|
|
46
|
+
_numbered_re = re.compile(r"^\d+\.\s")
|
|
47
|
+
_separator_re = re.compile(r"^:?-+:?$")
|
|
48
|
+
|
|
49
|
+
def _is_table_line(line: str) -> bool:
|
|
50
|
+
s = line.strip()
|
|
51
|
+
return s.startswith("|") and s.endswith("|")
|
|
52
|
+
|
|
53
|
+
def _parse_table_line(line: str) -> list[str]:
|
|
54
|
+
return [cell.strip() for cell in line.strip().strip("|").split("|")]
|
|
55
|
+
|
|
56
|
+
def _is_separator(cells: list[str]) -> bool:
|
|
57
|
+
return all(_separator_re.match(c) for c in cells)
|
|
58
|
+
|
|
59
|
+
table_buf: list[list[str]] = []
|
|
60
|
+
|
|
61
|
+
for line in body.split("\n"):
|
|
62
|
+
stripped = line.strip()
|
|
63
|
+
|
|
64
|
+
if _is_table_line(stripped):
|
|
65
|
+
cells = _parse_table_line(stripped)
|
|
66
|
+
if not _is_separator(cells):
|
|
67
|
+
table_buf.append(cells)
|
|
68
|
+
continue
|
|
69
|
+
|
|
70
|
+
if table_buf:
|
|
71
|
+
yield ("table", table_buf)
|
|
72
|
+
table_buf = []
|
|
73
|
+
|
|
74
|
+
if not stripped:
|
|
75
|
+
yield ("paragraph", "")
|
|
76
|
+
elif stripped.startswith("### "):
|
|
77
|
+
yield ("h3", stripped[4:])
|
|
78
|
+
elif stripped.startswith("## "):
|
|
79
|
+
yield ("h2", stripped[3:])
|
|
80
|
+
elif stripped.startswith("# "):
|
|
81
|
+
yield ("h1", stripped[2:])
|
|
82
|
+
elif stripped.startswith("- ") or stripped.startswith("* "):
|
|
83
|
+
yield ("bullet", stripped[2:])
|
|
84
|
+
elif _numbered_re.match(stripped):
|
|
85
|
+
yield ("numbered", _numbered_re.sub("", stripped, count=1))
|
|
86
|
+
else:
|
|
87
|
+
yield ("paragraph", stripped)
|
|
88
|
+
|
|
89
|
+
if table_buf:
|
|
90
|
+
yield ("table", table_buf)
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _make_pdf() -> FPDF:
    """Create an FPDF instance with Unicode font support.

    Registers the bundled DejaVu TTF fonts (regular + bold) when both
    files exist; otherwise logs a warning and falls back to the core
    Helvetica font, which is latin-1 only. The chosen family name is
    stashed on the instance as ``_font_family`` for _pdf_font().
    """
    pdf = FPDF()
    have_fonts = _FONT_REGULAR.exists() and _FONT_BOLD.exists()
    if have_fonts:
        pdf.add_font("DejaVu", "", str(_FONT_REGULAR))
        pdf.add_font("DejaVu", "B", str(_FONT_BOLD))
    else:
        logger.warning(
            "DejaVu fonts not found at %s — falling back to Helvetica (latin-1 only). "
            "Run: python -m slack_agents.scripts.download_fonts",
            _FONT_DIR,
        )
    pdf._font_family = "DejaVu" if have_fonts else "Helvetica"  # noqa: SLF001
    return pdf
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def _pdf_font(pdf: FPDF) -> str:
    """Return the font family stashed on *pdf* by _make_pdf ("DejaVu" or "Helvetica")."""
    return pdf._font_family  # noqa: SLF001
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
# Splits text into inline-formatting spans and plain runs. The single
# capture group matches either a "**bold**" span or a "*italic*" span;
# the lookarounds on the single-star branch prevent it from matching
# the stars that belong to a "**" marker. re.split keeps the captured
# markers in the result, so consumers see both kinds of chunks.
_RICH_TEXT_RE = re.compile(r"(\*\*.*?\*\*|\*(?!\*).*?(?<!\*)\*(?!\*))")
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def _write_rich_text(pdf: FPDF, text: str, line_height: float) -> None:
    """Write *text* to *pdf* at 11pt, honouring **bold** and *italic* markers.

    Only the core Helvetica font gets a real italic face here; the
    bundled TTF family has no "I" style registered, so italic spans are
    written in the regular face when a TTF family is active.
    """
    family = _pdf_font(pdf)
    has_italic_face = family == "Helvetica"
    for chunk in _RICH_TEXT_RE.split(text):
        if chunk.startswith("**") and chunk.endswith("**"):
            pdf.set_font(family, "B", 11)
            pdf.write(line_height, chunk[2:-2])
            pdf.set_font(family, "", 11)
        elif chunk.startswith("*") and chunk.endswith("*") and len(chunk) > 2:
            if has_italic_face:
                pdf.set_font(family, "I", 11)
            pdf.write(line_height, chunk[1:-1])
            if has_italic_face:
                pdf.set_font(family, "", 11)
        else:
            pdf.write(line_height, chunk)
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
async def _export_pdf(arguments: dict) -> dict:
    """Render *arguments* into a PDF and return a tool-result dict.

    Expects ``arguments["title"]`` (also used as the filename stem) and
    ``arguments["body"]`` (markdown-ish text understood by
    _parse_body_lines). Returns a dict with "content", "is_error", and
    one "files" entry holding the PDF bytes.
    """
    title = arguments["title"]
    body = arguments["body"]
    filename = _sanitize_filename(title, "pdf")

    pdf = _make_pdf()
    font = _pdf_font(pdf)  # "DejaVu" when bundled fonts exist, else "Helvetica"
    pdf.set_auto_page_break(auto=True, margin=15)
    pdf.add_page()

    # Document title: one bold 18pt line at the top of the first page.
    pdf.set_font(font, "B", 18)
    pdf.cell(0, 12, title, new_x="LMARGIN", new_y="NEXT")
    pdf.ln(4)

    pdf.set_font(font, "", 11)
    is_ttf = font != "Helvetica"
    # Core Helvetica is latin-1 only, so use "-" instead of U+2022 there.
    bullet_char = "\u2022" if is_ttf else "-"
    line_height = 6

    for elem_type, data in _parse_body_lines(body):
        if elem_type == "table":
            pdf.ln(2)
            # fpdf2 table context manager; the first parsed row becomes
            # the header row.
            with pdf.table(first_row_as_headings=True) as table:
                for row_cells in data:
                    row = table.row()
                    for cell in row_cells:
                        row.cell(cell)
            pdf.ln(2)
            pdf.set_font(font, "", 11)  # restore body font after table styling
        elif elem_type == "h1":
            pdf.ln(4)
            pdf.set_font(font, "B", 15)
            pdf.cell(0, 8, data, new_x="LMARGIN", new_y="NEXT")
            pdf.set_font(font, "", 11)
        elif elem_type == "h2":
            pdf.ln(2)
            pdf.set_font(font, "B", 13)
            pdf.cell(0, 7, data, new_x="LMARGIN", new_y="NEXT")
            pdf.set_font(font, "", 11)
        elif elem_type == "h3":
            pdf.ln(2)
            pdf.set_font(font, "B", 12)
            pdf.cell(0, 7, data, new_x="LMARGIN", new_y="NEXT")
            pdf.set_font(font, "", 11)
        elif elem_type == "bullet":
            pdf.cell(6)  # left indent for the bullet line
            _write_rich_text(pdf, f"{bullet_char} {data}", line_height)
            pdf.ln(line_height)
        elif elem_type == "numbered":
            pdf.cell(6)  # left indent; the "N. " prefix was stripped by the parser
            _write_rich_text(pdf, data, line_height)
            pdf.ln(line_height)
        elif data:
            # Non-empty paragraph with inline **bold**/*italic* support.
            _write_rich_text(pdf, data, line_height)
            pdf.ln(line_height)
        else:
            # Blank source line: half-height vertical gap.
            pdf.ln(line_height // 2)

    # NOTE(review): rebinding `data` here shadows the loop variable above.
    data = pdf.output()
    pages = pdf.pages_count
    return {
        "content": f"Created (unknown) ({pages} page{'s' if pages != 1 else ''})",
        "is_error": False,
        "files": [{"data": bytes(data), "filename": filename, "mimeType": "application/pdf"}],
    }
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def _add_rich_paragraph(doc: Document, text: str) -> None:
    """Append a new paragraph to *doc*, rendering **bold**/*italic* markers."""
    _add_rich_runs(doc.add_paragraph(), text)
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def _add_rich_runs(para, text: str) -> None:
    """Split *text* on **bold**/*italic* markers and add styled runs to *para*."""
    for chunk in _RICH_TEXT_RE.split(text):
        is_bold = chunk.startswith("**") and chunk.endswith("**")
        if is_bold:
            para.add_run(chunk[2:-2]).bold = True
        elif chunk.startswith("*") and chunk.endswith("*") and len(chunk) > 2:
            para.add_run(chunk[1:-1]).italic = True
        else:
            para.add_run(chunk)
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
async def _export_docx(arguments: dict) -> dict:
    """Render *arguments* into a Word (.docx) file and return a tool-result dict.

    Expects ``arguments["title"]`` (also used as the filename stem) and
    ``arguments["body"]`` (markdown-ish text understood by
    _parse_body_lines). Returns a dict with "content", "is_error", and
    one "files" entry holding the serialized document bytes.
    """
    title = arguments["title"]
    body = arguments["body"]
    filename = _sanitize_filename(title, "docx")

    doc = Document()
    doc.add_heading(title, level=0)  # level 0 renders as the Title style

    for elem_type, data in _parse_body_lines(body):
        if elem_type == "table":
            if data:
                tbl = doc.add_table(rows=len(data), cols=len(data[0]), style="Table Grid")
                for i, row_cells in enumerate(data):
                    for j, cell_text in enumerate(row_cells):
                        tbl.rows[i].cells[j].text = cell_text
        elif elem_type == "h1":
            doc.add_heading(data, level=1)
        elif elem_type == "h2":
            doc.add_heading(data, level=2)
        elif elem_type == "h3":
            doc.add_heading(data, level=3)
        elif elem_type == "bullet":
            doc.add_paragraph(data, style="List Bullet")
        elif elem_type == "numbered":
            doc.add_paragraph(data, style="List Number")
        elif data:
            # Plain non-empty paragraph with inline **bold**/*italic* runs.
            _add_rich_paragraph(doc, data)

    buf = io.BytesIO()
    doc.save(buf)
    buf_data = buf.getvalue()

    return {
        # FIX: was f"Created (unknown)" — an f-string with no placeholders
        # (ruff F541). Literal text is unchanged.
        "content": "Created (unknown)",
        "is_error": False,
        "files": [
            {
                "data": buf_data,
                "filename": filename,
                "mimeType": (
                    "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
                ),
            }
        ],
    }
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
async def _export_xlsx(arguments: dict) -> dict:
    """Build an Excel (.xlsx) workbook from *arguments* and return a tool-result dict.

    Expects ``arguments["filename"]`` (stem, sanitized here) and
    ``arguments["sheets"]`` — a list of {"name": ..., "rows": [[...]]}
    definitions, one per worksheet.
    """
    filename = _sanitize_filename(arguments["filename"], "xlsx")
    sheet_defs = arguments["sheets"]

    wb = Workbook()
    wb.remove(wb.active)  # drop the default empty sheet; we create our own

    total_rows = 0
    for sheet_def in sheet_defs:
        worksheet = wb.create_sheet(title=sheet_def["name"])
        for data_row in sheet_def["rows"]:
            worksheet.append(data_row)
            total_rows += 1

    out = io.BytesIO()
    wb.save(out)
    workbook_bytes = out.getvalue()

    sheet_count = len(sheet_defs)
    plural = "" if sheet_count == 1 else "s"
    return {
        "content": f"Created (unknown) ({sheet_count} sheet{plural}, {total_rows} rows)",
        "is_error": False,
        "files": [
            {
                "data": workbook_bytes,
                "filename": filename,
                "mimeType": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            }
        ],
    }
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
async def _export_csv(arguments: dict) -> dict:
    """Serialize *arguments* rows to a UTF-8 CSV file and return a tool-result dict.

    Expects ``arguments["filename"]`` (stem, sanitized here) and
    ``arguments["rows"]`` — a list of cell-value rows.
    """
    filename = _sanitize_filename(arguments["filename"], "csv")
    rows = arguments["rows"]

    out = io.StringIO()
    csv.writer(out).writerows(rows)
    payload = out.getvalue().encode("utf-8")

    return {
        "content": f"Created (unknown) ({len(rows)} rows)",
        "is_error": False,
        "files": [{"data": payload, "filename": filename, "mimeType": "text/csv"}],
    }
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
def _pptx_add_rich_text(paragraph, text: str) -> None:
    """Add runs to a pptx *paragraph*, honouring **bold**/*italic* markers."""
    for chunk in _RICH_TEXT_RE.split(text):
        bold = chunk.startswith("**") and chunk.endswith("**")
        italic = (
            not bold and chunk.startswith("*") and chunk.endswith("*") and len(chunk) > 2
        )
        run = paragraph.add_run()
        if bold:
            run.text = chunk[2:-2]
            run.font.bold = True
        elif italic:
            run.text = chunk[1:-1]
            run.font.italic = True
        else:
            run.text = chunk
+
|
|
334
|
+
|
|
335
|
+
async def _export_pptx(arguments: dict) -> dict:
    """Render *arguments* into a PowerPoint (.pptx) presentation.

    Expects ``arguments["title"]`` (used as the filename stem) and
    ``arguments["slides"]`` — a list of {"title": ..., "body": ...}
    dicts; each body is parsed with _parse_body_lines. Returns a dict
    with "content", "is_error", and one "files" entry holding the
    serialized presentation bytes.
    """
    # Imported here rather than at module level — presumably so the
    # module loads without python-pptx installed; TODO confirm.
    from pptx import Presentation
    from pptx.util import Emu, Inches

    title = arguments["title"]
    slides_data = arguments["slides"]
    filename = _sanitize_filename(title, "pptx")

    prs = Presentation()
    for slide_def in slides_data:
        # Layout index 1 of the default template (title + body placeholder).
        layout = prs.slide_layouts[1]
        slide = prs.slides.add_slide(layout)
        slide.shapes.title.text = slide_def["title"]

        body_placeholder = slide.placeholders[1]
        tf = body_placeholder.text_frame
        tf.clear()
        first = True  # reuse the frame's implicit first paragraph exactly once

        elements = list(_parse_body_lines(slide_def.get("body", "")))
        for elem_type, data in elements:
            if elem_type == "table":
                if data:
                    rows_count = len(data)
                    cols_count = len(data[0])
                    # Position the table just below the body placeholder.
                    ph_left = body_placeholder.left
                    ph_top = body_placeholder.top + body_placeholder.height + Inches(0.1)
                    ph_width = body_placeholder.width
                    row_height = Emu(370000)  # ~0.4 inch (914400 EMU = 1 inch)
                    tbl_height = row_height * rows_count
                    shape = slide.shapes.add_table(
                        rows_count, cols_count, ph_left, ph_top, ph_width, tbl_height
                    )
                    for i, row_cells in enumerate(data):
                        for j, cell_text in enumerate(row_cells):
                            shape.table.cell(i, j).text = cell_text
                continue

            # Blank source lines produce no slide paragraph at all.
            if elem_type == "paragraph" and not data:
                continue

            if first:
                p = tf.paragraphs[0]
                first = False
            else:
                p = tf.add_paragraph()

            if elem_type == "bullet":
                p.level = 1  # indent bullets one level below plain text

            _pptx_add_rich_text(p, data)

    buf = io.BytesIO()
    prs.save(buf)
    buf_data = buf.getvalue()

    slide_count = len(slides_data)
    return {
        "content": (f"Created (unknown) ({slide_count} slide{'s' if slide_count != 1 else ''})"),
        "is_error": False,
        "files": [
            {
                "data": buf_data,
                "filename": filename,
                "mimeType": (
                    "application/vnd.openxmlformats-officedocument.presentationml.presentation"
                ),
            }
        ],
    }
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
# Declarative manifest of every export tool this module exposes. Each
# entry holds the LLM-facing "name"/"description"/"input_schema" plus
# the async "handler" coroutine that Provider.call_tool dispatches to.
_TOOL_MANIFEST = [
    # PDF: title + markdown-ish body.
    {
        "name": "export_pdf",
        "description": (
            "Generate a simple PDF document with a basic layout meant as a starting point. "
            "The body supports formatting: #, ##, ### for headings, **bold**, *italic*, "
            "lines starting with - for bullet lists, numbered lists (1. ), and markdown "
            "tables (| col1 | col2 | with |---| separator). "
            "No images, charts, or advanced styling."
        ),
        "input_schema": {
            "type": "object",
            "properties": {
                "title": {"type": "string", "description": "Document title (used as filename)"},
                "body": {
                    "type": "string",
                    "description": "Document body with optional markdown-style formatting",
                },
            },
            "required": ["title", "body"],
        },
        "handler": _export_pdf,
    },
    # Word: same title + body contract as PDF.
    {
        "name": "export_docx",
        "description": (
            "Generate a simple Word (.docx) document with a basic layout meant as a starting "
            "point. The body supports formatting: #, ##, ### for headings, **bold**, *italic*, "
            "lines starting with - for bullet lists, numbered lists (1. ), and markdown "
            "tables (| col1 | col2 | with |---| separator). "
            "No images, charts, or advanced styling."
        ),
        "input_schema": {
            "type": "object",
            "properties": {
                "title": {"type": "string", "description": "Document title (used as filename)"},
                "body": {
                    "type": "string",
                    "description": "Document body with optional markdown-style formatting",
                },
            },
            "required": ["title", "body"],
        },
        "handler": _export_docx,
    },
    # Excel: named sheets of string rows.
    {
        "name": "export_xlsx",
        "description": (
            "Generate a simple Excel (.xlsx) spreadsheet from structured data. "
            "Produces a basic layout meant as a starting point — text and numbers only, "
            "no formulas, charts, or styling."
        ),
        "input_schema": {
            "type": "object",
            "properties": {
                "filename": {
                    "type": "string",
                    "description": "Filename for the spreadsheet (without extension)",
                },
                "sheets": {
                    "type": "array",
                    "description": "List of sheets, each with a name and rows of data",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": {"type": "string", "description": "Sheet name"},
                            "rows": {
                                "type": "array",
                                "description": "Rows of data (first row is typically headers)",
                                "items": {
                                    "type": "array",
                                    "items": {"type": "string"},
                                },
                            },
                        },
                        "required": ["name", "rows"],
                    },
                },
            },
            "required": ["filename", "sheets"],
        },
        "handler": _export_xlsx,
    },
    # CSV: a single flat table of string rows.
    {
        "name": "export_csv",
        "description": "Generate a simple CSV file from rows of data. Plain text only.",
        "input_schema": {
            "type": "object",
            "properties": {
                "filename": {
                    "type": "string",
                    "description": "Filename for the CSV (without extension)",
                },
                "rows": {
                    "type": "array",
                    "description": "Rows of data (first row is typically headers)",
                    "items": {
                        "type": "array",
                        "items": {"type": "string"},
                    },
                },
            },
            "required": ["filename", "rows"],
        },
        "handler": _export_csv,
    },
    # PowerPoint: deck title + per-slide title/body pairs.
    {
        "name": "export_pptx",
        "description": (
            "Generate a simple PowerPoint (.pptx) presentation with a basic layout meant as "
            "a starting point. Provide a title and an array of slides, each with a title and "
            "body. Body supports **bold**, *italic*, lines starting with - for bullet lists, "
            "numbered lists (1. ), #/##/### headings, and markdown tables "
            "(| col1 | col2 | with |---| separator). "
            "No images, charts, or advanced styling."
        ),
        "input_schema": {
            "type": "object",
            "properties": {
                "title": {
                    "type": "string",
                    "description": "Presentation title (used as filename)",
                },
                "slides": {
                    "type": "array",
                    "description": "List of slides, each with a title and body",
                    "items": {
                        "type": "object",
                        "properties": {
                            "title": {"type": "string", "description": "Slide title"},
                            "body": {
                                "type": "string",
                                "description": (
                                    "Slide body text; lines starting with - become bullet points"
                                ),
                            },
                        },
                        "required": ["title"],
                    },
                },
            },
            "required": ["title", "slides"],
        },
        "handler": _export_pptx,
    },
]
|
|
553
|
+
|
|
554
|
+
|
|
555
|
+
class Provider(BaseToolProvider):
    """Built-in document export tools (PDF, DOCX, XLSX, CSV, PPTX)."""

    def __init__(self, allowed_functions: list[str]):
        super().__init__(allowed_functions)
        # Dispatch table: tool name -> async handler from the manifest.
        self._handlers = {entry["name"]: entry["handler"] for entry in _TOOL_MANIFEST}

    def _get_all_tools(self) -> list[dict]:
        """Return the LLM-facing tool definitions (no handlers)."""
        tools: list[dict] = []
        for entry in _TOOL_MANIFEST:
            tools.append(
                {
                    "name": entry["name"],
                    "description": entry["description"],
                    "input_schema": entry["input_schema"],
                }
            )
        return tools

    async def call_tool(
        self,
        name: str,
        arguments: dict,
        user_conversation_context: UserConversationContext,
        storage: BaseStorageProvider,
    ) -> ToolResult:
        """Dispatch *name* to its handler; unknown names and handler
        exceptions come back as error results rather than raising."""
        handler = self._handlers.get(name)
        if handler is None:
            return {"content": f"Unknown tool: {name}", "is_error": True, "files": []}
        try:
            return await handler(arguments)
        except Exception as e:
            # Boundary catch: any handler failure becomes an error result.
            logger.exception("Export tool call failed: %s", name)
            return {"content": f"Tool execution error: {e}", "is_error": True, "files": []}
|