python-slack-agents 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- python_slack_agents-0.5.0.dist-info/METADATA +318 -0
- python_slack_agents-0.5.0.dist-info/RECORD +54 -0
- python_slack_agents-0.5.0.dist-info/WHEEL +4 -0
- python_slack_agents-0.5.0.dist-info/entry_points.txt +2 -0
- python_slack_agents-0.5.0.dist-info/licenses/LICENSE +202 -0
- slack_agents/Dockerfile +25 -0
- slack_agents/__init__.py +25 -0
- slack_agents/access/__init__.py +0 -0
- slack_agents/access/allow_all.py +9 -0
- slack_agents/access/allow_list.py +19 -0
- slack_agents/access/base.py +20 -0
- slack_agents/agent_loop.py +208 -0
- slack_agents/cli/__init__.py +48 -0
- slack_agents/cli/build_docker.py +94 -0
- slack_agents/cli/export_conversations.py +84 -0
- slack_agents/cli/export_conversations_html.py +605 -0
- slack_agents/cli/export_usage.py +81 -0
- slack_agents/cli/export_usage_csv.py +151 -0
- slack_agents/cli/healthcheck.py +67 -0
- slack_agents/cli/run.py +16 -0
- slack_agents/config.py +113 -0
- slack_agents/conversations.py +273 -0
- slack_agents/files.py +59 -0
- slack_agents/llm/__init__.py +1 -0
- slack_agents/llm/anthropic.py +207 -0
- slack_agents/llm/base.py +82 -0
- slack_agents/llm/openai.py +283 -0
- slack_agents/main.py +55 -0
- slack_agents/observability.py +175 -0
- slack_agents/py.typed +0 -0
- slack_agents/scripts/__init__.py +0 -0
- slack_agents/scripts/download_fonts.py +39 -0
- slack_agents/slack/__init__.py +0 -0
- slack_agents/slack/actions.py +119 -0
- slack_agents/slack/agent.py +688 -0
- slack_agents/slack/canvases.py +225 -0
- slack_agents/slack/files.py +102 -0
- slack_agents/slack/format.py +55 -0
- slack_agents/slack/streaming.py +70 -0
- slack_agents/slack/streaming_formatter.py +182 -0
- slack_agents/slack/tool_blocks.py +97 -0
- slack_agents/storage/__init__.py +0 -0
- slack_agents/storage/base.py +304 -0
- slack_agents/storage/postgres.py +612 -0
- slack_agents/storage/postgres.sql +120 -0
- slack_agents/storage/sqlite.py +473 -0
- slack_agents/storage/sqlite.sql +73 -0
- slack_agents/tools/__init__.py +0 -0
- slack_agents/tools/base.py +140 -0
- slack_agents/tools/canvas.py +401 -0
- slack_agents/tools/file_exporter.py +582 -0
- slack_agents/tools/file_importer.py +363 -0
- slack_agents/tools/mcp_http.py +203 -0
- slack_agents/tools/user_context.py +239 -0
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""Access provider that checks user against an allow list of user IDs."""
|
|
2
|
+
|
|
3
|
+
from slack_agents import UserConversationContext
|
|
4
|
+
from slack_agents.access.base import (
|
|
5
|
+
AccessDenied,
|
|
6
|
+
AccessGranted,
|
|
7
|
+
BaseAccessProvider,
|
|
8
|
+
)
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Provider(BaseAccessProvider):
    """Access provider that grants access only to user IDs on a fixed allow list."""

    def __init__(self, *, userid_list: list[str], deny_message: str) -> None:
        # Stored as a set so membership checks are O(1).
        self._userid_list = set(userid_list)
        self._deny_message = deny_message

    async def check_access(self, *, context: UserConversationContext) -> AccessGranted:
        """Grant access iff the context's user_id is on the allow list.

        Raises AccessDenied with the configured message otherwise.
        """
        if context["user_id"] not in self._userid_list:
            raise AccessDenied(self._deny_message)
        return AccessGranted()
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
"""Abstract access-control provider interface."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
from typing import TypedDict
|
|
5
|
+
|
|
6
|
+
from slack_agents import UserConversationContext
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class AccessGranted(TypedDict):
    """Empty marker mapping returned when an access check succeeds."""
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class AccessDenied(Exception):
    """Raised by an access provider when a user is denied access."""
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class BaseAccessProvider(ABC):
    """Abstract interface for pluggable access-control providers."""

    @abstractmethod
    async def check_access(self, *, context: UserConversationContext) -> AccessGranted:
        """Check access. Returns AccessGranted on success, raises AccessDenied on denial."""
|
|
@@ -0,0 +1,208 @@
|
|
|
1
|
+
"""Core agent loop: LLM -> tools -> LLM -> ... until done."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import json
|
|
5
|
+
import logging
|
|
6
|
+
from typing import AsyncIterator
|
|
7
|
+
|
|
8
|
+
from slack_agents import UserConversationContext
|
|
9
|
+
from slack_agents.llm import CHARS_PER_TOKEN
|
|
10
|
+
from slack_agents.llm.base import BaseLLMProvider, LLMResponse, Message, StreamEvent
|
|
11
|
+
from slack_agents.observability import observe
|
|
12
|
+
from slack_agents.storage.base import BaseStorageProvider
|
|
13
|
+
from slack_agents.tools.base import BaseToolProvider, ToolResult
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
MAX_ITERATIONS = 15
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _estimate_input_tokens(
    messages: list[Message],
    system_prompt: str,
    tools: list[dict] | None,
) -> int:
    """Estimate total input tokens from character count."""

    def _content_chars(content) -> int:
        # Structured (non-str) content is measured via its JSON serialization.
        return len(content) if isinstance(content, str) else len(json.dumps(content))

    total_chars = len(system_prompt)
    if tools:
        total_chars += len(json.dumps(tools))
    total_chars += sum(_content_chars(msg.content) for msg in messages)
    return total_chars // CHARS_PER_TOKEN
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
@observe(name="agent_loop_streaming")
async def run_agent_loop_streaming(
    llm: BaseLLMProvider,
    messages: list[Message],
    system_prompt: str = "",
    tool_providers: list[BaseToolProvider] | None = None,
    user_conversation_context: UserConversationContext | None = None,
    storage: BaseStorageProvider | None = None,
) -> AsyncIterator[StreamEvent | dict]:
    """Run the agent loop with streaming.

    Repeatedly calls the LLM, executes any tools it requests, appends the
    results, and loops — for at most MAX_ITERATIONS rounds.  Note that
    `messages` is mutated in place: assistant turns and tool-result turns
    are appended as the loop runs.

    Yields StreamEvents for text and status dicts for tool calls.
    """
    providers = tool_providers or []
    # Flatten every provider's tool schemas; empty list collapses to None.
    tools = [t for p in providers for t in p.tools] or None
    # Map tool name -> owning provider for dispatch.  If two providers expose
    # the same tool name, the later provider wins.
    provider_map = {t["name"]: p for p in providers for t in p.tools}
    # Cumulative usage across every LLM call made by this loop invocation.
    total_input_tokens = 0
    total_output_tokens = 0
    total_cache_creation_input_tokens = 0
    total_cache_read_input_tokens = 0
    peak_single_call_input_tokens = 0

    for iteration in range(MAX_ITERATIONS):
        logger.info("Agent loop streaming iteration %d", iteration + 1)

        # Cheap char-count heuristic; bail out before making an oversized call.
        estimated = _estimate_input_tokens(messages, system_prompt, tools)
        if estimated > llm.max_input_tokens:
            logger.warning(
                "Estimated input ~%d tokens exceeds limit of %d",
                estimated,
                llm.max_input_tokens,
            )
            yield StreamEvent(
                type="text_delta",
                text="\n\n_This conversation has grown too long. Please start a new thread._",
            )
            yield StreamEvent(
                type="message_end",
                stop_reason="max_input_tokens",
                input_tokens=total_input_tokens,
                output_tokens=total_output_tokens,
                cache_creation_input_tokens=total_cache_creation_input_tokens,
                cache_read_input_tokens=total_cache_read_input_tokens,
                peak_single_call_input_tokens=peak_single_call_input_tokens,
            )
            return

        # Per-iteration accumulators for the single LLM call below.
        collected_text = ""
        collected_tool_calls = []
        current_tool_inputs: dict[str, list[str]] = {}
        stop_reason = ""

        async for event in llm.stream(
            messages=messages,
            system_prompt=system_prompt,
            tools=tools or None,  # `tools` is already None when empty; harmless
        ):
            if event.type == "text_delta":
                collected_text += event.text
                yield event

            elif event.type == "tool_use_start":
                if event.tool_call:
                    current_tool_inputs[event.tool_call.id] = []

            elif event.type == "tool_use_delta":
                # NOTE(review): the delta is appended to EVERY in-flight tool
                # id, and current_tool_inputs is never read afterwards — the
                # final tool input comes from the tool_use_end event instead.
                # Looks vestigial; confirm before removing.
                for tid in current_tool_inputs:
                    current_tool_inputs[tid].append(event.tool_input_delta)

            elif event.type == "tool_use_end":
                if event.tool_call:
                    collected_tool_calls.append(event.tool_call)

            elif event.type == "message_end":
                stop_reason = event.stop_reason
                total_input_tokens += event.input_tokens
                total_output_tokens += event.output_tokens
                total_cache_creation_input_tokens += event.cache_creation_input_tokens
                total_cache_read_input_tokens += event.cache_read_input_tokens
                # Peak tracks the largest single call including cached tokens.
                call_input = (
                    event.input_tokens
                    + event.cache_creation_input_tokens
                    + event.cache_read_input_tokens
                )
                peak_single_call_input_tokens = max(peak_single_call_input_tokens, call_input)

        # No tool calls means the model is done — emit the final usage totals.
        if not collected_tool_calls:
            yield StreamEvent(
                type="message_end",
                stop_reason=stop_reason,
                input_tokens=total_input_tokens,
                output_tokens=total_output_tokens,
                cache_creation_input_tokens=total_cache_creation_input_tokens,
                cache_read_input_tokens=total_cache_read_input_tokens,
                peak_single_call_input_tokens=peak_single_call_input_tokens,
            )
            return

        # Record the assistant turn (text + tool_use blocks) before running tools.
        response = LLMResponse(
            text=collected_text,
            tool_calls=collected_tool_calls,
            stop_reason=stop_reason,
        )
        assistant_content = _build_assistant_content(response)
        messages.append(Message(role="assistant", content=assistant_content))

        # Announce each pending tool call to the consumer.
        for tc in collected_tool_calls:
            yield {
                "type": "tool_status",
                "tool_id": tc.id,
                "tool_name": tc.name,
                "status": "calling",
                "tool_input": tc.input,
            }

        async def _call(tc) -> ToolResult:
            # Dispatch to the provider owning this tool; unknown tools become
            # an error result rather than an exception so the loop continues.
            provider = provider_map.get(tc.name)
            if provider:
                return await provider.call_tool(
                    tc.name, tc.input, user_conversation_context, storage
                )
            return {"content": f"Unknown tool: {tc.name}", "is_error": True, "files": []}

        # All tool calls in this turn run concurrently.
        results = await asyncio.gather(*[_call(tc) for tc in collected_tool_calls])

        tool_results = []
        for tc, result in zip(collected_tool_calls, results):
            yield {
                "type": "tool_status",
                "tool_id": tc.id,
                "tool_name": tc.name,
                "status": "done",
                "tool_input": tc.input,
                "tool_result": result,
            }
            # Anthropic-style tool_result block; is_error only set when true.
            tool_results.append(
                {
                    "type": "tool_result",
                    "tool_use_id": tc.id,
                    "content": result["content"],
                    **({"is_error": True} if result["is_error"] else {}),
                }
            )

        # Feed tool results back as a user turn for the next iteration.
        messages.append(Message(role="user", content=tool_results))

    # Fell out of the loop: the model kept requesting tools past the cap.
    yield StreamEvent(type="text_delta", text="\n\n_Reached maximum tool-calling steps._")
    yield StreamEvent(
        type="message_end",
        stop_reason="max_iterations",
        input_tokens=total_input_tokens,
        output_tokens=total_output_tokens,
        cache_creation_input_tokens=total_cache_creation_input_tokens,
        cache_read_input_tokens=total_cache_read_input_tokens,
        peak_single_call_input_tokens=peak_single_call_input_tokens,
    )
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def _build_assistant_content(response: LLMResponse) -> list[dict]:
    """Build Anthropic-style assistant content blocks from an LLMResponse."""
    content: list[dict] = []
    # Text first (omitted when empty), then one tool_use block per call.
    if response.text:
        content.append({"type": "text", "text": response.text})
    content.extend(
        {"type": "tool_use", "id": call.id, "name": call.name, "input": call.input}
        for call in response.tool_calls
    )
    return content
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
"""Unified CLI for slack-agents."""
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
|
|
6
|
+
from slack_agents.cli import (
|
|
7
|
+
build_docker,
|
|
8
|
+
export_conversations,
|
|
9
|
+
export_usage,
|
|
10
|
+
healthcheck,
|
|
11
|
+
run,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _parse_tz_aware(value: str) -> datetime:
|
|
16
|
+
"""Parse an ISO datetime string, rejecting naive (tz-unaware) values."""
|
|
17
|
+
dt = datetime.fromisoformat(value)
|
|
18
|
+
if dt.tzinfo is None:
|
|
19
|
+
raise argparse.ArgumentTypeError(
|
|
20
|
+
f"datetime must include a timezone offset (e.g. +00:00), got: {value}"
|
|
21
|
+
)
|
|
22
|
+
return dt
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def build_parser() -> argparse.ArgumentParser:
    """Construct the top-level parser with every subcommand registered."""
    root = argparse.ArgumentParser(
        prog="slack-agents",
        description="CLI for running and managing slack-agents.",
    )
    commands = root.add_subparsers(dest="command", required=True)

    # Each module attaches its own subparser; the exporters additionally take
    # the tz-aware datetime parser for their --date-from/--date-to options.
    run.register(commands)
    healthcheck.register(commands)
    export_conversations.register(commands, _parse_tz_aware)
    export_usage.register(commands, _parse_tz_aware)
    build_docker.register(commands)

    return root
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def main(argv: list[str] | None = None) -> None:
    """CLI entry point: parse argv and dispatch to the chosen subcommand."""
    arguments = build_parser().parse_args(argv)
    # Every subcommand installs a `handler` via set_defaults, and the
    # subparsers are required, so `handler` is always present here.
    arguments.handler(arguments)


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
"""CLI subcommand: build-docker."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def register(subparsers):
    """Attach the build-docker subcommand and its options to the CLI."""
    cmd = subparsers.add_parser("build-docker", help="Build a Docker image for an agent")
    cmd.add_argument(
        "agent_dir",
        help="Path to agent directory containing config.yaml and system_prompt.txt",
    )
    cmd.add_argument(
        "--push",
        metavar="REGISTRY",
        help="Push image to registry after building (e.g. registry.example.com)",
    )
    cmd.add_argument(
        "--image-name",
        metavar="NAME",
        help="Custom image name (default: slack-agents-<agent-dir-name>)",
    )
    cmd.add_argument(
        "--platform",
        default="linux/amd64",
        help="Target platform (default: linux/amd64)",
    )
    cmd.set_defaults(handler=execute)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _is_valid_docker_name(name: str) -> bool:
|
|
29
|
+
"""Check if a string is a valid Docker image name component."""
|
|
30
|
+
import re
|
|
31
|
+
|
|
32
|
+
return bool(re.fullmatch(r"[a-z0-9]+(?:[._-][a-z0-9]+)*", name))
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def execute(args):
    """Handler for build-docker: build (and optionally push) the agent image.

    Exits non-zero on a missing agent dir, an invalid image name, or a
    failing docker subprocess.
    """
    # Imports are local so the CLI stays fast to start for other subcommands.
    import subprocess
    import sys
    from pathlib import Path

    from slack_agents.config import load_agent_config
    from slack_agents.main import setup_environment

    setup_environment()

    agent_dir = Path(args.agent_dir)
    if not agent_dir.is_dir():
        print(f"Error: agent directory not found: {agent_dir}", file=sys.stderr)
        sys.exit(1)

    # Image version comes from the agent's own config, not the package version.
    config, _system_prompt, agent_name = load_agent_config(agent_dir)
    version = config.version
    # The Dockerfile ships inside the installed slack_agents package.
    dockerfile = Path(__file__).resolve().parent.parent / "Dockerfile"
    image_name = args.image_name or f"slack-agents-{agent_name}"

    # A derived name can be invalid if the agent dir has e.g. uppercase chars.
    if not _is_valid_docker_name(image_name):
        print(
            f"Error: '{image_name}' is not a valid Docker image name. "
            "Names must be lowercase alphanumeric, optionally separated by "
            "'.', '-', or '_'. Use --image-name to provide a valid name.",
            file=sys.stderr,
        )
        sys.exit(1)

    if args.push:
        image_tag = f"{args.push}/{image_name}:{version}"
    else:
        image_tag = f"{image_name}:{version}"

    print(f"Building {image_tag} ...")
    # Build context is the current working directory (".") — the agent dir is
    # passed in via the AGENT_PATH build arg, so run this from the repo root.
    result = subprocess.run(
        [
            "docker",
            "build",
            "--platform",
            args.platform,
            "--build-arg",
            f"AGENT_PATH={agent_dir}",
            "-f",
            str(dockerfile),
            "-t",
            image_tag,
            ".",
        ]
    )
    # docker's stdout/stderr stream straight to the terminal; we only
    # propagate its exit code.
    if result.returncode != 0:
        sys.exit(result.returncode)

    if args.push:
        print(f"Pushing {image_tag} ...")
        result = subprocess.run(["docker", "push", image_tag])
        if result.returncode != 0:
            sys.exit(result.returncode)

    print(f"Done: {image_tag}")
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
"""CLI subcommand: export-conversations."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def register(subparsers, parse_tz_aware):
    """Attach the export-conversations subcommand and its options to the CLI."""
    cmd = subparsers.add_parser("export-conversations", help="Export conversations from an agent")
    cmd.add_argument("agent_dir", help="Path to agent directory (e.g. agents/hello-world)")
    cmd.add_argument(
        "--format",
        required=True,
        choices=["html"],
        help="Export format (currently: html)",
    )
    cmd.add_argument("--handle", help="Filter by Slack user handle")
    # Both date filters use the injected tz-aware parser, which rejects
    # naive datetimes with a clean usage error.
    cmd.add_argument(
        "--date-from",
        type=parse_tz_aware,
        help="Filter start datetime (ISO format with tz, e.g. 2026-01-01T00:00:00+00:00)",
    )
    cmd.add_argument(
        "--date-to",
        type=parse_tz_aware,
        help="Filter end datetime (ISO format with tz)",
    )
    cmd.add_argument("--output", required=True, help="Output directory")
    cmd.set_defaults(handler=execute)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def execute(args):
    """Handler for export-conversations: dump conversations via the storage backend.

    Exits non-zero on a missing agent dir or a storage backend that does not
    support export.
    """
    # Imports are local so the CLI stays fast to start for other subcommands.
    import asyncio
    import sys
    from pathlib import Path

    from slack_agents.config import load_agent_config, load_plugin
    from slack_agents.conversations import ConversationManager
    from slack_agents.main import setup_environment

    setup_environment()

    agent_dir = Path(args.agent_dir).resolve()
    if not agent_dir.exists():
        print(f"Error: agent directory not found: {agent_dir}", file=sys.stderr)
        sys.exit(1)

    config, _system_prompt, agent_name = load_agent_config(agent_dir)

    # Copy so popping "type" doesn't mutate the loaded config; the remaining
    # keys are forwarded as keyword args to the storage plugin.
    storage_config = dict(config.storage)
    type_path = storage_config.pop("type")

    async def run() -> None:
        storage = load_plugin(type_path, **storage_config)
        await storage.initialize()
        try:
            conversations = ConversationManager(storage)

            if not conversations.supports_export:
                print(
                    "Error: export-conversations requires persistent storage"
                    " (file-based SQLite or PostgreSQL).\n"
                    "The current storage backend does not support conversation export.",
                    file=sys.stderr,
                )
                # sys.exit raises SystemExit, so the finally below still
                # closes the storage before the process exits.
                sys.exit(1)

            output_dir = args.output

            # --format is required with choices=["html"], so this branch is
            # currently always taken; kept conditional for future formats.
            if args.format == "html":
                from slack_agents.cli.export_conversations_html import export_conversations_html

                count = await export_conversations_html(
                    conversations,
                    agent_name,
                    output_dir,
                    handle=args.handle,
                    # Datetimes are already tz-aware (validated at parse time).
                    date_from=args.date_from.isoformat() if args.date_from else None,
                    date_to=args.date_to.isoformat() if args.date_to else None,
                )
                if count == 0:
                    print("No conversations found matching the filters.")
                else:
                    print(f"Exported {count} conversation(s) to {output_dir}/")
        finally:
            await storage.close()

    asyncio.run(run())
|