takopi-slack-plugin 0.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- takopi_slack_plugin/__init__.py +1 -0
- takopi_slack_plugin/backend.py +193 -0
- takopi_slack_plugin/bridge.py +1380 -0
- takopi_slack_plugin/client.py +254 -0
- takopi_slack_plugin/commands/__init__.py +3 -0
- takopi_slack_plugin/commands/dispatch.py +114 -0
- takopi_slack_plugin/commands/executor.py +192 -0
- takopi_slack_plugin/config.py +60 -0
- takopi_slack_plugin/engine.py +142 -0
- takopi_slack_plugin/onboarding.py +58 -0
- takopi_slack_plugin/outbox.py +165 -0
- takopi_slack_plugin/overrides.py +20 -0
- takopi_slack_plugin/thread_sessions.py +289 -0
- takopi_slack_plugin-0.0.15.dist-info/METADATA +151 -0
- takopi_slack_plugin-0.0.15.dist-info/RECORD +17 -0
- takopi_slack_plugin-0.0.15.dist-info/WHEEL +4 -0
- takopi_slack_plugin-0.0.15.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, Awaitable, Callable
|
|
4
|
+
|
|
5
|
+
import anyio
|
|
6
|
+
|
|
7
|
+
from takopi.api import (
|
|
8
|
+
ConfigError,
|
|
9
|
+
ExecBridgeConfig,
|
|
10
|
+
IncomingMessage as RunnerIncomingMessage,
|
|
11
|
+
MessageRef,
|
|
12
|
+
RenderedMessage,
|
|
13
|
+
RunContext,
|
|
14
|
+
RunnerUnavailableError,
|
|
15
|
+
RunningTasks,
|
|
16
|
+
SendOptions,
|
|
17
|
+
TransportRuntime,
|
|
18
|
+
bind_run_context,
|
|
19
|
+
clear_context,
|
|
20
|
+
handle_message,
|
|
21
|
+
reset_run_base_dir,
|
|
22
|
+
set_run_base_dir,
|
|
23
|
+
)
|
|
24
|
+
from takopi.runners.run_options import EngineRunOptions, apply_run_options
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
async def send_plain(
    exec_cfg: ExecBridgeConfig,
    *,
    channel_id: str,
    user_msg_id: str,
    thread_id: str | None,
    text: str,
    notify: bool = True,
) -> MessageRef | None:
    """Send a plain-text reply via the transport, anchored to the user's message.

    The reply is addressed to ``user_msg_id`` in ``channel_id`` (and threaded
    under ``thread_id`` when given). Returns whatever ref the transport
    reports for the sent message, or None.
    """
    anchor = MessageRef(
        channel_id=channel_id,
        message_id=user_msg_id,
        thread_id=thread_id,
    )
    send_options = SendOptions(reply_to=anchor, notify=notify, thread_id=thread_id)
    rendered = RenderedMessage(text=text)
    return await exec_cfg.transport.send(
        channel_id=channel_id,
        message=rendered,
        options=send_options,
    )
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
async def run_engine(
    *,
    exec_cfg: ExecBridgeConfig,
    runtime: TransportRuntime,
    running_tasks: RunningTasks,
    channel_id: str,
    user_msg_id: str,
    text: str,
    resume_token,
    context: RunContext | None,
    engine_override,
    thread_id: str | None,
    on_thread_known: Callable[[Any, anyio.Event], Awaitable[None]] | None = None,
    run_options: EngineRunOptions | None = None,
) -> None:
    """Resolve a runner and execute one engine run for an incoming message.

    Resolution or configuration failures are reported back into the thread as
    a plain ``error:`` reply (without a notification ping) instead of raising.
    Logging context and the run base dir are always restored, even on failure.
    """

    async def _report_error(detail: object) -> None:
        # All failure paths share this shape: reply in-thread, no notification.
        await send_plain(
            exec_cfg,
            channel_id=channel_id,
            user_msg_id=user_msg_id,
            thread_id=thread_id,
            text=f"error:\n{detail}",
            notify=False,
        )

    try:
        try:
            entry = runtime.resolve_runner(
                resume_token=resume_token,
                engine_override=engine_override,
            )
        except RunnerUnavailableError as exc:
            await _report_error(exc)
            return

        runner = entry.runner
        if not entry.available:
            await _report_error(entry.issue or "engine unavailable")
            return

        try:
            cwd = runtime.resolve_run_cwd(context)
        except ConfigError as exc:
            await _report_error(exc)
            return

        run_base_token = set_run_base_dir(cwd)
        try:
            # Structured-logging fields attached to everything this run emits.
            run_fields: dict[str, Any] = {
                "channel_id": channel_id,
                "user_msg_id": user_msg_id,
                "engine": runner.engine,
                "resume": resume_token.value if resume_token else None,
            }
            if context is not None:
                run_fields["project"] = context.project
                run_fields["branch"] = context.branch
            if cwd is not None:
                run_fields["cwd"] = str(cwd)
            bind_run_context(**run_fields)
            context_line = runtime.format_context_line(context)
            incoming = RunnerIncomingMessage(
                channel_id=channel_id,
                message_id=user_msg_id,
                text=text,
                thread_id=thread_id,
            )
            with apply_run_options(run_options):
                await handle_message(
                    exec_cfg,
                    runner=runner,
                    incoming=incoming,
                    resume_token=resume_token,
                    context=context,
                    context_line=context_line,
                    strip_resume_line=runtime.is_resume_line,
                    running_tasks=running_tasks,
                    on_thread_known=on_thread_known,
                )
        finally:
            reset_run_base_dir(run_base_token)
    finally:
        clear_context()
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
import questionary
|
|
7
|
+
|
|
8
|
+
from takopi.api import ConfigError, HOME_CONFIG_PATH, read_config, write_config
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
async def interactive_setup(*, force: bool) -> bool:
    """Prompt for Slack credentials and persist them into the home config.

    Returns True when every prompt was answered and the config was written;
    False as soon as any prompt is left empty or aborted. ``force`` is
    accepted for interface parity but currently unused.
    """
    _ = force
    config_path = HOME_CONFIG_PATH
    try:
        config = read_config(config_path)
    except ConfigError:
        # Unreadable/missing config: start from scratch rather than failing.
        config = {}

    answers: dict[str, str] = {}
    prompts = (
        ("bot_token", questionary.password, "Slack bot token"),
        ("app_token", questionary.password, "Slack app token (xapp-)"),
        ("channel_id", questionary.text, "Slack channel ID"),
    )
    for field_name, ask, label in prompts:
        answer = ask(label).ask()
        if not answer:
            return False
        answers[field_name] = str(answer).strip()

    transports = _ensure_table(config, "transports", config_path=config_path)
    slack = _ensure_table(
        transports,
        "slack",
        config_path=config_path,
        label="transports.slack",
    )
    slack.update(answers)
    config["transport"] = "slack"
    write_config(config, config_path)
    return True
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def _ensure_table(
|
|
44
|
+
config: dict[str, Any],
|
|
45
|
+
key: str,
|
|
46
|
+
*,
|
|
47
|
+
config_path: Path,
|
|
48
|
+
label: str | None = None,
|
|
49
|
+
) -> dict[str, Any]:
|
|
50
|
+
value = config.get(key)
|
|
51
|
+
if value is None:
|
|
52
|
+
table: dict[str, Any] = {}
|
|
53
|
+
config[key] = table
|
|
54
|
+
return table
|
|
55
|
+
if not isinstance(value, dict):
|
|
56
|
+
name = label or key
|
|
57
|
+
raise ConfigError(f"Invalid `{name}` in {config_path}; expected a table.")
|
|
58
|
+
return value
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
from dataclasses import dataclass, field
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
import anyio
|
|
8
|
+
|
|
9
|
+
__all__ = [
    "DELETE_PRIORITY",
    "EDIT_PRIORITY",
    "SEND_PRIORITY",
    "OutboxOp",
    "SlackOutbox",
]

# Op priorities: lower value is served first when multiple ops are pending
# (the worker picks by (priority, queued_at) — see SlackOutbox._pick_locked).
SEND_PRIORITY = 0
DELETE_PRIORITY = 1
EDIT_PRIORITY = 2

# Default minimum spacing, in seconds, between executed ops (used as the
# per-channel interval when no interval_for_channel callback is supplied).
DEFAULT_CHANNEL_INTERVAL = 0.3
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass(slots=True)
class OutboxOp:
    """One queued Slack operation together with its completion signal.

    ``execute`` is the awaitable callable the worker runs; ``done`` fires once
    the op has a ``result`` (or was superseded/dropped with a None result).
    """

    execute: callable
    priority: int
    queued_at: float
    channel_id: str | None
    label: str | None = None
    done: anyio.Event = field(default_factory=anyio.Event)
    result: Any = None

    def set_result(self, result: Any) -> None:
        """Record the outcome and wake any waiter; subsequent calls are no-ops."""
        if not self.done.is_set():
            self.result = result
            self.done.set()
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class SlackOutbox:
    """Serializes Slack API operations through one background worker task.

    Ops are enqueued under a key; enqueueing a new op for an existing key
    replaces the pending one (the replaced op resolves with None). The worker
    drains ops in (priority, queued_at) order, paced per channel by
    ``interval_for_channel``.
    """

    def __init__(
        self,
        *,
        interval_for_channel: callable | None = None,
        clock: callable = time.monotonic,
        sleep: callable = anyio.sleep,
        on_error: callable | None = None,
        on_outbox_error: callable | None = None,
    ) -> None:
        # Per-channel pacing; falls back to a fixed default interval.
        self._interval_for_channel = interval_for_channel or (
            lambda _: DEFAULT_CHANNEL_INTERVAL
        )
        # clock/sleep are injectable for testing.
        self._clock = clock
        self._sleep = sleep
        # Called with (op, exc) when a single op's execute() raises.
        self._on_error = on_error
        # Called with exc when the worker loop itself dies unexpectedly.
        self._on_outbox_error = on_outbox_error
        # key -> latest pending op for that key; guarded by self._cond.
        self._pending: dict[object, OutboxOp] = {}
        self._cond = anyio.Condition()
        self._start_lock = anyio.Lock()
        self._closed = False
        self._tg: anyio.abc.TaskGroup | None = None
        # Earliest monotonic time at which the next op may execute.
        self._next_at = 0.0

    async def ensure_worker(self) -> None:
        """Start the background worker exactly once (no-op after close)."""
        async with self._start_lock:
            if self._tg is not None or self._closed:
                return
            # NOTE(review): entering a task group manually via __aenter__
            # detaches it from any enclosing scope; anyio generally
            # discourages this — confirm the cancellation/cleanup semantics
            # here are intentional (close() pairs it with __aexit__).
            self._tg = await anyio.create_task_group().__aenter__()
            self._tg.start_soon(self._run)

    async def enqueue(self, *, key: object, op: OutboxOp, wait: bool = True) -> Any:
        """Queue ``op`` under ``key``, superseding any pending op for that key.

        When ``wait`` is True, blocks until the op completes and returns its
        result; otherwise returns None immediately. After close(), ops are
        resolved with None without executing.
        """
        await self.ensure_worker()
        async with self._cond:
            if self._closed:
                op.set_result(None)
                return op.result
            previous = self._pending.get(key)
            if previous is not None:
                # Keep the original queue position; resolve the superseded op.
                op.queued_at = previous.queued_at
                previous.set_result(None)
            self._pending[key] = op
            self._cond.notify()
        if not wait:
            return None
        # Wait outside the condition so the worker can acquire it.
        await op.done.wait()
        return op.result

    async def drop_pending(self, *, key: object) -> None:
        """Remove and resolve (with None) any pending op queued under ``key``."""
        async with self._cond:
            pending = self._pending.pop(key, None)
            if pending is not None:
                pending.set_result(None)
            self._cond.notify()

    async def close(self) -> None:
        """Stop accepting ops, resolve all pending ones, and join the worker."""
        async with self._cond:
            self._closed = True
            self._fail_pending()
            self._cond.notify_all()
        if self._tg is not None:
            await self._tg.__aexit__(None, None, None)
            self._tg = None

    def _fail_pending(self) -> None:
        # Resolve every queued op with None; callers waiting on them unblock.
        for pending in list(self._pending.values()):
            pending.set_result(None)
        self._pending.clear()

    def _pick_locked(self) -> tuple[object, OutboxOp] | None:
        # Caller must hold self._cond. Lowest (priority, queued_at) wins.
        if not self._pending:
            return None
        return min(
            self._pending.items(),
            key=lambda item: (item[1].priority, item[1].queued_at),
        )

    async def _execute_op(self, op: OutboxOp) -> Any:
        # Per-op failures are reported via on_error and mapped to None so one
        # bad op never kills the worker loop.
        try:
            return await op.execute()
        except Exception as exc:  # noqa: BLE001
            if self._on_error is not None:
                self._on_error(op, exc)
            return None

    async def _sleep_until(self, deadline: float) -> None:
        # Sleep only for the remaining time, if any.
        delay = deadline - self._clock()
        if delay > 0:
            await self._sleep(delay)

    async def _run(self) -> None:
        """Worker loop: wait for ops, rate-limit, execute, resolve."""
        cancel_exc = anyio.get_cancelled_exc_class()
        try:
            while True:
                # Phase 1: wait until there is work (or we are closed).
                async with self._cond:
                    while not self._pending and not self._closed:
                        await self._cond.wait()
                    if self._closed and not self._pending:
                        return

                # Phase 2: honor the rate limit before picking an op; new ops
                # may arrive while sleeping, hence the re-check via continue.
                if self._clock() < self._next_at:
                    await self._sleep_until(self._next_at)
                    continue

                # Phase 3: pick and remove the highest-priority pending op.
                async with self._cond:
                    if self._closed and not self._pending:
                        return
                    picked = self._pick_locked()
                    if picked is None:
                        continue
                    key, op = picked
                    self._pending.pop(key, None)

                # Phase 4: execute outside the lock; schedule the next slot.
                interval = self._interval_for_channel(op.channel_id)
                if interval:
                    self._next_at = max(self._next_at, self._clock()) + interval
                result = await self._execute_op(op)
                op.set_result(result)
        except cancel_exc:
            return
        except Exception as exc:  # noqa: BLE001
            # Worker died: unblock everyone and surface the failure.
            self._fail_pending()
            if self._on_outbox_error is not None:
                self._on_outbox_error(exc)
            return
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
|
|
5
|
+
# Reasoning-effort levels accepted by is_valid_reasoning_level.
REASONING_LEVELS = frozenset({"minimal", "low", "medium", "high", "xhigh"})
# Engine ids that honor a reasoning override (see supports_reasoning).
REASONING_ENGINES = frozenset({"codex"})
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass(frozen=True, slots=True)
class ResolvedOverrides:
    """Resolved per-run overrides; a None field means "use the engine default"."""

    model: str | None = None
    reasoning: str | None = None
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def supports_reasoning(engine_id: str) -> bool:
    """Return True when *engine_id* is an engine that honors reasoning overrides."""
    is_supported = engine_id in REASONING_ENGINES
    return is_supported
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def is_valid_reasoning_level(level: str) -> bool:
    """Return True when *level* is one of the recognized reasoning-effort levels."""
    recognized = level in REASONING_LEVELS
    return recognized
|
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import msgspec
|
|
5
|
+
|
|
6
|
+
from takopi.api import ResumeToken, RunContext, get_logger
|
|
7
|
+
from takopi.telegram.state_store import JsonStateStore
|
|
8
|
+
|
|
9
|
+
logger = get_logger(__name__)
|
|
10
|
+
|
|
11
|
+
# Schema version of the persisted state payload.
STATE_VERSION = 1
# State file name; stored alongside the main config (see resolve_sessions_path).
STATE_FILENAME = "slack_thread_sessions_state.json"
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class _ThreadSession(msgspec.Struct, forbid_unknown_fields=False):
    """Persisted per-thread session state (one entry per channel:thread key)."""

    # engine id -> resume-token value for continuing that engine's session
    resumes: dict[str, str] = msgspec.field(default_factory=dict)
    # {"project": ..., "branch": ...}; None when no run context is pinned
    context: dict[str, str] | None = None
    # engine id -> model override; None when no overrides are set
    model_overrides: dict[str, str] | None = None
    # engine id -> reasoning-level override; None when no overrides are set
    reasoning_overrides: dict[str, str] | None = None
    # Engine used by default in this thread, when explicitly chosen
    default_engine: str | None = None
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class _ThreadSessionsState(msgspec.Struct, forbid_unknown_fields=False):
    """Top-level persisted payload: schema version plus all thread sessions."""

    version: int
    # "<channel_id>:<thread_id>" -> per-thread session state
    threads: dict[str, _ThreadSession] = msgspec.field(default_factory=dict)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def resolve_sessions_path(config_path: Path) -> Path:
    """Return the sessions-state file path, sited next to the given config file."""
    sessions_path = config_path.with_name(STATE_FILENAME)
    return sessions_path
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _thread_key(channel_id: str, thread_id: str) -> str:
|
|
33
|
+
return f"{channel_id}:{thread_id}"
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _new_state() -> _ThreadSessionsState:
    """Create an empty state object at the current schema version."""
    return _ThreadSessionsState(version=STATE_VERSION, threads={})
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class SlackThreadSessionStore(JsonStateStore[_ThreadSessionsState]):
    """JSON-backed store of per-thread Slack session state.

    Every public method follows the same protocol: take the store lock,
    reload the file if it changed on disk, read or mutate the in-memory
    state, and (for writers) save before releasing the lock.
    """

    def __init__(self, path: Path) -> None:
        super().__init__(
            path,
            version=STATE_VERSION,
            state_type=_ThreadSessionsState,
            state_factory=_new_state,
            log_prefix="slack.thread_sessions",
            logger=logger,
        )

    @staticmethod
    def _thread_key(channel_id: str, thread_id: str) -> str:
        # Delegates to the module-level helper so key format stays in one place.
        return _thread_key(channel_id, thread_id)

    def _get_or_create(self, key: str) -> _ThreadSession:
        # Caller must hold self._lock.
        session = self._state.threads.get(key)
        if session is None:
            session = _ThreadSession()
            self._state.threads[key] = session
        return session

    async def get_resume(
        self, *, channel_id: str, thread_id: str, engine: str
    ) -> ResumeToken | None:
        """Return the stored resume token for ``engine`` in this thread, if any."""
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._state.threads.get(key)
            if session is None:
                return None
            value = session.resumes.get(engine)
            if not value:
                return None
            return ResumeToken(engine=engine, value=value)

    async def set_resume(
        self, *, channel_id: str, thread_id: str, token: ResumeToken
    ) -> None:
        """Persist ``token`` as the resume value for its engine in this thread."""
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._get_or_create(key)
            session.resumes[token.engine] = token.value
            self._save_locked()

    async def clear_thread(self, *, channel_id: str, thread_id: str) -> None:
        """Drop all stored state for this thread (no-op when absent)."""
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            if key not in self._state.threads:
                # Nothing to remove; skip the save.
                return
            self._state.threads.pop(key, None)
            self._save_locked()

    async def clear_resumes(self, *, channel_id: str, thread_id: str) -> None:
        """Drop all resume tokens for this thread, keeping other session state."""
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._state.threads.get(key)
            if session is None:
                return
            session.resumes = {}
            self._save_locked()

    async def get_context(
        self, *, channel_id: str, thread_id: str
    ) -> RunContext | None:
        """Return the pinned run context for this thread, or None."""
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._state.threads.get(key)
            if session is None or session.context is None:
                return None
            project = session.context.get("project")
            if not project:
                # A context without a project is treated as unset.
                return None
            branch = session.context.get("branch")
            return RunContext(project=project, branch=branch)

    async def set_context(
        self,
        *,
        channel_id: str,
        thread_id: str,
        context: RunContext | None,
    ) -> None:
        """Pin ``context`` on this thread, or clear it when None."""
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._get_or_create(key)
            if context is None:
                session.context = None
            else:
                payload: dict[str, str] = {"project": context.project}
                if context.branch:
                    # "branch" is stored only when non-empty.
                    payload["branch"] = context.branch
                session.context = payload
            self._save_locked()

    async def get_default_engine(
        self, *, channel_id: str, thread_id: str
    ) -> str | None:
        """Return the thread's default engine id, if one was chosen."""
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._state.threads.get(key)
            if session is None:
                return None
            return session.default_engine

    async def get_state(
        self, *, channel_id: str, thread_id: str
    ) -> dict[str, object] | None:
        """Return a plain-dict snapshot of the thread's session, or None.

        Mutable mappings are copied so callers cannot mutate stored state;
        empty mappings are reported as None.
        """
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._state.threads.get(key)
            if session is None:
                return None
            return {
                "context": dict(session.context) if session.context else None,
                "default_engine": session.default_engine,
                "model_overrides": dict(session.model_overrides)
                if session.model_overrides
                else None,
                "reasoning_overrides": dict(session.reasoning_overrides)
                if session.reasoning_overrides
                else None,
                "resumes": dict(session.resumes) if session.resumes else None,
            }

    async def set_default_engine(
        self,
        *,
        channel_id: str,
        thread_id: str,
        engine: str | None,
    ) -> None:
        """Set (or clear, when None/blank) the thread's default engine."""
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._get_or_create(key)
            session.default_engine = _normalize_override(engine)
            self._save_locked()

    async def get_model_override(
        self, *, channel_id: str, thread_id: str, engine: str
    ) -> str | None:
        """Return this thread's model override for ``engine``, if any."""
        return await self._get_override(
            channel_id=channel_id,
            thread_id=thread_id,
            engine=engine,
            field="model_overrides",
        )

    async def set_model_override(
        self,
        *,
        channel_id: str,
        thread_id: str,
        engine: str,
        model: str | None,
    ) -> None:
        """Set (or clear, when None/blank) the model override for ``engine``."""
        await self._set_override(
            channel_id=channel_id,
            thread_id=thread_id,
            engine=engine,
            value=model,
            field="model_overrides",
        )

    async def get_reasoning_override(
        self, *, channel_id: str, thread_id: str, engine: str
    ) -> str | None:
        """Return this thread's reasoning-level override for ``engine``, if any."""
        return await self._get_override(
            channel_id=channel_id,
            thread_id=thread_id,
            engine=engine,
            field="reasoning_overrides",
        )

    async def set_reasoning_override(
        self,
        *,
        channel_id: str,
        thread_id: str,
        engine: str,
        level: str | None,
    ) -> None:
        """Set (or clear, when None/blank) the reasoning override for ``engine``."""
        await self._set_override(
            channel_id=channel_id,
            thread_id=thread_id,
            engine=engine,
            value=level,
            field="reasoning_overrides",
        )

    async def _get_override(
        self,
        *,
        channel_id: str,
        thread_id: str,
        engine: str,
        field: str,
    ) -> str | None:
        # Generic per-engine lookup over one of the *_overrides attributes.
        key = self._thread_key(channel_id, thread_id)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._state.threads.get(key)
            if session is None:
                return None
            overrides = getattr(session, field)
            if not isinstance(overrides, dict):
                # Field is None (or malformed data from disk): no override.
                return None
            value = overrides.get(engine)
            return _normalize_override(value)

    async def _set_override(
        self,
        *,
        channel_id: str,
        thread_id: str,
        engine: str,
        value: str | None,
        field: str,
    ) -> None:
        # Generic per-engine setter; a None/blank value removes the entry,
        # and an emptied mapping collapses back to None.
        key = self._thread_key(channel_id, thread_id)
        normalized = _normalize_override(value)
        async with self._lock:
            self._reload_locked_if_needed()
            session = self._get_or_create(key)
            overrides = getattr(session, field)
            if overrides is None or not isinstance(overrides, dict):
                overrides = {}
                setattr(session, field, overrides)
            if normalized is None:
                overrides.pop(engine, None)
                if not overrides:
                    setattr(session, field, None)
            else:
                overrides[engine] = normalized
            self._save_locked()
|
|
283
|
+
|
|
284
|
+
|
|
285
|
+
def _normalize_override(value: str | None) -> str | None:
|
|
286
|
+
if value is None:
|
|
287
|
+
return None
|
|
288
|
+
cleaned = value.strip()
|
|
289
|
+
return cleaned or None
|