yee88-0.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yee88/__init__.py +1 -0
- yee88/api.py +116 -0
- yee88/backends.py +25 -0
- yee88/backends_helpers.py +14 -0
- yee88/cli/__init__.py +228 -0
- yee88/cli/config.py +320 -0
- yee88/cli/doctor.py +173 -0
- yee88/cli/init.py +113 -0
- yee88/cli/onboarding_cmd.py +126 -0
- yee88/cli/plugins.py +196 -0
- yee88/cli/run.py +419 -0
- yee88/cli/topic.py +355 -0
- yee88/commands.py +134 -0
- yee88/config.py +142 -0
- yee88/config_migrations.py +124 -0
- yee88/config_watch.py +146 -0
- yee88/context.py +9 -0
- yee88/directives.py +146 -0
- yee88/engines.py +53 -0
- yee88/events.py +170 -0
- yee88/ids.py +17 -0
- yee88/lockfile.py +158 -0
- yee88/logging.py +283 -0
- yee88/markdown.py +298 -0
- yee88/model.py +77 -0
- yee88/plugins.py +312 -0
- yee88/presenter.py +25 -0
- yee88/progress.py +99 -0
- yee88/router.py +113 -0
- yee88/runner.py +712 -0
- yee88/runner_bridge.py +619 -0
- yee88/runners/__init__.py +1 -0
- yee88/runners/claude.py +483 -0
- yee88/runners/codex.py +656 -0
- yee88/runners/mock.py +221 -0
- yee88/runners/opencode.py +505 -0
- yee88/runners/pi.py +523 -0
- yee88/runners/run_options.py +39 -0
- yee88/runners/tool_actions.py +90 -0
- yee88/runtime_loader.py +207 -0
- yee88/scheduler.py +159 -0
- yee88/schemas/__init__.py +1 -0
- yee88/schemas/claude.py +238 -0
- yee88/schemas/codex.py +169 -0
- yee88/schemas/opencode.py +51 -0
- yee88/schemas/pi.py +117 -0
- yee88/settings.py +360 -0
- yee88/telegram/__init__.py +20 -0
- yee88/telegram/api_models.py +37 -0
- yee88/telegram/api_schemas.py +152 -0
- yee88/telegram/backend.py +163 -0
- yee88/telegram/bridge.py +425 -0
- yee88/telegram/chat_prefs.py +242 -0
- yee88/telegram/chat_sessions.py +112 -0
- yee88/telegram/client.py +409 -0
- yee88/telegram/client_api.py +539 -0
- yee88/telegram/commands/__init__.py +12 -0
- yee88/telegram/commands/agent.py +196 -0
- yee88/telegram/commands/cancel.py +116 -0
- yee88/telegram/commands/dispatch.py +111 -0
- yee88/telegram/commands/executor.py +449 -0
- yee88/telegram/commands/file_transfer.py +586 -0
- yee88/telegram/commands/handlers.py +45 -0
- yee88/telegram/commands/media.py +143 -0
- yee88/telegram/commands/menu.py +139 -0
- yee88/telegram/commands/model.py +215 -0
- yee88/telegram/commands/overrides.py +159 -0
- yee88/telegram/commands/parse.py +30 -0
- yee88/telegram/commands/plan.py +16 -0
- yee88/telegram/commands/reasoning.py +234 -0
- yee88/telegram/commands/reply.py +23 -0
- yee88/telegram/commands/topics.py +332 -0
- yee88/telegram/commands/trigger.py +143 -0
- yee88/telegram/context.py +140 -0
- yee88/telegram/engine_defaults.py +86 -0
- yee88/telegram/engine_overrides.py +105 -0
- yee88/telegram/files.py +178 -0
- yee88/telegram/loop.py +1822 -0
- yee88/telegram/onboarding.py +1088 -0
- yee88/telegram/outbox.py +177 -0
- yee88/telegram/parsing.py +239 -0
- yee88/telegram/render.py +198 -0
- yee88/telegram/state_store.py +88 -0
- yee88/telegram/topic_state.py +334 -0
- yee88/telegram/topics.py +256 -0
- yee88/telegram/trigger_mode.py +68 -0
- yee88/telegram/types.py +63 -0
- yee88/telegram/voice.py +110 -0
- yee88/transport.py +53 -0
- yee88/transport_runtime.py +323 -0
- yee88/transports.py +76 -0
- yee88/utils/__init__.py +1 -0
- yee88/utils/git.py +87 -0
- yee88/utils/json_state.py +21 -0
- yee88/utils/paths.py +47 -0
- yee88/utils/streams.py +44 -0
- yee88/utils/subprocess.py +86 -0
- yee88/worktrees.py +135 -0
- yee88-0.3.0.dist-info/METADATA +116 -0
- yee88-0.3.0.dist-info/RECORD +103 -0
- yee88-0.3.0.dist-info/WHEEL +4 -0
- yee88-0.3.0.dist-info/entry_points.txt +11 -0
- yee88-0.3.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,143 @@
from __future__ import annotations

from collections.abc import Awaitable, Callable, Sequence
from typing import TYPE_CHECKING

from ...context import RunContext
from ...directives import DirectiveError
from ...transport_runtime import ResolvedMessage
from ..context import _merge_topic_context
from ..files import parse_file_command
from ..topic_state import TopicStateStore
from ..topics import _topic_key, _topics_chat_project
from ..types import TelegramIncomingMessage
from .file_transfer import (
    FILE_PUT_USAGE,
    _format_file_put_failures,
    _handle_file_put_group,
    _save_file_put_group,
)
from .parse import _parse_slash_command
from .reply import make_reply

if TYPE_CHECKING:
    from ..bridge import TelegramBridgeConfig


async def _handle_media_group(
    cfg: TelegramBridgeConfig,
    messages: Sequence[TelegramIncomingMessage],
    topic_store: TopicStateStore | None,
    run_prompt: Callable[
        [TelegramIncomingMessage, str, ResolvedMessage], Awaitable[None]
    ]
    | None = None,
    resolve_prompt: Callable[
        [TelegramIncomingMessage, str, RunContext | None],
        Awaitable[ResolvedMessage | None],
    ]
    | None = None,
) -> None:
    if not messages:
        return
    ordered = sorted(messages, key=lambda item: item.message_id)
    command_msg = next(
        (item for item in ordered if item.text.strip()),
        ordered[0],
    )
    reply = make_reply(cfg, command_msg)
    topic_key = _topic_key(command_msg, cfg) if topic_store is not None else None
    chat_project = _topics_chat_project(cfg, command_msg.chat_id)
    bound_context = (
        await topic_store.get_context(*topic_key)
        if topic_store is not None and topic_key is not None
        else None
    )
    ambient_context = _merge_topic_context(
        chat_project=chat_project, bound=bound_context
    )
    command_id, args_text = _parse_slash_command(command_msg.text)
    if command_id == "file":
        command, rest, error = parse_file_command(args_text)
        if error is not None:
            await reply(text=error)
            return
        if command == "put":
            await _handle_file_put_group(
                cfg,
                command_msg,
                rest,
                ordered,
                ambient_context,
                topic_store,
            )
            return
    if cfg.files.enabled and cfg.files.auto_put:
        caption_text = command_msg.text.strip()
        if cfg.files.auto_put_mode == "prompt" and caption_text:
            if resolve_prompt is None:
                try:
                    resolved = cfg.runtime.resolve_message(
                        text=caption_text,
                        reply_text=command_msg.reply_to_text,
                        ambient_context=ambient_context,
                        chat_id=command_msg.chat_id,
                    )
                except DirectiveError as exc:
                    await reply(text=f"error:\n{exc}")
                    return
            else:
                resolved = await resolve_prompt(
                    command_msg, caption_text, ambient_context
                )
                if resolved is None:
                    return
            saved_group = await _save_file_put_group(
                cfg,
                command_msg,
                "",
                ordered,
                resolved.context,
                topic_store,
            )
            if saved_group is None:
                return
            if not saved_group.saved:
                failure_text = _format_file_put_failures(saved_group.failed)
                text = "failed to upload files."
                if failure_text is not None:
                    text = f"{text}\n\n{failure_text}"
                await reply(text=text)
                return
            if saved_group.failed:
                failure_text = _format_file_put_failures(saved_group.failed)
                if failure_text is not None:
                    await reply(text=f"some files failed to upload.\n\n{failure_text}")
            if run_prompt is None:
                await reply(text=FILE_PUT_USAGE)
                return
            paths = [
                item.rel_path.as_posix()
                for item in saved_group.saved
                if item.rel_path is not None
            ]
            files_text = "\n".join(f"- {path}" for path in paths)
            prompt_base = resolved.prompt
            annotation = f"[uploaded files]\n{files_text}"
            if prompt_base and prompt_base.strip():
                prompt = f"{prompt_base}\n\n{annotation}"
            else:
                prompt = annotation
            await run_prompt(command_msg, prompt, resolved)
            return
        if not caption_text:
            await _handle_file_put_group(
                cfg,
                command_msg,
                "",
                ordered,
                ambient_context,
                topic_store,
            )
            return
    await reply(text=FILE_PUT_USAGE)
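
Judging by the +143 line count in the listing above and the `_handle_media_group` entry point, this hunk corresponds to yee88/telegram/commands/media.py. The prompt-annotation step in auto-put "prompt" mode is plain string handling, so it can be sketched in isolation; the file paths and caption below are invented purely for illustration.

# Sketch only: mirrors the annotation logic in _handle_media_group above.
from pathlib import PurePosixPath

saved_paths = [PurePosixPath("docs/a.png"), PurePosixPath("docs/b.png")]  # hypothetical saved files
caption = "summarize these"  # hypothetical media-group caption

files_text = "\n".join(f"- {p.as_posix()}" for p in saved_paths)
annotation = f"[uploaded files]\n{files_text}"
prompt = f"{caption}\n\n{annotation}" if caption.strip() else annotation
print(prompt)
# summarize these
#
# [uploaded files]
# - docs/a.png
# - docs/b.png
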
@@ -0,0 +1,139 @@
from __future__ import annotations

from typing import TYPE_CHECKING

from ...commands import get_command
from ...config import ConfigError
from ...ids import RESERVED_COMMAND_IDS, is_valid_id
from ...logging import get_logger
from ...plugins import COMMAND_GROUP, list_entrypoints
from ...transport_runtime import TransportRuntime

if TYPE_CHECKING:
    from ..bridge import TelegramBridgeConfig

logger = get_logger(__name__)

_MAX_BOT_COMMANDS = 100


def build_bot_commands(
    runtime: TransportRuntime,
    *,
    include_file: bool = True,
    include_topics: bool = False,
) -> list[dict[str, str]]:
    commands: list[dict[str, str]] = []
    seen: set[str] = set()
    for engine_id in runtime.available_engine_ids():
        cmd = engine_id.lower()
        if cmd in seen:
            continue
        commands.append({"command": cmd, "description": f"use engine: {cmd}"})
        seen.add(cmd)
    for alias in runtime.project_aliases():
        cmd = alias.lower()
        if cmd in seen:
            continue
        if not is_valid_id(cmd):
            logger.debug(
                "startup.command_menu.skip_project",
                alias=alias,
            )
            continue
        commands.append({"command": cmd, "description": f"work on: {cmd}"})
        seen.add(cmd)
    allowlist = runtime.allowlist
    for ep in list_entrypoints(
        COMMAND_GROUP,
        allowlist=allowlist,
        reserved_ids=RESERVED_COMMAND_IDS,
    ):
        try:
            backend = get_command(ep.name, allowlist=allowlist)
        except ConfigError as exc:
            logger.info(
                "startup.command_menu.skip_command",
                command=ep.name,
                error=str(exc),
            )
            continue
        cmd = backend.id.lower()
        if cmd in seen:
            continue
        if not is_valid_id(cmd):
            logger.debug(
                "startup.command_menu.skip_command_id",
                command=cmd,
            )
            continue
        description = backend.description or f"command: {cmd}"
        commands.append({"command": cmd, "description": description})
        seen.add(cmd)
    for cmd, description in [
        ("new", "start a new thread"),
        ("ctx", "show or update context"),
        ("agent", "set default engine"),
        ("model", "set model override"),
        ("reasoning", "set reasoning override"),
        ("trigger", "set trigger mode"),
    ]:
        if cmd in seen:
            continue
        commands.append({"command": cmd, "description": description})
        seen.add(cmd)
    if include_topics:
        for cmd, description in [("topic", "create or bind a topic")]:
            if cmd in seen:
                continue
            commands.append({"command": cmd, "description": description})
            seen.add(cmd)
    if include_file and "file" not in seen:
        commands.append({"command": "file", "description": "upload or fetch files"})
        seen.add("file")
    if "cancel" not in seen:
        commands.append({"command": "cancel", "description": "cancel run"})
    if len(commands) > _MAX_BOT_COMMANDS:
        logger.warning(
            "startup.command_menu.too_many",
            count=len(commands),
            limit=_MAX_BOT_COMMANDS,
        )
        commands = commands[:_MAX_BOT_COMMANDS]
        if not any(cmd["command"] == "cancel" for cmd in commands):
            commands[-1] = {"command": "cancel", "description": "cancel run"}
    return commands


def _reserved_commands(runtime: TransportRuntime) -> set[str]:
    return {
        *{engine.lower() for engine in runtime.engine_ids},
        *{alias.lower() for alias in runtime.project_aliases()},
        *RESERVED_COMMAND_IDS,
    }


async def _set_command_menu(cfg: TelegramBridgeConfig) -> None:
    commands = build_bot_commands(
        cfg.runtime,
        include_file=cfg.files.enabled,
        include_topics=cfg.topics.enabled,
    )
    if not commands:
        return
    try:
        ok = await cfg.bot.set_my_commands(commands)
    except Exception as exc:  # noqa: BLE001
        logger.info(
            "startup.command_menu.failed",
            error=str(exc),
            error_type=exc.__class__.__name__,
        )
        return
    if not ok:
        logger.info("startup.command_menu.rejected")
        return
    logger.info(
        "startup.command_menu.updated",
        commands=[cmd["command"] for cmd in commands],
    )
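
This hunk matches yee88/telegram/commands/menu.py (+139). The builder produces the list-of-dict payload that Telegram bot command menus expect, capped at 100 entries with /cancel forced to survive truncation. A rough sketch of the output for a hypothetical runtime with one engine and one project alias; the engine id "claude" and alias "myproj" are illustrative values, not read from any real config.

# build_bot_commands(runtime, include_file=True, include_topics=False) might yield:
[
    {"command": "claude", "description": "use engine: claude"},
    {"command": "myproj", "description": "work on: myproj"},
    {"command": "new", "description": "start a new thread"},
    {"command": "ctx", "description": "show or update context"},
    {"command": "agent", "description": "set default engine"},
    {"command": "model", "description": "set model override"},
    {"command": "reasoning", "description": "set reasoning override"},
    {"command": "trigger", "description": "set trigger mode"},
    {"command": "file", "description": "upload or fetch files"},
    {"command": "cancel", "description": "cancel run"},
]
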
@@ -0,0 +1,215 @@
from __future__ import annotations

from typing import TYPE_CHECKING

from ...context import RunContext
from ..chat_prefs import ChatPrefsStore
from ..engine_overrides import EngineOverrides, resolve_override_value
from ..files import split_command_args
from ..topic_state import TopicStateStore
from ..topics import _topic_key
from ..types import TelegramIncomingMessage
from .overrides import (
    ENGINE_SOURCE_LABELS,
    OVERRIDE_SOURCE_LABELS,
    apply_engine_override,
    parse_set_args,
    require_admin_or_private,
    resolve_engine_selection,
)
from .reply import make_reply

if TYPE_CHECKING:
    from ..bridge import TelegramBridgeConfig

MODEL_USAGE = (
    "usage: `/model`, `/model set <model>`, "
    "`/model set <engine> <model>`, or `/model clear [engine]`"
)


async def _handle_model_command(
    cfg: TelegramBridgeConfig,
    msg: TelegramIncomingMessage,
    args_text: str,
    ambient_context: RunContext | None,
    topic_store: TopicStateStore | None,
    chat_prefs: ChatPrefsStore | None,
    *,
    resolved_scope: str | None = None,
    scope_chat_ids: frozenset[int] | None = None,
) -> None:
    reply = make_reply(cfg, msg)
    tkey = (
        _topic_key(msg, cfg, scope_chat_ids=scope_chat_ids)
        if topic_store is not None
        else None
    )
    tokens = split_command_args(args_text)
    action = tokens[0].lower() if tokens else "show"
    engine_ids = {engine.lower() for engine in cfg.runtime.engine_ids}

    if action in {"show", ""}:
        selection = await resolve_engine_selection(
            cfg,
            msg,
            ambient_context=ambient_context,
            topic_store=topic_store,
            chat_prefs=chat_prefs,
            topic_key=tkey,
        )
        if selection is None:
            return
        engine, engine_source = selection
        topic_override = None
        if tkey is not None and topic_store is not None:
            topic_override = await topic_store.get_engine_override(
                tkey[0], tkey[1], engine
            )
        chat_override = None
        if chat_prefs is not None:
            chat_override = await chat_prefs.get_engine_override(msg.chat_id, engine)
        resolution = resolve_override_value(
            topic_override=topic_override,
            chat_override=chat_override,
            field="model",
        )
        engine_line = f"engine: {engine} ({ENGINE_SOURCE_LABELS[engine_source]})"
        model_value = resolution.value or "default"
        model_line = (
            f"model: {model_value} ({OVERRIDE_SOURCE_LABELS[resolution.source]})"
        )
        topic_label = resolution.topic_value or "none"
        if tkey is None:
            topic_label = "none"
        chat_label = (
            "unavailable" if chat_prefs is None else resolution.chat_value or "none"
        )
        defaults_line = f"defaults: topic: {topic_label}, chat: {chat_label}"
        available_line = f"available engines: {', '.join(cfg.runtime.engine_ids)}"
        await reply(
            text="\n\n".join([engine_line, model_line, defaults_line, available_line])
        )
        return

    if action == "set":
        engine_arg, model = parse_set_args(tokens, engine_ids=engine_ids)
        if model is None:
            await reply(text=MODEL_USAGE)
            return
        if not await require_admin_or_private(
            cfg,
            msg,
            missing_sender="cannot verify sender for model overrides.",
            failed_member="failed to verify model override permissions.",
            denied="changing model overrides is restricted to group admins.",
        ):
            return
        if engine_arg is None:
            selection = await resolve_engine_selection(
                cfg,
                msg,
                ambient_context=ambient_context,
                topic_store=topic_store,
                chat_prefs=chat_prefs,
                topic_key=tkey,
            )
            if selection is None:
                return
            engine, _ = selection
        else:
            engine = engine_arg
        if engine not in engine_ids:
            available = ", ".join(cfg.runtime.engine_ids)
            await reply(
                text=f"unknown engine `{engine}`.\navailable engines: `{available}`"
            )
            return
        scope = await apply_engine_override(
            reply=reply,
            tkey=tkey,
            topic_store=topic_store,
            chat_prefs=chat_prefs,
            chat_id=msg.chat_id,
            engine=engine,
            update=lambda current: EngineOverrides(
                model=model,
                reasoning=current.reasoning if current is not None else None,
            ),
            topic_unavailable="topic model overrides are unavailable.",
            chat_unavailable="chat model overrides are unavailable (no config path).",
        )
        if scope is None:
            return
        if scope == "topic":
            await reply(
                text=(
                    f"topic model override set to `{model}` for `{engine}`.\n"
                    "If you want a clean start on the new model, run `/new`."
                )
            )
            return
        await reply(
            text=(
                f"chat model override set to `{model}` for `{engine}`.\n"
                "If you want a clean start on the new model, run `/new`."
            )
        )
        return

    if action == "clear":
        engine = None
        if len(tokens) > 2:
            await reply(text=MODEL_USAGE)
            return
        if len(tokens) == 2:
            engine = tokens[1].strip().lower() or None
        if not await require_admin_or_private(
            cfg,
            msg,
            missing_sender="cannot verify sender for model overrides.",
            failed_member="failed to verify model override permissions.",
            denied="changing model overrides is restricted to group admins.",
        ):
            return
        if engine is None:
            selection = await resolve_engine_selection(
                cfg,
                msg,
                ambient_context=ambient_context,
                topic_store=topic_store,
                chat_prefs=chat_prefs,
                topic_key=tkey,
            )
            if selection is None:
                return
            engine, _ = selection
        if engine not in engine_ids:
            available = ", ".join(cfg.runtime.engine_ids)
            await reply(
                text=f"unknown engine `{engine}`.\navailable engines: `{available}`"
            )
            return
        scope = await apply_engine_override(
            reply=reply,
            tkey=tkey,
            topic_store=topic_store,
            chat_prefs=chat_prefs,
            chat_id=msg.chat_id,
            engine=engine,
            update=lambda current: EngineOverrides(
                model=None,
                reasoning=current.reasoning if current is not None else None,
            ),
            topic_unavailable="topic model overrides are unavailable.",
            chat_unavailable="chat model overrides are unavailable (no config path).",
        )
        if scope is None:
            return
        if scope == "topic":
            await reply(text="topic model override cleared (using chat default).")
            return
        await reply(text="chat model override cleared.")
        return

    await reply(text=MODEL_USAGE)
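
This hunk lines up with yee88/telegram/commands/model.py (+215). A comment-only sketch of how the /model sub-commands map onto the branches above; the engine id "codex" and model name "gpt-5-mini" are placeholder values for illustration.

# /model                       -> "show": report engine, model, and where each value came from
# /model set gpt-5-mini        -> "set": override the model for the resolved default engine
# /model set codex gpt-5-mini  -> "set": override the model for the named engine
# /model clear                 -> "clear": drop the override for the resolved default engine
# /model clear codex           -> "clear": drop the override for the named engine
# anything else                -> reply with MODEL_USAGE
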
@@ -0,0 +1,159 @@
from __future__ import annotations

from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import TYPE_CHECKING, Literal

from ...context import RunContext
from ...directives import DirectiveError
from ..chat_prefs import ChatPrefsStore
from ..engine_defaults import resolve_engine_for_message
from ..engine_overrides import EngineOverrides
from ..topic_state import TopicStateStore
from ..types import TelegramIncomingMessage
from .reply import make_reply

if TYPE_CHECKING:
    from ..bridge import TelegramBridgeConfig

ENGINE_SOURCE_LABELS = {
    "directive": "directive",
    "topic_default": "topic default",
    "chat_default": "chat default",
    "project_default": "project default",
    "global_default": "global default",
}
OVERRIDE_SOURCE_LABELS = {
    "topic_override": "topic override",
    "chat_default": "chat default",
    "default": "no override",
}


async def require_admin_or_private(
    cfg: TelegramBridgeConfig,
    msg: TelegramIncomingMessage,
    *,
    missing_sender: str,
    failed_member: str,
    denied: str,
) -> bool:
    reply = make_reply(cfg, msg)
    decision = await check_admin_or_private(
        cfg,
        msg,
        missing_sender=missing_sender,
        failed_member=failed_member,
        denied=denied,
    )
    if decision.allowed:
        return True
    if decision.error_text is not None:
        await reply(text=decision.error_text)
    return False


@dataclass(frozen=True, slots=True)
class PermissionDecision:
    allowed: bool
    error_text: str | None = None


async def check_admin_or_private(
    cfg: TelegramBridgeConfig,
    msg: TelegramIncomingMessage,
    *,
    missing_sender: str,
    failed_member: str,
    denied: str,
) -> PermissionDecision:
    sender_id = msg.sender_id
    if sender_id is None:
        return PermissionDecision(allowed=False, error_text=missing_sender)
    if msg.is_private:
        return PermissionDecision(allowed=True)
    member = await cfg.bot.get_chat_member(msg.chat_id, sender_id)
    if member is None:
        return PermissionDecision(allowed=False, error_text=failed_member)
    if member.status in {"creator", "administrator"}:
        return PermissionDecision(allowed=True)
    return PermissionDecision(allowed=False, error_text=denied)


async def resolve_engine_selection(
    cfg: TelegramBridgeConfig,
    msg: TelegramIncomingMessage,
    *,
    ambient_context: RunContext | None,
    topic_store: TopicStateStore | None,
    chat_prefs: ChatPrefsStore | None,
    topic_key: tuple[int, int] | None,
) -> tuple[str, str] | None:
    reply = make_reply(cfg, msg)
    try:
        resolved = cfg.runtime.resolve_message(
            text="",
            reply_text=msg.reply_to_text,
            ambient_context=ambient_context,
            chat_id=msg.chat_id,
        )
    except DirectiveError as exc:
        await reply(text=f"error:\n{exc}")
        return None
    selection = await resolve_engine_for_message(
        runtime=cfg.runtime,
        context=resolved.context,
        explicit_engine=None,
        chat_id=msg.chat_id,
        topic_key=topic_key,
        topic_store=topic_store,
        chat_prefs=chat_prefs,
    )
    return selection.engine, selection.source


def parse_set_args(
    tokens: tuple[str, ...], *, engine_ids: set[str]
) -> tuple[str | None, str | None]:
    if len(tokens) < 2:
        return None, None
    if len(tokens) == 2:
        maybe_engine = tokens[1].strip().lower()
        if maybe_engine in engine_ids:
            return None, None
        return None, tokens[1].strip()
    maybe_engine = tokens[1].strip().lower()
    if maybe_engine in engine_ids:
        value = " ".join(tokens[2:]).strip()
        return maybe_engine, value or None
    value = " ".join(tokens[1:]).strip()
    return None, value or None


async def apply_engine_override(
    *,
    reply: Callable[..., Awaitable[None]],
    tkey: tuple[int, int] | None,
    topic_store: TopicStateStore | None,
    chat_prefs: ChatPrefsStore | None,
    chat_id: int,
    engine: str,
    update: Callable[[EngineOverrides | None], EngineOverrides],
    topic_unavailable: str,
    chat_unavailable: str,
) -> Literal["topic", "chat"] | None:
    if tkey is not None:
        if topic_store is None:
            await reply(text=topic_unavailable)
            return None
        current = await topic_store.get_engine_override(tkey[0], tkey[1], engine)
        updated = update(current)
        await topic_store.set_engine_override(tkey[0], tkey[1], engine, updated)
        return "topic"
    if chat_prefs is None:
        await reply(text=chat_unavailable)
        return None
    current = await chat_prefs.get_engine_override(chat_id, engine)
    updated = update(current)
    await chat_prefs.set_engine_override(chat_id, engine, updated)
    return "chat"
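
This hunk corresponds to yee88/telegram/commands/overrides.py (+159). parse_set_args is a pure function, so its behaviour can be shown directly from the code above; the engine id "codex" and the model strings below are example values only.

engine_ids = {"codex"}
parse_set_args(("set",), engine_ids=engine_ids)                         # (None, None) -> usage error
parse_set_args(("set", "codex"), engine_ids=engine_ids)                 # (None, None) -> bare engine, no model
parse_set_args(("set", "gpt-5-mini"), engine_ids=engine_ids)            # (None, "gpt-5-mini")
parse_set_args(("set", "codex", "gpt-5-mini"), engine_ids=engine_ids)   # ("codex", "gpt-5-mini")
parse_set_args(("set", "some", "long model"), engine_ids=engine_ids)    # (None, "some long model")
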
@@ -0,0 +1,30 @@
from __future__ import annotations


def is_cancel_command(text: str) -> bool:
    stripped = text.strip()
    if not stripped:
        return False
    command = stripped.split(maxsplit=1)[0]
    return command == "/cancel" or command.startswith("/cancel@")


def _parse_slash_command(text: str) -> tuple[str | None, str]:
    stripped = text.lstrip()
    if not stripped.startswith("/"):
        return None, text
    lines = stripped.splitlines()
    if not lines:
        return None, text
    first_line = lines[0]
    token, _, rest = first_line.partition(" ")
    command = token[1:]
    if not command:
        return None, text
    if "@" in command:
        command = command.split("@", 1)[0]
    args_text = rest
    if len(lines) > 1:
        tail = "\n".join(lines[1:])
        args_text = f"{args_text}\n{tail}" if args_text else tail
    return command.lower(), args_text
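
This hunk is yee88/telegram/commands/parse.py (+30). Both helpers are pure string functions, so a few worked cases follow directly from the code; the bot name "my_bot" is a placeholder.

is_cancel_command("/cancel")                    # True
is_cancel_command("/cancel@my_bot now")         # True  (bot-suffixed form)
is_cancel_command("please /cancel")             # False (must be the first token)

_parse_slash_command("/model set o3")           # ("model", "set o3")
_parse_slash_command("/file@my_bot get a.txt")  # ("file", "get a.txt")
_parse_slash_command("/ctx\nmore detail")       # ("ctx", "more detail")
_parse_slash_command("hello")                   # (None, "hello")
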
@@ -0,0 +1,16 @@
from __future__ import annotations

from collections.abc import Awaitable, Callable
from dataclasses import dataclass


@dataclass(frozen=True, slots=True)
class ActionPlan:
    reply_text: str | None
    actions: tuple[Callable[[], Awaitable[None]], ...] = ()

    async def execute(self, reply: Callable[..., Awaitable[None]]) -> None:
        for action in self.actions:
            await action()
        if self.reply_text is not None:
            await reply(text=self.reply_text)
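
This final hunk is yee88/telegram/commands/plan.py (+16). A small self-contained sketch of how an ActionPlan might be driven, with stub coroutines standing in for the real side effects and reply helper.

import asyncio

async def _demo() -> None:
    async def save_setting() -> None:
        print("setting saved")           # stand-in side effect

    async def reply(*, text: str) -> None:
        print(f"reply: {text}")          # stand-in for the Telegram reply callable

    plan = ActionPlan(reply_text="done.", actions=(save_setting,))
    await plan.execute(reply)            # runs all actions, then sends the reply

asyncio.run(_demo())
# setting saved
# reply: done.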