ripperdoc-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripperdoc/__init__.py +3 -0
- ripperdoc/__main__.py +25 -0
- ripperdoc/cli/__init__.py +1 -0
- ripperdoc/cli/cli.py +317 -0
- ripperdoc/cli/commands/__init__.py +76 -0
- ripperdoc/cli/commands/agents_cmd.py +234 -0
- ripperdoc/cli/commands/base.py +19 -0
- ripperdoc/cli/commands/clear_cmd.py +18 -0
- ripperdoc/cli/commands/compact_cmd.py +19 -0
- ripperdoc/cli/commands/config_cmd.py +31 -0
- ripperdoc/cli/commands/context_cmd.py +114 -0
- ripperdoc/cli/commands/cost_cmd.py +77 -0
- ripperdoc/cli/commands/exit_cmd.py +19 -0
- ripperdoc/cli/commands/help_cmd.py +20 -0
- ripperdoc/cli/commands/mcp_cmd.py +65 -0
- ripperdoc/cli/commands/models_cmd.py +327 -0
- ripperdoc/cli/commands/resume_cmd.py +97 -0
- ripperdoc/cli/commands/status_cmd.py +167 -0
- ripperdoc/cli/commands/tasks_cmd.py +240 -0
- ripperdoc/cli/commands/todos_cmd.py +69 -0
- ripperdoc/cli/commands/tools_cmd.py +19 -0
- ripperdoc/cli/ui/__init__.py +1 -0
- ripperdoc/cli/ui/context_display.py +297 -0
- ripperdoc/cli/ui/helpers.py +22 -0
- ripperdoc/cli/ui/rich_ui.py +1010 -0
- ripperdoc/cli/ui/spinner.py +50 -0
- ripperdoc/core/__init__.py +1 -0
- ripperdoc/core/agents.py +306 -0
- ripperdoc/core/commands.py +33 -0
- ripperdoc/core/config.py +382 -0
- ripperdoc/core/default_tools.py +57 -0
- ripperdoc/core/permissions.py +227 -0
- ripperdoc/core/query.py +682 -0
- ripperdoc/core/system_prompt.py +418 -0
- ripperdoc/core/tool.py +214 -0
- ripperdoc/sdk/__init__.py +9 -0
- ripperdoc/sdk/client.py +309 -0
- ripperdoc/tools/__init__.py +1 -0
- ripperdoc/tools/background_shell.py +291 -0
- ripperdoc/tools/bash_output_tool.py +98 -0
- ripperdoc/tools/bash_tool.py +822 -0
- ripperdoc/tools/file_edit_tool.py +281 -0
- ripperdoc/tools/file_read_tool.py +168 -0
- ripperdoc/tools/file_write_tool.py +141 -0
- ripperdoc/tools/glob_tool.py +134 -0
- ripperdoc/tools/grep_tool.py +232 -0
- ripperdoc/tools/kill_bash_tool.py +136 -0
- ripperdoc/tools/ls_tool.py +298 -0
- ripperdoc/tools/mcp_tools.py +804 -0
- ripperdoc/tools/multi_edit_tool.py +393 -0
- ripperdoc/tools/notebook_edit_tool.py +325 -0
- ripperdoc/tools/task_tool.py +282 -0
- ripperdoc/tools/todo_tool.py +362 -0
- ripperdoc/tools/tool_search_tool.py +366 -0
- ripperdoc/utils/__init__.py +1 -0
- ripperdoc/utils/bash_constants.py +51 -0
- ripperdoc/utils/bash_output_utils.py +43 -0
- ripperdoc/utils/exit_code_handlers.py +241 -0
- ripperdoc/utils/log.py +76 -0
- ripperdoc/utils/mcp.py +427 -0
- ripperdoc/utils/memory.py +239 -0
- ripperdoc/utils/message_compaction.py +640 -0
- ripperdoc/utils/messages.py +399 -0
- ripperdoc/utils/output_utils.py +233 -0
- ripperdoc/utils/path_utils.py +46 -0
- ripperdoc/utils/permissions/__init__.py +21 -0
- ripperdoc/utils/permissions/path_validation_utils.py +165 -0
- ripperdoc/utils/permissions/shell_command_validation.py +74 -0
- ripperdoc/utils/permissions/tool_permission_utils.py +279 -0
- ripperdoc/utils/safe_get_cwd.py +24 -0
- ripperdoc/utils/sandbox_utils.py +38 -0
- ripperdoc/utils/session_history.py +223 -0
- ripperdoc/utils/session_usage.py +110 -0
- ripperdoc/utils/shell_token_utils.py +95 -0
- ripperdoc/utils/todo.py +199 -0
- ripperdoc-0.1.0.dist-info/METADATA +178 -0
- ripperdoc-0.1.0.dist-info/RECORD +81 -0
- ripperdoc-0.1.0.dist-info/WHEEL +5 -0
- ripperdoc-0.1.0.dist-info/entry_points.txt +3 -0
- ripperdoc-0.1.0.dist-info/licenses/LICENSE +53 -0
- ripperdoc-0.1.0.dist-info/top_level.txt +1 -0

ripperdoc/cli/commands/compact_cmd.py
@@ -0,0 +1,19 @@
+import asyncio
+
+from typing import Any
+from .base import SlashCommand
+
+
+def _handle(ui: Any, trimmed_arg: str) -> bool:
+    asyncio.run(ui._run_manual_compact(trimmed_arg))
+    return True
+
+
+command = SlashCommand(
+    name="compact",
+    description="Compact conversation history",
+    handler=_handle,
+)
+
+
+__all__ = ["command"]
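
Each command module follows the same shape: a private handler that takes the UI object and the trimmed argument string, a module-level SlashCommand instance, and an explicit __all__. A minimal sketch of how such a module could be dispatched, assuming only the fields visible in these hunks (name, description, handler, aliases); ripperdoc's actual registry lives in ripperdoc/cli/commands/__init__.py and may differ:

from ripperdoc.cli.commands import compact_cmd

def dispatch(ui, line: str) -> bool:
    # "/compact keep the last task" -> name "compact", argument "keep the last task"
    name, _, arg = line.lstrip("/").partition(" ")
    cmd = compact_cmd.command
    if name == cmd.name or name in getattr(cmd, "aliases", ()):
        return cmd.handler(ui, arg.strip())
    return False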

ripperdoc/cli/commands/config_cmd.py
@@ -0,0 +1,31 @@
+from rich.markup import escape
+
+from ripperdoc.core.config import get_global_config
+from ripperdoc.cli.ui.helpers import get_profile_for_pointer
+
+from typing import Any
+from .base import SlashCommand
+
+
+def _handle(ui: Any, _: str) -> bool:
+    config = get_global_config()
+    profile = get_profile_for_pointer("main")
+    main_pointer = getattr(config.model_pointers, "main", "default")
+    model_label = profile.model if profile else "Not configured"
+
+    ui.console.print(
+        f"\n[bold]Model (main -> {escape(str(main_pointer))}):[/bold] {escape(str(model_label))}"
+    )
+    ui.console.print(f"[bold]Safe Mode:[/bold] {escape(str(ui.safe_mode))}")
+    ui.console.print(f"[bold]Verbose:[/bold] {escape(str(ui.verbose))}")
+    return True
+
+
+command = SlashCommand(
+    name="config",
+    description="Show current configuration",
+    handler=_handle,
+)
+
+
+__all__ = ["command"]

ripperdoc/cli/commands/context_cmd.py
@@ -0,0 +1,114 @@
+import asyncio
+import json
+
+from ripperdoc.cli.ui.helpers import get_profile_for_pointer
+from ripperdoc.cli.ui.context_display import format_tokens
+from ripperdoc.core.config import get_global_config, provider_protocol
+from ripperdoc.core.query import QueryContext
+from ripperdoc.core.system_prompt import build_system_prompt
+from ripperdoc.utils.memory import build_memory_instructions
+from ripperdoc.utils.message_compaction import (
+    estimate_tokens_from_text,
+    get_remaining_context_tokens,
+    resolve_auto_compact_enabled,
+    summarize_context_usage,
+)
+from ripperdoc.utils.mcp import (
+    estimate_mcp_tokens,
+    format_mcp_instructions,
+    load_mcp_servers_async,
+    shutdown_mcp_runtime,
+)
+
+from typing import Any
+from .base import SlashCommand
+
+
+def _handle(ui: Any, _: str) -> bool:
+    config = get_global_config()
+    model_profile = get_profile_for_pointer("main")
+    max_context_tokens = get_remaining_context_tokens(model_profile, config.context_token_limit)
+    auto_compact_enabled = resolve_auto_compact_enabled(config)
+    protocol = provider_protocol(model_profile.provider) if model_profile else "openai"
+
+    if not ui.query_context:
+        ui.query_context = QueryContext(
+            tools=ui.get_default_tools(),
+            safe_mode=ui.safe_mode,
+            verbose=ui.verbose,
+        )
+
+    async def _load_servers():
+        try:
+            return await load_mcp_servers_async(ui.project_path)
+        finally:
+            await shutdown_mcp_runtime()
+
+    servers = asyncio.run(_load_servers())
+    mcp_instructions = format_mcp_instructions(servers)
+    base_system_prompt = build_system_prompt(
+        ui.query_context.tools,
+        "",
+        {},
+        mcp_instructions=mcp_instructions,
+    )
+    memory_instructions = build_memory_instructions()
+    memory_tokens = estimate_tokens_from_text(memory_instructions) if memory_instructions else 0
+    mcp_tokens = estimate_mcp_tokens(servers) if mcp_instructions else 0
+
+    breakdown = summarize_context_usage(
+        ui.conversation_messages,
+        ui.query_context.tools,
+        base_system_prompt,
+        max_context_tokens,
+        auto_compact_enabled,
+        memory_tokens=memory_tokens,
+        mcp_tokens=mcp_tokens,
+        protocol=protocol,
+    )
+
+    model_label = model_profile.model if model_profile else "Unknown model"
+    lines = ui._context_usage_lines(breakdown, model_label, auto_compact_enabled)
+
+    lines.append("")
+    # Append a brief tool listing so users can see which tools are currently loaded.
+    try:
+        # Detailed MCP tool listing with token estimates.
+        mcp_tools = [
+            tool
+            for tool in getattr(ui.query_context, "tool_registry", ui.query_context).all_tools
+            if getattr(tool, "is_mcp", False) or getattr(tool, "name", "").startswith("mcp__")
+        ]
+        if mcp_tools:
+            lines.append(" MCP tools · /mcp")
+            for tool in mcp_tools[:20]:
+                name = getattr(tool, "name", "unknown")
+                display = name
+                parts = name.split("__")
+                if len(parts) >= 3 and parts[0] == "mcp":
+                    server = parts[1]
+                    display = "__".join(parts[2:])
+                    display = f"{display} ({server})"
+                try:
+                    schema = tool.input_schema.model_json_schema()
+                    token_est = estimate_tokens_from_text(json.dumps(schema, sort_keys=True))
+                except Exception:
+                    token_est = 0
+                lines.append(f" └ {display}: {format_tokens(token_est)} tokens")
+            if len(mcp_tools) > 20:
+                lines.append(f" └ ... (+{len(mcp_tools) - 20} more)")
+    except Exception:
+        pass
+    for line in lines:
+        ui.console.print(line)
+    return True
+
+
+command = SlashCommand(
+    name="context",
+    description="Show current conversation context summary",
+    handler=_handle,
+)
+
+
+__all__ = ["command"]
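
For reference, the name-splitting in the handler above is what turns a fully qualified MCP tool name into the shorter label shown in the listing. The same logic as a standalone helper (illustrative only, not part of the package):

def mcp_display_name(name: str) -> str:
    # Mirrors the /context handler above: "mcp__github__create_issue" -> "create_issue (github)"
    parts = name.split("__")
    if len(parts) >= 3 and parts[0] == "mcp":
        return f"{'__'.join(parts[2:])} ({parts[1]})"
    return name

assert mcp_display_name("mcp__github__create_issue") == "create_issue (github)"
assert mcp_display_name("read_file") == "read_file"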

ripperdoc/cli/commands/cost_cmd.py
@@ -0,0 +1,77 @@
+from ripperdoc.utils.session_usage import get_session_usage
+
+from typing import Any
+from .base import SlashCommand
+
+
+def _fmt_tokens(value: int) -> str:
+    """Format integers with thousand separators."""
+    return f"{int(value):,}"
+
+
+def _format_duration(duration_ms: float) -> str:
+    """Render milliseconds into a compact human-readable duration."""
+    seconds = int(duration_ms // 1000)
+    if seconds < 60:
+        return f"{duration_ms / 1000:.2f}s"
+    minutes, secs = divmod(seconds, 60)
+    if minutes < 60:
+        return f"{minutes}m {secs}s"
+    hours, mins = divmod(minutes, 60)
+    return f"{hours}h {mins}m {secs}s"
+
+
+def _handle(ui: Any, _: str) -> bool:
+    usage = get_session_usage()
+    if not usage.models:
+        ui.console.print("[yellow]No model usage recorded yet.[/yellow]")
+        return True
+
+    total_input = usage.total_input_tokens
+    total_output = usage.total_output_tokens
+    total_cache_read = usage.total_cache_read_tokens
+    total_cache_creation = usage.total_cache_creation_tokens
+    total_tokens = total_input + total_output + total_cache_read + total_cache_creation
+
+    ui.console.print("\n[bold]Session token usage[/bold]")
+    ui.console.print(
+        f" Total: {_fmt_tokens(total_tokens)} tokens "
+        f"(input {_fmt_tokens(total_input)}, output {_fmt_tokens(total_output)})"
+    )
+    if total_cache_read or total_cache_creation:
+        ui.console.print(
+            f" Cache: {_fmt_tokens(total_cache_read)} read, "
+            f"{_fmt_tokens(total_cache_creation)} write"
+        )
+    ui.console.print(f" Requests: {usage.total_requests}")
+    if usage.total_duration_ms:
+        ui.console.print(f" API time: {_format_duration(usage.total_duration_ms)}")
+
+    ui.console.print("\n[bold]By model:[/bold]")
+    for model_name, stats in usage.models.items():
+        line = (
+            f" {model_name}: "
+            f"{_fmt_tokens(stats.input_tokens)} in, "
+            f"{_fmt_tokens(stats.output_tokens)} out"
+        )
+        if stats.cache_read_input_tokens:
+            line += f", {_fmt_tokens(stats.cache_read_input_tokens)} cache read"
+        if stats.cache_creation_input_tokens:
+            line += f", {_fmt_tokens(stats.cache_creation_input_tokens)} cache write"
+        line += f" ({stats.requests} call" f"{'' if stats.requests == 1 else 's'}"
+        if stats.duration_ms:
+            line += f", {_format_duration(stats.duration_ms)} total"
+        line += ")"
+        ui.console.print(line)
+
+    return True
+
+
+command = SlashCommand(
+    name="cost",
+    description="Show total tokens used in this session",
+    handler=_handle,
+)
+
+
+__all__ = ["command"]
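
The duration formatter above switches units by magnitude; a quick check of its behavior, assuming the module is importable as packaged:

from ripperdoc.cli.commands.cost_cmd import _format_duration

assert _format_duration(1_500) == "1.50s"         # under a minute: fractional seconds
assert _format_duration(95_000) == "1m 35s"       # minutes and seconds
assert _format_duration(3_725_000) == "1h 2m 5s"  # hours, minutes, seconds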

ripperdoc/cli/commands/exit_cmd.py
@@ -0,0 +1,19 @@
+from typing import Any
+from .base import SlashCommand
+
+
+def _handle(ui: Any, _: str) -> bool:
+    ui.console.print("[yellow]Goodbye![/yellow]")
+    ui._should_exit = True
+    return True
+
+
+command = SlashCommand(
+    name="exit",
+    description="Exit Ripperdoc",
+    handler=_handle,
+    aliases=("quit",),
+)
+
+
+__all__ = ["command"]

ripperdoc/cli/commands/help_cmd.py
@@ -0,0 +1,20 @@
+from typing import Any
+from .base import SlashCommand
+
+
+def _handle(ui: Any, _: str) -> bool:
+    ui.console.print("\n[bold]Available Slash Commands:[/bold]")
+    for cmd in ui.command_list:
+        alias_text = f" (aliases: {', '.join(cmd.aliases)})" if cmd.aliases else ""
+        ui.console.print(f" /{cmd.name:<8} - {cmd.description}{alias_text}")
+    return True
+
+
+command = SlashCommand(
+    name="help",
+    description="Show available slash commands",
+    handler=_handle,
+)
+
+
+__all__ = ["command"]

ripperdoc/cli/commands/mcp_cmd.py
@@ -0,0 +1,65 @@
+import asyncio
+
+from rich.markup import escape
+
+from ripperdoc.utils.mcp import load_mcp_servers_async, shutdown_mcp_runtime
+
+from typing import Any
+from .base import SlashCommand
+
+
+def _handle(ui: Any, _: str) -> bool:
+    async def _load() -> list:
+        try:
+            return await load_mcp_servers_async(ui.project_path)
+        finally:
+            await shutdown_mcp_runtime()
+
+    servers = asyncio.run(_load())
+    if not servers:
+        ui.console.print(
+            "[yellow]No MCP servers configured. Add servers to ~/.ripperdoc/mcp.json, ~/.mcp.json, or a project .mcp.json file.[/yellow]"
+        )
+        return True
+
+    ui.console.print("\n[bold]MCP servers[/bold]")
+    for server in servers:
+        status = server.status or "unknown"
+        url_part = f" ({server.url})" if server.url else ""
+        ui.console.print(f"- {server.name}{url_part} — {status}", markup=False)
+        if server.command:
+            cmd_line = " ".join([server.command, *server.args]) if server.args else server.command
+            ui.console.print(f" Command: {cmd_line}", markup=False)
+        if server.description:
+            ui.console.print(f" {server.description}", markup=False)
+        if server.error:
+            ui.console.print(f" [red]Error:[/red] {escape(str(server.error))}")
+        if server.instructions:
+            snippet = server.instructions.strip()
+            if len(snippet) > 160:
+                snippet = snippet[:157] + "..."
+            ui.console.print(f" Instructions: {snippet}", markup=False)
+        if server.tools:
+            ui.console.print(" Tools:")
+            for tool in server.tools:
+                desc = f" — {tool.description}" if tool.description else ""
+                ui.console.print(f" • {tool.name}{desc}", markup=False)
+        else:
+            ui.console.print(" Tools: none discovered")
+        if server.resources:
+            ui.console.print(
+                " Resources: " + ", ".join(res.uri for res in server.resources), markup=False
+            )
+        elif not server.tools:
+            ui.console.print(" Resources: none")
+    return True
+
+
+command = SlashCommand(
+    name="mcp",
+    description="Show configured MCP servers and their tools",
+    handler=_handle,
+)
+
+
+__all__ = ["command"]
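
Both /context and /mcp use the same short-lived MCP session pattern: load the configured servers, inspect them, and always shut the runtime down in a finally block. A standalone sketch of that pattern, using only the helpers imported above (the project path and the printed fields are illustrative):

import asyncio

from ripperdoc.utils.mcp import load_mcp_servers_async, shutdown_mcp_runtime

async def _snapshot(project_path: str):
    try:
        return await load_mcp_servers_async(project_path)
    finally:
        await shutdown_mcp_runtime()

servers = asyncio.run(_snapshot("."))
for server in servers:
    print(server.name, server.status or "unknown", len(server.tools or []))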

ripperdoc/cli/commands/models_cmd.py
@@ -0,0 +1,327 @@
+from typing import Any
+from getpass import getpass
+from typing import Optional
+
+from rich.markup import escape
+
+from ripperdoc.cli.ui.helpers import get_profile_for_pointer
+from ripperdoc.core.config import (
+    ModelProfile,
+    ProviderType,
+    add_model_profile,
+    delete_model_profile,
+    get_global_config,
+    set_model_pointer,
+)
+
+from .base import SlashCommand
+
+
+def _handle(ui: Any, trimmed_arg: str) -> bool:
+    console = ui.console
+    tokens = trimmed_arg.split()
+    subcmd = tokens[0].lower() if tokens else ""
+    config = get_global_config()
+
+    def print_models_usage() -> None:
+        console.print("[bold]/models[/bold] — list configured models")
+        console.print("[bold]/models add <name>[/bold] — add or update a model profile")
+        console.print("[bold]/models edit <name>[/bold] — edit an existing model profile")
+        console.print("[bold]/models delete <name>[/bold] — delete a model profile")
+        console.print("[bold]/models use <name>[/bold] — set the main model pointer")
+
+    def parse_int(prompt_text: str, default_value: Optional[int]) -> Optional[int]:
+        raw = console.input(prompt_text).strip()
+        if not raw:
+            return default_value
+        try:
+            return int(raw)
+        except ValueError:
+            console.print("[yellow]Invalid number, keeping previous value.[/yellow]")
+            return default_value
+
+    def parse_float(prompt_text: str, default_value: float) -> float:
+        raw = console.input(prompt_text).strip()
+        if not raw:
+            return default_value
+        try:
+            return float(raw)
+        except ValueError:
+            console.print("[yellow]Invalid number, keeping previous value.[/yellow]")
+            return default_value
+
+    if subcmd in ("help", "-h", "--help"):
+        print_models_usage()
+        return True
+
+    if subcmd in ("add", "create"):
+        profile_name = tokens[1] if len(tokens) > 1 else console.input("Profile name: ").strip()
+        if not profile_name:
+            console.print("[red]Model profile name is required.[/red]")
+            print_models_usage()
+            return True
+
+        overwrite = False
+        existing_profile = config.model_profiles.get(profile_name)
+        if existing_profile:
+            confirm = (
+                console.input(f"Profile '{profile_name}' exists. Overwrite? [y/N]: ")
+                .strip()
+                .lower()
+            )
+            if confirm not in ("y", "yes"):
+                return True
+            overwrite = True
+
+        current_profile = get_profile_for_pointer("main")
+        default_provider = (
+            (current_profile.provider.value) if current_profile else ProviderType.ANTHROPIC.value
+        )
+        provider_input = (
+            console.input(
+                f"Protocol ({', '.join(p.value for p in ProviderType)}) [{default_provider}]: "
+            )
+            .strip()
+            .lower()
+            or default_provider
+        )
+        try:
+            provider = ProviderType(provider_input)
+        except ValueError:
+            console.print(f"[red]Invalid provider: {escape(provider_input)}[/red]")
+            print_models_usage()
+            return True
+
+        default_model = (
+            existing_profile.model
+            if existing_profile
+            else (current_profile.model if current_profile else "")
+        )
+        model_prompt = f"Model name to send{f' [{default_model}]' if default_model else ''}: "
+        model_name = console.input(model_prompt).strip() or default_model
+        if not model_name:
+            console.print("[red]Model name is required.[/red]")
+            return True
+
+        api_key_input = getpass("API key (leave blank to keep unset): ").strip()
+        api_key = api_key_input or (existing_profile.api_key if existing_profile else None)
+
+        api_base_default = existing_profile.api_base if existing_profile else ""
+        api_base = (
+            console.input(
+                f"API base (optional){f' [{api_base_default}]' if api_base_default else ''}: "
+            ).strip()
+            or api_base_default
+            or None
+        )
+
+        max_tokens_default = existing_profile.max_tokens if existing_profile else 4096
+        max_tokens = (
+            parse_int(
+                f"Max output tokens [{max_tokens_default}]: ",
+                max_tokens_default,
+            )
+            or max_tokens_default
+        )
+
+        temp_default = existing_profile.temperature if existing_profile else 0.7
+        temperature = parse_float(
+            f"Temperature [{temp_default}]: ",
+            temp_default,
+        )
+
+        context_window_default = existing_profile.context_window if existing_profile else None
+        context_prompt = "Context window tokens (optional"
+        if context_window_default:
+            context_prompt += f", current {context_window_default}"
+        context_prompt += "): "
+        context_window = parse_int(context_prompt, context_window_default)
+
+        default_set_main = (
+            not config.model_profiles
+            or getattr(config.model_pointers, "main", "") not in config.model_profiles
+        )
+        set_main_input = (
+            console.input(f"Set as main model? [{'Y' if default_set_main else 'y'}/N]: ")
+            .strip()
+            .lower()
+        )
+        set_as_main = set_main_input in ("y", "yes") if set_main_input else default_set_main
+
+        profile = ModelProfile(
+            provider=provider,
+            model=model_name,
+            api_key=api_key,
+            api_base=api_base,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            context_window=context_window,
+        )
+
+        try:
+            add_model_profile(
+                profile_name,
+                profile,
+                overwrite=overwrite,
+                set_as_main=set_as_main,
+            )
+        except Exception as exc:
+            console.print(f"[red]Failed to save model: {escape(str(exc))}[/red]")
+            return True
+
+        marker = " (main)" if set_as_main else ""
+        console.print(f"[green]✓ Model '{escape(profile_name)}' saved{marker}[/green]")
+        return True
+
+    if subcmd in ("edit", "update"):
+        profile_name = tokens[1] if len(tokens) > 1 else console.input("Profile to edit: ").strip()
+        existing_profile = config.model_profiles.get(profile_name or "")
+        if not profile_name or not existing_profile:
+            console.print("[red]Model profile not found.[/red]")
+            print_models_usage()
+            return True
+
+        provider_default = existing_profile.provider.value
+        provider_input = (
+            console.input(
+                f"Protocol ({', '.join(p.value for p in ProviderType)}) [{provider_default}]: "
+            )
+            .strip()
+            .lower()
+            or provider_default
+        )
+        try:
+            provider = ProviderType(provider_input)
+        except ValueError:
+            console.print(f"[red]Invalid provider: {escape(provider_input)}[/red]")
+            return True
+
+        model_name = (
+            console.input(f"Model name to send [{existing_profile.model}]: ").strip()
+            or existing_profile.model
+        )
+
+        api_key_label = "[set]" if existing_profile.api_key else "[not set]"
+        api_key_prompt = f"API key {api_key_label} (Enter=keep, '-'=clear): "
+        api_key_input = getpass(api_key_prompt).strip()
+        if api_key_input == "-":
+            api_key = None
+        elif api_key_input:
+            api_key = api_key_input
+        else:
+            api_key = existing_profile.api_key
+
+        api_base = (
+            console.input(f"API base (optional) [{existing_profile.api_base or ''}]: ").strip()
+            or existing_profile.api_base
+        )
+        if api_base == "":
+            api_base = None
+
+        max_tokens = (
+            parse_int(
+                f"Max output tokens [{existing_profile.max_tokens}]: ",
+                existing_profile.max_tokens,
+            )
+            or existing_profile.max_tokens
+        )
+
+        temperature = parse_float(
+            f"Temperature [{existing_profile.temperature}]: ",
+            existing_profile.temperature,
+        )
+
+        context_window = parse_int(
+            f"Context window tokens [{existing_profile.context_window or 'unset'}]: ",
+            existing_profile.context_window,
+        )
+
+        updated_profile = ModelProfile(
+            provider=provider,
+            model=model_name,
+            api_key=api_key,
+            api_base=api_base,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            context_window=context_window,
+        )
+
+        try:
+            add_model_profile(
+                profile_name,
+                updated_profile,
+                overwrite=True,
+                set_as_main=False,
+            )
+        except Exception as exc:
+            console.print(f"[red]Failed to update model: {escape(str(exc))}[/red]")
+            return True
+
+        console.print(f"[green]✓ Model '{escape(profile_name)}' updated[/green]")
+        return True
+
+    if subcmd in ("delete", "del", "remove"):
+        target = tokens[1] if len(tokens) > 1 else console.input("Model to delete: ").strip()
+        if not target:
+            console.print("[red]Model name is required.[/red]")
+            print_models_usage()
+            return True
+        try:
+            delete_model_profile(target)
+            console.print(f"[green]✓ Deleted model '{escape(target)}'[/green]")
+        except KeyError as exc:
+            console.print(f"[yellow]{escape(str(exc))}[/yellow]")
+        except Exception as exc:
+            console.print(f"[red]Failed to delete model: {escape(str(exc))}[/red]")
+            print_models_usage()
+        return True
+
+    if subcmd in ("use", "main", "set-main"):
+        target = tokens[1] if len(tokens) > 1 else console.input("Model to use as main: ").strip()
+        if not target:
+            console.print("[red]Model name is required.[/red]")
+            print_models_usage()
+            return True
+        try:
+            set_model_pointer("main", target)
+            console.print(f"[green]✓ Main model set to '{escape(target)}'[/green]")
+        except Exception as exc:
+            console.print(f"[red]{escape(str(exc))}[/red]")
+            print_models_usage()
+        return True
+
+    print_models_usage()
+    pointer_map = config.model_pointers.model_dump()
+    if not config.model_profiles:
+        console.print(" • No models configured")
+        return True
+
+    console.print("\n[bold]Configured Models:[/bold]")
+    for name, profile in config.model_profiles.items():
+        markers = [ptr for ptr, value in pointer_map.items() if value == name]
+        marker_text = f" ({', '.join(markers)})" if markers else ""
+        console.print(f" • {escape(name)}{marker_text}", markup=False)
+        console.print(f" protocol: {profile.provider.value}", markup=False)
+        console.print(f" model: {profile.model}", markup=False)
+        if profile.api_base:
+            console.print(f" api_base: {profile.api_base}", markup=False)
+        if profile.context_window:
+            console.print(f" context: {profile.context_window} tokens", markup=False)
+        console.print(
+            f" max_tokens: {profile.max_tokens}, temperature: {profile.temperature}",
+            markup=False,
+        )
+        console.print(f" api_key: {'***' if profile.api_key else 'Not set'}", markup=False)
+    pointer_labels = ", ".join(f"{p}->{v or '-'}" for p, v in pointer_map.items())
+    console.print(f"[dim]Pointers: {escape(pointer_labels)}[/dim]")
+    return True
+
+
+command = SlashCommand(
+    name="models",
+    description="Manage models: list/create/delete/use",
+    handler=_handle,
+)
+
+
+__all__ = ["command"]
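
A hedged sketch of the non-interactive equivalent of "/models add" followed by "/models use", using only the config helpers this module imports. The profile name, model id, and values below are placeholders; the defaults (max_tokens 4096, temperature 0.7) match the ones the add flow offers.

from ripperdoc.core.config import (
    ModelProfile,
    ProviderType,
    add_model_profile,
    set_model_pointer,
)

profile = ModelProfile(
    provider=ProviderType.ANTHROPIC,  # any member of ProviderType
    model="example-model-id",         # placeholder: the provider's model name
    api_key=None,                     # or a real key string
    api_base=None,
    max_tokens=4096,
    temperature=0.7,
    context_window=200_000,
)
add_model_profile("work", profile, overwrite=True, set_as_main=False)
set_model_pointer("main", "work")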