codemaster-cli 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codemaster_cli-2.2.0.dist-info/METADATA +645 -0
- codemaster_cli-2.2.0.dist-info/RECORD +170 -0
- codemaster_cli-2.2.0.dist-info/WHEEL +4 -0
- codemaster_cli-2.2.0.dist-info/entry_points.txt +3 -0
- vibe/__init__.py +6 -0
- vibe/acp/__init__.py +0 -0
- vibe/acp/acp_agent_loop.py +746 -0
- vibe/acp/entrypoint.py +81 -0
- vibe/acp/tools/__init__.py +0 -0
- vibe/acp/tools/base.py +100 -0
- vibe/acp/tools/builtins/bash.py +134 -0
- vibe/acp/tools/builtins/read_file.py +54 -0
- vibe/acp/tools/builtins/search_replace.py +129 -0
- vibe/acp/tools/builtins/todo.py +65 -0
- vibe/acp/tools/builtins/write_file.py +98 -0
- vibe/acp/tools/session_update.py +118 -0
- vibe/acp/utils.py +213 -0
- vibe/cli/__init__.py +0 -0
- vibe/cli/autocompletion/__init__.py +0 -0
- vibe/cli/autocompletion/base.py +22 -0
- vibe/cli/autocompletion/path_completion.py +177 -0
- vibe/cli/autocompletion/slash_command.py +99 -0
- vibe/cli/cli.py +188 -0
- vibe/cli/clipboard.py +69 -0
- vibe/cli/commands.py +116 -0
- vibe/cli/entrypoint.py +163 -0
- vibe/cli/history_manager.py +91 -0
- vibe/cli/plan_offer/adapters/http_whoami_gateway.py +67 -0
- vibe/cli/plan_offer/decide_plan_offer.py +87 -0
- vibe/cli/plan_offer/ports/whoami_gateway.py +23 -0
- vibe/cli/terminal_setup.py +323 -0
- vibe/cli/textual_ui/__init__.py +0 -0
- vibe/cli/textual_ui/ansi_markdown.py +58 -0
- vibe/cli/textual_ui/app.py +1546 -0
- vibe/cli/textual_ui/app.tcss +1020 -0
- vibe/cli/textual_ui/external_editor.py +32 -0
- vibe/cli/textual_ui/handlers/__init__.py +5 -0
- vibe/cli/textual_ui/handlers/event_handler.py +147 -0
- vibe/cli/textual_ui/widgets/__init__.py +0 -0
- vibe/cli/textual_ui/widgets/approval_app.py +192 -0
- vibe/cli/textual_ui/widgets/banner/banner.py +85 -0
- vibe/cli/textual_ui/widgets/banner/petit_chat.py +195 -0
- vibe/cli/textual_ui/widgets/braille_renderer.py +58 -0
- vibe/cli/textual_ui/widgets/chat_input/__init__.py +7 -0
- vibe/cli/textual_ui/widgets/chat_input/body.py +214 -0
- vibe/cli/textual_ui/widgets/chat_input/completion_manager.py +58 -0
- vibe/cli/textual_ui/widgets/chat_input/completion_popup.py +43 -0
- vibe/cli/textual_ui/widgets/chat_input/container.py +195 -0
- vibe/cli/textual_ui/widgets/chat_input/text_area.py +365 -0
- vibe/cli/textual_ui/widgets/compact.py +41 -0
- vibe/cli/textual_ui/widgets/config_app.py +171 -0
- vibe/cli/textual_ui/widgets/context_progress.py +30 -0
- vibe/cli/textual_ui/widgets/load_more.py +43 -0
- vibe/cli/textual_ui/widgets/loading.py +201 -0
- vibe/cli/textual_ui/widgets/messages.py +277 -0
- vibe/cli/textual_ui/widgets/no_markup_static.py +11 -0
- vibe/cli/textual_ui/widgets/path_display.py +28 -0
- vibe/cli/textual_ui/widgets/proxy_setup_app.py +127 -0
- vibe/cli/textual_ui/widgets/question_app.py +496 -0
- vibe/cli/textual_ui/widgets/spinner.py +194 -0
- vibe/cli/textual_ui/widgets/status_message.py +76 -0
- vibe/cli/textual_ui/widgets/teleport_message.py +31 -0
- vibe/cli/textual_ui/widgets/tool_widgets.py +371 -0
- vibe/cli/textual_ui/widgets/tools.py +201 -0
- vibe/cli/textual_ui/windowing/__init__.py +29 -0
- vibe/cli/textual_ui/windowing/history.py +105 -0
- vibe/cli/textual_ui/windowing/history_windowing.py +71 -0
- vibe/cli/textual_ui/windowing/state.py +105 -0
- vibe/cli/update_notifier/__init__.py +47 -0
- vibe/cli/update_notifier/adapters/filesystem_update_cache_repository.py +59 -0
- vibe/cli/update_notifier/adapters/github_update_gateway.py +101 -0
- vibe/cli/update_notifier/adapters/pypi_update_gateway.py +107 -0
- vibe/cli/update_notifier/ports/update_cache_repository.py +16 -0
- vibe/cli/update_notifier/ports/update_gateway.py +53 -0
- vibe/cli/update_notifier/update.py +139 -0
- vibe/cli/update_notifier/whats_new.py +49 -0
- vibe/core/__init__.py +5 -0
- vibe/core/agent_loop.py +1075 -0
- vibe/core/agents/__init__.py +31 -0
- vibe/core/agents/manager.py +165 -0
- vibe/core/agents/models.py +122 -0
- vibe/core/auth/__init__.py +6 -0
- vibe/core/auth/crypto.py +137 -0
- vibe/core/auth/github.py +178 -0
- vibe/core/autocompletion/__init__.py +0 -0
- vibe/core/autocompletion/completers.py +257 -0
- vibe/core/autocompletion/file_indexer/__init__.py +10 -0
- vibe/core/autocompletion/file_indexer/ignore_rules.py +156 -0
- vibe/core/autocompletion/file_indexer/indexer.py +179 -0
- vibe/core/autocompletion/file_indexer/store.py +169 -0
- vibe/core/autocompletion/file_indexer/watcher.py +71 -0
- vibe/core/autocompletion/fuzzy.py +189 -0
- vibe/core/autocompletion/path_prompt.py +108 -0
- vibe/core/autocompletion/path_prompt_adapter.py +149 -0
- vibe/core/config.py +673 -0
- vibe/core/config_PATCH_INSTRUCTIONS.md +77 -0
- vibe/core/llm/__init__.py +0 -0
- vibe/core/llm/backend/anthropic.py +630 -0
- vibe/core/llm/backend/base.py +38 -0
- vibe/core/llm/backend/factory.py +7 -0
- vibe/core/llm/backend/generic.py +425 -0
- vibe/core/llm/backend/mistral.py +381 -0
- vibe/core/llm/backend/vertex.py +115 -0
- vibe/core/llm/exceptions.py +195 -0
- vibe/core/llm/format.py +184 -0
- vibe/core/llm/message_utils.py +24 -0
- vibe/core/llm/types.py +120 -0
- vibe/core/middleware.py +209 -0
- vibe/core/output_formatters.py +85 -0
- vibe/core/paths/__init__.py +0 -0
- vibe/core/paths/config_paths.py +68 -0
- vibe/core/paths/global_paths.py +40 -0
- vibe/core/programmatic.py +56 -0
- vibe/core/prompts/__init__.py +32 -0
- vibe/core/prompts/cli.md +111 -0
- vibe/core/prompts/compact.md +48 -0
- vibe/core/prompts/dangerous_directory.md +5 -0
- vibe/core/prompts/explore.md +50 -0
- vibe/core/prompts/project_context.md +8 -0
- vibe/core/prompts/tests.md +1 -0
- vibe/core/proxy_setup.py +65 -0
- vibe/core/session/session_loader.py +222 -0
- vibe/core/session/session_logger.py +318 -0
- vibe/core/session/session_migration.py +41 -0
- vibe/core/skills/__init__.py +7 -0
- vibe/core/skills/manager.py +132 -0
- vibe/core/skills/models.py +92 -0
- vibe/core/skills/parser.py +39 -0
- vibe/core/system_prompt.py +466 -0
- vibe/core/telemetry/__init__.py +0 -0
- vibe/core/telemetry/send.py +185 -0
- vibe/core/teleport/errors.py +9 -0
- vibe/core/teleport/git.py +196 -0
- vibe/core/teleport/nuage.py +180 -0
- vibe/core/teleport/teleport.py +208 -0
- vibe/core/teleport/types.py +54 -0
- vibe/core/tools/base.py +336 -0
- vibe/core/tools/builtins/ask_user_question.py +134 -0
- vibe/core/tools/builtins/bash.py +357 -0
- vibe/core/tools/builtins/grep.py +310 -0
- vibe/core/tools/builtins/prompts/__init__.py +0 -0
- vibe/core/tools/builtins/prompts/ask_user_question.md +84 -0
- vibe/core/tools/builtins/prompts/bash.md +73 -0
- vibe/core/tools/builtins/prompts/grep.md +4 -0
- vibe/core/tools/builtins/prompts/read_file.md +13 -0
- vibe/core/tools/builtins/prompts/search_replace.md +43 -0
- vibe/core/tools/builtins/prompts/task.md +24 -0
- vibe/core/tools/builtins/prompts/todo.md +199 -0
- vibe/core/tools/builtins/prompts/write_file.md +42 -0
- vibe/core/tools/builtins/read_file.py +222 -0
- vibe/core/tools/builtins/search_replace.py +456 -0
- vibe/core/tools/builtins/task.py +154 -0
- vibe/core/tools/builtins/todo.py +134 -0
- vibe/core/tools/builtins/write_file.py +160 -0
- vibe/core/tools/manager.py +341 -0
- vibe/core/tools/mcp.py +397 -0
- vibe/core/tools/ui.py +68 -0
- vibe/core/trusted_folders.py +86 -0
- vibe/core/types.py +405 -0
- vibe/core/utils.py +396 -0
- vibe/setup/onboarding/__init__.py +39 -0
- vibe/setup/onboarding/base.py +14 -0
- vibe/setup/onboarding/onboarding.tcss +134 -0
- vibe/setup/onboarding/screens/__init__.py +5 -0
- vibe/setup/onboarding/screens/api_key.py +200 -0
- vibe/setup/onboarding/screens/provider_selection.py +87 -0
- vibe/setup/onboarding/screens/welcome.py +136 -0
- vibe/setup/trusted_folders/trust_folder_dialog.py +180 -0
- vibe/setup/trusted_folders/trust_folder_dialog.tcss +83 -0
- vibe/whats_new.md +5 -0
vibe/core/llm/format.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from typing import TYPE_CHECKING, Any
|
|
5
|
+
|
|
6
|
+
from pydantic import BaseModel, ConfigDict, Field, ValidationError
|
|
7
|
+
|
|
8
|
+
from vibe.core.tools.base import BaseTool
|
|
9
|
+
from vibe.core.types import (
|
|
10
|
+
AvailableFunction,
|
|
11
|
+
AvailableTool,
|
|
12
|
+
LLMMessage,
|
|
13
|
+
Role,
|
|
14
|
+
StrToolChoice,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
if TYPE_CHECKING:
|
|
18
|
+
from vibe.core.tools.manager import ToolManager
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class ParsedToolCall(BaseModel):
    """A tool call as extracted from a model response, before validation.

    The tool name and arguments come straight from the model output: the
    name may not match any registered tool and the arguments have not yet
    been checked against the tool's schema.
    """

    model_config = ConfigDict(frozen=True)
    tool_name: str  # name reported by the model; may be unknown
    raw_args: dict[str, Any]  # arguments decoded from JSON, unvalidated
    call_id: str = ""  # provider-assigned id correlating call and response
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class ResolvedToolCall(BaseModel):
    """A tool call whose name resolved to a registered tool class and whose
    arguments passed schema validation."""

    model_config = ConfigDict(frozen=True, arbitrary_types_allowed=True)
    tool_name: str
    tool_class: type[BaseTool]  # the registered tool to execute
    validated_args: BaseModel  # instance of the tool's argument model
    call_id: str = ""

    @property
    def args_dict(self) -> dict[str, Any]:
        """Validated arguments as a plain dict."""
        return self.validated_args.model_dump()
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class FailedToolCall(BaseModel):
    """A tool call that could not be resolved, with a human-readable error.

    Produced when the tool name is unknown or argument validation fails.
    """

    model_config = ConfigDict(frozen=True)
    tool_name: str
    call_id: str
    error: str  # description of why resolution failed
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class ParsedMessage(BaseModel):
    """Tool calls extracted from one assistant message, prior to resolution."""

    model_config = ConfigDict(frozen=True)
    tool_calls: list[ParsedToolCall]
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class ResolvedMessage(BaseModel):
    """Outcome of resolving a ParsedMessage: successes and failures split."""

    model_config = ConfigDict(frozen=True)
    tool_calls: list[ResolvedToolCall]  # calls that resolved and validated
    failed_calls: list[FailedToolCall] = Field(default_factory=list)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class APIToolFormatHandler:
    """Format handler for backends with native (OpenAI-style) tool calling.

    Converts between raw API response messages, internal ``LLMMessage``
    objects, and the parsed/resolved tool-call representations.
    """

    @property
    def name(self) -> str:
        """Identifier of this handler."""
        return "api"

    def get_available_tools(self, tool_manager: ToolManager) -> list[AvailableTool]:
        """Describe every registered tool in the API tool-declaration format."""
        declarations: list[AvailableTool] = []
        for tool_cls in tool_manager.available_tools.values():
            function = AvailableFunction(
                name=tool_cls.get_name(),
                description=tool_cls.description,
                parameters=tool_cls.get_parameters(),
            )
            declarations.append(AvailableTool(function=function))
        return declarations

    def get_tool_choice(self) -> StrToolChoice | AvailableTool:
        """Let the model decide on its own whether to call tools."""
        return "auto"

    def process_api_response_message(self, message: Any) -> LLMMessage:
        """Normalize a raw API response message into an ``LLMMessage``.

        Reasoning fields are read defensively since not every provider
        populates them.
        """
        payload: dict[str, Any] = {
            "role": message.role,
            "content": message.content,
            "reasoning_content": getattr(message, "reasoning_content", None),
            "reasoning_signature": getattr(message, "reasoning_signature", None),
        }

        if message.tool_calls:
            payload["tool_calls"] = [
                {
                    "id": call.id,
                    "index": call.index,
                    "type": "function",
                    "function": {
                        "name": call.function.name,
                        "arguments": call.function.arguments,
                    },
                }
                for call in message.tool_calls
            ]

        return LLMMessage.model_validate(payload)

    def parse_message(self, message: LLMMessage) -> ParsedMessage:
        """Extract tool calls from an assistant message, decoding JSON args.

        Calls without a function payload are skipped; undecodable argument
        JSON degrades to an empty argument dict rather than raising.
        """
        parsed: list[ParsedToolCall] = []
        for call in message.tool_calls or []:
            function = call.function
            if not function:
                continue
            try:
                decoded_args = json.loads(function.arguments or "{}")
            except json.JSONDecodeError:
                decoded_args = {}
            parsed.append(
                ParsedToolCall(
                    tool_name=function.name or "",
                    raw_args=decoded_args,
                    call_id=call.id or "",
                )
            )
        return ParsedMessage(tool_calls=parsed)

    def resolve_tool_calls(
        self, parsed: ParsedMessage, tool_manager: ToolManager
    ) -> ResolvedMessage:
        """Match parsed calls against registered tools and validate arguments.

        Unknown tool names and schema-invalid arguments become
        ``FailedToolCall`` entries instead of exceptions.
        """
        resolved: list[ResolvedToolCall] = []
        failures: list[FailedToolCall] = []
        registry = tool_manager.available_tools

        for call in parsed.tool_calls:
            tool_cls = registry.get(call.tool_name)
            if tool_cls is None:
                failures.append(
                    FailedToolCall(
                        tool_name=call.tool_name,
                        call_id=call.call_id,
                        error=f"Unknown tool '{call.tool_name}'",
                    )
                )
                continue

            args_model, _ = tool_cls._get_tool_args_results()
            try:
                checked = args_model.model_validate(call.raw_args)
                resolved.append(
                    ResolvedToolCall(
                        tool_name=call.tool_name,
                        tool_class=tool_cls,
                        validated_args=checked,
                        call_id=call.call_id,
                    )
                )
            except ValidationError as e:
                failures.append(
                    FailedToolCall(
                        tool_name=call.tool_name,
                        call_id=call.call_id,
                        error=f"Invalid arguments: {e}",
                    )
                )

        return ResolvedMessage(tool_calls=resolved, failed_calls=failures)

    def create_tool_response_message(
        self, tool_call: ResolvedToolCall, result_text: str
    ) -> LLMMessage:
        """Build the tool-role message carrying a successful tool result."""
        return LLMMessage(
            role=Role.tool,
            tool_call_id=tool_call.call_id,
            name=tool_call.tool_name,
            content=result_text,
        )

    def create_failed_tool_response_message(
        self, failed: FailedToolCall, error_content: str
    ) -> LLMMessage:
        """Build the tool-role message reporting a failed tool call."""
        return LLMMessage(
            role=Role.tool,
            tool_call_id=failed.call_id,
            name=failed.tool_name,
            content=error_content,
        )
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from vibe.core.types import LLMMessage, Role
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def merge_consecutive_user_messages(messages: list[LLMMessage]) -> list[LLMMessage]:
    """Collapse runs of consecutive user messages into single messages.

    Middleware can inject user-role messages, producing adjacent user
    entries that some APIs reject; this joins each run with blank lines,
    keeping the first message's id.
    """
    merged: list[LLMMessage] = []
    for message in messages:
        previous = merged[-1] if merged else None
        if previous is not None and previous.role == Role.user == message.role:
            combined = f"{previous.content or ''}\n\n{message.content or ''}".strip()
            merged[-1] = LLMMessage(
                role=Role.user,
                content=combined,
                message_id=previous.message_id,
            )
        else:
            merged.append(message)

    return merged
|
vibe/core/llm/types.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import AsyncGenerator
|
|
4
|
+
import types
|
|
5
|
+
from typing import TYPE_CHECKING, Protocol
|
|
6
|
+
|
|
7
|
+
from vibe.core.types import AvailableTool, LLMChunk, LLMMessage, StrToolChoice
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
from vibe.core.config import ModelConfig
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class BackendLike(Protocol):
    """Port protocol for dependency-injectable LLM backends.

    Any backend used by AgentLoop should implement this async context manager
    interface with `complete`, `complete_streaming` and `count_tokens` methods.
    """

    async def __aenter__(self) -> BackendLike: ...

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: types.TracebackType | None,
    ) -> None: ...

    async def complete(
        self,
        *,
        model: ModelConfig,
        messages: list[LLMMessage],
        temperature: float,
        tools: list[AvailableTool] | None,
        max_tokens: int | None,
        tool_choice: StrToolChoice | AvailableTool | None,
        extra_headers: dict[str, str] | None,
    ) -> LLMChunk:
        """Complete a chat conversation using the specified model and provider.

        Args:
            model: Model configuration
            messages: List of conversation messages
            temperature: Sampling temperature (0.0 to 1.0)
            tools: Optional list of available tools
            max_tokens: Maximum tokens to generate
            tool_choice: How to choose tools (auto, none, or specific tool)
            extra_headers: Additional HTTP headers to include

        Returns:
            LLMChunk containing the response message and usage information

        Raises:
            BackendError: If the API request fails
        """
        ...

    # Note: actual implementation should be an async function,
    # but we can't make this one async, as it would lead to wrong type inference
    # https://stackoverflow.com/a/68911014
    def complete_streaming(
        self,
        *,
        model: ModelConfig,
        messages: list[LLMMessage],
        temperature: float,
        tools: list[AvailableTool] | None,
        max_tokens: int | None,
        tool_choice: StrToolChoice | AvailableTool | None,
        extra_headers: dict[str, str] | None,
    ) -> AsyncGenerator[LLMChunk, None]:
        """Equivalent of the complete method, but yields LLMChunk objects
        instead of a single LLMChunk.

        Args:
            model: Model configuration
            messages: List of conversation messages
            temperature: Sampling temperature (0.0 to 1.0)
            tools: Optional list of available tools
            max_tokens: Maximum tokens to generate
            tool_choice: How to choose tools (auto, none, or specific tool)
            extra_headers: Additional HTTP headers to include

        Returns:
            AsyncGenerator[LLMChunk, None] yielding LLMChunk objects

        Raises:
            BackendError: If the API request fails
        """
        ...

    async def count_tokens(
        self,
        *,
        model: ModelConfig,
        messages: list[LLMMessage],
        temperature: float = 0.0,
        tools: list[AvailableTool] | None,
        tool_choice: StrToolChoice | AvailableTool | None = None,
        extra_headers: dict[str, str] | None,
    ) -> int:
        """Count the number of tokens in the prompt without generating a real response.

        This is useful for:
        - Determining system prompt token count
        - Checking context size after compaction
        - Pre-flight token validation

        Args:
            model: Model configuration
            messages: List of messages to count tokens for
            temperature: Sampling temperature
            tools: Optional list of available tools
            tool_choice: How to choose tools
            extra_headers: Additional HTTP headers to include

        Returns:
            The number of prompt tokens
        """
        ...
|
vibe/core/middleware.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import Callable
|
|
4
|
+
from dataclasses import dataclass, field
|
|
5
|
+
from enum import StrEnum, auto
|
|
6
|
+
from typing import TYPE_CHECKING, Any, Protocol
|
|
7
|
+
|
|
8
|
+
from vibe.core.agents import AgentProfile
|
|
9
|
+
from vibe.core.agents.models import BuiltinAgentName
|
|
10
|
+
from vibe.core.utils import VIBE_WARNING_TAG
|
|
11
|
+
|
|
12
|
+
if TYPE_CHECKING:
|
|
13
|
+
from vibe.core.config import VibeConfig
|
|
14
|
+
from vibe.core.types import AgentStats, LLMMessage
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class MiddlewareAction(StrEnum):
|
|
18
|
+
CONTINUE = auto()
|
|
19
|
+
STOP = auto()
|
|
20
|
+
COMPACT = auto()
|
|
21
|
+
INJECT_MESSAGE = auto()
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class ResetReason(StrEnum):
|
|
25
|
+
STOP = auto()
|
|
26
|
+
COMPACT = auto()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class ConversationContext:
    """Snapshot of conversation state handed to each middleware per turn."""

    messages: list[LLMMessage]  # conversation history
    stats: AgentStats  # running counters (steps, context_tokens, session_cost)
    config: VibeConfig  # active configuration
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
@dataclass
class MiddlewareResult:
    """Outcome of one middleware check.

    `message` carries text to inject when action is INJECT_MESSAGE; `reason`
    explains a STOP; `metadata` carries extra detail (e.g. token counts for
    COMPACT).
    """

    action: MiddlewareAction = MiddlewareAction.CONTINUE
    message: str | None = None
    reason: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class ConversationMiddleware(Protocol):
    """Structural interface every conversation middleware implements."""

    async def before_turn(self, context: ConversationContext) -> MiddlewareResult:
        """Inspect the context before the next model turn and choose an action."""
        ...

    def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
        """Clear internal state when the conversation stops or is compacted."""
        ...
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class TurnLimitMiddleware:
    """Stops the conversation once a maximum number of turns is reached."""

    def __init__(self, max_turns: int) -> None:
        self.max_turns = max_turns

    async def before_turn(self, context: ConversationContext) -> MiddlewareResult:
        """Request STOP when the turn budget is used up."""
        # NOTE(review): the -1 offset suggests stats.steps already counts the
        # turn about to run — confirm against AgentStats.
        turns_taken = context.stats.steps - 1
        if turns_taken < self.max_turns:
            return MiddlewareResult()
        return MiddlewareResult(
            action=MiddlewareAction.STOP,
            reason=f"Turn limit of {self.max_turns} reached",
        )

    def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
        """Stateless; nothing to reset."""
        pass
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class PriceLimitMiddleware:
    """Stops the conversation once the session cost exceeds a dollar budget."""

    def __init__(self, max_price: float) -> None:
        self.max_price = max_price

    async def before_turn(self, context: ConversationContext) -> MiddlewareResult:
        """Request STOP when the accumulated session cost passes the limit."""
        cost = context.stats.session_cost
        if cost <= self.max_price:
            return MiddlewareResult()
        return MiddlewareResult(
            action=MiddlewareAction.STOP,
            reason=f"Price limit exceeded: ${cost:.4f} > ${self.max_price:.2f}",
        )

    def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
        """Stateless; nothing to reset."""
        pass
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class AutoCompactMiddleware:
    """Requests history compaction once context tokens reach a threshold."""

    def __init__(self, threshold: int) -> None:
        self.threshold = threshold

    async def before_turn(self, context: ConversationContext) -> MiddlewareResult:
        """Request COMPACT when the context token count hits the threshold."""
        tokens = context.stats.context_tokens
        if tokens < self.threshold:
            return MiddlewareResult()
        return MiddlewareResult(
            action=MiddlewareAction.COMPACT,
            metadata={"old_tokens": tokens, "threshold": self.threshold},
        )

    def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
        """Stateless; nothing to reset."""
        pass
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
class ContextWarningMiddleware:
    """Injects a one-time warning when context usage crosses a percentage
    of the model's maximum context window."""

    def __init__(
        self, threshold_percent: float = 0.5, max_context: int | None = None
    ) -> None:
        self.threshold_percent = threshold_percent
        self.max_context = max_context  # None disables the check entirely
        self.has_warned = False  # warn at most once per conversation

    async def before_turn(self, context: ConversationContext) -> MiddlewareResult:
        """Inject the usage warning the first time the threshold is crossed."""
        limit = self.max_context
        if self.has_warned or limit is None:
            return MiddlewareResult()

        used = context.stats.context_tokens
        if used < limit * self.threshold_percent:
            return MiddlewareResult()

        self.has_warned = True
        percentage_used = (used / limit) * 100
        warning_msg = f"<{VIBE_WARNING_TAG}>You have used {percentage_used:.0f}% of your total context ({used:,}/{limit:,} tokens)</{VIBE_WARNING_TAG}>"
        return MiddlewareResult(
            action=MiddlewareAction.INJECT_MESSAGE, message=warning_msg
        )

    def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
        """Re-arm the one-shot warning."""
        self.has_warned = False
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
# Injected once when the active agent switches INTO plan mode: forbids any
# state-changing tool use until the user confirms a plan.
PLAN_AGENT_REMINDER = f"""<{VIBE_WARNING_TAG}>Plan mode is active. The user indicated that they do not want you to execute yet -- you MUST NOT make any edits, run any non-readonly tools (including changing configs or making commits), or otherwise make any changes to the system. This supersedes any other instructions you have received (for example, to make edits). Instead, you should:
1. Answer the user's query comprehensively
2. When you're done researching, present your plan by giving the full plan and not doing further tool calls to return input to the user. Do NOT make any file changes or run any tools that modify the system state in any way until the user has confirmed the plan.</{VIBE_WARNING_TAG}>"""

# Injected once when the active agent switches OUT of plan mode: re-enables
# editing tools.
PLAN_AGENT_EXIT = f"""<{VIBE_WARNING_TAG}>Plan mode has ended. If you have a plan ready, you can now start executing it. If not, you can now use editing tools and make changes to the system.</{VIBE_WARNING_TAG}>"""
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
class PlanAgentMiddleware:
    """Injects reminder/exit messages when the active agent enters or
    leaves the built-in plan agent.

    Tracks the previous turn's mode so each transition fires exactly once.
    """

    def __init__(
        self,
        profile_getter: Callable[[], AgentProfile],
        reminder: str = PLAN_AGENT_REMINDER,
        exit_message: str = PLAN_AGENT_EXIT,
    ) -> None:
        self._profile_getter = profile_getter
        self.reminder = reminder
        self.exit_message = exit_message
        self._was_plan_agent = False  # mode observed on the previous turn

    def _is_plan_agent(self) -> bool:
        """True while the currently active profile is the plan agent."""
        return self._profile_getter().name == BuiltinAgentName.PLAN

    async def before_turn(self, context: ConversationContext) -> MiddlewareResult:
        """Emit the reminder on entry to plan mode, the exit note on leaving."""
        currently_plan = self._is_plan_agent()
        previously_plan = self._was_plan_agent
        self._was_plan_agent = currently_plan

        if previously_plan and not currently_plan:
            return MiddlewareResult(
                action=MiddlewareAction.INJECT_MESSAGE, message=self.exit_message
            )
        if currently_plan and not previously_plan:
            return MiddlewareResult(
                action=MiddlewareAction.INJECT_MESSAGE, message=self.reminder
            )
        return MiddlewareResult()

    def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
        """Forget the tracked mode so the next plan turn re-triggers the reminder."""
        self._was_plan_agent = False
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
class MiddlewarePipeline:
    """Ordered collection of middlewares run before each conversation turn."""

    def __init__(self) -> None:
        self.middlewares: list[ConversationMiddleware] = []

    def add(self, middleware: ConversationMiddleware) -> MiddlewarePipeline:
        """Append a middleware; returns self for chaining."""
        self.middlewares.append(middleware)
        return self

    def clear(self) -> None:
        """Remove all middlewares."""
        self.middlewares.clear()

    def reset(self, reset_reason: ResetReason = ResetReason.STOP) -> None:
        """Propagate a reset to every middleware."""
        for middleware in self.middlewares:
            middleware.reset(reset_reason)

    async def run_before_turn(self, context: ConversationContext) -> MiddlewareResult:
        """Run each middleware in order.

        The first STOP or COMPACT result short-circuits the pipeline; all
        INJECT_MESSAGE results gathered along the way are merged into one
        combined injection.
        """
        injected: list[str] = []

        for middleware in self.middlewares:
            outcome = await middleware.before_turn(context)
            if outcome.action in {MiddlewareAction.STOP, MiddlewareAction.COMPACT}:
                return outcome
            if outcome.action == MiddlewareAction.INJECT_MESSAGE and outcome.message:
                injected.append(outcome.message)

        if not injected:
            return MiddlewareResult()
        return MiddlewareResult(
            action=MiddlewareAction.INJECT_MESSAGE, message="\n\n".join(injected)
        )
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
import json
|
|
5
|
+
import sys
|
|
6
|
+
from typing import TextIO
|
|
7
|
+
|
|
8
|
+
from vibe.core.types import AssistantEvent, BaseEvent, LLMMessage, OutputFormat
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class OutputFormatter(ABC):
    """Base class for rendering agent output in different formats.

    Subclasses receive messages and events as they occur and produce their
    final output in `finalize`.
    """

    def __init__(self, stream: TextIO = sys.stdout) -> None:
        self.stream = stream  # destination for formatter-managed output
        self._messages: list[LLMMessage] = []  # messages observed so far
        self._final_response: str | None = None  # last assistant text, if tracked

    @abstractmethod
    def on_message_added(self, message: LLMMessage) -> None:
        """Called whenever a message is appended to the conversation."""
        pass

    @abstractmethod
    def on_event(self, event: BaseEvent) -> None:
        """Called for each event emitted during the run."""
        pass

    @abstractmethod
    def finalize(self) -> str | None:
        """Finalize output and return any final text to be printed.

        Returns:
            String to print, or None if formatter handles its own output
        """
        pass
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class TextOutputFormatter(OutputFormatter):
    """Plain-text formatter: prints only the final assistant response."""

    def on_message_added(self, message: LLMMessage) -> None:
        """Record the message; text output derives its result from events."""
        self._messages.append(message)

    def on_event(self, event: BaseEvent) -> None:
        """Track the latest assistant content as the candidate final answer."""
        if not isinstance(event, AssistantEvent):
            return
        self._final_response = event.content

    def finalize(self) -> str | None:
        """Return the last assistant response seen, if any."""
        return self._final_response
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class JsonOutputFormatter(OutputFormatter):
    """Formatter that dumps the whole conversation as one JSON array at the end."""

    def on_message_added(self, message: LLMMessage) -> None:
        """Buffer the message for the final dump."""
        self._messages.append(message)

    def on_event(self, event: BaseEvent) -> None:
        """Events are ignored; only complete messages are serialized."""
        pass

    def finalize(self) -> str | None:
        """Write all buffered messages to the stream as pretty-printed JSON."""
        payload = [message.model_dump(mode="json") for message in self._messages]
        json.dump(payload, self.stream, indent=2, ensure_ascii=False)
        self.stream.write("\n")
        self.stream.flush()
        return None  # output already written to the stream
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class StreamingJsonOutputFormatter(OutputFormatter):
    """Formatter that emits each message immediately as one JSON line (JSONL)."""

    def on_message_added(self, message: LLMMessage) -> None:
        """Serialize and flush the message as soon as it arrives."""
        json.dump(message.model_dump(mode="json"), self.stream, ensure_ascii=False)
        self.stream.write("\n")
        self.stream.flush()

    def on_event(self, event: BaseEvent) -> None:
        """Events are ignored; only complete messages are serialized."""
        pass

    def finalize(self) -> str | None:
        """Nothing left to do; every message was already written."""
        return None
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def create_formatter(
    format_type: OutputFormat, stream: TextIO = sys.stdout
) -> OutputFormatter:
    """Instantiate the formatter for the requested output format.

    Unknown formats fall back to plain text.
    """
    registry: dict[OutputFormat, type[OutputFormatter]] = {
        OutputFormat.TEXT: TextOutputFormatter,
        OutputFormat.JSON: JsonOutputFormatter,
        OutputFormat.STREAMING: StreamingJsonOutputFormatter,
    }
    chosen = registry.get(format_type, TextOutputFormatter)
    return chosen(stream)
|
|
File without changes
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Literal
|
|
5
|
+
|
|
6
|
+
from vibe.core.paths.global_paths import VIBE_HOME, GlobalPath
|
|
7
|
+
from vibe.core.trusted_folders import trusted_folders_manager
|
|
8
|
+
|
|
9
|
+
# Guards ConfigPath resolution until trust checks have run; flipped to False
# by unlock_config_paths().
_config_paths_locked: bool = True
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class ConfigPath(GlobalPath):
    """A GlobalPath that refuses to resolve while config paths are locked.

    Resolution depends on whether the working directory is trusted, so it
    must not happen before unlock_config_paths() is called.
    """

    @property
    def path(self) -> Path:
        if not _config_paths_locked:
            return super().path
        raise RuntimeError("Config path is locked")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _resolve_config_path(basename: str, type: Literal["file", "dir"]) -> Path:
    """Resolve a config entry, preferring a project-local `.vibe/` copy.

    A local `.vibe/<basename>` is only used when the working directory is
    trusted and the entry exists with the expected kind; otherwise the
    global VIBE_HOME location is returned.
    """
    cwd = Path.cwd()
    if trusted_folders_manager.is_trusted(cwd):
        candidate = cwd / ".vibe" / basename
        if type == "file" and candidate.is_file():
            return candidate
        if type == "dir" and candidate.is_dir():
            return candidate
    return VIBE_HOME.path / basename
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def resolve_local_tools_dir(dir: Path) -> Path | None:
    """Return `<dir>/.vibe/tools` if the folder is trusted and it exists."""
    if not trusted_folders_manager.is_trusted(dir):
        return None
    candidate = dir / ".vibe" / "tools"
    return candidate if candidate.is_dir() else None
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def resolve_local_skills_dirs(dir: Path) -> list[Path]:
    """Return existing local skills directories for a trusted folder.

    Checks `.vibe/skills` first, then `.agents/skills`; untrusted folders
    get no local skills.
    """
    if not trusted_folders_manager.is_trusted(dir):
        return []
    roots = (dir / ".vibe" / "skills", dir / ".agents" / "skills")
    return [root for root in roots if root.is_dir()]
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def resolve_local_agents_dir(dir: Path) -> Path | None:
    """Return `<dir>/.vibe/agents` if the folder is trusted and it exists."""
    if not trusted_folders_manager.is_trusted(dir):
        return None
    candidate = dir / ".vibe" / "agents"
    return candidate if candidate.is_dir() else None
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def unlock_config_paths() -> None:
    """Permit ConfigPath resolution (clears the lock that makes
    ConfigPath.path raise RuntimeError)."""
    global _config_paths_locked
    _config_paths_locked = False
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
# Lazily-resolved config locations. Each resolves per-access via
# _resolve_config_path, so the trusted-folder check happens at use time,
# after unlock_config_paths() has been called.
CONFIG_FILE = ConfigPath(lambda: _resolve_config_path("config.toml", "file"))
CONFIG_DIR = ConfigPath(lambda: CONFIG_FILE.path.parent)  # folder holding the active config
PROMPTS_DIR = ConfigPath(lambda: _resolve_config_path("prompts", "dir"))
HISTORY_FILE = ConfigPath(lambda: _resolve_config_path("vibehistory", "file"))
|