codemaster-cli 2.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (170) hide show
  1. codemaster_cli-2.2.0.dist-info/METADATA +645 -0
  2. codemaster_cli-2.2.0.dist-info/RECORD +170 -0
  3. codemaster_cli-2.2.0.dist-info/WHEEL +4 -0
  4. codemaster_cli-2.2.0.dist-info/entry_points.txt +3 -0
  5. vibe/__init__.py +6 -0
  6. vibe/acp/__init__.py +0 -0
  7. vibe/acp/acp_agent_loop.py +746 -0
  8. vibe/acp/entrypoint.py +81 -0
  9. vibe/acp/tools/__init__.py +0 -0
  10. vibe/acp/tools/base.py +100 -0
  11. vibe/acp/tools/builtins/bash.py +134 -0
  12. vibe/acp/tools/builtins/read_file.py +54 -0
  13. vibe/acp/tools/builtins/search_replace.py +129 -0
  14. vibe/acp/tools/builtins/todo.py +65 -0
  15. vibe/acp/tools/builtins/write_file.py +98 -0
  16. vibe/acp/tools/session_update.py +118 -0
  17. vibe/acp/utils.py +213 -0
  18. vibe/cli/__init__.py +0 -0
  19. vibe/cli/autocompletion/__init__.py +0 -0
  20. vibe/cli/autocompletion/base.py +22 -0
  21. vibe/cli/autocompletion/path_completion.py +177 -0
  22. vibe/cli/autocompletion/slash_command.py +99 -0
  23. vibe/cli/cli.py +188 -0
  24. vibe/cli/clipboard.py +69 -0
  25. vibe/cli/commands.py +116 -0
  26. vibe/cli/entrypoint.py +163 -0
  27. vibe/cli/history_manager.py +91 -0
  28. vibe/cli/plan_offer/adapters/http_whoami_gateway.py +67 -0
  29. vibe/cli/plan_offer/decide_plan_offer.py +87 -0
  30. vibe/cli/plan_offer/ports/whoami_gateway.py +23 -0
  31. vibe/cli/terminal_setup.py +323 -0
  32. vibe/cli/textual_ui/__init__.py +0 -0
  33. vibe/cli/textual_ui/ansi_markdown.py +58 -0
  34. vibe/cli/textual_ui/app.py +1546 -0
  35. vibe/cli/textual_ui/app.tcss +1020 -0
  36. vibe/cli/textual_ui/external_editor.py +32 -0
  37. vibe/cli/textual_ui/handlers/__init__.py +5 -0
  38. vibe/cli/textual_ui/handlers/event_handler.py +147 -0
  39. vibe/cli/textual_ui/widgets/__init__.py +0 -0
  40. vibe/cli/textual_ui/widgets/approval_app.py +192 -0
  41. vibe/cli/textual_ui/widgets/banner/banner.py +85 -0
  42. vibe/cli/textual_ui/widgets/banner/petit_chat.py +195 -0
  43. vibe/cli/textual_ui/widgets/braille_renderer.py +58 -0
  44. vibe/cli/textual_ui/widgets/chat_input/__init__.py +7 -0
  45. vibe/cli/textual_ui/widgets/chat_input/body.py +214 -0
  46. vibe/cli/textual_ui/widgets/chat_input/completion_manager.py +58 -0
  47. vibe/cli/textual_ui/widgets/chat_input/completion_popup.py +43 -0
  48. vibe/cli/textual_ui/widgets/chat_input/container.py +195 -0
  49. vibe/cli/textual_ui/widgets/chat_input/text_area.py +365 -0
  50. vibe/cli/textual_ui/widgets/compact.py +41 -0
  51. vibe/cli/textual_ui/widgets/config_app.py +171 -0
  52. vibe/cli/textual_ui/widgets/context_progress.py +30 -0
  53. vibe/cli/textual_ui/widgets/load_more.py +43 -0
  54. vibe/cli/textual_ui/widgets/loading.py +201 -0
  55. vibe/cli/textual_ui/widgets/messages.py +277 -0
  56. vibe/cli/textual_ui/widgets/no_markup_static.py +11 -0
  57. vibe/cli/textual_ui/widgets/path_display.py +28 -0
  58. vibe/cli/textual_ui/widgets/proxy_setup_app.py +127 -0
  59. vibe/cli/textual_ui/widgets/question_app.py +496 -0
  60. vibe/cli/textual_ui/widgets/spinner.py +194 -0
  61. vibe/cli/textual_ui/widgets/status_message.py +76 -0
  62. vibe/cli/textual_ui/widgets/teleport_message.py +31 -0
  63. vibe/cli/textual_ui/widgets/tool_widgets.py +371 -0
  64. vibe/cli/textual_ui/widgets/tools.py +201 -0
  65. vibe/cli/textual_ui/windowing/__init__.py +29 -0
  66. vibe/cli/textual_ui/windowing/history.py +105 -0
  67. vibe/cli/textual_ui/windowing/history_windowing.py +71 -0
  68. vibe/cli/textual_ui/windowing/state.py +105 -0
  69. vibe/cli/update_notifier/__init__.py +47 -0
  70. vibe/cli/update_notifier/adapters/filesystem_update_cache_repository.py +59 -0
  71. vibe/cli/update_notifier/adapters/github_update_gateway.py +101 -0
  72. vibe/cli/update_notifier/adapters/pypi_update_gateway.py +107 -0
  73. vibe/cli/update_notifier/ports/update_cache_repository.py +16 -0
  74. vibe/cli/update_notifier/ports/update_gateway.py +53 -0
  75. vibe/cli/update_notifier/update.py +139 -0
  76. vibe/cli/update_notifier/whats_new.py +49 -0
  77. vibe/core/__init__.py +5 -0
  78. vibe/core/agent_loop.py +1075 -0
  79. vibe/core/agents/__init__.py +31 -0
  80. vibe/core/agents/manager.py +165 -0
  81. vibe/core/agents/models.py +122 -0
  82. vibe/core/auth/__init__.py +6 -0
  83. vibe/core/auth/crypto.py +137 -0
  84. vibe/core/auth/github.py +178 -0
  85. vibe/core/autocompletion/__init__.py +0 -0
  86. vibe/core/autocompletion/completers.py +257 -0
  87. vibe/core/autocompletion/file_indexer/__init__.py +10 -0
  88. vibe/core/autocompletion/file_indexer/ignore_rules.py +156 -0
  89. vibe/core/autocompletion/file_indexer/indexer.py +179 -0
  90. vibe/core/autocompletion/file_indexer/store.py +169 -0
  91. vibe/core/autocompletion/file_indexer/watcher.py +71 -0
  92. vibe/core/autocompletion/fuzzy.py +189 -0
  93. vibe/core/autocompletion/path_prompt.py +108 -0
  94. vibe/core/autocompletion/path_prompt_adapter.py +149 -0
  95. vibe/core/config.py +673 -0
  96. vibe/core/config_PATCH_INSTRUCTIONS.md +77 -0
  97. vibe/core/llm/__init__.py +0 -0
  98. vibe/core/llm/backend/anthropic.py +630 -0
  99. vibe/core/llm/backend/base.py +38 -0
  100. vibe/core/llm/backend/factory.py +7 -0
  101. vibe/core/llm/backend/generic.py +425 -0
  102. vibe/core/llm/backend/mistral.py +381 -0
  103. vibe/core/llm/backend/vertex.py +115 -0
  104. vibe/core/llm/exceptions.py +195 -0
  105. vibe/core/llm/format.py +184 -0
  106. vibe/core/llm/message_utils.py +24 -0
  107. vibe/core/llm/types.py +120 -0
  108. vibe/core/middleware.py +209 -0
  109. vibe/core/output_formatters.py +85 -0
  110. vibe/core/paths/__init__.py +0 -0
  111. vibe/core/paths/config_paths.py +68 -0
  112. vibe/core/paths/global_paths.py +40 -0
  113. vibe/core/programmatic.py +56 -0
  114. vibe/core/prompts/__init__.py +32 -0
  115. vibe/core/prompts/cli.md +111 -0
  116. vibe/core/prompts/compact.md +48 -0
  117. vibe/core/prompts/dangerous_directory.md +5 -0
  118. vibe/core/prompts/explore.md +50 -0
  119. vibe/core/prompts/project_context.md +8 -0
  120. vibe/core/prompts/tests.md +1 -0
  121. vibe/core/proxy_setup.py +65 -0
  122. vibe/core/session/session_loader.py +222 -0
  123. vibe/core/session/session_logger.py +318 -0
  124. vibe/core/session/session_migration.py +41 -0
  125. vibe/core/skills/__init__.py +7 -0
  126. vibe/core/skills/manager.py +132 -0
  127. vibe/core/skills/models.py +92 -0
  128. vibe/core/skills/parser.py +39 -0
  129. vibe/core/system_prompt.py +466 -0
  130. vibe/core/telemetry/__init__.py +0 -0
  131. vibe/core/telemetry/send.py +185 -0
  132. vibe/core/teleport/errors.py +9 -0
  133. vibe/core/teleport/git.py +196 -0
  134. vibe/core/teleport/nuage.py +180 -0
  135. vibe/core/teleport/teleport.py +208 -0
  136. vibe/core/teleport/types.py +54 -0
  137. vibe/core/tools/base.py +336 -0
  138. vibe/core/tools/builtins/ask_user_question.py +134 -0
  139. vibe/core/tools/builtins/bash.py +357 -0
  140. vibe/core/tools/builtins/grep.py +310 -0
  141. vibe/core/tools/builtins/prompts/__init__.py +0 -0
  142. vibe/core/tools/builtins/prompts/ask_user_question.md +84 -0
  143. vibe/core/tools/builtins/prompts/bash.md +73 -0
  144. vibe/core/tools/builtins/prompts/grep.md +4 -0
  145. vibe/core/tools/builtins/prompts/read_file.md +13 -0
  146. vibe/core/tools/builtins/prompts/search_replace.md +43 -0
  147. vibe/core/tools/builtins/prompts/task.md +24 -0
  148. vibe/core/tools/builtins/prompts/todo.md +199 -0
  149. vibe/core/tools/builtins/prompts/write_file.md +42 -0
  150. vibe/core/tools/builtins/read_file.py +222 -0
  151. vibe/core/tools/builtins/search_replace.py +456 -0
  152. vibe/core/tools/builtins/task.py +154 -0
  153. vibe/core/tools/builtins/todo.py +134 -0
  154. vibe/core/tools/builtins/write_file.py +160 -0
  155. vibe/core/tools/manager.py +341 -0
  156. vibe/core/tools/mcp.py +397 -0
  157. vibe/core/tools/ui.py +68 -0
  158. vibe/core/trusted_folders.py +86 -0
  159. vibe/core/types.py +405 -0
  160. vibe/core/utils.py +396 -0
  161. vibe/setup/onboarding/__init__.py +39 -0
  162. vibe/setup/onboarding/base.py +14 -0
  163. vibe/setup/onboarding/onboarding.tcss +134 -0
  164. vibe/setup/onboarding/screens/__init__.py +5 -0
  165. vibe/setup/onboarding/screens/api_key.py +200 -0
  166. vibe/setup/onboarding/screens/provider_selection.py +87 -0
  167. vibe/setup/onboarding/screens/welcome.py +136 -0
  168. vibe/setup/trusted_folders/trust_folder_dialog.py +180 -0
  169. vibe/setup/trusted_folders/trust_folder_dialog.tcss +83 -0
  170. vibe/whats_new.md +5 -0
@@ -0,0 +1,630 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import re
5
+ from typing import Any, ClassVar
6
+
7
+ from vibe.core.config import ProviderConfig
8
+ from vibe.core.llm.backend.base import APIAdapter, PreparedRequest
9
+ from vibe.core.types import (
10
+ AvailableTool,
11
+ FunctionCall,
12
+ LLMChunk,
13
+ LLMMessage,
14
+ LLMUsage,
15
+ Role,
16
+ StrToolChoice,
17
+ ToolCall,
18
+ )
19
+
20
+
21
class AnthropicMapper:
    """Shared mapper for converting messages to/from Anthropic API format.

    Stateless: all methods are pure transformations between the project's
    internal ``LLMMessage``/``LLMChunk`` types and the Anthropic Messages
    API JSON shapes (content blocks, ``tool_use``/``tool_result``, thinking
    blocks, streaming deltas).
    """

    def prepare_messages(
        self, messages: list[LLMMessage]
    ) -> tuple[str | None, list[dict[str, Any]]]:
        """Convert internal messages into Anthropic wire format.

        Returns ``(system_prompt, converted)``: Anthropic takes the system
        prompt as a top-level field rather than a message, so system
        messages are extracted here.

        NOTE(review): if several system messages appear, only the last one
        survives (each assignment overwrites the previous) — confirm this is
        intended.
        """
        system_prompt: str | None = None
        converted: list[dict[str, Any]] = []

        for msg in messages:
            match msg.role:
                case Role.system:
                    system_prompt = msg.content or ""
                case Role.user:
                    user_content: list[dict[str, Any]] = []
                    if msg.content:
                        user_content.append({"type": "text", "text": msg.content})
                    # Empty user content falls back to "" rather than an empty
                    # block list.
                    converted.append({"role": "user", "content": user_content or ""})
                case Role.assistant:
                    converted.append(self._convert_assistant_message(msg))
                case Role.tool:
                    # Tool results must be carried inside a "user" message in
                    # the Anthropic format; may merge into the previous one.
                    self._append_tool_result(converted, msg)

        return system_prompt, converted

    def _sanitize_tool_call_id(self, tool_id: str | None) -> str:
        """Replace characters Anthropic rejects in tool-call ids with "_".

        ``None`` is treated as the empty string.
        """
        return re.sub(r"[^a-zA-Z0-9_-]", "_", tool_id or "")

    def _convert_assistant_message(self, msg: LLMMessage) -> dict[str, Any]:
        """Build an assistant message from thinking, text and tool-call blocks.

        Block order (thinking first, then text, then tool_use) mirrors how
        Anthropic emits them, so round-tripped history stays well-formed.
        """
        content: list[dict[str, Any]] = []
        if msg.reasoning_content:
            block: dict[str, Any] = {
                "type": "thinking",
                "thinking": msg.reasoning_content,
            }
            if msg.reasoning_signature:
                # Signature is required by the API to replay thinking blocks.
                block["signature"] = msg.reasoning_signature
            content.append(block)
        if msg.content:
            content.append({"type": "text", "text": msg.content})
        if msg.tool_calls:
            for tc in msg.tool_calls:
                content.append(self._convert_tool_call(tc))
        return {"role": "assistant", "content": content if content else ""}

    def _convert_tool_call(self, tc: ToolCall) -> dict[str, Any]:
        """Convert an internal ToolCall into an Anthropic ``tool_use`` block.

        Malformed JSON arguments degrade to an empty input dict (best-effort)
        rather than failing the whole request.
        """
        try:
            tool_input = json.loads(tc.function.arguments or "{}")
        except json.JSONDecodeError:
            tool_input = {}
        return {
            "type": "tool_use",
            "id": self._sanitize_tool_call_id(tc.id),
            "name": tc.function.name,
            "input": tool_input,
        }

    def _append_tool_result(
        self, converted: list[dict[str, Any]], msg: LLMMessage
    ) -> None:
        """Append a ``tool_result`` block, merging into the trailing user message.

        Consecutive tool results must live in the same user turn; the id is
        sanitized the same way as in ``_convert_tool_call`` so the pair still
        matches.
        """
        tool_result = {
            "type": "tool_result",
            "tool_use_id": self._sanitize_tool_call_id(msg.tool_call_id),
            "content": msg.content or "",
        }

        if not converted or converted[-1]["role"] != "user":
            converted.append({"role": "user", "content": [tool_result]})
            return

        existing_content = converted[-1]["content"]
        if isinstance(existing_content, str):
            # Previous user message used the plain-string form; promote it to
            # a block list before appending the tool result.
            converted[-1]["content"] = [
                {"type": "text", "text": existing_content},
                tool_result,
            ]
        else:
            converted[-1]["content"].append(tool_result)

    def prepare_tools(
        self, tools: list[AvailableTool] | None
    ) -> list[dict[str, Any]] | None:
        """Convert internal tool definitions to Anthropic ``tools`` entries."""
        if not tools:
            return None
        return [
            {
                "name": tool.function.name,
                "description": tool.function.description,
                "input_schema": tool.function.parameters,
            }
            for tool in tools
        ]

    def prepare_tool_choice(
        self, tool_choice: StrToolChoice | AvailableTool | None
    ) -> dict[str, Any] | None:
        """Map the internal tool-choice value onto Anthropic's ``tool_choice``.

        OpenAI-style "required" is folded into Anthropic's "any"; unknown
        strings yield ``None`` (no tool_choice sent).
        """
        if tool_choice is None:
            return None
        if isinstance(tool_choice, str):
            match tool_choice:
                case "none":
                    return {"type": "none"}
                case "auto":
                    return {"type": "auto"}
                case "any" | "required":
                    return {"type": "any"}
                case _:
                    return None
        # A concrete tool object forces that specific tool.
        return {"type": "tool", "name": tool_choice.function.name}

    def parse_response(self, data: dict[str, Any]) -> LLMChunk:
        """Parse a complete (non-streaming) Messages API response.

        Aggregates text/thinking/signature across blocks and collects tool
        calls with their block index.
        """
        content_blocks = data.get("content", [])
        text_parts: list[str] = []
        thinking_parts: list[str] = []
        signature_parts: list[str] = []
        tool_calls: list[ToolCall] = []

        for idx, block in enumerate(content_blocks):
            block_type = block.get("type")
            if block_type == "text":
                text_parts.append(block.get("text", ""))
            elif block_type == "thinking":
                thinking_parts.append(block.get("thinking", ""))
                if "signature" in block:
                    signature_parts.append(block["signature"])
            elif block_type == "tool_use":
                tool_calls.append(
                    ToolCall(
                        id=block.get("id"),
                        index=idx,
                        function=FunctionCall(
                            name=block.get("name"),
                            # Re-serialize the structured input to the internal
                            # JSON-string argument convention.
                            arguments=json.dumps(block.get("input", {})),
                        ),
                    )
                )

        usage_data = data.get("usage", {})
        # Total input tokens = input_tokens + cache_creation + cache_read
        total_input_tokens = (
            usage_data.get("input_tokens", 0)
            + usage_data.get("cache_creation_input_tokens", 0)
            + usage_data.get("cache_read_input_tokens", 0)
        )
        usage = LLMUsage(
            prompt_tokens=total_input_tokens,
            completion_tokens=usage_data.get("output_tokens", 0),
        )

        return LLMChunk(
            message=LLMMessage(
                role=Role.assistant,
                # Empty aggregates become None rather than "".
                content="".join(text_parts) or None,
                reasoning_content="".join(thinking_parts) or None,
                reasoning_signature="".join(signature_parts) or None,
                tool_calls=tool_calls if tool_calls else None,
            ),
            usage=usage,
        )

    def parse_streaming_event(
        self, event_type: str, data: dict[str, Any], current_index: int
    ) -> tuple[LLMChunk | None, int]:
        """Dispatch one SSE event to its handler.

        Returns ``(chunk_or_None, new_current_index)``. Event types without a
        handler (e.g. ``ping``, ``content_block_stop``) pass through as
        ``(None, current_index)``.
        """
        handler = {
            "content_block_start": self._handle_block_start,
            "content_block_delta": self._handle_block_delta,
            "message_delta": self._handle_message_delta,
            "message_start": self._handle_message_start,
        }.get(event_type)
        if handler is None:
            return None, current_index
        return handler(data, current_index)

    def _handle_block_start(
        self, data: dict[str, Any], current_index: int
    ) -> tuple[LLMChunk | None, int]:
        """Handle ``content_block_start``: open a tool_use or thinking block."""
        block = data.get("content_block", {})
        idx = data.get("index", current_index)

        match block.get("type"):
            case "tool_use":
                # Arguments start empty; input_json_delta events fill them in.
                chunk = LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant,
                        tool_calls=[
                            ToolCall(
                                id=block.get("id"),
                                index=idx,
                                function=FunctionCall(
                                    name=block.get("name"), arguments=""
                                ),
                            )
                        ],
                    )
                )
                return chunk, idx
            case "thinking":
                chunk = LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant, reasoning_content=block.get("thinking", "")
                    )
                )
                return chunk, idx
            case _:
                # Other block types (e.g. "text") produce no chunk here but
                # still advance the tracked index.
                return None, idx

    def _handle_block_delta(
        self, data: dict[str, Any], current_index: int
    ) -> tuple[LLMChunk | None, int]:
        """Handle ``content_block_delta``: text/thinking/signature/JSON deltas."""
        delta = data.get("delta", {})
        idx = data.get("index", current_index)

        match delta.get("type"):
            case "text_delta":
                chunk = LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant, content=delta.get("text", "")
                    )
                )
            case "thinking_delta":
                chunk = LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant, reasoning_content=delta.get("thinking", "")
                    )
                )
            case "signature_delta":
                chunk = LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant,
                        reasoning_signature=delta.get("signature", ""),
                    )
                )
            case "input_json_delta":
                # Partial tool-call arguments; the caller accumulates them by
                # matching on the block index.
                chunk = LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant,
                        tool_calls=[
                            ToolCall(
                                index=idx,
                                function=FunctionCall(
                                    arguments=delta.get("partial_json", "")
                                ),
                            )
                        ],
                    )
                )
            case _:
                chunk = None
        return chunk, idx

    def _handle_message_delta(
        self, data: dict[str, Any], current_index: int
    ) -> tuple[LLMChunk | None, int]:
        """Handle ``message_delta``: surfaces the running output-token count."""
        usage_data = data.get("usage", {})
        if not usage_data:
            return None, current_index
        chunk = LLMChunk(
            message=LLMMessage(role=Role.assistant),
            # Prompt tokens were already reported in message_start.
            usage=LLMUsage(
                prompt_tokens=0, completion_tokens=usage_data.get("output_tokens", 0)
            ),
        )
        return chunk, current_index

    def _handle_message_start(
        self, data: dict[str, Any], current_index: int
    ) -> tuple[LLMChunk | None, int]:
        """Handle ``message_start``: report total input-token usage."""
        message = data.get("message", {})
        usage_data = message.get("usage", {})
        if not usage_data:
            return None, current_index
        # Total input tokens = input_tokens + cache_creation + cache_read
        total_input_tokens = (
            usage_data.get("input_tokens", 0)
            + usage_data.get("cache_creation_input_tokens", 0)
            + usage_data.get("cache_read_input_tokens", 0)
        )
        chunk = LLMChunk(
            message=LLMMessage(role=Role.assistant),
            usage=LLMUsage(prompt_tokens=total_input_tokens, completion_tokens=0),
        )
        return chunk, current_index
303
+
304
+
305
# Event types emitted by the Anthropic Messages API SSE stream; used by
# AnthropicAdapter.parse_response to tell streaming payloads apart from a
# complete (non-streaming) response body.
# frozenset: this is a module-level constant consulted only for membership,
# so making it immutable guards against accidental mutation.
STREAMING_EVENT_TYPES = frozenset({
    "message_start",
    "message_delta",
    "message_stop",
    "content_block_start",
    "content_block_delta",
    "content_block_stop",
    "ping",
    "error",
})
315
+
316
+
317
class AnthropicAdapter(APIAdapter):
    """APIAdapter implementation targeting the Anthropic Messages API.

    Builds request payloads (including prompt caching and extended-thinking
    configuration) and parses both complete responses and SSE streaming
    events into ``LLMChunk`` objects.

    NOTE(review): the streaming parse methods below duplicate the logic in
    ``AnthropicMapper.parse_streaming_event``; consider consolidating.
    """

    endpoint: ClassVar[str] = "/v1/messages"
    # Pinned API version sent in the `anthropic-version` header.
    API_VERSION = "2023-06-01"
    # Comma-joined beta flags sent in the `anthropic-beta` header.
    BETA_FEATURES = (
        "interleaved-thinking-2025-05-14,"
        "fine-grained-tool-streaming-2025-05-14,"
        "prompt-caching-2024-07-31"
    )
    # Thinking budget (tokens) per effort level for non-adaptive models.
    THINKING_BUDGETS: ClassVar[dict[str, int]] = {
        "low": 1024,
        "medium": 10_000,
        "high": 32_000,
    }
    # Default max_tokens for models using adaptive thinking.
    DEFAULT_ADAPTIVE_MAX_TOKENS: ClassVar[int] = 32_768
    # Default max_tokens when the caller does not supply one.
    DEFAULT_MAX_TOKENS = 8192

    def __init__(self) -> None:
        self._mapper = AnthropicMapper()
        # Tracks the current content-block index across streaming events;
        # reset to 0 on each message_start.
        self._current_index: int = 0

    @staticmethod
    def _has_thinking_content(messages: list[dict[str, Any]]) -> bool:
        """Return True if any converted assistant message contains a thinking block.

        Used to decide whether thinking must stay enabled even when the
        configured level is "off" (history with thinking blocks forces it).
        """
        for msg in messages:
            if msg.get("role") != "assistant":
                continue
            content = msg.get("content")
            if not isinstance(content, list):
                continue
            for block in content:
                if block.get("type") == "thinking":
                    return True
        return False

    @staticmethod
    def _build_system_blocks(system_prompt: str | None) -> list[dict[str, Any]]:
        """Wrap the system prompt in a cache-marked text block (empty if none).

        The ephemeral cache_control marker lets Anthropic cache the (large,
        stable) system prompt across requests.
        """
        blocks: list[dict[str, Any]] = []
        if system_prompt:
            blocks.append({
                "type": "text",
                "text": system_prompt,
                "cache_control": {"type": "ephemeral"},
            })
        return blocks

    @staticmethod
    def _add_cache_control_to_last_user_message(messages: list[dict[str, Any]]) -> None:
        """Mark the last block of the trailing user message as a cache point.

        Mutates ``messages`` in place; silently does nothing when the last
        message is not a user message with a block-list content or the final
        block is not a cacheable type.
        """
        if not messages:
            return
        last_message = messages[-1]
        if last_message.get("role") != "user":
            return
        content = last_message.get("content")
        if not isinstance(content, list) or not content:
            return
        last_block = content[-1]
        if last_block.get("type") in {"text", "image", "tool_result"}:
            last_block["cache_control"] = {"type": "ephemeral"}

    @staticmethod
    def _is_adaptive_model(model_name: str) -> bool:
        """Return True for models that use adaptive thinking ("opus-4-6" family)."""
        return "opus-4-6" in model_name

    def _apply_thinking_config(
        self,
        payload: dict[str, Any],
        *,
        model_name: str,
        messages: list[dict[str, Any]],
        temperature: float,
        max_tokens: int | None,
        thinking: str,
    ) -> None:
        """Set thinking/temperature/max_tokens fields on ``payload`` in place.

        Behavior:
        - thinking "off" and no thinking blocks in history: plain request
          with the caller's temperature and max_tokens (or the default).
        - otherwise: enable thinking (adaptive or budgeted), which requires
          temperature to be exactly 1 per the Anthropic API.

        Raises:
            KeyError: if ``thinking`` is an unrecognized non-"off" level
                (no entry in THINKING_BUDGETS) on a non-adaptive model.
        """
        has_thinking = self._has_thinking_content(messages)
        thinking_level = thinking

        if thinking_level == "off" and not has_thinking:
            payload["temperature"] = temperature
            if max_tokens is not None:
                payload["max_tokens"] = max_tokens
            else:
                payload["max_tokens"] = self.DEFAULT_MAX_TOKENS
            return

        # Resolve effective level: use config, or fallback to "medium" when
        # forced by thinking content in history
        effective_level = thinking_level if thinking_level != "off" else "medium"

        if self._is_adaptive_model(model_name):
            payload["thinking"] = {"type": "adaptive"}
            payload["output_config"] = {"effort": effective_level}
            default_max = self.DEFAULT_ADAPTIVE_MAX_TOKENS
        else:
            budget = self.THINKING_BUDGETS[effective_level]
            payload["thinking"] = {"type": "enabled", "budget_tokens": budget}
            # Leave headroom for the visible answer on top of the budget.
            default_max = budget + self.DEFAULT_MAX_TOKENS

        # Extended thinking requires temperature = 1.
        payload["temperature"] = 1
        payload["max_tokens"] = max_tokens if max_tokens is not None else default_max

    def _build_payload(
        self,
        *,
        model_name: str,
        system_prompt: str | None,
        messages: list[dict[str, Any]],
        temperature: float,
        tools: list[dict[str, Any]] | None,
        max_tokens: int | None,
        tool_choice: dict[str, Any] | None,
        stream: bool,
        thinking: str,
    ) -> dict[str, Any]:
        """Assemble the full JSON payload for a Messages API request."""
        payload: dict[str, Any] = {"model": model_name, "messages": messages}

        self._apply_thinking_config(
            payload,
            model_name=model_name,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            thinking=thinking,
        )

        if system_blocks := self._build_system_blocks(system_prompt):
            payload["system"] = system_blocks

        if tools:
            payload["tools"] = tools

        if tool_choice:
            payload["tool_choice"] = tool_choice

        if stream:
            payload["stream"] = True

        # Mutates `messages` in place; effective because payload["messages"]
        # references the same list object.
        self._add_cache_control_to_last_user_message(messages)

        return payload

    def prepare_request(  # noqa: PLR0913
        self,
        *,
        model_name: str,
        messages: list[LLMMessage],
        temperature: float,
        tools: list[AvailableTool] | None,
        max_tokens: int | None,
        tool_choice: StrToolChoice | AvailableTool | None,
        enable_streaming: bool,
        provider: ProviderConfig,
        api_key: str | None = None,
        thinking: str = "off",
    ) -> PreparedRequest:
        """Build a PreparedRequest (endpoint, headers, JSON body) for Anthropic.

        ``provider`` is accepted for interface compatibility but not used by
        this adapter.
        """
        system_prompt, converted_messages = self._mapper.prepare_messages(messages)
        converted_tools = self._mapper.prepare_tools(tools)
        converted_tool_choice = self._mapper.prepare_tool_choice(tool_choice)

        payload = self._build_payload(
            model_name=model_name,
            system_prompt=system_prompt,
            messages=converted_messages,
            temperature=temperature,
            tools=converted_tools,
            max_tokens=max_tokens,
            tool_choice=converted_tool_choice,
            stream=enable_streaming,
            thinking=thinking,
        )

        headers = {
            "Content-Type": "application/json",
            "anthropic-version": self.API_VERSION,
            "anthropic-beta": self.BETA_FEATURES,
        }

        if api_key:
            headers["x-api-key"] = api_key

        body = json.dumps(payload).encode("utf-8")
        return PreparedRequest(self.endpoint, headers, body)

    def parse_response(
        self, data: dict[str, Any], provider: ProviderConfig | None = None
    ) -> LLMChunk:
        """Parse an API payload: routes SSE events vs. complete responses.

        A payload whose "type" is a known streaming event type is handled by
        the streaming path; everything else is treated as a full response.
        """
        event_type = data.get("type")
        if event_type in STREAMING_EVENT_TYPES:
            return self._parse_streaming_event(data)
        return self._mapper.parse_response(data)

    def _parse_streaming_event(self, data: dict[str, Any]) -> LLMChunk:
        """Convert one streaming event into an LLMChunk.

        Unknown/no-op events (ping, message_stop, unhandled block starts)
        yield an empty assistant chunk so the caller always gets an LLMChunk.

        Raises:
            RuntimeError: on an "error" event from the stream.
        """
        event_type = data.get("type", "")
        empty_chunk = LLMChunk(message=LLMMessage(role=Role.assistant, content=None))

        if event_type == "message_start":
            # New message: reset the block-index tracker.
            self._current_index = 0
            return self._parse_message_start(data)
        if event_type == "content_block_start":
            return self._parse_content_block_start(data) or empty_chunk
        if event_type == "content_block_delta":
            return self._parse_content_block_delta(data)
        if event_type == "content_block_stop":
            return self._parse_content_block_stop(data)
        if event_type == "message_delta":
            return self._parse_message_delta(data)
        if event_type == "error":
            error = data.get("error", {})
            error_type = error.get("type", "unknown_error")
            error_message = error.get("message", "Unknown streaming error")
            raise RuntimeError(
                f"Anthropic stream error ({error_type}): {error_message}"
            )
        return empty_chunk

    def _parse_message_start(self, data: dict[str, Any]) -> LLMChunk:
        """Extract input-token usage from a message_start event."""
        message = data.get("message", {})
        usage_data = message.get("usage", {})
        if not usage_data:
            return LLMChunk(message=LLMMessage(role=Role.assistant, content=None))
        # Total input tokens = input_tokens + cache creation + cache read.
        total_input_tokens = (
            usage_data.get("input_tokens", 0)
            + usage_data.get("cache_creation_input_tokens", 0)
            + usage_data.get("cache_read_input_tokens", 0)
        )
        return LLMChunk(
            message=LLMMessage(role=Role.assistant, content=None),
            usage=LLMUsage(prompt_tokens=total_input_tokens, completion_tokens=0),
        )

    def _parse_content_block_start(self, data: dict[str, Any]) -> LLMChunk | None:
        """Handle content_block_start; returns None for blocks with no chunk.

        "redacted_thinking" is deliberately skipped; "text" blocks produce
        nothing here because their content arrives via text_delta events.
        """
        content_block = data.get("content_block", {})
        index = data.get("index", 0)
        block_type = content_block.get("type")

        if block_type == "thinking":
            return LLMChunk(
                message=LLMMessage(
                    role=Role.assistant,
                    reasoning_content=content_block.get("thinking", ""),
                )
            )
        if block_type == "redacted_thinking":
            return None
        if block_type == "tool_use":
            # Arguments start empty; filled in by input_json_delta events.
            return LLMChunk(
                message=LLMMessage(
                    role=Role.assistant,
                    tool_calls=[
                        ToolCall(
                            index=index,
                            id=content_block.get("id"),
                            function=FunctionCall(
                                name=content_block.get("name"), arguments=""
                            ),
                        )
                    ],
                )
            )
        return None

    def _parse_content_block_delta(self, data: dict[str, Any]) -> LLMChunk:
        """Handle content_block_delta: text/thinking/signature/tool-JSON deltas."""
        delta = data.get("delta", {})
        delta_type = delta.get("type", "")
        index = data.get("index", 0)

        match delta_type:
            case "text_delta":
                return LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant, content=delta.get("text", "")
                    )
                )
            case "thinking_delta":
                return LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant, reasoning_content=delta.get("thinking", "")
                    )
                )
            case "signature_delta":
                return LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant,
                        reasoning_signature=delta.get("signature", ""),
                    )
                )
            case "input_json_delta":
                return LLMChunk(
                    message=LLMMessage(
                        role=Role.assistant,
                        tool_calls=[
                            ToolCall(
                                index=index,
                                function=FunctionCall(
                                    arguments=delta.get("partial_json", "")
                                ),
                            )
                        ],
                    )
                )
            case _:
                # Unknown delta type: emit an empty chunk rather than failing.
                return LLMChunk(message=LLMMessage(role=Role.assistant, content=None))

    def _parse_content_block_stop(self, _data: dict[str, Any]) -> LLMChunk:
        """content_block_stop carries no content; return an empty chunk."""
        return LLMChunk(message=LLMMessage(role=Role.assistant, content=None))

    def _parse_message_delta(self, data: dict[str, Any]) -> LLMChunk:
        """Handle message_delta: surfaces the running output-token count."""
        usage_data = data.get("usage", {})
        if not usage_data:
            return LLMChunk(message=LLMMessage(role=Role.assistant, content=None))
        return LLMChunk(
            message=LLMMessage(role=Role.assistant, content=None),
            # Prompt tokens were already reported by message_start.
            usage=LLMUsage(
                prompt_tokens=0, completion_tokens=usage_data.get("output_tokens", 0)
            ),
        )
@@ -0,0 +1,38 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, Any, ClassVar, NamedTuple, Protocol
4
+
5
+ from vibe.core.types import AvailableTool, LLMChunk, LLMMessage, StrToolChoice
6
+
7
+ if TYPE_CHECKING:
8
+ from vibe.core.config import ProviderConfig
9
+
10
+
11
class PreparedRequest(NamedTuple):
    """A fully prepared HTTP request for a provider API call."""

    endpoint: str  # request path, e.g. "/v1/messages"
    headers: dict[str, str]  # HTTP headers, including auth/version headers
    body: bytes  # JSON-encoded request payload
    base_url: str = ""  # optional base-URL override; "" presumably means use the provider default -- TODO confirm
16
+
17
+
18
class APIAdapter(Protocol):
    """Structural interface for provider-specific request/response adapters.

    Implementations translate between internal LLM types and a provider's
    wire format (e.g. Anthropic Messages API).
    """

    # API path the adapter targets, e.g. "/v1/messages".
    endpoint: ClassVar[str]

    def prepare_request(  # noqa: PLR0913
        self,
        *,
        model_name: str,
        messages: list[LLMMessage],
        temperature: float,
        tools: list[AvailableTool] | None,
        max_tokens: int | None,
        tool_choice: StrToolChoice | AvailableTool | None,
        enable_streaming: bool,
        provider: ProviderConfig,
        api_key: str | None = None,
        thinking: str = "off",
    ) -> PreparedRequest:
        """Build the provider-specific HTTP request for one completion call."""
        ...

    def parse_response(
        self, data: dict[str, Any], provider: ProviderConfig
    ) -> LLMChunk:
        """Parse one provider response payload (full or streaming event)."""
        ...
@@ -0,0 +1,7 @@
1
+ from __future__ import annotations
2
+
3
+ from vibe.core.config import Backend
4
+ from vibe.core.llm.backend.generic import GenericBackend
5
+ from vibe.core.llm.backend.mistral import MistralBackend
6
+
7
# Maps a configured Backend enum member to its backend implementation class.
# NOTE(review): only MISTRAL and GENERIC are registered here; other backends
# (e.g. Anthropic) are presumably selected elsewhere -- confirm.
BACKEND_FACTORY = {Backend.MISTRAL: MistralBackend, Backend.GENERIC: GenericBackend}