code-puppy 0.0.214__py3-none-any.whl → 0.0.366__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/__init__.py +7 -1
- code_puppy/agents/__init__.py +2 -0
- code_puppy/agents/agent_c_reviewer.py +59 -6
- code_puppy/agents/agent_code_puppy.py +7 -1
- code_puppy/agents/agent_code_reviewer.py +12 -2
- code_puppy/agents/agent_cpp_reviewer.py +73 -6
- code_puppy/agents/agent_creator_agent.py +45 -4
- code_puppy/agents/agent_golang_reviewer.py +92 -3
- code_puppy/agents/agent_javascript_reviewer.py +101 -8
- code_puppy/agents/agent_manager.py +81 -4
- code_puppy/agents/agent_pack_leader.py +383 -0
- code_puppy/agents/agent_planning.py +163 -0
- code_puppy/agents/agent_python_programmer.py +165 -0
- code_puppy/agents/agent_python_reviewer.py +28 -6
- code_puppy/agents/agent_qa_expert.py +98 -6
- code_puppy/agents/agent_qa_kitten.py +12 -7
- code_puppy/agents/agent_security_auditor.py +113 -3
- code_puppy/agents/agent_terminal_qa.py +323 -0
- code_puppy/agents/agent_typescript_reviewer.py +106 -7
- code_puppy/agents/base_agent.py +802 -176
- code_puppy/agents/event_stream_handler.py +350 -0
- code_puppy/agents/pack/__init__.py +34 -0
- code_puppy/agents/pack/bloodhound.py +304 -0
- code_puppy/agents/pack/husky.py +321 -0
- code_puppy/agents/pack/retriever.py +393 -0
- code_puppy/agents/pack/shepherd.py +348 -0
- code_puppy/agents/pack/terrier.py +287 -0
- code_puppy/agents/pack/watchdog.py +367 -0
- code_puppy/agents/prompt_reviewer.py +145 -0
- code_puppy/agents/subagent_stream_handler.py +276 -0
- code_puppy/api/__init__.py +13 -0
- code_puppy/api/app.py +169 -0
- code_puppy/api/main.py +21 -0
- code_puppy/api/pty_manager.py +446 -0
- code_puppy/api/routers/__init__.py +12 -0
- code_puppy/api/routers/agents.py +36 -0
- code_puppy/api/routers/commands.py +217 -0
- code_puppy/api/routers/config.py +74 -0
- code_puppy/api/routers/sessions.py +232 -0
- code_puppy/api/templates/terminal.html +361 -0
- code_puppy/api/websocket.py +154 -0
- code_puppy/callbacks.py +142 -4
- code_puppy/chatgpt_codex_client.py +283 -0
- code_puppy/claude_cache_client.py +586 -0
- code_puppy/cli_runner.py +916 -0
- code_puppy/command_line/add_model_menu.py +1079 -0
- code_puppy/command_line/agent_menu.py +395 -0
- code_puppy/command_line/attachments.py +10 -5
- code_puppy/command_line/autosave_menu.py +605 -0
- code_puppy/command_line/clipboard.py +527 -0
- code_puppy/command_line/colors_menu.py +520 -0
- code_puppy/command_line/command_handler.py +176 -738
- code_puppy/command_line/command_registry.py +150 -0
- code_puppy/command_line/config_commands.py +715 -0
- code_puppy/command_line/core_commands.py +792 -0
- code_puppy/command_line/diff_menu.py +863 -0
- code_puppy/command_line/load_context_completion.py +15 -22
- code_puppy/command_line/mcp/base.py +0 -3
- code_puppy/command_line/mcp/catalog_server_installer.py +175 -0
- code_puppy/command_line/mcp/custom_server_form.py +688 -0
- code_puppy/command_line/mcp/custom_server_installer.py +195 -0
- code_puppy/command_line/mcp/edit_command.py +148 -0
- code_puppy/command_line/mcp/handler.py +9 -4
- code_puppy/command_line/mcp/help_command.py +6 -5
- code_puppy/command_line/mcp/install_command.py +15 -26
- code_puppy/command_line/mcp/install_menu.py +685 -0
- code_puppy/command_line/mcp/list_command.py +2 -2
- code_puppy/command_line/mcp/logs_command.py +174 -65
- code_puppy/command_line/mcp/remove_command.py +2 -2
- code_puppy/command_line/mcp/restart_command.py +12 -4
- code_puppy/command_line/mcp/search_command.py +16 -10
- code_puppy/command_line/mcp/start_all_command.py +18 -6
- code_puppy/command_line/mcp/start_command.py +47 -25
- code_puppy/command_line/mcp/status_command.py +4 -5
- code_puppy/command_line/mcp/stop_all_command.py +7 -1
- code_puppy/command_line/mcp/stop_command.py +8 -4
- code_puppy/command_line/mcp/test_command.py +2 -2
- code_puppy/command_line/mcp/wizard_utils.py +20 -16
- code_puppy/command_line/mcp_completion.py +174 -0
- code_puppy/command_line/model_picker_completion.py +75 -25
- code_puppy/command_line/model_settings_menu.py +884 -0
- code_puppy/command_line/motd.py +14 -8
- code_puppy/command_line/onboarding_slides.py +179 -0
- code_puppy/command_line/onboarding_wizard.py +340 -0
- code_puppy/command_line/pin_command_completion.py +329 -0
- code_puppy/command_line/prompt_toolkit_completion.py +463 -63
- code_puppy/command_line/session_commands.py +296 -0
- code_puppy/command_line/utils.py +54 -0
- code_puppy/config.py +898 -112
- code_puppy/error_logging.py +118 -0
- code_puppy/gemini_code_assist.py +385 -0
- code_puppy/gemini_model.py +602 -0
- code_puppy/http_utils.py +210 -148
- code_puppy/keymap.py +128 -0
- code_puppy/main.py +5 -698
- code_puppy/mcp_/__init__.py +17 -0
- code_puppy/mcp_/async_lifecycle.py +35 -4
- code_puppy/mcp_/blocking_startup.py +70 -43
- code_puppy/mcp_/captured_stdio_server.py +2 -2
- code_puppy/mcp_/config_wizard.py +4 -4
- code_puppy/mcp_/dashboard.py +15 -6
- code_puppy/mcp_/managed_server.py +65 -38
- code_puppy/mcp_/manager.py +146 -52
- code_puppy/mcp_/mcp_logs.py +224 -0
- code_puppy/mcp_/registry.py +6 -6
- code_puppy/mcp_/server_registry_catalog.py +24 -5
- code_puppy/messaging/__init__.py +199 -2
- code_puppy/messaging/bus.py +610 -0
- code_puppy/messaging/commands.py +167 -0
- code_puppy/messaging/markdown_patches.py +57 -0
- code_puppy/messaging/message_queue.py +17 -48
- code_puppy/messaging/messages.py +500 -0
- code_puppy/messaging/queue_console.py +1 -24
- code_puppy/messaging/renderers.py +43 -146
- code_puppy/messaging/rich_renderer.py +1027 -0
- code_puppy/messaging/spinner/__init__.py +21 -5
- code_puppy/messaging/spinner/console_spinner.py +86 -51
- code_puppy/messaging/subagent_console.py +461 -0
- code_puppy/model_factory.py +634 -83
- code_puppy/model_utils.py +167 -0
- code_puppy/models.json +66 -68
- code_puppy/models_dev_api.json +1 -0
- code_puppy/models_dev_parser.py +592 -0
- code_puppy/plugins/__init__.py +164 -10
- code_puppy/plugins/antigravity_oauth/__init__.py +10 -0
- code_puppy/plugins/antigravity_oauth/accounts.py +406 -0
- code_puppy/plugins/antigravity_oauth/antigravity_model.py +704 -0
- code_puppy/plugins/antigravity_oauth/config.py +42 -0
- code_puppy/plugins/antigravity_oauth/constants.py +136 -0
- code_puppy/plugins/antigravity_oauth/oauth.py +478 -0
- code_puppy/plugins/antigravity_oauth/register_callbacks.py +406 -0
- code_puppy/plugins/antigravity_oauth/storage.py +271 -0
- code_puppy/plugins/antigravity_oauth/test_plugin.py +319 -0
- code_puppy/plugins/antigravity_oauth/token.py +167 -0
- code_puppy/plugins/antigravity_oauth/transport.py +767 -0
- code_puppy/plugins/antigravity_oauth/utils.py +169 -0
- code_puppy/plugins/chatgpt_oauth/__init__.py +8 -0
- code_puppy/plugins/chatgpt_oauth/config.py +52 -0
- code_puppy/plugins/chatgpt_oauth/oauth_flow.py +328 -0
- code_puppy/plugins/chatgpt_oauth/register_callbacks.py +94 -0
- code_puppy/plugins/chatgpt_oauth/test_plugin.py +293 -0
- code_puppy/plugins/chatgpt_oauth/utils.py +489 -0
- code_puppy/plugins/claude_code_oauth/README.md +167 -0
- code_puppy/plugins/claude_code_oauth/SETUP.md +93 -0
- code_puppy/plugins/claude_code_oauth/__init__.py +6 -0
- code_puppy/plugins/claude_code_oauth/config.py +50 -0
- code_puppy/plugins/claude_code_oauth/register_callbacks.py +308 -0
- code_puppy/plugins/claude_code_oauth/test_plugin.py +283 -0
- code_puppy/plugins/claude_code_oauth/utils.py +518 -0
- code_puppy/plugins/customizable_commands/__init__.py +0 -0
- code_puppy/plugins/customizable_commands/register_callbacks.py +169 -0
- code_puppy/plugins/example_custom_command/README.md +280 -0
- code_puppy/plugins/example_custom_command/register_callbacks.py +2 -2
- code_puppy/plugins/file_permission_handler/__init__.py +4 -0
- code_puppy/plugins/file_permission_handler/register_callbacks.py +523 -0
- code_puppy/plugins/frontend_emitter/__init__.py +25 -0
- code_puppy/plugins/frontend_emitter/emitter.py +121 -0
- code_puppy/plugins/frontend_emitter/register_callbacks.py +261 -0
- code_puppy/plugins/oauth_puppy_html.py +228 -0
- code_puppy/plugins/shell_safety/__init__.py +6 -0
- code_puppy/plugins/shell_safety/agent_shell_safety.py +69 -0
- code_puppy/plugins/shell_safety/command_cache.py +156 -0
- code_puppy/plugins/shell_safety/register_callbacks.py +202 -0
- code_puppy/prompts/antigravity_system_prompt.md +1 -0
- code_puppy/prompts/codex_system_prompt.md +310 -0
- code_puppy/pydantic_patches.py +131 -0
- code_puppy/reopenable_async_client.py +8 -8
- code_puppy/round_robin_model.py +9 -12
- code_puppy/session_storage.py +2 -1
- code_puppy/status_display.py +21 -4
- code_puppy/summarization_agent.py +41 -13
- code_puppy/terminal_utils.py +418 -0
- code_puppy/tools/__init__.py +37 -1
- code_puppy/tools/agent_tools.py +536 -52
- code_puppy/tools/browser/__init__.py +37 -0
- code_puppy/tools/browser/browser_control.py +19 -23
- code_puppy/tools/browser/browser_interactions.py +41 -48
- code_puppy/tools/browser/browser_locators.py +36 -38
- code_puppy/tools/browser/browser_manager.py +316 -0
- code_puppy/tools/browser/browser_navigation.py +16 -16
- code_puppy/tools/browser/browser_screenshot.py +79 -143
- code_puppy/tools/browser/browser_scripts.py +32 -42
- code_puppy/tools/browser/browser_workflows.py +44 -27
- code_puppy/tools/browser/chromium_terminal_manager.py +259 -0
- code_puppy/tools/browser/terminal_command_tools.py +521 -0
- code_puppy/tools/browser/terminal_screenshot_tools.py +556 -0
- code_puppy/tools/browser/terminal_tools.py +525 -0
- code_puppy/tools/command_runner.py +930 -147
- code_puppy/tools/common.py +1113 -5
- code_puppy/tools/display.py +84 -0
- code_puppy/tools/file_modifications.py +288 -89
- code_puppy/tools/file_operations.py +226 -154
- code_puppy/tools/subagent_context.py +158 -0
- code_puppy/uvx_detection.py +242 -0
- code_puppy/version_checker.py +30 -11
- code_puppy-0.0.366.data/data/code_puppy/models.json +110 -0
- code_puppy-0.0.366.data/data/code_puppy/models_dev_api.json +1 -0
- {code_puppy-0.0.214.dist-info → code_puppy-0.0.366.dist-info}/METADATA +149 -75
- code_puppy-0.0.366.dist-info/RECORD +217 -0
- {code_puppy-0.0.214.dist-info → code_puppy-0.0.366.dist-info}/WHEEL +1 -1
- code_puppy/command_line/mcp/add_command.py +0 -183
- code_puppy/messaging/spinner/textual_spinner.py +0 -106
- code_puppy/tools/browser/camoufox_manager.py +0 -216
- code_puppy/tools/browser/vqa_agent.py +0 -70
- code_puppy/tui/__init__.py +0 -10
- code_puppy/tui/app.py +0 -1105
- code_puppy/tui/components/__init__.py +0 -21
- code_puppy/tui/components/chat_view.py +0 -551
- code_puppy/tui/components/command_history_modal.py +0 -218
- code_puppy/tui/components/copy_button.py +0 -139
- code_puppy/tui/components/custom_widgets.py +0 -63
- code_puppy/tui/components/human_input_modal.py +0 -175
- code_puppy/tui/components/input_area.py +0 -167
- code_puppy/tui/components/sidebar.py +0 -309
- code_puppy/tui/components/status_bar.py +0 -185
- code_puppy/tui/messages.py +0 -27
- code_puppy/tui/models/__init__.py +0 -8
- code_puppy/tui/models/chat_message.py +0 -25
- code_puppy/tui/models/command_history.py +0 -89
- code_puppy/tui/models/enums.py +0 -24
- code_puppy/tui/screens/__init__.py +0 -17
- code_puppy/tui/screens/autosave_picker.py +0 -175
- code_puppy/tui/screens/help.py +0 -130
- code_puppy/tui/screens/mcp_install_wizard.py +0 -803
- code_puppy/tui/screens/settings.py +0 -306
- code_puppy/tui/screens/tools.py +0 -74
- code_puppy/tui_state.py +0 -55
- code_puppy-0.0.214.data/data/code_puppy/models.json +0 -112
- code_puppy-0.0.214.dist-info/RECORD +0 -131
- {code_puppy-0.0.214.dist-info → code_puppy-0.0.366.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.214.dist-info → code_puppy-0.0.366.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,704 @@
|
|
|
1
|
+
"""AntigravityModel - extends GeminiModel with thinking signature handling.
|
|
2
|
+
|
|
3
|
+
This model handles the special Antigravity envelope format and preserves
|
|
4
|
+
Claude thinking signatures for Gemini 3 models.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import base64
|
|
10
|
+
import json
|
|
11
|
+
import logging
|
|
12
|
+
from collections.abc import AsyncIterator
|
|
13
|
+
from contextlib import asynccontextmanager
|
|
14
|
+
from dataclasses import dataclass, field
|
|
15
|
+
from datetime import datetime, timezone
|
|
16
|
+
from typing import Any
|
|
17
|
+
from uuid import uuid4
|
|
18
|
+
|
|
19
|
+
from pydantic_ai._run_context import RunContext
|
|
20
|
+
from pydantic_ai.messages import (
|
|
21
|
+
BuiltinToolCallPart,
|
|
22
|
+
BuiltinToolReturnPart,
|
|
23
|
+
FilePart,
|
|
24
|
+
ModelMessage,
|
|
25
|
+
ModelRequest,
|
|
26
|
+
ModelResponse,
|
|
27
|
+
ModelResponsePart,
|
|
28
|
+
ModelResponseStreamEvent,
|
|
29
|
+
RetryPromptPart,
|
|
30
|
+
SystemPromptPart,
|
|
31
|
+
TextPart,
|
|
32
|
+
ThinkingPart,
|
|
33
|
+
ToolCallPart,
|
|
34
|
+
ToolReturnPart,
|
|
35
|
+
UserPromptPart,
|
|
36
|
+
)
|
|
37
|
+
from pydantic_ai.models import ModelRequestParameters, StreamedResponse
|
|
38
|
+
from pydantic_ai.settings import ModelSettings
|
|
39
|
+
from pydantic_ai.usage import RequestUsage
|
|
40
|
+
from typing_extensions import assert_never
|
|
41
|
+
|
|
42
|
+
from code_puppy.gemini_model import (
|
|
43
|
+
GeminiModel,
|
|
44
|
+
generate_tool_call_id,
|
|
45
|
+
)
|
|
46
|
+
from code_puppy.model_utils import _load_antigravity_prompt
|
|
47
|
+
from code_puppy.plugins.antigravity_oauth.transport import _inline_refs
|
|
48
|
+
|
|
49
|
+
logger = logging.getLogger(__name__)
|
|
50
|
+
|
|
51
|
+
# Type aliases for clarity — all are plain JSON-serializable dicts sent to the
# Antigravity / Gemini REST API, so no runtime validation is attached.
ContentDict = dict[str, Any]
PartDict = dict[str, Any]
FunctionCallDict = dict[str, Any]
BlobDict = dict[str, Any]

# Bypass signature for when no real thought signature is available.
# Sent in place of a genuine `thoughtSignature` so the API accepts the
# request instead of rejecting it with a 400 signature error.
BYPASS_THOUGHT_SIGNATURE = "context_engineering_is_the_way_to_go"
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def _is_signature_error(error_text: str) -> bool:
|
|
62
|
+
"""Check if the error is a thought signature error that can be retried.
|
|
63
|
+
|
|
64
|
+
Detects both:
|
|
65
|
+
- Gemini: "Corrupted thought signature"
|
|
66
|
+
- Claude: "thinking.signature: Field required" or similar
|
|
67
|
+
"""
|
|
68
|
+
return (
|
|
69
|
+
"Corrupted thought signature" in error_text
|
|
70
|
+
or "thinking.signature" in error_text
|
|
71
|
+
)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class AntigravityModel(GeminiModel):
    """Custom GeminiModel that correctly handles Claude thinking signatures via Antigravity.

    This model extends GeminiModel and adds:
    - Proper thoughtSignature handling for both Gemini and Claude models
    - Backfill logic for corrupted thought signatures
    - Special message merging for parallel function calls
    """

    def _get_instructions(
        self,
        messages: list,
        model_request_parameters: ModelRequestParameters,
    ) -> str | None:
        """Return the Antigravity system prompt.

        The Antigravity endpoint expects requests to include the special
        Antigravity identity prompt in the systemInstruction field.

        Note: ``messages`` and ``model_request_parameters`` are accepted for
        signature compatibility with the parent hook but are not consulted —
        the prompt is always loaded from ``_load_antigravity_prompt()``.
        """
        return _load_antigravity_prompt()

    def _is_claude_model(self) -> bool:
        """Check if this is a Claude model (vs Gemini), by substring of the model name."""
        return "claude" in self.model_name.lower()

    def _build_tools(self, tools: list) -> list[dict]:
        """Build tool definitions with model-appropriate schema handling.

        Claude and Gemini have different JSON Schema requirements:
        - Gemini: needs anyOf->any_of conversion, etc.
        - Claude: needs standard JSON Schema, simplified unions

        Returns a single-element list wrapping all function declarations, which
        is the shape the Gemini-style ``tools`` request field expects.
        """

        is_claude = self._is_claude_model()
        function_declarations = []

        for tool in tools:
            func_decl = {
                "name": tool.name,
                "description": tool.description or "",
            }
            if tool.parameters_json_schema:
                # Use _inline_refs with appropriate flags for the model type
                func_decl["parameters"] = _inline_refs(
                    tool.parameters_json_schema,
                    convert_unions=not is_claude,  # Gemini needs any_of conversion
                    simplify_for_claude=is_claude,  # Claude needs simplified unions
                )
            function_declarations.append(func_decl)

        return [{"functionDeclarations": function_declarations}]

    async def _map_messages(
        self,
        messages: list[ModelMessage],
        model_request_parameters: ModelRequestParameters,
    ) -> tuple[ContentDict | None, list[dict]]:
        """Map messages to Gemini API format, preserving thinking signatures.

        IMPORTANT: For Gemini with parallel function calls, the API expects:
        - Model message: [FC1 + signature, FC2, ...] (all function calls together)
        - User message: [FR1, FR2, ...] (all function responses together)

        If messages are interleaved (FC1, FR1, FC2, FR2), the API returns 400.
        This method merges consecutive same-role messages to fix this.

        Returns:
            (system_instruction, contents) — the optional systemInstruction
            content dict and the list of role/parts content dicts.
        """
        contents: list[dict] = []
        system_parts: list[PartDict] = []

        for m in messages:
            if isinstance(m, ModelRequest):
                message_parts: list[PartDict] = []

                for part in m.parts:
                    if isinstance(part, SystemPromptPart):
                        # System prompts are hoisted out of the turn list and
                        # into the systemInstruction field built at the end.
                        system_parts.append({"text": part.content})
                    elif isinstance(part, UserPromptPart):
                        # Use parent's _map_user_prompt
                        mapped_parts = await self._map_user_prompt(part)
                        # Sanitize bytes to base64 for JSON serialization
                        # (httpx json= cannot encode raw bytes).
                        for mp in mapped_parts:
                            if "inline_data" in mp and "data" in mp["inline_data"]:
                                data = mp["inline_data"]["data"]
                                if isinstance(data, bytes):
                                    mp["inline_data"]["data"] = base64.b64encode(
                                        data
                                    ).decode("utf-8")
                        message_parts.extend(mapped_parts)
                    elif isinstance(part, ToolReturnPart):
                        message_parts.append(
                            {
                                "function_response": {
                                    "name": part.tool_name,
                                    "response": part.model_response_object(),
                                    "id": part.tool_call_id,
                                }
                            }
                        )
                    elif isinstance(part, RetryPromptPart):
                        if part.tool_name is None:
                            # Retry not tied to a tool: surface it as plain text.
                            message_parts.append({"text": part.model_response()})
                        else:
                            # Tool-scoped retry: report the error through the
                            # matching function_response so the model sees it
                            # as a failed tool result.
                            message_parts.append(
                                {
                                    "function_response": {
                                        "name": part.tool_name,
                                        "response": {"error": part.model_response()},
                                        "id": part.tool_call_id,
                                    }
                                }
                            )
                    else:
                        assert_never(part)

                if message_parts:
                    # Merge with previous user message if exists (for parallel function responses)
                    if contents and contents[-1].get("role") == "user":
                        contents[-1]["parts"].extend(message_parts)
                    else:
                        contents.append({"role": "user", "parts": message_parts})

            elif isinstance(m, ModelResponse):
                # Use custom helper for thinking signature handling
                maybe_content = _antigravity_content_model_response(
                    m, self.system, self._model_name
                )
                if maybe_content:
                    # Merge with previous model message if exists (for parallel function calls)
                    if contents and contents[-1].get("role") == "model":
                        contents[-1]["parts"].extend(maybe_content["parts"])
                    else:
                        contents.append(maybe_content)
            else:
                assert_never(m)

        # Google GenAI requires at least one part in the message.
        if not contents:
            contents = [{"role": "user", "parts": [{"text": ""}]}]

        # Get any injected instructions (the Antigravity identity prompt) and
        # put them FIRST, ahead of any per-message system prompts.
        instructions = self._get_instructions(messages, model_request_parameters)
        if instructions:
            system_parts.insert(0, {"text": instructions})

        system_instruction = (
            ContentDict(role="user", parts=system_parts) if system_parts else None
        )

        return system_instruction, contents

    async def request(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> ModelResponse:
        """Override request to handle Antigravity envelope and thinking signatures.

        On a 400 thought-signature error the message history is backfilled with
        bypass signatures (via ``_backfill_thought_signatures``) and the request
        is retried exactly once; any other non-200 response raises RuntimeError.
        """
        system_instruction, contents = await self._map_messages(
            messages, model_request_parameters
        )

        # Build generation config from model settings
        # (helper inherited from GeminiModel — not visible here).
        gen_config = self._build_generation_config(model_settings)

        # Build JSON body
        body: dict[str, Any] = {
            "contents": contents,
        }
        if gen_config:
            body["generationConfig"] = gen_config
        if system_instruction:
            body["systemInstruction"] = system_instruction

        # Serialize tools
        if model_request_parameters.function_tools:
            body["tools"] = self._build_tools(model_request_parameters.function_tools)

        # Get httpx client (base URL presumably configured by the parent /
        # transport — the path below is relative).
        client = await self._get_client()
        url = f"/models/{self._model_name}:generateContent"

        # Send request
        response = await client.post(url, json=body)

        if response.status_code != 200:
            error_text = response.text
            if response.status_code == 400 and _is_signature_error(error_text):
                logger.warning(
                    "Received 400 signature error. Backfilling with bypass signatures and retrying. Error: %s",
                    error_text[:200],
                )
                # Mutates `messages` in place so the re-map below picks up
                # the bypass signatures.
                _backfill_thought_signatures(messages)

                # Re-map messages
                system_instruction, contents = await self._map_messages(
                    messages, model_request_parameters
                )

                # Update body
                body["contents"] = contents
                if system_instruction:
                    body["systemInstruction"] = system_instruction

                # Retry request (once; a second failure is fatal)
                response = await client.post(url, json=body)
                if response.status_code != 200:
                    raise RuntimeError(
                        f"Antigravity API Error {response.status_code}: {response.text}"
                    )
            else:
                raise RuntimeError(
                    f"Antigravity API Error {response.status_code}: {error_text}"
                )

        data = response.json()

        # Extract candidates; an empty candidate list degrades to an empty
        # text response rather than raising.
        candidates = data.get("candidates", [])
        if not candidates:
            return ModelResponse(
                parts=[TextPart(content="")],
                model_name=self._model_name,
                usage=RequestUsage(),
            )

        candidate = candidates[0]
        content = candidate.get("content", {})
        parts = content.get("parts", [])

        # Extract usage
        usage_meta = data.get("usageMetadata", {})
        usage = RequestUsage(
            input_tokens=usage_meta.get("promptTokenCount", 0),
            output_tokens=usage_meta.get("candidatesTokenCount", 0),
        )

        return _antigravity_process_response_from_parts(
            parts,
            candidate.get("groundingMetadata"),
            self._model_name,
            self.system,
            usage,
            vendor_id=data.get("requestId"),
        )

    @asynccontextmanager
    async def request_stream(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
        run_context: RunContext[Any] | None = None,
    ) -> AsyncIterator[StreamedResponse]:
        """Override request_stream for streaming with signature handling.

        Yields an AntigravityStreamingResponse backed by a lazy SSE chunk
        generator; the HTTP request is not issued until the response is
        iterated. A 400 signature error triggers one backfill-and-retry,
        mirroring ``request``.
        """
        system_instruction, contents = await self._map_messages(
            messages, model_request_parameters
        )

        # Build generation config
        gen_config = self._build_generation_config(model_settings)

        # Build request body
        body: dict[str, Any] = {"contents": contents}
        if gen_config:
            body["generationConfig"] = gen_config
        if system_instruction:
            body["systemInstruction"] = system_instruction

        # Add tools
        if model_request_parameters.function_tools:
            body["tools"] = self._build_tools(model_request_parameters.function_tools)

        # Get httpx client
        client = await self._get_client()
        url = f"/models/{self._model_name}:streamGenerateContent?alt=sse"

        # Create async generator for SSE events
        async def stream_chunks() -> AsyncIterator[dict[str, Any]]:
            retry_count = 0
            nonlocal body  # Allow modification for retry

            # At most two attempts: the original and one signature-backfill retry.
            while retry_count < 2:
                should_retry = False
                async with client.stream("POST", url, json=body) as response:
                    if response.status_code != 200:
                        text = await response.aread()
                        error_msg = text.decode()
                        if (
                            response.status_code == 400
                            and _is_signature_error(error_msg)
                            and retry_count == 0
                        ):
                            should_retry = True
                        else:
                            raise RuntimeError(
                                f"Antigravity API Error {response.status_code}: {error_msg}"
                            )

                    if not should_retry:
                        # Parse the SSE stream: each event is a "data: {json}"
                        # line; malformed JSON payloads are skipped.
                        async for line in response.aiter_lines():
                            line = line.strip()
                            if not line:
                                continue
                            if line.startswith("data: "):
                                json_str = line[6:]
                                if json_str:
                                    try:
                                        yield json.loads(json_str)
                                    except json.JSONDecodeError:
                                        continue
                        return

                # Handle retry outside the context manager
                if should_retry:
                    logger.warning(
                        "Received 400 signature error in stream. Backfilling with bypass signatures and retrying."
                    )
                    _backfill_thought_signatures(messages)

                    # Re-map messages (these names are locals of stream_chunks;
                    # only `body` is shared with the enclosing scope).
                    system_instruction, contents = await self._map_messages(
                        messages, model_request_parameters
                    )

                    # Update body
                    body["contents"] = contents
                    if system_instruction:
                        body["systemInstruction"] = system_instruction

                    retry_count += 1

        # Create streaming response; the generator above is consumed lazily
        # by the caller iterating the StreamedResponse.
        streamed = AntigravityStreamingResponse(
            model_request_parameters=model_request_parameters,
            _chunks=stream_chunks(),
            _model_name_str=self._model_name,
            _provider_name_str=self.system,
        )
        yield streamed
|
|
413
|
+
|
|
414
|
+
|
|
415
|
+
@dataclass
class AntigravityStreamingResponse(StreamedResponse):
    """Real streaming response that processes SSE chunks as they arrive.

    Consumes the raw SSE chunk dicts produced by AntigravityModel's stream
    generator and translates them into pydantic-ai stream events, attaching
    thought signatures to the appropriate ThinkingPart depending on whether
    the model is Gemini or Claude.
    """

    # Async iterator of parsed SSE chunk dicts (the "data:" payloads).
    _chunks: AsyncIterator[dict[str, Any]]
    # Model name string, used both for reporting and Gemini/Claude detection.
    _model_name_str: str
    # Provider/system name reported via `provider_name`.
    _provider_name_str: str = "google"
    # Creation timestamp (timezone-aware UTC), reported via `timestamp`.
    _timestamp_val: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
        """Process streaming chunks and yield events.

        Signature protocol differs by model family:
        - Claude: the signature arrives ON the thinking part itself.
        - Gemini: the signature may arrive on a later part (e.g. a function
          call) and belongs to the PREVIOUS thinking part.
        """
        is_gemini = "gemini" in self._model_name_str.lower()
        # First signature seen in the stream, held for Gemini-style attachment.
        # NOTE(review): set but only consumed via `thought_signature` below —
        # retained here as written.
        pending_signature: str | None = None

        async for chunk in self._chunks:
            # Extract usage from chunk (later chunks overwrite earlier totals)
            usage_meta = chunk.get("usageMetadata", {})
            if usage_meta:
                self._usage = RequestUsage(
                    input_tokens=usage_meta.get("promptTokenCount", 0),
                    output_tokens=usage_meta.get("candidatesTokenCount", 0),
                )

            # Extract response ID
            if chunk.get("responseId"):
                self.provider_response_id = chunk["responseId"]

            candidates = chunk.get("candidates", [])
            if not candidates:
                continue

            candidate = candidates[0]
            content = candidate.get("content", {})
            parts = content.get("parts", [])

            for part in parts:
                # Extract signature
                thought_signature = part.get("thoughtSignature")
                if thought_signature:
                    if is_gemini and pending_signature is None:
                        pending_signature = thought_signature

                # Handle thought/thinking part
                if part.get("thought") and part.get("text") is not None:
                    text = part["text"]

                    event = self._parts_manager.handle_thinking_delta(
                        vendor_part_id=None,
                        content=text,
                    )
                    if event:
                        yield event

                    # For Claude: signature is ON the thinking block itself.
                    # Reaches into the parts manager's private `_parts` list to
                    # stamp the most recent ThinkingPart; object.__setattr__
                    # bypasses any frozen-dataclass protection.
                    if thought_signature and not is_gemini:
                        for existing_part in reversed(self._parts_manager._parts):
                            if isinstance(existing_part, ThinkingPart):
                                object.__setattr__(
                                    existing_part, "signature", thought_signature
                                )
                                break

                # Handle regular text (skip empty deltas entirely)
                elif part.get("text") is not None and not part.get("thought"):
                    text = part["text"]
                    if len(text) == 0:
                        continue
                    event = self._parts_manager.handle_text_delta(
                        vendor_part_id=None,
                        content=text,
                    )
                    if event:
                        yield event

                # Handle function call
                elif part.get("functionCall"):
                    fc = part["functionCall"]

                    # For Gemini: signature on function call belongs to previous thinking
                    if is_gemini and thought_signature:
                        for existing_part in reversed(self._parts_manager._parts):
                            if isinstance(existing_part, ThinkingPart):
                                object.__setattr__(
                                    existing_part, "signature", thought_signature
                                )
                                break

                    # Fresh vendor_part_id per call so parallel calls never merge;
                    # fall back to a generated id when the API omits one.
                    event = self._parts_manager.handle_tool_call_delta(
                        vendor_part_id=uuid4(),
                        tool_name=fc.get("name"),
                        args=fc.get("args"),
                        tool_call_id=fc.get("id") or generate_tool_call_id(),
                    )
                    if event:
                        yield event

    @property
    def model_name(self) -> str:
        """The model name this stream was produced by."""
        return self._model_name_str

    @property
    def provider_name(self) -> str | None:
        """The provider/system name (defaults to "google")."""
        return self._provider_name_str

    @property
    def timestamp(self) -> datetime:
        """UTC timestamp captured when this response object was created."""
        return self._timestamp_val
|
|
522
|
+
|
|
523
|
+
|
|
524
|
+
def _antigravity_content_model_response(
    m: ModelResponse, provider_name: str, model_name: str = ""
) -> ContentDict | None:
    """Custom serializer for Antigravity that preserves ThinkingPart signatures.

    Handles different signature protocols:
    - Claude models: signature goes ON the thinking block itself
    - Gemini models: signature goes on the NEXT part after thinking

    Returns ``None`` when no part of ``m`` produced serializable content,
    otherwise a ``ContentDict`` with role ``"model"``.
    """
    parts: list[PartDict] = []

    # Protocol selection is name-based; a model matching neither falls
    # through to the generic branch (signature kept on the thinking block).
    is_claude = "claude" in model_name.lower()
    is_gemini = "gemini" in model_name.lower()

    # Gemini only: a ThinkingPart's signature is held here and emitted on a
    # LATER serialized part instead of on the thinking part itself.
    pending_signature: str | None = None

    for item in m.parts:
        part: PartDict = {}

        if isinstance(item, ToolCallPart):
            function_call = FunctionCallDict(
                name=item.tool_name, args=item.args_as_dict(), id=item.tool_call_id
            )
            part["function_call"] = function_call

            # For Gemini: ALWAYS attach a thoughtSignature to function calls
            # (a bypass placeholder when no real signature is pending).
            # NOTE(review): unlike the TextPart branch below, pending_signature
            # is NOT cleared after being consumed here, so a real signature can
            # be reattached to every subsequent function call — confirm this is
            # intended (e.g. for parallel tool calls).
            if is_gemini:
                part["thoughtSignature"] = (
                    pending_signature
                    if pending_signature is not None
                    else BYPASS_THOUGHT_SIGNATURE
                )

        elif isinstance(item, TextPart):
            part["text"] = item.content

            # Gemini: the first text part after a thinking block receives the
            # pending signature and consumes it.
            if is_gemini and pending_signature is not None:
                part["thoughtSignature"] = pending_signature
                pending_signature = None

        elif isinstance(item, ThinkingPart):
            # Empty thinking content leaves `part` empty; it is then dropped
            # by the `if part:` guard at the bottom of the loop.
            if item.content:
                part["text"] = item.content
                part["thought"] = True

                # Try to use original signature first. If the API rejects it
                # (Gemini: "Corrupted thought signature", Claude: "thinking.signature: Field required"),
                # we'll backfill with bypass signatures and retry.
                if item.signature:
                    if is_claude:
                        # Claude expects signature ON the thinking block
                        part["thoughtSignature"] = item.signature
                    elif is_gemini:
                        # Gemini expects signature on the NEXT part
                        pending_signature = item.signature
                    else:
                        part["thoughtSignature"] = item.signature
                elif is_gemini:
                    # No stored signature: queue the bypass placeholder so the
                    # following part still carries a thoughtSignature.
                    pending_signature = BYPASS_THOUGHT_SIGNATURE

        elif isinstance(item, BuiltinToolCallPart):
            # Built-in tool calls are deliberately not serialized.
            pass

        elif isinstance(item, BuiltinToolReturnPart):
            # Built-in tool returns are deliberately not serialized.
            pass

        elif isinstance(item, FilePart):
            content = item.content
            data_val = content.data
            if isinstance(data_val, bytes):
                # Wire format expects base64 text rather than raw bytes.
                data_val = base64.b64encode(data_val).decode("utf-8")

            inline_data_dict: BlobDict = {
                "data": data_val,
                "mime_type": content.media_type,
            }
            part["inline_data"] = inline_data_dict
        else:
            # Exhaustiveness check: fails loudly if a new part type appears.
            assert_never(item)

        if part:
            parts.append(part)

    if not parts:
        return None
    return ContentDict(role="model", parts=parts)
|
|
610
|
+
|
|
611
|
+
|
|
612
|
+
def _antigravity_process_response_from_parts(
    parts: list[Any],
    grounding_metadata: Any | None,
    model_name: str,
    provider_name: str,
    usage: RequestUsage,
    vendor_id: str | None,
    vendor_details: dict[str, Any] | None = None,
) -> ModelResponse:
    """Custom response parser that extracts signatures from ThinkingParts."""
    is_gemini = "gemini" in str(model_name).lower()

    def _read(source: Any, name: str) -> Any:
        # Uniform field access for dict-shaped and attribute-shaped parts.
        if isinstance(source, dict):
            return source.get(name)
        return getattr(source, name, None)

    # Pass 1: normalize every raw part into a plain record, pulling the
    # thought signature out of any of the spellings the API may use.
    staged: list[dict[str, Any]] = []
    for raw in parts:
        signature = _read(raw, "thoughtSignature") or _read(raw, "thought_signature")

        details = _read(raw, "provider_details")
        if not signature and details:
            signature = details.get("thought_signature") or details.get(
                "thoughtSignature"
            )

        staged.append(
            {
                "text": _read(raw, "text"),
                "thought": _read(raw, "thought"),
                "function_call": _read(raw, "functionCall")
                or _read(raw, "function_call"),
                "signature": signature,
            }
        )

    # Pass 2 (Gemini only): a signature that arrived on the part AFTER a
    # thinking block actually belongs to that thinking block.
    if is_gemini:
        for idx, record in enumerate(staged):
            if not record["thought"] or record["signature"]:
                continue
            if idx + 1 >= len(staged):
                continue
            borrowed = staged[idx + 1].get("signature")
            if borrowed:
                record["signature"] = borrowed

    # Pass 3: materialize the records as ModelResponsePart objects.
    converted: list[ModelResponsePart] = []
    for record in staged:
        if record["text"] is not None:
            if record["thought"]:
                converted.append(
                    ThinkingPart(content=record["text"], signature=record["signature"])
                )
            else:
                converted.append(TextPart(content=record["text"]))

        elif record["function_call"]:
            call = record["function_call"]
            converted.append(
                ToolCallPart(
                    tool_name=_read(call, "name"),
                    args=_read(call, "args"),
                    tool_call_id=_read(call, "id") or generate_tool_call_id(),
                )
            )

    return ModelResponse(
        parts=converted,
        model_name=model_name,
        usage=usage,
        provider_response_id=vendor_id,
        provider_details=vendor_details,
        provider_name=provider_name,
    )
|
|
696
|
+
|
|
697
|
+
|
|
698
|
+
def _backfill_thought_signatures(messages: list[ModelMessage]) -> None:
    """Backfill all thinking parts with the bypass signature."""
    responses = (msg for msg in messages if isinstance(msg, ModelResponse))
    for response in responses:
        for part in response.parts:
            if not isinstance(part, ThinkingPart):
                continue
            # ThinkingPart may be frozen; bypass __setattr__ deliberately.
            object.__setattr__(part, "signature", BYPASS_THOUGHT_SIGNATURE)
|