code-puppy 0.0.325-py3-none-any.whl → 0.0.341-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. code_puppy/agents/base_agent.py +110 -124
  2. code_puppy/claude_cache_client.py +208 -2
  3. code_puppy/cli_runner.py +152 -32
  4. code_puppy/command_line/add_model_menu.py +4 -0
  5. code_puppy/command_line/autosave_menu.py +23 -24
  6. code_puppy/command_line/clipboard.py +527 -0
  7. code_puppy/command_line/colors_menu.py +5 -0
  8. code_puppy/command_line/config_commands.py +24 -1
  9. code_puppy/command_line/core_commands.py +85 -0
  10. code_puppy/command_line/diff_menu.py +5 -0
  11. code_puppy/command_line/mcp/custom_server_form.py +4 -0
  12. code_puppy/command_line/mcp/install_menu.py +5 -1
  13. code_puppy/command_line/model_settings_menu.py +5 -0
  14. code_puppy/command_line/motd.py +13 -7
  15. code_puppy/command_line/onboarding_slides.py +180 -0
  16. code_puppy/command_line/onboarding_wizard.py +340 -0
  17. code_puppy/command_line/prompt_toolkit_completion.py +118 -0
  18. code_puppy/config.py +3 -2
  19. code_puppy/http_utils.py +201 -279
  20. code_puppy/keymap.py +10 -8
  21. code_puppy/mcp_/managed_server.py +7 -11
  22. code_puppy/messaging/messages.py +3 -0
  23. code_puppy/messaging/rich_renderer.py +114 -22
  24. code_puppy/model_factory.py +102 -15
  25. code_puppy/models.json +2 -2
  26. code_puppy/plugins/antigravity_oauth/__init__.py +10 -0
  27. code_puppy/plugins/antigravity_oauth/accounts.py +406 -0
  28. code_puppy/plugins/antigravity_oauth/antigravity_model.py +668 -0
  29. code_puppy/plugins/antigravity_oauth/config.py +42 -0
  30. code_puppy/plugins/antigravity_oauth/constants.py +136 -0
  31. code_puppy/plugins/antigravity_oauth/oauth.py +478 -0
  32. code_puppy/plugins/antigravity_oauth/register_callbacks.py +406 -0
  33. code_puppy/plugins/antigravity_oauth/storage.py +271 -0
  34. code_puppy/plugins/antigravity_oauth/test_plugin.py +319 -0
  35. code_puppy/plugins/antigravity_oauth/token.py +167 -0
  36. code_puppy/plugins/antigravity_oauth/transport.py +664 -0
  37. code_puppy/plugins/antigravity_oauth/utils.py +169 -0
  38. code_puppy/plugins/chatgpt_oauth/register_callbacks.py +2 -0
  39. code_puppy/plugins/claude_code_oauth/register_callbacks.py +2 -0
  40. code_puppy/plugins/claude_code_oauth/utils.py +126 -7
  41. code_puppy/reopenable_async_client.py +8 -8
  42. code_puppy/terminal_utils.py +295 -3
  43. code_puppy/tools/command_runner.py +43 -54
  44. code_puppy/tools/common.py +3 -9
  45. code_puppy/uvx_detection.py +242 -0
  46. {code_puppy-0.0.325.data → code_puppy-0.0.341.data}/data/code_puppy/models.json +2 -2
  47. {code_puppy-0.0.325.dist-info → code_puppy-0.0.341.dist-info}/METADATA +26 -49
  48. {code_puppy-0.0.325.dist-info → code_puppy-0.0.341.dist-info}/RECORD +52 -36
  49. {code_puppy-0.0.325.data → code_puppy-0.0.341.data}/data/code_puppy/models_dev_api.json +0 -0
  50. {code_puppy-0.0.325.dist-info → code_puppy-0.0.341.dist-info}/WHEEL +0 -0
  51. {code_puppy-0.0.325.dist-info → code_puppy-0.0.341.dist-info}/entry_points.txt +0 -0
  52. {code_puppy-0.0.325.dist-info → code_puppy-0.0.341.dist-info}/licenses/LICENSE +0 -0
code_puppy/plugins/antigravity_oauth/antigravity_model.py (new file; entry 28 in the list above)
@@ -0,0 +1,668 @@
+ from __future__ import annotations
+
+ import base64
+ import json
+ import logging
+ from collections.abc import AsyncIterator
+ from contextlib import asynccontextmanager
+ from dataclasses import dataclass, field
+ from datetime import datetime, timezone
+ from typing import Any
+ from uuid import uuid4
+
+ from pydantic_ai._run_context import RunContext
+ from pydantic_ai.messages import (
+     BuiltinToolCallPart,
+     BuiltinToolReturnPart,
+     FilePart,
+     ModelMessage,
+     ModelRequest,
+     ModelResponse,
+     ModelResponsePart,
+     RetryPromptPart,
+     SystemPromptPart,
+     TextPart,
+     ThinkingPart,
+     ToolCallPart,
+     ToolReturnPart,
+     UserPromptPart,
+ )
+ from typing_extensions import assert_never
+
+ # Define types locally if needed to avoid import errors
+ try:
+     from pydantic_ai.messages import BlobDict, ContentDict, FunctionCallDict, PartDict
+ except ImportError:
+     ContentDict = dict[str, Any]
+     PartDict = dict[str, Any]
+     FunctionCallDict = dict[str, Any]
+     BlobDict = dict[str, Any]
+
+ from pydantic_ai.messages import ModelResponseStreamEvent
+ from pydantic_ai.models import ModelRequestParameters, StreamedResponse
+ from pydantic_ai.models.google import GoogleModel, GoogleModelName, _utils
+ from pydantic_ai.settings import ModelSettings
+ from pydantic_ai.usage import RequestUsage
+
+ logger = logging.getLogger(__name__)
+
+
+ class AntigravityModel(GoogleModel):
+     """Custom GoogleModel that correctly handles Claude thinking signatures via Antigravity."""
+
+     async def _map_messages(
+         self,
+         messages: list[ModelMessage],
+         model_request_parameters: ModelRequestParameters,
+     ) -> tuple[ContentDict | None, list[dict]]:
+         """Map messages to Google GenAI format, preserving thinking signatures.
+
+         IMPORTANT: For Gemini with parallel function calls, the API expects:
+         - Model message: [FC1 + signature, FC2, ...] (all function calls together)
+         - User message: [FR1, FR2, ...] (all function responses together)
+
+         If messages are interleaved (FC1, FR1, FC2, FR2), the API returns 400.
+         This method merges consecutive same-role messages to fix this.
+         """
+         contents: list[dict] = []
+         system_parts: list[PartDict] = []
+
+         for m in messages:
+             if isinstance(m, ModelRequest):
+                 message_parts: list[PartDict] = []
+
+                 for part in m.parts:
+                     if isinstance(part, SystemPromptPart):
+                         system_parts.append({"text": part.content})
+                     elif isinstance(part, UserPromptPart):
+                         # Use parent's _map_user_prompt
+                         mapped_parts = await self._map_user_prompt(part)
+                         # Sanitize bytes to base64 for JSON serialization
+                         for mp in mapped_parts:
+                             if "inline_data" in mp and "data" in mp["inline_data"]:
+                                 data = mp["inline_data"]["data"]
+                                 if isinstance(data, bytes):
+                                     mp["inline_data"]["data"] = base64.b64encode(
+                                         data
+                                     ).decode("utf-8")
+                         message_parts.extend(mapped_parts)
+                     elif isinstance(part, ToolReturnPart):
+                         message_parts.append(
+                             {
+                                 "function_response": {
+                                     "name": part.tool_name,
+                                     "response": part.model_response_object(),
+                                     "id": part.tool_call_id,
+                                 }
+                             }
+                         )
+                     elif isinstance(part, RetryPromptPart):
+                         if part.tool_name is None:
+                             message_parts.append({"text": part.model_response()})
+                         else:
+                             message_parts.append(
+                                 {
+                                     "function_response": {
+                                         "name": part.tool_name,
+                                         "response": {"error": part.model_response()},
+                                         "id": part.tool_call_id,
+                                     }
+                                 }
+                             )
+                     else:
+                         assert_never(part)
+
+                 if message_parts:
+                     # Merge with previous user message if exists (for parallel function responses)
+                     if contents and contents[-1].get("role") == "user":
+                         contents[-1]["parts"].extend(message_parts)
+                     else:
+                         contents.append({"role": "user", "parts": message_parts})
+
+             elif isinstance(m, ModelResponse):
+                 # USE CUSTOM HELPER HERE
+                 # Pass model name so we can handle Claude vs Gemini signature placement
+                 maybe_content = _antigravity_content_model_response(
+                     m, self.system, self._model_name
+                 )
+                 if maybe_content:
+                     # Merge with previous model message if exists (for parallel function calls)
+                     if contents and contents[-1].get("role") == "model":
+                         contents[-1]["parts"].extend(maybe_content["parts"])
+                     else:
+                         contents.append(maybe_content)
+             else:
+                 assert_never(m)
+
+         # Google GenAI requires at least one part in the message.
+         if not contents:
+             contents = [{"role": "user", "parts": [{"text": ""}]}]
+
+         if instructions := self._get_instructions(messages, model_request_parameters):
+             system_parts.insert(0, {"text": instructions})
+         system_instruction = (
+             ContentDict(role="user", parts=system_parts) if system_parts else None
+         )
+
+         return system_instruction, contents
+
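To make the merge behaviour concrete, here is a sketch of the `contents` value `_map_messages` would produce for two parallel tool calls; the tool names, IDs, and signature values are illustrative, not captured from the package:

    # The interleaved history FC1, FR1, FC2, FR2 collapses into a single
    # "model" message holding both function calls and a single "user"
    # message holding both function responses, avoiding the 400 described
    # in the docstring above.
    contents = [
        {
            "role": "model",
            "parts": [
                {
                    "function_call": {"name": "read_file", "args": {"path": "a.py"}, "id": "c1"},
                    "thoughtSignature": "sig-1",
                },
                {
                    "function_call": {"name": "read_file", "args": {"path": "b.py"}, "id": "c2"},
                    "thoughtSignature": "sig-1",
                },
            ],
        },
        {
            "role": "user",
            "parts": [
                {"function_response": {"name": "read_file", "response": {"return_value": "..."}, "id": "c1"}},
                {"function_response": {"name": "read_file", "response": {"return_value": "..."}, "id": "c2"}},
            ],
        },
    ]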
+     async def request(
+         self,
+         messages: list[ModelMessage],
+         model_settings: ModelSettings | None,
+         model_request_parameters: ModelRequestParameters,
+     ) -> ModelResponse:
+         """Override request to use direct HTTP calls, bypassing google-genai validation."""
+         # Prepare request (normalizes settings)
+         model_settings, model_request_parameters = self.prepare_request(
+             model_settings, model_request_parameters
+         )
+
+         system_instruction, contents = await self._map_messages(
+             messages, model_request_parameters
+         )
+
+         # Build generation config from model settings
+         gen_config: dict[str, Any] = {}
+         if model_settings:
+             if (
+                 hasattr(model_settings, "temperature")
+                 and model_settings.temperature is not None
+             ):
+                 gen_config["temperature"] = model_settings.temperature
+             if hasattr(model_settings, "top_p") and model_settings.top_p is not None:
+                 gen_config["topP"] = model_settings.top_p
+             if (
+                 hasattr(model_settings, "max_tokens")
+                 and model_settings.max_tokens is not None
+             ):
+                 gen_config["maxOutputTokens"] = model_settings.max_tokens
+
+         # Build JSON body manually to ensure thoughtSignature is preserved
+         body: dict[str, Any] = {
+             "contents": contents,
+         }
+         if gen_config:
+             body["generationConfig"] = gen_config
+         if system_instruction:
+             body["systemInstruction"] = system_instruction
+
+         # Serialize tools manually
+         if model_request_parameters.function_tools:
+             funcs = []
+             for t in model_request_parameters.function_tools:
+                 funcs.append(
+                     {
+                         "name": t.name,
+                         "description": t.description,
+                         "parameters": t.parameters_json_schema,
+                     }
+                 )
+             body["tools"] = [{"functionDeclarations": funcs}]
+
+         # Use the http_client from the google-genai client directly
+         # This bypasses google-genai library's strict validation/serialization
+         # Path: self.client._api_client._async_httpx_client
+         try:
+             client = self.client._api_client._async_httpx_client
+         except AttributeError:
+             raise RuntimeError(
+                 "AntigravityModel requires access to the underlying httpx client"
+             )
+         url = f"/models/{self._model_name}:generateContent"
+
+         # Send request
+         response = await client.post(url, json=body)
+
+         if response.status_code != 200:
+             raise RuntimeError(
+                 f"Antigravity API Error {response.status_code}: {response.text}"
+             )
+
+         data = response.json()
+
+         # Extract candidates
+         candidates = data.get("candidates", [])
+         if not candidates:
+             # Handle empty response or safety block?
+             return ModelResponse(
+                 parts=[TextPart(content="")],
+                 model_name=self._model_name,
+                 usage=RequestUsage(),
+             )
+
+         candidate = candidates[0]
+         content = candidate.get("content", {})
+         parts = content.get("parts", [])
+
+         # Extract usage
+         usage_meta = data.get("usageMetadata", {})
+         usage = RequestUsage(
+             input_tokens=usage_meta.get("promptTokenCount", 0),
+             output_tokens=usage_meta.get("candidatesTokenCount", 0),
+         )
+
+         return _antigravity_process_response_from_parts(
+             parts,
+             candidate.get("groundingMetadata"),
+             self._model_name,
+             self.system,
+             usage,
+             vendor_id=data.get("requestId"),
+         )
+
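For reference, the hand-built JSON body that `request()` posts to `/models/{model}:generateContent` ends up shaped roughly like this; only the keys come from the code above, the values are illustrative:

    body = {
        "contents": [{"role": "user", "parts": [{"text": "List the files"}]}],
        "generationConfig": {"temperature": 0.2, "topP": 0.9, "maxOutputTokens": 1024},
        "systemInstruction": {"role": "user", "parts": [{"text": "You are code puppy."}]},
        "tools": [
            {
                "functionDeclarations": [
                    {
                        "name": "list_files",
                        "description": "List files in a directory",
                        "parameters": {"type": "object", "properties": {}},
                    }
                ]
            }
        ],
    }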
+     @asynccontextmanager
+     async def request_stream(
+         self,
+         messages: list[ModelMessage],
+         model_settings: ModelSettings | None,
+         model_request_parameters: ModelRequestParameters,
+         run_context: RunContext[Any] | None = None,
+     ) -> AsyncIterator[StreamedResponse]:
+         """Override request_stream to use streaming with proper signature handling."""
+         # Prepare request
+         model_settings, model_request_parameters = self.prepare_request(
+             model_settings, model_request_parameters
+         )
+
+         system_instruction, contents = await self._map_messages(
+             messages, model_request_parameters
+         )
+
+         # Build generation config
+         gen_config: dict[str, Any] = {}
+         if model_settings:
+             if (
+                 hasattr(model_settings, "temperature")
+                 and model_settings.temperature is not None
+             ):
+                 gen_config["temperature"] = model_settings.temperature
+             if hasattr(model_settings, "top_p") and model_settings.top_p is not None:
+                 gen_config["topP"] = model_settings.top_p
+             if (
+                 hasattr(model_settings, "max_tokens")
+                 and model_settings.max_tokens is not None
+             ):
+                 gen_config["maxOutputTokens"] = model_settings.max_tokens
+
+         # Build request body
+         body: dict[str, Any] = {"contents": contents}
+         if gen_config:
+             body["generationConfig"] = gen_config
+         if system_instruction:
+             body["systemInstruction"] = system_instruction
+
+         # Add tools
+         if model_request_parameters.function_tools:
+             funcs = []
+             for t in model_request_parameters.function_tools:
+                 funcs.append(
+                     {
+                         "name": t.name,
+                         "description": t.description,
+                         "parameters": t.parameters_json_schema,
+                     }
+                 )
+             body["tools"] = [{"functionDeclarations": funcs}]
+
+         # Get httpx client
+         try:
+             client = self.client._api_client._async_httpx_client
+         except AttributeError:
+             raise RuntimeError(
+                 "AntigravityModel requires access to the underlying httpx client"
+             )
+
+         # Use streaming endpoint
+         url = f"/models/{self._model_name}:streamGenerateContent?alt=sse"
+
+         # Create async generator for SSE events
+         async def stream_chunks() -> AsyncIterator[dict[str, Any]]:
+             async with client.stream("POST", url, json=body) as response:
+                 if response.status_code != 200:
+                     text = await response.aread()
+                     raise RuntimeError(
+                         f"Antigravity API Error {response.status_code}: {text.decode()}"
+                     )
+
+                 async for line in response.aiter_lines():
+                     line = line.strip()
+                     if not line:
+                         continue
+                     if line.startswith("data: "):
+                         json_str = line[6:]  # Remove 'data: ' prefix
+                         if json_str:
+                             try:
+                                 yield json.loads(json_str)
+                             except json.JSONDecodeError:
+                                 continue
+
+         # Create streaming response
+         streamed = AntigravityStreamingResponse(
+             model_request_parameters=model_request_parameters,
+             _chunks=stream_chunks(),
+             _model_name_str=self._model_name,
+             _provider_name_str=self.system,
+         )
+         yield streamed
+
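The `stream_chunks()` generator consumes standard SSE framing. A self-contained sketch of the same parsing over fabricated payload lines (no real API response is reproduced here):

    import json

    sample = [
        'data: {"candidates": [{"content": {"parts": [{"text": "Hel"}]}}]}',
        "",
        'data: {"candidates": [{"content": {"parts": [{"text": "lo"}]}}], "usageMetadata": {"promptTokenCount": 12, "candidatesTokenCount": 2}}',
    ]

    for line in sample:
        line = line.strip()
        if line.startswith("data: "):
            # Same framing rule as stream_chunks(): strip the 'data: '
            # prefix and JSON-decode the remainder.
            chunk = json.loads(line[6:])
            print(chunk["candidates"][0]["content"]["parts"])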
+
+ @dataclass
+ class AntigravityStreamingResponse(StreamedResponse):
+     """Real streaming response that processes SSE chunks as they arrive."""
+
+     _chunks: AsyncIterator[dict[str, Any]]
+     _model_name_str: str
+     _provider_name_str: str = "google"
+     _timestamp_val: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+
+     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
+         """Process streaming chunks and yield events."""
+         is_gemini = "gemini" in self._model_name_str.lower()
+         pending_signature: str | None = None
+
+         async for chunk in self._chunks:
+             # Extract usage from chunk
+             usage_meta = chunk.get("usageMetadata", {})
+             if usage_meta:
+                 self._usage = RequestUsage(
+                     input_tokens=usage_meta.get("promptTokenCount", 0),
+                     output_tokens=usage_meta.get("candidatesTokenCount", 0),
+                 )
+
+             # Extract response ID
+             if chunk.get("responseId"):
+                 self.provider_response_id = chunk["responseId"]
+
+             candidates = chunk.get("candidates", [])
+             if not candidates:
+                 continue
+
+             candidate = candidates[0]
+             content = candidate.get("content", {})
+             parts = content.get("parts", [])
+
+             for part in parts:
+                 # Extract signature (for Gemini, it's on the functionCall part)
+                 thought_signature = part.get("thoughtSignature")
+                 if thought_signature:
+                     # For Gemini: if this is a function call with signature,
+                     # the signature belongs to the previous thinking block
+                     if is_gemini and pending_signature is None:
+                         pending_signature = thought_signature
+
+                 # Handle thought/thinking part
+                 if part.get("thought") and part.get("text") is not None:
+                     text = part["text"]
+
+                     event = self._parts_manager.handle_thinking_delta(
+                         vendor_part_id=None,
+                         content=text,
+                     )
+                     if event:
+                         yield event
+
+                     # For Claude: signature is ON the thinking block itself
+                     # We need to explicitly set it after the part is created
+                     if thought_signature and not is_gemini:
+                         for existing_part in reversed(self._parts_manager._parts):
+                             if isinstance(existing_part, ThinkingPart):
+                                 object.__setattr__(
+                                     existing_part, "signature", thought_signature
+                                 )
+                                 break
+
+                 # Handle regular text
+                 elif part.get("text") is not None and not part.get("thought"):
+                     text = part["text"]
+                     if len(text) == 0:
+                         continue
+                     event = self._parts_manager.handle_text_delta(
+                         vendor_part_id=None,
+                         content=text,
+                     )
+                     if event:
+                         yield event
+
+                 # Handle function call
+                 elif part.get("functionCall"):
+                     fc = part["functionCall"]
+
+                     # For Gemini: the signature on a function call belongs to the
+                     # PREVIOUS thinking block. We need to retroactively set it.
+                     if is_gemini and thought_signature:
+                         # Find the most recent ThinkingPart and set its signature
+                         for existing_part in reversed(self._parts_manager._parts):
+                             if isinstance(existing_part, ThinkingPart):
+                                 # Directly set the signature attribute
+                                 object.__setattr__(
+                                     existing_part, "signature", thought_signature
+                                 )
+                                 break
+
+                     event = self._parts_manager.handle_tool_call_delta(
+                         vendor_part_id=uuid4(),
+                         tool_name=fc.get("name"),
+                         args=fc.get("args"),
+                         tool_call_id=fc.get("id") or _utils.generate_tool_call_id(),
+                     )
+                     if event:
+                         yield event
+
+     @property
+     def model_name(self) -> str:
+         return self._model_name_str
+
+     @property
+     def provider_name(self) -> str | None:
+         return self._provider_name_str
+
+     @property
+     def timestamp(self) -> datetime:
+         return self._timestamp_val
+
+
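To illustrate the Gemini branch of `_get_event_iterator`: in a stream shaped like the fabricated chunks below, the `thoughtSignature` arrives on the `functionCall` part, so the iterator back-fills it onto the ThinkingPart it created from the earlier `thought` chunk:

    chunks = [
        {"candidates": [{"content": {"parts": [
            {"text": "Plan: read the file first.", "thought": True},
        ]}}]},
        {"candidates": [{"content": {"parts": [
            {"functionCall": {"name": "read_file", "args": {"path": "a.py"}},
             "thoughtSignature": "sig-123"},
        ]}}]},
    ]

For Claude-style streams the signature instead arrives on the thinking part itself and is attached directly.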
+ # Bypass signature for when no real thought signature is available.
+ # Gemini API requires EVERY function call to have a thoughtSignature field.
+ # When there's no thinking block or no signature was captured, we use this bypass.
+ # This specific key is the official bypass token for Gemini 3 Pro.
+ BYPASS_THOUGHT_SIGNATURE = "context_engineering_is_the_way_to_go"
+
+
+ def _antigravity_content_model_response(
+     m: ModelResponse, provider_name: str, model_name: str = ""
+ ) -> ContentDict | None:
+     """Custom serializer for Antigravity that preserves ThinkingPart signatures.
+
+     Handles different signature protocols:
+     - Claude models: signature goes ON the thinking block itself
+     - Gemini models: signature goes on the NEXT part (function_call or text) after thinking
+
+     IMPORTANT: For Gemini, EVERY function call MUST have a thoughtSignature field.
+     If no real signature is available (no preceding ThinkingPart, or ThinkingPart
+     had no signature), we use BYPASS_THOUGHT_SIGNATURE as a fallback.
+     """
+     parts: list[PartDict] = []
+
+     # Determine which protocol to use based on model name
+     is_claude = "claude" in model_name.lower()
+     is_gemini = "gemini" in model_name.lower()
+
+     # For Gemini: save signature from ThinkingPart to attach to next part
+     # Initialize to None - we'll use BYPASS_THOUGHT_SIGNATURE if still None when needed
+     pending_signature: str | None = None
+
+     for item in m.parts:
+         part: PartDict = {}
+
+         if isinstance(item, ToolCallPart):
+             function_call = FunctionCallDict(
+                 name=item.tool_name, args=item.args_as_dict(), id=item.tool_call_id
+             )
+             part["function_call"] = function_call
+
+             # For Gemini: ALWAYS attach a thoughtSignature to function calls.
+             # Use the real signature if available, otherwise use bypass.
+             # NOTE: Do NOT clear pending_signature here! Multiple tool calls
+             # in a row (e.g., parallel function calls) all need the same
+             # signature from the preceding ThinkingPart.
+             if is_gemini:
+                 part["thoughtSignature"] = (
+                     pending_signature
+                     if pending_signature is not None
+                     else BYPASS_THOUGHT_SIGNATURE
+                 )
+
+         elif isinstance(item, TextPart):
+             part["text"] = item.content
+
+             # For Gemini: attach pending signature to text part if available
+             # Clear signature after text since text typically ends a response
+             if is_gemini and pending_signature is not None:
+                 part["thoughtSignature"] = pending_signature
+                 pending_signature = None
+
+         elif isinstance(item, ThinkingPart):
+             if item.content:
+                 part["text"] = item.content
+                 part["thought"] = True
+
+             if item.signature:
+                 if is_claude:
+                     # Claude: signature goes ON the thinking block
+                     part["thoughtSignature"] = item.signature
+                 elif is_gemini:
+                     # Gemini: save signature for NEXT part
+                     pending_signature = item.signature
+                 else:
+                     # Default: try both (put on thinking block)
+                     part["thoughtSignature"] = item.signature
+             elif is_gemini:
+                 # ThinkingPart exists but has no signature - use bypass
+                 # This ensures subsequent tool calls still get a signature
+                 pending_signature = BYPASS_THOUGHT_SIGNATURE
+
+         elif isinstance(item, BuiltinToolCallPart):
+             # Skip code execution for now
+             pass
+
+         elif isinstance(item, BuiltinToolReturnPart):
+             # Skip code execution result
+             pass
+
+         elif isinstance(item, FilePart):
+             content = item.content
+             # Ensure data is base64 string, not bytes
+             data_val = content.data
+             if isinstance(data_val, bytes):
+                 data_val = base64.b64encode(data_val).decode("utf-8")
+
+             inline_data_dict: BlobDict = {
+                 "data": data_val,
+                 "mime_type": content.media_type,
+             }
+             part["inline_data"] = inline_data_dict
+         else:
+             assert_never(item)
+
+         if part:
+             parts.append(part)
+
+     if not parts:
+         return None
+     return ContentDict(role="model", parts=parts)
+
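Side by side, the two serialization protocols the function implements, for the same ThinkingPart-then-ToolCallPart response (signature and tool values are illustrative):

    # Claude ("claude" in model name): the signature stays ON the thought part.
    claude_parts = [
        {"text": "I should read the file.", "thought": True, "thoughtSignature": "sig-123"},
        {"function_call": {"name": "read_file", "args": {"path": "a.py"}, "id": "c1"}},
    ]

    # Gemini ("gemini" in model name): the signature moves to the NEXT part;
    # a function call with no preceding signature gets BYPASS_THOUGHT_SIGNATURE.
    gemini_parts = [
        {"text": "I should read the file.", "thought": True},
        {"function_call": {"name": "read_file", "args": {"path": "a.py"}, "id": "c1"},
         "thoughtSignature": "sig-123"},
    ]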
+
+ def _antigravity_process_response_from_parts(
+     parts: list[Any],  # dicts or objects
+     grounding_metadata: Any | None,
+     model_name: GoogleModelName,
+     provider_name: str,
+     usage: RequestUsage,
+     vendor_id: str | None,
+     vendor_details: dict[str, Any] | None = None,
+ ) -> ModelResponse:
+     """Custom response parser that extracts signatures from ThinkingParts.
+
+     Handles different signature protocols:
+     - Claude: signature is ON the thinking block
+     - Gemini: signature is on the NEXT part after thinking (we associate it back)
+     """
+     items: list[ModelResponsePart] = []
+
+     is_gemini = "gemini" in str(model_name).lower()
+
+     # Helper to get attribute from dict or object
+     def get_attr(obj, attr):
+         if isinstance(obj, dict):
+             return obj.get(attr)
+         return getattr(obj, attr, None)
+
+     # First pass: collect all parts and their signatures
+     parsed_parts = []
+     for part in parts:
+         thought_signature = get_attr(part, "thoughtSignature") or get_attr(
+             part, "thought_signature"
+         )
+
+         # Also check provider details
+         pd = get_attr(part, "provider_details")
+         if not thought_signature and pd:
+             thought_signature = pd.get("thought_signature") or pd.get(
+                 "thoughtSignature"
+             )
+
+         text = get_attr(part, "text")
+         thought = get_attr(part, "thought")
+         # API returns camelCase 'functionCall'
+         function_call = get_attr(part, "functionCall") or get_attr(
+             part, "function_call"
+         )
+
+         parsed_parts.append(
+             {
+                 "text": text,
+                 "thought": thought,
+                 "function_call": function_call,
+                 "signature": thought_signature,
+             }
+         )
+
+     # Second pass: for Gemini, associate signatures from next parts with thinking blocks
+     if is_gemini:
+         for i, pp in enumerate(parsed_parts):
+             if pp["thought"] and not pp["signature"]:
+                 # Look at next part for signature
+                 if i + 1 < len(parsed_parts):
+                     next_sig = parsed_parts[i + 1].get("signature")
+                     if next_sig:
+                         pp["signature"] = next_sig
+
+     # Third pass: create ModelResponsePart objects
+     for pp in parsed_parts:
+         if pp["text"] is not None:
+             if pp["thought"]:
+                 items.append(
+                     ThinkingPart(content=pp["text"], signature=pp["signature"])
+                 )
+             else:
+                 items.append(TextPart(content=pp["text"]))
+
+         elif pp["function_call"]:
+             fc = pp["function_call"]
+             fc_name = get_attr(fc, "name")
+             fc_args = get_attr(fc, "args")
+             fc_id = get_attr(fc, "id") or _utils.generate_tool_call_id()
+
+             items.append(
+                 ToolCallPart(tool_name=fc_name, args=fc_args, tool_call_id=fc_id)
+             )
+
+     return ModelResponse(
+         parts=items,
+         model_name=model_name,
+         usage=usage,
+         provider_response_id=vendor_id,
+         provider_details=vendor_details,
+         provider_name=provider_name,
+     )
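A quick way to exercise the Gemini re-association, assuming the installed wheel exposes the module path below; the part values are fabricated:

    from code_puppy.plugins.antigravity_oauth.antigravity_model import (
        _antigravity_process_response_from_parts,
    )
    from pydantic_ai.usage import RequestUsage

    parts = [
        {"text": "Need to call a tool.", "thought": True},
        {"functionCall": {"name": "read_file", "args": {"path": "a.py"}, "id": "c1"},
         "thoughtSignature": "sig-123"},
    ]
    resp = _antigravity_process_response_from_parts(
        parts, None, "gemini-3-pro", "google", RequestUsage(), vendor_id=None
    )
    # The signature that arrived on the functionCall part is associated
    # back onto the ThinkingPart by the second pass.
    print(resp.parts[0].signature)  # sig-123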