code-puppy 0.0.325__py3-none-any.whl → 0.0.336__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. code_puppy/agents/base_agent.py +41 -103
  2. code_puppy/cli_runner.py +105 -2
  3. code_puppy/command_line/add_model_menu.py +4 -0
  4. code_puppy/command_line/autosave_menu.py +5 -0
  5. code_puppy/command_line/colors_menu.py +5 -0
  6. code_puppy/command_line/config_commands.py +24 -1
  7. code_puppy/command_line/core_commands.py +51 -0
  8. code_puppy/command_line/diff_menu.py +5 -0
  9. code_puppy/command_line/mcp/custom_server_form.py +4 -0
  10. code_puppy/command_line/mcp/install_menu.py +5 -1
  11. code_puppy/command_line/model_settings_menu.py +5 -0
  12. code_puppy/command_line/motd.py +13 -7
  13. code_puppy/command_line/onboarding_slides.py +180 -0
  14. code_puppy/command_line/onboarding_wizard.py +340 -0
  15. code_puppy/config.py +3 -2
  16. code_puppy/http_utils.py +155 -196
  17. code_puppy/keymap.py +10 -8
  18. code_puppy/messaging/rich_renderer.py +101 -19
  19. code_puppy/model_factory.py +86 -15
  20. code_puppy/plugins/antigravity_oauth/__init__.py +10 -0
  21. code_puppy/plugins/antigravity_oauth/accounts.py +406 -0
  22. code_puppy/plugins/antigravity_oauth/antigravity_model.py +653 -0
  23. code_puppy/plugins/antigravity_oauth/config.py +42 -0
  24. code_puppy/plugins/antigravity_oauth/constants.py +136 -0
  25. code_puppy/plugins/antigravity_oauth/oauth.py +478 -0
  26. code_puppy/plugins/antigravity_oauth/register_callbacks.py +406 -0
  27. code_puppy/plugins/antigravity_oauth/storage.py +271 -0
  28. code_puppy/plugins/antigravity_oauth/test_plugin.py +319 -0
  29. code_puppy/plugins/antigravity_oauth/token.py +167 -0
  30. code_puppy/plugins/antigravity_oauth/transport.py +664 -0
  31. code_puppy/plugins/antigravity_oauth/utils.py +169 -0
  32. code_puppy/plugins/chatgpt_oauth/register_callbacks.py +2 -0
  33. code_puppy/plugins/claude_code_oauth/register_callbacks.py +2 -0
  34. code_puppy/reopenable_async_client.py +8 -8
  35. code_puppy/terminal_utils.py +168 -3
  36. code_puppy/tools/command_runner.py +42 -54
  37. code_puppy/uvx_detection.py +242 -0
  38. {code_puppy-0.0.325.dist-info → code_puppy-0.0.336.dist-info}/METADATA +30 -1
  39. {code_puppy-0.0.325.dist-info → code_puppy-0.0.336.dist-info}/RECORD +44 -29
  40. {code_puppy-0.0.325.data → code_puppy-0.0.336.data}/data/code_puppy/models.json +0 -0
  41. {code_puppy-0.0.325.data → code_puppy-0.0.336.data}/data/code_puppy/models_dev_api.json +0 -0
  42. {code_puppy-0.0.325.dist-info → code_puppy-0.0.336.dist-info}/WHEEL +0 -0
  43. {code_puppy-0.0.325.dist-info → code_puppy-0.0.336.dist-info}/entry_points.txt +0 -0
  44. {code_puppy-0.0.325.dist-info → code_puppy-0.0.336.dist-info}/licenses/LICENSE +0 -0
code_puppy/plugins/antigravity_oauth/antigravity_model.py
@@ -0,0 +1,653 @@
+ from __future__ import annotations
+ 
+ import json
+ import logging
+ from collections.abc import AsyncIterator
+ from contextlib import asynccontextmanager
+ from dataclasses import dataclass, field
+ from datetime import datetime, timezone
+ from typing import Any
+ from uuid import uuid4
+ 
+ from pydantic_ai._run_context import RunContext
+ from pydantic_ai.messages import (
+     BuiltinToolCallPart,
+     BuiltinToolReturnPart,
+     FilePart,
+     ModelMessage,
+     ModelRequest,
+     ModelResponse,
+     ModelResponsePart,
+     RetryPromptPart,
+     SystemPromptPart,
+     TextPart,
+     ThinkingPart,
+     ToolCallPart,
+     ToolReturnPart,
+     UserPromptPart,
+ )
+ from typing_extensions import assert_never
+ 
+ # Define types locally if needed to avoid import errors
+ try:
+     from pydantic_ai.messages import BlobDict, ContentDict, FunctionCallDict, PartDict
+ except ImportError:
+     ContentDict = dict[str, Any]
+     PartDict = dict[str, Any]
+     FunctionCallDict = dict[str, Any]
+     BlobDict = dict[str, Any]
+ 
+ from pydantic_ai.messages import ModelResponseStreamEvent
+ from pydantic_ai.models import ModelRequestParameters, StreamedResponse
+ from pydantic_ai.models.google import GoogleModel, GoogleModelName, _utils
+ from pydantic_ai.settings import ModelSettings
+ from pydantic_ai.usage import RequestUsage
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ class AntigravityModel(GoogleModel):
+     """Custom GoogleModel that correctly handles Claude thinking signatures via Antigravity."""
+ 
+     async def _map_messages(
+         self,
+         messages: list[ModelMessage],
+         model_request_parameters: ModelRequestParameters,
+     ) -> tuple[ContentDict | None, list[dict]]:
+         """Map messages to Google GenAI format, preserving thinking signatures.
+ 
+         IMPORTANT: For Gemini with parallel function calls, the API expects:
+         - Model message: [FC1 + signature, FC2, ...] (all function calls together)
+         - User message: [FR1, FR2, ...] (all function responses together)
+ 
+         If messages are interleaved (FC1, FR1, FC2, FR2), the API returns 400.
+         This method merges consecutive same-role messages to fix this.
+         """
+         contents: list[dict] = []
+         system_parts: list[PartDict] = []
+ 
+         for m in messages:
+             if isinstance(m, ModelRequest):
+                 message_parts: list[PartDict] = []
+ 
+                 for part in m.parts:
+                     if isinstance(part, SystemPromptPart):
+                         system_parts.append({"text": part.content})
+                     elif isinstance(part, UserPromptPart):
+                         # Use parent's _map_user_prompt
+                         message_parts.extend(await self._map_user_prompt(part))
+                     elif isinstance(part, ToolReturnPart):
+                         message_parts.append(
+                             {
+                                 "function_response": {
+                                     "name": part.tool_name,
+                                     "response": part.model_response_object(),
+                                     "id": part.tool_call_id,
+                                 }
+                             }
+                         )
+                     elif isinstance(part, RetryPromptPart):
+                         if part.tool_name is None:
+                             message_parts.append({"text": part.model_response()})
+                         else:
+                             message_parts.append(
+                                 {
+                                     "function_response": {
+                                         "name": part.tool_name,
+                                         "response": {"error": part.model_response()},
+                                         "id": part.tool_call_id,
+                                     }
+                                 }
+                             )
+                     else:
+                         assert_never(part)
+ 
+                 if message_parts:
+                     # Merge with previous user message if exists (for parallel function responses)
+                     if contents and contents[-1].get("role") == "user":
+                         contents[-1]["parts"].extend(message_parts)
+                     else:
+                         contents.append({"role": "user", "parts": message_parts})
+ 
+             elif isinstance(m, ModelResponse):
+                 # USE CUSTOM HELPER HERE
+                 # Pass model name so we can handle Claude vs Gemini signature placement
+                 maybe_content = _antigravity_content_model_response(
+                     m, self.system, self._model_name
+                 )
+                 if maybe_content:
+                     # Merge with previous model message if exists (for parallel function calls)
+                     if contents and contents[-1].get("role") == "model":
+                         contents[-1]["parts"].extend(maybe_content["parts"])
+                     else:
+                         contents.append(maybe_content)
+             else:
+                 assert_never(m)
+ 
+         # Google GenAI requires at least one part in the message.
+         if not contents:
+             contents = [{"role": "user", "parts": [{"text": ""}]}]
+ 
+         if instructions := self._get_instructions(messages, model_request_parameters):
+             system_parts.insert(0, {"text": instructions})
+         system_instruction = (
+             ContentDict(role="user", parts=system_parts) if system_parts else None
+         )
+ 
+         return system_instruction, contents
+ 
+     async def request(
+         self,
+         messages: list[ModelMessage],
+         model_settings: ModelSettings | None,
+         model_request_parameters: ModelRequestParameters,
+     ) -> ModelResponse:
+         """Override request to use direct HTTP calls, bypassing google-genai validation."""
+         # Prepare request (normalizes settings)
+         model_settings, model_request_parameters = self.prepare_request(
+             model_settings, model_request_parameters
+         )
+ 
+         system_instruction, contents = await self._map_messages(
+             messages, model_request_parameters
+         )
+ 
+         # Build generation config from model settings
+         gen_config: dict[str, Any] = {}
+         if model_settings:
+             if (
+                 hasattr(model_settings, "temperature")
+                 and model_settings.temperature is not None
+             ):
+                 gen_config["temperature"] = model_settings.temperature
+             if hasattr(model_settings, "top_p") and model_settings.top_p is not None:
+                 gen_config["topP"] = model_settings.top_p
+             if (
+                 hasattr(model_settings, "max_tokens")
+                 and model_settings.max_tokens is not None
+             ):
+                 gen_config["maxOutputTokens"] = model_settings.max_tokens
+ 
+         # Build JSON body manually to ensure thoughtSignature is preserved
+         body: dict[str, Any] = {
+             "contents": contents,
+         }
+         if gen_config:
+             body["generationConfig"] = gen_config
+         if system_instruction:
+             body["systemInstruction"] = system_instruction
+ 
+         # Serialize tools manually
+         if model_request_parameters.function_tools:
+             funcs = []
+             for t in model_request_parameters.function_tools:
+                 funcs.append(
+                     {
+                         "name": t.name,
+                         "description": t.description,
+                         "parameters": t.parameters_json_schema,
+                     }
+                 )
+             body["tools"] = [{"functionDeclarations": funcs}]
+ 
+         # Use the http_client from the google-genai client directly
+         # This bypasses google-genai library's strict validation/serialization
+         # Path: self.client._api_client._async_httpx_client
+         try:
+             client = self.client._api_client._async_httpx_client
+         except AttributeError:
+             raise RuntimeError(
+                 "AntigravityModel requires access to the underlying httpx client"
+             )
+         url = f"/models/{self._model_name}:generateContent"
+ 
+         # Send request
+         response = await client.post(url, json=body)
+ 
+         if response.status_code != 200:
+             raise RuntimeError(
+                 f"Antigravity API Error {response.status_code}: {response.text}"
+             )
+ 
+         data = response.json()
+ 
+         # Extract candidates
+         candidates = data.get("candidates", [])
+         if not candidates:
+             # Handle empty response or safety block?
+             return ModelResponse(
+                 parts=[TextPart(content="")],
+                 model_name=self._model_name,
+                 usage=RequestUsage(),
+             )
+ 
+         candidate = candidates[0]
+         content = candidate.get("content", {})
+         parts = content.get("parts", [])
+ 
+         # Extract usage
+         usage_meta = data.get("usageMetadata", {})
+         usage = RequestUsage(
+             input_tokens=usage_meta.get("promptTokenCount", 0),
+             output_tokens=usage_meta.get("candidatesTokenCount", 0),
+         )
+ 
+         return _antigravity_process_response_from_parts(
+             parts,
+             candidate.get("groundingMetadata"),
+             self._model_name,
+             self.system,
+             usage,
+             vendor_id=data.get("requestId"),
+         )
+ 
+     @asynccontextmanager
+     async def request_stream(
+         self,
+         messages: list[ModelMessage],
+         model_settings: ModelSettings | None,
+         model_request_parameters: ModelRequestParameters,
+         run_context: RunContext[Any] | None = None,
+     ) -> AsyncIterator[StreamedResponse]:
+         """Override request_stream to use streaming with proper signature handling."""
+         # Prepare request
+         model_settings, model_request_parameters = self.prepare_request(
+             model_settings, model_request_parameters
+         )
+ 
+         system_instruction, contents = await self._map_messages(
+             messages, model_request_parameters
+         )
+ 
+         # Build generation config
+         gen_config: dict[str, Any] = {}
+         if model_settings:
+             if (
+                 hasattr(model_settings, "temperature")
+                 and model_settings.temperature is not None
+             ):
+                 gen_config["temperature"] = model_settings.temperature
+             if hasattr(model_settings, "top_p") and model_settings.top_p is not None:
+                 gen_config["topP"] = model_settings.top_p
+             if (
+                 hasattr(model_settings, "max_tokens")
+                 and model_settings.max_tokens is not None
+             ):
+                 gen_config["maxOutputTokens"] = model_settings.max_tokens
+ 
+         # Build request body
+         body: dict[str, Any] = {"contents": contents}
+         if gen_config:
+             body["generationConfig"] = gen_config
+         if system_instruction:
+             body["systemInstruction"] = system_instruction
+ 
+         # Add tools
+         if model_request_parameters.function_tools:
+             funcs = []
+             for t in model_request_parameters.function_tools:
+                 funcs.append(
+                     {
+                         "name": t.name,
+                         "description": t.description,
+                         "parameters": t.parameters_json_schema,
+                     }
+                 )
+             body["tools"] = [{"functionDeclarations": funcs}]
+ 
+         # Get httpx client
+         try:
+             client = self.client._api_client._async_httpx_client
+         except AttributeError:
+             raise RuntimeError(
+                 "AntigravityModel requires access to the underlying httpx client"
+             )
+ 
+         # Use streaming endpoint
+         url = f"/models/{self._model_name}:streamGenerateContent?alt=sse"
+ 
+         # Create async generator for SSE events
+         async def stream_chunks() -> AsyncIterator[dict[str, Any]]:
+             async with client.stream("POST", url, json=body) as response:
+                 if response.status_code != 200:
+                     text = await response.aread()
+                     raise RuntimeError(
+                         f"Antigravity API Error {response.status_code}: {text.decode()}"
+                     )
+ 
+                 async for line in response.aiter_lines():
+                     line = line.strip()
+                     if not line:
+                         continue
+                     if line.startswith("data: "):
+                         json_str = line[6:]  # Remove 'data: ' prefix
+                         if json_str:
+                             try:
+                                 yield json.loads(json_str)
+                             except json.JSONDecodeError:
+                                 continue
+ 
+         # Create streaming response
+         streamed = AntigravityStreamingResponse(
+             model_request_parameters=model_request_parameters,
+             _chunks=stream_chunks(),
+             _model_name_str=self._model_name,
+             _provider_name_str=self.system,
+         )
+         yield streamed
+ 
+ 
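The `stream_chunks` generator inside `request_stream` does its own SSE framing: it skips keep-alive blanks, strips the `data: ` prefix, and tolerates malformed fragments. A minimal standalone sketch of the same framing logic, assuming only an `httpx.AsyncClient` and an endpoint that emits `data: {json}` lines (the function name and endpoint are illustrative, not part of the package):

    import json
    import httpx

    async def iter_sse_json(client: httpx.AsyncClient, url: str, body: dict):
        # Stream the response and decode each `data: ...` line as JSON,
        # skipping keep-alive blanks and partial/garbled events.
        async with client.stream("POST", url, json=body) as response:
            response.raise_for_status()
            async for line in response.aiter_lines():
                line = line.strip()
                if line.startswith("data: "):
                    payload = line[len("data: "):]
                    if payload:
                        try:
                            yield json.loads(payload)
                        except json.JSONDecodeError:
                            continue
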
+ @dataclass
+ class AntigravityStreamingResponse(StreamedResponse):
+     """Real streaming response that processes SSE chunks as they arrive."""
+ 
+     _chunks: AsyncIterator[dict[str, Any]]
+     _model_name_str: str
+     _provider_name_str: str = "google"
+     _timestamp_val: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ 
+     async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
+         """Process streaming chunks and yield events."""
+         is_gemini = "gemini" in self._model_name_str.lower()
+         pending_signature: str | None = None
+ 
+         async for chunk in self._chunks:
+             # Extract usage from chunk
+             usage_meta = chunk.get("usageMetadata", {})
+             if usage_meta:
+                 self._usage = RequestUsage(
+                     input_tokens=usage_meta.get("promptTokenCount", 0),
+                     output_tokens=usage_meta.get("candidatesTokenCount", 0),
+                 )
+ 
+             # Extract response ID
+             if chunk.get("responseId"):
+                 self.provider_response_id = chunk["responseId"]
+ 
+             candidates = chunk.get("candidates", [])
+             if not candidates:
+                 continue
+ 
+             candidate = candidates[0]
+             content = candidate.get("content", {})
+             parts = content.get("parts", [])
+ 
+             for part in parts:
+                 # Extract signature (for Gemini, it's on the functionCall part)
+                 thought_signature = part.get("thoughtSignature")
+                 if thought_signature:
+                     # For Gemini: if this is a function call with signature,
+                     # the signature belongs to the previous thinking block
+                     if is_gemini and pending_signature is None:
+                         pending_signature = thought_signature
+ 
+                 # Handle thought/thinking part
+                 if part.get("thought") and part.get("text") is not None:
+                     text = part["text"]
+ 
+                     event = self._parts_manager.handle_thinking_delta(
+                         vendor_part_id=None,
+                         content=text,
+                     )
+                     if event:
+                         yield event
+ 
+                     # For Claude: signature is ON the thinking block itself
+                     # We need to explicitly set it after the part is created
+                     if thought_signature and not is_gemini:
+                         for existing_part in reversed(self._parts_manager._parts):
+                             if isinstance(existing_part, ThinkingPart):
+                                 object.__setattr__(
+                                     existing_part, "signature", thought_signature
+                                 )
+                                 break
+ 
+                 # Handle regular text
+                 elif part.get("text") is not None and not part.get("thought"):
+                     text = part["text"]
+                     if len(text) == 0:
+                         continue
+                     event = self._parts_manager.handle_text_delta(
+                         vendor_part_id=None,
+                         content=text,
+                     )
+                     if event:
+                         yield event
+ 
+                 # Handle function call
+                 elif part.get("functionCall"):
+                     fc = part["functionCall"]
+ 
+                     # For Gemini: the signature on a function call belongs to the
+                     # PREVIOUS thinking block. We need to retroactively set it.
+                     if is_gemini and thought_signature:
+                         # Find the most recent ThinkingPart and set its signature
+                         for existing_part in reversed(self._parts_manager._parts):
+                             if isinstance(existing_part, ThinkingPart):
+                                 # Directly set the signature attribute
+                                 object.__setattr__(
+                                     existing_part, "signature", thought_signature
+                                 )
+                                 break
+ 
+                     event = self._parts_manager.handle_tool_call_delta(
+                         vendor_part_id=uuid4(),
+                         tool_name=fc.get("name"),
+                         args=fc.get("args"),
+                         tool_call_id=fc.get("id") or _utils.generate_tool_call_id(),
+                     )
+                     if event:
+                         yield event
+ 
+     @property
+     def model_name(self) -> str:
+         return self._model_name_str
+ 
+     @property
+     def provider_name(self) -> str | None:
+         return self._provider_name_str
+ 
+     @property
+     def timestamp(self) -> datetime:
+         return self._timestamp_val
+ 
+ 
+ # Bypass signature for when no real thought signature is available.
+ # Gemini API requires EVERY function call to have a thoughtSignature field.
+ # When there's no thinking block or no signature was captured, we use this bypass.
+ # This specific key is the official bypass token for Gemini 3 Pro.
+ BYPASS_THOUGHT_SIGNATURE = "context_engineering_is_the_way_to_go"
+ 
+ 
+ def _antigravity_content_model_response(
+     m: ModelResponse, provider_name: str, model_name: str = ""
+ ) -> ContentDict | None:
+     """Custom serializer for Antigravity that preserves ThinkingPart signatures.
+ 
+     Handles different signature protocols:
+     - Claude models: signature goes ON the thinking block itself
+     - Gemini models: signature goes on the NEXT part (function_call or text) after thinking
+ 
+     IMPORTANT: For Gemini, EVERY function call MUST have a thoughtSignature field.
+     If no real signature is available (no preceding ThinkingPart, or ThinkingPart
+     had no signature), we use BYPASS_THOUGHT_SIGNATURE as a fallback.
+     """
+     parts: list[PartDict] = []
+ 
+     # Determine which protocol to use based on model name
+     is_claude = "claude" in model_name.lower()
+     is_gemini = "gemini" in model_name.lower()
+ 
+     # For Gemini: save signature from ThinkingPart to attach to next part
+     # Initialize to None - we'll use BYPASS_THOUGHT_SIGNATURE if still None when needed
+     pending_signature: str | None = None
+ 
+     for item in m.parts:
+         part: PartDict = {}
+ 
+         if isinstance(item, ToolCallPart):
+             function_call = FunctionCallDict(
+                 name=item.tool_name, args=item.args_as_dict(), id=item.tool_call_id
+             )
+             part["function_call"] = function_call
+ 
+             # For Gemini: ALWAYS attach a thoughtSignature to function calls.
+             # Use the real signature if available, otherwise use bypass.
+             # NOTE: Do NOT clear pending_signature here! Multiple tool calls
+             # in a row (e.g., parallel function calls) all need the same
+             # signature from the preceding ThinkingPart.
+             if is_gemini:
+                 part["thoughtSignature"] = (
+                     pending_signature
+                     if pending_signature is not None
+                     else BYPASS_THOUGHT_SIGNATURE
+                 )
+ 
+         elif isinstance(item, TextPart):
+             part["text"] = item.content
+ 
+             # For Gemini: attach pending signature to text part if available
+             # Clear signature after text since text typically ends a response
+             if is_gemini and pending_signature is not None:
+                 part["thoughtSignature"] = pending_signature
+                 pending_signature = None
+ 
+         elif isinstance(item, ThinkingPart):
+             if item.content:
+                 part["text"] = item.content
+                 part["thought"] = True
+ 
+             if item.signature:
+                 if is_claude:
+                     # Claude: signature goes ON the thinking block
+                     part["thoughtSignature"] = item.signature
+                 elif is_gemini:
+                     # Gemini: save signature for NEXT part
+                     pending_signature = item.signature
+                 else:
+                     # Default: try both (put on thinking block)
+                     part["thoughtSignature"] = item.signature
+             elif is_gemini:
+                 # ThinkingPart exists but has no signature - use bypass
+                 # This ensures subsequent tool calls still get a signature
+                 pending_signature = BYPASS_THOUGHT_SIGNATURE
+ 
+         elif isinstance(item, BuiltinToolCallPart):
+             # Skip code execution for now
+             pass
+ 
+         elif isinstance(item, BuiltinToolReturnPart):
+             # Skip code execution result
+             pass
+ 
+         elif isinstance(item, FilePart):
+             content = item.content
+             inline_data_dict: BlobDict = {
+                 "data": content.data,
+                 "mime_type": content.media_type,
+             }
+             part["inline_data"] = inline_data_dict
+         else:
+             assert_never(item)
+ 
+         if part:
+             parts.append(part)
+ 
+     if not parts:
+         return None
+     return ContentDict(role="model", parts=parts)
+ 
+ 
+ def _antigravity_process_response_from_parts(
+     parts: list[Any],  # dicts or objects
+     grounding_metadata: Any | None,
+     model_name: GoogleModelName,
+     provider_name: str,
+     usage: RequestUsage,
+     vendor_id: str | None,
+     vendor_details: dict[str, Any] | None = None,
+ ) -> ModelResponse:
+     """Custom response parser that extracts signatures from ThinkingParts.
+ 
+     Handles different signature protocols:
+     - Claude: signature is ON the thinking block
+     - Gemini: signature is on the NEXT part after thinking (we associate it back)
+     """
+     items: list[ModelResponsePart] = []
+ 
+     is_gemini = "gemini" in str(model_name).lower()
+ 
+     # Helper to get attribute from dict or object
+     def get_attr(obj, attr):
+         if isinstance(obj, dict):
+             return obj.get(attr)
+         return getattr(obj, attr, None)
+ 
+     # First pass: collect all parts and their signatures
+     parsed_parts = []
+     for part in parts:
+         thought_signature = get_attr(part, "thoughtSignature") or get_attr(
+             part, "thought_signature"
+         )
+ 
+         # Also check provider details
+         pd = get_attr(part, "provider_details")
+         if not thought_signature and pd:
+             thought_signature = pd.get("thought_signature") or pd.get(
+                 "thoughtSignature"
+             )
+ 
+         text = get_attr(part, "text")
+         thought = get_attr(part, "thought")
+         # API returns camelCase 'functionCall'
+         function_call = get_attr(part, "functionCall") or get_attr(
+             part, "function_call"
+         )
+ 
+         parsed_parts.append(
+             {
+                 "text": text,
+                 "thought": thought,
+                 "function_call": function_call,
+                 "signature": thought_signature,
+             }
+         )
+ 
+     # Second pass: for Gemini, associate signatures from next parts with thinking blocks
+     if is_gemini:
+         for i, pp in enumerate(parsed_parts):
+             if pp["thought"] and not pp["signature"]:
+                 # Look at next part for signature
+                 if i + 1 < len(parsed_parts):
+                     next_sig = parsed_parts[i + 1].get("signature")
+                     if next_sig:
+                         pp["signature"] = next_sig
+ 
+     # Third pass: create ModelResponsePart objects
+     for pp in parsed_parts:
+         if pp["text"] is not None:
+             if pp["thought"]:
+                 items.append(
+                     ThinkingPart(content=pp["text"], signature=pp["signature"])
+                 )
+             else:
+                 items.append(TextPart(content=pp["text"]))
+ 
+         elif pp["function_call"]:
+             fc = pp["function_call"]
+             fc_name = get_attr(fc, "name")
+             fc_args = get_attr(fc, "args")
+             fc_id = get_attr(fc, "id") or _utils.generate_tool_call_id()
+ 
+             items.append(
+                 ToolCallPart(tool_name=fc_name, args=fc_args, tool_call_id=fc_id)
+             )
+ 
+     return ModelResponse(
+         parts=items,
+         model_name=model_name,
+         usage=usage,
+         provider_response_id=vendor_id,
+         provider_details=vendor_details,
+         provider_name=provider_name,
+     )
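
To make the Gemini signature rules concrete, here is a small illustrative walk-through of `_antigravity_content_model_response` (the model and tool names are made up): the signature captured from a ThinkingPart is copied onto every following function call, so parallel tool calls all share it.

    from pydantic_ai.messages import ModelResponse, ThinkingPart, ToolCallPart

    # Hypothetical model turn: one signed thinking block, then two
    # parallel tool calls (a common Gemini pattern).
    response = ModelResponse(
        parts=[
            ThinkingPart(content="plan the edits", signature="sig-123"),
            ToolCallPart(tool_name="read_file", args={"path": "a.py"}, tool_call_id="c1"),
            ToolCallPart(tool_name="read_file", args={"path": "b.py"}, tool_call_id="c2"),
        ],
        model_name="gemini-3-pro",  # illustrative name
    )

    content = _antigravity_content_model_response(response, "google", "gemini-3-pro")
    # parts[0] is the thinking block; both function_call parts carry its signature.
    assert content["parts"][1]["thoughtSignature"] == "sig-123"
    assert content["parts"][2]["thoughtSignature"] == "sig-123"

Had the ThinkingPart carried no signature (or been absent entirely), both calls would instead carry BYPASS_THOUGHT_SIGNATURE, per the fallback described in the docstring.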
code_puppy/plugins/antigravity_oauth/config.py
@@ -0,0 +1,42 @@
+ """Configuration for the Antigravity OAuth plugin."""
+ 
+ from pathlib import Path
+ from typing import Any, Dict
+ 
+ from code_puppy import config
+ 
+ # Antigravity OAuth configuration
+ ANTIGRAVITY_OAUTH_CONFIG: Dict[str, Any] = {
+     # OAuth endpoints
+     "auth_url": "https://accounts.google.com/o/oauth2/v2/auth",
+     "token_url": "https://oauth2.googleapis.com/token",
+     # Callback handling
+     "redirect_host": "http://localhost",
+     "redirect_path": "oauth-callback",
+     "callback_port_range": (51121, 51150),
+     "callback_timeout": 180,
+     # Model configuration
+     "prefix": "antigravity-",
+     "default_context_length": 200000,
+ }
+ 
+ 
+ def get_token_storage_path() -> Path:
+     """Get the path for storing OAuth tokens."""
+     data_dir = Path(config.DATA_DIR)
+     data_dir.mkdir(parents=True, exist_ok=True, mode=0o700)
+     return data_dir / "antigravity_oauth.json"
+ 
+ 
+ def get_accounts_storage_path() -> Path:
+     """Get the path for storing multi-account data."""
+     data_dir = Path(config.DATA_DIR)
+     data_dir.mkdir(parents=True, exist_ok=True, mode=0o700)
+     return data_dir / "antigravity_accounts.json"
+ 
+ 
+ def get_antigravity_models_path() -> Path:
+     """Get the path to the antigravity_models.json file."""
+     data_dir = Path(config.DATA_DIR)
+     data_dir.mkdir(parents=True, exist_ok=True, mode=0o700)
+     return data_dir / "antigravity_models.json"
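
For orientation, a brief usage sketch of these helpers (the resolved paths depend on the local `config.DATA_DIR`; the suffix comments reflect the return values defined above):

    from code_puppy.plugins.antigravity_oauth import config as ag_config

    # Each helper ensures the data directory exists (created 0o700, subject
    # to umask) and returns a per-concern JSON path under config.DATA_DIR.
    print(ag_config.get_token_storage_path())       # .../antigravity_oauth.json
    print(ag_config.get_accounts_storage_path())    # .../antigravity_accounts.json
    print(ag_config.get_antigravity_models_path())  # .../antigravity_models.json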