code-puppy 0.0.323__py3-none-any.whl → 0.0.335__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. code_puppy/agents/base_agent.py +74 -93
  2. code_puppy/cli_runner.py +105 -2
  3. code_puppy/command_line/add_model_menu.py +15 -0
  4. code_puppy/command_line/autosave_menu.py +5 -0
  5. code_puppy/command_line/colors_menu.py +5 -0
  6. code_puppy/command_line/config_commands.py +24 -1
  7. code_puppy/command_line/core_commands.py +51 -0
  8. code_puppy/command_line/diff_menu.py +5 -0
  9. code_puppy/command_line/mcp/custom_server_form.py +4 -0
  10. code_puppy/command_line/mcp/install_menu.py +5 -1
  11. code_puppy/command_line/model_settings_menu.py +5 -0
  12. code_puppy/command_line/motd.py +13 -7
  13. code_puppy/command_line/onboarding_slides.py +180 -0
  14. code_puppy/command_line/onboarding_wizard.py +340 -0
  15. code_puppy/config.py +3 -2
  16. code_puppy/http_utils.py +155 -196
  17. code_puppy/keymap.py +10 -8
  18. code_puppy/model_factory.py +86 -15
  19. code_puppy/models.json +2 -2
  20. code_puppy/plugins/__init__.py +12 -0
  21. code_puppy/plugins/antigravity_oauth/__init__.py +10 -0
  22. code_puppy/plugins/antigravity_oauth/accounts.py +406 -0
  23. code_puppy/plugins/antigravity_oauth/antigravity_model.py +612 -0
  24. code_puppy/plugins/antigravity_oauth/config.py +42 -0
  25. code_puppy/plugins/antigravity_oauth/constants.py +136 -0
  26. code_puppy/plugins/antigravity_oauth/oauth.py +478 -0
  27. code_puppy/plugins/antigravity_oauth/register_callbacks.py +406 -0
  28. code_puppy/plugins/antigravity_oauth/storage.py +271 -0
  29. code_puppy/plugins/antigravity_oauth/test_plugin.py +319 -0
  30. code_puppy/plugins/antigravity_oauth/token.py +167 -0
  31. code_puppy/plugins/antigravity_oauth/transport.py +595 -0
  32. code_puppy/plugins/antigravity_oauth/utils.py +169 -0
  33. code_puppy/plugins/chatgpt_oauth/register_callbacks.py +2 -0
  34. code_puppy/plugins/claude_code_oauth/register_callbacks.py +2 -0
  35. code_puppy/reopenable_async_client.py +8 -8
  36. code_puppy/terminal_utils.py +168 -3
  37. code_puppy/tools/command_runner.py +42 -54
  38. code_puppy/uvx_detection.py +242 -0
  39. {code_puppy-0.0.323.data → code_puppy-0.0.335.data}/data/code_puppy/models.json +2 -2
  40. {code_puppy-0.0.323.dist-info → code_puppy-0.0.335.dist-info}/METADATA +30 -1
  41. {code_puppy-0.0.323.dist-info → code_puppy-0.0.335.dist-info}/RECORD +45 -30
  42. {code_puppy-0.0.323.data → code_puppy-0.0.335.data}/data/code_puppy/models_dev_api.json +0 -0
  43. {code_puppy-0.0.323.dist-info → code_puppy-0.0.335.dist-info}/WHEEL +0 -0
  44. {code_puppy-0.0.323.dist-info → code_puppy-0.0.335.dist-info}/entry_points.txt +0 -0
  45. {code_puppy-0.0.323.dist-info → code_puppy-0.0.335.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,612 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import logging
5
+ from collections.abc import AsyncIterator
6
+ from contextlib import asynccontextmanager
7
+ from dataclasses import dataclass, field
8
+ from datetime import datetime, timezone
9
+ from typing import Any
10
+ from uuid import uuid4
11
+
12
+ from pydantic_ai._run_context import RunContext
13
+ from pydantic_ai.messages import (
14
+ BuiltinToolCallPart,
15
+ BuiltinToolReturnPart,
16
+ FilePart,
17
+ ModelMessage,
18
+ ModelRequest,
19
+ ModelResponse,
20
+ ModelResponsePart,
21
+ RetryPromptPart,
22
+ SystemPromptPart,
23
+ TextPart,
24
+ ThinkingPart,
25
+ ToolCallPart,
26
+ ToolReturnPart,
27
+ UserPromptPart,
28
+ )
29
+ from typing_extensions import assert_never
30
+
31
+ # Define types locally if needed to avoid import errors
32
+ try:
33
+ from pydantic_ai.messages import BlobDict, ContentDict, FunctionCallDict, PartDict
34
+ except ImportError:
35
+ ContentDict = dict[str, Any]
36
+ PartDict = dict[str, Any]
37
+ FunctionCallDict = dict[str, Any]
38
+ BlobDict = dict[str, Any]
39
+
40
+ from pydantic_ai.messages import ModelResponseStreamEvent
41
+ from pydantic_ai.models import ModelRequestParameters, StreamedResponse
42
+ from pydantic_ai.models.google import GoogleModel, GoogleModelName, _utils
43
+ from pydantic_ai.settings import ModelSettings
44
+ from pydantic_ai.usage import RequestUsage
45
+
46
+ logger = logging.getLogger(__name__)
47
+
48
+
49
class AntigravityModel(GoogleModel):
    """Custom GoogleModel that correctly handles Claude thinking signatures via Antigravity.

    Overrides message mapping and both request paths so that
    ``thoughtSignature`` fields survive the round trip.  Requests are sent
    with raw JSON bodies over the provider's httpx client, bypassing the
    google-genai library's serialization/validation layer (which would drop
    the signature fields).
    """

    async def _map_messages(
        self,
        messages: list[ModelMessage],
        model_request_parameters: ModelRequestParameters,
    ) -> tuple[ContentDict | None, list[dict]]:
        """Map messages to Google GenAI format, preserving thinking signatures.

        Returns a ``(system_instruction, contents)`` pair: the optional
        system-instruction content dict and the list of conversation turns.
        """
        contents: list[dict] = []
        system_parts: list[PartDict] = []

        for m in messages:
            if isinstance(m, ModelRequest):
                message_parts: list[PartDict] = []

                for part in m.parts:
                    if isinstance(part, SystemPromptPart):
                        # System prompts are hoisted into systemInstruction,
                        # not emitted as a user turn.
                        system_parts.append({"text": part.content})
                    elif isinstance(part, UserPromptPart):
                        # Use parent's _map_user_prompt (handles text/attachments).
                        message_parts.extend(await self._map_user_prompt(part))
                    elif isinstance(part, ToolReturnPart):
                        message_parts.append(
                            {
                                "function_response": {
                                    "name": part.tool_name,
                                    "response": part.model_response_object(),
                                    "id": part.tool_call_id,
                                }
                            }
                        )
                    elif isinstance(part, RetryPromptPart):
                        # A retry without a tool name is a plain-text nudge;
                        # with a tool name it is reported as a tool error.
                        if part.tool_name is None:
                            message_parts.append({"text": part.model_response()})
                        else:
                            message_parts.append(
                                {
                                    "function_response": {
                                        "name": part.tool_name,
                                        "response": {"error": part.model_response()},
                                        "id": part.tool_call_id,
                                    }
                                }
                            )
                    else:
                        assert_never(part)

                if message_parts:
                    contents.append({"role": "user", "parts": message_parts})
            elif isinstance(m, ModelResponse):
                # USE CUSTOM HELPER HERE
                # Pass model name so we can handle Claude vs Gemini signature placement
                maybe_content = _antigravity_content_model_response(
                    m, self.system, self._model_name
                )
                if maybe_content:
                    contents.append(maybe_content)
            else:
                assert_never(m)

        # Google GenAI requires at least one part in the message.
        if not contents:
            contents = [{"role": "user", "parts": [{"text": ""}]}]

        if instructions := self._get_instructions(messages, model_request_parameters):
            system_parts.insert(0, {"text": instructions})
        system_instruction = (
            ContentDict(role="user", parts=system_parts) if system_parts else None
        )

        return system_instruction, contents

    async def request(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
    ) -> ModelResponse:
        """Override request to use direct HTTP calls, bypassing google-genai validation.

        Raises:
            RuntimeError: if the underlying httpx client cannot be reached or
                the API responds with a non-200 status.
        """
        # Prepare request (normalizes settings)
        model_settings, model_request_parameters = self.prepare_request(
            model_settings, model_request_parameters
        )

        system_instruction, contents = await self._map_messages(
            messages, model_request_parameters
        )

        # Build generation config from model settings.
        # NOTE(review): ModelSettings is conventionally a TypedDict in
        # pydantic_ai, yet attribute access is used here — confirm that
        # prepare_request returns an attribute-style object.
        gen_config: dict[str, Any] = {}
        if model_settings:
            if (
                hasattr(model_settings, "temperature")
                and model_settings.temperature is not None
            ):
                gen_config["temperature"] = model_settings.temperature
            if hasattr(model_settings, "top_p") and model_settings.top_p is not None:
                gen_config["topP"] = model_settings.top_p
            if (
                hasattr(model_settings, "max_tokens")
                and model_settings.max_tokens is not None
            ):
                gen_config["maxOutputTokens"] = model_settings.max_tokens

        # Build JSON body manually to ensure thoughtSignature is preserved
        body: dict[str, Any] = {
            "contents": contents,
        }
        if gen_config:
            body["generationConfig"] = gen_config
        if system_instruction:
            body["systemInstruction"] = system_instruction

        # Serialize tools manually
        if model_request_parameters.function_tools:
            funcs = []
            for t in model_request_parameters.function_tools:
                funcs.append(
                    {
                        "name": t.name,
                        "description": t.description,
                        "parameters": t.parameters_json_schema,
                    }
                )
            body["tools"] = [{"functionDeclarations": funcs}]

        # Use the http_client from the google-genai client directly.
        # This bypasses google-genai library's strict validation/serialization.
        # Path: self.client._api_client._async_httpx_client
        # NOTE(review): this reaches into private google-genai attributes and
        # may break on library upgrades.
        try:
            client = self.client._api_client._async_httpx_client
        except AttributeError:
            raise RuntimeError(
                "AntigravityModel requires access to the underlying httpx client"
            )
        url = f"/models/{self._model_name}:generateContent"

        # Send request
        response = await client.post(url, json=body)

        if response.status_code != 200:
            raise RuntimeError(
                f"Antigravity API Error {response.status_code}: {response.text}"
            )

        data = response.json()

        # Extract candidates
        candidates = data.get("candidates", [])
        if not candidates:
            # Handle empty response or safety block?
            return ModelResponse(
                parts=[TextPart(content="")],
                model_name=self._model_name,
                usage=RequestUsage(),
            )

        candidate = candidates[0]
        content = candidate.get("content", {})
        parts = content.get("parts", [])

        # Extract usage
        usage_meta = data.get("usageMetadata", {})
        usage = RequestUsage(
            input_tokens=usage_meta.get("promptTokenCount", 0),
            output_tokens=usage_meta.get("candidatesTokenCount", 0),
        )

        return _antigravity_process_response_from_parts(
            parts,
            candidate.get("groundingMetadata"),
            self._model_name,
            self.system,
            usage,
            vendor_id=data.get("requestId"),
        )

    @asynccontextmanager
    async def request_stream(
        self,
        messages: list[ModelMessage],
        model_settings: ModelSettings | None,
        model_request_parameters: ModelRequestParameters,
        run_context: RunContext[Any] | None = None,
    ) -> AsyncIterator[StreamedResponse]:
        """Override request_stream to use streaming with proper signature handling.

        Yields an :class:`AntigravityStreamingResponse` that consumes the
        SSE stream lazily.
        """
        # Prepare request
        model_settings, model_request_parameters = self.prepare_request(
            model_settings, model_request_parameters
        )

        system_instruction, contents = await self._map_messages(
            messages, model_request_parameters
        )

        # Build generation config (same mapping as in request()).
        gen_config: dict[str, Any] = {}
        if model_settings:
            if (
                hasattr(model_settings, "temperature")
                and model_settings.temperature is not None
            ):
                gen_config["temperature"] = model_settings.temperature
            if hasattr(model_settings, "top_p") and model_settings.top_p is not None:
                gen_config["topP"] = model_settings.top_p
            if (
                hasattr(model_settings, "max_tokens")
                and model_settings.max_tokens is not None
            ):
                gen_config["maxOutputTokens"] = model_settings.max_tokens

        # Build request body
        body: dict[str, Any] = {"contents": contents}
        if gen_config:
            body["generationConfig"] = gen_config
        if system_instruction:
            body["systemInstruction"] = system_instruction

        # Add tools
        if model_request_parameters.function_tools:
            funcs = []
            for t in model_request_parameters.function_tools:
                funcs.append(
                    {
                        "name": t.name,
                        "description": t.description,
                        "parameters": t.parameters_json_schema,
                    }
                )
            body["tools"] = [{"functionDeclarations": funcs}]

        # Get httpx client
        try:
            client = self.client._api_client._async_httpx_client
        except AttributeError:
            raise RuntimeError(
                "AntigravityModel requires access to the underlying httpx client"
            )

        # Use streaming endpoint
        url = f"/models/{self._model_name}:streamGenerateContent?alt=sse"

        # Create async generator for SSE events
        async def stream_chunks() -> AsyncIterator[dict[str, Any]]:
            async with client.stream("POST", url, json=body) as response:
                if response.status_code != 200:
                    text = await response.aread()
                    raise RuntimeError(
                        f"Antigravity API Error {response.status_code}: {text.decode()}"
                    )

                async for line in response.aiter_lines():
                    line = line.strip()
                    if not line:
                        continue
                    if line.startswith("data: "):
                        json_str = line[6:]  # Remove 'data: ' prefix
                        if json_str:
                            try:
                                yield json.loads(json_str)
                            except json.JSONDecodeError:
                                # Tolerate partial/garbled SSE payloads.
                                continue

        # Create streaming response
        streamed = AntigravityStreamingResponse(
            model_request_parameters=model_request_parameters,
            _chunks=stream_chunks(),
            _model_name_str=self._model_name,
            _provider_name_str=self.system,
        )
        yield streamed
321
+
322
+
323
@dataclass
class AntigravityStreamingResponse(StreamedResponse):
    """Real streaming response that processes SSE chunks as they arrive.

    Associates ``thoughtSignature`` values with the correct part depending on
    the model family: Claude carries the signature on the thinking block
    itself, while Gemini emits it on the part following the thinking block.
    """

    # Lazy iterator over decoded SSE JSON chunks.
    _chunks: AsyncIterator[dict[str, Any]]
    # Model name, used to detect Gemini vs Claude signature placement.
    _model_name_str: str
    _provider_name_str: str = "google"
    _timestamp_val: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]:
        """Process streaming chunks and yield events."""
        is_gemini = "gemini" in self._model_name_str.lower()
        # NOTE(review): pending_signature is assigned below but never read —
        # looks vestigial; the functionCall branch uses thought_signature
        # directly instead.
        pending_signature: str | None = None

        async for chunk in self._chunks:
            # Extract usage from chunk
            usage_meta = chunk.get("usageMetadata", {})
            if usage_meta:
                self._usage = RequestUsage(
                    input_tokens=usage_meta.get("promptTokenCount", 0),
                    output_tokens=usage_meta.get("candidatesTokenCount", 0),
                )

            # Extract response ID
            if chunk.get("responseId"):
                self.provider_response_id = chunk["responseId"]

            candidates = chunk.get("candidates", [])
            if not candidates:
                continue

            candidate = candidates[0]
            content = candidate.get("content", {})
            parts = content.get("parts", [])

            for part in parts:
                # Extract signature (for Gemini, it's on the functionCall part)
                thought_signature = part.get("thoughtSignature")
                if thought_signature:
                    # For Gemini: if this is a function call with signature,
                    # the signature belongs to the previous thinking block
                    if is_gemini and pending_signature is None:
                        pending_signature = thought_signature

                # Handle thought/thinking part
                if part.get("thought") and part.get("text") is not None:
                    text = part["text"]

                    event = self._parts_manager.handle_thinking_delta(
                        vendor_part_id=None,
                        content=text,
                    )
                    if event:
                        yield event

                    # For Claude: signature is ON the thinking block itself
                    # We need to explicitly set it after the part is created
                    # NOTE(review): pokes at the private _parts_manager._parts
                    # list and bypasses frozen-dataclass protection via
                    # object.__setattr__ — fragile against pydantic_ai changes.
                    if thought_signature and not is_gemini:
                        for existing_part in reversed(self._parts_manager._parts):
                            if isinstance(existing_part, ThinkingPart):
                                object.__setattr__(
                                    existing_part, "signature", thought_signature
                                )
                                break

                # Handle regular text
                elif part.get("text") is not None and not part.get("thought"):
                    text = part["text"]
                    if len(text) == 0:
                        continue
                    event = self._parts_manager.handle_text_delta(
                        vendor_part_id=None,
                        content=text,
                    )
                    if event:
                        yield event

                # Handle function call
                elif part.get("functionCall"):
                    fc = part["functionCall"]

                    # For Gemini: the signature on a function call belongs to the
                    # PREVIOUS thinking block. We need to retroactively set it.
                    if is_gemini and thought_signature:
                        # Find the most recent ThinkingPart and set its signature
                        for existing_part in reversed(self._parts_manager._parts):
                            if isinstance(existing_part, ThinkingPart):
                                # Directly set the signature attribute
                                object.__setattr__(
                                    existing_part, "signature", thought_signature
                                )
                                break

                    # Fresh vendor_part_id per delta: each functionCall chunk
                    # starts a new tool-call part.
                    event = self._parts_manager.handle_tool_call_delta(
                        vendor_part_id=uuid4(),
                        tool_name=fc.get("name"),
                        args=fc.get("args"),
                        tool_call_id=fc.get("id") or _utils.generate_tool_call_id(),
                    )
                    if event:
                        yield event

    @property
    def model_name(self) -> str:
        """Name of the model producing this stream."""
        return self._model_name_str

    @property
    def provider_name(self) -> str | None:
        """Provider identifier (defaults to "google")."""
        return self._provider_name_str

    @property
    def timestamp(self) -> datetime:
        """UTC time at which this response object was created."""
        return self._timestamp_val
436
+
437
+
438
def _antigravity_content_model_response(
    m: ModelResponse, provider_name: str, model_name: str = ""
) -> ContentDict | None:
    """Serialize a ModelResponse for Antigravity, keeping ThinkingPart signatures.

    Signature placement differs by model family:
    - Claude models: the signature lives ON the thinking block itself.
    - Gemini models: the signature is attached to the NEXT part
      (function call or text) that follows the thinking block.

    Returns None when the response yields no serializable parts.
    """
    lowered = model_name.lower()
    claude_protocol = "claude" in lowered
    gemini_protocol = "gemini" in lowered

    serialized: list[PartDict] = []
    # Gemini only: a signature captured from a ThinkingPart, waiting to be
    # attached to the next emitted part.
    carry_signature: str | None = None

    for response_part in m.parts:
        entry: PartDict = {}

        if isinstance(response_part, ToolCallPart):
            entry["function_call"] = FunctionCallDict(
                name=response_part.tool_name,
                args=response_part.args_as_dict(),
                id=response_part.tool_call_id,
            )
            if gemini_protocol and carry_signature:
                entry["thoughtSignature"] = carry_signature
                carry_signature = None

        elif isinstance(response_part, TextPart):
            entry["text"] = response_part.content
            if gemini_protocol and carry_signature:
                entry["thoughtSignature"] = carry_signature
                carry_signature = None

        elif isinstance(response_part, ThinkingPart):
            if response_part.content:
                entry["text"] = response_part.content
                entry["thought"] = True
            if response_part.signature:
                if claude_protocol:
                    # Claude: signature rides on the thinking block.
                    entry["thoughtSignature"] = response_part.signature
                elif gemini_protocol:
                    # Gemini: defer signature to the next part.
                    carry_signature = response_part.signature
                else:
                    # Unknown family: default to the Claude-style placement.
                    entry["thoughtSignature"] = response_part.signature

        elif isinstance(response_part, BuiltinToolCallPart):
            # Code-execution calls are intentionally skipped for now.
            pass

        elif isinstance(response_part, BuiltinToolReturnPart):
            # Code-execution results are intentionally skipped for now.
            pass

        elif isinstance(response_part, FilePart):
            file_content = response_part.content
            entry["inline_data"] = BlobDict(
                data=file_content.data,
                mime_type=file_content.media_type,
            )
        else:
            assert_never(response_part)

        if entry:
            serialized.append(entry)

    if not serialized:
        return None
    return ContentDict(role="model", parts=serialized)
518
+
519
+
520
def _antigravity_process_response_from_parts(
    parts: list[Any],  # raw JSON dicts or attribute-style objects
    grounding_metadata: Any | None,
    model_name: GoogleModelName,
    provider_name: str,
    usage: RequestUsage,
    vendor_id: str | None,
    vendor_details: dict[str, Any] | None = None,
) -> ModelResponse:
    """Parse raw response parts into a ModelResponse, recovering signatures.

    Signature protocols handled:
    - Claude: signature is ON the thinking block.
    - Gemini: signature arrives on the part AFTER the thinking block and is
      copied back onto it here.
    """
    gemini_style = "gemini" in str(model_name).lower()

    def _read(obj: Any, name: str) -> Any:
        # Uniform accessor for both dicts and objects.
        return obj.get(name) if isinstance(obj, dict) else getattr(obj, name, None)

    # Pass 1: normalize each raw part into a uniform record.
    normalized: list[dict[str, Any]] = []
    for raw in parts:
        signature = _read(raw, "thoughtSignature") or _read(raw, "thought_signature")

        # Some providers tuck the signature into provider_details instead.
        details = _read(raw, "provider_details")
        if not signature and details:
            signature = details.get("thought_signature") or details.get(
                "thoughtSignature"
            )

        normalized.append(
            {
                "text": _read(raw, "text"),
                "thought": _read(raw, "thought"),
                # API returns camelCase 'functionCall'; accept snake_case too.
                "function_call": _read(raw, "functionCall")
                or _read(raw, "function_call"),
                "signature": signature,
            }
        )

    # Pass 2 (Gemini only): pull a trailing signature back onto the
    # preceding thinking block it belongs to.
    if gemini_style:
        for idx, record in enumerate(normalized):
            if not (record["thought"] and not record["signature"]):
                continue
            if idx + 1 < len(normalized):
                follower_sig = normalized[idx + 1].get("signature")
                if follower_sig:
                    record["signature"] = follower_sig

    # Pass 3: materialize ModelResponsePart objects.
    assembled: list[ModelResponsePart] = []
    for record in normalized:
        if record["text"] is not None:
            if record["thought"]:
                assembled.append(
                    ThinkingPart(content=record["text"], signature=record["signature"])
                )
            else:
                assembled.append(TextPart(content=record["text"]))

        elif record["function_call"]:
            call = record["function_call"]
            assembled.append(
                ToolCallPart(
                    tool_name=_read(call, "name"),
                    args=_read(call, "args"),
                    tool_call_id=_read(call, "id") or _utils.generate_tool_call_id(),
                )
            )

    return ModelResponse(
        parts=assembled,
        model_name=model_name,
        usage=usage,
        provider_response_id=vendor_id,
        provider_details=vendor_details,
        provider_name=provider_name,
    )
@@ -0,0 +1,42 @@
1
+ """Configuration for the Antigravity OAuth plugin."""
2
+
3
+ from pathlib import Path
4
+ from typing import Any, Dict
5
+
6
+ from code_puppy import config
7
+
8
+ # Antigravity OAuth configuration
9
# Antigravity OAuth configuration.
# Central knobs for the OAuth flow and model registration; consumed by the
# other modules of this plugin.
ANTIGRAVITY_OAUTH_CONFIG: Dict[str, Any] = {
    # OAuth endpoints (Google OAuth 2.0)
    "auth_url": "https://accounts.google.com/o/oauth2/v2/auth",
    "token_url": "https://oauth2.googleapis.com/token",
    # Callback handling: local redirect target for the authorization code.
    "redirect_host": "http://localhost",
    "redirect_path": "oauth-callback",
    # Ports tried for the local callback server (inclusive range).
    "callback_port_range": (51121, 51150),
    # Seconds to wait for the user to complete the browser flow.
    "callback_timeout": 180,
    # Model configuration: name prefix for models exposed by this plugin.
    "prefix": "antigravity-",
    # Fallback context window when a model does not declare one.
    "default_context_length": 200000,
}
22
+
23
+
24
def _ensure_data_dir() -> Path:
    """Return the plugin data directory, creating it (mode 0o700) if missing.

    NOTE: ``mkdir`` applies the mode only on creation; an existing directory's
    permissions are left untouched.
    """
    data_dir = Path(config.DATA_DIR)
    data_dir.mkdir(parents=True, exist_ok=True, mode=0o700)
    return data_dir


def get_token_storage_path() -> Path:
    """Get the path for storing OAuth tokens."""
    return _ensure_data_dir() / "antigravity_oauth.json"


def get_accounts_storage_path() -> Path:
    """Get the path for storing multi-account data."""
    return _ensure_data_dir() / "antigravity_accounts.json"


def get_antigravity_models_path() -> Path:
    """Get the path to the antigravity_models.json file."""
    return _ensure_data_dir() / "antigravity_models.json"