code-puppy 0.0.374__py3-none-any.whl → 0.0.375__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those package versions.
- code_puppy/agents/agent_manager.py +34 -2
- code_puppy/agents/base_agent.py +61 -4
- code_puppy/callbacks.py +125 -0
- code_puppy/messaging/rich_renderer.py +13 -7
- code_puppy/model_factory.py +63 -258
- code_puppy/model_utils.py +33 -1
- code_puppy/plugins/antigravity_oauth/register_callbacks.py +106 -1
- code_puppy/plugins/antigravity_oauth/utils.py +2 -3
- code_puppy/plugins/chatgpt_oauth/register_callbacks.py +85 -3
- code_puppy/plugins/claude_code_oauth/register_callbacks.py +88 -0
- code_puppy/plugins/ralph/__init__.py +13 -0
- code_puppy/plugins/ralph/agents.py +433 -0
- code_puppy/plugins/ralph/commands.py +208 -0
- code_puppy/plugins/ralph/loop_controller.py +285 -0
- code_puppy/plugins/ralph/models.py +125 -0
- code_puppy/plugins/ralph/register_callbacks.py +133 -0
- code_puppy/plugins/ralph/state_manager.py +322 -0
- code_puppy/plugins/ralph/tools.py +451 -0
- code_puppy/tools/__init__.py +31 -0
- code_puppy/tools/agent_tools.py +1 -1
- code_puppy/tools/command_runner.py +23 -9
- {code_puppy-0.0.374.dist-info → code_puppy-0.0.375.dist-info}/METADATA +1 -1
- {code_puppy-0.0.374.dist-info → code_puppy-0.0.375.dist-info}/RECORD +28 -20
- {code_puppy-0.0.374.data → code_puppy-0.0.375.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.374.data → code_puppy-0.0.375.data}/data/code_puppy/models_dev_api.json +0 -0
- {code_puppy-0.0.374.dist-info → code_puppy-0.0.375.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.374.dist-info → code_puppy-0.0.375.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.374.dist-info → code_puppy-0.0.375.dist-info}/licenses/LICENSE +0 -0
code_puppy/model_factory.py
CHANGED

@@ -393,78 +393,9 @@ class ModelFactory:
 
                 provider = AnthropicProvider(anthropic_client=anthropic_client)
                 return AnthropicModel(model_name=model_config["name"], provider=provider)
-
-
-            if model_config.get("oauth_source") == "claude-code-plugin":
-                try:
-                    from code_puppy.plugins.claude_code_oauth.utils import (
-                        get_valid_access_token,
-                    )
-
-                    refreshed_token = get_valid_access_token()
-                    if refreshed_token:
-                        api_key = refreshed_token
-                        custom_endpoint = model_config.get("custom_endpoint")
-                        if isinstance(custom_endpoint, dict):
-                            custom_endpoint["api_key"] = refreshed_token
-                except ImportError:
-                    pass
-            if not api_key:
-                emit_warning(
-                    f"API key is not set for Claude Code endpoint; skipping model '{model_config.get('name')}'."
-                )
-                return None
-
-            # Check if interleaved thinking is enabled (defaults to True for OAuth models)
-            from code_puppy.config import get_effective_model_settings
-
-            effective_settings = get_effective_model_settings(model_name)
-            interleaved_thinking = effective_settings.get("interleaved_thinking", True)
-
-            # Handle anthropic-beta header based on interleaved_thinking setting
-            if "anthropic-beta" in headers:
-                beta_parts = [p.strip() for p in headers["anthropic-beta"].split(",")]
-                if interleaved_thinking:
-                    # Ensure interleaved-thinking is in the header
-                    if "interleaved-thinking-2025-05-14" not in beta_parts:
-                        beta_parts.append("interleaved-thinking-2025-05-14")
-                else:
-                    # Remove interleaved-thinking from the header
-                    beta_parts = [
-                        p for p in beta_parts if "interleaved-thinking" not in p
-                    ]
-                headers["anthropic-beta"] = ",".join(beta_parts) if beta_parts else None
-                if headers.get("anthropic-beta") is None:
-                    del headers["anthropic-beta"]
-            elif interleaved_thinking:
-                # No existing beta header, add one for interleaved thinking
-                headers["anthropic-beta"] = "interleaved-thinking-2025-05-14"
-
-            # Use a dedicated client wrapper that injects cache_control on /v1/messages
-            if verify is None:
-                verify = get_cert_bundle_path()
-
-            http2_enabled = get_http2()
-
-            client = ClaudeCacheAsyncClient(
-                headers=headers,
-                verify=verify,
-                timeout=180,
-                http2=http2_enabled,
-            )
+        # NOTE: 'claude_code' model type is now handled by the claude_code_oauth plugin
+        # via the register_model_type callback. See plugins/claude_code_oauth/register_callbacks.py
 
-            anthropic_client = AsyncAnthropic(
-                base_url=url,
-                http_client=client,
-                auth_token=api_key,
-            )
-            # Ensure cache_control is injected at the Anthropic SDK layer too
-            # so we don't depend solely on httpx internals.
-            patch_anthropic_client_messages(anthropic_client)
-            anthropic_client.api_key = None
-            anthropic_client.auth_token = api_key
-            provider = AnthropicProvider(anthropic_client=anthropic_client)
-            return AnthropicModel(model_name=model_config["name"], provider=provider)
         elif model_type == "azure_openai":
             azure_endpoint_config = model_config.get("azure_endpoint")
             if not azure_endpoint_config:

@@ -571,7 +502,42 @@ class ModelFactory:
             )
             setattr(zai_model, "provider", provider)
             return zai_model
+        # NOTE: 'antigravity' model type is now handled by the antigravity_oauth plugin
+        # via the register_model_type callback. See plugins/antigravity_oauth/register_callbacks.py
+
         elif model_type == "custom_gemini":
+            # Backwards compatibility: delegate to antigravity plugin if antigravity flag is set
+            # New configs use type="antigravity" directly, but old configs may have
+            # type="custom_gemini" with antigravity=True
+            if model_config.get("antigravity"):
+                # Find and call the antigravity handler from the plugin
+                registered_handlers = callbacks.on_register_model_types()
+                for handler_info in registered_handlers:
+                    handlers = (
+                        handler_info
+                        if isinstance(handler_info, list)
+                        else [handler_info]
+                        if handler_info
+                        else []
+                    )
+                    for handler_entry in handlers:
+                        if (
+                            isinstance(handler_entry, dict)
+                            and handler_entry.get("type") == "antigravity"
+                        ):
+                            handler = handler_entry.get("handler")
+                            if callable(handler):
+                                try:
+                                    return handler(model_name, model_config, config)
+                                except Exception as e:
+                                    logger.error(f"Antigravity handler failed: {e}")
+                                    return None
+                # If no antigravity handler found, warn and fall through
+                emit_warning(
+                    f"Model '{model_config.get('name')}' has antigravity=True but antigravity plugin not loaded."
+                )
+                return None
+
             url, headers, verify, api_key = get_custom_config(model_config)
             if not api_key:
                 emit_warning(

@@ -579,114 +545,7 @@ class ModelFactory:
                 )
                 return None
 
-
-            if model_config.get("antigravity"):
-                try:
-                    from code_puppy.plugins.antigravity_oauth.token import (
-                        is_token_expired,
-                        refresh_access_token,
-                    )
-                    from code_puppy.plugins.antigravity_oauth.transport import (
-                        create_antigravity_client,
-                    )
-                    from code_puppy.plugins.antigravity_oauth.utils import (
-                        load_stored_tokens,
-                        save_tokens,
-                    )
-
-                    # Try to import custom model for thinking signatures
-                    try:
-                        from code_puppy.plugins.antigravity_oauth.antigravity_model import (
-                            AntigravityModel,
-                        )
-                    except ImportError:
-                        AntigravityModel = None
-
-                    # Get fresh access token (refresh if needed)
-                    tokens = load_stored_tokens()
-                    if not tokens:
-                        emit_warning(
-                            "Antigravity tokens not found; run /antigravity-auth first."
-                        )
-                        return None
-
-                    access_token = tokens.get("access_token", "")
-                    refresh_token = tokens.get("refresh_token", "")
-                    expires_at = tokens.get("expires_at")
-
-                    # Refresh if expired or about to expire (initial check)
-                    if is_token_expired(expires_at):
-                        new_tokens = refresh_access_token(refresh_token)
-                        if new_tokens:
-                            access_token = new_tokens.access_token
-                            refresh_token = new_tokens.refresh_token
-                            expires_at = new_tokens.expires_at
-                            tokens["access_token"] = new_tokens.access_token
-                            tokens["refresh_token"] = new_tokens.refresh_token
-                            tokens["expires_at"] = new_tokens.expires_at
-                            save_tokens(tokens)
-                        else:
-                            emit_warning(
-                                "Failed to refresh Antigravity token; run /antigravity-auth again."
-                            )
-                            return None
-
-                    # Callback to persist tokens when proactively refreshed during session
-                    def on_token_refreshed(new_tokens):
-                        """Persist new tokens when proactively refreshed."""
-                        try:
-                            updated_tokens = load_stored_tokens() or {}
-                            updated_tokens["access_token"] = new_tokens.access_token
-                            updated_tokens["refresh_token"] = new_tokens.refresh_token
-                            updated_tokens["expires_at"] = new_tokens.expires_at
-                            save_tokens(updated_tokens)
-                            logger.debug(
-                                "Persisted proactively refreshed Antigravity tokens"
-                            )
-                        except Exception as e:
-                            logger.warning("Failed to persist refreshed tokens: %s", e)
-
-                    project_id = tokens.get(
-                        "project_id", model_config.get("project_id", "")
-                    )
-                    client = create_antigravity_client(
-                        access_token=access_token,
-                        project_id=project_id,
-                        model_name=model_config["name"],
-                        base_url=url,
-                        headers=headers,
-                        refresh_token=refresh_token,
-                        expires_at=expires_at,
-                        on_token_refreshed=on_token_refreshed,
-                    )
-
-                    # Use custom model with direct httpx client
-                    if AntigravityModel:
-                        model = AntigravityModel(
-                            model_name=model_config["name"],
-                            api_key=api_key
-                            or "",  # Antigravity uses OAuth, key may be empty
-                            base_url=url,
-                            http_client=client,
-                        )
-                    else:
-                        model = GeminiModel(
-                            model_name=model_config["name"],
-                            api_key=api_key or "",
-                            base_url=url,
-                            http_client=client,
-                        )
-
-                    return model
-
-                except ImportError:
-                    emit_warning(
-                        f"Antigravity transport not available; skipping model '{model_config.get('name')}'."
-                    )
-                    return None
-            else:
-                client = create_async_client(headers=headers, verify=verify)
-
+            client = create_async_client(headers=headers, verify=verify)
             model = GeminiModel(
                 model_name=model_config["name"],
                 api_key=api_key,

@@ -814,85 +673,8 @@ class ModelFactory:
             )
             return model
 
-
-
-            try:
-                try:
-                    from chatgpt_oauth.config import CHATGPT_OAUTH_CONFIG
-                    from chatgpt_oauth.utils import (
-                        get_valid_access_token,
-                        load_stored_tokens,
-                    )
-                except ImportError:
-                    from code_puppy.plugins.chatgpt_oauth.config import (
-                        CHATGPT_OAUTH_CONFIG,
-                    )
-                    from code_puppy.plugins.chatgpt_oauth.utils import (
-                        get_valid_access_token,
-                        load_stored_tokens,
-                    )
-            except ImportError as exc:
-                emit_warning(
-                    f"ChatGPT OAuth plugin not available; skipping model '{model_config.get('name')}'. "
-                    f"Error: {exc}"
-                )
-                return None
-
-            # Get a valid access token (refreshing if needed)
-            access_token = get_valid_access_token()
-            if not access_token:
-                emit_warning(
-                    f"Failed to get valid ChatGPT OAuth token; skipping model '{model_config.get('name')}'. "
-                    "Run /chatgpt-auth to authenticate."
-                )
-                return None
-
-            # Get account_id from stored tokens (required for ChatGPT-Account-Id header)
-            tokens = load_stored_tokens()
-            account_id = tokens.get("account_id", "") if tokens else ""
-            if not account_id:
-                emit_warning(
-                    f"No account_id found in ChatGPT OAuth tokens; skipping model '{model_config.get('name')}'. "
-                    "Run /chatgpt-auth to re-authenticate."
-                )
-                return None
-
-            # Build headers for ChatGPT Codex API
-            originator = CHATGPT_OAUTH_CONFIG.get("originator", "codex_cli_rs")
-            client_version = CHATGPT_OAUTH_CONFIG.get("client_version", "0.72.0")
-
-            headers = {
-                "ChatGPT-Account-Id": account_id,
-                "originator": originator,
-                "User-Agent": f"{originator}/{client_version}",
-            }
-            # Merge with any headers from model config
-            config_headers = model_config.get("custom_endpoint", {}).get("headers", {})
-            headers.update(config_headers)
-
-            # Get base URL - Codex API uses chatgpt.com, not api.openai.com
-            base_url = model_config.get("custom_endpoint", {}).get(
-                "url", CHATGPT_OAUTH_CONFIG["api_base_url"]
-            )
-
-            # Create HTTP client with Codex interceptor for store=false injection
-            from code_puppy.chatgpt_codex_client import create_codex_async_client
-
-            verify = get_cert_bundle_path()
-            client = create_codex_async_client(headers=headers, verify=verify)
-
-            provider = OpenAIProvider(
-                api_key=access_token,
-                base_url=base_url,
-                http_client=client,
-            )
-
-            # ChatGPT Codex API only supports Responses format
-            model = OpenAIResponsesModel(
-                model_name=model_config["name"], provider=provider
-            )
-            setattr(model, "provider", provider)
-            return model
+        # NOTE: 'chatgpt_oauth' model type is now handled by the chatgpt_oauth plugin
+        # via the register_model_type callback. See plugins/chatgpt_oauth/register_callbacks.py
 
         elif model_type == "round_robin":
             # Get the list of model names to use in the round-robin

@@ -916,4 +698,27 @@ class ModelFactory:
             return RoundRobinModel(*models, rotate_every=rotate_every)
 
         else:
+            # Check for plugin-registered model type handlers
+            registered_handlers = callbacks.on_register_model_types()
+            for handler_info in registered_handlers:
+                # Handler info can be a list of dicts or a single dict
+                if isinstance(handler_info, list):
+                    handlers = handler_info
+                else:
+                    handlers = [handler_info] if handler_info else []
+
+                for handler_entry in handlers:
+                    if not isinstance(handler_entry, dict):
+                        continue
+                    if handler_entry.get("type") == model_type:
+                        handler = handler_entry.get("handler")
+                        if callable(handler):
+                            try:
+                                return handler(model_name, model_config, config)
+                            except Exception as e:
+                                logger.error(
+                                    f"Plugin handler for model type '{model_type}' failed: {e}"
+                                )
+                                return None
+
             raise ValueError(f"Unsupported model type: {model_type}")
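
Taken together, these model_factory.py hunks replace the hard-coded Claude Code, Antigravity, and ChatGPT branches with a generic dispatch: a plugin claims a model type by registering a 'register_model_type' callback that returns a list of {"type": ..., "handler": ...} entries, and the factory's final else branch calls the matching handler with (model_name, model_config, config). The sketch below shows how a third-party plugin could use this hook; the "echo" type, the handler body, and the returned object are invented for illustration, while register_callback and the handler/return shapes follow the code shown in this diff.

```python
from typing import Any, Dict, List

from code_puppy.callbacks import register_callback


def _create_echo_model(model_name: str, model_config: Dict, config: Dict) -> Any:
    """Hypothetical handler for a made-up "echo" model type.

    The factory calls this when a models.json entry declares "type": "echo";
    returning None tells the factory to skip the model, the same convention
    the built-in handlers in this diff follow on missing credentials.
    """
    api_key = (model_config.get("custom_endpoint") or {}).get("api_key")
    if not api_key:
        return None
    return object()  # stand-in for a real pydantic-ai model instance


def _register_model_types() -> List[Dict[str, Any]]:
    # Same shape the antigravity and chatgpt_oauth plugins return:
    # a list of {"type": <model_type>, "handler": <callable>} dicts.
    return [{"type": "echo", "handler": _create_echo_model}]


register_callback("register_model_type", _register_model_types)
```
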
code_puppy/model_utils.py
CHANGED

@@ -2,6 +2,9 @@
 
 This module centralizes logic for handling model-specific behaviors,
 particularly for claude-code and antigravity models which require special prompt handling.
+
+Plugins can register custom system prompt handlers via the 'get_model_system_prompt'
+callback to extend support for additional model types.
 """
 
 import pathlib

@@ -68,7 +71,36 @@ def prepare_prompt_for_model(
     user_prompt: str,
     prepend_system_to_user: bool = True,
 ) -> PreparedPrompt:
-    """Prepare instructions and prompt for a specific model.
+    """Prepare instructions and prompt for a specific model.
+
+    This function handles model-specific system prompt requirements. Plugins can
+    register custom handlers via the 'get_model_system_prompt' callback to extend
+    support for additional model types.
+
+    Args:
+        model_name: The name of the model being used
+        system_prompt: The default system prompt from the agent
+        user_prompt: The user's prompt/message
+        prepend_system_to_user: Whether to prepend system prompt to user prompt
+            for models that require it (default: True)
+
+    Returns:
+        PreparedPrompt with instructions and user_prompt ready for the model.
+    """
+    # Check for plugin-registered system prompt handlers first
+    from code_puppy import callbacks
+
+    results = callbacks.on_get_model_system_prompt(
+        model_name, system_prompt, user_prompt
+    )
+    for result in results:
+        if result and isinstance(result, dict) and result.get("handled"):
+            return PreparedPrompt(
+                instructions=result.get("instructions", system_prompt),
+                user_prompt=result.get("user_prompt", user_prompt),
+                is_claude_code=result.get("is_claude_code", False),
+            )
+
     # Handle Claude Code models
     if is_claude_code_model(model_name):
         modified_prompt = user_prompt
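
The prepare_prompt_for_model change gives plugins the first chance to shape prompts: every 'get_model_system_prompt' callback is invoked with (model_name, system_prompt, user_prompt), and the first result marked handled=True is used. A hedged sketch of such a handler follows; the "acme-" model-name check and the prompt rewriting rule are invented for illustration, and only the callback name and the result keys come from this diff.

```python
from typing import Dict, Optional

from code_puppy.callbacks import register_callback


def _acme_system_prompt_handler(
    model_name: str, system_prompt: str, user_prompt: str
) -> Optional[Dict]:
    # Hypothetical: only intercept a made-up "acme-" model family.
    if not model_name.startswith("acme-"):
        return None  # unhandled results are skipped by the loop in prepare_prompt_for_model

    # Keys mirror what prepare_prompt_for_model reads back:
    # handled, instructions, user_prompt, is_claude_code.
    return {
        "handled": True,
        "instructions": "",
        "user_prompt": f"{system_prompt}\n\n{user_prompt}",
        "is_claude_code": False,
    }


register_callback("get_model_system_prompt", _acme_system_prompt_handler)
```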

code_puppy/plugins/antigravity_oauth/register_callbacks.py
CHANGED

@@ -1,4 +1,8 @@
-"""Antigravity OAuth Plugin callbacks for Code Puppy CLI."""
+"""Antigravity OAuth Plugin callbacks for Code Puppy CLI.
+
+Provides OAuth authentication for Antigravity models and registers
+the 'antigravity' model type handler.
+"""
 
 from __future__ import annotations
 

@@ -30,6 +34,8 @@ from .oauth import (
     prepare_oauth_context,
 )
 from .storage import clear_accounts
+from .token import is_token_expired, refresh_access_token
+from .transport import create_antigravity_client
 from .utils import (
     add_models_to_config,
     load_antigravity_models,

@@ -408,6 +414,105 @@ def _handle_custom_command(command: str, name: str) -> Optional[bool]:
     return None
 
 
+def _create_antigravity_model(model_name: str, model_config: Dict, config: Dict) -> Any:
+    """Create an Antigravity model instance.
+
+    This handler is registered via the 'register_model_type' callback to handle
+    models with type='antigravity'.
+    """
+    from code_puppy.gemini_model import GeminiModel
+    from code_puppy.model_factory import get_custom_config
+
+    # Try to import custom model for thinking signatures
+    try:
+        from .antigravity_model import AntigravityModel
+    except ImportError:
+        AntigravityModel = None  # type: ignore
+
+    url, headers, verify, api_key = get_custom_config(model_config)
+    if not api_key:
+        emit_warning(
+            f"API key is not set for Antigravity endpoint; skipping model '{model_config.get('name')}'."
+        )
+        return None
+
+    # Get fresh access token (refresh if needed)
+    tokens = load_stored_tokens()
+    if not tokens:
+        emit_warning("Antigravity tokens not found; run /antigravity-auth first.")
+        return None
+
+    access_token = tokens.get("access_token", "")
+    refresh_token = tokens.get("refresh_token", "")
+    expires_at = tokens.get("expires_at")
+
+    # Refresh if expired or about to expire (initial check)
+    if is_token_expired(expires_at):
+        new_tokens = refresh_access_token(refresh_token)
+        if new_tokens:
+            access_token = new_tokens.access_token
+            refresh_token = new_tokens.refresh_token
+            expires_at = new_tokens.expires_at
+            tokens["access_token"] = new_tokens.access_token
+            tokens["refresh_token"] = new_tokens.refresh_token
+            tokens["expires_at"] = new_tokens.expires_at
+            save_tokens(tokens)
+        else:
+            emit_warning(
+                "Failed to refresh Antigravity token; run /antigravity-auth again."
+            )
+            return None
+
+    # Callback to persist tokens when proactively refreshed during session
+    def on_token_refreshed(new_tokens: Any) -> None:
+        """Persist new tokens when proactively refreshed."""
+        try:
+            updated_tokens = load_stored_tokens() or {}
+            updated_tokens["access_token"] = new_tokens.access_token
+            updated_tokens["refresh_token"] = new_tokens.refresh_token
+            updated_tokens["expires_at"] = new_tokens.expires_at
+            save_tokens(updated_tokens)
+            logger.debug("Persisted proactively refreshed Antigravity tokens")
+        except Exception as e:
+            logger.warning("Failed to persist refreshed tokens: %s", e)
+
+    project_id = tokens.get("project_id", model_config.get("project_id", ""))
+    client = create_antigravity_client(
+        access_token=access_token,
+        project_id=project_id,
+        model_name=model_config["name"],
+        base_url=url,
+        headers=headers,
+        refresh_token=refresh_token,
+        expires_at=expires_at,
+        on_token_refreshed=on_token_refreshed,
+    )
+
+    # Use custom model with direct httpx client
+    if AntigravityModel:
+        model = AntigravityModel(
+            model_name=model_config["name"],
+            api_key=api_key or "",  # Antigravity uses OAuth, key may be empty
+            base_url=url,
+            http_client=client,
+        )
+    else:
+        model = GeminiModel(
+            model_name=model_config["name"],
+            api_key=api_key or "",
+            base_url=url,
+            http_client=client,
+        )
+
+    return model
+
+
+def _register_model_types() -> List[Dict[str, Any]]:
+    """Register the antigravity model type handler."""
+    return [{"type": "antigravity", "handler": _create_antigravity_model}]
+
+
 # Register callbacks
 register_callback("custom_command_help", _custom_help)
 register_callback("custom_command", _handle_custom_command)
+register_callback("register_model_type", _register_model_types)

code_puppy/plugins/antigravity_oauth/utils.py
CHANGED

@@ -77,9 +77,9 @@ def add_models_to_config(access_token: str, project_id: str = "") -> bool:
         # Build custom headers
         headers = dict(ANTIGRAVITY_HEADERS)
 
-        # Use
+        # Use antigravity type - handled by the plugin's register_model_type callback
         models_config[prefixed_name] = {
-            "type": "
+            "type": "antigravity",
             "name": model_id,
             "custom_endpoint": {
                 "url": ANTIGRAVITY_ENDPOINT,

@@ -90,7 +90,6 @@ def add_models_to_config(access_token: str, project_id: str = "") -> bool:
             "context_length": model_info.get("context_length", 200000),
             "family": model_info.get("family", "other"),
             "oauth_source": "antigravity-plugin",
-            "antigravity": True,  # Flag to use Antigravity transport
         }
 
         # Add thinking budget if present
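
For reference, the utils.py change above switches newly written entries from the old custom_gemini-plus-flag shape to the dedicated antigravity type, while the backwards-compatibility branch in model_factory.py keeps old entries working. The sketch below contrasts the two shapes; the model id, endpoint URL, and any field values are illustrative placeholders, not values taken from the package.

```python
# Old-style entry (pre-0.0.375): custom_gemini plus the antigravity flag.
# model_factory.py now spots the flag and delegates to the plugin-registered handler.
legacy_entry = {
    "type": "custom_gemini",
    "name": "example-antigravity-model",  # placeholder id
    "antigravity": True,                  # flag that triggers the delegation
    "custom_endpoint": {"url": "https://example.invalid/antigravity"},
}

# New-style entry (0.0.375+): the antigravity_oauth plugin registers the
# "antigravity" type directly, so no flag is needed.
new_entry = {
    "type": "antigravity",
    "name": "example-antigravity-model",
    "oauth_source": "antigravity-plugin",
    "custom_endpoint": {"url": "https://example.invalid/antigravity"},
}
```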

code_puppy/plugins/chatgpt_oauth/register_callbacks.py
CHANGED

@@ -1,9 +1,13 @@
-"""ChatGPT OAuth plugin callbacks aligned with ChatMock flow."""
+"""ChatGPT OAuth plugin callbacks aligned with ChatMock flow.
+
+Provides OAuth authentication for ChatGPT models and registers
+the 'chatgpt_oauth' model type handler.
+"""
 
 from __future__ import annotations
 
 import os
-from typing import List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple
 
 from code_puppy.callbacks import register_callback
 from code_puppy.messaging import emit_info, emit_success, emit_warning

@@ -11,7 +15,12 @@ from code_puppy.model_switching import set_model_and_reload_agent
 
 from .config import CHATGPT_OAUTH_CONFIG, get_token_storage_path
 from .oauth_flow import run_oauth_flow
-from .utils import
+from .utils import (
+    get_valid_access_token,
+    load_chatgpt_models,
+    load_stored_tokens,
+    remove_chatgpt_models,
+)
 
 
 def _custom_help() -> List[Tuple[str, str]]:

@@ -90,5 +99,78 @@ def _handle_custom_command(command: str, name: str) -> Optional[bool]:
     return None
 
 
+def _create_chatgpt_oauth_model(
+    model_name: str, model_config: Dict, config: Dict
+) -> Any:
+    """Create a ChatGPT OAuth model instance.
+
+    This handler is registered via the 'register_model_type' callback to handle
+    models with type='chatgpt_oauth'.
+    """
+    from pydantic_ai.models.openai import OpenAIResponsesModel
+    from pydantic_ai.providers.openai import OpenAIProvider
+
+    from code_puppy.chatgpt_codex_client import create_codex_async_client
+    from code_puppy.http_utils import get_cert_bundle_path
+
+    # Get a valid access token (refreshing if needed)
+    access_token = get_valid_access_token()
+    if not access_token:
+        emit_warning(
+            f"Failed to get valid ChatGPT OAuth token; skipping model '{model_config.get('name')}'. "
+            "Run /chatgpt-auth to authenticate."
+        )
+        return None
+
+    # Get account_id from stored tokens (required for ChatGPT-Account-Id header)
+    tokens = load_stored_tokens()
+    account_id = tokens.get("account_id", "") if tokens else ""
+    if not account_id:
+        emit_warning(
+            f"No account_id found in ChatGPT OAuth tokens; skipping model '{model_config.get('name')}'. "
+            "Run /chatgpt-auth to re-authenticate."
+        )
+        return None
+
+    # Build headers for ChatGPT Codex API
+    originator = CHATGPT_OAUTH_CONFIG.get("originator", "codex_cli_rs")
+    client_version = CHATGPT_OAUTH_CONFIG.get("client_version", "0.72.0")
+
+    headers = {
+        "ChatGPT-Account-Id": account_id,
+        "originator": originator,
+        "User-Agent": f"{originator}/{client_version}",
+    }
+    # Merge with any headers from model config
+    config_headers = model_config.get("custom_endpoint", {}).get("headers", {})
+    headers.update(config_headers)
+
+    # Get base URL - Codex API uses chatgpt.com, not api.openai.com
+    base_url = model_config.get("custom_endpoint", {}).get(
+        "url", CHATGPT_OAUTH_CONFIG["api_base_url"]
+    )
+
+    # Create HTTP client with Codex interceptor for store=false injection
+    verify = get_cert_bundle_path()
+    client = create_codex_async_client(headers=headers, verify=verify)
+
+    provider = OpenAIProvider(
+        api_key=access_token,
+        base_url=base_url,
+        http_client=client,
+    )
+
+    # ChatGPT Codex API only supports Responses format
+    model = OpenAIResponsesModel(model_name=model_config["name"], provider=provider)
+    setattr(model, "provider", provider)
+    return model
+
+
+def _register_model_types() -> List[Dict[str, Any]]:
+    """Register the chatgpt_oauth model type handler."""
+    return [{"type": "chatgpt_oauth", "handler": _create_chatgpt_oauth_model}]
+
+
 register_callback("custom_command_help", _custom_help)
 register_callback("custom_command", _handle_custom_command)
+register_callback("register_model_type", _register_model_types)