code-puppy 0.0.348__py3-none-any.whl → 0.0.372__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. code_puppy/agents/__init__.py +8 -0
  2. code_puppy/agents/agent_manager.py +272 -1
  3. code_puppy/agents/agent_pack_leader.py +383 -0
  4. code_puppy/agents/agent_qa_kitten.py +12 -7
  5. code_puppy/agents/agent_terminal_qa.py +323 -0
  6. code_puppy/agents/base_agent.py +11 -8
  7. code_puppy/agents/event_stream_handler.py +101 -8
  8. code_puppy/agents/pack/__init__.py +34 -0
  9. code_puppy/agents/pack/bloodhound.py +304 -0
  10. code_puppy/agents/pack/husky.py +321 -0
  11. code_puppy/agents/pack/retriever.py +393 -0
  12. code_puppy/agents/pack/shepherd.py +348 -0
  13. code_puppy/agents/pack/terrier.py +287 -0
  14. code_puppy/agents/pack/watchdog.py +367 -0
  15. code_puppy/agents/subagent_stream_handler.py +276 -0
  16. code_puppy/api/__init__.py +13 -0
  17. code_puppy/api/app.py +169 -0
  18. code_puppy/api/main.py +21 -0
  19. code_puppy/api/pty_manager.py +446 -0
  20. code_puppy/api/routers/__init__.py +12 -0
  21. code_puppy/api/routers/agents.py +36 -0
  22. code_puppy/api/routers/commands.py +217 -0
  23. code_puppy/api/routers/config.py +74 -0
  24. code_puppy/api/routers/sessions.py +232 -0
  25. code_puppy/api/templates/terminal.html +361 -0
  26. code_puppy/api/websocket.py +154 -0
  27. code_puppy/callbacks.py +73 -0
  28. code_puppy/chatgpt_codex_client.py +53 -0
  29. code_puppy/claude_cache_client.py +294 -41
  30. code_puppy/command_line/add_model_menu.py +13 -4
  31. code_puppy/command_line/agent_menu.py +662 -0
  32. code_puppy/command_line/core_commands.py +89 -112
  33. code_puppy/command_line/model_picker_completion.py +3 -20
  34. code_puppy/command_line/model_settings_menu.py +21 -3
  35. code_puppy/config.py +145 -70
  36. code_puppy/gemini_model.py +706 -0
  37. code_puppy/http_utils.py +6 -3
  38. code_puppy/messaging/__init__.py +15 -0
  39. code_puppy/messaging/messages.py +27 -0
  40. code_puppy/messaging/queue_console.py +1 -1
  41. code_puppy/messaging/rich_renderer.py +36 -1
  42. code_puppy/messaging/spinner/__init__.py +20 -2
  43. code_puppy/messaging/subagent_console.py +461 -0
  44. code_puppy/model_factory.py +50 -16
  45. code_puppy/model_switching.py +63 -0
  46. code_puppy/model_utils.py +27 -24
  47. code_puppy/models.json +12 -12
  48. code_puppy/plugins/antigravity_oauth/antigravity_model.py +206 -172
  49. code_puppy/plugins/antigravity_oauth/register_callbacks.py +15 -8
  50. code_puppy/plugins/antigravity_oauth/transport.py +236 -45
  51. code_puppy/plugins/chatgpt_oauth/register_callbacks.py +2 -2
  52. code_puppy/plugins/claude_code_oauth/register_callbacks.py +2 -30
  53. code_puppy/plugins/claude_code_oauth/utils.py +4 -1
  54. code_puppy/plugins/frontend_emitter/__init__.py +25 -0
  55. code_puppy/plugins/frontend_emitter/emitter.py +121 -0
  56. code_puppy/plugins/frontend_emitter/register_callbacks.py +261 -0
  57. code_puppy/prompts/antigravity_system_prompt.md +1 -0
  58. code_puppy/pydantic_patches.py +52 -0
  59. code_puppy/status_display.py +6 -2
  60. code_puppy/tools/__init__.py +37 -1
  61. code_puppy/tools/agent_tools.py +83 -33
  62. code_puppy/tools/browser/__init__.py +37 -0
  63. code_puppy/tools/browser/browser_control.py +6 -6
  64. code_puppy/tools/browser/browser_interactions.py +21 -20
  65. code_puppy/tools/browser/browser_locators.py +9 -9
  66. code_puppy/tools/browser/browser_manager.py +316 -0
  67. code_puppy/tools/browser/browser_navigation.py +7 -7
  68. code_puppy/tools/browser/browser_screenshot.py +78 -140
  69. code_puppy/tools/browser/browser_scripts.py +15 -13
  70. code_puppy/tools/browser/chromium_terminal_manager.py +259 -0
  71. code_puppy/tools/browser/terminal_command_tools.py +521 -0
  72. code_puppy/tools/browser/terminal_screenshot_tools.py +556 -0
  73. code_puppy/tools/browser/terminal_tools.py +525 -0
  74. code_puppy/tools/command_runner.py +292 -101
  75. code_puppy/tools/common.py +176 -1
  76. code_puppy/tools/display.py +84 -0
  77. code_puppy/tools/subagent_context.py +158 -0
  78. {code_puppy-0.0.348.data → code_puppy-0.0.372.data}/data/code_puppy/models.json +12 -12
  79. {code_puppy-0.0.348.dist-info → code_puppy-0.0.372.dist-info}/METADATA +17 -16
  80. {code_puppy-0.0.348.dist-info → code_puppy-0.0.372.dist-info}/RECORD +84 -51
  81. code_puppy/prompts/codex_system_prompt.md +0 -310
  82. code_puppy/tools/browser/camoufox_manager.py +0 -235
  83. code_puppy/tools/browser/vqa_agent.py +0 -90
  84. {code_puppy-0.0.348.data → code_puppy-0.0.372.data}/data/code_puppy/models_dev_api.json +0 -0
  85. {code_puppy-0.0.348.dist-info → code_puppy-0.0.372.dist-info}/WHEEL +0 -0
  86. {code_puppy-0.0.348.dist-info → code_puppy-0.0.372.dist-info}/entry_points.txt +0 -0
  87. {code_puppy-0.0.348.dist-info → code_puppy-0.0.372.dist-info}/licenses/LICENSE +0 -0
code_puppy/model_utils.py CHANGED
@@ -1,7 +1,7 @@
 """Model-related utilities shared across agents and tools.
 
 This module centralizes logic for handling model-specific behaviors,
-particularly for claude-code and chatgpt-codex models which require special prompt handling.
+particularly for claude-code and antigravity models which require special prompt handling.
 """
 
 import pathlib
@@ -11,27 +11,30 @@ from typing import Optional
 # The instruction override used for claude-code models
 CLAUDE_CODE_INSTRUCTIONS = "You are Claude Code, Anthropic's official CLI for Claude."
 
-# Path to the Codex system prompt file
-_CODEX_PROMPT_PATH = (
-    pathlib.Path(__file__).parent / "prompts" / "codex_system_prompt.md"
+# Path to the Antigravity system prompt file
+_ANTIGRAVITY_PROMPT_PATH = (
+    pathlib.Path(__file__).parent / "prompts" / "antigravity_system_prompt.md"
 )
 
-# Cache for the loaded Codex prompt
-_codex_prompt_cache: Optional[str] = None
+# Cache for the loaded Antigravity prompt
+_antigravity_prompt_cache: Optional[str] = None
 
 
-def _load_codex_prompt() -> str:
-    """Load the Codex system prompt from file, with caching."""
-    global _codex_prompt_cache
-    if _codex_prompt_cache is None:
-        if _CODEX_PROMPT_PATH.exists():
-            _codex_prompt_cache = _CODEX_PROMPT_PATH.read_text(encoding="utf-8")
+def _load_antigravity_prompt() -> str:
+    """Load the Antigravity system prompt from file, with caching."""
+    global _antigravity_prompt_cache
+    if _antigravity_prompt_cache is None:
+        if _ANTIGRAVITY_PROMPT_PATH.exists():
+            _antigravity_prompt_cache = _ANTIGRAVITY_PROMPT_PATH.read_text(
+                encoding="utf-8"
+            )
         else:
             # Fallback to a minimal prompt if file is missing
-            _codex_prompt_cache = (
-                "You are Codex, a coding agent running in the Codex CLI."
+            _antigravity_prompt_cache = (
+                "You are Antigravity, a powerful agentic AI coding assistant "
+                "designed by the Google Deepmind team."
             )
-    return _codex_prompt_cache
+    return _antigravity_prompt_cache
 
 
 @dataclass
@@ -54,9 +57,9 @@ def is_claude_code_model(model_name: str) -> bool:
     return model_name.startswith("claude-code")
 
 
-def is_chatgpt_codex_model(model_name: str) -> bool:
-    """Check if a model is a ChatGPT Codex model."""
-    return model_name.startswith("chatgpt-")
+def is_antigravity_model(model_name: str) -> bool:
+    """Check if a model is an Antigravity model."""
+    return model_name.startswith("antigravity-")
 
 
 def prepare_prompt_for_model(
@@ -77,8 +80,8 @@ def prepare_prompt_for_model(
         is_claude_code=True,
     )
 
-    # Handle ChatGPT Codex models
-    if is_chatgpt_codex_model(model_name):
+    # Handle Antigravity models
+    if is_antigravity_model(model_name):
         modified_prompt = user_prompt
         if prepend_system_to_user and system_prompt:
             modified_prompt = (
@@ -91,7 +94,7 @@ def prepare_prompt_for_model(
                 f"{user_prompt}"
             )
         return PreparedPrompt(
-            instructions=_load_codex_prompt(),
+            instructions=_load_antigravity_prompt(),
             user_prompt=modified_prompt,
             is_claude_code=False,
         )
@@ -108,6 +111,6 @@ def get_claude_code_instructions() -> str:
     return CLAUDE_CODE_INSTRUCTIONS
 
 
-def get_chatgpt_codex_instructions() -> str:
-    """Get the Codex system prompt for ChatGPT Codex models."""
-    return _load_codex_prompt()
+def get_antigravity_instructions() -> str:
+    """Get the Antigravity system prompt for Antigravity models."""
+    return _load_antigravity_prompt()
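
For orientation, the renamed helpers keep the same call pattern as the old chatgpt-codex ones. Below is a minimal, hypothetical usage sketch assuming the prepare_prompt_for_model signature implied by the hunks above (model_name, user_prompt, system_prompt, prepend_system_to_user); the model id and keyword defaults are assumptions, not taken from the package.

from code_puppy.model_utils import is_antigravity_model, prepare_prompt_for_model

MODEL = "antigravity-gemini-3"  # hypothetical id; only the "antigravity-" prefix check is shown in the diff

# Route a prompt through the model-specific handling (keyword args to avoid guessing order).
prepared = prepare_prompt_for_model(
    model_name=MODEL,
    user_prompt="Add type hints to utils.py",
    system_prompt="You are a helpful coding agent.",
    prepend_system_to_user=True,
)

if is_antigravity_model(MODEL):
    # prepared.instructions carries the cached Antigravity system prompt,
    # and prepared.user_prompt has the original system prompt folded into the user turn.
    print(prepared.instructions[:80])
    print(prepared.is_claude_code)  # False for antigravity models
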
code_puppy/models.json CHANGED
@@ -7,7 +7,7 @@
       "api_key": "$SYN_API_KEY"
     },
     "context_length": 200000,
-    "supported_settings": ["temperature", "seed"]
+    "supported_settings": ["temperature", "seed", "top_p"]
   },
   "synthetic-MiniMax-M2.1": {
     "type": "custom_openai",
@@ -17,7 +17,7 @@
       "api_key": "$SYN_API_KEY"
     },
     "context_length": 195000,
-    "supported_settings": ["temperature", "seed"]
+    "supported_settings": ["temperature", "seed", "top_p"]
   },
   "synthetic-Kimi-K2-Thinking": {
     "type": "custom_openai",
@@ -27,32 +27,32 @@
       "api_key": "$SYN_API_KEY"
     },
     "context_length": 262144,
-    "supported_settings": ["temperature", "seed"]
+    "supported_settings": ["temperature", "seed", "top_p"]
   },
   "Gemini-3": {
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 200000,
-    "supported_settings": ["temperature"]
+    "supported_settings": ["temperature", "top_p"]
   },
   "Gemini-3-Long-Context": {
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 1000000,
-    "supported_settings": ["temperature"]
+    "supported_settings": ["temperature", "top_p"]
   },
   "gpt-5.1": {
     "type": "openai",
     "name": "gpt-5.1",
     "context_length": 272000,
-    "supported_settings": ["reasoning_effort", "verbosity"],
+    "supported_settings": ["temperature", "top_p", "reasoning_effort", "verbosity"],
     "supports_xhigh_reasoning": false
   },
   "gpt-5.1-codex-api": {
     "type": "openai",
     "name": "gpt-5.1-codex",
     "context_length": 272000,
-    "supported_settings": ["reasoning_effort", "verbosity"],
+    "supported_settings": ["temperature", "top_p", "reasoning_effort", "verbosity"],
     "supports_xhigh_reasoning": true
   },
   "Cerebras-GLM-4.7": {
@@ -63,7 +63,7 @@
       "api_key": "$CEREBRAS_API_KEY"
     },
     "context_length": 131072,
-    "supported_settings": ["temperature", "seed"]
+    "supported_settings": ["temperature", "seed", "top_p"]
   },
   "claude-4-5-haiku": {
     "type": "anthropic",
@@ -87,24 +87,24 @@
     "type": "zai_coding",
     "name": "glm-4.6",
     "context_length": 200000,
-    "supported_settings": ["temperature"]
+    "supported_settings": ["temperature", "top_p"]
   },
   "zai-glm-4.6-api": {
     "type": "zai_api",
     "name": "glm-4.6",
     "context_length": 200000,
-    "supported_settings": ["temperature"]
+    "supported_settings": ["temperature", "top_p"]
   },
   "zai-glm-4.7-coding": {
     "type": "zai_coding",
     "name": "glm-4.7",
     "context_length": 200000,
-    "supported_settings": ["temperature"]
+    "supported_settings": ["temperature", "top_p"]
   },
   "zai-glm-4.7-api": {
     "type": "zai_api",
     "name": "glm-4.7",
     "context_length": 200000,
-    "supported_settings": ["temperature"]
+    "supported_settings": ["temperature", "top_p"]
   }
 }
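
The practical effect of the models.json change is that top_p can now pass through for these entries. The sketch below is illustrative only, not code taken from code_puppy: a hypothetical helper showing the kind of config-driven filtering that a supported_settings list enables.

import json
from pathlib import Path

def filter_model_settings(models_path: Path, model_key: str, requested: dict) -> dict:
    # Keep only the settings that models.json declares as supported for this model.
    models = json.loads(models_path.read_text(encoding="utf-8"))
    supported = set(models[model_key].get("supported_settings", []))
    return {key: value for key, value in requested.items() if key in supported}

# With the 0.0.372 models.json, top_p survives the filter for "Gemini-3";
# under 0.0.348 it would have been dropped.
settings = filter_model_settings(
    Path("code_puppy/models.json"),
    "Gemini-3",
    {"temperature": 0.2, "top_p": 0.9, "seed": 7},
)
print(settings)  # {'temperature': 0.2, 'top_p': 0.9} — seed is not listed for Gemini-3
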