klaude-code 1.2.6__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205) hide show
  1. klaude_code/auth/__init__.py +24 -0
  2. klaude_code/auth/codex/__init__.py +20 -0
  3. klaude_code/auth/codex/exceptions.py +17 -0
  4. klaude_code/auth/codex/jwt_utils.py +45 -0
  5. klaude_code/auth/codex/oauth.py +229 -0
  6. klaude_code/auth/codex/token_manager.py +84 -0
  7. klaude_code/cli/auth_cmd.py +73 -0
  8. klaude_code/cli/config_cmd.py +91 -0
  9. klaude_code/cli/cost_cmd.py +338 -0
  10. klaude_code/cli/debug.py +78 -0
  11. klaude_code/cli/list_model.py +307 -0
  12. klaude_code/cli/main.py +233 -134
  13. klaude_code/cli/runtime.py +309 -117
  14. klaude_code/{version.py → cli/self_update.py} +114 -5
  15. klaude_code/cli/session_cmd.py +37 -21
  16. klaude_code/command/__init__.py +88 -27
  17. klaude_code/command/clear_cmd.py +8 -7
  18. klaude_code/command/command_abc.py +31 -31
  19. klaude_code/command/debug_cmd.py +79 -0
  20. klaude_code/command/export_cmd.py +19 -53
  21. klaude_code/command/export_online_cmd.py +154 -0
  22. klaude_code/command/fork_session_cmd.py +267 -0
  23. klaude_code/command/help_cmd.py +7 -8
  24. klaude_code/command/model_cmd.py +60 -10
  25. klaude_code/command/model_select.py +84 -0
  26. klaude_code/command/prompt-jj-describe.md +32 -0
  27. klaude_code/command/prompt_command.py +19 -11
  28. klaude_code/command/refresh_cmd.py +8 -10
  29. klaude_code/command/registry.py +139 -40
  30. klaude_code/command/release_notes_cmd.py +84 -0
  31. klaude_code/command/resume_cmd.py +111 -0
  32. klaude_code/command/status_cmd.py +104 -60
  33. klaude_code/command/terminal_setup_cmd.py +7 -9
  34. klaude_code/command/thinking_cmd.py +98 -0
  35. klaude_code/config/__init__.py +14 -6
  36. klaude_code/config/assets/__init__.py +1 -0
  37. klaude_code/config/assets/builtin_config.yaml +303 -0
  38. klaude_code/config/builtin_config.py +38 -0
  39. klaude_code/config/config.py +378 -109
  40. klaude_code/config/select_model.py +117 -53
  41. klaude_code/config/thinking.py +269 -0
  42. klaude_code/{const/__init__.py → const.py} +50 -19
  43. klaude_code/core/agent.py +20 -28
  44. klaude_code/core/executor.py +327 -112
  45. klaude_code/core/manager/__init__.py +2 -4
  46. klaude_code/core/manager/llm_clients.py +1 -15
  47. klaude_code/core/manager/llm_clients_builder.py +10 -11
  48. klaude_code/core/manager/sub_agent_manager.py +37 -6
  49. klaude_code/core/prompt.py +63 -44
  50. klaude_code/core/prompts/prompt-claude-code.md +2 -13
  51. klaude_code/core/prompts/prompt-codex-gpt-5-1-codex-max.md +117 -0
  52. klaude_code/core/prompts/prompt-codex-gpt-5-2-codex.md +117 -0
  53. klaude_code/core/prompts/prompt-codex.md +9 -42
  54. klaude_code/core/prompts/prompt-minimal.md +12 -0
  55. klaude_code/core/prompts/{prompt-subagent-explore.md → prompt-sub-agent-explore.md} +16 -3
  56. klaude_code/core/prompts/{prompt-subagent-oracle.md → prompt-sub-agent-oracle.md} +1 -2
  57. klaude_code/core/prompts/prompt-sub-agent-web.md +51 -0
  58. klaude_code/core/reminders.py +283 -95
  59. klaude_code/core/task.py +113 -75
  60. klaude_code/core/tool/__init__.py +24 -31
  61. klaude_code/core/tool/file/_utils.py +36 -0
  62. klaude_code/core/tool/file/apply_patch.py +17 -25
  63. klaude_code/core/tool/file/apply_patch_tool.py +57 -77
  64. klaude_code/core/tool/file/diff_builder.py +151 -0
  65. klaude_code/core/tool/file/edit_tool.py +50 -63
  66. klaude_code/core/tool/file/move_tool.md +41 -0
  67. klaude_code/core/tool/file/move_tool.py +435 -0
  68. klaude_code/core/tool/file/read_tool.md +1 -1
  69. klaude_code/core/tool/file/read_tool.py +86 -86
  70. klaude_code/core/tool/file/write_tool.py +59 -69
  71. klaude_code/core/tool/report_back_tool.py +84 -0
  72. klaude_code/core/tool/shell/bash_tool.py +265 -22
  73. klaude_code/core/tool/shell/command_safety.py +3 -6
  74. klaude_code/core/tool/{memory → skill}/skill_tool.py +16 -26
  75. klaude_code/core/tool/sub_agent_tool.py +13 -2
  76. klaude_code/core/tool/todo/todo_write_tool.md +0 -157
  77. klaude_code/core/tool/todo/todo_write_tool.py +1 -1
  78. klaude_code/core/tool/todo/todo_write_tool_raw.md +182 -0
  79. klaude_code/core/tool/todo/update_plan_tool.py +1 -1
  80. klaude_code/core/tool/tool_abc.py +18 -0
  81. klaude_code/core/tool/tool_context.py +27 -12
  82. klaude_code/core/tool/tool_registry.py +7 -7
  83. klaude_code/core/tool/tool_runner.py +44 -36
  84. klaude_code/core/tool/truncation.py +29 -14
  85. klaude_code/core/tool/web/mermaid_tool.md +43 -0
  86. klaude_code/core/tool/web/mermaid_tool.py +2 -5
  87. klaude_code/core/tool/web/web_fetch_tool.md +1 -1
  88. klaude_code/core/tool/web/web_fetch_tool.py +112 -22
  89. klaude_code/core/tool/web/web_search_tool.md +23 -0
  90. klaude_code/core/tool/web/web_search_tool.py +130 -0
  91. klaude_code/core/turn.py +168 -66
  92. klaude_code/llm/__init__.py +2 -10
  93. klaude_code/llm/anthropic/client.py +190 -178
  94. klaude_code/llm/anthropic/input.py +39 -15
  95. klaude_code/llm/bedrock/__init__.py +3 -0
  96. klaude_code/llm/bedrock/client.py +60 -0
  97. klaude_code/llm/client.py +7 -21
  98. klaude_code/llm/codex/__init__.py +5 -0
  99. klaude_code/llm/codex/client.py +149 -0
  100. klaude_code/llm/google/__init__.py +3 -0
  101. klaude_code/llm/google/client.py +309 -0
  102. klaude_code/llm/google/input.py +215 -0
  103. klaude_code/llm/input_common.py +3 -9
  104. klaude_code/llm/openai_compatible/client.py +72 -164
  105. klaude_code/llm/openai_compatible/input.py +6 -4
  106. klaude_code/llm/openai_compatible/stream.py +273 -0
  107. klaude_code/llm/openai_compatible/tool_call_accumulator.py +17 -1
  108. klaude_code/llm/openrouter/client.py +89 -160
  109. klaude_code/llm/openrouter/input.py +18 -30
  110. klaude_code/llm/openrouter/reasoning.py +118 -0
  111. klaude_code/llm/registry.py +39 -7
  112. klaude_code/llm/responses/client.py +184 -171
  113. klaude_code/llm/responses/input.py +20 -1
  114. klaude_code/llm/usage.py +17 -12
  115. klaude_code/protocol/commands.py +17 -1
  116. klaude_code/protocol/events.py +31 -4
  117. klaude_code/protocol/llm_param.py +13 -10
  118. klaude_code/protocol/model.py +232 -29
  119. klaude_code/protocol/op.py +90 -1
  120. klaude_code/protocol/op_handler.py +35 -1
  121. klaude_code/protocol/sub_agent/__init__.py +117 -0
  122. klaude_code/protocol/sub_agent/explore.py +63 -0
  123. klaude_code/protocol/sub_agent/oracle.py +91 -0
  124. klaude_code/protocol/sub_agent/task.py +61 -0
  125. klaude_code/protocol/sub_agent/web.py +79 -0
  126. klaude_code/protocol/tools.py +4 -2
  127. klaude_code/session/__init__.py +2 -2
  128. klaude_code/session/codec.py +71 -0
  129. klaude_code/session/export.py +293 -86
  130. klaude_code/session/selector.py +89 -67
  131. klaude_code/session/session.py +320 -309
  132. klaude_code/session/store.py +220 -0
  133. klaude_code/session/templates/export_session.html +595 -83
  134. klaude_code/session/templates/mermaid_viewer.html +926 -0
  135. klaude_code/skill/__init__.py +27 -0
  136. klaude_code/skill/assets/deslop/SKILL.md +17 -0
  137. klaude_code/skill/assets/dev-docs/SKILL.md +108 -0
  138. klaude_code/skill/assets/handoff/SKILL.md +39 -0
  139. klaude_code/skill/assets/jj-workspace/SKILL.md +20 -0
  140. klaude_code/skill/assets/skill-creator/SKILL.md +139 -0
  141. klaude_code/{core/tool/memory/skill_loader.py → skill/loader.py} +55 -15
  142. klaude_code/skill/manager.py +70 -0
  143. klaude_code/skill/system_skills.py +192 -0
  144. klaude_code/trace/__init__.py +20 -2
  145. klaude_code/trace/log.py +150 -5
  146. klaude_code/ui/__init__.py +4 -9
  147. klaude_code/ui/core/input.py +1 -1
  148. klaude_code/ui/core/stage_manager.py +7 -7
  149. klaude_code/ui/modes/debug/display.py +2 -1
  150. klaude_code/ui/modes/repl/__init__.py +3 -48
  151. klaude_code/ui/modes/repl/clipboard.py +5 -5
  152. klaude_code/ui/modes/repl/completers.py +487 -123
  153. klaude_code/ui/modes/repl/display.py +5 -4
  154. klaude_code/ui/modes/repl/event_handler.py +370 -117
  155. klaude_code/ui/modes/repl/input_prompt_toolkit.py +552 -105
  156. klaude_code/ui/modes/repl/key_bindings.py +146 -23
  157. klaude_code/ui/modes/repl/renderer.py +189 -99
  158. klaude_code/ui/renderers/assistant.py +9 -2
  159. klaude_code/ui/renderers/bash_syntax.py +178 -0
  160. klaude_code/ui/renderers/common.py +78 -0
  161. klaude_code/ui/renderers/developer.py +104 -48
  162. klaude_code/ui/renderers/diffs.py +87 -6
  163. klaude_code/ui/renderers/errors.py +11 -6
  164. klaude_code/ui/renderers/mermaid_viewer.py +57 -0
  165. klaude_code/ui/renderers/metadata.py +112 -76
  166. klaude_code/ui/renderers/sub_agent.py +92 -7
  167. klaude_code/ui/renderers/thinking.py +40 -18
  168. klaude_code/ui/renderers/tools.py +405 -227
  169. klaude_code/ui/renderers/user_input.py +73 -13
  170. klaude_code/ui/rich/__init__.py +10 -1
  171. klaude_code/ui/rich/cjk_wrap.py +228 -0
  172. klaude_code/ui/rich/code_panel.py +131 -0
  173. klaude_code/ui/rich/live.py +17 -0
  174. klaude_code/ui/rich/markdown.py +305 -170
  175. klaude_code/ui/rich/searchable_text.py +10 -13
  176. klaude_code/ui/rich/status.py +190 -49
  177. klaude_code/ui/rich/theme.py +135 -39
  178. klaude_code/ui/terminal/__init__.py +55 -0
  179. klaude_code/ui/terminal/color.py +1 -1
  180. klaude_code/ui/terminal/control.py +13 -22
  181. klaude_code/ui/terminal/notifier.py +44 -4
  182. klaude_code/ui/terminal/selector.py +658 -0
  183. klaude_code/ui/utils/common.py +0 -18
  184. klaude_code-1.8.0.dist-info/METADATA +377 -0
  185. klaude_code-1.8.0.dist-info/RECORD +219 -0
  186. {klaude_code-1.2.6.dist-info → klaude_code-1.8.0.dist-info}/entry_points.txt +1 -0
  187. klaude_code/command/diff_cmd.py +0 -138
  188. klaude_code/command/prompt-dev-docs-update.md +0 -56
  189. klaude_code/command/prompt-dev-docs.md +0 -46
  190. klaude_code/config/list_model.py +0 -162
  191. klaude_code/core/manager/agent_manager.py +0 -127
  192. klaude_code/core/prompts/prompt-subagent-webfetch.md +0 -46
  193. klaude_code/core/tool/file/multi_edit_tool.md +0 -42
  194. klaude_code/core/tool/file/multi_edit_tool.py +0 -199
  195. klaude_code/core/tool/memory/memory_tool.md +0 -16
  196. klaude_code/core/tool/memory/memory_tool.py +0 -462
  197. klaude_code/llm/openrouter/reasoning_handler.py +0 -209
  198. klaude_code/protocol/sub_agent.py +0 -348
  199. klaude_code/ui/utils/debouncer.py +0 -42
  200. klaude_code-1.2.6.dist-info/METADATA +0 -178
  201. klaude_code-1.2.6.dist-info/RECORD +0 -167
  202. /klaude_code/core/prompts/{prompt-subagent.md → prompt-sub-agent.md} +0 -0
  203. /klaude_code/core/tool/{memory → skill}/__init__.py +0 -0
  204. /klaude_code/core/tool/{memory → skill}/skill_tool.md +0 -0
  205. {klaude_code-1.2.6.dist-info → klaude_code-1.8.0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,149 @@
1
+ """Codex LLM client using ChatGPT subscription via OAuth."""
2
+
3
+ import json
4
+ from collections.abc import AsyncGenerator
5
+ from typing import override
6
+
7
+ import httpx
8
+ import openai
9
+ from openai import AsyncOpenAI
10
+ from openai.types.responses.response_create_params import ResponseCreateParamsStreaming
11
+
12
+ from klaude_code.auth.codex.exceptions import CodexNotLoggedInError
13
+ from klaude_code.auth.codex.oauth import CodexOAuth
14
+ from klaude_code.auth.codex.token_manager import CodexTokenManager
15
+ from klaude_code.llm.client import LLMClientABC
16
+ from klaude_code.llm.input_common import apply_config_defaults
17
+ from klaude_code.llm.registry import register
18
+ from klaude_code.llm.responses.client import parse_responses_stream
19
+ from klaude_code.llm.responses.input import convert_history_to_input, convert_tool_schema
20
+ from klaude_code.llm.usage import MetadataTracker
21
+ from klaude_code.protocol import llm_param, model
22
+ from klaude_code.trace import DebugType, log_debug
23
+
24
+
25
def build_payload(param: llm_param.LLMCallParameter) -> ResponseCreateParamsStreaming:
    """Translate an LLM call parameter object into a Codex Responses API request body."""
    converted_input = convert_history_to_input(param.input, param.model)
    converted_tools = convert_tool_schema(param.tools)
    cache_key = param.session_id or ""

    payload: ResponseCreateParamsStreaming = {
        "model": str(param.model),
        "tool_choice": "auto",
        "parallel_tool_calls": True,
        "include": [
            "reasoning.encrypted_content",
        ],
        "store": False,
        "stream": True,
        "input": converted_input,
        "instructions": param.system,
        "tools": converted_tools,
        "prompt_cache_key": cache_key,
        # Note: the Codex API does not support max_output_tokens or temperature.
    }

    thinking = param.thinking
    if thinking and thinking.reasoning_effort:
        payload["reasoning"] = {
            "effort": thinking.reasoning_effort,
            "summary": thinking.reasoning_summary,
        }

    if param.verbosity:
        payload["text"] = {"verbosity": param.verbosity}

    return payload
58
+
59
+
60
# Codex API configuration
# Base endpoint of the ChatGPT-subscription-backed Codex Responses API.
CODEX_BASE_URL = "https://chatgpt.com/backend-api/codex"
# Default headers sent on every Codex request: a Codex-CLI-style
# originator/User-Agent pair plus the experimental responses beta flag.
CODEX_HEADERS = {
    "originator": "codex_cli_rs",
    # Mocked Codex-style user agent string
    "User-Agent": "codex_cli_rs/0.0.0-klaude",
    "OpenAI-Beta": "responses=experimental",
}
68
+
69
+
70
@register(llm_param.LLMClientProtocol.CODEX)
class CodexClient(LLMClientABC):
    """LLM client for the Codex API using a ChatGPT subscription.

    Authentication relies on the OAuth state persisted by CodexTokenManager;
    the access token is refreshed transparently before each call when expired.
    """

    def __init__(self, config: llm_param.LLMConfigParameter):
        super().__init__(config)
        self._token_manager = CodexTokenManager()
        self._oauth = CodexOAuth(self._token_manager)

        # Fail fast at construction time when no OAuth state exists at all.
        if not self._token_manager.is_logged_in():
            raise CodexNotLoggedInError("Codex authentication required. Run 'klaude login codex' first.")

        self.client = self._create_client()

    def _create_client(self) -> AsyncOpenAI:
        """Create an OpenAI client configured for the Codex backend.

        Raises:
            CodexNotLoggedInError: if no stored OAuth state is available.
        """
        state = self._token_manager.get_state()
        if state is None:
            raise CodexNotLoggedInError("Not logged in to Codex. Run 'klaude login codex' first.")

        return AsyncOpenAI(
            api_key=state.access_token,
            base_url=CODEX_BASE_URL,
            # Generous read timeout: Codex responses can stream for minutes.
            timeout=httpx.Timeout(300.0, connect=15.0, read=285.0),
            default_headers={
                **CODEX_HEADERS,
                "chatgpt-account-id": state.account_id,
            },
        )

    def _ensure_valid_token(self) -> None:
        """Ensure the access token is valid, refreshing it when expired.

        Raises:
            CodexNotLoggedInError: if no stored OAuth state is available.
        """
        state = self._token_manager.get_state()
        if state is None:
            raise CodexNotLoggedInError("Not logged in to Codex. Run 'klaude login codex' first.")

        if state.is_expired():
            self._oauth.refresh()
            # Recreate client so subsequent requests carry the refreshed token.
            self.client = self._create_client()

    @classmethod
    @override
    def create(cls, config: llm_param.LLMConfigParameter) -> "LLMClientABC":
        return cls(config)

    @override
    async def call(self, param: llm_param.LLMCallParameter) -> AsyncGenerator[model.ConversationItem]:
        """Stream one Codex completion as ConversationItems."""
        # Ensure token is valid before API call
        self._ensure_valid_token()

        param = apply_config_defaults(param, self.get_llm_config())

        metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)

        payload = build_payload(param)

        session_id = param.session_id or ""
        extra_headers: dict[str, str] = {}
        if session_id:
            # Must send conversation_id/session_id headers to improve ChatGPT backend prompt cache hit rate.
            extra_headers["conversation_id"] = session_id
            extra_headers["session_id"] = session_id

        log_debug(
            json.dumps(payload, ensure_ascii=False, default=str),
            style="yellow",
            debug_type=DebugType.LLM_PAYLOAD,
        )
        try:
            stream = await self.client.responses.create(
                **payload,
                extra_headers=extra_headers,
            )
        except (openai.OpenAIError, httpx.HTTPError) as e:
            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
            # Emit tracked metadata even on failure, matching GoogleClient.
            yield metadata_tracker.finalize()
            return

        try:
            async for item in parse_responses_stream(stream, param, metadata_tracker):
                yield item
        except (openai.OpenAIError, httpx.HTTPError) as e:
            # Surface mid-stream transport/API failures as a stream error item
            # instead of propagating, consistent with GoogleClient.call.
            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
            yield metadata_tracker.finalize()
@@ -0,0 +1,3 @@
1
from .client import GoogleClient

# Public API of the Google (Gemini) LLM client package.
__all__ = ["GoogleClient"]
@@ -0,0 +1,309 @@
1
+ # pyright: reportUnknownMemberType=false
2
+ # pyright: reportUnknownVariableType=false
3
+ # pyright: reportUnknownArgumentType=false
4
+ # pyright: reportAttributeAccessIssue=false
5
+
6
+ import json
7
+ from collections.abc import AsyncGenerator, AsyncIterator
8
+ from typing import Any, cast, override
9
+ from uuid import uuid4
10
+
11
+ import httpx
12
+ from google.genai import Client
13
+ from google.genai.errors import APIError, ClientError, ServerError
14
+ from google.genai.types import (
15
+ FunctionCallingConfig,
16
+ FunctionCallingConfigMode,
17
+ GenerateContentConfig,
18
+ HttpOptions,
19
+ ThinkingConfig,
20
+ ToolConfig,
21
+ UsageMetadata,
22
+ )
23
+
24
+ from klaude_code.llm.client import LLMClientABC
25
+ from klaude_code.llm.google.input import convert_history_to_contents, convert_tool_schema
26
+ from klaude_code.llm.input_common import apply_config_defaults
27
+ from klaude_code.llm.registry import register
28
+ from klaude_code.llm.usage import MetadataTracker
29
+ from klaude_code.protocol import llm_param, model
30
+ from klaude_code.trace import DebugType, log_debug
31
+
32
+
33
def _build_config(param: llm_param.LLMCallParameter) -> GenerateContentConfig:
    """Assemble the Gemini GenerateContentConfig for a single call."""
    tools = convert_tool_schema(param.tools)

    fn_config: ToolConfig | None = None
    if tools:
        # Gemini streams tool args; keep streaming enabled to maximize fidelity.
        fn_config = ToolConfig(
            function_calling_config=FunctionCallingConfig(
                mode=FunctionCallingConfigMode.AUTO,
                stream_function_call_arguments=True,
            )
        )

    thinking: ThinkingConfig | None = None
    if param.thinking and param.thinking.type == "enabled":
        thinking = ThinkingConfig(
            include_thoughts=True,
            thinking_budget=param.thinking.budget_tokens,
        )

    return GenerateContentConfig(
        system_instruction=param.system,
        temperature=param.temperature,
        max_output_tokens=param.max_tokens,
        tools=tools or None,
        tool_config=fn_config,
        thinking_config=thinking,
    )
61
+
62
+
63
def _usage_from_metadata(
    usage: UsageMetadata | None,
    *,
    context_limit: int | None,
    max_tokens: int | None,
) -> model.Usage | None:
    """Map Gemini UsageMetadata onto the internal Usage model (None passes through)."""
    if usage is None:
        return None

    # Missing counters are treated as zero.
    cached_tokens = usage.cached_content_token_count or 0
    prompt_tokens = usage.prompt_token_count or 0
    reply_tokens = usage.response_token_count or 0
    thought_tokens = usage.thoughts_token_count or 0

    # Fall back to summing the individual counters when the total is absent.
    reported_total = usage.total_token_count
    context_size = (
        reported_total
        if reported_total is not None
        else prompt_tokens + cached_tokens + reply_tokens + thought_tokens
    )

    return model.Usage(
        input_tokens=prompt_tokens + cached_tokens,
        cached_tokens=cached_tokens,
        output_tokens=reply_tokens + thought_tokens,
        reasoning_tokens=thought_tokens,
        context_size=context_size,
        context_limit=context_limit,
        max_tokens=max_tokens,
    )
90
+
91
+
92
+ def _partial_arg_value(partial: Any) -> Any:
93
+ if getattr(partial, "string_value", None) is not None:
94
+ return partial.string_value
95
+ if getattr(partial, "number_value", None) is not None:
96
+ return partial.number_value
97
+ if getattr(partial, "bool_value", None) is not None:
98
+ return partial.bool_value
99
+ if getattr(partial, "null_value", None) is not None:
100
+ return None
101
+ return None
102
+
103
+
104
+ def _merge_partial_args(dst: dict[str, Any], partial_args: list[Any] | None) -> None:
105
+ if not partial_args:
106
+ return
107
+ for partial in partial_args:
108
+ json_path = getattr(partial, "json_path", None)
109
+ if not isinstance(json_path, str) or not json_path.startswith("$."):
110
+ continue
111
+ key = json_path[2:]
112
+ if not key or any(ch in key for ch in "[]"):
113
+ continue
114
+ dst[key] = _partial_arg_value(partial)
115
+
116
+
117
async def parse_google_stream(
    stream: AsyncIterator[Any],
    param: llm_param.LLMCallParameter,
    metadata_tracker: MetadataTracker,
) -> AsyncGenerator[model.ConversationItem]:
    """Translate a Gemini streaming response into ConversationItem events.

    Yields a StartItem once the first chunk arrives, text/reasoning deltas as
    they stream, consolidated reasoning/assistant/tool-call items after the
    stream ends, and finally the metadata item from metadata_tracker.finalize().
    """
    response_id: str | None = None
    started = False

    # Accumulators for the consolidated items emitted after the stream ends.
    accumulated_text: list[str] = []
    accumulated_thoughts: list[str] = []
    thought_signature: str | None = None

    # Track tool calls where args arrive as partial updates.
    partial_args_by_call: dict[str, dict[str, Any]] = {}
    started_tool_calls: dict[str, str] = {}  # call_id -> name
    started_tool_items: set[str] = set()
    emitted_tool_items: set[str] = set()

    last_usage_metadata: UsageMetadata | None = None

    async for chunk in stream:
        log_debug(
            chunk.model_dump_json(exclude_none=True),
            style="blue",
            debug_type=DebugType.LLM_STREAM,
        )

        # Adopt the first chunk's response id (or synthesize one) and signal start.
        if response_id is None:
            response_id = getattr(chunk, "response_id", None) or uuid4().hex
        assert response_id is not None
        if not started:
            started = True
            yield model.StartItem(response_id=response_id)

        # Usage metadata may appear on several chunks; keep only the latest.
        if getattr(chunk, "usage_metadata", None) is not None:
            last_usage_metadata = chunk.usage_metadata

        # Only the first candidate is consumed.
        candidates = getattr(chunk, "candidates", None) or []
        candidate0 = candidates[0] if candidates else None
        content = getattr(candidate0, "content", None) if candidate0 else None
        parts = getattr(content, "parts", None) if content else None
        if not parts:
            continue

        for part in parts:
            if getattr(part, "text", None) is not None:
                metadata_tracker.record_token()
                text = part.text
                if getattr(part, "thought", False) is True:
                    # Thought (reasoning) text streams separately from answer text.
                    accumulated_thoughts.append(text)
                    if getattr(part, "thought_signature", None):
                        thought_signature = part.thought_signature
                    yield model.ReasoningTextDelta(content=text, response_id=response_id)
                else:
                    accumulated_text.append(text)
                    yield model.AssistantMessageDelta(content=text, response_id=response_id)

            function_call = getattr(part, "function_call", None)
            if function_call is None:
                continue

            metadata_tracker.record_token()
            call_id = getattr(function_call, "id", None) or uuid4().hex
            name = getattr(function_call, "name", None) or ""
            started_tool_calls.setdefault(call_id, name)

            if call_id not in started_tool_items:
                started_tool_items.add(call_id)
                yield model.ToolCallStartItem(response_id=response_id, call_id=call_id, name=name)

            # Complete args delivered in one chunk: emit the tool call immediately.
            args_obj = getattr(function_call, "args", None)
            if args_obj is not None:
                emitted_tool_items.add(call_id)
                yield model.ToolCallItem(
                    response_id=response_id,
                    call_id=call_id,
                    name=name,
                    arguments=json.dumps(args_obj, ensure_ascii=False),
                )
                continue

            # Otherwise merge partial-arg fragments until will_continue is False.
            partial_args = getattr(function_call, "partial_args", None)
            if partial_args is not None:
                acc = partial_args_by_call.setdefault(call_id, {})
                _merge_partial_args(acc, partial_args)

            will_continue = getattr(function_call, "will_continue", None)
            if will_continue is False and call_id in partial_args_by_call and call_id not in emitted_tool_items:
                emitted_tool_items.add(call_id)
                yield model.ToolCallItem(
                    response_id=response_id,
                    call_id=call_id,
                    name=name,
                    arguments=json.dumps(partial_args_by_call[call_id], ensure_ascii=False),
                )

    # Flush any pending tool calls that never produced args.
    for call_id, name in started_tool_calls.items():
        if call_id in emitted_tool_items:
            continue
        args = partial_args_by_call.get(call_id, {})
        emitted_tool_items.add(call_id)
        yield model.ToolCallItem(
            response_id=response_id,
            call_id=call_id,
            name=name,
            arguments=json.dumps(args, ensure_ascii=False),
        )

    # Emit consolidated reasoning text, plus its signature when one was seen.
    if accumulated_thoughts:
        metadata_tracker.record_token()
        yield model.ReasoningTextItem(
            content="".join(accumulated_thoughts),
            response_id=response_id,
            model=str(param.model),
        )
    if thought_signature:
        yield model.ReasoningEncryptedItem(
            encrypted_content=thought_signature,
            response_id=response_id,
            model=str(param.model),
            format="google_thought_signature",
        )

    # Emit the consolidated assistant message.
    if accumulated_text:
        metadata_tracker.record_token()
        yield model.AssistantMessageItem(content="".join(accumulated_text), response_id=response_id)

    usage = _usage_from_metadata(last_usage_metadata, context_limit=param.context_limit, max_tokens=param.max_tokens)
    if usage is not None:
        metadata_tracker.set_usage(usage)
    metadata_tracker.set_model_name(str(param.model))
    metadata_tracker.set_response_id(response_id)
    yield metadata_tracker.finalize()
251
+
252
+
253
@register(llm_param.LLMClientProtocol.GOOGLE)
class GoogleClient(LLMClientABC):
    """LLM client backed by the google-genai SDK (Gemini models)."""

    def __init__(self, config: llm_param.LLMConfigParameter):
        super().__init__(config)
        options: HttpOptions | None = None
        if config.base_url:
            # If base_url already contains version path, don't append api_version.
            options = HttpOptions(base_url=str(config.base_url), api_version="")
        self.client = Client(api_key=config.api_key, http_options=options)

    @classmethod
    @override
    def create(cls, config: llm_param.LLMConfigParameter) -> "LLMClientABC":
        return cls(config)

    @override
    async def call(self, param: llm_param.LLMCallParameter) -> AsyncGenerator[model.ConversationItem]:
        """Stream one Gemini completion, yielding ConversationItems."""
        param = apply_config_defaults(param, self.get_llm_config())
        tracker = MetadataTracker(cost_config=self.get_llm_config().cost)

        model_name = str(param.model)
        contents = convert_history_to_contents(param.input, model_name=model_name)
        gen_config = _build_config(param)

        debug_payload = {
            "model": model_name,
            "contents": [c.model_dump(exclude_none=True) for c in contents],
            "config": gen_config.model_dump(exclude_none=True),
        }
        log_debug(
            json.dumps(debug_payload, ensure_ascii=False),
            style="yellow",
            debug_type=DebugType.LLM_PAYLOAD,
        )

        recoverable = (APIError, ClientError, ServerError, httpx.HTTPError)
        try:
            stream = await self.client.aio.models.generate_content_stream(
                model=model_name,
                contents=cast(Any, contents),
                config=gen_config,
            )
        except recoverable as e:
            # Request failed before any chunk arrived: report and close out.
            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
            yield tracker.finalize()
            return

        try:
            async for item in parse_google_stream(stream, param=param, metadata_tracker=tracker):
                yield item
        except recoverable as e:
            # Mid-stream failure: surface the error and still emit metadata.
            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
            yield tracker.finalize()
@@ -0,0 +1,215 @@
1
+ # pyright: reportReturnType=false
2
+ # pyright: reportArgumentType=false
3
+ # pyright: reportUnknownMemberType=false
4
+ # pyright: reportAttributeAccessIssue=false
5
+
6
+ import json
7
+ from base64 import b64decode
8
+ from binascii import Error as BinasciiError
9
+ from typing import Any
10
+
11
+ from google.genai import types
12
+
13
+ from klaude_code.llm.input_common import AssistantGroup, ToolGroup, UserGroup, merge_reminder_text, parse_message_groups
14
+ from klaude_code.protocol import llm_param, model
15
+
16
+
17
def _data_url_to_blob(url: str) -> types.Blob:
    """Decode a base64 ``data:`` URL into a Gemini Blob.

    Raises ValueError for any malformed input (missing separator/prefix/marker,
    empty payload, or invalid base64).
    """
    header, sep, encoded = url.partition(",")
    if not sep:
        raise ValueError("Invalid data URL for image: missing comma separator")
    if not header.startswith("data:"):
        raise ValueError("Invalid data URL for image: missing data: prefix")
    if ";base64" not in header:
        raise ValueError("Invalid data URL for image: missing base64 marker")

    # Media type sits between "data:" and the first ";".
    mime = header[len("data:") :].split(";", 1)[0]
    payload = encoded.strip()
    if not payload:
        raise ValueError("Inline image data is empty")

    try:
        raw = b64decode(payload, validate=True)
    except (BinasciiError, ValueError) as exc:
        raise ValueError("Inline image data is not valid base64") from exc

    return types.Blob(data=raw, mime_type=mime)
38
+
39
+
40
def _image_part_to_part(image: model.ImageURLPart) -> types.Part:
    """Convert an image part to a Gemini Part: inline blob for data URLs, file reference otherwise."""
    source = image.image_url.url
    if not source.startswith("data:"):
        # Best-effort: Gemini supports file URIs, and may accept public HTTPS URLs.
        return types.Part(file_data=types.FileData(file_uri=source))
    return types.Part(inline_data=_data_url_to_blob(source))
46
+
47
+
48
def _user_group_to_content(group: UserGroup) -> types.Content:
    """Build a user-role Content from the group's text and image parts."""
    parts: list[types.Part] = [types.Part(text=f"{text}\n") for text in group.text_parts]
    parts.extend(_image_part_to_part(image) for image in group.images)
    # Keep at least one (empty) text part when the group carries nothing.
    if not parts:
        parts = [types.Part(text="")]
    return types.Content(role="user", parts=parts)
57
+
58
+
59
def _tool_groups_to_content(groups: list[ToolGroup], model_name: str | None) -> list[types.Content]:
    """Convert a batch of tool results into Gemini user-role Content objects.

    All function responses are packed into a single Content; for models that
    cannot carry images inside a FunctionResponse, images are emitted as extra
    user-role Contents appended afterwards.
    """
    # NOTE(review): multimodal function responses are assumed to need a
    # "gemini-3" model — confirm as new model families appear.
    supports_multimodal_function_response = bool(model_name and "gemini-3" in model_name.lower())

    response_parts: list[types.Part] = []
    extra_image_contents: list[types.Content] = []

    for group in groups:
        # Fold reminder texts into the tool output (placeholder when empty).
        merged_text = merge_reminder_text(
            group.tool_result.output or "<system-reminder>Tool ran without output or errors</system-reminder>",
            group.reminder_texts,
        )
        has_text = merged_text.strip() != ""

        images = list(group.tool_result.images or []) + list(group.reminder_images)
        image_parts: list[types.Part] = []
        for image in images:
            try:
                image_parts.append(_image_part_to_part(image))
            except ValueError:
                # Skip invalid data URLs
                continue

        has_images = len(image_parts) > 0
        # Text wins; otherwise point at the attached image; otherwise empty.
        response_value = merged_text if has_text else "(see attached image)" if has_images else ""
        response_payload = (
            {"error": response_value} if group.tool_result.status == "error" else {"output": response_value}
        )

        function_response = types.FunctionResponse(
            id=group.tool_result.call_id,
            name=group.tool_result.tool_name or "",
            response=response_payload,
            parts=image_parts if (has_images and supports_multimodal_function_response) else None,
        )
        response_parts.append(types.Part(function_response=function_response))

        # Models without multimodal responses get images as a separate user message.
        if has_images and not supports_multimodal_function_response:
            extra_image_contents.append(
                types.Content(role="user", parts=[types.Part(text="Tool result image:"), *image_parts])
            )

    contents: list[types.Content] = []
    if response_parts:
        contents.append(types.Content(role="user", parts=response_parts))
    contents.extend(extra_image_contents)
    return contents
105
+
106
+
107
def _assistant_group_to_content(group: AssistantGroup, model_name: str | None) -> types.Content | None:
    """Convert an assistant turn (reasoning, text, tool calls) into a model-role Content.

    Returns None when the group yields no parts at all.
    """
    parts: list[types.Part] = []

    # Reasoning produced by a different model is "degraded" to plain <thinking>
    # text, since its signatures cannot be replayed against the current model.
    degraded_thinking_texts: list[str] = []
    pending_thought_text: str | None = None
    pending_thought_signature: str | None = None

    for item in group.reasoning_items:
        match item:
            case model.ReasoningTextItem():
                if not item.content:
                    continue
                if model_name is not None and item.model is not None and item.model != model_name:
                    degraded_thinking_texts.append(item.content)
                else:
                    pending_thought_text = item.content
            case model.ReasoningEncryptedItem():
                # Only pair a signature with preceding thought text when it was
                # produced by the same model and uses a google signature format.
                if not (
                    model_name is not None
                    and item.model == model_name
                    and item.encrypted_content
                    and (item.format or "").startswith("google")
                    and pending_thought_text
                ):
                    continue
                pending_thought_signature = item.encrypted_content
                parts.append(
                    types.Part(
                        text=pending_thought_text,
                        thought=True,
                        thought_signature=pending_thought_signature,
                    )
                )
                pending_thought_text = None
                pending_thought_signature = None

    # Thought text that never received a signature is still emitted as a thought part.
    if pending_thought_text:
        parts.append(
            types.Part(
                text=pending_thought_text,
                thought=True,
                thought_signature=pending_thought_signature,
            )
        )

    if degraded_thinking_texts:
        parts.insert(0, types.Part(text="<thinking>\n" + "\n".join(degraded_thinking_texts) + "\n</thinking>"))

    if group.text_content:
        parts.append(types.Part(text=group.text_content))

    for tc in group.tool_calls:
        args: dict[str, Any]
        if tc.arguments:
            try:
                args = json.loads(tc.arguments)
            except json.JSONDecodeError:
                # Unparseable arguments are preserved verbatim under "_raw".
                args = {"_raw": tc.arguments}
        else:
            args = {}
        parts.append(types.Part(function_call=types.FunctionCall(id=tc.call_id, name=tc.name, args=args)))

    if not parts:
        return None
    return types.Content(role="model", parts=parts)
172
+
173
+
174
def convert_history_to_contents(
    history: list[model.ConversationItem],
    model_name: str | None,
) -> list[types.Content]:
    """Convert conversation history into Gemini Content objects.

    Consecutive tool results are buffered and flushed as one batch so they
    land together before the next user/assistant turn.
    """
    result: list[types.Content] = []
    buffered_tools: list[ToolGroup] = []

    def _flush() -> None:
        # Emit any buffered tool results before a role switch.
        if buffered_tools:
            result.extend(_tool_groups_to_content(buffered_tools, model_name=model_name))
            buffered_tools.clear()

    for grp in parse_message_groups(history):
        if isinstance(grp, ToolGroup):
            buffered_tools.append(grp)
        elif isinstance(grp, UserGroup):
            _flush()
            result.append(_user_group_to_content(grp))
        elif isinstance(grp, AssistantGroup):
            _flush()
            converted = _assistant_group_to_content(grp, model_name=model_name)
            if converted is not None:
                result.append(converted)

    _flush()
    return result
202
+
203
+
204
def convert_tool_schema(tools: list[llm_param.ToolSchema] | None) -> list[types.Tool]:
    """Wrap tool schemas in a single Gemini Tool holding all function declarations."""
    if not tools:
        return []
    declarations: list[types.FunctionDeclaration] = []
    for schema in tools:
        declarations.append(
            types.FunctionDeclaration(
                name=schema.name,
                description=schema.description,
                parameters_json_schema=schema.parameters,
            )
        )
    return [types.Tool(function_declarations=declarations)]