klaude-code 1.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (167) hide show
  1. klaude_code/__init__.py +0 -0
  2. klaude_code/cli/__init__.py +1 -0
  3. klaude_code/cli/main.py +298 -0
  4. klaude_code/cli/runtime.py +331 -0
  5. klaude_code/cli/session_cmd.py +80 -0
  6. klaude_code/command/__init__.py +43 -0
  7. klaude_code/command/clear_cmd.py +20 -0
  8. klaude_code/command/command_abc.py +92 -0
  9. klaude_code/command/diff_cmd.py +138 -0
  10. klaude_code/command/export_cmd.py +86 -0
  11. klaude_code/command/help_cmd.py +51 -0
  12. klaude_code/command/model_cmd.py +43 -0
  13. klaude_code/command/prompt-dev-docs-update.md +56 -0
  14. klaude_code/command/prompt-dev-docs.md +46 -0
  15. klaude_code/command/prompt-init.md +45 -0
  16. klaude_code/command/prompt_command.py +69 -0
  17. klaude_code/command/refresh_cmd.py +43 -0
  18. klaude_code/command/registry.py +110 -0
  19. klaude_code/command/status_cmd.py +111 -0
  20. klaude_code/command/terminal_setup_cmd.py +252 -0
  21. klaude_code/config/__init__.py +11 -0
  22. klaude_code/config/config.py +177 -0
  23. klaude_code/config/list_model.py +162 -0
  24. klaude_code/config/select_model.py +67 -0
  25. klaude_code/const/__init__.py +133 -0
  26. klaude_code/core/__init__.py +0 -0
  27. klaude_code/core/agent.py +165 -0
  28. klaude_code/core/executor.py +485 -0
  29. klaude_code/core/manager/__init__.py +19 -0
  30. klaude_code/core/manager/agent_manager.py +127 -0
  31. klaude_code/core/manager/llm_clients.py +42 -0
  32. klaude_code/core/manager/llm_clients_builder.py +49 -0
  33. klaude_code/core/manager/sub_agent_manager.py +86 -0
  34. klaude_code/core/prompt.py +89 -0
  35. klaude_code/core/prompts/prompt-claude-code.md +98 -0
  36. klaude_code/core/prompts/prompt-codex.md +331 -0
  37. klaude_code/core/prompts/prompt-gemini.md +43 -0
  38. klaude_code/core/prompts/prompt-subagent-explore.md +27 -0
  39. klaude_code/core/prompts/prompt-subagent-oracle.md +23 -0
  40. klaude_code/core/prompts/prompt-subagent-webfetch.md +46 -0
  41. klaude_code/core/prompts/prompt-subagent.md +8 -0
  42. klaude_code/core/reminders.py +445 -0
  43. klaude_code/core/task.py +237 -0
  44. klaude_code/core/tool/__init__.py +75 -0
  45. klaude_code/core/tool/file/__init__.py +0 -0
  46. klaude_code/core/tool/file/apply_patch.py +492 -0
  47. klaude_code/core/tool/file/apply_patch_tool.md +1 -0
  48. klaude_code/core/tool/file/apply_patch_tool.py +204 -0
  49. klaude_code/core/tool/file/edit_tool.md +9 -0
  50. klaude_code/core/tool/file/edit_tool.py +274 -0
  51. klaude_code/core/tool/file/multi_edit_tool.md +42 -0
  52. klaude_code/core/tool/file/multi_edit_tool.py +199 -0
  53. klaude_code/core/tool/file/read_tool.md +14 -0
  54. klaude_code/core/tool/file/read_tool.py +326 -0
  55. klaude_code/core/tool/file/write_tool.md +8 -0
  56. klaude_code/core/tool/file/write_tool.py +146 -0
  57. klaude_code/core/tool/memory/__init__.py +0 -0
  58. klaude_code/core/tool/memory/memory_tool.md +16 -0
  59. klaude_code/core/tool/memory/memory_tool.py +462 -0
  60. klaude_code/core/tool/memory/skill_loader.py +245 -0
  61. klaude_code/core/tool/memory/skill_tool.md +24 -0
  62. klaude_code/core/tool/memory/skill_tool.py +97 -0
  63. klaude_code/core/tool/shell/__init__.py +0 -0
  64. klaude_code/core/tool/shell/bash_tool.md +43 -0
  65. klaude_code/core/tool/shell/bash_tool.py +123 -0
  66. klaude_code/core/tool/shell/command_safety.py +363 -0
  67. klaude_code/core/tool/sub_agent_tool.py +83 -0
  68. klaude_code/core/tool/todo/__init__.py +0 -0
  69. klaude_code/core/tool/todo/todo_write_tool.md +182 -0
  70. klaude_code/core/tool/todo/todo_write_tool.py +121 -0
  71. klaude_code/core/tool/todo/update_plan_tool.md +3 -0
  72. klaude_code/core/tool/todo/update_plan_tool.py +104 -0
  73. klaude_code/core/tool/tool_abc.py +25 -0
  74. klaude_code/core/tool/tool_context.py +106 -0
  75. klaude_code/core/tool/tool_registry.py +78 -0
  76. klaude_code/core/tool/tool_runner.py +252 -0
  77. klaude_code/core/tool/truncation.py +170 -0
  78. klaude_code/core/tool/web/__init__.py +0 -0
  79. klaude_code/core/tool/web/mermaid_tool.md +21 -0
  80. klaude_code/core/tool/web/mermaid_tool.py +76 -0
  81. klaude_code/core/tool/web/web_fetch_tool.md +8 -0
  82. klaude_code/core/tool/web/web_fetch_tool.py +159 -0
  83. klaude_code/core/turn.py +220 -0
  84. klaude_code/llm/__init__.py +21 -0
  85. klaude_code/llm/anthropic/__init__.py +3 -0
  86. klaude_code/llm/anthropic/client.py +221 -0
  87. klaude_code/llm/anthropic/input.py +200 -0
  88. klaude_code/llm/client.py +49 -0
  89. klaude_code/llm/input_common.py +239 -0
  90. klaude_code/llm/openai_compatible/__init__.py +3 -0
  91. klaude_code/llm/openai_compatible/client.py +211 -0
  92. klaude_code/llm/openai_compatible/input.py +109 -0
  93. klaude_code/llm/openai_compatible/tool_call_accumulator.py +80 -0
  94. klaude_code/llm/openrouter/__init__.py +3 -0
  95. klaude_code/llm/openrouter/client.py +200 -0
  96. klaude_code/llm/openrouter/input.py +160 -0
  97. klaude_code/llm/openrouter/reasoning_handler.py +209 -0
  98. klaude_code/llm/registry.py +22 -0
  99. klaude_code/llm/responses/__init__.py +3 -0
  100. klaude_code/llm/responses/client.py +216 -0
  101. klaude_code/llm/responses/input.py +167 -0
  102. klaude_code/llm/usage.py +109 -0
  103. klaude_code/protocol/__init__.py +4 -0
  104. klaude_code/protocol/commands.py +21 -0
  105. klaude_code/protocol/events.py +163 -0
  106. klaude_code/protocol/llm_param.py +147 -0
  107. klaude_code/protocol/model.py +287 -0
  108. klaude_code/protocol/op.py +89 -0
  109. klaude_code/protocol/op_handler.py +28 -0
  110. klaude_code/protocol/sub_agent.py +348 -0
  111. klaude_code/protocol/tools.py +15 -0
  112. klaude_code/session/__init__.py +4 -0
  113. klaude_code/session/export.py +624 -0
  114. klaude_code/session/selector.py +76 -0
  115. klaude_code/session/session.py +474 -0
  116. klaude_code/session/templates/export_session.html +1434 -0
  117. klaude_code/trace/__init__.py +3 -0
  118. klaude_code/trace/log.py +168 -0
  119. klaude_code/ui/__init__.py +91 -0
  120. klaude_code/ui/core/__init__.py +1 -0
  121. klaude_code/ui/core/display.py +103 -0
  122. klaude_code/ui/core/input.py +71 -0
  123. klaude_code/ui/core/stage_manager.py +55 -0
  124. klaude_code/ui/modes/__init__.py +1 -0
  125. klaude_code/ui/modes/debug/__init__.py +1 -0
  126. klaude_code/ui/modes/debug/display.py +36 -0
  127. klaude_code/ui/modes/exec/__init__.py +1 -0
  128. klaude_code/ui/modes/exec/display.py +63 -0
  129. klaude_code/ui/modes/repl/__init__.py +51 -0
  130. klaude_code/ui/modes/repl/clipboard.py +152 -0
  131. klaude_code/ui/modes/repl/completers.py +429 -0
  132. klaude_code/ui/modes/repl/display.py +60 -0
  133. klaude_code/ui/modes/repl/event_handler.py +375 -0
  134. klaude_code/ui/modes/repl/input_prompt_toolkit.py +198 -0
  135. klaude_code/ui/modes/repl/key_bindings.py +170 -0
  136. klaude_code/ui/modes/repl/renderer.py +281 -0
  137. klaude_code/ui/renderers/__init__.py +0 -0
  138. klaude_code/ui/renderers/assistant.py +21 -0
  139. klaude_code/ui/renderers/common.py +8 -0
  140. klaude_code/ui/renderers/developer.py +158 -0
  141. klaude_code/ui/renderers/diffs.py +215 -0
  142. klaude_code/ui/renderers/errors.py +16 -0
  143. klaude_code/ui/renderers/metadata.py +190 -0
  144. klaude_code/ui/renderers/sub_agent.py +71 -0
  145. klaude_code/ui/renderers/thinking.py +39 -0
  146. klaude_code/ui/renderers/tools.py +551 -0
  147. klaude_code/ui/renderers/user_input.py +65 -0
  148. klaude_code/ui/rich/__init__.py +1 -0
  149. klaude_code/ui/rich/live.py +65 -0
  150. klaude_code/ui/rich/markdown.py +308 -0
  151. klaude_code/ui/rich/quote.py +34 -0
  152. klaude_code/ui/rich/searchable_text.py +71 -0
  153. klaude_code/ui/rich/status.py +240 -0
  154. klaude_code/ui/rich/theme.py +274 -0
  155. klaude_code/ui/terminal/__init__.py +1 -0
  156. klaude_code/ui/terminal/color.py +244 -0
  157. klaude_code/ui/terminal/control.py +147 -0
  158. klaude_code/ui/terminal/notifier.py +107 -0
  159. klaude_code/ui/terminal/progress_bar.py +87 -0
  160. klaude_code/ui/utils/__init__.py +1 -0
  161. klaude_code/ui/utils/common.py +108 -0
  162. klaude_code/ui/utils/debouncer.py +42 -0
  163. klaude_code/version.py +163 -0
  164. klaude_code-1.2.6.dist-info/METADATA +178 -0
  165. klaude_code-1.2.6.dist-info/RECORD +167 -0
  166. klaude_code-1.2.6.dist-info/WHEEL +4 -0
  167. klaude_code-1.2.6.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,239 @@
1
+ """Common utilities for converting conversation history to LLM input formats.
2
+
3
+ This module provides shared abstractions for providers that require message grouping
4
+ (Anthropic, OpenAI-compatible, OpenRouter). The Responses API doesn't need this
5
+ since it uses a flat item list matching our internal protocol.
6
+ """
7
+
8
+ from collections.abc import Iterator
9
+ from dataclasses import dataclass, field
10
+ from enum import Enum
11
+ from typing import TYPE_CHECKING, Iterable
12
+
13
+ from klaude_code import const
14
+
15
+ if TYPE_CHECKING:
16
+ from klaude_code.protocol.llm_param import LLMCallParameter, LLMConfigParameter
17
+
18
+ from klaude_code.protocol import model
19
+
20
+
21
class GroupKind(Enum):
    """Role classification used to bucket consecutive conversation items.

    ``group_response_items_gen`` groups runs of items sharing a kind; OTHER
    items are skipped entirely during grouping.
    """

    ASSISTANT = "assistant"
    USER = "user"
    TOOL = "tool"
    DEVELOPER = "developer"
    OTHER = "other"
27
+
28
+
29
@dataclass
class UserGroup:
    """Aggregated user message group (UserMessageItem + DeveloperMessageItem)."""

    # Text fragments collected from consecutive user/developer items.
    # Use the class itself as the factory instead of a throwaway lambda.
    text_parts: list[str] = field(default_factory=list)
    # Images attached to any item in the group, in encounter order.
    images: list[model.ImageURLPart] = field(default_factory=list)
35
+
36
+
37
@dataclass
class ToolGroup:
    """Aggregated tool result group (ToolResultItem + trailing DeveloperMessageItems)."""

    # The tool result the group is anchored on; always the first item.
    tool_result: model.ToolResultItem
    # Reminder texts from developer messages attached after the tool result.
    # Use the class itself as the factory instead of a throwaway lambda.
    reminder_texts: list[str] = field(default_factory=list)
    # Images carried by those trailing developer messages.
    reminder_images: list[model.ImageURLPart] = field(default_factory=list)
44
+
45
+
46
@dataclass
class AssistantGroup:
    """Aggregated assistant message group."""

    # Concatenated assistant text, or None when the group contained no text.
    text_content: str | None = None
    # Use the class itself as the factory instead of a throwaway lambda.
    tool_calls: list[model.ToolCallItem] = field(default_factory=list)
    reasoning_text: list[model.ReasoningTextItem] = field(default_factory=list)
    reasoning_encrypted: list[model.ReasoningEncryptedItem] = field(default_factory=list)
    # Preserve original ordering of reasoning items for providers that
    # need to emit them as an ordered stream (e.g. OpenRouter).
    reasoning_items: list[model.ReasoningTextItem | model.ReasoningEncryptedItem] = field(default_factory=list)
57
+
58
+
59
# Union alias of all aggregated group shapes produced by parse_message_groups().
MessageGroup = UserGroup | ToolGroup | AssistantGroup
60
+
61
+
62
def _kind_of(item: model.ConversationItem) -> GroupKind:
    """Classify a conversation item into the grouping role it belongs to.

    Anything not matching a known item type falls through to OTHER, which
    the grouping generator silently skips.
    """
    dispatch: tuple[tuple[tuple[type, ...], GroupKind], ...] = (
        (
            (
                model.ReasoningTextItem,
                model.ReasoningEncryptedItem,
                model.AssistantMessageItem,
                model.ToolCallItem,
            ),
            GroupKind.ASSISTANT,
        ),
        ((model.UserMessageItem,), GroupKind.USER),
        ((model.ToolResultItem,), GroupKind.TOOL),
        ((model.DeveloperMessageItem,), GroupKind.DEVELOPER),
    )
    for types, kind in dispatch:
        if isinstance(item, types):
            return kind
    return GroupKind.OTHER
75
+
76
+
77
def group_response_items_gen(
    items: Iterable[model.ConversationItem],
) -> Iterator[tuple[GroupKind, list[model.ConversationItem]]]:
    """Group response items into sublists with predictable attachment rules.

    - Consecutive assistant-side items (ReasoningTextItem | ReasoningEncryptedItem |
      AssistantMessageItem | ToolCallItem) group together.
    - Consecutive UserMessage group together.
    - Each ToolMessage (ToolResultItem) is a single group, but allow following
      DeveloperMessage to attach to it.
    - DeveloperMessage only attaches to the previous UserMessage/ToolMessage group.

    Items classified OTHER are dropped. A DeveloperMessage with no open
    user/tool group (e.g. at the very start, or after an assistant group)
    is also dropped — see the DEVELOPER branch below.
    """
    buffer: list[model.ConversationItem] = []
    buffer_kind: GroupKind | None = None

    def flush() -> Iterator[tuple[GroupKind, list[model.ConversationItem]]]:
        """Yield current group and reset buffer state."""

        nonlocal buffer, buffer_kind
        if buffer_kind is not None and buffer:
            yield (buffer_kind, buffer)
        buffer = []
        buffer_kind = None

    for item in items:
        item_kind = _kind_of(item)
        if item_kind == GroupKind.OTHER:
            continue

        # Developer messages only attach to existing user/tool group.
        # When the open buffer is assistant-kind (or there is none), the
        # developer item is intentionally discarded.
        if item_kind == GroupKind.DEVELOPER:
            if buffer_kind in (GroupKind.USER, GroupKind.TOOL):
                buffer.append(item)
            continue

        # Start a new group when there is no active buffer yet.
        # NOTE(review): the conditional expression is redundant here
        # (both arms equal item_kind) — kept as-is for byte fidelity.
        if buffer_kind is None:
            buffer_kind = GroupKind.TOOL if item_kind == GroupKind.TOOL else item_kind
            buffer = [item]
            continue

        # Tool messages always form a standalone group.
        if item_kind == GroupKind.TOOL:
            yield from flush()
            buffer_kind = GroupKind.TOOL
            buffer = [item]
            continue

        # Same non-tool kind: extend current group.
        if item_kind == buffer_kind:
            buffer.append(item)
            continue

        # Different non-tool kind: close previous group and start a new one.
        yield from flush()
        buffer_kind = item_kind
        buffer = [item]

    # Emit whatever is still buffered once the input is exhausted.
    if buffer_kind is not None and buffer:
        yield (buffer_kind, buffer)
137
+
138
+
139
def parse_message_groups(history: list[model.ConversationItem]) -> list[MessageGroup]:
    """Parse conversation history into aggregated message groups.

    This is the shared grouping logic for Anthropic, OpenAI-compatible, and OpenRouter.
    Each provider then converts these groups to their specific API format.
    """
    groups: list[MessageGroup] = []

    for kind, items in group_response_items_gen(history):
        match kind:
            case GroupKind.OTHER:
                continue
            case GroupKind.USER:
                # Fold user + attached developer items into one text/image bundle.
                group = UserGroup()
                for item in items:
                    if isinstance(item, (model.UserMessageItem, model.DeveloperMessageItem)):
                        if item.content:
                            group.text_parts.append(item.content)
                        if item.images:
                            group.images.extend(item.images)
                groups.append(group)

            case GroupKind.TOOL:
                # A tool group is anchored on its ToolResultItem; skip malformed
                # groups where the first item is not a tool result.
                if not items or not isinstance(items[0], model.ToolResultItem):
                    continue
                tool_result = items[0]
                group = ToolGroup(tool_result=tool_result)
                # Trailing developer messages become reminder text/images.
                for item in items[1:]:
                    if isinstance(item, model.DeveloperMessageItem):
                        if item.content:
                            group.reminder_texts.append(item.content)
                        if item.images:
                            group.reminder_images.extend(item.images)
                groups.append(group)

            case GroupKind.ASSISTANT:
                group = AssistantGroup()
                for item in items:
                    match item:
                        case model.AssistantMessageItem():
                            # Concatenate multiple assistant text items in order.
                            if item.content:
                                if group.text_content is None:
                                    group.text_content = item.content
                                else:
                                    group.text_content += item.content
                        case model.ToolCallItem():
                            group.tool_calls.append(item)
                        case model.ReasoningTextItem():
                            # Track both the typed list and the ordered stream.
                            group.reasoning_text.append(item)
                            group.reasoning_items.append(item)
                        case model.ReasoningEncryptedItem():
                            group.reasoning_encrypted.append(item)
                            group.reasoning_items.append(item)
                        case _:
                            pass
                groups.append(group)

            case GroupKind.DEVELOPER:
                # Standalone developer groups are never produced by the
                # generator; nothing to do.
                pass

    return groups
200
+
201
+
202
+ def merge_reminder_text(tool_output: str | None, reminder_texts: list[str]) -> str:
203
+ """Merge tool output with reminder texts."""
204
+ base = tool_output or ""
205
+ if reminder_texts:
206
+ base += "\n" + "\n".join(reminder_texts)
207
+ return base
208
+
209
+
210
def apply_config_defaults(param: "LLMCallParameter", config: "LLMConfigParameter") -> "LLMCallParameter":
    """Fill unset call parameters from the client config, then enforce invariants.

    Mutates *param* in place and returns it. Raises ValueError when neither
    the call nor the config supplies a model.
    """
    # Inherit every unset field from the config-level defaults.
    for field_name in (
        "model",
        "temperature",
        "max_tokens",
        "context_limit",
        "verbosity",
        "thinking",
        "provider_routing",
    ):
        if getattr(param, field_name) is None:
            setattr(param, field_name, getattr(config, field_name))

    # Hard requirements and library-wide fallbacks.
    if param.model is None:
        raise ValueError("Model is required")
    if param.max_tokens is None:
        param.max_tokens = const.DEFAULT_MAX_TOKENS
    if param.temperature is None:
        param.temperature = const.DEFAULT_TEMPERATURE
    if param.thinking is not None and param.thinking.type == "enabled" and param.thinking.budget_tokens is None:
        param.thinking.budget_tokens = const.DEFAULT_ANTHROPIC_THINKING_BUDGET_TOKENS

    if param.model and "gpt-5" in param.model:
        param.temperature = 1.0  # Required for GPT-5

    return param
@@ -0,0 +1,3 @@
1
+ from .client import OpenAICompatibleClient
2
+
3
+ __all__ = ["OpenAICompatibleClient"]
@@ -0,0 +1,211 @@
1
+ import json
2
+ from collections.abc import AsyncGenerator
3
+ from typing import Literal, override
4
+
5
+ import httpx
6
+ import openai
7
+ from openai import APIError, RateLimitError
8
+
9
+ from klaude_code.llm.client import LLMClientABC, call_with_logged_payload
10
+ from klaude_code.llm.input_common import apply_config_defaults
11
+ from klaude_code.llm.openai_compatible.input import convert_history_to_input, convert_tool_schema
12
+ from klaude_code.llm.openai_compatible.tool_call_accumulator import BasicToolCallAccumulator, ToolCallAccumulatorABC
13
+ from klaude_code.llm.registry import register
14
+ from klaude_code.llm.usage import MetadataTracker, convert_usage
15
+ from klaude_code.protocol import llm_param, model
16
+ from klaude_code.trace import DebugType, log_debug
17
+
18
+
19
@register(llm_param.LLMClientProtocol.OPENAI)
class OpenAICompatibleClient(LLMClientABC):
    """LLM client speaking the OpenAI chat-completions streaming protocol.

    Also covers Azure OpenAI (selected via ``config.is_azure``) and
    OpenAI-compatible vendors that add nonstandard delta fields
    (``reasoning`` / ``reasoning_content``, per-choice ``usage``).
    """

    def __init__(self, config: llm_param.LLMConfigParameter):
        super().__init__(config)
        if config.is_azure:
            if not config.base_url:
                raise ValueError("Azure endpoint is required")
            client = openai.AsyncAzureOpenAI(
                api_key=config.api_key,
                azure_endpoint=str(config.base_url),
                api_version=config.azure_api_version,
                # Generous streaming budget: 15s to connect, 285s to read.
                timeout=httpx.Timeout(300.0, connect=15.0, read=285.0),
            )
        else:
            client = openai.AsyncOpenAI(
                api_key=config.api_key,
                base_url=config.base_url,
                timeout=httpx.Timeout(300.0, connect=15.0, read=285.0),
            )
        self.client: openai.AsyncAzureOpenAI | openai.AsyncOpenAI = client

    @classmethod
    @override
    def create(cls, config: llm_param.LLMConfigParameter) -> "LLMClientABC":
        """Factory used by the client registry."""
        return cls(config)

    @override
    async def call(self, param: llm_param.LLMCallParameter) -> AsyncGenerator[model.ConversationItem, None]:
        """Stream one chat-completions call, yielding protocol items.

        Runs a small stage machine (waiting → reasoning/assistant/tool) over
        the delta stream; switching stages flushes the previous stage's
        accumulated content into a complete item. Ends with a metadata item
        from the tracker. API errors are surfaced as StreamErrorItem rather
        than raised.
        """
        param = apply_config_defaults(param, self.get_llm_config())
        messages = convert_history_to_input(param.input, param.system, param.model)
        tools = convert_tool_schema(param.tools)

        metadata_tracker = MetadataTracker(cost_config=self._config.cost)

        extra_body = {}
        extra_headers = {"extra": json.dumps({"session_id": param.session_id})}

        # Vendor extension: pass thinking config through the request body.
        if param.thinking:
            extra_body["thinking"] = {
                "type": param.thinking.type,
                "budget": param.thinking.budget_tokens,
            }
        stream = call_with_logged_payload(
            self.client.chat.completions.create,
            model=str(param.model),
            tool_choice="auto",
            parallel_tool_calls=True,
            stream=True,
            messages=messages,
            temperature=param.temperature,
            max_tokens=param.max_tokens,
            tools=tools,
            reasoning_effort=param.thinking.reasoning_effort if param.thinking else None,
            verbosity=param.verbosity,
            extra_body=extra_body,  # pyright: ignore[reportUnknownArgumentType]
            extra_headers=extra_headers,
        )

        stage: Literal["waiting", "reasoning", "assistant", "tool", "done"] = "waiting"
        accumulated_reasoning: list[str] = []
        accumulated_content: list[str] = []
        accumulated_tool_calls: ToolCallAccumulatorABC = BasicToolCallAccumulator()
        emitted_tool_start_indices: set[int] = set()
        response_id: str | None = None

        def flush_reasoning_items() -> list[model.ConversationItem]:
            """Drain accumulated reasoning text into a single ReasoningTextItem."""
            nonlocal accumulated_reasoning
            if not accumulated_reasoning:
                return []
            item = model.ReasoningTextItem(
                content="".join(accumulated_reasoning),
                response_id=response_id,
                model=str(param.model),
            )
            accumulated_reasoning = []
            return [item]

        def flush_assistant_items() -> list[model.ConversationItem]:
            """Drain accumulated assistant text into a single AssistantMessageItem."""
            nonlocal accumulated_content
            if len(accumulated_content) == 0:
                return []
            item = model.AssistantMessageItem(
                content="".join(accumulated_content),
                response_id=response_id,
            )
            accumulated_content = []
            return [item]

        def flush_tool_call_items() -> list[model.ToolCallItem]:
            """Drain the tool-call accumulator into complete ToolCallItems."""
            nonlocal accumulated_tool_calls
            items: list[model.ToolCallItem] = accumulated_tool_calls.get()
            if items:
                # Reset the accumulator's backing store directly; only the
                # Basic implementation exposes this attribute.
                accumulated_tool_calls.chunks_by_step = []  # pyright: ignore[reportAttributeAccessIssue]
            return items

        try:
            async for event in await stream:
                log_debug(
                    event.model_dump_json(exclude_none=True),
                    style="blue",
                    debug_type=DebugType.LLM_STREAM,
                )
                # First chunk carrying an id opens the response.
                if not response_id and event.id:
                    response_id = event.id
                    accumulated_tool_calls.response_id = response_id
                    yield model.StartItem(response_id=response_id)
                if (
                    event.usage is not None and event.usage.completion_tokens is not None  # pyright: ignore[reportUnnecessaryComparison] gcp gemini will return None usage field
                ):
                    metadata_tracker.set_usage(convert_usage(event.usage, param.context_limit))
                if event.model:
                    metadata_tracker.set_model_name(event.model)
                if provider := getattr(event, "provider", None):
                    metadata_tracker.set_provider(str(provider))

                if len(event.choices) == 0:
                    continue
                delta = event.choices[0].delta

                # Support Kimi K2's usage field in choice
                if hasattr(event.choices[0], "usage") and getattr(event.choices[0], "usage"):
                    metadata_tracker.set_usage(
                        convert_usage(
                            openai.types.CompletionUsage.model_validate(getattr(event.choices[0], "usage")),
                            param.context_limit,
                        )
                    )

                # Reasoning — vendors use either `reasoning` or `reasoning_content`;
                # the latter wins when both are present.
                reasoning_content = ""
                if hasattr(delta, "reasoning") and getattr(delta, "reasoning"):
                    reasoning_content = getattr(delta, "reasoning")
                if hasattr(delta, "reasoning_content") and getattr(delta, "reasoning_content"):
                    reasoning_content = getattr(delta, "reasoning_content")
                if reasoning_content:
                    metadata_tracker.record_token()
                    stage = "reasoning"
                    accumulated_reasoning.append(reasoning_content)

                # Assistant
                if delta.content and (
                    stage == "assistant" or delta.content.strip()
                ):  # Process all content in assistant stage, filter empty content in reasoning stage
                    metadata_tracker.record_token()
                    if stage == "reasoning":
                        for item in flush_reasoning_items():
                            yield item
                    elif stage == "tool":
                        for item in flush_tool_call_items():
                            yield item
                    stage = "assistant"
                    accumulated_content.append(delta.content)
                    yield model.AssistantMessageDelta(
                        content=delta.content,
                        response_id=response_id,
                    )

                # Tool
                if delta.tool_calls and len(delta.tool_calls) > 0:
                    metadata_tracker.record_token()
                    if stage == "reasoning":
                        for item in flush_reasoning_items():
                            yield item
                    elif stage == "assistant":
                        for item in flush_assistant_items():
                            yield item
                    stage = "tool"
                    # Emit ToolCallStartItem for new tool calls
                    for tc in delta.tool_calls:
                        if tc.index not in emitted_tool_start_indices and tc.function and tc.function.name:
                            emitted_tool_start_indices.add(tc.index)
                            yield model.ToolCallStartItem(
                                response_id=response_id,
                                call_id=tc.id or "",
                                name=tc.function.name,
                            )
                    accumulated_tool_calls.add(delta.tool_calls)
        except (RateLimitError, APIError) as e:
            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {str(e)}")

        # Finalize: flush whatever stage was still accumulating when the
        # stream ended (or errored).
        for item in flush_reasoning_items():
            yield item

        for item in flush_assistant_items():
            yield item

        if stage == "tool":
            for tool_call_item in flush_tool_call_items():
                yield tool_call_item

        metadata_tracker.set_response_id(response_id)
        yield metadata_tracker.finalize()
@@ -0,0 +1,109 @@
1
+ # pyright: reportReturnType=false
2
+ # pyright: reportArgumentType=false
3
+ # pyright: reportUnknownMemberType=false
4
+ # pyright: reportAttributeAccessIssue=false
5
+
6
+ from openai.types import chat
7
+ from openai.types.chat import ChatCompletionContentPartParam
8
+
9
+ from klaude_code.llm.input_common import AssistantGroup, ToolGroup, UserGroup, merge_reminder_text, parse_message_groups
10
+ from klaude_code.protocol import llm_param, model
11
+
12
+
13
def _user_group_to_message(group: UserGroup) -> chat.ChatCompletionMessageParam:
    """Render an aggregated user group as a chat `user` message with content parts."""
    text_items: list[ChatCompletionContentPartParam] = [
        {"type": "text", "text": text + "\n"} for text in group.text_parts
    ]
    image_items: list[ChatCompletionContentPartParam] = [
        {"type": "image_url", "image_url": {"url": image.image_url.url}} for image in group.images
    ]
    content = text_items + image_items
    if not content:
        # Keep a single empty text part so the message is never content-less.
        content = [{"type": "text", "text": ""}]
    return {"role": "user", "content": content}
22
+
23
+
24
def _tool_group_to_message(group: ToolGroup) -> chat.ChatCompletionMessageParam:
    """Render a tool result (plus attached reminder texts) as a chat `tool` message."""
    fallback = "<system-reminder>Tool ran without output or errors</system-reminder>"
    # Inline merge: reminder texts are appended below the tool output.
    body = group.tool_result.output or fallback
    if group.reminder_texts:
        body = body + "\n" + "\n".join(group.reminder_texts)
    message: chat.ChatCompletionMessageParam = {
        "role": "tool",
        "content": [{"type": "text", "text": body}],
        "tool_call_id": group.tool_result.call_id,
    }
    return message
34
+
35
+
36
def _assistant_group_to_message(
    group: AssistantGroup,
) -> chat.ChatCompletionMessageParam:
    """Render an aggregated assistant group as a chat `assistant` message.

    `content` and `tool_calls` keys are added only when present, matching the
    chat-completions expectation of omitted-rather-than-null fields.
    """
    message: dict[str, object] = {"role": "assistant"}

    if group.text_content:
        message["content"] = group.text_content

    if group.tool_calls:
        calls: list[dict[str, object]] = []
        for tc in group.tool_calls:
            calls.append(
                {
                    "id": tc.call_id,
                    "type": "function",
                    "function": {
                        "name": tc.name,
                        "arguments": tc.arguments,
                    },
                }
            )
        message["tool_calls"] = calls

    return message
58
+
59
+
60
def build_user_content_parts(
    images: list[model.ImageURLPart],
) -> list[ChatCompletionContentPartParam]:
    """Build content parts for images only. Used by OpenRouter."""
    parts: list[ChatCompletionContentPartParam] = []
    for image in images:
        parts.append({"type": "image_url", "image_url": {"url": image.image_url.url}})
    return parts
65
+
66
+
67
def convert_history_to_input(
    history: list[model.ConversationItem],
    system: str | None = None,
    model_name: str | None = None,
) -> list[chat.ChatCompletionMessageParam]:
    """
    Convert a list of conversation items to a list of chat completion message params.

    Args:
        history: List of conversation items.
        system: System message.
        model_name: Model name. Not used in OpenAI-compatible, kept for API consistency.
    """
    messages: list[chat.ChatCompletionMessageParam] = []
    if system:
        messages.append({"role": "system", "content": system})

    for group in parse_message_groups(history):
        if isinstance(group, UserGroup):
            converted = _user_group_to_message(group)
        elif isinstance(group, ToolGroup):
            converted = _tool_group_to_message(group)
        elif isinstance(group, AssistantGroup):
            converted = _assistant_group_to_message(group)
        else:
            continue
        messages.append(converted)

    return messages
92
+
93
+
94
def convert_tool_schema(
    tools: list[llm_param.ToolSchema] | None,
) -> list[chat.ChatCompletionToolParam]:
    """Map internal tool schemas to the chat-completions function-tool format."""
    converted: list[chat.ChatCompletionToolParam] = []
    for tool in tools or []:
        converted.append(
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.parameters,
                },
            }
        )
    return converted
@@ -0,0 +1,80 @@
1
+ from abc import ABC, abstractmethod
2
+
3
+ from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
4
+ from pydantic import BaseModel, Field
5
+
6
+ from klaude_code.protocol import model
7
+
8
+
9
class ToolCallAccumulatorABC(ABC):
    """Accumulates streamed tool-call delta chunks into complete tool calls."""

    @abstractmethod
    def add(self, chunks: list[ChoiceDeltaToolCall]) -> None:
        """Record one streamed step's worth of tool-call delta chunks."""
        pass

    @abstractmethod
    def get(self) -> list[model.ToolCallItem]:
        """Assemble and return the tool calls accumulated so far."""
        pass
17
+
18
+
19
class BasicToolCallAccumulator(ToolCallAccumulatorABC, BaseModel):
    """
    Support for API:
    - returns multiple tool calls within a single response in a serial manner.
    - Each step contains exactly one chunk.

    e.g.:
    Claude, GPT series
    The sequence for each tool call follows this pattern:
    - Initial chunk: contains tool call id and function name
    - Subsequent chunks: contain argument fragments with id and name set to None
    - Every chunk has a valid index
    - Pattern repeats for the next tool call
    [ChoiceDeltaToolCall(index=0, id='toolu_vrtx_01QxTq6QeJZd9tTLt6pvtSy6', function=ChoiceDeltaToolCallFunction(arguments='', name='Bash'), type='function')]
    [ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='', name=None), type='function')]
    [ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='{"comm', name=None), type='function')]
    [ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='an', name=None), type='function')]
    [ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='d": "', name=None), type='function')]
    [ChoiceDeltaToolCall(index=0, id=None, function=ChoiceDeltaToolCallFunction(arguments='pwd"}', name=None), type='function')]
    [ChoiceDeltaToolCall(index=1, id='toolu_vrtx_01Uvxge2edYAZBnNLoYGeDBg', function=ChoiceDeltaToolCallFunction(arguments='', name='Bash'), type='function')]
    [ChoiceDeltaToolCall(index=1, id=None, function=ChoiceDeltaToolCallFunction(arguments='', name=None), type='function')]
    [ChoiceDeltaToolCall(index=1, id=None, function=ChoiceDeltaToolCallFunction(arguments='{"com', name=None), type='function')]
    [ChoiceDeltaToolCall(index=1, id=None, function=ChoiceDeltaToolCallFunction(arguments='mand":', name=None), type='function')]
    [ChoiceDeltaToolCall(index=1, id=None, function=ChoiceDeltaToolCallFunction(arguments=' "ls"}', name=None), type='function')]

    Grok, Gemini
    Each step is one completed tool call
    [ChoiceDeltaToolCall(index=0, id='call_83297568', function=ChoiceDeltaToolCallFunction(arguments='{"command":"pwd"}', name='Bash'), type='function')]
    [ChoiceDeltaToolCall(index=1, id='call_88931225', function=ChoiceDeltaToolCallFunction(arguments='{"command":"ls"}', name='Bash'), type='function')]
    """

    # One entry per streamed delta; each entry is the delta's tool_calls list.
    chunks_by_step: list[list[ChoiceDeltaToolCall]] = Field(default_factory=list)  # pyright: ignore[reportUnknownVariableType]
    response_id: str | None = None

    def add(self, chunks: list[ChoiceDeltaToolCall]) -> None:
        """Record one step of streamed tool-call delta chunks."""
        self.chunks_by_step.append(chunks)

    def get(self) -> list[model.ToolCallItem]:
        """Assemble accumulated chunks into complete tool calls.

        A chunk whose ``index`` differs from the previously seen index starts a
        new tool call; chunks repeating the index append to the current call's
        name/argument buffers.

        Fix: the previous implementation only inspected ``current_step[0]``,
        silently dropping additional chunks when a provider batches several
        tool calls into one delta (the client already iterates all of
        ``delta.tool_calls`` when emitting start items). Every chunk in each
        step is now processed; single-chunk steps behave exactly as before.
        """
        result: list[model.ToolCallItem] = []
        current_index = -1
        for current_step in self.chunks_by_step:
            for chunk in current_step:
                if chunk.index != current_index:
                    current_index = chunk.index
                    result.append(
                        model.ToolCallItem(
                            id=chunk.id,
                            name="",
                            arguments="",
                            call_id=chunk.id or "",
                            response_id=self.response_id,
                        )
                    )
                if chunk.function is None:
                    continue
                if chunk.function.name:
                    result[-1].name = chunk.function.name
                if chunk.function.arguments:
                    result[-1].arguments += chunk.function.arguments
        return result
@@ -0,0 +1,3 @@
1
+ from .client import OpenRouterClient
2
+
3
+ __all__ = ["OpenRouterClient"]