klaude-code 1.9.0__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132)
  1. klaude_code/auth/base.py +2 -6
  2. klaude_code/cli/auth_cmd.py +4 -4
  3. klaude_code/cli/cost_cmd.py +1 -1
  4. klaude_code/cli/list_model.py +1 -1
  5. klaude_code/cli/main.py +1 -1
  6. klaude_code/cli/runtime.py +7 -5
  7. klaude_code/cli/self_update.py +1 -1
  8. klaude_code/cli/session_cmd.py +1 -1
  9. klaude_code/command/clear_cmd.py +6 -2
  10. klaude_code/command/command_abc.py +2 -2
  11. klaude_code/command/debug_cmd.py +4 -4
  12. klaude_code/command/export_cmd.py +2 -2
  13. klaude_code/command/export_online_cmd.py +12 -12
  14. klaude_code/command/fork_session_cmd.py +29 -23
  15. klaude_code/command/help_cmd.py +4 -4
  16. klaude_code/command/model_cmd.py +4 -4
  17. klaude_code/command/model_select.py +1 -1
  18. klaude_code/command/prompt-commit.md +11 -2
  19. klaude_code/command/prompt_command.py +3 -3
  20. klaude_code/command/refresh_cmd.py +2 -2
  21. klaude_code/command/registry.py +7 -5
  22. klaude_code/command/release_notes_cmd.py +4 -4
  23. klaude_code/command/resume_cmd.py +15 -11
  24. klaude_code/command/status_cmd.py +4 -4
  25. klaude_code/command/terminal_setup_cmd.py +8 -8
  26. klaude_code/command/thinking_cmd.py +4 -4
  27. klaude_code/config/assets/builtin_config.yaml +20 -0
  28. klaude_code/config/builtin_config.py +16 -5
  29. klaude_code/config/config.py +7 -2
  30. klaude_code/const.py +147 -91
  31. klaude_code/core/agent.py +3 -12
  32. klaude_code/core/executor.py +18 -39
  33. klaude_code/core/manager/sub_agent_manager.py +71 -7
  34. klaude_code/core/prompts/prompt-sub-agent-image-gen.md +1 -0
  35. klaude_code/core/prompts/prompt-sub-agent-web.md +27 -1
  36. klaude_code/core/reminders.py +88 -69
  37. klaude_code/core/task.py +44 -45
  38. klaude_code/core/tool/file/apply_patch_tool.py +9 -9
  39. klaude_code/core/tool/file/diff_builder.py +3 -5
  40. klaude_code/core/tool/file/edit_tool.py +23 -23
  41. klaude_code/core/tool/file/move_tool.py +43 -43
  42. klaude_code/core/tool/file/read_tool.py +44 -39
  43. klaude_code/core/tool/file/write_tool.py +14 -14
  44. klaude_code/core/tool/report_back_tool.py +4 -4
  45. klaude_code/core/tool/shell/bash_tool.py +23 -23
  46. klaude_code/core/tool/skill/skill_tool.py +7 -7
  47. klaude_code/core/tool/sub_agent_tool.py +38 -9
  48. klaude_code/core/tool/todo/todo_write_tool.py +9 -10
  49. klaude_code/core/tool/todo/update_plan_tool.py +6 -6
  50. klaude_code/core/tool/tool_abc.py +2 -2
  51. klaude_code/core/tool/tool_context.py +27 -0
  52. klaude_code/core/tool/tool_runner.py +88 -42
  53. klaude_code/core/tool/truncation.py +38 -20
  54. klaude_code/core/tool/web/mermaid_tool.py +6 -7
  55. klaude_code/core/tool/web/web_fetch_tool.py +68 -30
  56. klaude_code/core/tool/web/web_search_tool.py +15 -17
  57. klaude_code/core/turn.py +120 -73
  58. klaude_code/llm/anthropic/client.py +79 -44
  59. klaude_code/llm/anthropic/input.py +116 -108
  60. klaude_code/llm/bedrock/client.py +8 -5
  61. klaude_code/llm/claude/client.py +18 -8
  62. klaude_code/llm/client.py +4 -3
  63. klaude_code/llm/codex/client.py +15 -9
  64. klaude_code/llm/google/client.py +122 -60
  65. klaude_code/llm/google/input.py +94 -108
  66. klaude_code/llm/image.py +123 -0
  67. klaude_code/llm/input_common.py +136 -189
  68. klaude_code/llm/openai_compatible/client.py +17 -7
  69. klaude_code/llm/openai_compatible/input.py +36 -66
  70. klaude_code/llm/openai_compatible/stream.py +119 -67
  71. klaude_code/llm/openai_compatible/tool_call_accumulator.py +23 -11
  72. klaude_code/llm/openrouter/client.py +34 -9
  73. klaude_code/llm/openrouter/input.py +63 -64
  74. klaude_code/llm/openrouter/reasoning.py +22 -24
  75. klaude_code/llm/registry.py +20 -17
  76. klaude_code/llm/responses/client.py +107 -45
  77. klaude_code/llm/responses/input.py +115 -98
  78. klaude_code/llm/usage.py +52 -25
  79. klaude_code/protocol/__init__.py +1 -0
  80. klaude_code/protocol/events.py +16 -12
  81. klaude_code/protocol/llm_param.py +20 -2
  82. klaude_code/protocol/message.py +250 -0
  83. klaude_code/protocol/model.py +95 -285
  84. klaude_code/protocol/op.py +2 -15
  85. klaude_code/protocol/op_handler.py +0 -5
  86. klaude_code/protocol/sub_agent/__init__.py +1 -0
  87. klaude_code/protocol/sub_agent/explore.py +10 -0
  88. klaude_code/protocol/sub_agent/image_gen.py +119 -0
  89. klaude_code/protocol/sub_agent/task.py +10 -0
  90. klaude_code/protocol/sub_agent/web.py +10 -0
  91. klaude_code/session/codec.py +6 -6
  92. klaude_code/session/export.py +261 -62
  93. klaude_code/session/selector.py +7 -24
  94. klaude_code/session/session.py +126 -54
  95. klaude_code/session/store.py +5 -32
  96. klaude_code/session/templates/export_session.html +1 -1
  97. klaude_code/session/templates/mermaid_viewer.html +1 -1
  98. klaude_code/trace/log.py +11 -6
  99. klaude_code/ui/core/input.py +1 -1
  100. klaude_code/ui/core/stage_manager.py +1 -8
  101. klaude_code/ui/modes/debug/display.py +2 -2
  102. klaude_code/ui/modes/repl/clipboard.py +2 -2
  103. klaude_code/ui/modes/repl/completers.py +18 -10
  104. klaude_code/ui/modes/repl/event_handler.py +138 -132
  105. klaude_code/ui/modes/repl/input_prompt_toolkit.py +1 -1
  106. klaude_code/ui/modes/repl/key_bindings.py +136 -2
  107. klaude_code/ui/modes/repl/renderer.py +107 -15
  108. klaude_code/ui/renderers/assistant.py +2 -2
  109. klaude_code/ui/renderers/bash_syntax.py +36 -4
  110. klaude_code/ui/renderers/common.py +70 -10
  111. klaude_code/ui/renderers/developer.py +7 -6
  112. klaude_code/ui/renderers/diffs.py +11 -11
  113. klaude_code/ui/renderers/mermaid_viewer.py +49 -2
  114. klaude_code/ui/renderers/metadata.py +33 -5
  115. klaude_code/ui/renderers/sub_agent.py +57 -16
  116. klaude_code/ui/renderers/thinking.py +37 -2
  117. klaude_code/ui/renderers/tools.py +188 -178
  118. klaude_code/ui/rich/live.py +3 -1
  119. klaude_code/ui/rich/markdown.py +39 -7
  120. klaude_code/ui/rich/quote.py +76 -1
  121. klaude_code/ui/rich/status.py +14 -8
  122. klaude_code/ui/rich/theme.py +20 -14
  123. klaude_code/ui/terminal/image.py +34 -0
  124. klaude_code/ui/terminal/notifier.py +2 -1
  125. klaude_code/ui/terminal/progress_bar.py +4 -4
  126. klaude_code/ui/terminal/selector.py +22 -4
  127. klaude_code/ui/utils/common.py +11 -2
  128. {klaude_code-1.9.0.dist-info → klaude_code-2.0.1.dist-info}/METADATA +4 -2
  129. klaude_code-2.0.1.dist-info/RECORD +229 -0
  130. klaude_code-1.9.0.dist-info/RECORD +0 -224
  131. {klaude_code-1.9.0.dist-info → klaude_code-2.0.1.dist-info}/WHEEL +0 -0
  132. {klaude_code-1.9.0.dist-info → klaude_code-2.0.1.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,123 @@
1
+ """Image processing utilities for LLM responses.
2
+
3
+ This module provides reusable image handling primitives that can be shared
4
+ across different LLM providers and protocols (OpenAI, Anthropic, etc.).
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import hashlib
10
+ import mimetypes
11
+ import time
12
+ from base64 import b64decode, b64encode
13
+ from binascii import Error as BinasciiError
14
+ from pathlib import Path
15
+
16
+ from klaude_code.const import (
17
+ IMAGE_OUTPUT_MAX_BYTES,
18
+ TOOL_OUTPUT_TRUNCATION_DIR,
19
+ ProjectPaths,
20
+ project_key_from_cwd,
21
+ )
22
+ from klaude_code.protocol import message
23
+
24
# Supported image MIME types mapped to their canonical file extensions.
# Lookups for unknown MIME types fall back to ".bin" at the call site.
IMAGE_EXT_BY_MIME: dict[str, str] = {
    "image/png": ".png",
    "image/jpeg": ".jpg",
    "image/jpg": ".jpg",
    "image/webp": ".webp",
    "image/gif": ".gif",
}
31
+
32
+
33
def parse_data_url(url: str) -> tuple[str, str, bytes]:
    """Parse a base64 ``data:`` URL.

    Returns:
        A ``(mime_type, base64_payload, decoded_bytes)`` triple.

    Raises:
        ValueError: if the URL lacks the ``data:`` prefix, the comma
            separator, or the ``;base64`` marker, or if the payload is
            empty or not valid base64.
    """
    header, sep, media = url.partition(",")
    if not sep:
        raise ValueError("Invalid data URL for image: missing comma separator")
    if not header.startswith("data:"):
        raise ValueError("Invalid data URL for image: missing data: prefix")
    if ";base64" not in header:
        raise ValueError("Invalid data URL for image: missing base64 marker")

    # The MIME type sits between "data:" and the first ";" parameter.
    mime_type = header[len("data:") :].split(";", 1)[0]

    payload = media.strip()
    if not payload:
        raise ValueError("Inline image data is empty")

    try:
        raw = b64decode(payload, validate=True)
    except (BinasciiError, ValueError) as exc:
        raise ValueError("Inline image data is not valid base64") from exc

    return mime_type, payload, raw
56
+
57
+
58
def parse_data_url_image(url: str) -> tuple[str, bytes]:
    """Parse a base64 data URL, returning only ``(mime_type, decoded_bytes)``."""
    mime_type, _payload, decoded = parse_data_url(url)
    return mime_type, decoded
63
+
64
+
65
def get_assistant_image_output_dir(session_id: str | None) -> Path:
    """Return the directory for assistant-generated image artifacts.

    With a session id the project's per-session images directory is used;
    otherwise images fall back to an "images" subdirectory of the tool
    output truncation directory.
    """
    if not session_id:
        return Path(TOOL_OUTPUT_TRUNCATION_DIR) / "images"
    project_paths = ProjectPaths(project_key=project_key_from_cwd())
    return project_paths.images_dir(session_id)
71
+
72
+
73
def save_assistant_image(
    *, data_url: str, session_id: str | None, response_id: str | None, image_index: int
) -> message.ImageFilePart:
    """Decode a data-URL image and persist it to the session image directory.

    Args:
        data_url: Base64 ``data:`` URL carrying the image bytes.
        session_id: Session owning the image; ``None`` uses the fallback dir.
        response_id: Provider response id used in the filename; "/" is
            sanitized so it cannot form path segments.
        image_index: Position of the image within the response.

    Raises:
        ValueError: if the data URL is malformed or the decoded payload
            exceeds ``IMAGE_OUTPUT_MAX_BYTES``.
    """
    mime_type, decoded = parse_data_url_image(data_url)

    size = len(decoded)
    if size > IMAGE_OUTPUT_MAX_BYTES:
        raise ValueError(
            f"Image output size ({size / (1024 * 1024):.2f}MB) exceeds limit "
            f"({IMAGE_OUTPUT_MAX_BYTES / (1024 * 1024):.2f}MB)"
        )

    target_dir = get_assistant_image_output_dir(session_id)
    target_dir.mkdir(parents=True, exist_ok=True)

    # Filename: img-<response-id>-<index>-<ns-timestamp><ext>; the
    # nanosecond timestamp keeps repeated saves from colliding.
    suffix = IMAGE_EXT_BY_MIME.get(mime_type, ".bin")
    safe_response = (response_id or "unknown").replace("/", "_")
    dest = target_dir / f"img-{safe_response}-{image_index}-{time.time_ns()}{suffix}"
    dest.write_bytes(decoded)

    return message.ImageFilePart(
        file_path=str(dest),
        mime_type=mime_type,
        byte_size=size,
        sha256=hashlib.sha256(decoded).hexdigest(),
    )
100
+
101
+
102
def assistant_image_to_data_url(image: message.ImageFilePart) -> str:
    """Read an assistant image from disk and encode it as a base64 data URL.

    Primarily used for multi-turn image editing, where providers require the
    previous assistant message (including images) to be replayed inline.

    Raises:
        ValueError: if the on-disk file exceeds ``IMAGE_OUTPUT_MAX_BYTES``.
        OSError: if the file cannot be read.
    """
    path = Path(image.file_path)
    raw = path.read_bytes()

    if len(raw) > IMAGE_OUTPUT_MAX_BYTES:
        raise ValueError(
            f"Assistant image size ({len(raw) / (1024 * 1024):.2f}MB) exceeds limit "
            f"({IMAGE_OUTPUT_MAX_BYTES / (1024 * 1024):.2f}MB)"
        )

    # Prefer the recorded MIME type; fall back to a filename-based guess,
    # then to a generic binary type.
    mime_type = image.mime_type or mimetypes.guess_type(str(path))[0] or "application/octet-stream"
    encoded = b64encode(raw).decode("ascii")
    return f"data:{mime_type};base64,{encoded}"
@@ -1,204 +1,166 @@
1
- """Common utilities for converting conversation history to LLM input formats.
1
+ """Common utilities for converting message history to LLM input formats."""
2
2
 
3
- This module provides shared abstractions for providers that require message grouping
4
- (Anthropic, OpenAI-compatible, OpenRouter). The Responses API doesn't need this
5
- since it uses a flat item list matching our internal protocol.
6
- """
7
-
8
- from collections.abc import Iterable, Iterator
3
+ from collections.abc import Callable, Iterable
9
4
  from dataclasses import dataclass, field
10
- from enum import Enum
11
5
  from typing import TYPE_CHECKING
12
6
 
13
- from klaude_code import const
14
-
15
7
  if TYPE_CHECKING:
16
8
  from klaude_code.protocol.llm_param import LLMCallParameter, LLMConfigParameter
17
9
 
18
- from klaude_code.protocol import model
10
+ from klaude_code.const import EMPTY_TOOL_OUTPUT_MESSAGE
11
+ from klaude_code.protocol import message
19
12
 
20
13
 
21
- class GroupKind(Enum):
22
- ASSISTANT = "assistant"
23
- USER = "user"
24
- TOOL = "tool"
25
- DEVELOPER = "developer"
26
- OTHER = "other"
14
def _empty_image_parts() -> list[message.ImageURLPart]:
    """Default factory for ``DeveloperAttachment.images``."""
    return []


@dataclass
class DeveloperAttachment:
    """Out-of-band developer content folded into a neighboring message."""

    # Concatenated text extracted from attached developer messages.
    text: str = ""
    # Image parts carried by attached developer messages.
    images: list[message.ImageURLPart] = field(default_factory=_empty_image_parts)
32
22
 
33
- text_parts: list[str] = field(default_factory=lambda: [])
34
- images: list[model.ImageURLPart] = field(default_factory=lambda: [])
35
23
 
24
def _extract_developer_content(msg: message.DeveloperMessage) -> tuple[str, list[message.ImageURLPart]]:
    """Split a developer message into (concatenated text, image parts).

    Each text part keeps a trailing newline so multiple parts remain
    separated after concatenation; non-text, non-image parts are ignored.
    """
    texts: list[str] = []
    images: list[message.ImageURLPart] = []
    for part in msg.parts:
        if isinstance(part, message.TextPart):
            texts.append(f"{part.text}\n")
        elif isinstance(part, message.ImageURLPart):
            images.append(part)
    return "".join(texts), images
36
33
 
37
- @dataclass
38
- class ToolGroup:
39
- """Aggregated tool result group (ToolResultItem + trailing DeveloperMessageItems)."""
40
-
41
- tool_result: model.ToolResultItem
42
- reminder_texts: list[str] = field(default_factory=lambda: [])
43
- reminder_images: list[model.ImageURLPart] = field(default_factory=lambda: [])
44
34
 
35
def attach_developer_messages(
    messages: Iterable[message.Message],
) -> list[tuple[message.Message, DeveloperAttachment]]:
    """Fold developer messages into the most recent user/tool message.

    Developer messages are removed from the returned list; their text and
    images travel alongside the preceding user or tool-result message as a
    ``DeveloperAttachment`` for provider-specific input building. Developer
    messages appearing before any user/tool message are discarded.
    """
    ordered = list(messages)
    attachments = [DeveloperAttachment() for _ in ordered]
    anchor: int | None = None  # index of the most recent user/tool message

    for idx, msg in enumerate(ordered):
        if isinstance(msg, (message.UserMessage, message.ToolResultMessage)):
            anchor = idx
        elif isinstance(msg, message.DeveloperMessage) and anchor is not None:
            dev_text, dev_images = _extract_developer_content(msg)
            attachments[anchor].text += dev_text
            attachments[anchor].images.extend(dev_images)

    return [
        (msg, attachments[idx])
        for idx, msg in enumerate(ordered)
        if not isinstance(msg, message.DeveloperMessage)
    ]
130
66
 
131
- if buffer_kind is not None and buffer:
132
- yield (buffer_kind, buffer)
133
67
 
68
+ def merge_reminder_text(tool_output: str | None, reminder_text: str) -> str:
69
+ """Merge tool output with reminder text."""
70
+ base = tool_output or ""
71
+ if reminder_text:
72
+ base += "\n" + reminder_text
73
+ return base
134
74
 
135
- def parse_message_groups(history: list[model.ConversationItem]) -> list[MessageGroup]:
136
- """Parse conversation history into aggregated message groups.
137
75
 
138
- This is the shared grouping logic for Anthropic, OpenAI-compatible, and OpenRouter.
139
- Each provider then converts these groups to their specific API format.
140
- """
141
- groups: list[MessageGroup] = []
142
-
143
- for kind, items in group_response_items_gen(history):
144
- match kind:
145
- case GroupKind.OTHER:
76
def collect_text_content(parts: list[message.Part]) -> str:
    """Concatenate the text of all ``TextPart`` entries, ignoring other parts."""
    return "".join(p.text for p in parts if isinstance(p, message.TextPart))
78
+
79
+
80
def build_chat_content_parts(
    msg: message.UserMessage,
    attachment: DeveloperAttachment,
) -> list[dict[str, object]]:
    """Build chat-completions content parts for a user message.

    The message's own text/image parts come first, followed by the attached
    developer text and images. Always returns at least one (possibly empty)
    text part.
    """
    content: list[dict[str, object]] = []
    for part in msg.parts:
        if isinstance(part, message.TextPart):
            content.append({"type": "text", "text": part.text})
        elif isinstance(part, message.ImageURLPart):
            content.append({"type": "image_url", "image_url": {"url": part.url}})

    if attachment.text:
        content.append({"type": "text", "text": attachment.text})
    content.extend(
        {"type": "image_url", "image_url": {"url": img.url}} for img in attachment.images
    )

    if not content:
        content.append({"type": "text", "text": ""})
    return content
97
+
98
+
99
def build_tool_message(
    msg: message.ToolResultMessage,
    attachment: DeveloperAttachment,
) -> dict[str, object]:
    """Build a chat-completions ``tool`` role message for a tool result.

    Empty tool output is replaced by the standard placeholder, and any
    attached developer reminder text is appended to the output.
    """
    merged = merge_reminder_text(
        msg.output_text or EMPTY_TOOL_OUTPUT_MESSAGE,
        attachment.text,
    )
    return {
        "role": "tool",
        "content": [{"type": "text", "text": merged}],
        "tool_call_id": msg.call_id,
    }
112
+
113
+
114
def build_assistant_common_fields(
    msg: message.AssistantMessage,
    *,
    image_to_data_url: Callable[[message.ImageFilePart], str],
) -> dict[str, object]:
    """Build provider-agnostic assistant-message fields (images, tool calls).

    Args:
        msg: Assistant message whose parts are scanned.
        image_to_data_url: Callback that inlines an on-disk image part as a
            base64 data URL (injected so providers can customize encoding).

    Returns:
        A dict containing ``images`` and/or ``tool_calls`` keys only when the
        corresponding parts are present.
    """
    fields: dict[str, object] = {}

    image_parts = [p for p in msg.parts if isinstance(p, message.ImageFilePart)]
    if image_parts:
        fields["images"] = [
            {"image_url": {"url": image_to_data_url(p)}} for p in image_parts
        ]

    call_parts = [p for p in msg.parts if isinstance(p, message.ToolCallPart)]
    if call_parts:
        fields["tool_calls"] = [
            {
                "id": call.call_id,
                "type": "function",
                "function": {
                    "name": call.tool_name,
                    "arguments": call.arguments_json,
                },
            }
            for call in call_parts
        ]

    return fields
145
+
146
+
147
def split_thinking_parts(
    msg: message.AssistantMessage,
    model_name: str | None,
) -> tuple[list[message.ThinkingTextPart | message.ThinkingSignaturePart], list[str]]:
    """Partition thinking parts into natively-replayable vs degraded text.

    Thinking text produced under a different model than ``model_name``
    cannot be replayed natively, so its text is returned separately as
    "degraded"; signatures from a different model are dropped entirely.
    Parts with no recorded model id (or when no target model is given) are
    treated as native.
    """
    native: list[message.ThinkingTextPart | message.ThinkingSignaturePart] = []
    degraded: list[str] = []

    def _foreign(part_model: str | None) -> bool:
        # "Foreign" only when both model ids are known and they differ.
        return bool(part_model and model_name and part_model != model_name)

    for part in msg.parts:
        if isinstance(part, message.ThinkingTextPart):
            if _foreign(part.model_id):
                degraded.append(part.text)
            else:
                native.append(part)
        elif isinstance(part, message.ThinkingSignaturePart):
            if not _foreign(part.model_id):
                native.append(part)

    return native, degraded
202
164
 
203
165
 
204
166
  def apply_config_defaults(param: "LLMCallParameter", config: "LLMConfigParameter") -> "LLMCallParameter":
@@ -215,19 +177,4 @@ def apply_config_defaults(param: "LLMCallParameter", config: "LLMConfigParameter
215
177
  param.verbosity = config.verbosity
216
178
  if param.thinking is None:
217
179
  param.thinking = config.thinking
218
- if param.provider_routing is None:
219
- param.provider_routing = config.provider_routing
220
-
221
- if param.model is None:
222
- raise ValueError("Model is required")
223
- if param.max_tokens is None:
224
- param.max_tokens = const.DEFAULT_MAX_TOKENS
225
- if param.temperature is None:
226
- param.temperature = const.DEFAULT_TEMPERATURE
227
- if param.thinking is not None and param.thinking.type == "enabled" and param.thinking.budget_tokens is None:
228
- param.thinking.budget_tokens = const.DEFAULT_ANTHROPIC_THINKING_BUDGET_TOKENS
229
-
230
- if param.model and "gpt-5" in param.model:
231
- param.temperature = 1.0 # Required for GPT-5
232
-
233
180
  return param
@@ -6,13 +6,14 @@ import httpx
6
6
  import openai
7
7
  from openai.types.chat.completion_create_params import CompletionCreateParamsStreaming
8
8
 
9
+ from klaude_code.const import LLM_HTTP_TIMEOUT_CONNECT, LLM_HTTP_TIMEOUT_READ, LLM_HTTP_TIMEOUT_TOTAL
9
10
  from klaude_code.llm.client import LLMClientABC
10
11
  from klaude_code.llm.input_common import apply_config_defaults
11
12
  from klaude_code.llm.openai_compatible.input import convert_history_to_input, convert_tool_schema
12
13
  from klaude_code.llm.openai_compatible.stream import DefaultReasoningHandler, parse_chat_completions_stream
13
14
  from klaude_code.llm.registry import register
14
15
  from klaude_code.llm.usage import MetadataTracker
15
- from klaude_code.protocol import llm_param, model
16
+ from klaude_code.protocol import llm_param, message
16
17
  from klaude_code.trace import DebugType, log_debug
17
18
 
18
19
 
@@ -56,13 +57,17 @@ class OpenAICompatibleClient(LLMClientABC):
56
57
  api_key=config.api_key,
57
58
  azure_endpoint=str(config.base_url),
58
59
  api_version=config.azure_api_version,
59
- timeout=httpx.Timeout(300.0, connect=15.0, read=285.0),
60
+ timeout=httpx.Timeout(
61
+ LLM_HTTP_TIMEOUT_TOTAL, connect=LLM_HTTP_TIMEOUT_CONNECT, read=LLM_HTTP_TIMEOUT_READ
62
+ ),
60
63
  )
61
64
  else:
62
65
  client = openai.AsyncOpenAI(
63
66
  api_key=config.api_key,
64
67
  base_url=config.base_url,
65
- timeout=httpx.Timeout(300.0, connect=15.0, read=285.0),
68
+ timeout=httpx.Timeout(
69
+ LLM_HTTP_TIMEOUT_TOTAL, connect=LLM_HTTP_TIMEOUT_CONNECT, read=LLM_HTTP_TIMEOUT_READ
70
+ ),
66
71
  )
67
72
  self.client: openai.AsyncAzureOpenAI | openai.AsyncOpenAI = client
68
73
 
@@ -72,12 +77,17 @@ class OpenAICompatibleClient(LLMClientABC):
72
77
  return cls(config)
73
78
 
74
79
  @override
75
- async def call(self, param: llm_param.LLMCallParameter) -> AsyncGenerator[model.ConversationItem]:
80
+ async def call(self, param: llm_param.LLMCallParameter) -> AsyncGenerator[message.LLMStreamItem]:
76
81
  param = apply_config_defaults(param, self.get_llm_config())
77
82
 
78
83
  metadata_tracker = MetadataTracker(cost_config=self.get_llm_config().cost)
79
84
 
80
- payload, extra_body = build_payload(param)
85
+ try:
86
+ payload, extra_body = build_payload(param)
87
+ except (ValueError, OSError) as e:
88
+ yield message.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
89
+ yield message.AssistantMessage(parts=[], response_id=None, usage=metadata_tracker.finalize())
90
+ return
81
91
  extra_headers: dict[str, str] = {"extra": json.dumps({"session_id": param.session_id}, sort_keys=True)}
82
92
 
83
93
  log_debug(
@@ -93,8 +103,8 @@ class OpenAICompatibleClient(LLMClientABC):
93
103
  extra_headers=extra_headers,
94
104
  )
95
105
  except (openai.OpenAIError, httpx.HTTPError) as e:
96
- yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
97
- yield metadata_tracker.finalize()
106
+ yield message.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
107
+ yield message.AssistantMessage(parts=[], response_id=None, usage=metadata_tracker.finalize())
98
108
  return
99
109
 
100
110
  reasoning_handler = DefaultReasoningHandler(
@@ -6,89 +6,59 @@
6
6
  from openai.types import chat
7
7
  from openai.types.chat import ChatCompletionContentPartParam
8
8
 
9
- from klaude_code.llm.input_common import AssistantGroup, ToolGroup, UserGroup, merge_reminder_text, parse_message_groups
10
- from klaude_code.protocol import llm_param, model
11
-
12
-
13
- def user_group_to_openai_message(group: UserGroup) -> chat.ChatCompletionMessageParam:
14
- """Convert a UserGroup to an OpenAI-compatible chat message."""
15
- parts: list[ChatCompletionContentPartParam] = []
16
- for text in group.text_parts:
17
- parts.append({"type": "text", "text": text + "\n"})
18
- for image in group.images:
19
- parts.append({"type": "image_url", "image_url": {"url": image.image_url.url}})
20
- if not parts:
21
- parts.append({"type": "text", "text": ""})
22
- return {"role": "user", "content": parts}
23
-
24
-
25
- def tool_group_to_openai_message(group: ToolGroup) -> chat.ChatCompletionMessageParam:
26
- """Convert a ToolGroup to an OpenAI-compatible chat message."""
27
- merged_text = merge_reminder_text(
28
- group.tool_result.output or "<system-reminder>Tool ran without output or errors</system-reminder>",
29
- group.reminder_texts,
30
- )
31
- return {
32
- "role": "tool",
33
- "content": [{"type": "text", "text": merged_text}],
34
- "tool_call_id": group.tool_result.call_id,
35
- }
36
-
37
-
38
- def _assistant_group_to_message(
39
- group: AssistantGroup,
40
- ) -> chat.ChatCompletionMessageParam:
9
+ from klaude_code.llm.image import assistant_image_to_data_url
10
+ from klaude_code.llm.input_common import (
11
+ attach_developer_messages,
12
+ build_assistant_common_fields,
13
+ build_chat_content_parts,
14
+ build_tool_message,
15
+ collect_text_content,
16
+ )
17
+ from klaude_code.protocol import llm_param, message
18
+
19
+
20
def _assistant_message_to_openai(msg: message.AssistantMessage) -> chat.ChatCompletionMessageParam:
    """Convert an assistant message to an OpenAI chat-completions param.

    Text parts are concatenated into ``content``; image and tool-call parts
    are converted via the shared helper, with images inlined as data URLs.
    """
    out: dict[str, object] = {"role": "assistant"}

    text = collect_text_content(msg.parts)
    if text:
        out["content"] = text

    out.update(build_assistant_common_fields(msg, image_to_data_url=assistant_image_to_data_url))
    return out
60
29
 
61
30
 
62
31
  def build_user_content_parts(
63
- images: list[model.ImageURLPart],
32
+ images: list[message.ImageURLPart],
64
33
  ) -> list[ChatCompletionContentPartParam]:
65
34
  """Build content parts for images only. Used by OpenRouter."""
66
- return [{"type": "image_url", "image_url": {"url": image.image_url.url}} for image in images]
35
+ return [{"type": "image_url", "image_url": {"url": image.url}} for image in images]
67
36
 
68
37
 
69
38
  def convert_history_to_input(
70
- history: list[model.ConversationItem],
39
+ history: list[message.Message],
71
40
  system: str | None = None,
72
41
  model_name: str | None = None,
73
42
  ) -> list[chat.ChatCompletionMessageParam]:
74
- """
75
- Convert a list of conversation items to a list of chat completion message params.
76
-
77
- Args:
78
- history: List of conversation items.
79
- system: System message.
80
- model_name: Model name. Not used in OpenAI-compatible, kept for API consistency.
81
- """
43
+ """Convert a list of messages to chat completion params."""
44
+ del model_name
82
45
  messages: list[chat.ChatCompletionMessageParam] = [{"role": "system", "content": system}] if system else []
83
46
 
84
- for group in parse_message_groups(history):
85
- match group:
86
- case UserGroup():
87
- messages.append(user_group_to_openai_message(group))
88
- case ToolGroup():
89
- messages.append(tool_group_to_openai_message(group))
90
- case AssistantGroup():
91
- messages.append(_assistant_group_to_message(group))
47
+ for msg, attachment in attach_developer_messages(history):
48
+ match msg:
49
+ case message.SystemMessage():
50
+ system_text = "\n".join(part.text for part in msg.parts)
51
+ if system_text:
52
+ messages.append({"role": "system", "content": system_text})
53
+ case message.UserMessage():
54
+ parts = build_chat_content_parts(msg, attachment)
55
+ messages.append({"role": "user", "content": parts})
56
+ case message.ToolResultMessage():
57
+ messages.append(build_tool_message(msg, attachment))
58
+ case message.AssistantMessage():
59
+ messages.append(_assistant_message_to_openai(msg))
60
+ case _:
61
+ continue
92
62
 
93
63
  return messages
94
64