klaude-code 1.6.0__py3-none-any.whl → 1.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. klaude_code/cli/list_model.py +55 -4
  2. klaude_code/cli/main.py +10 -0
  3. klaude_code/cli/runtime.py +2 -2
  4. klaude_code/cli/session_cmd.py +3 -2
  5. klaude_code/command/fork_session_cmd.py +7 -0
  6. klaude_code/config/assets/builtin_config.yaml +61 -2
  7. klaude_code/config/builtin_config.py +1 -0
  8. klaude_code/config/config.py +19 -0
  9. klaude_code/config/thinking.py +14 -0
  10. klaude_code/const.py +17 -2
  11. klaude_code/core/executor.py +16 -3
  12. klaude_code/core/task.py +5 -3
  13. klaude_code/core/tool/shell/command_safety.py +3 -5
  14. klaude_code/llm/anthropic/client.py +127 -114
  15. klaude_code/llm/bedrock/__init__.py +3 -0
  16. klaude_code/llm/bedrock/client.py +60 -0
  17. klaude_code/llm/google/__init__.py +3 -0
  18. klaude_code/llm/google/client.py +309 -0
  19. klaude_code/llm/google/input.py +215 -0
  20. klaude_code/llm/registry.py +10 -5
  21. klaude_code/protocol/events.py +1 -0
  22. klaude_code/protocol/llm_param.py +9 -0
  23. klaude_code/session/export.py +14 -2
  24. klaude_code/session/session.py +52 -3
  25. klaude_code/session/store.py +3 -0
  26. klaude_code/session/templates/export_session.html +210 -18
  27. klaude_code/ui/modes/repl/input_prompt_toolkit.py +6 -46
  28. klaude_code/ui/modes/repl/renderer.py +5 -1
  29. klaude_code/ui/renderers/developer.py +1 -1
  30. klaude_code/ui/renderers/sub_agent.py +1 -1
  31. {klaude_code-1.6.0.dist-info → klaude_code-1.7.1.dist-info}/METADATA +82 -10
  32. {klaude_code-1.6.0.dist-info → klaude_code-1.7.1.dist-info}/RECORD +34 -29
  33. {klaude_code-1.6.0.dist-info → klaude_code-1.7.1.dist-info}/WHEEL +0 -0
  34. {klaude_code-1.6.0.dist-info → klaude_code-1.7.1.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,215 @@
1
+ # pyright: reportReturnType=false
2
+ # pyright: reportArgumentType=false
3
+ # pyright: reportUnknownMemberType=false
4
+ # pyright: reportAttributeAccessIssue=false
5
+
6
+ import json
7
+ from base64 import b64decode
8
+ from binascii import Error as BinasciiError
9
+ from typing import Any
10
+
11
+ from google.genai import types
12
+
13
+ from klaude_code.llm.input_common import AssistantGroup, ToolGroup, UserGroup, merge_reminder_text, parse_message_groups
14
+ from klaude_code.protocol import llm_param, model
15
+
16
+
17
+ def _data_url_to_blob(url: str) -> types.Blob:
18
+ header_and_media = url.split(",", 1)
19
+ if len(header_and_media) != 2:
20
+ raise ValueError("Invalid data URL for image: missing comma separator")
21
+ header, base64_data = header_and_media
22
+ if not header.startswith("data:"):
23
+ raise ValueError("Invalid data URL for image: missing data: prefix")
24
+ if ";base64" not in header:
25
+ raise ValueError("Invalid data URL for image: missing base64 marker")
26
+
27
+ media_type = header[5:].split(";", 1)[0]
28
+ base64_payload = base64_data.strip()
29
+ if base64_payload == "":
30
+ raise ValueError("Inline image data is empty")
31
+
32
+ try:
33
+ decoded = b64decode(base64_payload, validate=True)
34
+ except (BinasciiError, ValueError) as exc:
35
+ raise ValueError("Inline image data is not valid base64") from exc
36
+
37
+ return types.Blob(data=decoded, mime_type=media_type)
38
+
39
+
40
+ def _image_part_to_part(image: model.ImageURLPart) -> types.Part:
41
+ url = image.image_url.url
42
+ if url.startswith("data:"):
43
+ return types.Part(inline_data=_data_url_to_blob(url))
44
+ # Best-effort: Gemini supports file URIs, and may accept public HTTPS URLs.
45
+ return types.Part(file_data=types.FileData(file_uri=url))
46
+
47
+
48
+ def _user_group_to_content(group: UserGroup) -> types.Content:
49
+ parts: list[types.Part] = []
50
+ for text in group.text_parts:
51
+ parts.append(types.Part(text=text + "\n"))
52
+ for image in group.images:
53
+ parts.append(_image_part_to_part(image))
54
+ if not parts:
55
+ parts.append(types.Part(text=""))
56
+ return types.Content(role="user", parts=parts)
57
+
58
+
59
+ def _tool_groups_to_content(groups: list[ToolGroup], model_name: str | None) -> list[types.Content]:
60
+ supports_multimodal_function_response = bool(model_name and "gemini-3" in model_name.lower())
61
+
62
+ response_parts: list[types.Part] = []
63
+ extra_image_contents: list[types.Content] = []
64
+
65
+ for group in groups:
66
+ merged_text = merge_reminder_text(
67
+ group.tool_result.output or "<system-reminder>Tool ran without output or errors</system-reminder>",
68
+ group.reminder_texts,
69
+ )
70
+ has_text = merged_text.strip() != ""
71
+
72
+ images = list(group.tool_result.images or []) + list(group.reminder_images)
73
+ image_parts: list[types.Part] = []
74
+ for image in images:
75
+ try:
76
+ image_parts.append(_image_part_to_part(image))
77
+ except ValueError:
78
+ # Skip invalid data URLs
79
+ continue
80
+
81
+ has_images = len(image_parts) > 0
82
+ response_value = merged_text if has_text else "(see attached image)" if has_images else ""
83
+ response_payload = (
84
+ {"error": response_value} if group.tool_result.status == "error" else {"output": response_value}
85
+ )
86
+
87
+ function_response = types.FunctionResponse(
88
+ id=group.tool_result.call_id,
89
+ name=group.tool_result.tool_name or "",
90
+ response=response_payload,
91
+ parts=image_parts if (has_images and supports_multimodal_function_response) else None,
92
+ )
93
+ response_parts.append(types.Part(function_response=function_response))
94
+
95
+ if has_images and not supports_multimodal_function_response:
96
+ extra_image_contents.append(
97
+ types.Content(role="user", parts=[types.Part(text="Tool result image:"), *image_parts])
98
+ )
99
+
100
+ contents: list[types.Content] = []
101
+ if response_parts:
102
+ contents.append(types.Content(role="user", parts=response_parts))
103
+ contents.extend(extra_image_contents)
104
+ return contents
105
+
106
+
107
+ def _assistant_group_to_content(group: AssistantGroup, model_name: str | None) -> types.Content | None:
108
+ parts: list[types.Part] = []
109
+
110
+ degraded_thinking_texts: list[str] = []
111
+ pending_thought_text: str | None = None
112
+ pending_thought_signature: str | None = None
113
+
114
+ for item in group.reasoning_items:
115
+ match item:
116
+ case model.ReasoningTextItem():
117
+ if not item.content:
118
+ continue
119
+ if model_name is not None and item.model is not None and item.model != model_name:
120
+ degraded_thinking_texts.append(item.content)
121
+ else:
122
+ pending_thought_text = item.content
123
+ case model.ReasoningEncryptedItem():
124
+ if not (
125
+ model_name is not None
126
+ and item.model == model_name
127
+ and item.encrypted_content
128
+ and (item.format or "").startswith("google")
129
+ and pending_thought_text
130
+ ):
131
+ continue
132
+ pending_thought_signature = item.encrypted_content
133
+ parts.append(
134
+ types.Part(
135
+ text=pending_thought_text,
136
+ thought=True,
137
+ thought_signature=pending_thought_signature,
138
+ )
139
+ )
140
+ pending_thought_text = None
141
+ pending_thought_signature = None
142
+
143
+ if pending_thought_text:
144
+ parts.append(
145
+ types.Part(
146
+ text=pending_thought_text,
147
+ thought=True,
148
+ thought_signature=pending_thought_signature,
149
+ )
150
+ )
151
+
152
+ if degraded_thinking_texts:
153
+ parts.insert(0, types.Part(text="<thinking>\n" + "\n".join(degraded_thinking_texts) + "\n</thinking>"))
154
+
155
+ if group.text_content:
156
+ parts.append(types.Part(text=group.text_content))
157
+
158
+ for tc in group.tool_calls:
159
+ args: dict[str, Any]
160
+ if tc.arguments:
161
+ try:
162
+ args = json.loads(tc.arguments)
163
+ except json.JSONDecodeError:
164
+ args = {"_raw": tc.arguments}
165
+ else:
166
+ args = {}
167
+ parts.append(types.Part(function_call=types.FunctionCall(id=tc.call_id, name=tc.name, args=args)))
168
+
169
+ if not parts:
170
+ return None
171
+ return types.Content(role="model", parts=parts)
172
+
173
+
174
+ def convert_history_to_contents(
175
+ history: list[model.ConversationItem],
176
+ model_name: str | None,
177
+ ) -> list[types.Content]:
178
+ contents: list[types.Content] = []
179
+ pending_tool_groups: list[ToolGroup] = []
180
+
181
+ def flush_tool_groups() -> None:
182
+ nonlocal pending_tool_groups
183
+ if pending_tool_groups:
184
+ contents.extend(_tool_groups_to_content(pending_tool_groups, model_name=model_name))
185
+ pending_tool_groups = []
186
+
187
+ for group in parse_message_groups(history):
188
+ match group:
189
+ case UserGroup():
190
+ flush_tool_groups()
191
+ contents.append(_user_group_to_content(group))
192
+ case ToolGroup():
193
+ pending_tool_groups.append(group)
194
+ case AssistantGroup():
195
+ flush_tool_groups()
196
+ content = _assistant_group_to_content(group, model_name=model_name)
197
+ if content is not None:
198
+ contents.append(content)
199
+
200
+ flush_tool_groups()
201
+ return contents
202
+
203
+
204
+ def convert_tool_schema(tools: list[llm_param.ToolSchema] | None) -> list[types.Tool]:
205
+ if tools is None or len(tools) == 0:
206
+ return []
207
+ declarations = [
208
+ types.FunctionDeclaration(
209
+ name=tool.name,
210
+ description=tool.description,
211
+ parameters_json_schema=tool.parameters,
212
+ )
213
+ for tool in tools
214
+ ]
215
+ return [types.Tool(function_declarations=declarations)]
@@ -1,3 +1,4 @@
1
+ import importlib
1
2
  from collections.abc import Callable
2
3
  from typing import TYPE_CHECKING, TypeVar
3
4
 
@@ -21,15 +22,19 @@ def _load_protocol(protocol: llm_param.LLMClientProtocol) -> None:
21
22
 
22
23
  # Import only the needed module to trigger @register decorator
23
24
  if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
24
- from . import anthropic as _
25
+ importlib.import_module("klaude_code.llm.anthropic")
26
+ elif protocol == llm_param.LLMClientProtocol.BEDROCK:
27
+ importlib.import_module("klaude_code.llm.bedrock")
25
28
  elif protocol == llm_param.LLMClientProtocol.CODEX:
26
- from . import codex as _
29
+ importlib.import_module("klaude_code.llm.codex")
27
30
  elif protocol == llm_param.LLMClientProtocol.OPENAI:
28
- from . import openai_compatible as _
31
+ importlib.import_module("klaude_code.llm.openai_compatible")
29
32
  elif protocol == llm_param.LLMClientProtocol.OPENROUTER:
30
- from . import openrouter as _
33
+ importlib.import_module("klaude_code.llm.openrouter")
31
34
  elif protocol == llm_param.LLMClientProtocol.RESPONSES:
32
- from . import responses as _ # noqa: F401
35
+ importlib.import_module("klaude_code.llm.responses")
36
+ elif protocol == llm_param.LLMClientProtocol.GOOGLE:
37
+ importlib.import_module("klaude_code.llm.google")
33
38
 
34
39
 
35
40
  def register(name: llm_param.LLMClientProtocol) -> Callable[[_T], _T]:
@@ -16,6 +16,7 @@ class EndEvent(BaseModel):
16
16
  class ErrorEvent(BaseModel):
17
17
  error_message: str
18
18
  can_retry: bool = False
19
+ session_id: str | None = None
19
20
 
20
21
 
21
22
  class TaskStartEvent(BaseModel):
@@ -12,7 +12,9 @@ class LLMClientProtocol(Enum):
12
12
  RESPONSES = "responses"
13
13
  OPENROUTER = "openrouter"
14
14
  ANTHROPIC = "anthropic"
15
+ BEDROCK = "bedrock"
15
16
  CODEX = "codex"
17
+ GOOGLE = "google"
16
18
 
17
19
 
18
20
  class ToolSchema(BaseModel):
@@ -91,8 +93,15 @@ class LLMConfigProviderParameter(BaseModel):
91
93
  protocol: LLMClientProtocol
92
94
  base_url: str | None = None
93
95
  api_key: str | None = None
96
+ # Azure OpenAI
94
97
  is_azure: bool = False
95
98
  azure_api_version: str | None = None
99
+ # AWS Bedrock configuration
100
+ aws_access_key: str | None = None
101
+ aws_secret_key: str | None = None
102
+ aws_region: str | None = None
103
+ aws_session_token: str | None = None
104
+ aws_profile: str | None = None
96
105
 
97
106
 
98
107
  class LLMConfigModelParameter(BaseModel):
@@ -308,13 +308,17 @@ def _try_render_todo_args(arguments: str, tool_name: str) -> str | None:
308
308
  return None
309
309
 
310
310
 
311
- def _render_sub_agent_result(content: str) -> str:
311
+ def _render_sub_agent_result(content: str, description: str | None = None) -> str:
312
312
  # Try to format as JSON for better readability
313
313
  try:
314
314
  parsed = json.loads(content)
315
315
  formatted = "```json\n" + json.dumps(parsed, ensure_ascii=False, indent=2) + "\n```"
316
316
  except (json.JSONDecodeError, TypeError):
317
317
  formatted = content
318
+
319
+ if description:
320
+ formatted = f"# {description}\n\n{formatted}"
321
+
318
322
  encoded = _escape_html(formatted)
319
323
  return (
320
324
  f'<div class="sub-agent-result-container">'
@@ -628,7 +632,15 @@ def _format_tool_call(tool_call: model.ToolCallItem, result: model.ToolResultIte
628
632
 
629
633
  if result.output and not should_hide_text:
630
634
  if is_sub_agent_tool(tool_call.name):
631
- items_to_render.append(_render_sub_agent_result(result.output))
635
+ description = None
636
+ try:
637
+ args = json.loads(tool_call.arguments)
638
+ if isinstance(args, dict):
639
+ typed_args = cast(dict[str, Any], args)
640
+ description = cast(str | None, typed_args.get("description"))
641
+ except (json.JSONDecodeError, TypeError):
642
+ pass
643
+ items_to_render.append(_render_sub_agent_result(result.output, description))
632
644
  else:
633
645
  items_to_render.append(_render_text_block(result.output))
634
646
 
@@ -62,6 +62,7 @@ class Session(BaseModel):
62
62
  need_todo_not_used_cooldown_counter: int = Field(exclude=True, default=0)
63
63
 
64
64
  _messages_count_cache: int | None = PrivateAttr(default=None)
65
+ _user_messages_cache: list[str] | None = PrivateAttr(default=None)
65
66
  _store: JsonlSessionStore = PrivateAttr(default_factory=get_default_store)
66
67
 
67
68
  @property
@@ -78,6 +79,20 @@ class Session(BaseModel):
78
79
  def _invalidate_messages_count_cache(self) -> None:
79
80
  self._messages_count_cache = None
80
81
 
82
+ @property
83
+ def user_messages(self) -> list[str]:
84
+ """All user message contents in this session.
85
+
86
+ This is used for session selection UI and search, and is also persisted
87
+ in meta.json to avoid scanning events.jsonl for every session.
88
+ """
89
+
90
+ if self._user_messages_cache is None:
91
+ self._user_messages_cache = [
92
+ it.content for it in self.conversation_history if isinstance(it, model.UserMessageItem) and it.content
93
+ ]
94
+ return self._user_messages_cache
95
+
81
96
  @staticmethod
82
97
  def _project_key() -> str:
83
98
  return _project_key_from_cwd()
@@ -178,6 +193,18 @@ class Session(BaseModel):
178
193
  self.conversation_history.extend(items)
179
194
  self._invalidate_messages_count_cache()
180
195
 
196
+ new_user_messages = [
197
+ it.content for it in items if isinstance(it, model.UserMessageItem) and it.content
198
+ ]
199
+ if new_user_messages:
200
+ if self._user_messages_cache is None:
201
+ # Build from full history once to ensure correctness when resuming older sessions.
202
+ self._user_messages_cache = [
203
+ it.content for it in self.conversation_history if isinstance(it, model.UserMessageItem) and it.content
204
+ ]
205
+ else:
206
+ self._user_messages_cache.extend(new_user_messages)
207
+
181
208
  if self.created_at <= 0:
182
209
  self.created_at = time.time()
183
210
  self.updated_at = time.time()
@@ -188,6 +215,7 @@ class Session(BaseModel):
188
215
  sub_agent_state=self.sub_agent_state,
189
216
  file_tracker=self.file_tracker,
190
217
  todos=list(self.todos),
218
+ user_messages=self.user_messages,
191
219
  created_at=self.created_at,
192
220
  updated_at=self.updated_at,
193
221
  messages_count=self.messages_count,
@@ -218,7 +246,9 @@ class Session(BaseModel):
218
246
  forked.file_tracker = {k: v.model_copy(deep=True) for k, v in self.file_tracker.items()}
219
247
  forked.todos = [todo.model_copy(deep=True) for todo in self.todos]
220
248
 
221
- history_to_copy = self.conversation_history[:until_index] if until_index is not None else self.conversation_history
249
+ history_to_copy = (
250
+ self.conversation_history[:until_index] if until_index is not None else self.conversation_history
251
+ )
222
252
  items = [it.model_copy(deep=True) for it in history_to_copy]
223
253
  if items:
224
254
  forked.append_history(items)
@@ -309,7 +339,7 @@ class Session(BaseModel):
309
339
  case model.DeveloperMessageItem() as dm:
310
340
  yield events.DeveloperMessageEvent(session_id=self.id, item=dm)
311
341
  case model.StreamErrorItem() as se:
312
- yield events.ErrorEvent(error_message=se.error, can_retry=False)
342
+ yield events.ErrorEvent(error_message=se.error, can_retry=False, session_id=self.id)
313
343
  case _:
314
344
  continue
315
345
  prev_item = it
@@ -376,6 +406,17 @@ class Session(BaseModel):
376
406
  pass
377
407
  return messages
378
408
 
409
+ def _maybe_backfill_user_messages(*, meta_path: Path, meta: dict[str, Any], user_messages: list[str]) -> None:
410
+ if isinstance(meta.get("user_messages"), list):
411
+ return
412
+ meta["user_messages"] = user_messages
413
+ try:
414
+ tmp_path = meta_path.with_suffix(".json.tmp")
415
+ tmp_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2), encoding="utf-8")
416
+ tmp_path.replace(meta_path)
417
+ except OSError:
418
+ return
419
+
379
420
  items: list[Session.SessionMetaBrief] = []
380
421
  for meta_path in store.iter_meta_files():
381
422
  data = _read_json_dict(meta_path)
@@ -388,7 +429,15 @@ class Session(BaseModel):
388
429
  created = float(data.get("created_at", meta_path.stat().st_mtime))
389
430
  updated = float(data.get("updated_at", meta_path.stat().st_mtime))
390
431
  work_dir = str(data.get("work_dir", ""))
391
- user_messages = _get_user_messages(sid)
432
+
433
+ user_messages_raw = data.get("user_messages")
434
+ if isinstance(user_messages_raw, list) and all(
435
+ isinstance(m, str) for m in cast(list[object], user_messages_raw)
436
+ ):
437
+ user_messages = cast(list[str], user_messages_raw)
438
+ else:
439
+ user_messages = _get_user_messages(sid)
440
+ _maybe_backfill_user_messages(meta_path=meta_path, meta=data, user_messages=user_messages)
392
441
  messages_count = int(data.get("messages_count", -1))
393
442
  model_name = data.get("model_name") if isinstance(data.get("model_name"), str) else None
394
443
 
@@ -193,6 +193,7 @@ def build_meta_snapshot(
193
193
  sub_agent_state: model.SubAgentState | None,
194
194
  file_tracker: dict[str, model.FileStatus],
195
195
  todos: list[model.TodoItem],
196
+ user_messages: list[str],
196
197
  created_at: float,
197
198
  updated_at: float,
198
199
  messages_count: int,
@@ -206,6 +207,8 @@ def build_meta_snapshot(
206
207
  "sub_agent_state": sub_agent_state.model_dump(mode="json") if sub_agent_state else None,
207
208
  "file_tracker": {path: status.model_dump(mode="json") for path, status in file_tracker.items()},
208
209
  "todos": [todo.model_dump(mode="json", exclude_defaults=True) for todo in todos],
210
+ # Cache user messages to avoid scanning events.jsonl during session listing.
211
+ "user_messages": list(user_messages),
209
212
  "created_at": created_at,
210
213
  "updated_at": updated_at,
211
214
  "messages_count": messages_count,