klaude-code 1.2.2__py3-none-any.whl → 1.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. klaude_code/cli/main.py +7 -0
  2. klaude_code/cli/runtime.py +6 -6
  3. klaude_code/command/__init__.py +7 -5
  4. klaude_code/command/clear_cmd.py +3 -24
  5. klaude_code/command/command_abc.py +36 -1
  6. klaude_code/command/export_cmd.py +14 -20
  7. klaude_code/command/help_cmd.py +1 -0
  8. klaude_code/command/model_cmd.py +3 -30
  9. klaude_code/command/{prompt-update-dev-doc.md → prompt-dev-docs-update.md} +3 -2
  10. klaude_code/command/{prompt-dev-doc.md → prompt-dev-docs.md} +3 -2
  11. klaude_code/command/prompt-init.md +2 -5
  12. klaude_code/command/prompt_command.py +3 -3
  13. klaude_code/command/registry.py +6 -7
  14. klaude_code/config/config.py +1 -1
  15. klaude_code/config/list_model.py +1 -1
  16. klaude_code/const/__init__.py +1 -1
  17. klaude_code/core/agent.py +2 -11
  18. klaude_code/core/executor.py +155 -14
  19. klaude_code/core/prompts/prompt-gemini.md +1 -1
  20. klaude_code/core/reminders.py +24 -0
  21. klaude_code/core/task.py +10 -0
  22. klaude_code/core/tool/shell/bash_tool.py +6 -2
  23. klaude_code/core/tool/sub_agent_tool.py +1 -1
  24. klaude_code/core/tool/tool_context.py +1 -1
  25. klaude_code/core/tool/tool_registry.py +1 -1
  26. klaude_code/core/tool/tool_runner.py +1 -1
  27. klaude_code/core/tool/web/mermaid_tool.py +1 -1
  28. klaude_code/llm/__init__.py +3 -4
  29. klaude_code/llm/anthropic/client.py +12 -9
  30. klaude_code/llm/openai_compatible/client.py +2 -18
  31. klaude_code/llm/openai_compatible/tool_call_accumulator.py +2 -2
  32. klaude_code/llm/openrouter/client.py +2 -18
  33. klaude_code/llm/openrouter/input.py +6 -2
  34. klaude_code/llm/registry.py +2 -71
  35. klaude_code/llm/responses/client.py +2 -0
  36. klaude_code/llm/{metadata_tracker.py → usage.py} +49 -2
  37. klaude_code/protocol/llm_param.py +12 -0
  38. klaude_code/protocol/model.py +23 -3
  39. klaude_code/protocol/op.py +14 -14
  40. klaude_code/protocol/op_handler.py +28 -0
  41. klaude_code/protocol/tools.py +0 -2
  42. klaude_code/session/export.py +124 -35
  43. klaude_code/session/session.py +1 -1
  44. klaude_code/session/templates/export_session.html +180 -42
  45. klaude_code/ui/__init__.py +6 -2
  46. klaude_code/ui/modes/exec/display.py +26 -0
  47. klaude_code/ui/modes/repl/event_handler.py +5 -1
  48. klaude_code/ui/renderers/developer.py +6 -10
  49. klaude_code/ui/renderers/metadata.py +33 -24
  50. klaude_code/ui/renderers/sub_agent.py +1 -1
  51. klaude_code/ui/renderers/tools.py +2 -2
  52. klaude_code/ui/renderers/user_input.py +18 -22
  53. klaude_code/ui/rich/status.py +13 -2
  54. {klaude_code-1.2.2.dist-info → klaude_code-1.2.3.dist-info}/METADATA +1 -1
  55. {klaude_code-1.2.2.dist-info → klaude_code-1.2.3.dist-info}/RECORD +58 -57
  56. klaude_code/{core → protocol}/sub_agent.py +0 -0
  57. {klaude_code-1.2.2.dist-info → klaude_code-1.2.3.dist-info}/WHEEL +0 -0
  58. {klaude_code-1.2.2.dist-info → klaude_code-1.2.3.dist-info}/entry_points.txt +0 -0
klaude_code/llm/{metadata_tracker.py → usage.py}

@@ -1,16 +1,42 @@
 import time
 
-from klaude_code.protocol import model
+import openai.types
+
+from klaude_code.protocol import llm_param, model
+
+
+def calculate_cost(usage: model.Usage, cost_config: llm_param.Cost | None) -> None:
+    """Calculate and set cost fields on usage based on cost configuration.
+
+    Note: input_tokens includes cached_tokens, so we need to subtract cached_tokens
+    to get the actual non-cached input tokens for cost calculation.
+    """
+    if cost_config is None:
+        return
+
+    # Non-cached input tokens cost
+    non_cached_input = usage.input_tokens - usage.cached_tokens
+    usage.input_cost = (non_cached_input / 1_000_000) * cost_config.input
+
+    # Output tokens cost (includes reasoning tokens)
+    usage.output_cost = (usage.output_tokens / 1_000_000) * cost_config.output
+
+    # Cache read cost
+    usage.cache_read_cost = (usage.cached_tokens / 1_000_000) * cost_config.cache_read
+
+    # Total cost
+    usage.total_cost = usage.input_cost + usage.output_cost + usage.cache_read_cost
 
 
 class MetadataTracker:
     """Tracks timing and metadata for LLM responses."""
 
-    def __init__(self) -> None:
+    def __init__(self, cost_config: llm_param.Cost | None = None) -> None:
         self._request_start_time: float = time.time()
         self._first_token_time: float | None = None
         self._last_token_time: float | None = None
         self._metadata_item = model.ResponseMetadataItem()
+        self._cost_config = cost_config
 
     @property
     def metadata_item(self) -> model.ResponseMetadataItem:
@@ -59,4 +85,25 @@ class MetadataTracker:
         if time_duration >= 0.15:
             self._metadata_item.usage.throughput_tps = self._metadata_item.usage.output_tokens / time_duration
 
+        # Calculate cost if config is available
+        if self._metadata_item.usage:
+            calculate_cost(self._metadata_item.usage, self._cost_config)
+
         return self._metadata_item
+
+
+def convert_usage(usage: openai.types.CompletionUsage, context_limit: int | None = None) -> model.Usage:
+    """Convert OpenAI CompletionUsage to internal Usage model."""
+    total_tokens = usage.total_tokens
+    context_usage_percent = (total_tokens / context_limit) * 100 if context_limit else None
+    return model.Usage(
+        input_tokens=usage.prompt_tokens,
+        cached_tokens=(usage.prompt_tokens_details.cached_tokens if usage.prompt_tokens_details else 0) or 0,
+        reasoning_tokens=(usage.completion_tokens_details.reasoning_tokens if usage.completion_tokens_details else 0)
+        or 0,
+        output_tokens=usage.completion_tokens,
+        total_tokens=total_tokens,
+        context_usage_percent=context_usage_percent,
+        throughput_tps=None,
+        first_token_latency_ms=None,
+    )
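A quick sketch of how the new cost plumbing fits together, with made-up per-million-token prices (real values would come from the user's model config, and the arithmetic simply mirrors calculate_cost(); the import assumes the renamed module keeps exporting MetadataTracker, as the hunk above suggests):

    from klaude_code.llm.usage import MetadataTracker
    from klaude_code.protocol import llm_param

    # Hypothetical pricing: $3/M input, $15/M output, $0.30/M cache read
    cost = llm_param.Cost(input=3.00, output=15.00, cache_read=0.30)

    # A tracker built with a cost config attaches cost figures to its usage
    tracker = MetadataTracker(cost_config=cost)

    # Worked example of the formula itself, for 12,000 prompt tokens (8,000 cached)
    # and 2,000 completion tokens:
    #   input_cost      = (12_000 - 8_000) / 1_000_000 * 3.00  = $0.0120
    #   output_cost     =          2_000   / 1_000_000 * 15.00 = $0.0300
    #   cache_read_cost =          8_000   / 1_000_000 * 0.30  = $0.0024
    #   total_cost      = 0.0120 + 0.0300 + 0.0024             = $0.0444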
klaude_code/protocol/llm_param.py

@@ -35,6 +35,15 @@ class Thinking(BaseModel):
     budget_tokens: int | None = None
 
 
+class Cost(BaseModel):
+    """Cost configuration per million tokens (USD)."""
+
+    input: float  # Input token price per million tokens
+    output: float  # Output token price per million tokens
+    cache_read: float = 0.0  # Cache read price per million tokens
+    cache_write: float = 0.0  # Cache write price per million tokens (ignored in calculation for now)
+
+
 class OpenRouterProviderRouting(BaseModel):
     """
     https://openrouter.ai/docs/features/provider-routing#json-schema-for-provider-preferences
@@ -99,6 +108,9 @@ class LLMConfigModelParameter(BaseModel):
     # OpenRouter Provider Routing Preferences
     provider_routing: OpenRouterProviderRouting | None = None
 
+    # Cost configuration (USD per million tokens)
+    cost: Cost | None = None
+
 
 class LLMConfigParameter(LLMConfigProviderParameter, LLMConfigModelParameter):
     """
klaude_code/protocol/model.py

@@ -1,7 +1,8 @@
+from datetime import datetime
 from enum import Enum
 from typing import Literal
 
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 from klaude_code.protocol.commands import CommandName
 from klaude_code.protocol.tools import SubAgentType
@@ -20,6 +21,12 @@ class Usage(BaseModel):
     throughput_tps: float | None = None
     first_token_latency_ms: float | None = None
 
+    # Cost in USD (calculated from token counts and cost config)
+    input_cost: float | None = None  # Cost for non-cached input tokens
+    output_cost: float | None = None  # Cost for output tokens (including reasoning)
+    cache_read_cost: float | None = None  # Cost for cached tokens
+    total_cost: float | None = None  # Total cost (input + output + cache_read)
+
 
 class TodoItem(BaseModel):
     content: str
@@ -113,16 +120,18 @@ When adding a new item, please also modify the following:
 
 class StartItem(BaseModel):
     response_id: str
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class InterruptItem(BaseModel):
-    pass
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class SystemMessageItem(BaseModel):
     id: str | None = None
     role: RoleType = "system"
     content: str | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class DeveloperMessageItem(BaseModel):
@@ -130,6 +139,7 @@ class DeveloperMessageItem(BaseModel):
     role: RoleType = "developer"
     content: str | None = None  # For LLM input
     images: list["ImageURLPart"] | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
 
     # Special fields for reminders UI
     memory_paths: list[str] | None = None
@@ -137,7 +147,7 @@ class DeveloperMessageItem(BaseModel):
     todo_use: bool | None = None
     at_files: list[AtPatternParseResult] | None = None
     command_output: CommandOutput | None = None
-    clipboard_images: list[str] | None = None
+    user_image_count: int | None = None
 
 
 class ImageURLPart(BaseModel):
@@ -164,6 +174,7 @@ class UserMessageItem(BaseModel):
     role: RoleType = "user"
     content: str | None = None
     images: list[ImageURLPart] | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class AssistantMessageItem(BaseModel):
@@ -171,6 +182,7 @@ class AssistantMessageItem(BaseModel):
     role: RoleType = "assistant"
     content: str | None = None
     response_id: str | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class ReasoningTextItem(BaseModel):
@@ -178,6 +190,7 @@ class ReasoningTextItem(BaseModel):
     response_id: str | None = None
     content: str
     model: str | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class ReasoningEncryptedItem(BaseModel):
@@ -186,6 +199,7 @@ class ReasoningEncryptedItem(BaseModel):
     encrypted_content: str  # OpenAI encrypted content or Anthropic thinking signature
     format: str | None = None
     model: str | None
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class ToolCallStartItem(BaseModel):
@@ -198,6 +212,7 @@ class ToolCallStartItem(BaseModel):
     response_id: str | None = None
     call_id: str
     name: str
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class ToolCallItem(BaseModel):
@@ -206,6 +221,7 @@ class ToolCallItem(BaseModel):
     call_id: str
     name: str
     arguments: str
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class ToolResultItem(BaseModel):
@@ -216,15 +232,18 @@ class ToolResultItem(BaseModel):
     ui_extra: ToolResultUIExtra | None = None  # Extra data for UI display, e.g. diff render
     images: list[ImageURLPart] | None = None
     side_effects: list[ToolSideEffect] | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class AssistantMessageDelta(BaseModel):
     response_id: str | None = None
     content: str
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class StreamErrorItem(BaseModel):
     error: str
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 class ResponseMetadataItem(BaseModel):
@@ -235,6 +254,7 @@ class ResponseMetadataItem(BaseModel):
     task_duration_s: float | None = None
     status: str | None = None
     error_reason: str | None = None
+    created_at: datetime = Field(default_factory=datetime.now)
 
 
 MessageItem = (
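All of the new created_at fields use a default_factory, so each item is stamped at the moment it is instantiated rather than sharing a single import-time default. A minimal standalone sketch of the pattern (Item is a stand-in, not a klaude_code model):

    from datetime import datetime
    from pydantic import BaseModel, Field

    class Item(BaseModel):
        created_at: datetime = Field(default_factory=datetime.now)

    a = Item()
    b = Item()
    assert a.created_at <= b.created_at  # each instance gets its own timestamp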
klaude_code/protocol/op.py

@@ -5,7 +5,8 @@ This module defines the operation types and submission structure
 that the executor uses to handle different types of requests.
 """
 
-from abc import ABC, abstractmethod
+from __future__ import annotations
+
 from enum import Enum
 from typing import TYPE_CHECKING
 from uuid import uuid4
@@ -15,7 +16,7 @@ from pydantic import BaseModel, Field
 from klaude_code.protocol.model import UserInputPayload
 
 if TYPE_CHECKING:
-    from klaude_code.core.executor import ExecutorContext
+    from klaude_code.protocol.op_handler import OperationHandler
 
 
 class OperationType(Enum):
@@ -27,16 +28,15 @@ class OperationType(Enum):
     END = "end"
 
 
-class Operation(BaseModel, ABC):
+class Operation(BaseModel):
     """Base class for all operations that can be submitted to the executor."""
 
     type: OperationType
     id: str = Field(default_factory=lambda: uuid4().hex)
 
-    @abstractmethod
-    async def execute(self, context: "ExecutorContext") -> None:
-        """Execute this operation within the given executor context."""
-        pass
+    async def execute(self, handler: OperationHandler) -> None:
+        """Execute this operation using the given handler."""
+        raise NotImplementedError("Subclasses must implement execute()")
 
 
 class UserInputOperation(Operation):
@@ -46,9 +46,9 @@ class UserInputOperation(Operation):
     input: UserInputPayload
     session_id: str | None = None
 
-    async def execute(self, context: "ExecutorContext") -> None:
+    async def execute(self, handler: OperationHandler) -> None:
         """Execute user input by running it through an agent."""
-        await context.handle_user_input(self)
+        await handler.handle_user_input(self)
 
 
 class InterruptOperation(Operation):
@@ -57,9 +57,9 @@ class InterruptOperation(Operation):
     type: OperationType = OperationType.INTERRUPT
     target_session_id: str | None = None  # If None, interrupt all sessions
 
-    async def execute(self, context: "ExecutorContext") -> None:
+    async def execute(self, handler: OperationHandler) -> None:
         """Execute interrupt by cancelling active tasks."""
-        await context.handle_interrupt(self)
+        await handler.handle_interrupt(self)
 
 
 class InitAgentOperation(Operation):
@@ -68,8 +68,8 @@ class InitAgentOperation(Operation):
     type: OperationType = OperationType.INIT_AGENT
     session_id: str | None = None
 
-    async def execute(self, context: "ExecutorContext") -> None:
-        await context.handle_init_agent(self)
+    async def execute(self, handler: OperationHandler) -> None:
+        await handler.handle_init_agent(self)
 
 
 class EndOperation(Operation):
@@ -77,7 +77,7 @@ class EndOperation(Operation):
 
     type: OperationType = OperationType.END
 
-    async def execute(self, context: "ExecutorContext") -> None:
+    async def execute(self, handler: OperationHandler) -> None:
         """Execute end operation - this is a no-op, just signals the executor to stop."""
         pass
 
klaude_code/protocol/op_handler.py (new file)

@@ -0,0 +1,28 @@
+"""
+Operation handler protocol for the executor system.
+
+This module defines the protocol that operation handlers must implement.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Protocol
+
+if TYPE_CHECKING:
+    from klaude_code.protocol.op import InitAgentOperation, InterruptOperation, UserInputOperation
+
+
+class OperationHandler(Protocol):
+    """Protocol defining the interface for handling operations."""
+
+    async def handle_user_input(self, operation: UserInputOperation) -> None:
+        """Handle a user input operation."""
+        ...
+
+    async def handle_interrupt(self, operation: InterruptOperation) -> None:
+        """Handle an interrupt operation."""
+        ...
+
+    async def handle_init_agent(self, operation: InitAgentOperation) -> None:
+        """Handle an init agent operation."""
+        ...
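Taken together with the op.py changes above, operations now dispatch through a structural OperationHandler protocol instead of importing the executor, removing the protocol layer's dependency on klaude_code.core. A rough sketch of the pattern (LoggingHandler and dispatch are illustrative stand-ins; in the real code ExecutorContext presumably plays this role):

    from klaude_code.protocol import op
    from klaude_code.protocol.op_handler import OperationHandler

    class LoggingHandler:
        # Structural typing: implementing the three coroutines is enough to satisfy
        # the OperationHandler Protocol; no inheritance is required.
        async def handle_user_input(self, operation: op.UserInputOperation) -> None:
            print("user input for session:", operation.session_id)

        async def handle_interrupt(self, operation: op.InterruptOperation) -> None:
            print("interrupt:", operation.target_session_id)

        async def handle_init_agent(self, operation: op.InitAgentOperation) -> None:
            print("init agent:", operation.session_id)

    async def dispatch(operation: op.Operation, handler: OperationHandler) -> None:
        # Each concrete Operation routes itself to the matching handle_* coroutine.
        await operation.execute(handler)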
klaude_code/protocol/tools.py

@@ -13,5 +13,3 @@ WEB_FETCH = "WebFetch"
 
 # SubAgentType is just a string alias now; agent types are defined via SubAgentProfile
 SubAgentType = str
-
-
klaude_code/session/export.py

@@ -11,8 +11,8 @@ from pathlib import Path
 from string import Template
 from typing import TYPE_CHECKING, Any, Final, cast
 
-from klaude_code.core.sub_agent import is_sub_agent_tool
 from klaude_code.protocol import llm_param, model
+from klaude_code.protocol.sub_agent import is_sub_agent_tool
 
 if TYPE_CHECKING:
     from klaude_code.session.session import Session
@@ -45,6 +45,10 @@ def _format_timestamp(value: float | None) -> str:
     return datetime.fromtimestamp(value).strftime("%Y-%m-%d %H:%M:%S")
 
 
+def _format_msg_timestamp(dt: datetime) -> str:
+    return dt.strftime("%Y-%m-%d %H:%M:%S")
+
+
 def get_first_user_message(history: list[model.ConversationItem]) -> str:
     """Extract the first user message content from conversation history."""
     for item in history:
@@ -150,6 +154,10 @@ def _format_token_count(count: int) -> str:
     return f"{m}M" if rem == 0 else f"{m}M{rem}k"
 
 
+def _format_cost(cost: float) -> str:
+    return f"${cost:.4f}"
+
+
 def _render_metadata_item(item: model.ResponseMetadataItem) -> str:
     # Line 1: Model Name [@ Provider]
     model_parts = [f'<span class="metadata-model">{_escape_html(item.model_name)}</span>']
@@ -163,10 +171,25 @@ def _render_metadata_item(item: model.ResponseMetadataItem) -> str:
     stats_parts: list[str] = []
     if item.usage:
         u = item.usage
-        stats_parts.append(f'<span class="metadata-stat">input: {_format_token_count(u.input_tokens)}</span>')
+        # Input with cost
+        input_stat = f"input: {_format_token_count(u.input_tokens)}"
+        if u.input_cost is not None:
+            input_stat += f"({_format_cost(u.input_cost)})"
+        stats_parts.append(f'<span class="metadata-stat">{input_stat}</span>')
+
+        # Cached with cost
         if u.cached_tokens > 0:
-            stats_parts.append(f'<span class="metadata-stat">cached: {_format_token_count(u.cached_tokens)}</span>')
-        stats_parts.append(f'<span class="metadata-stat">output: {_format_token_count(u.output_tokens)}</span>')
+            cached_stat = f"cached: {_format_token_count(u.cached_tokens)}"
+            if u.cache_read_cost is not None:
+                cached_stat += f"({_format_cost(u.cache_read_cost)})"
+            stats_parts.append(f'<span class="metadata-stat">{cached_stat}</span>')
+
+        # Output with cost
+        output_stat = f"output: {_format_token_count(u.output_tokens)}"
+        if u.output_cost is not None:
+            output_stat += f"({_format_cost(u.output_cost)})"
+        stats_parts.append(f'<span class="metadata-stat">{output_stat}</span>')
+
         if u.reasoning_tokens > 0:
             stats_parts.append(
                 f'<span class="metadata-stat">thinking: {_format_token_count(u.reasoning_tokens)}</span>'
@@ -177,7 +200,11 @@ def _render_metadata_item(item: model.ResponseMetadataItem) -> str:
         stats_parts.append(f'<span class="metadata-stat">tps: {u.throughput_tps:.1f}</span>')
 
     if item.task_duration_s is not None:
-        stats_parts.append(f'<span class="metadata-stat">cost: {item.task_duration_s:.1f}s</span>')
+        stats_parts.append(f'<span class="metadata-stat">time: {item.task_duration_s:.1f}s</span>')
+
+    # Total cost
+    if item.usage is not None and item.usage.total_cost is not None:
+        stats_parts.append(f'<span class="metadata-stat">cost: {_format_cost(item.usage.total_cost)}</span>')
 
     stats_html = ""
     if stats_parts:
@@ -192,13 +219,15 @@ def _render_metadata_item(item: model.ResponseMetadataItem) -> str:
     )
 
 
-def _render_assistant_message(index: int, content: str) -> str:
+def _render_assistant_message(index: int, content: str, timestamp: datetime) -> str:
     encoded = _escape_html(content)
+    ts_str = _format_msg_timestamp(timestamp)
     return (
         f'<div class="message-group assistant-message-group">'
         f'<div class="message-header">'
         f'<div class="role-label assistant">Assistant</div>'
         f'<div class="assistant-toolbar">'
+        f'<span class="timestamp">{ts_str}</span>'
         f'<button type="button" class="raw-toggle" aria-pressed="false" title="Toggle raw text view">Raw</button>'
         f'<button type="button" class="copy-raw-btn" title="Copy raw content">Copy</button>'
         f"</div>"
@@ -276,9 +305,9 @@ def _render_text_block(text: str) -> str:
     return (
         f'<div class="expandable-output expandable">'
         f'<div class="preview-text" style="white-space: pre-wrap; font-family: var(--font-mono);">{preview}</div>'
-        f'<div class="expand-hint expand-text">Click to expand full output ({len(lines)} lines)</div>'
+        f'<div class="expand-hint expand-text">click to expand full output ({len(lines)} lines)</div>'
         f'<div class="full-text" style="white-space: pre-wrap; font-family: var(--font-mono);">{full}</div>'
-        f'<div class="collapse-hint">Click to collapse</div>'
+        f'<div class="collapse-hint">click to collapse</div>'
         f"</div>"
     )
 
@@ -324,40 +353,74 @@ def _get_diff_text(ui_extra: model.ToolResultUIExtra | None) -> str | None:
 def _get_mermaid_link_html(
     ui_extra: model.ToolResultUIExtra | None, tool_call: model.ToolCallItem | None = None
 ) -> str | None:
-    if ui_extra is None:
-        return None
-    if ui_extra.type != model.ToolResultUIExtraType.MERMAID_LINK:
-        return None
-    if ui_extra.mermaid_link is None or not ui_extra.mermaid_link.link:
-        return None
-    link = _escape_html(ui_extra.mermaid_link.link)
-    lines = ui_extra.mermaid_link.line_count
-
-    copy_btn = ""
     if tool_call and tool_call.name == "Mermaid":
         try:
             args = json.loads(tool_call.arguments)
-            code = args.get("code")
-            if code:
-                escaped_code = _escape_html(code)
-                copy_btn = f'<button type="button" class="copy-mermaid-btn" data-code="{escaped_code}" title="Copy Mermaid Code">Copy Code</button>'
+            code = args.get("code", "")
         except Exception:
-            pass
+            code = ""
+    else:
+        code = ""
 
-    return (
-        '<div style="display: flex; justify-content: space-between; align-items: center; font-family: var(--font-mono);">'
-        f"<span>Lines: {lines}</span>"
-        f"<div>"
-        f"{copy_btn}"
-        f'<a href="{link}" target="_blank" rel="noopener noreferrer" style="color: var(--accent); text-decoration: underline; margin-left: 8px;">View Diagram</a>'
-        f"</div>"
+    if not code and (
+        ui_extra is None or ui_extra.type != model.ToolResultUIExtraType.MERMAID_LINK or not ui_extra.mermaid_link
+    ):
+        return None
+
+    # Prepare code for rendering and copy
+    escaped_code = _escape_html(code) if code else ""
+    line_count = code.count("\n") + 1 if code else 0
+
+    # Build Toolbar
+    toolbar_items: list[str] = []
+
+    if line_count > 0:
+        toolbar_items.append(f"<span>Lines: {line_count}</span>")
+
+    buttons_html: list[str] = []
+    if code:
+        buttons_html.append(
+            f'<button type="button" class="copy-mermaid-btn" data-code="{escaped_code}" title="Copy Mermaid Code">Copy Code</button>'
+        )
+
+    link = (
+        ui_extra.mermaid_link.link
+        if (ui_extra and ui_extra.type == model.ToolResultUIExtraType.MERMAID_LINK and ui_extra.mermaid_link)
+        else None
+    )
+
+    if link:
+        link_url = _escape_html(link)
+        buttons_html.append(
+            f'<a href="{link_url}" target="_blank" rel="noopener noreferrer" style="color: var(--accent); text-decoration: underline; margin-left: 8px;">View Online</a>'
+        )
+
+    toolbar_items.append(f"<div>{''.join(buttons_html)}</div>")
+
+    toolbar_html = (
+        '<div style="display: flex; justify-content: space-between; align-items: center; font-family: var(--font-mono); margin-top: 8px; padding-top: 8px; border-top: 1px dashed var(--border);">'
+        f"{''.join(toolbar_items)}"
         "</div>"
     )
 
+    # If we have code, render the diagram
+    if code:
+        return (
+            f'<div style="background: white; padding: 16px; border-radius: 4px; margin-top: 8px; border: 1px solid var(--border);">'
+            f'<div class="mermaid">{escaped_code}</div>'
+            f"{toolbar_html}"
            f"</div>"
+        )
+
+    # Fallback to just link/toolbar if no code available (legacy support behavior)
+    return toolbar_html
+
 
 def _format_tool_call(tool_call: model.ToolCallItem, result: model.ToolResultItem | None) -> str:
     args_html = None
     is_todo_list = False
+    ts_str = _format_msg_timestamp(tool_call.created_at)
+
     if tool_call.name == "TodoWrite":
         args_html = _try_render_todo_args(tool_call.arguments)
         if args_html:
@@ -379,7 +442,21 @@ def _format_tool_call(tool_call: model.ToolCallItem, result: model.ToolResultIte
     if is_todo_list:
         args_section = f'<div class="tool-args">{args_html}</div>'
     else:
-        open_attr = "" if _should_collapse(args_html) else " open"
+        # Always collapse Mermaid, Edit, Write tools by default
+        always_collapse_tools = {"Mermaid", "Edit", "Write"}
+        force_collapse = tool_call.name in always_collapse_tools
+
+        # Collapse Memory tool for write operations
+        if tool_call.name == "Memory":
+            try:
+                parsed_args = json.loads(tool_call.arguments)
+                if parsed_args.get("command") in {"create", "str_replace", "insert"}:
+                    force_collapse = True
+            except Exception:
+                pass
+
+        should_collapse = force_collapse or _should_collapse(args_html)
+        open_attr = "" if should_collapse else " open"
         args_section = (
             f'<details class="tool-args-collapsible"{open_attr}>'
             "<summary>Arguments</summary>"
@@ -391,7 +468,10 @@ def _format_tool_call(tool_call: model.ToolCallItem, result: model.ToolResultIte
         '<div class="tool-call">',
         '<div class="tool-header">',
         f'<span class="tool-name">{_escape_html(tool_call.name)}</span>',
+        '<div class="tool-header-right">',
         f'<span class="tool-id">{_escape_html(tool_call.call_id)}</span>',
+        f'<span class="timestamp">{ts_str}</span>',
+        "</div>",
         "</div>",
         args_section,
     ]
@@ -455,22 +535,27 @@ def _build_messages_html(
     for i, item in enumerate(renderable_items):
         if isinstance(item, model.UserMessageItem):
             text = _escape_html(item.content or "")
+            ts_str = _format_msg_timestamp(item.created_at)
             blocks.append(
                 f'<div class="message-group">'
-                f'<div class="role-label user">User</div>'
+                f'<div class="role-label user">'
+                f"User"
+                f'<span class="timestamp">{ts_str}</span>'
+                f"</div>"
                 f'<div class="message-content user" style="white-space: pre-wrap;">{text}</div>'
                 f"</div>"
             )
         elif isinstance(item, model.ReasoningTextItem):
             text = _escape_html(item.content.strip())
-            blocks.append(f'<div class="thinking-block">{text.replace(chr(10), "<br>")}</div>')
+            blocks.append(f'<div class="thinking-block markdown-body markdown-content" data-raw="{text}"></div>')
         elif isinstance(item, model.AssistantMessageItem):
             assistant_counter += 1
-            blocks.append(_render_assistant_message(assistant_counter, item.content or ""))
+            blocks.append(_render_assistant_message(assistant_counter, item.content or "", item.created_at))
         elif isinstance(item, model.ResponseMetadataItem):
             blocks.append(_render_metadata_item(item))
         elif isinstance(item, model.DeveloperMessageItem):
             content = _escape_html(item.content or "")
+            ts_str = _format_msg_timestamp(item.created_at)
 
             next_item = renderable_items[i + 1] if i + 1 < len(renderable_items) else None
             extra_class = ""
@@ -479,10 +564,14 @@
 
             blocks.append(
                 f'<details class="developer-message{extra_class}">'
-                f"<summary>Developer</summary>"
+                f"<summary>"
+                f"Developer"
+                f'<span class="timestamp">{ts_str}</span>'
+                f"</summary>"
                 f'<div class="details-content" style="white-space: pre-wrap;">{content}</div>'
                 f"</details>"
             )
+
         elif isinstance(item, model.ToolCallItem):
             result = tool_results.get(item.call_id)
             blocks.append(_format_tool_call(item, result))
klaude_code/session/session.py

@@ -204,7 +204,7 @@ class Session(BaseModel):
         for it in items:
             # Serialize with explicit type tag for reliable load
             t = it.__class__.__name__
-            data = it.model_dump()
+            data = it.model_dump(mode="json")
             f.write(json.dumps({"type": t, "data": data}, ensure_ascii=False))
             f.write("\n")
         # Refresh metadata timestamp after history change
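The switch to model_dump(mode="json") matters because the new created_at fields are datetime objects: a plain model_dump() keeps them as datetime instances, which json.dumps cannot serialize, while mode="json" converts them to ISO 8601 strings. Reusing the Item sketch from the model.py notes above:

    import json

    json.dumps(Item().model_dump())              # TypeError: Object of type datetime is not JSON serializable
    json.dumps(Item().model_dump(mode="json"))   # OK: created_at is serialized as an ISO 8601 string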