klaude-code 1.2.23-py3-none-any.whl → 1.2.25-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. klaude_code/cli/runtime.py +17 -1
  2. klaude_code/command/prompt-jj-describe.md +32 -0
  3. klaude_code/command/thinking_cmd.py +37 -28
  4. klaude_code/{const/__init__.py → const.py} +7 -6
  5. klaude_code/core/executor.py +46 -3
  6. klaude_code/core/tool/file/read_tool.py +23 -1
  7. klaude_code/core/tool/file/write_tool.py +7 -3
  8. klaude_code/llm/openai_compatible/client.py +29 -102
  9. klaude_code/llm/openai_compatible/stream.py +272 -0
  10. klaude_code/llm/openrouter/client.py +29 -109
  11. klaude_code/llm/openrouter/{reasoning_handler.py → reasoning.py} +24 -2
  12. klaude_code/protocol/model.py +13 -1
  13. klaude_code/protocol/op.py +11 -0
  14. klaude_code/protocol/op_handler.py +5 -0
  15. klaude_code/ui/core/stage_manager.py +0 -3
  16. klaude_code/ui/modes/repl/display.py +2 -0
  17. klaude_code/ui/modes/repl/event_handler.py +97 -57
  18. klaude_code/ui/modes/repl/input_prompt_toolkit.py +25 -4
  19. klaude_code/ui/modes/repl/renderer.py +119 -25
  20. klaude_code/ui/renderers/assistant.py +1 -1
  21. klaude_code/ui/renderers/metadata.py +2 -6
  22. klaude_code/ui/renderers/sub_agent.py +28 -5
  23. klaude_code/ui/renderers/thinking.py +16 -10
  24. klaude_code/ui/renderers/tools.py +26 -2
  25. klaude_code/ui/rich/code_panel.py +24 -5
  26. klaude_code/ui/rich/live.py +17 -0
  27. klaude_code/ui/rich/markdown.py +185 -107
  28. klaude_code/ui/rich/status.py +19 -17
  29. klaude_code/ui/rich/theme.py +63 -12
  30. {klaude_code-1.2.23.dist-info → klaude_code-1.2.25.dist-info}/METADATA +2 -1
  31. {klaude_code-1.2.23.dist-info → klaude_code-1.2.25.dist-info}/RECORD +33 -32
  32. klaude_code/llm/openai_compatible/stream_processor.py +0 -83
  33. {klaude_code-1.2.23.dist-info → klaude_code-1.2.25.dist-info}/WHEEL +0 -0
  34. {klaude_code-1.2.23.dist-info → klaude_code-1.2.25.dist-info}/entry_points.txt +0 -0
klaude_code/llm/openai_compatible/stream.py (new file)
@@ -0,0 +1,272 @@
+"""Shared stream processing utilities for Chat Completions streaming.
+
+This module provides reusable primitives for OpenAI-compatible providers:
+
+- ``StreamStateManager``: accumulates assistant content and tool calls.
+- ``ReasoningHandlerABC``: provider-specific reasoning extraction + buffering.
+- ``parse_chat_completions_stream``: shared stream loop that emits ConversationItems.
+
+OpenRouter uses the same OpenAI Chat Completions API surface but differs in
+how reasoning is represented (``reasoning_details`` vs ``reasoning_content``).
+"""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from collections.abc import AsyncGenerator, Callable
+from dataclasses import dataclass
+from typing import Any, Literal, cast
+
+import httpx
+import openai
+import openai.types
+from openai import AsyncStream
+from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+
+from klaude_code.llm.openai_compatible.tool_call_accumulator import BasicToolCallAccumulator, ToolCallAccumulatorABC
+from klaude_code.llm.usage import MetadataTracker, convert_usage
+from klaude_code.protocol import llm_param, model
+
+StreamStage = Literal["waiting", "reasoning", "assistant", "tool"]
+
+
+class StreamStateManager:
+    """Manages streaming state and provides flush operations for accumulated content.
+
+    This class encapsulates the common state management logic used by both
+    OpenAI-compatible and OpenRouter clients, reducing code duplication.
+    """
+
+    def __init__(
+        self,
+        param_model: str,
+        response_id: str | None = None,
+        reasoning_flusher: Callable[[], list[model.ConversationItem]] | None = None,
+    ):
+        self.param_model = param_model
+        self.response_id = response_id
+        self.stage: StreamStage = "waiting"
+        self.accumulated_reasoning: list[str] = []
+        self.accumulated_content: list[str] = []
+        self.accumulated_tool_calls: ToolCallAccumulatorABC = BasicToolCallAccumulator()
+        self.emitted_tool_start_indices: set[int] = set()
+        self._reasoning_flusher = reasoning_flusher
+
+    def set_response_id(self, response_id: str) -> None:
+        """Set the response ID once received from the stream."""
+        self.response_id = response_id
+        self.accumulated_tool_calls.response_id = response_id  # pyright: ignore[reportAttributeAccessIssue]
+
+    def flush_reasoning(self) -> list[model.ConversationItem]:
+        """Flush accumulated reasoning content and return items."""
+        if self._reasoning_flusher is not None:
+            return self._reasoning_flusher()
+        if not self.accumulated_reasoning:
+            return []
+        item = model.ReasoningTextItem(
+            content="".join(self.accumulated_reasoning),
+            response_id=self.response_id,
+            model=self.param_model,
+        )
+        self.accumulated_reasoning = []
+        return [item]
+
+    def flush_assistant(self) -> list[model.ConversationItem]:
+        """Flush accumulated assistant content and return items."""
+        if not self.accumulated_content:
+            return []
+        item = model.AssistantMessageItem(
+            content="".join(self.accumulated_content),
+            response_id=self.response_id,
+        )
+        self.accumulated_content = []
+        return [item]
+
+    def flush_tool_calls(self) -> list[model.ToolCallItem]:
+        """Flush accumulated tool calls and return items."""
+        items: list[model.ToolCallItem] = self.accumulated_tool_calls.get()
+        if items:
+            self.accumulated_tool_calls.chunks_by_step = []  # pyright: ignore[reportAttributeAccessIssue]
+        return items
+
+    def flush_all(self) -> list[model.ConversationItem]:
+        """Flush all accumulated content in order: reasoning, assistant, tool calls."""
+        items: list[model.ConversationItem] = []
+        items.extend(self.flush_reasoning())
+        items.extend(self.flush_assistant())
+        if self.stage == "tool":
+            items.extend(self.flush_tool_calls())
+        return items
+
+
+@dataclass(slots=True)
+class ReasoningDeltaResult:
+    """Result of processing a single provider delta for reasoning signals."""
+
+    handled: bool
+    outputs: list[str | model.ConversationItem]
+
+
+class ReasoningHandlerABC(ABC):
+    """Provider-specific reasoning handler for Chat Completions streaming."""
+
+    @abstractmethod
+    def set_response_id(self, response_id: str | None) -> None:
+        """Update the response identifier used for emitted items."""
+
+    @abstractmethod
+    def on_delta(self, delta: object) -> ReasoningDeltaResult:
+        """Process a single delta and return ordered reasoning outputs."""
+
+    @abstractmethod
+    def flush(self) -> list[model.ConversationItem]:
+        """Flush buffered reasoning content (usually at stage transition/finalize)."""
+
+
+class DefaultReasoningHandler(ReasoningHandlerABC):
+    """Handles OpenAI-compatible reasoning fields (reasoning_content / reasoning)."""
+
+    def __init__(
+        self,
+        *,
+        param_model: str,
+        response_id: str | None,
+    ) -> None:
+        self._param_model = param_model
+        self._response_id = response_id
+        self._accumulated: list[str] = []
+
+    def set_response_id(self, response_id: str | None) -> None:
+        self._response_id = response_id
+
+    def on_delta(self, delta: object) -> ReasoningDeltaResult:
+        reasoning_content = getattr(delta, "reasoning_content", None) or getattr(delta, "reasoning", None) or ""
+        if not reasoning_content:
+            return ReasoningDeltaResult(handled=False, outputs=[])
+        text = str(reasoning_content)
+        self._accumulated.append(text)
+        return ReasoningDeltaResult(handled=True, outputs=[text])
+
+    def flush(self) -> list[model.ConversationItem]:
+        if not self._accumulated:
+            return []
+        item = model.ReasoningTextItem(
+            content="".join(self._accumulated),
+            response_id=self._response_id,
+            model=self._param_model,
+        )
+        self._accumulated = []
+        return [item]
+
+
+async def parse_chat_completions_stream(
+    stream: AsyncStream[ChatCompletionChunk],
+    *,
+    param: llm_param.LLMCallParameter,
+    metadata_tracker: MetadataTracker,
+    reasoning_handler: ReasoningHandlerABC,
+    on_event: Callable[[object], None] | None = None,
+) -> AsyncGenerator[model.ConversationItem]:
+    """Parse OpenAI Chat Completions stream into ConversationItems.
+
+    This is shared by OpenAI-compatible and OpenRouter clients.
+    """
+
+    state = StreamStateManager(
+        param_model=str(param.model),
+        reasoning_flusher=reasoning_handler.flush,
+    )
+
+    try:
+        async for event in stream:
+            if on_event is not None:
+                on_event(event)
+
+            if not state.response_id and (event_id := getattr(event, "id", None)):
+                state.set_response_id(str(event_id))
+                reasoning_handler.set_response_id(str(event_id))
+                yield model.StartItem(response_id=str(event_id))
+
+            if (event_usage := getattr(event, "usage", None)) is not None:
+                metadata_tracker.set_usage(convert_usage(event_usage, param.context_limit, param.max_tokens))
+            if event_model := getattr(event, "model", None):
+                metadata_tracker.set_model_name(str(event_model))
+            if provider := getattr(event, "provider", None):
+                metadata_tracker.set_provider(str(provider))
+
+            choices = cast(Any, getattr(event, "choices", None))
+            if not choices:
+                continue
+
+            # Support Moonshot Kimi K2's usage field in choice
+            choice0 = choices[0]
+            if choice_usage := getattr(choice0, "usage", None):
+                try:
+                    usage = openai.types.CompletionUsage.model_validate(choice_usage)
+                    metadata_tracker.set_usage(convert_usage(usage, param.context_limit, param.max_tokens))
+                except Exception:
+                    pass
+
+            delta = cast(Any, getattr(choice0, "delta", None))
+            if delta is None:
+                continue

+            # Reasoning
+            reasoning_result = reasoning_handler.on_delta(delta)
+            if reasoning_result.handled:
+                state.stage = "reasoning"
+                for output in reasoning_result.outputs:
+                    if isinstance(output, str):
+                        if not output:
+                            continue
+                        metadata_tracker.record_token()
+                        yield model.ReasoningTextDelta(content=output, response_id=state.response_id)
+                    else:
+                        yield output
+
+            # Assistant
+            if (content := getattr(delta, "content", None)) and (state.stage == "assistant" or str(content).strip()):
+                metadata_tracker.record_token()
+                if state.stage == "reasoning":
+                    for item in state.flush_reasoning():
+                        yield item
+                elif state.stage == "tool":
+                    for item in state.flush_tool_calls():
+                        yield item
+                state.stage = "assistant"
+                state.accumulated_content.append(str(content))
+                yield model.AssistantMessageDelta(
+                    content=str(content),
+                    response_id=state.response_id,
+                )
+
+            # Tool
+            if (tool_calls := getattr(delta, "tool_calls", None)) and len(tool_calls) > 0:
+                metadata_tracker.record_token()
+                if state.stage == "reasoning":
+                    for item in state.flush_reasoning():
+                        yield item
+                elif state.stage == "assistant":
+                    for item in state.flush_assistant():
+                        yield item
+                state.stage = "tool"
+                for tc in tool_calls:
+                    if tc.index not in state.emitted_tool_start_indices and tc.function and tc.function.name:
+                        state.emitted_tool_start_indices.add(tc.index)
+                        yield model.ToolCallStartItem(
+                            response_id=state.response_id,
+                            call_id=tc.id or "",
+                            name=tc.function.name,
+                        )
+                state.accumulated_tool_calls.add(tool_calls)
+    except (openai.OpenAIError, httpx.HTTPError) as e:
+        yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
+
+    flushed_items = state.flush_all()
+    if flushed_items:
+        metadata_tracker.record_token()
+    for item in flushed_items:
+        yield item
+
+    metadata_tracker.set_response_id(state.response_id)
+    yield metadata_tracker.finalize()
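Reviewer's note on usage: the module docstring names the three primitives but ships no example. Here is a minimal sketch of driving the shared loop with fake chunks (illustrative only: SimpleNamespace stands in for the SDK's ChatCompletionChunk, which the loop only reads via getattr(); how the real client constructs param and metadata_tracker is not shown in this diff):

    from types import SimpleNamespace

    from klaude_code.llm.openai_compatible.stream import (
        DefaultReasoningHandler,
        parse_chat_completions_stream,
    )

    def _chunk(**fields):
        # Shape mimics ChatCompletionChunk just enough for the loop's getattr() access.
        delta = SimpleNamespace(reasoning_content=None, reasoning=None, content=None, tool_calls=None)
        for name, value in fields.items():
            setattr(delta, name, value)
        return SimpleNamespace(id="resp_1", usage=None, model=None, provider=None,
                               choices=[SimpleNamespace(delta=delta, usage=None)])

    async def _fake_stream():
        yield _chunk(reasoning_content="Considering options...")
        yield _chunk(content="Answer: 42")  # the stage flip flushes the reasoning item

    async def demo(param, metadata_tracker):  # construct these as the real client does
        handler = DefaultReasoningHandler(param_model=str(param.model), response_id=None)
        async for item in parse_chat_completions_stream(
            _fake_stream(),
            param=param,
            metadata_tracker=metadata_tracker,
            reasoning_handler=handler,
        ):
            print(type(item).__name__)
        # Expected order: StartItem, ReasoningTextDelta, ReasoningTextItem,
        # AssistantMessageDelta, AssistantMessageItem, then the finalized metadata item.

The stage machine is the point: content arriving while stage == "reasoning" forces a flush before the first assistant delta, so consumers always see a completed ReasoningTextItem before assistant text begins.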
klaude_code/llm/openrouter/client.py
@@ -1,6 +1,6 @@
 import json
 from collections.abc import AsyncGenerator
-from typing import override
+from typing import Any, override
 
 import httpx
 import openai
@@ -9,13 +9,13 @@ from openai.types.chat.completion_create_params import CompletionCreateParamsStr
 from klaude_code.llm.client import LLMClientABC
 from klaude_code.llm.input_common import apply_config_defaults
 from klaude_code.llm.openai_compatible.input import convert_tool_schema
-from klaude_code.llm.openai_compatible.stream_processor import StreamStateManager
+from klaude_code.llm.openai_compatible.stream import parse_chat_completions_stream
 from klaude_code.llm.openrouter.input import convert_history_to_input, is_claude_model
-from klaude_code.llm.openrouter.reasoning_handler import ReasoningDetail, ReasoningStreamHandler
+from klaude_code.llm.openrouter.reasoning import ReasoningStreamHandler
 from klaude_code.llm.registry import register
-from klaude_code.llm.usage import MetadataTracker, convert_usage
+from klaude_code.llm.usage import MetadataTracker
 from klaude_code.protocol import llm_param, model
-from klaude_code.trace import DebugType, is_debug_enabled, log, log_debug
+from klaude_code.trace import DebugType, is_debug_enabled, log_debug
 
 
 def build_payload(
@@ -96,114 +96,34 @@ class OpenRouterClient(LLMClientABC):
             debug_type=DebugType.LLM_PAYLOAD,
         )
 
-        stream = self.client.chat.completions.create(
-            **payload,
-            extra_body=extra_body,
-            extra_headers=extra_headers,
-        )
+        try:
+            stream = await self.client.chat.completions.create(
+                **payload,
+                extra_body=extra_body,
+                extra_headers=extra_headers,
+            )
+        except (openai.OpenAIError, httpx.HTTPError) as e:
+            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
+            yield metadata_tracker.finalize()
+            return
 
         reasoning_handler = ReasoningStreamHandler(
            param_model=str(param.model),
            response_id=None,
        )
 
-        state = StreamStateManager(
-            param_model=str(param.model),
-            reasoning_flusher=reasoning_handler.flush,
-        )
-
-        try:
-            async for event in await stream:
-                log_debug(
-                    event.model_dump_json(exclude_none=True),
-                    style="blue",
-                    debug_type=DebugType.LLM_STREAM,
-                )
-
-                if not state.response_id and event.id:
-                    state.set_response_id(event.id)
-                    reasoning_handler.set_response_id(event.id)
-                    yield model.StartItem(response_id=event.id)
-                if event.usage is not None:
-                    metadata_tracker.set_usage(convert_usage(event.usage, param.context_limit, param.max_tokens))
-                if event.model:
-                    metadata_tracker.set_model_name(event.model)
-                if provider := getattr(event, "provider", None):
-                    metadata_tracker.set_provider(str(provider))
-                if len(event.choices) == 0:
-                    continue
-                delta = event.choices[0].delta
-
-                # Reasoning
-                if reasoning_details := getattr(delta, "reasoning_details", None):
-                    for item in reasoning_details:
-                        try:
-                            reasoning_detail = ReasoningDetail.model_validate(item)
-                            if reasoning_detail.text or reasoning_detail.summary:
-                                metadata_tracker.record_token()
-                                state.stage = "reasoning"
-                                # Yield delta immediately for streaming
-                                if reasoning_detail.text:
-                                    yield model.ReasoningTextDelta(
-                                        content=reasoning_detail.text,
-                                        response_id=state.response_id,
-                                    )
-                                if reasoning_detail.summary:
-                                    yield model.ReasoningTextDelta(
-                                        content=reasoning_detail.summary,
-                                        response_id=state.response_id,
-                                    )
-                                # Keep existing handler logic for final items
-                                for conversation_item in reasoning_handler.on_detail(reasoning_detail):
-                                    yield conversation_item
-                        except Exception as e:
-                            log("reasoning_details error", str(e), style="red")
-
-                # Assistant
-                if delta.content and (
-                    state.stage == "assistant" or delta.content.strip()
-                ):  # Process all content in assistant stage, filter empty content in reasoning stage
-                    metadata_tracker.record_token()
-                    if state.stage == "reasoning":
-                        for item in state.flush_reasoning():
-                            yield item
-                    state.stage = "assistant"
-                    state.accumulated_content.append(delta.content)
-                    yield model.AssistantMessageDelta(
-                        content=delta.content,
-                        response_id=state.response_id,
-                    )
-
-                # Tool
-                if delta.tool_calls and len(delta.tool_calls) > 0:
-                    metadata_tracker.record_token()
-                    if state.stage == "reasoning":
-                        for item in state.flush_reasoning():
-                            yield item
-                    elif state.stage == "assistant":
-                        for item in state.flush_assistant():
-                            yield item
-                    state.stage = "tool"
-                    # Emit ToolCallStartItem for new tool calls
-                    for tc in delta.tool_calls:
-                        if tc.index not in state.emitted_tool_start_indices and tc.function and tc.function.name:
-                            state.emitted_tool_start_indices.add(tc.index)
-                            yield model.ToolCallStartItem(
-                                response_id=state.response_id,
-                                call_id=tc.id or "",
-                                name=tc.function.name,
-                            )
-                    state.accumulated_tool_calls.add(delta.tool_calls)
-
-        except (openai.OpenAIError, httpx.HTTPError) as e:
-            yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
-
-        # Finalize
-        flushed_items = state.flush_all()
-        if flushed_items:
-            metadata_tracker.record_token()
-        for item in flushed_items:
+        def on_event(event: Any) -> None:
+            log_debug(
+                event.model_dump_json(exclude_none=True),
+                style="blue",
+                debug_type=DebugType.LLM_STREAM,
+            )
+
+        async for item in parse_chat_completions_stream(
+            stream,
+            param=param,
+            metadata_tracker=metadata_tracker,
+            reasoning_handler=reasoning_handler,
+            on_event=on_event,
+        ):
             yield item
-
-        metadata_tracker.set_response_id(state.response_id)
-        yield metadata_tracker.finalize()
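Worth calling out in review: moving create() inside try/except changes the failure mode from an exception escaping the generator to an in-band StreamErrorItem followed by the finalized metadata item. A consumer-side sketch of that contract (the client handle and the reporting hook are hypothetical names, not from this diff):

    from klaude_code.protocol import model

    async def run_once(client, param) -> None:
        items = [item async for item in client.call(param)]  # hypothetical client API
        errors = [i for i in items if isinstance(i, model.StreamErrorItem)]
        if errors:
            # Even when the request never started streaming, the last item is
            # still metadata_tracker.finalize(), so accounting needs no special case.
            report_llm_failure(errors[0].error)  # hypothetical reporting hook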
klaude_code/llm/openrouter/{reasoning_handler.py → reasoning.py}
@@ -1,6 +1,8 @@
 from pydantic import BaseModel
 
+from klaude_code.llm.openai_compatible.stream import ReasoningDeltaResult, ReasoningHandlerABC
 from klaude_code.protocol import model
+from klaude_code.trace import log
 
 
 class ReasoningDetail(BaseModel):
@@ -16,8 +18,8 @@ class ReasoningDetail(BaseModel):
     signature: str | None = None  # Claude's signature
 
 
-class ReasoningStreamHandler:
-    """Accumulates reasoning text and flushes on encrypted content or finalize."""
+class ReasoningStreamHandler(ReasoningHandlerABC):
+    """Accumulates OpenRouter reasoning details and emits ordered outputs."""
 
     def __init__(
         self,
@@ -34,6 +36,26 @@ class ReasoningStreamHandler:
         """Update the response identifier used for emitted items."""
         self._response_id = response_id
 
+    def on_delta(self, delta: object) -> ReasoningDeltaResult:
+        """Parse OpenRouter's reasoning_details and return ordered stream outputs."""
+        reasoning_details = getattr(delta, "reasoning_details", None)
+        if not reasoning_details:
+            return ReasoningDeltaResult(handled=False, outputs=[])
+
+        outputs: list[str | model.ConversationItem] = []
+        for item in reasoning_details:
+            try:
+                reasoning_detail = ReasoningDetail.model_validate(item)
+                if reasoning_detail.text:
+                    outputs.append(reasoning_detail.text)
+                if reasoning_detail.summary:
+                    outputs.append(reasoning_detail.summary)
+                outputs.extend(self.on_detail(reasoning_detail))
+            except Exception as e:
+                log("reasoning_details error", str(e), style="red")
+
+        return ReasoningDeltaResult(handled=True, outputs=outputs)
+
     def on_detail(self, detail: ReasoningDetail) -> list[model.ConversationItem]:
         """Process a single reasoning detail and return streamable items."""
         items: list[model.ConversationItem] = []
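To make the new on_delta contract concrete, a small sketch (the model name and field values are illustrative; on_detail may additionally buffer or finalize items, for example when a signature arrives):

    from types import SimpleNamespace

    from klaude_code.llm.openrouter.reasoning import ReasoningStreamHandler

    handler = ReasoningStreamHandler(param_model="anthropic/claude-sonnet-4", response_id="resp_1")
    # OpenRouter deltas carry a reasoning_details list rather than reasoning_content.
    delta = SimpleNamespace(reasoning_details=[{"text": "Let me check the file...", "summary": None}])
    result = handler.on_delta(delta)
    assert result.handled
    # result.outputs begins with the raw text delta ("Let me check the file..."),
    # followed by whatever on_detail() produced for this detail.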
klaude_code/protocol/model.py
@@ -138,6 +138,12 @@ class TruncationUIExtra(BaseModel):
     truncated_length: int
 
 
+class MarkdownDocUIExtra(BaseModel):
+    type: Literal["markdown_doc"] = "markdown_doc"
+    file_path: str
+    content: str
+
+
 class SessionStatusUIExtra(BaseModel):
     type: Literal["session_status"] = "session_status"
     usage: "Usage"
@@ -146,7 +152,13 @@ class SessionStatusUIExtra(BaseModel):
 
 
 ToolResultUIExtra = Annotated[
-    DiffUIExtra | TodoListUIExtra | SessionIdUIExtra | MermaidLinkUIExtra | TruncationUIExtra | SessionStatusUIExtra,
+    DiffUIExtra
+    | TodoListUIExtra
+    | SessionIdUIExtra
+    | MermaidLinkUIExtra
+    | TruncationUIExtra
+    | MarkdownDocUIExtra
+    | SessionStatusUIExtra,
     Field(discriminator="type"),
 ]
 
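Since ToolResultUIExtra is a discriminated union on type, adding MarkdownDocUIExtra to the union is all that deserialization needs. A quick sketch using standard pydantic v2 TypeAdapter (payload values are illustrative):

    from pydantic import TypeAdapter

    from klaude_code.protocol.model import MarkdownDocUIExtra, ToolResultUIExtra

    adapter = TypeAdapter(ToolResultUIExtra)
    extra = adapter.validate_python({
        "type": "markdown_doc",  # the discriminator selects the union member
        "file_path": "README.md",
        "content": "# Title\n\nBody text.",
    })
    assert isinstance(extra, MarkdownDocUIExtra)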
klaude_code/protocol/op.py
@@ -25,6 +25,7 @@ class OperationType(Enum):
     USER_INPUT = "user_input"
     RUN_AGENT = "run_agent"
     CHANGE_MODEL = "change_model"
+    CHANGE_THINKING = "change_thinking"
     CLEAR_SESSION = "clear_session"
     EXPORT_SESSION = "export_session"
     INTERRUPT = "interrupt"
@@ -77,6 +78,16 @@ class ChangeModelOperation(Operation):
         await handler.handle_change_model(self)
 
 
+class ChangeThinkingOperation(Operation):
+    """Operation for changing the thinking/reasoning configuration."""
+
+    type: OperationType = OperationType.CHANGE_THINKING
+    session_id: str
+
+    async def execute(self, handler: OperationHandler) -> None:
+        await handler.handle_change_thinking(self)
+
+
 class ClearSessionOperation(Operation):
     """Operation for clearing the active session and starting a new one."""
 
klaude_code/protocol/op_handler.py
@@ -11,6 +11,7 @@ from typing import TYPE_CHECKING, Protocol
 if TYPE_CHECKING:
     from klaude_code.protocol.op import (
         ChangeModelOperation,
+        ChangeThinkingOperation,
         ClearSessionOperation,
         ExportSessionOperation,
         InitAgentOperation,
@@ -35,6 +36,10 @@ class OperationHandler(Protocol):
         """Handle a change model operation."""
         ...
 
+    async def handle_change_thinking(self, operation: ChangeThinkingOperation) -> None:
+        """Handle a change thinking operation."""
+        ...
+
     async def handle_clear_session(self, operation: ClearSessionOperation) -> None:
         """Handle a clear session operation."""
         ...
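Together with the op.py hunk above, this completes a double dispatch: the operation's execute() calls back into whichever OperationHandler implementation is supplied. A sketch (the handler class is hypothetical; a real implementation must provide every handle_* method the Protocol declares):

    import asyncio

    from klaude_code.protocol.op import ChangeThinkingOperation

    class MyHandler:
        # Structurally satisfies OperationHandler for this one operation;
        # the remaining handle_* methods are omitted here for brevity.
        async def handle_change_thinking(self, operation: ChangeThinkingOperation) -> None:
            print(f"change thinking config for session {operation.session_id}")

    async def demo() -> None:
        op = ChangeThinkingOperation(session_id="sess_123")  # session id is illustrative
        await op.execute(MyHandler())  # dispatches to handle_change_thinking

    asyncio.run(demo())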
klaude_code/ui/core/stage_manager.py
@@ -20,12 +20,10 @@ class StageManager:
         *,
         finish_assistant: Callable[[], Awaitable[None]],
         finish_thinking: Callable[[], Awaitable[None]],
-        on_enter_thinking: Callable[[], None],
     ):
         self._stage = Stage.WAITING
         self._finish_assistant = finish_assistant
         self._finish_thinking = finish_thinking
-        self._on_enter_thinking = on_enter_thinking
 
     @property
     def current_stage(self) -> Stage:
@@ -41,7 +39,6 @@ class StageManager:
         if self._stage == Stage.THINKING:
             return
         await self.transition_to(Stage.THINKING)
-        self._on_enter_thinking()
 
     async def finish_assistant(self) -> None:
         if self._stage != Stage.ASSISTANT:
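After this removal the constructor takes only the two async finish hooks. A minimal construction sketch (the no-op callbacks are illustrative):

    from klaude_code.ui.core.stage_manager import StageManager

    async def _noop() -> None:
        return None

    manager = StageManager(
        finish_assistant=_noop,  # awaited by the manager's stage transitions
        finish_thinking=_noop,
    )
    # Entering the thinking stage no longer fires a synchronous on_enter_thinking
    # callback; that hook's former side effect now lives elsewhere in the UI layer
    # (this diff does not show where).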
klaude_code/ui/modes/repl/display.py
@@ -57,3 +57,5 @@ class REPLDisplay(DisplayABC):
         # Spinner may already be stopped or not started; ignore.
         with contextlib.suppress(Exception):
             self.renderer.spinner_stop()
+        with contextlib.suppress(Exception):
+            self.renderer.stop_bottom_live()