klaude-code 1.2.22-py3-none-any.whl → 1.2.24-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. klaude_code/command/prompt-jj-describe.md +32 -0
  2. klaude_code/command/status_cmd.py +1 -1
  3. klaude_code/{const/__init__.py → const.py} +11 -2
  4. klaude_code/core/executor.py +1 -1
  5. klaude_code/core/manager/sub_agent_manager.py +1 -1
  6. klaude_code/core/reminders.py +51 -0
  7. klaude_code/core/task.py +37 -18
  8. klaude_code/core/tool/__init__.py +1 -4
  9. klaude_code/core/tool/file/read_tool.py +23 -1
  10. klaude_code/core/tool/file/write_tool.py +7 -3
  11. klaude_code/core/tool/skill/__init__.py +0 -0
  12. klaude_code/core/tool/{memory → skill}/skill_tool.py +16 -39
  13. klaude_code/llm/openai_compatible/client.py +29 -102
  14. klaude_code/llm/openai_compatible/stream.py +272 -0
  15. klaude_code/llm/openrouter/client.py +29 -109
  16. klaude_code/llm/openrouter/{reasoning_handler.py → reasoning.py} +24 -2
  17. klaude_code/protocol/model.py +15 -2
  18. klaude_code/session/export.py +1 -1
  19. klaude_code/session/store.py +4 -2
  20. klaude_code/skill/__init__.py +27 -0
  21. klaude_code/skill/assets/deslop/SKILL.md +17 -0
  22. klaude_code/skill/assets/dev-docs/SKILL.md +108 -0
  23. klaude_code/skill/assets/handoff/SKILL.md +39 -0
  24. klaude_code/skill/assets/jj-workspace/SKILL.md +20 -0
  25. klaude_code/skill/assets/skill-creator/SKILL.md +139 -0
  26. klaude_code/{core/tool/memory/skill_loader.py → skill/loader.py} +60 -24
  27. klaude_code/skill/manager.py +70 -0
  28. klaude_code/skill/system_skills.py +192 -0
  29. klaude_code/ui/core/stage_manager.py +0 -3
  30. klaude_code/ui/modes/repl/completers.py +103 -3
  31. klaude_code/ui/modes/repl/event_handler.py +101 -49
  32. klaude_code/ui/modes/repl/input_prompt_toolkit.py +55 -6
  33. klaude_code/ui/modes/repl/renderer.py +24 -17
  34. klaude_code/ui/renderers/assistant.py +7 -2
  35. klaude_code/ui/renderers/developer.py +12 -0
  36. klaude_code/ui/renderers/diffs.py +1 -1
  37. klaude_code/ui/renderers/metadata.py +6 -8
  38. klaude_code/ui/renderers/sub_agent.py +28 -5
  39. klaude_code/ui/renderers/thinking.py +16 -10
  40. klaude_code/ui/renderers/tools.py +83 -34
  41. klaude_code/ui/renderers/user_input.py +32 -2
  42. klaude_code/ui/rich/markdown.py +40 -20
  43. klaude_code/ui/rich/status.py +15 -19
  44. klaude_code/ui/rich/theme.py +70 -17
  45. {klaude_code-1.2.22.dist-info → klaude_code-1.2.24.dist-info}/METADATA +18 -13
  46. {klaude_code-1.2.22.dist-info → klaude_code-1.2.24.dist-info}/RECORD +49 -45
  47. klaude_code/command/prompt-deslop.md +0 -14
  48. klaude_code/command/prompt-dev-docs-update.md +0 -56
  49. klaude_code/command/prompt-dev-docs.md +0 -46
  50. klaude_code/command/prompt-handoff.md +0 -33
  51. klaude_code/command/prompt-jj-workspace.md +0 -18
  52. klaude_code/core/tool/memory/__init__.py +0 -5
  53. klaude_code/llm/openai_compatible/stream_processor.py +0 -83
  54. klaude_code/core/tool/{memory → skill}/skill_tool.md +0 -0
  55. {klaude_code-1.2.22.dist-info → klaude_code-1.2.24.dist-info}/WHEEL +0 -0
  56. {klaude_code-1.2.22.dist-info → klaude_code-1.2.24.dist-info}/entry_points.txt +0 -0
klaude_code/llm/openai_compatible/stream.py
@@ -0,0 +1,272 @@
+ """Shared stream processing utilities for Chat Completions streaming.
+
+ This module provides reusable primitives for OpenAI-compatible providers:
+
+ - ``StreamStateManager``: accumulates assistant content and tool calls.
+ - ``ReasoningHandlerABC``: provider-specific reasoning extraction + buffering.
+ - ``parse_chat_completions_stream``: shared stream loop that emits ConversationItems.
+
+ OpenRouter uses the same OpenAI Chat Completions API surface but differs in
+ how reasoning is represented (``reasoning_details`` vs ``reasoning_content``).
+ """
+
+ from __future__ import annotations
+
+ from abc import ABC, abstractmethod
+ from collections.abc import AsyncGenerator, Callable
+ from dataclasses import dataclass
+ from typing import Any, Literal, cast
+
+ import httpx
+ import openai
+ import openai.types
+ from openai import AsyncStream
+ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+
+ from klaude_code.llm.openai_compatible.tool_call_accumulator import BasicToolCallAccumulator, ToolCallAccumulatorABC
+ from klaude_code.llm.usage import MetadataTracker, convert_usage
+ from klaude_code.protocol import llm_param, model
+
+ StreamStage = Literal["waiting", "reasoning", "assistant", "tool"]
+
+
+ class StreamStateManager:
+     """Manages streaming state and provides flush operations for accumulated content.
+
+     This class encapsulates the common state management logic used by both
+     OpenAI-compatible and OpenRouter clients, reducing code duplication.
+     """
+
+     def __init__(
+         self,
+         param_model: str,
+         response_id: str | None = None,
+         reasoning_flusher: Callable[[], list[model.ConversationItem]] | None = None,
+     ):
+         self.param_model = param_model
+         self.response_id = response_id
+         self.stage: StreamStage = "waiting"
+         self.accumulated_reasoning: list[str] = []
+         self.accumulated_content: list[str] = []
+         self.accumulated_tool_calls: ToolCallAccumulatorABC = BasicToolCallAccumulator()
+         self.emitted_tool_start_indices: set[int] = set()
+         self._reasoning_flusher = reasoning_flusher
+
+     def set_response_id(self, response_id: str) -> None:
+         """Set the response ID once received from the stream."""
+         self.response_id = response_id
+         self.accumulated_tool_calls.response_id = response_id  # pyright: ignore[reportAttributeAccessIssue]
+
+     def flush_reasoning(self) -> list[model.ConversationItem]:
+         """Flush accumulated reasoning content and return items."""
+         if self._reasoning_flusher is not None:
+             return self._reasoning_flusher()
+         if not self.accumulated_reasoning:
+             return []
+         item = model.ReasoningTextItem(
+             content="".join(self.accumulated_reasoning),
+             response_id=self.response_id,
+             model=self.param_model,
+         )
+         self.accumulated_reasoning = []
+         return [item]
+
+     def flush_assistant(self) -> list[model.ConversationItem]:
+         """Flush accumulated assistant content and return items."""
+         if not self.accumulated_content:
+             return []
+         item = model.AssistantMessageItem(
+             content="".join(self.accumulated_content),
+             response_id=self.response_id,
+         )
+         self.accumulated_content = []
+         return [item]
+
+     def flush_tool_calls(self) -> list[model.ToolCallItem]:
+         """Flush accumulated tool calls and return items."""
+         items: list[model.ToolCallItem] = self.accumulated_tool_calls.get()
+         if items:
+             self.accumulated_tool_calls.chunks_by_step = []  # pyright: ignore[reportAttributeAccessIssue]
+         return items
+
+     def flush_all(self) -> list[model.ConversationItem]:
+         """Flush all accumulated content in order: reasoning, assistant, tool calls."""
+         items: list[model.ConversationItem] = []
+         items.extend(self.flush_reasoning())
+         items.extend(self.flush_assistant())
+         if self.stage == "tool":
+             items.extend(self.flush_tool_calls())
+         return items
+
+
+ @dataclass(slots=True)
+ class ReasoningDeltaResult:
+     """Result of processing a single provider delta for reasoning signals."""
+
+     handled: bool
+     outputs: list[str | model.ConversationItem]
+
+
+ class ReasoningHandlerABC(ABC):
+     """Provider-specific reasoning handler for Chat Completions streaming."""
+
+     @abstractmethod
+     def set_response_id(self, response_id: str | None) -> None:
+         """Update the response identifier used for emitted items."""
+
+     @abstractmethod
+     def on_delta(self, delta: object) -> ReasoningDeltaResult:
+         """Process a single delta and return ordered reasoning outputs."""
+
+     @abstractmethod
+     def flush(self) -> list[model.ConversationItem]:
+         """Flush buffered reasoning content (usually at stage transition/finalize)."""
+
+
+ class DefaultReasoningHandler(ReasoningHandlerABC):
+     """Handles OpenAI-compatible reasoning fields (reasoning_content / reasoning)."""
+
+     def __init__(
+         self,
+         *,
+         param_model: str,
+         response_id: str | None,
+     ) -> None:
+         self._param_model = param_model
+         self._response_id = response_id
+         self._accumulated: list[str] = []
+
+     def set_response_id(self, response_id: str | None) -> None:
+         self._response_id = response_id
+
+     def on_delta(self, delta: object) -> ReasoningDeltaResult:
+         reasoning_content = getattr(delta, "reasoning_content", None) or getattr(delta, "reasoning", None) or ""
+         if not reasoning_content:
+             return ReasoningDeltaResult(handled=False, outputs=[])
+         text = str(reasoning_content)
+         self._accumulated.append(text)
+         return ReasoningDeltaResult(handled=True, outputs=[text])
+
+     def flush(self) -> list[model.ConversationItem]:
+         if not self._accumulated:
+             return []
+         item = model.ReasoningTextItem(
+             content="".join(self._accumulated),
+             response_id=self._response_id,
+             model=self._param_model,
+         )
+         self._accumulated = []
+         return [item]
+
+
+ async def parse_chat_completions_stream(
+     stream: AsyncStream[ChatCompletionChunk],
+     *,
+     param: llm_param.LLMCallParameter,
+     metadata_tracker: MetadataTracker,
+     reasoning_handler: ReasoningHandlerABC,
+     on_event: Callable[[object], None] | None = None,
+ ) -> AsyncGenerator[model.ConversationItem]:
+     """Parse OpenAI Chat Completions stream into ConversationItems.
+
+     This is shared by OpenAI-compatible and OpenRouter clients.
+     """
+
+     state = StreamStateManager(
+         param_model=str(param.model),
+         reasoning_flusher=reasoning_handler.flush,
+     )
+
+     try:
+         async for event in stream:
+             if on_event is not None:
+                 on_event(event)
+
+             if not state.response_id and (event_id := getattr(event, "id", None)):
+                 state.set_response_id(str(event_id))
+                 reasoning_handler.set_response_id(str(event_id))
+                 yield model.StartItem(response_id=str(event_id))
+
+             if (event_usage := getattr(event, "usage", None)) is not None:
+                 metadata_tracker.set_usage(convert_usage(event_usage, param.context_limit, param.max_tokens))
+             if event_model := getattr(event, "model", None):
+                 metadata_tracker.set_model_name(str(event_model))
+             if provider := getattr(event, "provider", None):
+                 metadata_tracker.set_provider(str(provider))
+
+             choices = cast(Any, getattr(event, "choices", None))
+             if not choices:
+                 continue
+
+             # Support Moonshot Kimi K2's usage field in choice
+             choice0 = choices[0]
+             if choice_usage := getattr(choice0, "usage", None):
+                 try:
+                     usage = openai.types.CompletionUsage.model_validate(choice_usage)
+                     metadata_tracker.set_usage(convert_usage(usage, param.context_limit, param.max_tokens))
+                 except Exception:
+                     pass
+
+             delta = cast(Any, getattr(choice0, "delta", None))
+             if delta is None:
+                 continue
+
+             # Reasoning
+             reasoning_result = reasoning_handler.on_delta(delta)
+             if reasoning_result.handled:
+                 state.stage = "reasoning"
+                 for output in reasoning_result.outputs:
+                     if isinstance(output, str):
+                         if not output:
+                             continue
+                         metadata_tracker.record_token()
+                         yield model.ReasoningTextDelta(content=output, response_id=state.response_id)
+                     else:
+                         yield output
+
+             # Assistant
+             if (content := getattr(delta, "content", None)) and (state.stage == "assistant" or str(content).strip()):
+                 metadata_tracker.record_token()
+                 if state.stage == "reasoning":
+                     for item in state.flush_reasoning():
+                         yield item
+                 elif state.stage == "tool":
+                     for item in state.flush_tool_calls():
+                         yield item
+                 state.stage = "assistant"
+                 state.accumulated_content.append(str(content))
+                 yield model.AssistantMessageDelta(
+                     content=str(content),
+                     response_id=state.response_id,
+                 )
+
+             # Tool
+             if (tool_calls := getattr(delta, "tool_calls", None)) and len(tool_calls) > 0:
+                 metadata_tracker.record_token()
+                 if state.stage == "reasoning":
+                     for item in state.flush_reasoning():
+                         yield item
+                 elif state.stage == "assistant":
+                     for item in state.flush_assistant():
+                         yield item
+                 state.stage = "tool"
+                 for tc in tool_calls:
+                     if tc.index not in state.emitted_tool_start_indices and tc.function and tc.function.name:
+                         state.emitted_tool_start_indices.add(tc.index)
+                         yield model.ToolCallStartItem(
+                             response_id=state.response_id,
+                             call_id=tc.id or "",
+                             name=tc.function.name,
+                         )
+                 state.accumulated_tool_calls.add(tool_calls)
+     except (openai.OpenAIError, httpx.HTTPError) as e:
+         yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
+
+     flushed_items = state.flush_all()
+     if flushed_items:
+         metadata_tracker.record_token()
+     for item in flushed_items:
+         yield item
+
+     metadata_tracker.set_response_id(state.response_id)
+     yield metadata_tracker.finalize()
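
For orientation, this is roughly how a provider client drives the new shared loop. A minimal sketch, assuming the caller already holds an open `AsyncStream[ChatCompletionChunk]` and that `MetadataTracker` takes no constructor arguments (neither detail is shown in this diff):

```python
from collections.abc import AsyncGenerator

from openai import AsyncStream
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk

from klaude_code.llm.openai_compatible.stream import DefaultReasoningHandler, parse_chat_completions_stream
from klaude_code.llm.usage import MetadataTracker
from klaude_code.protocol import llm_param, model


async def stream_items(
    stream: AsyncStream[ChatCompletionChunk],
    param: llm_param.LLMCallParameter,
) -> AsyncGenerator[model.ConversationItem]:
    # Hypothetical wrapper for illustration; real clients also build the payload,
    # wrap stream creation in try/except, and pass a debug-logging on_event hook.
    metadata_tracker = MetadataTracker()  # assumed no-arg constructor
    handler = DefaultReasoningHandler(param_model=str(param.model), response_id=None)
    async for item in parse_chat_completions_stream(
        stream,
        param=param,
        metadata_tracker=metadata_tracker,
        reasoning_handler=handler,
    ):
        yield item  # StartItem, deltas, flushed items, then finalized metadata
```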
klaude_code/llm/openrouter/client.py
@@ -1,6 +1,6 @@
  import json
  from collections.abc import AsyncGenerator
- from typing import override
+ from typing import Any, override

  import httpx
  import openai
@@ -9,13 +9,13 @@ from openai.types.chat.completion_create_params import CompletionCreateParamsStr
  from klaude_code.llm.client import LLMClientABC
  from klaude_code.llm.input_common import apply_config_defaults
  from klaude_code.llm.openai_compatible.input import convert_tool_schema
- from klaude_code.llm.openai_compatible.stream_processor import StreamStateManager
+ from klaude_code.llm.openai_compatible.stream import parse_chat_completions_stream
  from klaude_code.llm.openrouter.input import convert_history_to_input, is_claude_model
- from klaude_code.llm.openrouter.reasoning_handler import ReasoningDetail, ReasoningStreamHandler
+ from klaude_code.llm.openrouter.reasoning import ReasoningStreamHandler
  from klaude_code.llm.registry import register
- from klaude_code.llm.usage import MetadataTracker, convert_usage
+ from klaude_code.llm.usage import MetadataTracker
  from klaude_code.protocol import llm_param, model
- from klaude_code.trace import DebugType, is_debug_enabled, log, log_debug
+ from klaude_code.trace import DebugType, is_debug_enabled, log_debug


  def build_payload(
@@ -96,114 +96,34 @@ class OpenRouterClient(LLMClientABC):
              debug_type=DebugType.LLM_PAYLOAD,
          )

-         stream = self.client.chat.completions.create(
-             **payload,
-             extra_body=extra_body,
-             extra_headers=extra_headers,
-         )
+         try:
+             stream = await self.client.chat.completions.create(
+                 **payload,
+                 extra_body=extra_body,
+                 extra_headers=extra_headers,
+             )
+         except (openai.OpenAIError, httpx.HTTPError) as e:
+             yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
+             yield metadata_tracker.finalize()
+             return

          reasoning_handler = ReasoningStreamHandler(
              param_model=str(param.model),
              response_id=None,
          )

-         state = StreamStateManager(
-             param_model=str(param.model),
-             reasoning_flusher=reasoning_handler.flush,
-         )
-
-         try:
-             async for event in await stream:
-                 log_debug(
-                     event.model_dump_json(exclude_none=True),
-                     style="blue",
-                     debug_type=DebugType.LLM_STREAM,
-                 )
-
-                 if not state.response_id and event.id:
-                     state.set_response_id(event.id)
-                     reasoning_handler.set_response_id(event.id)
-                     yield model.StartItem(response_id=event.id)
-                 if event.usage is not None:
-                     metadata_tracker.set_usage(convert_usage(event.usage, param.context_limit, param.max_tokens))
-                 if event.model:
-                     metadata_tracker.set_model_name(event.model)
-                 if provider := getattr(event, "provider", None):
-                     metadata_tracker.set_provider(str(provider))
-                 if len(event.choices) == 0:
-                     continue
-                 delta = event.choices[0].delta
-
-                 # Reasoning
-                 if reasoning_details := getattr(delta, "reasoning_details", None):
-                     for item in reasoning_details:
-                         try:
-                             reasoning_detail = ReasoningDetail.model_validate(item)
-                             if reasoning_detail.text or reasoning_detail.summary:
-                                 metadata_tracker.record_token()
-                                 state.stage = "reasoning"
-                                 # Yield delta immediately for streaming
-                                 if reasoning_detail.text:
-                                     yield model.ReasoningTextDelta(
-                                         content=reasoning_detail.text,
-                                         response_id=state.response_id,
-                                     )
-                                 if reasoning_detail.summary:
-                                     yield model.ReasoningTextDelta(
-                                         content=reasoning_detail.summary,
-                                         response_id=state.response_id,
-                                     )
-                                 # Keep existing handler logic for final items
-                                 for conversation_item in reasoning_handler.on_detail(reasoning_detail):
-                                     yield conversation_item
-                         except Exception as e:
-                             log("reasoning_details error", str(e), style="red")
-
-                 # Assistant
-                 if delta.content and (
-                     state.stage == "assistant" or delta.content.strip()
-                 ):  # Process all content in assistant stage, filter empty content in reasoning stage
-                     metadata_tracker.record_token()
-                     if state.stage == "reasoning":
-                         for item in state.flush_reasoning():
-                             yield item
-                     state.stage = "assistant"
-                     state.accumulated_content.append(delta.content)
-                     yield model.AssistantMessageDelta(
-                         content=delta.content,
-                         response_id=state.response_id,
-                     )
-
-                 # Tool
-                 if delta.tool_calls and len(delta.tool_calls) > 0:
-                     metadata_tracker.record_token()
-                     if state.stage == "reasoning":
-                         for item in state.flush_reasoning():
-                             yield item
-                     elif state.stage == "assistant":
-                         for item in state.flush_assistant():
-                             yield item
-                     state.stage = "tool"
-                     # Emit ToolCallStartItem for new tool calls
-                     for tc in delta.tool_calls:
-                         if tc.index not in state.emitted_tool_start_indices and tc.function and tc.function.name:
-                             state.emitted_tool_start_indices.add(tc.index)
-                             yield model.ToolCallStartItem(
-                                 response_id=state.response_id,
-                                 call_id=tc.id or "",
-                                 name=tc.function.name,
-                             )
-                     state.accumulated_tool_calls.add(delta.tool_calls)
-
-         except (openai.OpenAIError, httpx.HTTPError) as e:
-             yield model.StreamErrorItem(error=f"{e.__class__.__name__} {e!s}")
-
-         # Finalize
-         flushed_items = state.flush_all()
-         if flushed_items:
-             metadata_tracker.record_token()
-         for item in flushed_items:
+         def on_event(event: Any) -> None:
+             log_debug(
+                 event.model_dump_json(exclude_none=True),
+                 style="blue",
+                 debug_type=DebugType.LLM_STREAM,
+             )
+
+         async for item in parse_chat_completions_stream(
+             stream,
+             param=param,
+             metadata_tracker=metadata_tracker,
+             reasoning_handler=reasoning_handler,
+             on_event=on_event,
+         ):
              yield item
-
-         metadata_tracker.set_response_id(state.response_id)
-         yield metadata_tracker.finalize()
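
One behavioral consequence of the new try/except around stream creation: connection and API errors no longer propagate as exceptions; the consumer instead receives a `StreamErrorItem` followed by the finalized metadata item. A hedged sketch of what that looks like downstream (the `client.call(param)` entry point is hypothetical, not shown in this hunk):

```python
# Hypothetical consumer; `client`, `param`, and the `call` method name are assumed.
async for item in client.call(param):
    if isinstance(item, model.StreamErrorItem):
        print("stream failed:", item.error)
        # the generator still ends with metadata_tracker.finalize()
```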
klaude_code/llm/openrouter/reasoning.py
@@ -1,6 +1,8 @@
  from pydantic import BaseModel

+ from klaude_code.llm.openai_compatible.stream import ReasoningDeltaResult, ReasoningHandlerABC
  from klaude_code.protocol import model
+ from klaude_code.trace import log


  class ReasoningDetail(BaseModel):
@@ -16,8 +18,8 @@ class ReasoningDetail(BaseModel):
      signature: str | None = None  # Claude's signature


- class ReasoningStreamHandler:
-     """Accumulates reasoning text and flushes on encrypted content or finalize."""
+ class ReasoningStreamHandler(ReasoningHandlerABC):
+     """Accumulates OpenRouter reasoning details and emits ordered outputs."""

      def __init__(
          self,
@@ -34,6 +36,26 @@ class ReasoningStreamHandler:
          """Update the response identifier used for emitted items."""
          self._response_id = response_id

+     def on_delta(self, delta: object) -> ReasoningDeltaResult:
+         """Parse OpenRouter's reasoning_details and return ordered stream outputs."""
+         reasoning_details = getattr(delta, "reasoning_details", None)
+         if not reasoning_details:
+             return ReasoningDeltaResult(handled=False, outputs=[])
+
+         outputs: list[str | model.ConversationItem] = []
+         for item in reasoning_details:
+             try:
+                 reasoning_detail = ReasoningDetail.model_validate(item)
+                 if reasoning_detail.text:
+                     outputs.append(reasoning_detail.text)
+                 if reasoning_detail.summary:
+                     outputs.append(reasoning_detail.summary)
+                 outputs.extend(self.on_detail(reasoning_detail))
+             except Exception as e:
+                 log("reasoning_details error", str(e), style="red")
+
+         return ReasoningDeltaResult(handled=True, outputs=outputs)
+
      def on_detail(self, detail: ReasoningDetail) -> list[model.ConversationItem]:
          """Process a single reasoning detail and return streamable items."""
          items: list[model.ConversationItem] = []
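
To see how the new `on_delta` contract behaves, a toy sketch feeding a fake delta shaped like an OpenRouter chunk (the model name and reasoning text are made up; only the `text` field of a detail is exercised):

```python
from types import SimpleNamespace

from klaude_code.llm.openrouter.reasoning import ReasoningStreamHandler

handler = ReasoningStreamHandler(param_model="anthropic/claude-sonnet-4", response_id="resp-1")
delta = SimpleNamespace(reasoning_details=[{"text": "Let me check the diff..."}])

result = handler.on_delta(delta)
assert result.handled
# outputs interleaves plain strings (streamed as ReasoningTextDelta by the
# shared loop) with any finalized ConversationItems produced by on_detail().
for output in result.outputs:
    print(type(output).__name__, output)
```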
klaude_code/protocol/model.py
@@ -138,6 +138,12 @@ class TruncationUIExtra(BaseModel):
      truncated_length: int


+ class MarkdownDocUIExtra(BaseModel):
+     type: Literal["markdown_doc"] = "markdown_doc"
+     file_path: str
+     content: str
+
+
  class SessionStatusUIExtra(BaseModel):
      type: Literal["session_status"] = "session_status"
      usage: "Usage"
@@ -146,7 +152,13 @@ class SessionStatusUIExtra(BaseModel):


  ToolResultUIExtra = Annotated[
-     DiffUIExtra | TodoListUIExtra | SessionIdUIExtra | MermaidLinkUIExtra | TruncationUIExtra | SessionStatusUIExtra,
+     DiffUIExtra
+     | TodoListUIExtra
+     | SessionIdUIExtra
+     | MermaidLinkUIExtra
+     | TruncationUIExtra
+     | MarkdownDocUIExtra
+     | SessionStatusUIExtra,
      Field(discriminator="type"),
  ]
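
Because `ToolResultUIExtra` is a discriminated union on `type`, the new variant slots in without touching existing renderers. A sketch of the dispatch, assuming both names are importable from `klaude_code.protocol.model` as this hunk suggests (the file path and content are made up):

```python
from pydantic import TypeAdapter

from klaude_code.protocol import model

# The "type" discriminator selects the concrete UI-extra class, so renderers
# can dispatch with isinstance checks.
extra = TypeAdapter(model.ToolResultUIExtra).validate_python(
    {"type": "markdown_doc", "file_path": "docs/plan.md", "content": "# Plan\n..."}
)
assert isinstance(extra, model.MarkdownDocUIExtra)
```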
 
@@ -230,6 +242,7 @@ class DeveloperMessageItem(BaseModel):
      at_files: list[AtPatternParseResult] | None = None
      command_output: CommandOutput | None = None
      user_image_count: int | None = None
+     skill_name: str | None = None  # Skill name activated via $skill syntax


  class ImageURLPart(BaseModel):
@@ -417,7 +430,7 @@ class TaskMetadata(BaseModel):
  class TaskMetadataItem(BaseModel):
      """Aggregated metadata for a complete task, stored in conversation history."""

-     main: TaskMetadata = Field(default_factory=TaskMetadata)
+     main_agent: TaskMetadata = Field(default_factory=TaskMetadata)  # Main agent metadata
      sub_agent_task_metadata: list[TaskMetadata] = Field(default_factory=lambda: list[TaskMetadata]())
      created_at: datetime = Field(default_factory=datetime.now)
klaude_code/ui/renderers/metadata.py
@@ -235,7 +235,7 @@ def _render_metadata_item(item: model.TaskMetadataItem) -> str:
      lines: list[str] = []

      # Main agent metadata
-     lines.append(_render_single_metadata(item.main, indent=0, show_context=True))
+     lines.append(_render_single_metadata(item.main_agent, indent=0, show_context=True))

      # Sub-agent metadata with indent
      for sub in item.sub_agent_task_metadata:
klaude_code/session/store.py
@@ -205,11 +205,13 @@ def build_meta_snapshot(
          "work_dir": str(work_dir),
          "sub_agent_state": sub_agent_state.model_dump(mode="json") if sub_agent_state else None,
          "file_tracker": {path: status.model_dump(mode="json") for path, status in file_tracker.items()},
-         "todos": [todo.model_dump(mode="json") for todo in todos],
+         "todos": [todo.model_dump(mode="json", exclude_defaults=True) for todo in todos],
          "created_at": created_at,
          "updated_at": updated_at,
          "messages_count": messages_count,
          "model_name": model_name,
          "model_config_name": model_config_name,
-         "model_thinking": model_thinking.model_dump(mode="json") if model_thinking else None,
+         "model_thinking": model_thinking.model_dump(mode="json", exclude_defaults=True, exclude_none=True)
+         if model_thinking
+         else None,
      }
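
The `exclude_defaults`/`exclude_none` flags shrink the snapshot by dropping fields still at their defaults. A self-contained illustration of the pydantic v2 behavior (the `Todo` shape here is hypothetical, not the package's real model):

```python
from pydantic import BaseModel


class Todo(BaseModel):
    # hypothetical shape for illustration only
    title: str
    done: bool = False


# exclude_defaults omits fields whose value equals the declared default
print(Todo(title="ship 1.2.24").model_dump(mode="json", exclude_defaults=True))
# {'title': 'ship 1.2.24'}
```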
klaude_code/skill/__init__.py
@@ -0,0 +1,27 @@
+ """Skill module - independent skill management system.
+
+ This module provides the core skill functionality:
+ - Skill discovery and loading from multiple directories
+ - System skill installation
+ - Global skill access via manager functions
+
+ Public API:
+ - get_skill(name) - Get a skill by name
+ - get_available_skills() - Get list of (name, description, location) tuples
+ - get_skill_loader() - Get the global SkillLoader instance
+ - list_skill_names() - Get list of skill names
+ - Skill - Skill data class
+ - SkillLoader - Skill loader class
+ """
+
+ from klaude_code.skill.loader import Skill, SkillLoader
+ from klaude_code.skill.manager import get_available_skills, get_skill, get_skill_loader, list_skill_names
+
+ __all__ = [
+     "Skill",
+     "SkillLoader",
+     "get_available_skills",
+     "get_skill",
+     "get_skill_loader",
+     "list_skill_names",
+ ]
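
A short usage sketch against the documented public API, assuming the tuple shape described in the docstring above:

```python
from klaude_code.skill import get_available_skills, get_skill

# Enumerate discovered skills; per the docstring each entry is a
# (name, description, location) tuple.
for name, description, location in get_available_skills():
    print(f"{name} ({location}): {description}")

deslop = get_skill("deslop")  # returns a Skill, e.g. the system skill added below
```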
klaude_code/skill/assets/deslop/SKILL.md
@@ -0,0 +1,17 @@
+ ---
+ name: deslop
+ description: Remove AI-generated code slop from files or diffs. Use this skill when reviewing AI-generated code to clean up unnecessary comments, defensive code, type hacks, and style inconsistencies. Triggers include "deslop", "remove slop", "clean up AI code", "review for slop".
+ metadata:
+   short-description: Remove AI code slop
+ ---
+
+ # Deslop
+
+ Remove AI-generated slop from code. Check the specified files or diff and remove:
+
+ - Extra comments that a human wouldn't add or are inconsistent with the rest of the file
+ - Extra defensive checks or try/catch blocks that are abnormal for that area of the codebase (especially if called by trusted/validated codepaths)
+ - Casts to `any` or `# type: ignore` to get around type issues
+ - Any other style that is inconsistent with the file
+
+ Report at the end with only a 1-3 sentence summary of what you changed.